repo_name: string (length 6–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
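The fields above describe one record per repository snapshot: repo_name is a string, while hexsha, file_path, code, and apis are parallel sequences (one entry per file), and possible_versions holds a per-file dictionary of candidate library versions. As a minimal sketch of reading such records, assuming JSON Lines storage and the hypothetical file name records.jsonl (the actual storage format is not specified in this dump):

```python
import json

# Hypothetical file name; the dataset's real storage format is not given here.
with open("records.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        print(record["repo_name"])
        # file_path, code, and apis are assumed to be parallel, one entry per file.
        for path, code, apis in zip(record["file_path"], record["code"], record["apis"]):
            print(f"  {path}: {len(code)} chars, {len(apis)} API calls recorded")
        print("  possible_versions:", record["possible_versions"])
        break  # inspect only the first record
```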
m30m/dgl
[ "2190c39d674f76c65db9ee8da7b43d3021f19c29" ]
[ "python/dgl/backend/pytorch/tensor.py" ]
[ "from __future__ import absolute_import\n\nfrom distutils.version import LooseVersion\n\nimport scipy # Weird bug in new pytorch when import scipy after import torch\nimport torch as th\nimport builtins\nfrom torch.utils import dlpack\n\nfrom ... import ndarray as nd\nfrom ... import kernel as K\nfrom ...function.base import TargetCode\nfrom ...base import dgl_warning\n\nif LooseVersion(th.__version__) < LooseVersion(\"1.2.0\"):\n dgl_warning(\"Detected an old version of PyTorch. Suggest using torch>=1.2.0 \"\n \"for the best experience.\")\n\ndef data_type_dict():\n return {'float16' : th.float16,\n 'float32' : th.float32,\n 'float64' : th.float64,\n 'uint8' : th.uint8,\n 'int8' : th.int8,\n 'int16' : th.int16,\n 'int32' : th.int32,\n 'int64' : th.int64,\n 'bool' : th.bool}\n\ndef cpu():\n return th.device('cpu')\n\ndef tensor(data, dtype=None):\n return th.tensor(data, dtype=dtype)\n\ndef as_scalar(data):\n return data.item()\n\ndef get_preferred_sparse_format():\n \"\"\"Get the preferred sparse matrix format supported by the backend.\n\n Different backends have their preferred backend. This info is useful when\n constructing a sparse matrix.\n \"\"\"\n return \"coo\"\n\ndef sparse_matrix(data, index, shape, force_format=False):\n fmt = index[0]\n if fmt != 'coo':\n raise TypeError('Pytorch backend only supports COO format. But got %s.' % fmt)\n spmat = th.sparse_coo_tensor(index[1], data, shape)\n return spmat, None\n\ndef sparse_matrix_indices(spmat):\n return ('coo', spmat._indices())\n\ndef is_tensor(obj):\n return isinstance(obj, th.Tensor)\n\ndef shape(input):\n return input.shape\n\ndef dtype(input):\n return input.dtype\n\ndef ndim(input):\n return input.dim()\n\ndef context(input):\n return input.device\n\ndef device_type(ctx):\n return ctx.type\n\ndef device_id(ctx):\n if ctx.index is None:\n return 0\n else:\n return ctx.index\n\ndef astype(input, ty):\n return input.type(ty)\n\ndef asnumpy(input):\n if isinstance(input, th.sparse.FloatTensor):\n return input.to_dense().cpu().detach().numpy()\n else:\n return input.cpu().detach().numpy()\n\ndef copy_to(input, ctx):\n if ctx.type == 'cpu':\n return input.cpu()\n elif ctx.type == 'cuda':\n if ctx.index is not None:\n th.cuda.set_device(ctx.index)\n return input.cuda()\n else:\n raise RuntimeError('Invalid context', ctx)\n\ndef sum(input, dim, keepdims=False):\n return th.sum(input, dim=dim, keepdim=keepdims)\n\ndef reduce_sum(input):\n return input.sum()\n\ndef mean(input, dim):\n return th.mean(input, dim=dim)\n\ndef reduce_mean(input):\n return input.mean()\n\ndef max(input, dim):\n # NOTE: the second argmax array is not returned\n return th.max(input, dim=dim)[0]\n\ndef reduce_max(input):\n return input.max()\n\ndef min(input, dim):\n # NOTE: the second argmin array is not returned\n return th.min(input, dim=dim)[0]\n\ndef reduce_min(input):\n return input.min()\n\ndef argsort(input, dim, descending):\n return th.argsort(input, dim=dim, descending=descending)\n\ndef topk(input, k, dim, descending=True):\n return th.topk(input, k, dim, largest=descending)[0]\n\ndef argtopk(input, k, dim, descending=True):\n return th.topk(input, k, dim, largest=descending)[1]\n\ndef exp(input):\n return th.exp(input)\n\ndef softmax(input, dim=-1):\n return th.softmax(input, dim=dim)\n\ndef cat(seq, dim):\n return th.cat(seq, dim=dim)\n\ndef stack(seq, dim):\n return th.stack(seq, dim=dim)\n\ndef split(input, sizes_or_sections, dim):\n return th.split(input, sizes_or_sections, dim)\n\ndef repeat(input, repeats, dim):\n # return 
th.repeat_interleave(input, repeats, dim) # PyTorch 1.1\n if dim < 0:\n dim += input.dim()\n return th.flatten(th.stack([input] * repeats, dim=dim+1), dim, dim+1)\n\ndef gather_row(data, row_index):\n return th.index_select(data, 0, row_index)\n\ndef slice_axis(data, axis, begin, end):\n return th.narrow(data, axis, begin, end - begin)\n\ndef take(data, indices, dim):\n new_shape = data.shape[:dim] + indices.shape + data.shape[dim+1:]\n return th.index_select(data, dim, indices.view(-1)).view(new_shape)\n\ndef narrow_row(x, start, stop):\n return x[start:stop]\n\ndef scatter_row(data, row_index, value):\n return data.index_copy(0, row_index, value)\n\ndef scatter_row_inplace(data, row_index, value):\n data[row_index] = value\n\ndef squeeze(input, dim):\n return th.squeeze(input, dim)\n\ndef unsqueeze(input, dim):\n return th.unsqueeze(input, dim)\n\ndef reshape(input, shape):\n return th.reshape(input ,shape)\n\ndef swapaxes(input, axis1, axis2):\n return th.transpose(input, axis1, axis2)\n\ndef zeros(shape, dtype, ctx):\n return th.zeros(shape, dtype=dtype, device=ctx)\n\ndef zeros_like(input):\n return th.zeros_like(input)\n\ndef ones(shape, dtype, ctx):\n return th.ones(shape, dtype=dtype, device=ctx)\n\ndef uniform(shape, dtype, ctx, low, high):\n return th.empty(shape, dtype=dtype, device=ctx).uniform_(low, high)\n\ndef pad_packed_tensor(input, lengths, value, l_min=None):\n old_shape = input.shape\n if isinstance(lengths, th.Tensor):\n max_len = as_scalar(lengths.max())\n else:\n max_len = builtins.max(lengths)\n\n if l_min is not None:\n max_len = builtins.max(max_len, l_min)\n\n batch_size = len(lengths)\n device = input.device\n x = input.new(batch_size * max_len, *old_shape[1:])\n x.fill_(value)\n index = []\n for i, l in enumerate(lengths):\n index.extend(range(i * max_len, i * max_len + l))\n index = th.tensor(index).to(device)\n return scatter_row(x, index, input).view(batch_size, max_len, *old_shape[1:])\n\ndef pack_padded_tensor(input, lengths):\n batch_size, max_len = input.shape[:2]\n device = input.device\n index = []\n for i, l in enumerate(lengths):\n index.extend(range(i * max_len, i * max_len + l))\n index = th.tensor(index).to(device)\n return gather_row(input.view(batch_size * max_len, -1), index)\n\ndef unsorted_1d_segment_sum(input, seg_id, n_segs, dim):\n y = th.zeros(n_segs, *input.shape[1:]).to(input)\n seg_id = seg_id.view((-1,) + (1,) * (input.dim() - 1)).expand_as(input)\n y = y.scatter_add_(dim, seg_id, input)\n return y\n\ndef unsorted_1d_segment_mean(input, seg_id, n_segs, dim):\n w = unsorted_1d_segment_sum(th.ones_like(seg_id), seg_id, n_segs, 0).to(input)\n w = w.clamp(min=1) # remove 0 entries\n y = unsorted_1d_segment_sum(input, seg_id, n_segs, dim)\n y = y / w.view((-1,) + (1,) * (y.dim() - 1))\n return y\n\ndef boolean_mask(input, mask):\n return input[mask]\n\ndef equal(x, y):\n return x == y\n\ndef logical_not(input):\n return ~input\n\ndef clone(input):\n return input.clone()\n\ndef unique(input):\n return th.unique(input)\n\ndef full_1d(length, fill_value, dtype, ctx):\n return th.full((length,), fill_value, dtype=dtype, device=ctx)\n\ndef nonzero_1d(input):\n x = th.nonzero(input).squeeze()\n return x if x.dim() == 1 else x.view(-1)\n\ndef sort_1d(input):\n return th.sort(input)\n\ndef arange(start, stop):\n return th.arange(start, stop, dtype=th.int64)\n\ndef rand_shuffle(arr):\n idx = th.randperm(len(arr))\n return arr[idx]\n\ndef zerocopy_to_dlpack(input):\n return dlpack.to_dlpack(input.contiguous())\n\ndef 
zerocopy_from_dlpack(dlpack_tensor):\n return dlpack.from_dlpack(dlpack_tensor)\n\ndef zerocopy_to_numpy(input):\n # NOTE: not zerocopy\n return asnumpy(input)\n\ndef zerocopy_from_numpy(np_array):\n return th.as_tensor(np_array)\n\ndef zerocopy_to_dgl_ndarray(input):\n return nd.from_dlpack(dlpack.to_dlpack(input.contiguous()))\n\ndef zerocopy_from_dgl_ndarray(input):\n return dlpack.from_dlpack(input.to_dlpack())\n\n\n\nclass BinaryReduce(th.autograd.Function):\n @staticmethod\n def forward(ctx, reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data, out_data,\n out_size, lhs_map, rhs_map, out_map):\n lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)\n rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)\n feat_shape = K.infer_binary_feature_shape(binary_op, lhs_data_nd, rhs_data_nd)\n out_shape = feat_shape\n if binary_op == 'dot':\n out_shape = feat_shape[:-1]\n out_data_nd = zerocopy_to_dgl_ndarray(out_data)\n K.binary_op_reduce(\n reducer if reducer != 'mean' else 'sum',\n binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,\n out_data_nd, lhs_map[0], rhs_map[0], out_map[0])\n # normalize if mean reducer\n # NOTE(zihao): this is a temporary hack and we should have better solution in the future.\n if reducer == 'mean':\n degs = lhs_data.new_empty((out_data.shape[0],))\n degs_nd = zerocopy_to_dgl_ndarray(degs)\n if lhs != TargetCode.DST: # src or edge\n target = lhs\n n = lhs_data.shape[0]\n in_map = lhs_map[0]\n else: # rhs != TargetCode.DST\n target = rhs\n n = rhs_data.shape[0]\n in_map = rhs_map[0]\n in_ones = lhs_data.new_ones((n,))\n in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)\n K.copy_reduce(\n 'sum', graph, target, in_ones_nd, degs_nd, in_map, out_map[0])\n # reshape\n degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.dim() - 1)).clamp(min=1)\n out_data = out_data / degs\n else:\n degs = None\n # save_for_backward can only save variables\n ctx.backward_cache = (reducer, binary_op, graph, lhs, rhs, lhs_map,\n rhs_map, out_map, feat_shape, degs)\n ctx.save_for_backward(lhs_data, rhs_data, out_data)\n return out_data\n\n @staticmethod\n def backward(ctx, grad_out):\n reducer, binary_op, graph, lhs, rhs, lhs_map, rhs_map, out_map, \\\n feat_shape, degs = ctx.backward_cache\n lhs_data, rhs_data, out_data = ctx.saved_tensors\n lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)\n rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)\n out_data_nd = zerocopy_to_dgl_ndarray(out_data)\n grad_lhs = None\n grad_rhs = None\n if reducer == 'mean':\n grad_out = grad_out / degs\n grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)\n if ctx.needs_input_grad[5]:\n grad_lhs = grad_out.new_empty((lhs_data_nd.shape[0],) + feat_shape)\n K.backward_lhs_binary_op_reduce(\n reducer if reducer != 'mean' else 'sum',\n binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,\n out_data_nd, grad_out_nd, zerocopy_to_dgl_ndarray(grad_lhs),\n lhs_map[1], rhs_map[1], out_map[1])\n grad_lhs = _reduce_grad(grad_lhs, lhs_data_nd.shape)\n if ctx.needs_input_grad[6]:\n grad_rhs = grad_out.new_empty((rhs_data_nd.shape[0],) + feat_shape)\n K.backward_rhs_binary_op_reduce(\n reducer if reducer != 'mean' else 'sum',\n binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,\n out_data_nd, grad_out_nd, zerocopy_to_dgl_ndarray(grad_rhs),\n lhs_map[1], rhs_map[1], out_map[1])\n grad_rhs = _reduce_grad(grad_rhs, rhs_data_nd.shape)\n\n return None, None, None, None, None, grad_lhs, grad_rhs, None, None, None, \\\n None, None\n\n\ndef binary_reduce(reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data,\n out_size, lhs_map=(None, 
None), rhs_map=(None, None), out_map=(None, None)):\n lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)\n rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)\n feat_shape = K.infer_binary_feature_shape(binary_op, lhs_data_nd, rhs_data_nd)\n\n out_shape = feat_shape\n if binary_op == 'dot':\n out_shape = feat_shape[:-1]\n out_data = lhs_data.new_empty((out_size,) + out_shape)\n\n return BinaryReduce.apply(\n reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data, out_data,\n out_size, lhs_map, rhs_map, out_map)\n\n\nclass CopyReduce(th.autograd.Function):\n @staticmethod\n def forward(ctx, reducer, graph, target, in_data, out_data, out_size, in_map,\n out_map):\n in_data_nd = zerocopy_to_dgl_ndarray(in_data)\n out_data_nd = zerocopy_to_dgl_ndarray(out_data)\n K.copy_reduce(\n reducer if reducer != 'mean' else 'sum',\n graph, target, in_data_nd, out_data_nd, in_map[0], out_map[0])\n # normalize if mean reducer\n # NOTE(zihao): this is a temporary hack and we should have better solution in the future.\n if reducer == 'mean':\n in_ones = in_data.new_ones((in_data.shape[0],))\n degs = in_data.new_empty((out_data.shape[0],))\n in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)\n degs_nd = zerocopy_to_dgl_ndarray(degs)\n K.copy_reduce(\n 'sum', graph, target, in_ones_nd, degs_nd, in_map[0], out_map[0])\n # reshape\n degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.dim() - 1)).clamp(min=1)\n out_data = out_data / degs\n else:\n degs = None\n # save_for_backward can only save variables\n ctx.backward_cache = (reducer, graph, target, in_map, out_map, degs)\n ctx.save_for_backward(in_data, out_data)\n return out_data\n\n @staticmethod\n def backward(ctx, grad_out):\n reducer, graph, target, in_map, out_map, degs = ctx.backward_cache\n in_data, out_data = ctx.saved_tensors\n in_data_nd = zerocopy_to_dgl_ndarray(in_data)\n out_data_nd = zerocopy_to_dgl_ndarray(out_data)\n grad_in = None\n if reducer == 'mean':\n grad_out = grad_out / degs\n grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)\n if ctx.needs_input_grad[3]:\n grad_in = grad_out.new_empty(in_data_nd.shape)\n K.backward_copy_reduce(\n reducer if reducer != 'mean' else 'sum',\n graph, target, in_data_nd, out_data_nd, grad_out_nd,\n zerocopy_to_dgl_ndarray(grad_in), in_map[1], out_map[1])\n return None, None, None, grad_in, None, None, None, None\n\n\ndef copy_reduce(reducer, graph, target, in_data, out_size, in_map=(None, None),\n out_map=(None, None)):\n out_data = in_data.new_empty((out_size,) + in_data.shape[1:])\n return CopyReduce.apply(reducer, graph, target, in_data, out_data, out_size, in_map, out_map)\n\n\ndef _reduce_grad(grad, shape):\n \"\"\"Reduce gradient on the broadcast dimension\n\n If there is broadcast in forward pass, gradients need to be reduced on\n broadcast dimension. This function checks the input tensor shape and\n gradient shape and perform the reduction.\n\n Parameters\n ----------\n grad: Tensor\n Gradient tensor\n shape: tuple\n Shape of input tensor\n\n Returns\n -------\n Tensor\n \"\"\"\n grad_shape = grad.shape[1:]\n in_shape = shape[1:]\n if in_shape == grad_shape:\n # no need to reduce\n return grad\n num_to_squeeze = len(grad_shape) - len(in_shape)\n # pad inshape\n in_shape = (1,) * num_to_squeeze + in_shape\n reduce_idx = th.nonzero(th.tensor(grad_shape) - th.tensor(in_shape))\n reduce_idx += 1 # skip batch dim\n grad = grad.sum(dim=tuple(reduce_idx), keepdim=True)\n return grad.view(shape)\n\ndef sync():\n # Pytorch performs computation synchronously, so no need for synchronization.\n pass\n" ]
[ [ "torch.mean", "torch.transpose", "torch.max", "torch.cat", "torch.zeros", "torch.sum", "torch.narrow", "torch.sparse_coo_tensor", "torch.unique", "torch.split", "torch.device", "torch.topk", "torch.softmax", "torch.ones", "torch.reshape", "torch.tensor", "torch.sort", "torch.utils.dlpack.from_dlpack", "torch.arange", "torch.nonzero", "torch.argsort", "torch.index_select", "torch.ones_like", "torch.squeeze", "torch.empty", "torch.full", "torch.min", "torch.zeros_like", "torch.unsqueeze", "torch.exp", "torch.stack", "torch.as_tensor", "torch.cuda.set_device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
echaussidon/desispec
[ "8a8bd59653861509dd630ffc8e1cd6c67f6cdd51", "8a8bd59653861509dd630ffc8e1cd6c67f6cdd51", "8a8bd59653861509dd630ffc8e1cd6c67f6cdd51", "8a8bd59653861509dd630ffc8e1cd6c67f6cdd51", "8a8bd59653861509dd630ffc8e1cd6c67f6cdd51" ]
[ "py/desispec/pipeline/db.py", "py/desispec/scripts/humidity_corrected_fiberflat.py", "py/desispec/qa/qa_quicklook.py", "py/desispec/workflow/timing.py", "py/desispec/scripts/editexptable.py" ]
[ "#\n# See top-level LICENSE.rst file for Copyright information\n#\n# -*- coding: utf-8 -*-\n\"\"\"\ndesispec.pipeline.db\n===========================\n\nPipeline processing database\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\n\nimport re\nfrom collections import OrderedDict\n\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom desiutil.log import get_logger\n\nfrom .. import io\n\nimport fitsio\n\nfrom .defs import (task_states, task_int_to_state, task_state_to_int, task_name_sep)\n\n\ndef all_task_types():\n \"\"\"Get the list of possible task types that are supported.\n\n Returns:\n list: The list of supported task types.\n\n \"\"\"\n from . import tasks\n from .tasks.base import default_task_chain\n ttypes = [\"fibermap\", \"rawdata\"]\n ttypes.extend(tasks.base.default_task_chain)\n # Insert qadata after cframe\n idx = ttypes.index('cframe')\n ttypes.insert(idx+1, 'qadata')\n return ttypes\n\n\ndef task_sort(tasks):\n \"\"\"Sort a list of tasks by type.\n\n This takes a list of arbitrary tasks and sorts them by type. The result\n is placed in an ordered dictionary of lists in run order.\n\n Args:\n tasks (list): the list of input tasks.\n\n Returns:\n (OrderedDict): ordered dictionary of tasks sorted by type.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n sort = dict()\n ttypes = all_task_types()\n for tp in ttypes:\n sort[tp] = list()\n\n for tsk in tasks:\n sort[task_type(tsk)].append(tsk)\n\n ret = OrderedDict()\n for tp in ttypes:\n if len(sort[tp]) > 0:\n ret[tp] = sort[tp]\n return ret\n\n\ndef all_tasks(night, nside, expid=None):\n \"\"\"Get all possible tasks for a single night.\n\n This uses the filesystem to query the raw data for a particular night and\n return a dictionary containing all possible tasks for each task type. For\n objects which span multiple nights (e.g. 
spectra, redrock), this returns the\n tasks which are touched by the given night.\n\n Args:\n night (str): The night to scan for tasks.\n nside (int): The HEALPix NSIDE value to use.\n expid (int): Only get tasks for this single exposure.\n\n Returns:\n dict: a dictionary whose keys are the task types and where each value\n is a list of task properties.\n\n \"\"\"\n import desimodel.footprint\n\n log = get_logger()\n\n log.debug(\"io.get_exposures night={}\".format(night))\n\n expids = io.get_exposures(night, raw=True)\n\n full = dict()\n for t in all_task_types():\n full[t] = list()\n\n healpix_frames = []\n\n if expid is not None:\n if expid not in expids:\n raise RuntimeError(\"exposure ID {} not valid for night {}\"\\\n .format(expid, night))\n expids = [ expid ]\n\n for ex in sorted(expids):\n\n # get the fibermap for this exposure\n fibermap = io.get_raw_files(\"fibermap\", night, ex)\n\n log.debug(\"read {}\".format(fibermap))\n\n fmdata = io.read_fibermap(fibermap)\n header = fmdata.meta\n\n # fmdata, header = fitsio.read(fibermap, 'FIBERMAP', header=True)\n flavor = header[\"FLAVOR\"].strip().lower()\n if flavor not in [\"arc\",\"flat\",\"science\"] :\n log.error(\"Do not know what do to with fibermap flavor '{}' for file '{}\".format(flavor,fibermap))\n raise ValueError(\"Do not know what do to with fibermap flavor '{}' for file '{}\".format(flavor,fibermap))\n\n fmpix = dict()\n if (flavor != \"arc\") and (flavor != \"flat\"):\n # This will be used to track which healpix pixels are\n # touched by fibers from each spectrograph.\n ra = np.array(fmdata[\"TARGET_RA\"], dtype=np.float64)\n dec = np.array(fmdata[\"TARGET_DEC\"], dtype=np.float64)\n\n # rm NaN (possible depending on versions of fiberassign)\n valid_coordinates = (np.isnan(ra)==False)&(np.isnan(dec)==False)\n\n for spectro in np.unique( fmdata[\"SPECTROID\"] ) :\n ii=np.where(fmdata[\"SPECTROID\"][valid_coordinates]==spectro)[0]\n if ii.size == 0 : continue\n pixels = desimodel.footprint.radec2pix(nside, ra[valid_coordinates][ii], dec[valid_coordinates][ii])\n for pixel in np.unique(pixels) :\n props = dict()\n props[\"night\"] = int(night)\n props[\"expid\"] = int(ex)\n props[\"spec\"] = spectro\n props[\"nside\"] = nside\n props[\"pixel\"] = pixel\n props[\"ntargets\"] = np.sum(pixels==pixel)\n healpix_frames.append(props)\n # all spectro at once\n pixels = np.unique(desimodel.footprint.radec2pix(nside, ra[valid_coordinates], dec[valid_coordinates]))\n for pixel in pixels :\n props = dict()\n props[\"pixel\"] = pixel\n props[\"nside\"] = nside\n props[\"state\"] = \"waiting\"\n exists=False\n for entry in full[\"spectra\"] :\n if entry[\"pixel\"]==props[\"pixel\"] :\n exists=True\n break\n if not exists : full[\"spectra\"].append(props)\n exists=False\n for entry in full[\"redshift\"] :\n if entry[\"pixel\"]==props[\"pixel\"] :\n exists=True\n break\n if not exists : full[\"redshift\"].append(props)\n\n fmprops = dict()\n fmprops[\"night\"] = int(night)\n fmprops[\"expid\"] = int(ex)\n fmprops[\"flavor\"] = flavor\n fmprops[\"state\"] = \"done\"\n\n full[\"fibermap\"].append(fmprops)\n\n rdprops = dict()\n rdprops[\"night\"] = int(night)\n rdprops[\"expid\"] = int(ex)\n rdprops[\"flavor\"] = flavor\n rdprops[\"state\"] = \"done\"\n\n full[\"rawdata\"].append(rdprops)\n\n # Add the preprocessed pixel files\n for band in ['b', 'r', 'z']:\n # need to open the rawdata file to see how many spectros\n # and cameras are there\n for spec in np.unique( fmdata[\"SPECTROID\"] ) :\n pixprops = dict()\n pixprops[\"night\"] = 
int(night)\n pixprops[\"band\"] = band\n pixprops[\"spec\"] = spec\n pixprops[\"expid\"] = int(ex)\n pixprops[\"flavor\"] = flavor\n pixprops[\"state\"] = \"ready\"\n full[\"preproc\"].append(pixprops)\n\n if flavor == \"arc\" :\n # Add the PSF files\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"expid\"] = int(ex)\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"psf\"].append(props)\n\n # Add a PSF night file if does not exist\n exists=False\n for entry in full[\"psfnight\"] :\n if entry[\"night\"]==props[\"night\"] \\\n and entry[\"band\"]==props[\"band\"] \\\n and entry[\"spec\"]==props[\"spec\"] :\n exists=True\n break\n if not exists :\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"psfnight\"].append(props)\n\n if flavor != \"arc\" :\n # Add extractions\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"expid\"] = int(ex)\n props[\"state\"] = \"waiting\" # see defs.task_states\n\n # Add traceshift\n full[\"traceshift\"].append(props)\n\n # Add extractions\n full[\"extract\"].append(props)\n\n if flavor == \"flat\" :\n # Add a fiberflat task\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"expid\"] = int(ex)\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"fiberflat\"].append(props)\n # Add a fiberflat night file if does not exist\n exists=False\n for entry in full[\"fiberflatnight\"] :\n if entry[\"night\"]==props[\"night\"] \\\n and entry[\"band\"]==props[\"band\"] \\\n and entry[\"spec\"]==props[\"spec\"] :\n exists=True\n break\n if not exists :\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"fiberflatnight\"].append(props)\n\n if flavor != \"arc\" and flavor != \"flat\":\n # Add sky\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"expid\"] = int(ex)\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"sky\"].append(props)\n # Add fluxcalib\n full[\"fluxcalib\"].append(props)\n # Add cframe\n full[\"cframe\"].append(props)\n # Add QA\n full[\"qadata\"].append(props)\n\n # Add starfit if does not exist\n exists=False\n for entry in full[\"starfit\"] :\n if entry[\"night\"]==props[\"night\"] \\\n and entry[\"expid\"]==props[\"expid\"] \\\n and entry[\"spec\"]==props[\"spec\"] :\n exists=True\n break\n if not exists :\n props = dict()\n props[\"night\"] = int(night)\n props[\"expid\"] = int(ex)\n props[\"spec\"] = spec\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"starfit\"].append(props)\n\n log.debug(\"done\")\n return full , healpix_frames\n\n\ndef check_tasks(tasklist, db=None, inputs=None):\n \"\"\"Check a list of tasks and return their state.\n\n If the database is specified, it is used to check the state of the tasks\n and their dependencies. 
Otherwise the filesystem is checked.\n\n Args:\n tasklist (list): list of tasks.\n db (pipeline.db.DB): The optional database to use.\n inputs (dict): optional dictionary containing the only input\n dependencies that should be considered.\n\n Returns:\n dict: The current state of all tasks.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n states = dict()\n\n if db is None:\n # Check the filesystem to see which tasks are done. Since we don't\n # have a DB, we can only distinguish between \"waiting\", \"ready\", and\n # \"done\" states.\n for tsk in tasklist:\n tasktype = task_type(tsk)\n st = \"waiting\"\n\n # Check dependencies\n deps = task_classes[tasktype].deps(tsk, db=db, inputs=inputs)\n\n if len(deps)==0 :\n # do not set state to ready of tasks with 0 dependencies\n ready = False\n else :\n ready = True\n for k, v in deps.items():\n if not isinstance(v, list):\n v = [ v ]\n for dp in v:\n deptype = task_type(dp)\n depfiles = task_classes[deptype].paths(dp)\n for odep in depfiles:\n if not os.path.isfile(odep):\n ready = False\n break\n if ready:\n st = \"ready\"\n\n done = True\n # Check outputs\n outfiles = task_classes[tasktype].paths(tsk)\n for out in outfiles:\n if not os.path.isfile(out):\n done = False\n break\n if done:\n st = \"done\"\n\n states[tsk] = st\n else:\n states = db.get_states(tasklist)\n\n return states\n\n\nclass DataBase:\n \"\"\"Class for tracking pipeline processing objects and state.\n \"\"\"\n def __init__(self):\n self._conn = None\n return\n\n\n def get_states_type(self, tasktype, tasks):\n \"\"\"Efficiently get the state of many tasks of a single type.\n\n Args:\n tasktype (str): the type of these tasks.\n tasks (list): list of task names.\n\n Returns:\n dict: the state of each task.\n\n \"\"\"\n states = None\n namelist = \",\".join([ \"'{}'\".format(x) for x in tasks ])\n\n log = get_logger()\n log.debug(\"opening db\")\n\n with self.cursor() as cur:\n log.debug(\"selecting in db\")\n cur.execute(\\\n 'select name, state from {} where name in ({})'.format(tasktype,\n namelist))\n st = cur.fetchall()\n log.debug(\"done\")\n states = { x[0] : task_int_to_state[x[1]] for x in st }\n return states\n\n\n def count_task_states(self, tasktype):\n \"\"\"Return a dictionary of how many tasks are in each state\n\n Args:\n tasktype (str): the type of these tasks.\n\n Returns:\n dict: keyed by state, values are number of tasks in that state0\n \"\"\"\n state_count = OrderedDict()\n for state in task_states:\n state_count[state] = 0\n\n with self.cursor() as cur:\n cur.execute( 'select name, state from {}'.format(tasktype))\n for name, intstate in cur.fetchall():\n state_count[task_int_to_state[intstate]] += 1\n\n return state_count\n\n\n def get_states(self, tasks):\n \"\"\"Efficiently get the state of many tasks at once.\n\n Args:\n tasks (list): list of task names.\n\n Returns:\n dict: the state of each task.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n\n # Sort by type\n taskbytype = task_sort(tasks)\n\n # Get state of each type\n states = dict()\n for t, tlist in taskbytype.items():\n states.update(self.get_states_type(t, tlist))\n\n return states\n\n\n def set_states_type(self, tasktype, tasks, postprocessing=True):\n \"\"\"Efficiently get the state of many tasks of a single type.\n\n Args:\n tasktype (str): the type of these tasks.\n tasks (list): list of tuples containing the task name and the\n state to set.\n\n Returns:\n Nothing.\n\n \"\"\"\n from .tasks.base import task_classes\n\n log = get_logger()\n log.debug(\"opening 
db\")\n\n with self.cursor() as cur:\n log.debug(\"updating in db\")\n for tsk in tasks:\n cur.execute(\"update {} set state = {} where name = '{}'\".format(tasktype, task_state_to_int[tsk[1]], tsk[0]))\n if postprocessing and tsk[1]==\"done\" :\n task_classes[tasktype].postprocessing(db=self,name=tsk[0],cur=cur)\n log.debug(\"done\")\n return\n\n\n def set_states(self, tasks):\n \"\"\"Efficiently set the state of many tasks at once.\n\n Args:\n tasks (list): list of tuples containing the task name and the\n state to set.\n\n Returns:\n Nothing.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n # First find the type of each task.\n ttypes = dict()\n for tsk in tasks:\n ttypes[tsk[0]] = task_type(tsk[0])\n\n # Sort tasks into types\n taskbytype = dict()\n for t in all_task_types():\n taskbytype[t] = list()\n for tsk in tasks:\n taskbytype[ttypes[tsk[0]]].append(tsk)\n\n # Process each type\n for t, tlist in taskbytype.items():\n if len(tlist) > 0:\n self.set_states_type(t, tlist)\n return\n\n\n def get_submitted(self, tasks):\n \"\"\"Return the submitted flag for the list of tasks.\n\n Args:\n tasks (list): list of task names.\n\n Returns:\n (dict): the boolean submitted state of each task (True means that\n the task has been submitted).\n\n \"\"\"\n from .tasks.base import task_type\n # Sort by type\n taskbytype = task_sort(tasks)\n\n # Process each type\n submitted = dict()\n for t, tlist in taskbytype.items():\n if (t == \"spectra\") or (t == \"redshift\"):\n raise RuntimeError(\"spectra and redshift tasks do not have submitted flag.\")\n namelist = \",\".join([ \"'{}'\".format(x) for x in tlist ])\n with self.cursor() as cur:\n cur.execute(\\\n 'select name, submitted from {} where name in ({})'.format(t, namelist))\n sb = cur.fetchall()\n submitted.update({ x[0] : x[1] for x in sb })\n return submitted\n\n\n def set_submitted_type(self, tasktype, tasks, unset=False):\n \"\"\"Flag a list of tasks of a single type as submitted.\n\n Args:\n tasktype (str): the type of these tasks.\n tasks (list): list of task names.\n unset (bool): if True, invert the behavior and unset the submitted\n flag for these tasks.\n\n Returns:\n Nothing.\n\n \"\"\"\n val = 1\n if unset:\n val = 0\n with self.cursor() as cur:\n for tsk in tasks:\n cur.execute(\"update {} set submitted = {} where name = '{}'\".format(tasktype, val, tsk))\n return\n\n\n def set_submitted(self, tasks, unset=False):\n \"\"\"Flag a list of tasks as submitted.\n\n Args:\n tasks (list): list of task names.\n unset (bool): if True, invert the behavior and unset the submitted\n flag for these tasks.\n\n Returns:\n Nothing.\n\n \"\"\"\n from .tasks.base import task_type\n # Sort by type\n taskbytype = task_sort(tasks)\n\n # Process each type\n for t, tlist in taskbytype.items():\n if (t == \"spectra\") or (t == \"redshift\"):\n raise RuntimeError(\"spectra and redshift tasks do not have submitted flag.\")\n self.set_submitted_type(tlist, unset=unset)\n return\n\n\n def update(self, night, nside, expid=None):\n \"\"\"Update DB based on raw data.\n\n This will use the usual io.meta functions to find raw exposures. 
For\n each exposure, the fibermap and all following objects will be added to\n the DB.\n\n Args:\n night (str): The night to scan for updates.\n nside (int): The current NSIDE value used for pixel grouping.\n expid (int): Only update the DB for this exposure.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n\n log = get_logger()\n\n alltasks, healpix_frames = all_tasks(night, nside, expid=expid)\n\n with self.cursor() as cur:\n # insert or ignore all healpix_frames\n log.debug(\"updating healpix_frame ...\")\n for entry in healpix_frames:\n # see if we already have this entry\n cmd = \"select exists(select 1 from healpix_frame where (expid = {} and spec = {} and nside = {} and pixel = {} ))\".format(entry[\"expid\"], entry[\"spec\"], entry[\"nside\"], entry[\"pixel\"])\n cur.execute(cmd)\n have_row = cur.fetchone()[0]\n\n if not have_row:\n cur.execute(\"insert into healpix_frame (night,expid,spec,nside,pixel,ntargets,state) values({},{},{},{},{},{},{})\".format(entry[\"night\"],entry[\"expid\"],entry[\"spec\"],entry[\"nside\"],entry[\"pixel\"],entry[\"ntargets\"],0))\n\n # read what is already in db\n tasks_in_db = {}\n for tt in all_task_types():\n cur.execute(\"select name from {}\".format(tt))\n tasks_in_db[tt] = [ x for (x, ) in cur.fetchall()]\n\n for tt in all_task_types():\n log.debug(\"updating {} ...\".format(tt))\n for tsk in alltasks[tt]:\n tname = task_classes[tt].name_join(tsk)\n if tname not in tasks_in_db[tt] :\n log.debug(\"adding {}\".format(tname))\n task_classes[tt].insert(cur, tsk)\n\n return\n\n\n def sync(self, night, specdone=False):\n \"\"\"Update states of tasks based on filesystem.\n\n Go through all tasks in the DB for the given night and determine their\n state on the filesystem. Then update the DB state to match.\n\n Args:\n night (str): The night to scan for updates.\n specdone: If true, set spectra to done if files exist.\n \"\"\"\n from .tasks.base import task_classes\n log = get_logger()\n\n # Get the list of task types excluding spectra and redshifts,\n # which will be handled separately.\n ttypes = [ t for t in all_task_types() if (t != \"spectra\") \\\n and (t != \"redshift\") ]\n\n tasks_in_db = None\n # Grab existing nightly tasks\n with self.cursor() as cur:\n tasks_in_db = {}\n for tt in ttypes:\n cur.execute(\"select name from {} where night = {}\"\\\n .format(tt, night))\n tasks_in_db[tt] = [ x for (x, ) in cur.fetchall() ]\n\n # For each task type, check status WITHOUT the DB, then set state.\n # Save out the cframe states for later use with the healpix_frame table\n cfstates = None\n for tt in ttypes:\n tstates = check_tasks(tasks_in_db[tt], db=None)\n st = [ (x, tstates[x]) for x in tasks_in_db[tt] ]\n self.set_states_type(tt, st)\n if tt == \"cframe\":\n cfstates = tstates.copy()\n\n # Now examine the spectra and redshift files. If the files exist,\n # we assume they are done and completely up to date. 
If the files\n # are not up to date, they must be manually deleted in order for the\n # sync to correctly reconstruct the database state.\n\n pixrows = self.select_healpix_frame({\"night\" : night})\n # First check the existence of the files touched by this night\n spec_exists = dict()\n red_exists = dict()\n for row in pixrows:\n if row[\"pixel\"] in spec_exists:\n continue\n spec_name = task_classes[\"spectra\"].name_join(row)\n red_name = task_classes[\"redshift\"].name_join(row)\n\n # Check spectra outputs\n outfiles = task_classes[\"spectra\"].paths(spec_name)\n spec_exists[row[\"pixel\"]] = True\n for out in outfiles:\n if not os.path.isfile(out):\n spec_exists[row[\"pixel\"]] = False\n break\n\n # Check redshift outputs\n outfiles = task_classes[\"redshift\"].paths(red_name)\n red_exists[row[\"pixel\"]] = True\n for out in outfiles:\n if not os.path.isfile(out):\n red_exists[row[\"pixel\"]] = False\n break\n\n # Now use all this info. Some internal helpers to avoid code\n # duplication\n def set_hpx_frame_0(row, spec, red, cur):\n self.update_healpix_frame_state(row, 0, cur)\n task_classes[\"spectra\"].state_set(\n self, spec, \"waiting\", cur)\n task_classes[\"redshift\"].state_set(\n self, red, \"waiting\", cur)\n return\n\n def set_hpx_frame_1(row, spec, red, cur):\n self.update_healpix_frame_state(row, 1, cur)\n # getready() will do this for us:\n #task_classes[\"spectra\"].state_set(\n # self, spec, \"ready\", cur)\n task_classes[\"redshift\"].state_set(\n self, red, \"waiting\", cur)\n return\n\n def set_hpx_frame_2(row, spec, red, cur):\n self.update_healpix_frame_state(row, 2, cur)\n task_classes[\"spectra\"].state_set(\n self, spec, \"done\", cur)\n # getready() will do this:\n #task_classes[\"redshift\"].state_set(\n # self, red, \"ready\", cur)\n return\n\n def set_hpx_frame_3(row, spec, red, cur):\n self.update_healpix_frame_state(row, 3, cur)\n task_classes[\"spectra\"].state_set(\n self, spec, \"done\", cur)\n task_classes[\"redshift\"].state_set(\n self, red, \"done\", cur)\n return\n\n with self.cursor() as cur:\n for row in pixrows:\n cfdone = True\n cfprops = row.copy()\n for band in [\"b\", \"r\", \"z\"]:\n cfprops[\"band\"] = band\n cf_name = task_classes[\"cframe\"].name_join(cfprops)\n if cfstates[cf_name] != \"done\":\n cfdone = False\n\n spec_name = task_classes[\"spectra\"].name_join(row)\n red_name = task_classes[\"redshift\"].name_join(row)\n\n if (not cfdone) and (not specdone) :\n # The cframes do not exist, so reset the state of the\n # spectra and redshift tasks.\n set_hpx_frame_0(row, spec_name, red_name, cur)\n else:\n # The cframe exists...\n if spec_exists[row[\"pixel\"]]:\n if red_exists[row[\"pixel\"]]:\n # We are all done (state 3)\n set_hpx_frame_3(row, spec_name, red_name, cur)\n else:\n # We are only at state 2\n set_hpx_frame_2(row, spec_name, red_name, cur)\n else:\n # We are at just at state 1\n set_hpx_frame_1(row, spec_name, red_name, cur)\n\n # Update ready state of tasks\n self.getready(night=night)\n\n return\n\n\n def cleanup(self, tasktypes=None, expid=None, cleanfailed=False,\n cleansubmitted=False):\n \"\"\"Reset states of tasks.\n\n Any tasks that are marked as \"running\" will have their\n state reset to \"ready\". This can be called if a job dies before\n completing all tasks.\n\n Args:\n tasktypes (list): if not None, clean up only tasks of these types.\n expid (int): if not None, only clean tasks related to this\n exposure ID. 
Note that tasks which are independent of\n an expid (psfnight, fiberflatnight, spectra, redshift)\n will be ignored if this option is given.\n cleanfailed (bool): if True, also reset failed tasks to ready.\n cleansubmitted (bool): if True, set submitted flag to False.\n\n \"\"\"\n tasks_running = None\n\n alltypes = all_task_types()\n ttypes = None\n if tasktypes is None:\n ttypes = alltypes\n else:\n for tt in tasktypes:\n if tt not in alltypes:\n raise RuntimeError(\"Cannot clean invalid task type {}\"\\\n .format(tt))\n ttypes = tasktypes\n\n # Grab existing nightly tasks\n with self.cursor() as cur:\n tasks_running = {}\n for tt in ttypes:\n hasexpid = (tt not in [\"psfnight\", \"fiberflatnight\", \"spectra\",\n \"redshift\"])\n if hasexpid:\n # This task type has an expid property.\n cmd = None\n if expid is not None:\n # We are cleaning only a single exposure.\n cmd = \"select name from {} where expid = {} and ( state = {}\".format(tt, expid, task_state_to_int[\"running\"])\n else:\n # We are cleaning all exposures for this task type.\n cmd = \"select name from {} where ( state = {}\".format(tt, task_state_to_int[\"running\"])\n if cleanfailed:\n cmd = \"{} or state = {} )\".format(cmd,\n task_state_to_int[\"failed\"])\n else:\n cmd = \"{} )\".format(cmd)\n cur.execute(cmd)\n tasks_running[tt] = [ x for (x, ) in cur.fetchall() ]\n if cleansubmitted:\n if expid is not None:\n cmd = \"update {} set submitted = 0 where expid = {}\".format(tt, expid)\n else:\n cmd = \"update {} set submitted = 0\".format(tt)\n cur.execute(cmd)\n else:\n # This task type has no concept of an exposure ID\n if expid is not None:\n # We specified an exposure ID, which makes no sense\n # for this task type. Skip it.\n tasks_running[tt] = list()\n continue\n else:\n # cleanup this task type.\n cmd = \"select name from {} where ( state = {}\".format(tt, task_state_to_int[\"running\"])\n if cleanfailed:\n cmd = \"{} or state = {} )\".format(cmd,\n task_state_to_int[\"failed\"])\n else:\n cmd = \"{} )\".format(cmd)\n cur.execute(cmd)\n tasks_running[tt] = [ x for (x, ) in cur.fetchall() ]\n if cleansubmitted:\n if (tt != \"spectra\") and (tt != \"redshift\"):\n cmd = \"update {} set submitted = 0\".format(tt)\n cur.execute(cmd)\n\n for tt in ttypes:\n if len(tasks_running[tt]) > 0:\n st = [ (x, \"waiting\") for x in tasks_running[tt] ]\n self.set_states_type(tt, st)\n\n self.getready()\n\n return\n\n\n def getready(self, night=None):\n \"\"\"Update DB, changing waiting to ready depending on status of dependencies .\n\n Args:\n night (str): The night to process.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n log = get_logger()\n\n # Get the list of task types excluding spectra and redshifts,\n # which will be handled separately.\n ttypes = [ t for t in all_task_types() if (t != \"spectra\") \\\n and (t != \"redshift\") ]\n\n with self.cursor() as cur:\n for tt in ttypes:\n # for each type of task, get the list of tasks in waiting mode\n cmd = \"select name from {} where state = {}\".format(tt, task_state_to_int[\"waiting\"])\n if night is not None:\n cmd = \"{} and night = {}\".format(cmd, night)\n cur.execute(cmd)\n tasks = [ x for (x, ) in cur.fetchall()]\n if len(tasks) > 0:\n log.debug(\"checking {} {} tasks ...\".format(len(tasks),tt))\n for tsk in tasks:\n task_classes[tt].getready(db=self, name=tsk, cur=cur)\n\n for tt in [ \"spectra\" , \"redshift\" ]:\n if tt == \"spectra\":\n required_healpix_frame_state = 1\n # means we have a cframe\n elif tt == \"redshift\":\n 
required_healpix_frame_state = 2\n # means we have an updated spectra file\n\n cur.execute('select nside,pixel from healpix_frame where state = {}'.format(required_healpix_frame_state))\n entries = cur.fetchall()\n for entry in entries :\n log.debug(\"{} of pixel {} is ready to run\".format(tt,entry[1]))\n cur.execute('update {} set state = {} where nside = {} and pixel = {}'.format(tt,task_state_to_int[\"ready\"],entry[0],entry[1]))\n\n log.debug(\"checking waiting {} tasks to see if they are done...\".format(tt))\n cmd = \"select pixel from {} where state = {}\".format(tt, task_state_to_int[\"waiting\"])\n cur.execute(cmd)\n pixels = [ x for (x, ) in cur.fetchall()]\n if len(pixels) > 0:\n log.debug(\"checking {} {} ...\".format(len(pixels),tt))\n if tt == \"spectra\":\n required_healpix_frame_state = 2\n elif tt == \"redshift\":\n required_healpix_frame_state = 3\n for pixel in pixels:\n cur.execute('select pixel from healpix_frame where pixel = {} and state != {}'.format(pixel,required_healpix_frame_state))\n entries = cur.fetchall()\n if len(entries)==0 :\n log.debug(\"{} task of pixel {} is done\".format(tt,pixel))\n cur.execute('update {} set state = {} where pixel = {}'.format(tt,task_state_to_int[\"done\"],pixel))\n return\n\n\n def update_healpix_frame_state(self, props, state, cur):\n if \"expid\" in props :\n # update from a cframe\n cmd = \"update healpix_frame set state = {} where expid = {} and spec = {} and state = {}\".format(state,props[\"expid\"],props[\"spec\"],props[\"state\"])\n else :\n # update from a spectra or redshift task\n cmd = \"update healpix_frame set state = {} where nside = {} and pixel = {} and state = {}\".format(state,props[\"nside\"],props[\"pixel\"],props[\"state\"])\n\n if cur is None :\n with self.cursor() as cur:\n cur.execute(cmd)\n else :\n cur.execute(cmd)\n return\n\n\n def select_healpix_frame(self, props):\n res = []\n with self.cursor() as cur:\n cmd = \"select * from healpix_frame where \"\n first=True\n for k in props.keys() :\n if not first : cmd += \" and \"\n first=False\n cmd += \"{}={}\".format(k,props[k])\n cur.execute(cmd)\n entries = cur.fetchall()\n # convert that to list of dictionaries\n for entry in entries :\n tmp = dict()\n for i, k in enumerate([\"night\", \"expid\", \"spec\", \"nside\",\n \"pixel\", \"ntargets\", \"state\"]):\n tmp[k] = entry[i]\n res.append(tmp)\n return res\n\n\n def create_healpix_frame_table(self) :\n with self.cursor() as cur:\n cmd = \"create table healpix_frame (night integer, expid integer, spec integer, nside integer, pixel integer, ntargets integer, state integer, unique(expid, spec, nside, pixel))\"\n cur.execute(cmd)\n\n return\n\n\nclass DataBaseSqlite(DataBase):\n \"\"\"Pipeline database using sqlite3 as the backend.\n\n Args:\n path (str): the filesystem path of the database to open. If None, then\n a temporary database is created in memory.\n mode (str): if \"r\", the database is open in read-only mode. 
If \"w\",\n the database is open in read-write mode and created if necessary.\n\n \"\"\"\n def __init__(self, path, mode):\n super(DataBaseSqlite, self).__init__()\n\n self._path = path\n self._mode = mode\n\n create = True\n if (self._path is not None) and os.path.exists(self._path):\n create = False\n\n if self._mode == 'r' and create:\n raise RuntimeError(\"cannot open a non-existent DB in read-only \"\n \" mode\")\n\n self._connstr = None\n\n # This timeout is in seconds\n self._busytime = 1000\n\n # Journaling options\n self._journalmode = \"persist\"\n self._syncmode = \"normal\"\n\n if create:\n self.initdb()\n return\n\n\n def _open(self):\n import sqlite3\n\n if self._path is None:\n # We are opening an in-memory DB\n self._conn = sqlite3.connect(\":memory:\")\n else:\n try:\n # only python3 supports uri option\n if self._mode == 'r':\n self._connstr = 'file:{}?mode=ro'.format(self._path)\n else:\n self._connstr = 'file:{}?mode=rwc'.format(self._path)\n self._conn = sqlite3.connect(self._connstr, uri=True,\n timeout=self._busytime)\n except:\n self._conn = sqlite3.connect(self._path, timeout=self._busytime)\n if self._mode == 'w':\n # In read-write mode, set the journaling\n self._conn.execute(\"pragma journal_mode={}\"\\\n .format(self._journalmode))\n self._conn.execute(\"pragma synchronous={}\".format(self._syncmode))\n # Other tuning options\n self._conn.execute(\"pragma temp_store=memory\")\n self._conn.execute(\"pragma page_size=4096\")\n self._conn.execute(\"pragma cache_size=4000\")\n return\n\n\n def _close(self):\n del self._conn\n self._conn = None\n return\n\n\n @contextmanager\n def cursor(self):\n import sqlite3\n self._open()\n cur = self._conn.cursor()\n cur.execute(\"begin transaction\")\n try:\n yield cur\n except sqlite3.DatabaseError as err:\n log = get_logger()\n log.error(err)\n cur.execute(\"rollback\")\n raise err\n else:\n try:\n cur.execute(\"commit\")\n except sqlite3.OperationalError:\n #- sqlite3 in py3.5 can't commit a read-only finished transaction\n pass\n finally:\n del cur\n self._close()\n\n\n def initdb(self):\n \"\"\"Create DB tables for all tasks if they do not exist.\n \"\"\"\n # check existing tables\n tables_in_db = None\n with self.cursor() as cur:\n cur.execute(\"select name FROM sqlite_master WHERE type='table'\")\n tables_in_db = [x for (x, ) in cur.fetchall()]\n\n # Create a table for every task type\n from .tasks.base import task_classes, task_type\n for tt, tc in task_classes.items():\n if tt not in tables_in_db:\n tc.create(self)\n\n if \"healpix_frame\" not in tables_in_db:\n self.create_healpix_frame_table()\n return\n\n\nclass DataBasePostgres(DataBase):\n \"\"\"Pipeline database using PostgreSQL as the backend.\n\n Args:\n host (str): The database server.\n port (int): The connection port.\n dbname (str): The database to connect.\n user (str): The user name for the connection. The password should be\n stored in the ~/.pgpass file.\n schema (str): The schema within the database. If this is specified,\n then the database is assumed to exist. 
Otherwise the schema is\n computed from a hash of the production location and will be\n created.\n authorize (str): If creating the schema, this is the list of\n additional roles that should be granted access.\n\n \"\"\"\n def __init__(self, host, port, dbname, user, schema=None, authorize=None):\n super(DataBasePostgres, self).__init__()\n\n self._schema = schema\n self._user = user\n self._dbname = dbname\n self._host = host\n self._port = port\n self._authorize = authorize\n\n self._proddir = os.path.abspath(io.specprod_root())\n\n create = False\n if self._schema is None:\n create = True\n self._schema = self._compute_schema()\n\n if create:\n self.initdb()\n return\n\n\n def _compute_schema(self):\n import hashlib\n md = hashlib.md5()\n md.update(self._proddir.encode())\n return \"pipe_{}\".format(md.hexdigest())\n\n\n def _open(self):\n import psycopg2 as pg2\n import time\n import numpy.random\n\n # Open connection. If psycopg2 raises an exception, then sleep\n # for a random time interval and keep trying.\n maxtry = 10\n ntry = 0\n while True:\n try:\n self._conn = pg2.connect(host=self._host, port=self._port,\n user=self._user, dbname=self._dbname)\n except pg2.OperationalError as err:\n log = get_logger()\n log.debug(\"PostgreSQL connection failed with '{}', will sleep and retry\".format(err))\n if ntry > maxtry:\n log.error(err)\n break\n numpy.random.seed(int(time.time()))\n sec = numpy.random.uniform() * 3.0\n time.sleep(sec)\n ntry += 1\n else:\n break\n\n return\n\n\n def _close(self):\n del self._conn\n self._conn = None\n return\n\n\n @property\n def schema(self):\n return self._schema\n\n\n def _have_schema(self, cur):\n com = \"select exists(select 1 from pg_namespace where nspname = '{}')\".format(self._schema)\n cur.execute(com)\n return cur.fetchone()[0]\n\n\n @contextmanager\n def cursor(self, skipcheck=False):\n import psycopg2\n self._open()\n cur = self._conn.cursor()\n if not skipcheck:\n have_schema = self._have_schema(cur)\n if not have_schema:\n raise RuntimeError(\"Postgres schema for production {} does\"\n \" not exist. Make sure you create the production with\"\n \" postgres options and source the top-level setup.sh\"\n \" file.\".format(self._proddir))\n cur.execute(\"set search_path to '{}'\".format(self._schema))\n cur.execute(\"begin transaction\")\n try:\n yield cur\n except psycopg2.DatabaseError as err:\n log = get_logger()\n log.error(err)\n cur.execute(\"rollback\")\n raise err\n else:\n cur.execute(\"commit\")\n finally:\n del cur\n self._close()\n\n\n def initdb(self):\n \"\"\"Create DB tables for all tasks if they do not exist.\n \"\"\"\n log = get_logger()\n # Check existence of the schema. 
If we were not passed the schema\n # in the constructor, it means that we are creating a new prod, so any\n # existing schema should be wiped and recreated.\n tables_in_db = None\n with self.cursor(skipcheck=True) as cur:\n # See if our schema already exists...\n have_schema = self._have_schema(cur)\n if have_schema:\n # We need to wipe it first\n com = \"drop schema {} cascade\".format(self._schema)\n log.debug(com)\n cur.execute(com)\n com = \"create schema {} authorization {}\"\\\n .format(self._schema, self._user)\n log.debug(com)\n cur.execute(com)\n\n if self._authorize is not None:\n com = \"grant usage on schema {} to {}\"\\\n .format(self._schema, self._authorize)\n log.debug(com)\n cur.execute(com)\n\n com = \"alter default privileges in schema {} grant select on tables to {}\".format(self._schema, self._authorize)\n log.debug(com)\n cur.execute(com)\n\n com = \"alter default privileges in schema {} grant select,usage on sequences to {}\".format(self._schema, self._authorize)\n log.debug(com)\n cur.execute(com)\n\n com = \"alter default privileges in schema {} grant execute on functions to {}\".format(self._schema, self._authorize)\n log.debug(com)\n cur.execute(com)\n\n com = \"alter default privileges in schema {} grant usage on types to {}\".format(self._schema, self._authorize)\n log.debug(com)\n cur.execute(com)\n\n # Create a table of information about this prod\n com = \"create table {}.info (key text unique, val text)\"\\\n .format(self._schema)\n log.debug(com)\n cur.execute(com)\n com = \"insert into {}.info values ('{}', '{}')\"\\\n .format(self._schema, \"path\", self._proddir)\n log.debug(com)\n cur.execute(com)\n if 'USER' in os.environ:\n com = \"insert into {}.info values ('{}', '{}')\"\\\n .format(self._schema, \"created_by\", os.environ['USER'])\n log.debug(com)\n cur.execute(com)\n\n # check existing tables\n cur.execute(\"select tablename from pg_tables where schemaname = '{}'\".format(self.schema))\n tables_in_db = [x for (x, ) in cur.fetchall()]\n\n # Create a table for every task type\n from .tasks.base import task_classes, task_type\n for tt, tc in task_classes.items():\n if tt not in tables_in_db:\n tc.create(self)\n\n if \"healpix_frame\" not in tables_in_db:\n self.create_healpix_frame_table()\n\n return\n\n\ndef load_db(dbstring, mode=\"w\", user=None):\n \"\"\"Load a database from a connection string.\n\n This instantiates either an sqlite or postgresql database using a string.\n If this string begins with \"postgresql:\", then it is taken to be the\n information needed to connect to a postgres server. Otherwise it is\n assumed to be a filesystem path to use with sqlite. The mode is only\n meaningful when using sqlite. 
Postgres permissions are controlled through\n the user permissions.\n\n Args:\n dbstring (str): either a filesystem path (sqlite) or a colon-separated\n string of connection properties in the form\n \"postresql:<host>:<port>:<dbname>:<user>:<schema>\".\n mode (str): for sqlite, the mode.\n user (str): for postgresql, an alternate user name for opening the DB.\n This can be used to connect as a user with read-only access.\n\n Returns:\n DataBase: a derived database class of the appropriate type.\n\n \"\"\"\n if re.search(r\"postgresql:\", dbstring) is not None:\n props = dbstring.split(\":\")\n host = props[1]\n port = int(props[2])\n dbname = props[3]\n username = props[4]\n if user is not None:\n username = user\n schema = None\n if len(props) > 5:\n # Our DB string also contains the name of an existing\n # schema.\n schema = props[5]\n return DataBasePostgres(host=host, port=port, dbname=dbname,\n user=username, schema=schema)\n else:\n return DataBaseSqlite(dbstring, mode)\n", "\nfrom __future__ import absolute_import, division\n\nimport os\nimport fitsio\nimport argparse\nimport numpy as np\n\nfrom desiutil.log import get_logger\n\nfrom desispec.io import read_fiberflat,write_fiberflat,findfile,read_frame\nfrom desispec.io.fiberflat_vs_humidity import get_humidity,read_fiberflat_vs_humidity\nfrom desispec.calibfinder import CalibFinder\nfrom desispec.fiberflat_vs_humidity import compute_humidity_corrected_fiberflat\n\ndef parse(options=None):\n parser = argparse.ArgumentParser(description=\"Compute a fiberflat corrected for variations with humidity.\")\n\n parser.add_argument('-i','--infile', type = str, default = None, required=True,\n help = 'path of DESI exposure frame fits file')\n parser.add_argument('--fiberflat', type = str, default = None, required=True,\n help = 'path of DESI fiberflat fits file')\n parser.add_argument('--use-sky-fibers', action = 'store_true',\n help = 'use sky fibers to improve the correction')\n parser.add_argument('-o','--outfile', type = str, default = None, required=True,\n help = 'path of output fiberflar file')\n args = None\n if options is None:\n args = parser.parse_args()\n else:\n args = parser.parse_args(options)\n return args\n\n\ndef main(args) :\n\n log = get_logger()\n\n # just read frame header in case we don't need to do anything\n frame_header = fitsio.read_header(args.infile,\"FLUX\")\n\n if args.use_sky_fibers :\n # need full frame to adjust correction on data\n frame = read_frame(args.infile)\n else :\n frame = None\n\n cfinder = CalibFinder([frame_header])\n if not cfinder.haskey(\"FIBERFLATVSHUMIDITY\"):\n log.info(\"No information on fiberflat vs humidity for camera {}, simply link the input fiberflat\".format(frame_header[\"CAMERA\"]))\n if not os.path.islink(args.outfile) :\n relpath=os.path.relpath(args.fiberflat,os.path.dirname(args.outfile))\n os.symlink(relpath,args.outfile)\n return 0\n\n # read fiberflat\n calib_fiberflat = read_fiberflat(args.fiberflat)\n\n # read mean fiberflat vs humidity\n filename = cfinder.findfile(\"FIBERFLATVSHUMIDITY\")\n log.info(f\"reading {filename}\")\n mean_fiberflat_vs_humidity , humidity_array, ffh_wave, ffh_header = read_fiberflat_vs_humidity(filename)\n assert(np.allclose(calib_fiberflat.wave,ffh_wave))\n\n # now need to find the humidity for this frame and for this fiberflat\n night=frame_header[\"NIGHT\"]\n camera=frame_header[\"CAMERA\"]\n current_frame_humidity =get_humidity(night=night,expid=frame_header[\"EXPID\"],camera=camera)\n log.info(\"humidity during current 
exposure={:.2f}\".format(current_frame_humidity))\n\n\n\n # we can compute the correction now that we have everything in hand\n improved_fiberflat = compute_humidity_corrected_fiberflat(calib_fiberflat, mean_fiberflat_vs_humidity , humidity_array, current_frame_humidity, frame = frame)\n\n # add telemetry humidity for the dome flats for the record\n # try to read the night exposure table to get the list of flats\n first_expid = calib_fiberflat.header[\"EXPID\"]\n calib_night = calib_fiberflat.header[\"NIGHT\"]\n calib_humidity=[ get_humidity(calib_night,first_expid,camera) ]\n fiberflat_expid=[ first_expid]\n for expid in range(first_expid+1,first_expid+40) :\n filename=findfile(\"raw\",calib_night,expid)\n if not os.path.isfile(filename): continue\n head=fitsio.read_header(filename,1)\n if not \"OBSTYPE\" in head.keys() or head[\"OBSTYPE\"]!=\"FLAT\" :\n break\n fiberflat_expid.append(expid)\n calib_humidity.append(get_humidity(calib_night,expid,camera))\n log.debug(\"calib expids={}\".format(fiberflat_expid))\n log.debug(\"calib humidities={}\".format(calib_humidity))\n calib_humidity=np.mean(calib_humidity)\n if np.isnan(calib_humidity) :\n log.warning(\"missing humidity info for fiber flat, use link to input\")\n calib_humidity=0.\n else :\n log.info(\"mean humidity during calibration exposures={:.2f}\".format(calib_humidity))\n fit_humidity = improved_fiberflat.header[\"CALFHUM\"]\n if np.abs(fit_humidity-calib_humidity)>10 :\n message=\"large difference between best fit humidity during dome flats ({:.1f}) and value from telemetry ({:.1f})\".format(fit_humidity,calib_humidity)\n if np.abs(fit_humidity-calib_humidity)>20 :\n log.error(message)\n raise RuntimeError(message)\n log.warning(message)\n\n improved_fiberflat.header[\"CALTHUM\"] = (calib_humidity,\"dome flat humidity from telemetry\")\n\n # write it\n write_fiberflat(args.outfile,improved_fiberflat)\n log.info(\"wrote humidity corrected flat {}\".format(args.outfile))\n\n return 0\n", "\"\"\" \nMonitoring algorithms for Quicklook pipeline\n\"\"\"\n\nimport os,sys\nimport datetime\nimport numpy as np\nimport scipy.ndimage\nimport yaml\nimport re\nimport astropy.io.fits as fits\nimport desispec.qa.qa_plots_ql as plot\nimport desispec.quicklook.qlpsf\nimport desispec.qa.qa_plots_ql as fig\nfrom desispec.quicklook.qas import MonitoringAlg, QASeverity\nfrom desispec.quicklook import qlexceptions\nfrom desispec.quicklook import qllogger\nfrom desispec.quicklook.palib import resample_spec\nfrom astropy.time import Time\nfrom desispec.qa import qalib\nfrom desispec.io import qa, read_params\nfrom desispec.io.meta import findfile\nfrom desispec.io.sky import read_sky\nfrom desispec.image import Image as im\nfrom desispec.frame import Frame as fr\nfrom desispec.preproc import parse_sec_keyword\nfrom desispec.util import runcmd\nfrom desispec.qproc.qframe import QFrame\nfrom desispec.fluxcalibration import isStdStar\nfrom desitarget.targetmask import desi_mask\nimport astropy\nfrom astropy.io import fits\n\nqlog=qllogger.QLLogger(\"QuickLook\",0)\nlog=qlog.getlog()\n\ndef get_inputs(*args,**kwargs):\n '''\n Get inputs required for each QA\n '''\n inputs={}\n inputs[\"camera\"]=kwargs[\"camera\"]\n\n if \"paname\" not in kwargs: inputs[\"paname\"]=None\n else: inputs[\"paname\"]=kwargs[\"paname\"]\n\n if \"ReferenceMetrics\" in kwargs: inputs[\"refmetrics\"]=kwargs[\"ReferenceMetrics\"]\n else: inputs[\"refmetrics\"]=None\n\n inputs[\"amps\"]=False\n if \"amps\" in kwargs: inputs[\"amps\"]=kwargs[\"amps\"]\n\n if \"param\" in kwargs: 
inputs[\"param\"]=kwargs[\"param\"]\n else: inputs[\"param\"]=None\n\n inputs[\"psf\"]=None\n if \"PSFFile\" in kwargs: inputs[\"psf\"]=kwargs[\"PSFFile\"]\n\n inputs[\"fibermap\"]=None\n if \"FiberMap\" in kwargs: inputs[\"fibermap\"]=kwargs[\"FiberMap\"]\n\n if \"Peaks\" in kwargs: inputs[\"Peaks\"]=kwargs[\"Peaks\"]\n\n if \"qafile\" in kwargs: inputs[\"qafile\"] = kwargs[\"qafile\"]\n else: inputs[\"qafile\"]=None\n\n if \"qafig\" in kwargs: inputs[\"qafig\"]=kwargs[\"qafig\"]\n else: inputs[\"qafig\"]=None\n\n if \"plotconf\" in kwargs: inputs[\"plotconf\"]=kwargs[\"plotconf\"]\n else: inputs[\"plotconf\"]=None\n\n if \"hardplots\" in kwargs: inputs[\"hardplots\"]=kwargs[\"hardplots\"]\n else: inputs[\"hardplots\"]=False\n\n return inputs\n\ndef get_image(filetype,night,expid,camera,specdir):\n '''\n Make image object from file if in development mode\n '''\n #- Find correct file for QA\n imagefile = findfile(filetype,int(night),int(expid),camera,specprod_dir=specdir)\n\n #- Create necessary input for desispec.image\n image = fits.open(imagefile)\n pix = image['IMAGE'].data\n ivar = image['IVAR'].data\n mask = image['MASK'].data\n readnoise = image['READNOISE'].data\n meta = image['IMAGE'].header\n\n #- Create image object\n imageobj = im(pix,ivar,mask=mask,readnoise=readnoise,camera=camera,meta=meta)\n return imageobj\n\ndef get_frame(filetype,night,expid,camera,specdir):\n '''\n Make frame object from file if in development mode\n '''\n #- Find correct file for QA\n framefile = findfile(filetype,int(night),int(expid),camera,specprod_dir=specdir)\n\n #- Create necessary input for desispec.frame\n frame = fits.open(framefile)\n wave = frame['WAVE'].data\n flux = frame['FLUX'].data\n ivar = frame['IVAR'].data\n fibermap = frame['FIBERMAP'].data\n fibers = fibermap['FIBER']\n meta = frame['FLUX'].header\n\n #- Create frame object\n frameobj = QFrame(wave,flux,ivar,fibers=fibers,fibermap=fibermap,meta=meta)\n\n return frameobj\n\n\nclass Check_HDUs(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"CHECKHDUS\"\n import astropy\n rawtype=astropy.io.fits.hdu.hdulist.HDUList\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"CHECKHDUS\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"CHECKHDUS_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n MonitoringAlg.__init__(self,name,rawtype,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n\n if kwargs[\"singleqa\"] == 'Check_HDUs':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n rawfile = findfile('raw',int(night),int(expid),camera,rawdata_dir=kwargs[\"rawdir\"])\n raw = fits.open(rawfile)\n else: raw=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(raw,inputs)\n\n def run_qa(self,raw,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n\n rawimage=raw[camera.upper()].data\n header=raw[camera.upper()].header\n \n retval={}\n retval[\"EXPID\"]= '{0:08d}'.format(header[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"FLAVOR\"] = header[\"FLAVOR\"]\n #SE: quicklook to crash when a mismatched config file with the one in fits header\n from desispec.scripts import quicklook\n \n args=quicklook.parse() \n ad,fl = args.config.split(\"qlconfig_\")\n flvr = fl.split(\".yaml\")[0]\n #if flvr in ['darksurvey','graysurvey','brightsurvey']: flvr = 'science'\n if header[\"FLAVOR\"] == 'science': \n flvr = flvr.split(\"survey\")[0]\n if (header[\"FLAVOR\"] == flvr or header[\"FLAVOR\"] == format(flvr.upper()) or flvr == 'test'):\n log.info(\"The correct configuration file is being used!\")\n else:\n log.critical(\"Wrong configuration file is being used!\")\n sys.exit(\"Wrong configuration file! use the one for \"+str(header[\"FLAVOR\"]))\n\n elif (header[\"FLAVOR\"] == flvr or flvr == 'test'): \n log.info(\"The correct configuration file is being used!\")\n else: \n log.critical(\"Wrong configuration file is being used!\")\n sys.exit(\"Wrong configuration file! 
use the one for \"+str(header[\"FLAVOR\"]))\n \n\n if retval[\"FLAVOR\"] == 'science':\n retval[\"PROGRAM\"] = header[\"PROGRAM\"]\n else:\n pass\n retval[\"NIGHT\"] = header[\"NIGHT\"]\n kwargs=self.config['kwargs']\n \n\n HDUstat = \"NORMAL\" \n EXPNUMstat = \"NORMAL\" \n \n param['EXPTIME'] = header[\"EXPTIME\"]\n\n if camera != header[\"CAMERA\"]:\n log.critical(\"The raw FITS file is missing camera \"+camera)\n sys.exit(\"QuickLook Abort: CHECK THE RAW FITS FILE :\"+rawfile)\n HDUstat = 'ALARM'\n \n if header[\"EXPID\"] != kwargs['expid'] : \n log.critical(\"The raw FITS file is missing camera \"+camera)\n sys.exit(\"QuickLook Abort: EXPOSURE NUMBER DOES NOT MATCH THE ONE IN THE HEADER\") \n EXPNUMstat = \"ALARM\"\n \n \n \n if header[\"FLAVOR\"] != \"science\" :\n \n retval[\"METRICS\"] = {\"CHECKHDUS_STATUS\":HDUstat,\"EXPNUM_STATUS\":EXPNUMstat}\n\n else :\n retval[\"METRICS\"] = {\"CHECKHDUS_STATUS\":HDUstat,\"EXPNUM_STATUS\":EXPNUMstat}\n param['SEEING'] = header[\"SEEING\"]\n param['AIRMASS'] = header[\"AIRMASS\"]\n param['PROGRAM'] = header[\"PROGRAM\"]\n \n \n retval[\"PARAMS\"] = param \n \n if 'INHERIT' in header and header['INHERIT']:\n h0 = raw[0].header\n for key in h0:\n if key not in header:\n header[key] = h0[key]\n \n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Trace_Shifts(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"XYSHIFTS\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"XYSHIFTS\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"XYSHIFTS_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n if \"XYSHIFTS_WARN_RANGE\" in parms and \"XYSHIFTS_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"XYSHIFTS_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"XYSHIFTS_NORMAL_RANGE\"]),QASeverity.NORMAL)]\n MonitoringAlg.__init__(self,name,im,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Trace_Shifts':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n image = get_image('preproc',night,expid,camera,kwargs[\"specdir\"])\n else: image=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(image,inputs)\n\n def run_qa(self,image,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n \n #- qa dictionary \n retval={}\n retval[\"PANAME\" ]= paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = expid = '{0:08d}'.format(image.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = image.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if image.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n \n \n retval[\"NIGHT\"] = night = image.meta[\"NIGHT\"]\n \n\n if param is None:\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n # create xytraceset object\n \n from desispec.calibfinder import findcalibfile\n from desispec.xytraceset import XYTraceSet\n #SE: all next lines till the dashed line exist just so that we get the psf name without hardcoding any address -> there must be a better way\n rawfile = findfile('raw',int(night),int(expid),camera,rawdata_dir=os.environ[\"QL_SPEC_DATA\"])\n hdulist=fits.open(rawfile)\n primary_header=hdulist[0].header\n camera_header =hdulist[camera].header\n hdulist.close()\n #--------------------------------------------------------\n psffile=findcalibfile([camera_header,primary_header],\"PSF\")\n psf=fits.open(psffile)\n xcoef=psf['XTRACE'].data\n ycoef=psf['YTRACE'].data\n wavemin=psf[\"XTRACE\"].header[\"WAVEMIN\"]\n wavemax=psf[\"XTRACE\"].header[\"WAVEMAX\"]\n npix_y=image.meta['NAXIS2']\n psftrace=XYTraceSet(xcoef,ycoef,wavemin,wavemax,npix_y=npix_y)\n\n # compute dx and dy\n from desispec.trace_shifts import compute_dx_from_cross_dispersion_profiles as compute_dx\n from desispec.trace_shifts import compute_dy_using_boxcar_extraction as compute_dy\n fibers=np.arange(500) #RS: setting nfibers to 500 for now\n ox,oy,odx,oex,of,ol=compute_dx(xcoef,ycoef,wavemin,wavemax,image,fibers=fibers)\n x_for_dy,y_for_dy,ody,ey,fiber_for_dy,wave_for_dy=compute_dy(psftrace,image,fibers)\n\n # return average shifts in x and y\n dx=np.mean(odx)\n dy=np.mean(ody)\n xyshift=np.array([dx,dy])\n\n retval[\"METRICS\"]={\"XYSHIFTS\":xyshift}\n retval[\"PARAMS\"]=param\n\n #get_outputs(qafile,qafig,retval,'plot_traceshifts')\n# outfile = qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Bias_From_Overscan(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"BIAS_OVERSCAN\"\n\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"BIAS_AMP\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"BIAS_AMP_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in 
r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"BIAS_WARN_RANGE\" in parms and \"BIAS_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"BIAS_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"BIAS_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe \n\n MonitoringAlg.__init__(self,name,im,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n \n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Bias_From_Overscan':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n image = get_image('preproc',night,expid,camera,kwargs[\"specdir\"])\n else: image=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(image,inputs)\n\n def run_qa(self,image,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n retval={}\n retval[\"EXPID\"] = '{0:08d}'.format(image.meta[\"EXPID\"])\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"CAMERA\"] = camera\n retval[\"NIGHT\"] = image.meta[\"NIGHT\"]\n retval[\"FLAVOR\"] = flavor = image.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if image.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n retval[\"EXPTIME\"] = image.meta[\"EXPTIME\"]\n \n\n if retval[\"FLAVOR\"] == 'arc':\n pass\n else:\n retval[\"FLAVOR\"] = image.meta[\"FLAVOR\"]\n retval[\"NIGHT\"] = image.meta[\"NIGHT\"]\n kwargs=self.config['kwargs']\n \n #SE: this would give the desispec version stored in DEPVER07 key of the raw simulated fits file :0.16.0.dev1830\n #RS: don't look for this if not using simulated files, differences in simulated headers vs. 
data headers cause this to crash\n if flavor == 'science':\n param['FITS_DESISPEC_VERSION'] = image.meta['DEPVER07'] \n import desispec\n from desispec import quicklook\n param['PROC_DESISPEC_VERSION']= desispec.__version__\n param['PROC_QuickLook_VERSION']= quicklook.__qlversion__\n \n \n if 'INHERIT' in image.meta and image.meta['INHERIT']:\n\n h0 = image.meta\n #h0 = header\n for key in h0:\n if key not in image.meta:\n image.meta[key] = h0[key]\n\n #RS: look for values in simulated data, if not found try finding data values\n try:\n bias_overscan = [image.meta['OVERSCN1'],image.meta['OVERSCN2'],image.meta['OVERSCN3'],image.meta['OVERSCN4']]\n except:\n bias_overscan = [image.meta['OVERSCNA'],image.meta['OVERSCNB'],image.meta['OVERSCNC'],image.meta['OVERSCND']]\n\n bias = np.mean(bias_overscan)\n\n if param is None:\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n \n\n retval[\"PARAMS\"] = param\n\n if amps:\n bias_amps=np.array(bias_overscan)\n retval[\"METRICS\"]={'BIAS_AMP':bias_amps}\n else:\n #retval[\"METRICS\"]={'BIAS':bias,\"DIFF1SIG\":diff1sig,\"DIFF2SIG\":diff2sig,\"DIFF3SIG\":diff3sig,\"DATA5SIG\":data5sig,\"BIAS_ROW\":mean_row}\n retval[\"METRICS\"]={}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_bias_overscan(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Get_RMS(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"RMS\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"NOISE_AMP\" \n status=kwargs['statKey'] if 'statKey' in kwargs else \"NOISE_AMP_STATUS\" \n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n \n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n \n if \"NOISE_WARN_RANGE\" in parms and \"NOISE_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"NOISE_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"NOISE_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe \n \n MonitoringAlg.__init__(self,name,im,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Get_RMS':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n image = get_image('preproc',night,expid,camera,kwargs[\"specdir\"])\n else: image=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(image,inputs)\n\n def run_qa(self,image,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n retval={}\n retval[\"EXPID\"] = '{0:08d}'.format(image.meta[\"EXPID\"])\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = flavor = image.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if flavor == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = image.meta[\"NIGHT\"]\n\n # return rms values in rms/sqrt(exptime)\n #rmsccd=qalib.getrms(image.pix/np.sqrt(image.meta[\"EXPTIME\"])) #- should we add dark current and/or readnoise to this as well?\n #rmsccd = np.mean([image.meta['RDNOISE1'],image.meta['RDNOISE2'],image.meta['RDNOISE3'],image.meta['RDNOISE4']]) #--> \"NOISE\":rmsccd\n \n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\") \n\n retval[\"PARAMS\"] = param\n\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # SE: this section is moved from BIAS_FROM_OVERSCAN to header\n\n data=[]\n row_data_amp1=[]\n row_data_amp2=[]\n row_data_amp3=[]\n row_data_amp4=[]\n bias_patnoise=[]\n #bias_overscan=[] \n #RS: loop through amps based on header info\n loop_amps = get_amp_ids(image.meta)\n exptime=image.meta[\"EXPTIME\"]\n if exptime == 0.:\n exptime = 1.\n for kk in loop_amps:\n sel=parse_sec_keyword(image.meta['BIASSEC'+kk])\n #- Obtain counts/second in bias region\n# pixdata=image[sel]/header[\"EXPTIME\"]\n pixdata=image.pix[sel]/exptime\n if kk == '1' or kk == 'A':\n for i in range(pixdata.shape[0]):\n row_amp1=pixdata[i]\n row_data_amp1.append(row_amp1)\n if kk == '2' or kk == 'B':\n \n for i in range(pixdata.shape[0]):\n row_amp2=pixdata[i]\n row_data_amp2.append(row_amp2)\n if kk == '3' or kk == 'C':\n \n for i in range(pixdata.shape[0]):\n row_amp3=pixdata[i]\n row_data_amp3.append(row_amp3)\n if kk == '4' or kk == 'D':\n \n for i in range(pixdata.shape[0]):\n row_amp4=pixdata[i]\n row_data_amp4.append(row_amp4)\n #- Compute statistics of the bias region that only reject\n # the 0.5% of smallest and largest values. 
(from sdssproc) \n isort=np.sort(pixdata.ravel())\n nn=isort.shape[0]\n bias=np.mean(isort[int(0.005*nn) : int(0.995*nn)])\n #bias_overscan.append(bias)\n data.append(isort)\n\n #- Combine data from each row per amp and take average\n # BIAS_ROW = mean_row \n median_row_amp1=[]\n for i in range(len(row_data_amp1)):\n median=np.median(row_data_amp1[i])\n median_row_amp1.append(median)\n \n rms_median_row_amp1= np.std(median_row_amp1)\n try:\n noise1 = image.meta['RDNOISE1']\n except:\n noise1 = image.meta['OBSRDNA']\n bias_patnoise.append(rms_median_row_amp1/noise1)\n \n median_row_amp2=[]\n for i in range(len(row_data_amp2)):\n median=np.median(row_data_amp2[i])\n median_row_amp2.append(median)\n \n rms_median_row_amp2= np.std(median_row_amp2)\n try:\n noise2 = image.meta['RDNOISE2']\n except:\n noise2 = image.meta['OBSRDNB']\n bias_patnoise.append(rms_median_row_amp2/noise2)\n \n \n median_row_amp3=[]\n for i in range(len(row_data_amp3)):\n median=np.median(row_data_amp3[i])\n median_row_amp3.append(median)\n \n rms_median_row_amp3= np.std(median_row_amp3)\n try:\n noise3 = image.meta['RDNOISE3']\n except:\n noise3 = image.meta['OBSRDNC']\n bias_patnoise.append(rms_median_row_amp3/noise3)\n \n median_row_amp4=[]\n for i in range(len(row_data_amp4)):\n median=np.median(row_data_amp4[i])\n median_row_amp4.append(median)\n \n rms_median_row_amp4= np.std(median_row_amp4)\n try:\n noise4 = image.meta['RDNOISE4']\n except:\n noise4 = image.meta['OBSRDND']\n bias_patnoise.append(rms_median_row_amp4/noise4)\n\n\n #- Calculate upper and lower bounds of 1, 2, and 3 sigma \n full_data=np.concatenate((data[0],data[1],data[2],data[3])).ravel()\n sig1_lo = np.percentile(full_data,50.-(param['PERCENTILES'][0]/2.))\n sig1_hi = np.percentile(full_data,50.+(param['PERCENTILES'][0]/2.))\n sig2_lo = np.percentile(full_data,50.-(param['PERCENTILES'][1]/2.))\n sig2_hi = np.percentile(full_data,50.+(param['PERCENTILES'][1]/2.))\n sig3_lo = np.percentile(full_data,50.-(param['PERCENTILES'][2]/2.))\n sig3_hi = np.percentile(full_data,50.+(param['PERCENTILES'][2]/2.))\n\n #- Find difference between upper and lower sigma bounds\n # DIFF1SIG: The number of counts separating the 1 sigma percentiles in the noise distribution (from the overscan region)\n diff1sig = sig1_hi - sig1_lo\n # DIFF2SIG: The number of counts separating 2 or 3 sigma in the noise distribution\n diff2sig = sig2_hi - sig2_lo\n diff3sig = sig3_hi - sig3_lo\n\n #-DATA5SIG: number of pixels more than 5 sigma below the bias level\n sig5_value = np.percentile(full_data,3e-5)\n data5sig = len(np.where(full_data <= sig5_value)[0])\n \n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n if amps:\n rms_over_amps = [noise1,noise2,noise3,noise4]\n try:\n rms_amps = [image.meta['OBSRDN1'],image.meta['OBSRDN2'],image.meta['OBSRDN3'],image.meta['OBSRDN4']]\n except:\n rms_amps = [image.meta['OBSRDNA'],image.meta['OBSRDNB'],image.meta['OBSRDNC'],image.meta['OBSRDND']]\n retval[\"METRICS\"]={\"NOISE_AMP\":np.array(rms_amps),\"NOISE_OVERSCAN_AMP\":np.array(rms_over_amps),\"DIFF1SIG\":diff1sig,\"DIFF2SIG\":diff2sig,\"DATA5SIG\":data5sig,\"BIAS_PATNOISE\":bias_patnoise}#,\"NOISE_ROW\":noise_row,\"EXPNUM_WARN\":expnum,\"NOISE_OVER\":rmsover\n else:\n retval[\"METRICS\"]={\"DIFF1SIG\":diff1sig,\"DIFF2SIG\":diff2sig,\"DATA5SIG\":data5sig, \"BIAS_PATNOISE\":bias_patnoise} # Dropping \"NOISE_OVER\":rmsover,\"NOISE_ROW\":noise_row,\"EXPNUM_WARN\":expnum\n\n\n 
###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_RMS(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval \n\n def get_default_config(self):\n return {}\n\n\nclass Calc_XWSigma(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"XWSIGMA\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"XWSIGMA\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"XWSIGMA_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"XWSIGMA_WARN_RANGE\" in parms and \"XWSIGMA_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"XWSIGMA_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"XWSIGMA_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,im,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Calc_XWSigma':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n image = get_image('preproc',night,expid,camera,kwargs[\"specdir\"])\n else: image=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(image,inputs)\n\n def run_qa(self,image,inputs):\n import desispec.quicklook.qlpsf\n from scipy.optimize import curve_fit\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n psffile=inputs[\"psf\"]\n psf=desispec.quicklook.qlpsf.PSF(psffile)\n amps=inputs[\"amps\"]\n allpeaks=inputs[\"Peaks\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat() \n retval[\"EXPID\"] = '{0:08d}'.format(image.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = image.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if image.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=program=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = image.meta[\"NIGHT\"]\n\n if param is None:\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n \n retval[\"PARAMS\"] = param\n #- Ensure that the QA will run even if 500 spectra aren't present\n if fibermap['FIBER'].shape[0] >= 500:\n fibers = 500\n else:\n fibers = fibermap['FIBER'].shape[0]\n\n #- Define number of pixels to be fit\n dp=param['PIXEL_RANGE']/2\n #- Get wavelength ranges around peaks\n peaks=allpeaks['{}_PEAKS'.format(camera[0].upper())]\n #- Maximum allowed fit sigma value\n maxsigma=param['MAX_SIGMA']\n\n xfails=[]\n wfails=[]\n xsigma=[]\n wsigma=[]\n xsigma_amp1=[]\n wsigma_amp1=[]\n xsigma_amp2=[]\n wsigma_amp2=[]\n xsigma_amp3=[]\n wsigma_amp3=[]\n xsigma_amp4=[]\n wsigma_amp4=[]\n \n for fiber in range(fibers):\n \n xs = -1 # SE: this prevents crash in \"XWSIGMA_AMP\" for when xs or ws is empty list -> try b9 of 20200515/00000001 \n ws = -1\n xsig=[]\n wsig=[]\n for peak in range(len(peaks)):\n #- Use psf information to convert wavelength to pixel values\n xpixel=desispec.quicklook.qlpsf.PSF.x(psf,ispec=fiber,wavelength=peaks[peak])[0][0]\n ypixel=desispec.quicklook.qlpsf.PSF.y(psf,ispec=fiber,wavelength=peaks[peak])[0][0]\n #- Find x and y pixel values around sky lines\n xpix_peak=np.arange(int(xpixel-dp),int(xpixel+dp),1)\n ypix_peak=np.arange(int(ypixel-dp),int(ypixel+dp),1)\n #- Fit gaussian to counts in pixels around sky line\n #- If any values fail, store x/w, wavelength, and fiber\n try:\n xpopt,xpcov=curve_fit(qalib.gauss,np.arange(len(xpix_peak)),image.pix[int(ypixel),xpix_peak])\n xs=np.abs(xpopt[2])\n if xs <= maxsigma:\n xsig.append(xs)\n else:\n xfail=[fiber,peaks[peak]]\n xfails.append(xfail)\n except:\n xfail=[fiber,peaks[peak]]\n xfails.append(xfail)\n pass\n try:\n wpopt,wpcov=curve_fit(qalib.gauss,np.arange(len(ypix_peak)),image.pix[ypix_peak,int(xpixel)])\n ws=np.abs(wpopt[2])\n if ws <= maxsigma:\n wsig.append(ws)\n else:\n wfail=[fiber,peaks[peak]]\n wfails.append(wfail)\n except:\n wfail=[fiber,peaks[peak]]\n wfails.append(wfail)\n pass\n\n #- Excluding fibers 240-260 in case some fibers overlap amps\n #- Excluding peaks in the center of image in case peak overlaps two amps\n #- This shouldn't cause a significant loss of information \n \n if amps:\n\n if fibermap['FIBER'][fiber]<240:\n if ypixel < 2000.:\n xsigma_amp1.append(xs)\n wsigma_amp1.append(ws)\n if ypixel > 2100.:\n xsigma_amp3.append(xs)\n wsigma_amp3.append(ws)\n\n if fibermap['FIBER'][fiber]>260:\n if ypixel < 2000.:\n xsigma_amp2.append(xs)\n wsigma_amp2.append(ws)\n if ypixel > 2100.:\n xsigma_amp4.append(xs)\n wsigma_amp4.append(ws)\n \n\n if len(xsig)!=0:\n xsigma.append(np.mean(xsig))\n if len(wsig)!=0:\n wsigma.append(np.mean(wsig))\n\n if fibermap['FIBER'].shape[0]<260:\n xsigma_amp2=[]\n xsigma_amp4=[]\n wsigma_amp2=[]\n wsigma_amp4=[]\n\n #- Calculate desired output metrics \n xsigma_med=np.median(np.array(xsigma))\n wsigma_med=np.median(np.array(wsigma))\n xsigma_amp=np.array([np.median(xsigma_amp1),np.median(xsigma_amp2),np.median(xsigma_amp3),np.median(xsigma_amp4)])\n wsigma_amp=np.array([np.median(wsigma_amp1),np.median(wsigma_amp2),np.median(wsigma_amp3),np.median(wsigma_amp4)])\n xwfails=np.array([xfails,wfails])\n\n\n #SE: mention the example here when the next lines are ineffective and when they are effective in removing the NaN from XWSIGMA_AMP--> XWSIGMA itself no longer includes any NaN value. 
As we both know, this is not the way to properly deal with NaNs -->let's see if switching to non-scipy fuction would bring about a better solution\n if len(xsigma)==0:\n xsigma=[param['XWSIGMA_{}_REF'.format(program.upper())][0]]\n\n if len(wsigma)==0:\n wsigma=[param['XWSIGMA_{}_REF'.format(program.upper())][1]]\n\n #- Combine metrics for x and w\n xwsigma_fib=np.array((xsigma,wsigma)) #- (2,nfib)\n xwsigma_med=np.array((xsigma_med,wsigma_med)) #- (2)\n xwsigma_amp=np.array((xsigma_amp,wsigma_amp))\n\n if amps:\n #if len(xsigma_amp1)==0 :\n #xsigma_amp1 = [param['XWSIGMA_REF'][0]]\n #if len(xsigma_amp2)==0 :\n #xsigma_amp2 = [param['XWSIGMA_REF'][0]]\n #if len(xsigma_amp3)==0 :\n #xsigma_amp3 = [param['XWSIGMA_REF'][0]]\n #if len(xsigma_amp4)==0 :\n #xsigma_amp4 = [param['XWSIGMA_REF'][0]]\n\n #if len(wsigma_amp1)==0 :\n #wsigma_amp1 = [param['XWSIGMA_REF'][1]]\n #if len(wsigma_amp2)==0 :\n #wsigma_amp2 = [param['XWSIGMA_REF'][1]]\n #if len(wsigma_amp3)==0 :\n #wsigma_amp3 = [param['XWSIGMA_REF'][1]]\n #if len(wsigma_amp4)==0 :\n #wsigma_amp4 = [param['XWSIGMA_REF'][1]]\n\n retval[\"METRICS\"]={\"XWSIGMA\":xwsigma_med,\"XWSIGMA_FIB\":xwsigma_fib,\"XWSIGMA_AMP\":xwsigma_amp}#,\"XWSHIFT\":xwshift,\"XWSHIFT_AMP\":xwshift_amp,\"XWSIGMA_SHIFT\": xwsigma_shift}\n else:\n retval[\"METRICS\"]={\"XWSIGMA\":xwsigma_med,\"XWSIGMA_FIB\":xwsigma_fib}#,\"XWSHIFT\":xwshift,\"XWSIGMA_SHIFT\": xwsigma_shift}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_XWSigma(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n \n def get_default_config(self):\n return {}\n\n\nclass Count_Pixels(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"COUNTPIX\"\n from desispec.image import Image as im\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"LITFRAC_AMP\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"LITFRAC_AMP_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n \n if \"LITFRAC_AMP_WARN_RANGE\" in parms and \"LITFRAC_AMP_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"LITFRAC_AMP_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"LITFRAC_AMP_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,im,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Count_Pixels':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n image = get_image('preproc',night,expid,camera,kwargs[\"specdir\"])\n else: image=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(image,inputs)\n\n def run_qa(self,image,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(image.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = image.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if image.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n \n retval[\"NIGHT\"] = image.meta[\"NIGHT\"]\n \n\n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n\n\n retval[\"PARAMS\"] = param\n\n #- get the counts for each amp\n npix_amps=[]\n litfrac_amps=[]\n\n from desispec.preproc import parse_sec_keyword\n #RS: loop through amps based on header info\n try:\n header_test=parse_sec_keyword(image.meta['CCDSEC1'])\n loop_amps=['1','2','3','4']\n except:\n loop_amps=['A','B','C','D']\n #- get amp boundary in pixels\n for kk in loop_amps:\n ampboundary=parse_sec_keyword(image.meta[\"CCDSEC\"+kk])\n try:\n rdnoise_thisamp=image.meta[\"RDNOISE\"+kk]\n except:\n rdnoise_thisamp=image.meta[\"OBSRDN\"+kk]\n npix_thisamp= image.pix[ampboundary][image.pix[ampboundary] > param['CUTPIX'] * rdnoise_thisamp].size #- no of pixels above threshold\n npix_amps.append(npix_thisamp)\n size_thisamp=image.pix[ampboundary].size\n litfrac_thisamp=round(np.float64(npix_thisamp)/size_thisamp,2) #- fraction of pixels getting light above threshold\n litfrac_amps.append(litfrac_thisamp)\n\t# retval[\"METRICS\"]={\"NPIX_AMP\",npix_amps,'LITFRAC_AMP': litfrac_amps}\n retval[\"METRICS\"]={\"LITFRAC_AMP\": litfrac_amps}\t\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_countpix(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass CountSpectralBins(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"COUNTBINS\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"NGOODFIB\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"NGOODFIB_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n 
kwargs[\"REFERENCE\"]=r[key]\n\n if \"NGOODFIB_WARN_RANGE\" in parms and \"NGOODFIB_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"NGOODFIB_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"NGOODFIB_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe \n\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'CountSpectralBins':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('frame',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n psf=inputs[\"psf\"]\n qafile=inputs[\"qafile\"]\n qafig=None #inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n #- qa dictionary \n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n\n grid=np.gradient(frame.wave)\n if not np.all(grid[0]==grid[1:]): \n log.debug(\"grid_size is NOT UNIFORM\")\n\n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n\n retval[\"PARAMS\"] = param\n #- get the effective readnoise for the fibers \n #- readnoise per fib = readnoise per pix * sqrt(box car width)* sqrt(no. of bins in the amp) * binsize/pix size scale\n nspec=frame.nspec\n rdnoise_fib=np.zeros(nspec)\n if nspec > 250: #- upto 250 - amp 1 and 3, beyond that 2 and 4\n rdnoise_fib[:250]=[(frame.meta['RDNOISE1']+frame.meta['RDNOISE3'])*np.sqrt(5.)*np.sqrt(frame.flux.shape[1]/2)*frame.meta['WAVESTEP']/0.5]*250\n rdnoise_fib[250:]=[(frame.meta['RDNOISE2']+frame.meta['RDNOISE4'])*np.sqrt(5.)*np.sqrt(frame.flux.shape[1]/2)*frame.meta['WAVESTEP']/0.5]*(nspec-250)\n else:\n rdnoise_fib=[(frame.meta['RDNOISE1']+frame.meta['RDNOISE3'])*np.sqrt(5.)*np.sqrt(frame.flux.shape[1]/2)*frame.meta['WAVESTEP']/0.5]*nspec\n threshold=[param['CUTBINS']*ii for ii in rdnoise_fib]\n #- compare the flux sum to threshold\n \n totcounts=frame.flux.sum(axis=1)\n passfibers=np.where(totcounts>threshold)[0] \n ngoodfibers=passfibers.shape[0]\n good_fibers=np.array([0]*frame.nspec)\n good_fibers[passfibers]=1 #- assign 1 for good fiber\n\n #- leaving the amps granularity needed for caching as defunct. 
If needed in future, this needs to be propagated through.\n amps=False\n leftmax=None\n rightmax=None\n bottommax=None\n topmin=None\n\n if amps: #- leaving this for now\n leftmax,rightmin,bottommax,topmin = qalib.fiducialregion(frame,psf)\n retval[\"LEFT_MAX_FIBER\"]=int(leftmax)\n retval[\"RIGHT_MIN_FIBER\"]=int(rightmin)\n retval[\"BOTTOM_MAX_WAVE_INDEX\"]=int(bottommax)\n retval[\"TOP_MIN_WAVE_INDEX\"]=int(topmin)\n\n retval[\"METRICS\"]={\"NGOODFIB\": ngoodfibers, \"GOOD_FIBERS\": good_fibers, \"TOTCOUNT_FIB\": totcounts}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_countspectralbins(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Sky_Continuum(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"SKYCONT\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"SKYCONT\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"SKYCONT_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"SKYCONT_WARN_RANGE\" in parms and \"SKYCONT_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"SKYCONT_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"SKYCONT_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Sky_Continuum':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('fframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n #- qa dictionary \n retval={}\n retval[\"PANAME\" ]= paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n\n camera=frame.meta[\"CAMERA\"]\n\n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n\n wrange1=param[\"{}_CONT\".format(camera[0].upper())][0]\n wrange2=param[\"{}_CONT\".format(camera[0].upper())][1]\n\n retval[\"PARAMS\"] = param\n\n skyfiber, contfiberlow, contfiberhigh, meancontfiber, skycont = qalib.sky_continuum(\n frame, wrange1, wrange2)\n \n \n retval[\"METRICS\"]={\"SKYFIBERID\": skyfiber.tolist(), \"SKYCONT\":skycont, \"SKYCONT_FIBER\":meancontfiber}\n \n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_sky_continuum(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Sky_Rband(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"SKYRBAND\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"SKYRBAND\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"SKYRBAND_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"SKYRBAND_WARN_RANGE\" in parms and \"SKYRBAND_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"SKYRBAND_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"SKYRBAND_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n if not 
self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Sky_Rband':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('cframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n #- qa dictionary \n retval={}\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n retval[\"PANAME\" ]= paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n\n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=program=fibmap[1].header['PROGRAM']\n\n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n\n retval[\"PARAMS\"] = param\n\n #- Find sky fibers\n objects=frame.fibermap['OBJTYPE']\n skyfibers=np.where(objects==\"SKY\")[0]\n\n flux=frame.flux\n wave=frame.wave\n #- Set appropriate filter and zero point\n if camera[0].lower() == 'r':\n responsefilter='decam2014-r'\n\n #- Get filter response information from speclite\n try:\n from pkg_resources import resource_filename\n responsefile=resource_filename('speclite','data/filters/{}.ecsv'.format(responsefilter))\n #- Grab wavelength and response information from file\n rfile=np.genfromtxt(responsefile)\n rfile=rfile[1:] # remove wavelength/response labels\n rwave=np.zeros(rfile.shape[0])\n response=np.zeros(rfile.shape[0])\n for i in range(rfile.shape[0]):\n rwave[i]=10.*rfile[i][0] # convert to angstroms\n response[i]=rfile[i][1]\n except:\n log.critical(\"Could not find filter response file, can't compute spectral magnitudes\")\n\n #- Convole flux with response information \n res=np.zeros(frame.wave.shape)\n for w in range(response.shape[0]):\n if w >= 1 and w<= response.shape[0]-2:\n ind=np.abs(frame.wave-rwave[w]).argmin()\n lo=(rwave[w]-rwave[w-1])/2\n wlo=rwave[w]-lo\n indlo=np.abs(frame.wave-wlo).argmin()\n hi=(rwave[w+1]-rwave[w])/2\n whi=rwave[w]+hi\n indhi=np.abs(frame.wave-whi).argmin()\n res[indlo:indhi]=response[w]\n skyrflux=res*flux[skyfibers]\n\n #- Calculate integrals for sky fibers\n integrals=[]\n for ii in range(len(skyrflux)):\n integrals.append(qalib.integrate_spec(frame.wave,skyrflux[ii]))\n integrals=np.array(integrals)\n\n #- Convert calibrated flux to fiber magnitude\n specmags=np.zeros(integrals.shape)\n specmags[integrals>0]=21.1-2.5*np.log10(integrals[integrals>0]/frame.meta[\"EXPTIME\"])\n avg_skyrband=np.mean(specmags[specmags>0])\n\n retval[\"METRICS\"]={\"SKYRBAND_FIB\":specmags,\"SKYRBAND\":avg_skyrband}\n\n #- If not in r channel, set reference and metrics to zero\n else:\n 
retval[\"PARAMS\"][\"SKYRBAND_{}_REF\".format(program.upper())]=[0.]\n zerospec=np.zeros_like(skyfibers)\n zerorband=0.\n retval[\"METRICS\"]={\"SKYRBAND_FIB\":zerospec,\"SKYRBAND\":zerorband}\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Sky_Peaks(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"PEAKCOUNT\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"PEAKCOUNT\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"PEAKCOUNT_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"PEAKCOUNT_WARN_RANGE\" in parms and \"PEAKCOUNT_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"PEAKCOUNT_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"PEAKCOUNT_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Sky_Peaks':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('fframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n from desispec.qa.qalib import sky_peaks\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n allpeaks=inputs[\"Peaks\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n\n # Parameters\n if param is None:\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n \n param['B_PEAKS']=allpeaks['B_PEAKS']\n param['R_PEAKS']=allpeaks['R_PEAKS']\n param['Z_PEAKS']=allpeaks['Z_PEAKS']\n\n #nspec_counts, sky_counts, tgt_counts, tgt_counts_rms = sky_peaks(param, frame)\n nspec_counts, sky_counts, skyfibers, nskyfib= sky_peaks(param, frame)\n rms_nspec = np.std(nspec_counts)#qalib.getrms(nspec_counts)\n rms_skyspec = np.std(sky_counts)#qalib.getrms(sky_counts) \n \n sumcount_med_sky=np.median(sky_counts)\n\n retval[\"PARAMS\"] = param\n\n fiberid=frame.fibermap['FIBER']\n\n retval[\"METRICS\"]={\"FIBERID\":fiberid,\"PEAKCOUNT\":sumcount_med_sky,\"PEAKCOUNT_NOISE\":rms_skyspec,\"PEAKCOUNT_FIB\":nspec_counts,\"SKYFIBERID\":skyfibers, \"NSKY_FIB\":nskyfib}#,\"PEAKCOUNT_TGT\":tgt_counts,\"PEAKCOUNT_TGT_NOISE\":tgt_counts_rms}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_sky_peaks(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Sky_Residual(MonitoringAlg):\n \"\"\" \n Use offline sky_residual function to calculate sky residuals\n \"\"\"\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"RESIDUAL\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"RESIDNOISE\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"RESID_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"RESID_WARN_RANGE\" in parms and \"RESID_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"RESID_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"RESID_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe \n\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Sky_Residual':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('sframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n skymodel=args[1]\n\n return self.run_qa(frame,skymodel,inputs)\n\n def run_qa(self,frame,skymodel,inputs):\n from desispec.sky import qa_skysub\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n \n if skymodel is None:\n raise IOError(\"Must have skymodel to find residual. It can't be None\")\n #- return values\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n \n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n \n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n retval[\"PARAMS\"] = param\n\n qadict=qalib.sky_resid(param,frame,skymodel,quick_look=True)\n\n retval[\"METRICS\"] = {}\n for key in qadict.keys():\n retval[\"METRICS\"][key] = qadict[key]\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_residuals(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Integrate_Spec(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"INTEG\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"DELTAMAG_TGT\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"DELTAMAG_TGT_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"DELTAMAG_WARN_RANGE\" in parms and \"DELTAMAG_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"DELTAMAG_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"DELTAMAG_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Integrate_Spec':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('cframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n flux=frame.flux\n ivar=frame.ivar\n wave=frame.wave\n\n retval={}\n retval[\"PANAME\" ] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=program=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n\n flux=frame.flux\n wave=frame.wave\n #- Grab magnitudes for appropriate filter\n if camera[0].lower() == 'b':\n band = 'G'\n responsefilter='decam2014-g'\n elif camera[0].lower() == 'r':\n band = 'R'\n responsefilter='decam2014-r'\n elif camera[0].lower() == 'z':\n band = 'Z'\n responsefilter='decam2014-z'\n else:\n raise ValueError(\"Camera {} not in b, r, or z channels...\".format(camera))\n\n #- Find fibers per target type\n elgfibers = np.where((frame.fibermap['DESI_TARGET'] & desi_mask.ELG) != 0)[0]\n lrgfibers = np.where((frame.fibermap['DESI_TARGET'] & desi_mask.LRG) != 0)[0]\n qsofibers = np.where((frame.fibermap['DESI_TARGET'] & desi_mask.QSO) != 0)[0]\n bgsfibers = np.where((frame.fibermap['DESI_TARGET'] & desi_mask.BGS_ANY) != 0)[0]\n mwsfibers = np.where((frame.fibermap['DESI_TARGET'] & desi_mask.MWS_ANY) != 0)[0]\n stdfibers = np.where(isStdStar(frame.fibermap))[0]\n skyfibers = np.where(frame.fibermap['OBJTYPE'] == 'SKY')[0]\n\n #- Setup target fibers per program\n if program == 'dark':\n objfibers = [elgfibers,lrgfibers,qsofibers,stdfibers]\n elif program == 'gray':\n objfibers = [elgfibers,stdfibers]\n elif program == 'bright':\n objfibers = [bgsfibers,mwsfibers,stdfibers]\n\n magnitudes=np.zeros(frame.nspec)\n key = 'FLUX_'+band\n magnitudes = 22.5 - 2.5*np.log10(frame.fibermap[key])\n #- Set objects with zero flux to 30 mag\n zeroflux = np.where(frame.fibermap[key]==0.)[0]\n magnitudes[zeroflux] = 30.\n\n #- Get filter response information from speclite\n try:\n from pkg_resources import resource_filename\n responsefile=resource_filename('speclite','data/filters/{}.ecsv'.format(responsefilter))\n #- Grab wavelength and response information from file\n rfile=np.genfromtxt(responsefile)\n rfile=rfile[1:] # remove wavelength/response labels\n rwave=np.zeros(rfile.shape[0])\n response=np.zeros(rfile.shape[0])\n for i in range(rfile.shape[0]):\n rwave[i]=10.*rfile[i][0] # convert to 
angstroms\n response[i]=rfile[i][1]\n except:\n log.critical(\"Could not find filter response file, can't compute spectral magnitudes\")\n\n #- Convole flux with response information \n res=np.zeros(frame.wave.shape)\n for w in range(response.shape[0]):\n if w >= 1 and w<= response.shape[0]-2:\n ind=np.abs(frame.wave-rwave[w]).argmin()\n lo=(rwave[w]-rwave[w-1])/2\n wlo=rwave[w]-lo\n indlo=np.abs(frame.wave-wlo).argmin()\n hi=(rwave[w+1]-rwave[w])/2\n whi=rwave[w]+hi\n indhi=np.abs(frame.wave-whi).argmin()\n res[indlo:indhi]=response[w]\n rflux=res*flux\n\n #- Calculate integrals for all fibers\n integrals=[]\n for ii in range(len(rflux)):\n integrals.append(qalib.integrate_spec(frame.wave,rflux[ii]))\n integrals=np.array(integrals)\n\n #- Convert calibrated flux to spectral magnitude\n specmags=np.zeros(integrals.shape)\n specmags[integrals>0]=21.1-2.5*np.log10(integrals[integrals>0]/frame.meta[\"EXPTIME\"])\n\n #- Save number of negative flux fibers\n negflux=np.where(specmags==0.)[0]\n num_negflux=len(negflux)\n\n #- Set sky and negative flux fibers to 30 mag\n specmags[skyfibers]=30.\n specmags[negflux]=30.\n\n #- Calculate integrals for each target type\n tgt_specmags=[]\n for T in objfibers:\n if num_negflux != 0:\n T=np.array(list(set(T) - set(negflux)))\n obj_integ=[]\n for ii in range(len(rflux[T])):\n obj_integ.append(qalib.integrate_spec(frame.wave,rflux[T][ii]))\n obj_integ = np.array(obj_integ)\n\n #- Convert calibrated flux to spectral magnitude per terget type\n #- Using ST magnitude system because frame flux is in units ergs/s/cm**2/A\n obj_specmags = np.zeros(obj_integ.shape)\n obj_specmags[obj_integ>0] = 21.1-2.5*np.log10(obj_integ[obj_integ>0]/frame.meta[\"EXPTIME\"])\n tgt_specmags.append(obj_specmags)\n\n tgt_specmags = np.array(tgt_specmags)\n\n #- Fiber magnitudes per target type\n tgt_mags=[]\n for obj in objfibers:\n if num_negflux != 0:\n obj=np.array(list(set(obj) - set(negflux)))\n tgt_mags.append(magnitudes[obj])\n\n tgt_mags = np.array(tgt_mags)\n\n #- Calculate delta mag, remove sky/negative flux fibers first\n remove_fib = np.array(list(set(skyfibers) | set(negflux)))\n nosky_specmags = np.delete(specmags,remove_fib)\n nosky_mags = np.delete(magnitudes,remove_fib)\n deltamag = nosky_specmags - nosky_mags\n\n #- Calculate avg delta mag per target type\n deltamag_tgt = tgt_specmags - tgt_mags \n deltamag_tgt_avg=[]\n for tgt in range(len(deltamag_tgt)):\n deltamag_tgt_avg.append(np.mean(deltamag_tgt[tgt]))\n\n if param is None:\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n\n retval[\"PARAMS\"] = param\n\n fiberid=frame.fibermap['FIBER']\n\n #SE: should not have any nan or inf at this point but let's keep it for safety measures here \n retval[\"METRICS\"]={\"FIBERID\":fiberid,\"NFIBNOTGT\":num_negflux,\"SPEC_MAGS\":specmags, \"DELTAMAG\":np.nan_to_num(deltamag), \"STD_FIBERID\":stdfibers, \"DELTAMAG_TGT\":np.nan_to_num(deltamag_tgt_avg)}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_integral(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval \n\n def get_default_config(self):\n return {}\n \nclass Calculate_SNR(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"SNR\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"FIDSNR_TGT\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"FIDSNR_TGT_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"FIDSNR_TGT_WARN_RANGE\" in parms and \"FIDSNR_TGT_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"FIDSNR_TGT_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"FIDSNR_TGT_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Calculate_SNR':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('sframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n #- return values\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = expid = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=program=fibmap[1].header['PROGRAM']\n\n objlist=[]\n if program == 'dark':\n objlist = ['ELG','LRG','QSO','STAR']\n elif program == 'gray':\n objlist = ['ELG','STAR']\n elif program == 'bright':\n objlist = ['BGS','MWS','STAR']\n\n retval[\"NIGHT\"] = night = frame.meta[\"NIGHT\"]\n\n ra = fibermap[\"TARGET_RA\"]\n dec = fibermap[\"TARGET_DEC\"]\n\n #- select band for mag, using DECAM_R if present\n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n\n fidboundary=None\n\n qadict,fitsnr = qalib.orig_SNRFit(frame,night,camera,expid,param,fidboundary=fidboundary)\n\n #- Check for inf and nans in missing magnitudes for json support of QLF #TODO review this later\n\n for obj in range(len(qadict[\"SNR_MAG_TGT\"])):\n for mag in [qadict[\"SNR_MAG_TGT\"][obj]]:\n k=np.where(~np.isfinite(mag))[0]\n if len(k) > 0:\n log.warning(\"{} objects have no or unphysical magnitudes\".format(len(k)))\n mag=np.array(mag)\n mag[k]=26. 
#- Putting 26, so as to make sure within reasonable range for plots.\n retval[\"METRICS\"] = qadict\n retval[\"PARAMS\"] = param\n\n rescut=param[\"RESIDUAL_CUT\"]\n sigmacut=param[\"SIGMA_CUT\"]\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_SNR(retval,qafig,objlist,fitsnr,rescut,sigmacut,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\nclass Check_Resolution(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"CHECKARC\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"CHECKARC\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"CHECKARC_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"CHECKARC_WARN_RANGE\" in parms and \"CHECKARC_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"CHECKARC_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"CHECKARC_NORMAL_RANGE\"]),QASeverity.NORMAL)]\n\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Check_Resolution':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n #- Finding psf file for QA\n #file_psf = get_psf('psf',night,expid,camera,kwargs[\"specdir\"])\n else: file_psf = args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(file_psf,inputs)\n\n def run_qa(self,file_psf,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n retval={}\n retval['PANAME'] = paname\n kwargs=self.config['kwargs']\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{:08d}'.format(kwargs['expid'])\n retval[\"CAMERA\"] = camera\n retval[\"PROGRAM\"] = 'ARC'\n retval[\"FLAVOR\"] = 'arc'\n retval[\"NIGHT\"] = kwargs['night']\n \n\n # file_psf.ycoeff is not the wsigma_array.\n # FIX later.TEST QA with file_psf.ycoeff\n \n wsigma_array = file_psf.ysig_vs_wave_traceset._coeff\n p0 = wsigma_array[0:, 0:1]\n p1 = wsigma_array[0:, 1:2]\n p2 = wsigma_array[0:, 2:3]\n\n #- Save array of ones and zeros for good/no fits\n nfib = len(p0)\n nofit = np.where(p0 == 0.)[0]\n allfibs=np.ones(nfib)\n allfibs[nofit] = 0.\n #- Total number of fibers fit used as scalar metric\n ngoodfits = len(np.where(allfibs == 1.)[0])\n\n # Medians of Legendre Coeffs to be used as 'Model'\n medlegpolcoef = np.median(wsigma_array,axis = 0)\n\n wsigma_rms = np.sqrt(np.mean((wsigma_array - medlegpolcoef)**2,axis = 0))\n\n # Check how many of each parameter are outside of +- 2 RMS of the median.\n toperror = np.array([medlegpolcoef[val] + 2*wsigma_rms[val] for val in [0,1,2]])\n bottomerror = np.array([medlegpolcoef[val] - 2*wsigma_rms[val] for val in [0,1,2]])\n\n badparamrnum0 = list(np.where(np.logical_or(p0>toperror[0], p0<bottomerror[0]))[0])\n badparamrnum1 = list(np.where(np.logical_or(p1>toperror[1], p1<bottomerror[1]))[0])\n badparamrnum2 = list(np.where(np.logical_or(p2>toperror[2], p2<bottomerror[2]))[0])\n nbadparam = np.array([len(badparamrnum0), len(badparamrnum1), len(badparamrnum2)])\n\n retval[\"METRICS\"]={\"CHECKARC\":ngoodfits, \"GOODPSFS\":allfibs, \"CHECKPSF\":nbadparam}\n retval[\"DATA\"]={\"LPolyCoef0\":p0, \"LPolyCoef1\":p1, \"LPolyCoef2\":p2}\n\n if param is None:\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n \n retval[\"PARAMS\"] = param\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_lpolyhist(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\nclass Check_FiberFlat(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"CHECKFLAT\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"CHECKFLAT\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"CHECKFLAT_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"CHECKFLAT_WARN_RANGE\" in parms and \"CHECKFLAT_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"CHECKFLAT_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"CHECKFLAT_NORMAL_RANGE\"]),QASeverity.NORMAL)]\n\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Check_FiberFlat':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n else: fibflat=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(fibflat,inputs)\n\n def run_qa(self,fibflat,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n \n kwargs=self.config['kwargs']\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"PROGRAM\"] = 'FLAT'\n retval[\"FLAVOR\"] = 'flat'\n retval[\"NIGHT\"] = kwargs['night']\n retval[\"CAMERA\"] = fibflat.header['CAMERA']\n retval[\"EXPID\"] = '{:08d}'.format(kwargs['expid'])\n\n if param is None:\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n\n retval[\"PARAMS\"] = param\n\n #- Calculate mean and rms fiberflat value for each fiber\n fiberflat = fibflat.fiberflat\n avg_fiberflat=[]\n rms=[]\n for fib in range(len(fiberflat)):\n avg_fiberflat.append(np.mean(fiberflat[fib]))\n rms.append(np.std(fiberflat[fib]))\n\n #- Calculate mean of the fiber means for scalar metric\n avg_all = np.mean(avg_fiberflat)\n\n retval['METRICS'] = {\"FLATMEAN\":avg_fiberflat, \"FLATRMS\":rms, \"CHECKFLAT\":avg_all}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n return retval\n\n def get_default_config(self):\n return {}\n", "import os\nimport numpy as np\n\nimport time, datetime\n\n\n#######################################\n########## Time Functions #############\n#######################################\ndef what_night_is_it():\n \"\"\"\n Return the current night\n \"\"\"\n d = datetime.datetime.utcnow() - datetime.timedelta(7 / 24 + 0.5)\n tonight = int(d.strftime('%Y%m%d'))\n return tonight\n\ndef get_nightly_start_time():\n \"\"\"\n Defines the time of day that the desi_daily_proc_manager should start (in Tucson local time).\n Before this time, the manager being woken by a cron job will exit immediately. Selected to give plenty of time\n between end of night and start of the next, but early enough to catch and begin running afternoon calibrations.\n\n Returns:\n 14: int. The number of hours after midnight that signifies the start of a new night of observing.\n \"\"\"\n return 14 # 2PM local Tucson time\n\n\ndef get_nightly_end_time():\n \"\"\"\n Defines when a night ends for desi_daily_proc_manager in local Tucson time. Once this time is exceeded,\n the manager will enter into queue cleanup mode and then exit when the jobs have finished. End of night is altered slightly\n for summer vs. winter.\n\n Returns:\n end_night: int. The number of hours after midnight that signifies the end of a night of observing.\n \"\"\"\n month = time.localtime().tm_mon\n if np.abs(month - 6) > 2:\n end_night = 8\n else:\n end_night = 7\n return end_night # local Tucson time the following morning\n\n\ndef ensure_tucson_time():\n \"\"\"\n Define the start and end of a 'night' based on times at the mountain. So ensure that the times are with respect to Arizona.\n \"\"\"\n if 'TZ' not in os.environ.keys() or os.environ['TZ'] != 'US/Arizona':\n os.environ['TZ'] = 'US/Arizona'\n time.tzset()\n\n\ndef nersc_format_datetime(timetup=None):\n \"\"\"\n Given a time tuple from the time module, this will return a string in the proper format to be properly interpreted\n by the NERSC Slurm queue scheduler.\n\n Args:\n timetup: tuple. A time.time() tuple object representing the time you want to trasnform into a Slurm readable string.\n\n Returns:\n str. String of the form YYYY-mm-ddTHH:MM:SS.\n \"\"\"\n if timetup is None:\n timetup = time.localtime()\n # YYYY-MM-DD[THH:MM[:SS]]\n return time.strftime('%Y-%m-%dT%H:%M:%S', timetup)\n\n\ndef nersc_start_time(night=None, starthour=None):\n \"\"\"\n Transforms a night and time into a YYYY-MM-DD[THH:MM[:SS]] time string Slurm can interpret\n\n Args:\n night: str or int. In the form YYYMMDD, the night the jobs are being run.\n starthour: str or int. The number of hours (between 0 and 24) after midnight where you began submitting jobs to the queue.\n\n Returns:\n str. 
String of the form YYYY-mm-ddTHH:MM:SS. Based on the given night and starthour\n \"\"\"\n if night is None:\n night = what_night_is_it()\n if starthour is None:\n starthour = get_nightly_start_time()\n starthour = int(starthour)\n timetup = time.strptime(f'{night}{starthour:02d}', '%Y%m%d%H')\n return nersc_format_datetime(timetup)\n\n\ndef nersc_end_time(night=None, endhour=None):\n \"\"\"\n Transforms a night and time into a YYYY-MM-DD[THH:MM[:SS]] time string Slurm can interpret. Correctly accounts for the fact\n that the night is defined starting at Noon on a given day.\n\n Args:\n night: str or int. In the form YYYMMDD, the night the jobs are being run.\n endhour: str or int. The number (between 0 and 24) of hours after midnight where you stop submitting jobs to the queue.\n\n Returns:\n str. String of the form YYYY-mm-ddTHH:MM:SS. Based on the given night and endhour\n \"\"\"\n if night is None:\n night = what_night_is_it()\n if endhour is None:\n endhour = get_nightly_end_time()\n\n endhour = int(endhour)\n yester_timetup = time.strptime(f'{night}{endhour:02d}', '%Y%m%d%H')\n yester_sec = time.mktime(yester_timetup)\n\n ## If ending in the PM, then the defined night is the same as the day it took place\n ## If in the AM then it corresponds to the following day, which requires adding 24 hours to the time.\n if endhour > 12:\n today_sec = yester_sec\n else:\n one_day_in_seconds = 24 * 60 * 60\n today_sec = yester_sec + one_day_in_seconds\n\n today_timetup = time.localtime(today_sec)\n return nersc_format_datetime(today_timetup)\n\n\ndef during_operating_hours(dry_run=False, starthour=None, endhour=None):\n \"\"\"\n Determines if the desi_daily_proc_manager should be running or not based on the time of day. Can be overwridden\n with dry_run for testing purposes.\n\n Args:\n dry_run: bool. If true, this is a simulation so return True so that the simulation can proceed.\n starthour: str or int. The number of hours (between 0 and 24) after midnight.\n endhour: str or int. The number (between 0 and 24) of hours after midnight. Assumes an endhour smaller than starthour\n implies the following day.\n\n Returns:\n bool. True if dry_run is true OR if the current time is between the starthour and endhour.\n \"\"\"\n if starthour is None:\n starthour = get_nightly_start_time()\n if endhour is None:\n endhour = get_nightly_end_time()\n ensure_tucson_time()\n hour = time.localtime().tm_hour\n\n if endhour < starthour:\n return dry_run or (hour < endhour) or (hour > starthour)\n else:\n return dry_run or ( (hour < endhour) and (hour > starthour) )\n", "\nfrom desispec.workflow.exptable import get_exposure_table_name,get_exposure_table_path, \\\n get_exposure_flags, get_last_step_options, get_exposure_table_column_defs, \\\n keyval_change_reporting, deconstruct_keyval_reporting\nfrom desispec.workflow.tableio import load_table, write_table\nfrom desispec.workflow.utils import pathjoin\nfrom desispec.io.util import parse_cameras, decode_camword, create_camword, parse_badamps, validate_badamps\n\nimport os\nimport numpy as np\nfrom astropy.table import Table\n\ndef process_int_range_inclusive(input_string):\n \"\"\"\n Given a str indicating a range of integers, this auto-detects the symbol used and returns that range as an INCLUSIVE\n numpy array of ints. Symbol can be ':', '-', or '..'.\n\n Args:\n input_string, str. String with integer range with the upper value being included in the output. E.g. 100:102\n returns 100,101,102.\n\n Returns:\n np.array. 
Array of ints for the range specified in the input_string.\n \"\"\"\n for symbol in [':','-','..']:\n if symbol in input_string:\n first,last = input_string.split(symbol)\n return np.arange(int(first),int(last)+1)\n\ndef parse_int_list_term(input_string, allints=None):\n \"\"\"\n Given a str this determines what integer values it represents. Whether that be \"all\" indicating all ints in the\n table column, a range of integers specified with ':', '-', or '..', or a single integer. This should not be a list.\n\n Args:\n input_string, str. String with either integer range, single integer, or 'all'. 'all' requires allints\n allints, np.array. One dimensional array of all integers. Returns if 'all' is specified.\n\n Returns:\n out_array, np.array. Array of ints for the string specified.\n \"\"\"\n if input_string.lower() == 'all' and allints is not None:\n out_array = np.asarray(allints)\n elif input_string.isnumeric():\n out_array = np.atleast_1d(int(input_string))\n elif np.any([symb in input_string for symb in [':','-','..']]):\n out_array = process_int_range_inclusive(input_string)\n else:\n raise ValueError(f\"Couldn't understand input {input_string}\")\n return out_array\n\ndef parse_int_list(input_string, allints=None, only_unique=True):\n \"\"\"\n Given a str this determines what integer values it represents. Whether that be \"all\" indicating all ints in the\n table column, a range of integers specified with ':', '-', or '..', a single integer, or an indeterminant number of\n them in a comma separated list.\n\n Args:\n input_string, str. String with either integer range, single integer, or 'all'. It can have a combination of\n multiple of these separated by a comma. 'all' requires allints.\n allints, np.array. One dimensional array of all integers. Returns if 'all' is specified.\n only_unique, bool. True if you want a unique set returned. Otherwise repeated entries in the input string\n are kept.\n\n Returns:\n out_array, np.array. Array of ints for the string specified.\n \"\"\"\n input_string = input_string.strip(' \\t,')\n out_array = np.atleast_1d()\n for substr in input_string.split(\",\"):\n out_array = np.append(out_array, parse_int_list_term(substr, allints=allints))\n if only_unique:\n out_array = np.unique(out_array)\n return out_array.astype(int)\n\ndef columns_not_to_report():\n \"\"\"\n Returns list of column names that shouldn't have reporting information saved because they are user-defined values.\n \"\"\"\n return ['COMMENTS', 'HEADERERR', 'BADCAMWORD', 'BADAMPS', 'LASTSTEP', 'EXPFLAG']\n\ndef columns_not_to_edit():\n \"\"\"\n Defines column names that shouldn't be edited.\n \"\"\"\n ## Occasionally unchanging things like NIGHT or TILEID have been missing in the headers, so we won't restrict\n ## that even though it typically shouldn't be edited if the data is there\n return ['EXPID', 'CAMWORD', 'OBSTYPE']\n\ndef columns_not_to_append():\n \"\"\"\n Defines column names that shouldn't be edited.\n \"\"\"\n return ['LASTSTEP', 'SURVEY', 'FA_SURV', 'FAPRGRM', 'GOALTYPE']\n\ndef validate_value(colname, value, joinsymb):\n \"\"\"\n Checks that the value provided matches the syntax of the colname given. If the syntax is incorrect\n an error is raised.\n\n Warning: may change the value of \"value\" and returns it.\n\n Args:\n colname, str. The name of the column that is being edited.\n value, any scalar type. The value that the column's current value should be changed to.\n\n Returns:\n value, any scalar type. The value that the column's current value should be changed to. 
This is verified to\n have the proper syntax for the colname given.\n\n \"\"\"\n ## Match data type and convert where necessary\n if colname == 'EXPFLAG':\n ## Make sure the exposure flag is a valid one\n expflags = get_exposure_flags()\n value = value.lower().replace(' ','_')\n if value not in expflags:\n raise ValueError(f\"Couldn't understand exposure flag: '{value}'. Available options are: {expflags}.\")\n elif colname == 'BADAMPS':\n ## Make sure we can decode the badamp value (or easily correct it so we can decode it)\n ## This raises an error if it can't be converted to a viable list\n value = validate_badamps(value, joinsymb=joinsymb)\n elif colname == 'BADCAMWORD':\n ## Make sure we can understand the cameras given\n ## This raises an error if it cant be parsed\n value = parse_cameras(value)\n elif colname == 'LASTSTEP':\n options = get_last_step_options()\n value = value.lower()\n if value not in options:\n raise ValueError(f\"Couldn't understand laststep: '{value}'. Available options are: {options}.\")\n elif joinsymb in value:\n print(f\"WARNING: For colname {colname} you provided a value '{value}' that contains the default\"+\n f\" joinsymbol='{joinsymb}'. This is allowed, but use at your own caution. Continuing...\")\n elif '|' in value:\n print(f\"WARNING: For colname {colname} you provided a value '{value}' that contains the default\"+\n \" indicator of an array string in the exposure tables (the 'pipe' i.e. '|'.\"+\n \" This is allowed, but use at your own caution. Continuing...\")\n else:\n ## Otherwise we don't have a strict syntax, so pass it\n pass\n return value\n\ndef document_in_comments(tablerow,colname,value,comment_col='HEADERERR'):\n \"\"\"\n Places \"reporting\" string in the appropriate comment column of the exposure table to document the edits being\n made.\n\n Note: This alters and returns the input tablerow. How astropy handles this may vary. As of Jan 2021, I believe a copy\n is made in memory upon altering of a tablerow object. So the output here should be returned and assigned to\n overwrite the old value in the input table.\n\n Args:\n tablerow, astropy.table.Row. A table row with columns colname and comment_col. Comment_col must be a numpy array.\n colname, str. The name of the column that is being edited.\n value, any scalar type. The value that the column's current value should be changed to.\n comment_col, str. The name of the comment column where the change reporting should be placed. Default is HEADERERR.\n\n Returns:\n tablerow, astropy.table.Row. A table row with columns colname and comment_col. 
Comment_col is a numpy array\n with the new reporting string included.\n\n \"\"\"\n colname = colname.upper()\n if colname in columns_not_to_report():\n return tablerow\n\n existing_entries = [colname in term for term in tablerow[comment_col]]\n if np.any(existing_entries):\n loc = np.where(existing_entries)[0][0]\n entry = tablerow[comment_col][loc]\n key, origval, oldval = deconstruct_keyval_reporting(entry)\n if key != colname:\n print(\"Key didn't match colname in document_in_comments\")\n raise\n new_entry = keyval_change_reporting(colname, origval, value)\n tablerow[comment_col][loc] = new_entry\n else:\n reporting = keyval_change_reporting(colname, tablerow[colname], value)\n tablerow[comment_col] = np.append(tablerow[comment_col], reporting)\n return tablerow\n\ndef change_exposure_table_rows(exptable, exp_str, colname, value, include_comment='', append_string=False,\n overwrite_value=False, joinsymb=','):\n \"\"\"\n Changes the column named colname to value of value for rows of exposure table in exptable that correspond to the\n exposures defined in exp_str.\n\n Note: This edits and returns the exptable given in the inputs.\n\n Args:\n exptable, astropy.table.Table. An exposure table defined in desispec.workflow.exptable. Each column is an exposure.\n exp_str, str. A string representing the exposure ID's for which you want to edit the column to a new value.\n The string can be any combination of integer ranges, single integers, or 'all'. Each range or integer\n is separated by a comma. 'all' implies all exposures. Ranges can be given using ':', '-', or '..'.\n All ranges are assumed to be inclusive.\n colname, str. The column name in the exptable where you want to change values.\n value, any scalar type. The value you want to change the column value of each exp_str exposure row to.\n include_comment, str. A user specified comment to be added to the COMMENTS column after setting colname to\n value for the given exp_str exposures.\n append_string, bool. True if you want to append your input value to the end of an existing string.\n overwrite_value, bool. Default is False. Must be set to True if you want to overwrite a non-default value.\n If current value is a default value for that column for that row,\n this doesn't need to be set.\n joinsymb, str. The symbol used to separate string elements that are being appended. Shouldn't be '|'.\n Default is ','.\n\n Returns:\n exptable, astropy.table.Table. 
The exposure table given in the input, with edits made to the column colname\n for the rows corresponding to the exposure ID's in exp_str.\n\n \"\"\"\n ## Make sure colname exists before proceeding\n ## Don't edit fixed columns\n colname = colname.upper()\n if colname in columns_not_to_edit():\n raise ValueError(f\"Not allowed to edit colname={colname}.\")\n if colname not in exptable.colnames:\n raise ValueError(f\"Colname {colname} not in exposure table\")\n if append_string and colname in columns_not_to_append():\n raise ValueError(f\"Cannot append_string to {colname}\")\n if append_string and overwrite_value:\n raise ValueError(\"Cannot append_string and overwrite_value.\")\n\n ## Parse the exposure numbers\n exposure_list = parse_int_list(exp_str, allints=exptable['EXPID'].data, only_unique=True)\n\n ## Match exposures to row numbers\n row_numbers = []\n for exp in exposure_list:\n rownum = np.where(exptable['EXPID'] == exp)[0]\n if rownum.size > 0:\n row_numbers.append(rownum[0])\n row_numbers = np.asarray(row_numbers)\n\n ## Make sure the value will work\n ## (returns as is if fine, corrects syntax if it can, or raises an error if it can't)\n value = validate_value(colname, value, joinsymb)\n\n ## If appending camwords, let's convert to camera list only once to save computation\n if colname == 'BADCAMWORD' and append_string:\n value_as_camlist = decode_camword(value)\n\n ## Inform user on whether reporting will be done\n if colname in columns_not_to_report():\n print(\"Won't do comment reporting for user defined column.\")\n\n ## Get column names and definitions\n colnames,coldtypes,coldeflts = get_exposure_table_column_defs(return_default_values=True)\n colnames,coldtypes,coldeflts = np.array(colnames),np.array(coldtypes),np.array(coldeflts,dtype=object)\n cur_dtype = coldtypes[colnames==colname][0]\n cur_default = coldeflts[colnames==colname][0]\n\n if include_comment != '' and 'COMMENTS' not in colnames:\n print(\"Given a comment to append to the exposure tables, but COMMENTS isn't in column names. \"+\n \"Not including comment\")\n\n ## Assign new value\n isstr = (cur_dtype in [str, np.str, np.str_] or type(cur_dtype) is str)\n isarr = (cur_dtype in [list, np.array, np.ndarray])\n appendable = (colname not in columns_not_to_append())\n\n if append_string and not isstr:\n raise ValueError(f\"Told to append_string but {colname} isn't a string: {cur_dtype}\")\n elif overwrite_value:\n print(f\"Overwriting values in column: {colname} to '{value}' for exposures: {exposure_list}.\")\n elif append_string:\n print(f\"Appending '{value}' to existing entries in column: {colname} for exposures: {exposure_list}.\")\n elif isarr:\n print(f\"Appending {value} to arrays in column: {colname} for exposures: {exposure_list}.\")\n else:\n print(f\"Changing default values in column: {colname} to '{value}' for exposures: {exposure_list}.\")\n\n orig_exptable = exptable.copy()[row_numbers]\n for rownum in row_numbers:\n if colname == 'BADCAMWORD' and exptable[colname][rownum] != cur_default and append_string:\n curcams = decode_camword(exptable[colname][rownum])\n if len(set(value_as_camlist).difference(set(curcams))) == 0:\n print(f\"For exposure: {exp}. Asked to append '{value}' to '{exptable[colname][rownum]}'\" +\n \" but all bad cameras are already present. 
Skipping and not commenting.\")\n continue\n else:\n curcams.extend(value_as_camlist)\n combinedcams = list(set(curcams))\n exptable[colname][rownum] = create_camword(combinedcams)\n elif colname == 'BADAMPS' and append_string and exptable[colname][rownum] != cur_default:\n curamps = exptable[colname][rownum].split(joinsymb)\n value_as_amplist = value.split(joinsymb)\n newvals = list(set(value_as_amplist).difference(set(curamps)))\n if len(newvals) == 0:\n print(f\"For exposure: {exp}. Asked to append '{value}' to '{exptable[colname][rownum]}'\"+\n \" but all badamps are already present. Skipping and not commenting.\")\n continue\n else:\n curamps.extend(newvals)\n exptable[colname][rownum] = joinsymb.join(curamps)\n elif isstr and append_string and exptable[colname][rownum] != cur_default:\n exptable[colname][rownum] += f'{joinsymb}{value}'\n elif isarr:\n if overwrite_value and len(exptable[colname][rownum])>0:\n exptable[rownum] = document_in_comments(exptable[rownum],colname,value)\n exptable[colname][rownum] = np.append(cur_default, value)\n else:\n exptable[colname][rownum] = np.append(exptable[colname][rownum], value)\n else:\n if overwrite_value or exptable[colname][rownum] == cur_default:\n exptable[rownum] = document_in_comments(exptable[rownum],colname,value)\n exptable[colname][rownum] = value\n else:\n exp = exptable[rownum]['EXPID']\n err = f\"In exposure {exp} for column {colname}: asked to fill non-default \" + \\\n f\"entry '{exptable[colname][rownum]}' with '{value}'.\\n\" + \\\n f\"\\t\\tTo overwrite, use --overwrite-value.\\n\"\n if appendable:\n err += \"\\t\\tTo append to the existing, use --append-string.\\n\"\n err += f\"\\t\\tOriginal column entries for requested exposures were:\\n\"\n for exp,val in zip(list(orig_exptable['EXPID']), list(orig_exptable[colname])):\n err += f\"\\t\\t\\t{exp}: {val}\\n\"\n err += \"\\n\\t\\tNo entries updated. Exiting.\"\n raise ValueError (err)\n\n if include_comment != '' and 'COMMENTS' in colnames:\n exptable['COMMENTS'][rownum] = np.append(exptable['COMMENTS'][rownum], include_comment)\n meaningful_comments = (exptable['COMMENTS'][rownum] != '')\n exptable['COMMENTS'][rownum] = exptable['COMMENTS'][rownum][meaningful_comments]\n\n return exptable\n\ndef edit_exposure_table(exp_str, colname, value, night=None, include_comment='', tablepath=None,\n append_string=False, overwrite_value=False, use_spec_prod=True,\n read_user_version=False, write_user_version=False, overwrite_file=True, joinsymb=','):\n \"\"\"\n Edits the exposure table on disk to change the column named colname to value of value for rows of exposure table\n that correspond to the exposures defined in exp_str. The table on disk can be defined using night given directly\n with tablepath.\n\n Note: This overwrites an exposure table file on disk by default.\n\n Args:\n exp_str, str. A string representing the exposure ID's for which you want to edit the column to a new value.\n The string can be any combination of integer ranges, single integers, or 'all'. Each range or integer\n is separated by a comma. 'all' implies all exposures. Ranges can be given using ':', '-', or '..'.\n All ranges are assumed to be inclusive.\n colname, str. The column name in the exptable where you want to change values.\n value, any scalar type. The value you want to change the column value of each exp_str exposure row to.\n night, str or int. The night the exposures were acquired on. This uniquely defines the exposure table.\n include_comment, str. 
A user specified comment to be added to the COMMENTS column after setting colname to\n value for the given exp_str exposures.\n tablepath, str. A relative or absolute path to the exposure table file, if named differently from the default\n in desispec.workflow.exptable.\n append_string, bool. True if you want to append your input value to the end of an existing string.\n overwrite_value, bool. Default is False. Must be set to True if you want to overwrite a non-default value.\n If current value is a default value for that column for that row,\n this doesn't need to be set.\n use_spec_prod, bool. True if you want to read in the exposure table defined by night from the currently\n defined SPECPROD as opposed to the exposure table repository location. Default is True.\n read_user_version, bool. True if you want to read in an exposure table saved including the current user's\n USER name. Meant for test editing of a file multiple times. If the file doesn't exist,\n the non-user value is loaded. Default is False.\n write_user_version, bool. True if you want to write in an exposure table saved including the current user's\n USER name. Meant for test editing of a file without overwriting the true exposure table.\n Default is False.\n overwrite_file, bool. True if you want to overwrite the file on disk. Default is True.\n joinsymb, str. The symbol used to separate string elements that are being appended. Shouldn't be '|'.\n Default is ','.\n \"\"\"\n ## Don't edit fixed columns\n colname = colname.upper()\n if tablepath is None and night is None:\n raise ValueError(\"Must specify night or the path to the table.\")\n if colname in columns_not_to_edit():\n raise ValueError(f\"Not allowed to edit colname={colname}.\")\n if append_string and colname in ['LASTSTEP', 'SURVEY', 'FA_SURV', 'FAPRGRM', 'GOALTYPE']:\n raise ValueError(f\"Cannot append_string to {colname}\")\n if append_string and overwrite_value:\n raise ValueError(\"Cannot append_string and overwrite_value.\")\n\n ## Get the file locations\n if tablepath is not None:\n path, name = os.path.split(tablepath)\n else:\n path = get_exposure_table_path(night=night, usespecprod=use_spec_prod)\n name = get_exposure_table_name(night=night)#, extension='.csv')\n\n pathname = pathjoin(path, name)\n user_pathname = os.path.join(path, name.replace('.csv', '_' + str(os.environ['USER']) + '.csv'))\n\n ## Read in the table\n if read_user_version:\n if os.path.isfile(user_pathname):\n exptable = load_table(tablename=user_pathname, tabletype='exptable')\n else:\n print(\"Couldn't locate a user version of the exposure table, loading the default version of the table.\")\n exptable = load_table(tablename=pathname, tabletype='exptable')\n else:\n exptable = load_table(tablename=pathname, tabletype='exptable')\n\n if exptable is None:\n print(\"There was a problem loading the exposure table... Exiting.\")\n return\n\n ## Do the modification\n outtable = change_exposure_table_rows(exptable, exp_str, colname, value, include_comment,\n append_string, overwrite_value, joinsymb)\n\n ## Write out the table\n if write_user_version:\n write_table(outtable, tablename=user_pathname, tabletype='exptable', overwrite=overwrite_file)\n print(f\"Wrote edited table to: {user_pathname}\")\n else:\n write_table(outtable, tablename=pathname, tabletype='exptable', overwrite=overwrite_file)\n print(f\"Wrote edited table to: {pathname}\")\n" ]
[ [ "numpy.unique", "numpy.isnan", "numpy.array", "numpy.where", "numpy.sum" ], [ "numpy.isnan", "numpy.abs", "numpy.mean", "numpy.allclose" ], [ "numpy.sqrt", "numpy.asarray", "numpy.nan_to_num", "numpy.all", "numpy.concatenate", "numpy.mean", "numpy.zeros_like", "numpy.where", "numpy.arange", "numpy.std", "numpy.zeros", "numpy.median", "numpy.genfromtxt", "numpy.logical_or", "numpy.delete", "numpy.log10", "numpy.array", "numpy.abs", "numpy.isfinite", "numpy.gradient", "numpy.percentile", "numpy.ones", "numpy.float64" ], [ "numpy.abs" ], [ "numpy.unique", "numpy.asarray", "numpy.atleast_1d", "numpy.append", "numpy.any", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
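The record that closes here pairs five desispec-related source files (quicklook QA metrics, nightly-timing helpers, and exposure-table editing, judging by the imports in the quoted code) with, for each file, the list of numpy call sites detected in it (the `apis` entry) and an empty `possible_versions` placeholder. As a minimal sketch of how such a record could be sanity-checked — assuming it has been exported as ordinary JSON under a hypothetical filename `record.json`, and that the keys match the field names used in this dump — one can verify that every listed API name actually occurs in the paired code string:

```python
# Illustrative sketch only: the filename, the plain-JSON export, and the loose
# matching rule (bare attribute name, since the quoted sources import numpy as np)
# are assumptions made for this example, not part of the dataset tooling.
import json
import re

def check_record(path="record.json"):
    with open(path) as fh:
        rec = json.load(fh)  # expected keys: repo_name, file_path, code, apis

    for file_path, code, api_list in zip(rec["file_path"], rec["code"], rec["apis"]):
        for api in api_list:                      # e.g. "numpy.nan_to_num"
            attr = api.split(".")[-1]             # -> "nan_to_num"
            if not re.search(r"\b%s\b" % re.escape(attr), code):
                print("%s: %s not found in %s" % (rec["repo_name"], api, file_path))

if __name__ == "__main__":
    check_record()
```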
IRC-SPHERE/SklearnHyperStream
[ "7799e0ea15135fe5cb2935bdd39b471c53ccf0ff", "7799e0ea15135fe5cb2935bdd39b471c53ccf0ff" ]
[ "online_learning_plugins/sklearn/tools/dataset/2017-08-22_v0.0.1.py", "example_autoencoder_keras.py" ]
[ "# The MIT License (MIT)\n# Copyright (c) 2014-2017 University of Bristol\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\n# OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom hyperstream import Tool, StreamInstance\nfrom hyperstream.utils import check_input_stream_count\n\nfrom datetime import datetime, timedelta\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import label_binarize\nimport numpy as np\nfrom pytz import UTC\n\n\nclass Dataset(Tool):\n def __init__(self, dataset, shuffle=True, epochs=1, seed=None):\n \"\"\"\n Converts a static dataset into a stream with timestamps\n\n Parameters\n ==========\n dataset: data structure with the following attributes\n data: matrix\n Matrix with one row per sample and one column per feature\n target: array of int\n Array of integers with one label per sample\n\n shuffle: boolean\n Value indicating if the data needs to be shuffled\n\n epochs: Integer\n Number of iterations that the data will be repeated\n\n seed: Integer\n seed for the shuffling process\n \"\"\"\n super(Dataset, self).__init__(dataset=dataset, shuffle=shuffle,\n epochs=epochs, seed=seed)\n\n @check_input_stream_count(0)\n def _execute(self, sources, alignment_stream, interval):\n \"\"\"\n Processes the input data and produces streamed data\n\n yelds\n =====\n stream : with date and dictionary with following entries\n x_tr: array of float\n Training values for the given data stream\n y_tr: array of int\n Training binary label corresponding to the given data stream\n x_te: array of float\n Test values for the given data stream\n y_te: array of int\n Test binary label corresponding to the given data stream\n \"\"\"\n x = self.dataset.data\n y = self.dataset.target\n # Binarize data\n classes = np.unique(y)\n y = label_binarize(y, classes)\n\n j = 0\n start_dt = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)\n for i in range(self.epochs):\n X_tr, X_te, Y_tr, Y_te = train_test_split(\n x, y, shuffle=self.shuffle, train_size=0.5, stratify=y,\n random_state=self.seed)\n for x_tr, y_tr in zip(X_tr, Y_tr):\n x_te, y_te = X_te[j % len(X_te)], Y_te[j % len(Y_te)]\n j += 1\n dt = (start_dt + timedelta(minutes=j)).replace(tzinfo=UTC)\n yield StreamInstance(dt, dict(x_tr=x_tr.reshape(1, -1),\n x_te=x_te.reshape(1, -1),\n y_tr=y_tr.reshape(1, -1),\n y_te=y_te.reshape(1, -1)))\n", "import numpy as np\nimport argparse\nimport re\n\nfrom datetime import datetime, timedelta\n\nfrom sklearn import datasets, linear_model\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.metrics 
import mean_squared_error as mse\n\nfrom hyperstream import HyperStream, TimeInterval\nfrom hyperstream.utils import UTC\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD\nfrom keras.wrappers.scikit_learn import KerasClassifier\n\nimport matplotlib.pyplot as plt\n\nfrom utils import generate_hidden_images\n\n\nclass MyKerasUnsupervised(object):\n \"\"\"This is a modification of the KerasUnsupervised in order to keep the\n labels with the original values.\n\n Implementation of the scikit-learn classifier API for Keras.\n \"\"\"\n def __init__(self, architecture='auto2s', lr=0.1):\n self.model = None\n self.architecture = architecture\n self.lr = lr\n\n def fit(self, x, classes=None, **kwargs):\n \"\"\"Constructs a new model with `build_fn` & fit the model to `(x, y)`.\n # Arguments\n x : array-like, shape `(n_samples, n_features)`\n Training samples where n_samples in the number of samples\n and n_features is the number of features.\n y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`\n True labels for X.\n **kwargs: dictionary arguments\n Legal arguments are the arguments of `Sequential.fit`\n # Returns\n history : object\n details about the training history at each epoch.\n \"\"\"\n if self.model is None:\n self.model = self.create_model(input_dim=x.shape[1],\n architecture=self.architecture,\n lr=self.lr)\n return self.model.fit(x, x, batch_size=x.shape[0], epochs=1, verbose=0)\n\n def create_model(self, input_dim=1, optimizer='rmsprop',\n init='glorot_uniform', lr=1, momentum=0.0, decay=0.0,\n nesterov=False , architecture='lr'):\n \"\"\"\n Parameters\n ----------\n architecture: string: lr, mlp100, mlp100d, mlp100d100d\n \"\"\"\n\n\n previous_layer = input_dim\n aux_architecture = re.split('(\\d+)', architecture)\n self.architecture = [aux_architecture[0]]\n for s in aux_architecture[1:]:\n if s.isdigit():\n self.architecture.append(s)\n else:\n for ss in s:\n self.architecture.append(ss)\n if self.architecture[0] == 'auto':\n encoder = Sequential()\n decoder = Sequential()\n\n actual = encoder\n for i in range(1, len(self.architecture)):\n if self.architecture[i] == 'd':\n actual.add(Dropout(0.5))\n elif self.architecture[i] == 's':\n actual.add(Activation('sigmoid'))\n elif self.architecture[i] == 'r':\n actual.add(Activation('relu'))\n elif self.architecture[i] == 'm':\n actual.add(Dense(input_size, kernel_initializer=init))\n actual.add(Activation('softmax'))\n elif self.architecture[i] == 'n':\n actual.add(BatchNormalization())\n elif self.architecture[i].isdigit():\n actual_layer = int(self.architecture[i])\n actual.add(Dense(actual_layer, input_dim=previous_layer,\n kernel_initializer=init))\n previous_layer = actual_layer\n elif self.architecture[i] == 'l':\n continue\n elif self.architecture[i] == '_':\n actual = decoder\n else:\n raise(ValueError, 'Architecture with a wrong specification')\n\n decoder.add(Dense(input_dim, input_dim=previous_layer,\n kernel_initializer=init))\n else:\n raise(ValueError, 'Architecture with a wrong specification')\n\n model = Sequential()\n model.add(encoder)\n model.add(decoder)\n self.encoder = encoder\n self.decoder = decoder\n print(encoder.summary())\n print(decoder.summary())\n print(model.summary())\n\n if optimizer == 'sgd':\n optimizer = SGD(lr=lr, momentum=momentum, decay=decay,\n nesterov=nesterov)\n\n loss = 'mean_squared_error'\n model.compile(loss=loss, optimizer=optimizer)\n 
return model\n\n def predict(self, x):\n return self.model.predict(x, verbose=0)\n\n def transform(self, x):\n return self.encoder.predict(x, verbose=0)\n\n def inverse_transform(self, h):\n return self.decoder.predict(h, verbose=0)\n\n def score(self, x, pred):\n return mse(x, pred)\n\ndef get_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-a', '--architecture', type=str,\n default='auto2l',\n help='''Autoencoder architecture in the following form:\n start always with auto, then add hidden layers\n specifying the number of units and the activation\n functions with a letter. The available letters are s\n for sigmoid, l for linear, m for softmax, d for\n dropout with fixed value of 0.5. Eg. 'auto20s10s2s'\n will generate an autoencoder with 20 sigmoid units, 10\n sigmoid units 2 sigmoid units, 2 sigmoid units, 10\n sigmoid units 20 sigmoid units and input_size units.''')\n parser.add_argument('-d', '--dataset', type=str, default='iris',\n help='''Dataset to use. Working options: iris,\n breast_cancer, wine, digits''')\n parser.add_argument('-e', '--epochs', type=int, default=10,\n help='Number of epochs to run the classifier')\n parser.add_argument('-s', '--seed', type=int, default=42,\n help='Seed for the data shuffle')\n parser.add_argument('-b', '--batchsize', type=int, default=1,\n help='Batch size during training')\n parser.add_argument('-l', '--learning-rate', type=float, default=1.0,\n help='Learning rate')\n\n return parser.parse_args()\n\n\ndef plot_digit(x):\n shape = int(np.sqrt(x.shape[1]))\n x.reshape(shape, shape)\n\n\ndef main(dataset, architecture, epochs, seed, batchsize, learning_rate):\n hs = HyperStream(loglevel=30)\n print(hs)\n print([p.channel_id_prefix for p in hs.config.plugins])\n\n M = hs.channel_manager.memory\n\n data = getattr(datasets, 'load_{}'.format(dataset))()\n data_tool = hs.plugins.sklearn.tools.dataset(data, shuffle=True,\n epochs=epochs, seed=seed)\n data_stream = M.get_or_create_stream('dataset')\n\n model = MyKerasUnsupervised(architecture=architecture, lr=learning_rate)\n unsupervised_tool = hs.plugins.sklearn.tools.unsupervised(model)\n unsupervised_stream = M.get_or_create_stream('unsupervised')\n\n now = datetime.utcnow().replace(tzinfo=UTC)\n now = (now - timedelta(hours=1)).replace(tzinfo=UTC)\n before = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)\n ti = TimeInterval(before, now)\n\n data_tool.execute(sources=[], sink=data_stream, interval=ti)\n\n print(\"Example of a data stream\")\n key, value = data_stream.window().iteritems().next()\n print('[%s]: %s' % (key, value))\n\n mini_batch_tool = hs.plugins.sklearn.tools.minibatch(batchsize=batchsize)\n mini_batch_stream = M.get_or_create_stream('mini_batch')\n mini_batch_tool.execute(sources=[data_stream], sink=mini_batch_stream,\n interval=ti)\n\n unsupervised_tool.execute(sources=[mini_batch_stream], sink=unsupervised_stream,\n interval=ti)\n\n scores = []\n for key, value in unsupervised_stream.window():\n scores.append(value['score'])\n\n # The data is repeated the number of epochs. This makes the mini-batches to\n # cycle and contain data from the begining and end of the dataset. 
This\n # makes possible that the number of scores is not divisible by epochs.\n if batchsize == 1:\n print(\"Test scores per epoch\")\n scores = np.array(scores).reshape(epochs, -1)\n print(scores.mean(axis=1).round(decimals=2))\n else:\n scores = np.array(scores).reshape(1,-1)\n print(\"Test scores per minibatch (cyclic)\")\n print(scores.round(decimals=2))\n\n if dataset == 'digits' and model.decoder.input_shape[1] == 2:\n minmax = 5\n image = generate_hidden_images(model, digit_size=8, n=15, minmax=minmax)\n fig = plt.figure(figsize=(6,6))\n ax = fig.add_subplot(111)\n ax.imshow(image, extent = [-minmax, minmax, -minmax, minmax], cmap='Greys')\n fig.savefig('autoencoder_{}.svg'.format(architecture))\n\n\nif __name__ == '__main__':\n arguments = get_arguments()\n main(**vars(arguments))\n" ]
[ [ "sklearn.preprocessing.label_binarize", "sklearn.model_selection.train_test_split", "numpy.unique" ], [ "matplotlib.pyplot.figure", "numpy.array", "numpy.sqrt", "sklearn.metrics.mean_squared_error" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
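The second file in the record above (example_autoencoder_keras.py) builds its encoder/decoder stack from a compact architecture string such as 'auto20s10s2s': `re.split('(\d+)', ...)` separates layer widths from single-letter codes, digits become Dense layer sizes, and the letters map to activations and regularisers ('s' sigmoid, 'r' relu, 'd' dropout, 'm' softmax, 'n' batch-norm, 'l' linear, '_' switches from encoder to decoder), as the quoted create_model shows. A standalone sketch of just that tokenisation step — with a function name of my own choosing — makes the encoding easier to follow:

```python
# Standalone sketch of the architecture-string tokenisation used in the quoted
# MyKerasUnsupervised.create_model; the function name here is illustrative only.
import re

def parse_architecture(architecture):
    """Split e.g. 'auto20s10s2s' into ['auto', '20', 's', '10', 's', '2', 's']."""
    pieces = re.split(r'(\d+)', architecture)
    tokens = [pieces[0]]              # leading prefix, e.g. 'auto'
    for piece in pieces[1:]:
        if piece.isdigit():
            tokens.append(piece)      # a hidden-layer width
        else:
            tokens.extend(piece)      # one token per letter code
    return tokens

print(parse_architecture('auto20s10s2s'))   # ['auto', '20', 's', '10', 's', '2', 's']
print(parse_architecture('auto2l'))         # ['auto', '2', 'l']
```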
AHammoudeh/Flow_AH
[ "16c5641be3e9e85511756f75efd002478edaee9b" ]
[ "flow/visualize/time_space_diagram.py" ]
[ "\"\"\"Generate a time space diagram for some networks.\n\nThis method accepts as input a csv file containing the sumo-formatted emission\nfile, and then uses this data to generate a time-space diagram, with the x-axis\nbeing the time (in seconds), the y-axis being the position of a vehicle, and\ncolor representing the speed of te vehicles.\n\nIf the number of simulation steps is too dense, you can plot every nth step in\nthe plot by setting the input `--steps=n`.\n\nNote: This script assumes that the provided network has only one lane on the\neach edge, or one lane on the main highway in the case of MergeNetwork.\n\nUsage\n-----\n::\n python time_space_diagram.py </path/to/emission>.csv </path/to/params>.json\n\"\"\"\nfrom flow.utils.rllib import get_flow_params\nfrom flow.networks import RingNetwork, FigureEightNetwork, MergeNetwork, I210SubNetwork, HighwayNetwork\n\nimport argparse\nimport csv\ntry:\n from matplotlib import pyplot as plt\nexcept ImportError:\n import matplotlib\n matplotlib.use('TkAgg')\n from matplotlib import pyplot as plt\nfrom matplotlib.collections import LineCollection\nimport matplotlib.colors as colors\nimport numpy as np\n\n\n# networks that can be plotted by this method\nACCEPTABLE_NETWORKS = [\n RingNetwork,\n FigureEightNetwork,\n MergeNetwork,\n I210SubNetwork,\n HighwayNetwork\n]\n\n\ndef import_data_from_emission(fp):\n r\"\"\"Import relevant data from the predefined emission (.csv) file.\n\n Parameters\n ----------\n fp : str\n file path (for the .csv formatted file)\n\n Returns\n -------\n dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n \"\"\"\n # initialize all output variables\n veh_id, t, edge, rel_pos, vel, lane = [], [], [], [], [], []\n\n # import relevant data from emission file\n for record in csv.DictReader(open(fp)):\n veh_id.append(record['id'])\n t.append(record['time'])\n edge.append(record['edge_id'])\n rel_pos.append(record['relative_position'])\n vel.append(record['speed'])\n lane.append(record['lane_number'])\n\n # we now want to separate data by vehicle ID\n ret = {key: {'time': [], 'edge': [], 'pos': [], 'vel': [], 'lane': []}\n for key in np.unique(veh_id)}\n for i in range(len(veh_id)):\n ret[veh_id[i]]['time'].append(float(t[i]))\n ret[veh_id[i]]['edge'].append(edge[i])\n ret[veh_id[i]]['pos'].append(float(rel_pos[i]))\n ret[veh_id[i]]['vel'].append(float(vel[i]))\n ret[veh_id[i]]['lane'].append(float(lane[i]))\n\n return ret\n\n\ndef get_time_space_data(data, params):\n r\"\"\"Compute the unique inflows and subsequent outflow statistics.\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters, including:\n\n * \"network\" (str): name of the network that was used when generating\n the emission file. Must be one of the network names mentioned in\n ACCEPTABLE_NETWORKS,\n * \"net_params\" (flow.core.params.NetParams): network-specific\n parameters. This is used to collect the lengths of various network\n links.\n\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. 
Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. Set to zero if the vehicle is not present in the network at\n that time step.\n as_array\n a (n_steps,) vector representing the unique time steps in the\n simulation\n\n Raises\n ------\n AssertionError\n if the specified network is not supported by this method\n \"\"\"\n # check that the network is appropriate\n assert params['network'] in ACCEPTABLE_NETWORKS, \\\n 'Network must be one of: ' + ', '.join(ACCEPTABLE_NETWORKS)\n\n # switcher used to compute the positions based on the type of network\n # switcher used to compute the positions based on the type of network\n switcher = {\n RingNetwork: _ring_road,\n MergeNetwork: _merge,\n FigureEightNetwork: _figure_eight,\n I210SubNetwork: _i210_subnetwork,\n HighwayNetwork: _highway,\n }\n\n # Collect a list of all the unique times.\n all_time = []\n for veh_id in data.keys():\n all_time.extend(data[veh_id]['time'])\n all_time = np.sort(np.unique(all_time))\n\n # Get the function from switcher dictionary\n func = switcher[params['network']]\n\n # Execute the function\n pos, speed, all_time = func(data, params, all_time)\n\n return pos, speed, all_time\n\n\ndef _merge(data, params, all_time):\n r\"\"\"Generate position and speed data for the merge.\n\n This only include vehicles on the main highway, and not on the adjacent\n on-ramp.\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters\n all_time : array_like\n a (n_steps,) vector representing the unique time steps in the\n simulation\n\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. 
Set to zero if the vehicle is not present in the network at\n that time step.\n \"\"\"\n # import network data from flow params\n inflow_edge_len = 100\n premerge = params['net'].additional_params['pre_merge_length']\n postmerge = params['net'].additional_params['post_merge_length']\n\n # generate edge starts\n edgestarts = {\n 'inflow_highway': 0,\n 'left': inflow_edge_len + 0.1,\n 'center': inflow_edge_len + premerge + 22.6,\n 'inflow_merge': inflow_edge_len + premerge + postmerge + 22.6,\n 'bottom': 2 * inflow_edge_len + premerge + postmerge + 22.7,\n ':left_0': inflow_edge_len,\n ':center_0': inflow_edge_len + premerge + 0.1,\n ':center_1': inflow_edge_len + premerge + 0.1,\n ':bottom_0': 2 * inflow_edge_len + premerge + postmerge + 22.6\n }\n\n # compute the absolute position\n for veh_id in data.keys():\n data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],\n data[veh_id]['pos'], edgestarts)\n\n # prepare the speed and absolute position in a way that is compatible with\n # the space-time diagram, and compute the number of vehicles at each step\n pos = np.zeros((all_time.shape[0], len(data.keys())))\n speed = np.zeros((all_time.shape[0], len(data.keys())))\n for i, veh_id in enumerate(sorted(data.keys())):\n for spd, abs_pos, ti, edge in zip(data[veh_id]['vel'],\n data[veh_id]['abs_pos'],\n data[veh_id]['time'],\n data[veh_id]['edge']):\n # avoid vehicles outside the main highway\n if edge in ['inflow_merge', 'bottom', ':bottom_0']:\n continue\n ind = np.where(ti == all_time)[0]\n pos[ind, i] = abs_pos\n speed[ind, i] = spd\n\n return pos, speed, all_time\n\n\ndef _highway(data, params, all_time):\n r\"\"\"Generate position and speed data for the highway subnetwork.\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters\n all_time : array_like\n a (n_steps,) vector representing the unique time steps in the\n simulation\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. Set to zero if the vehicle is not present in the network at\n that time step.\n \"\"\"\n length = params['net'].additional_params['length']\n num_edges = params['net'].additional_params['num_edges']\n edge_len = length / num_edges\n edge_starts = {}\n for i in range(num_edges):\n edge_starts.update({\"highway_{}\".format(i): i * edge_len, \":edge_{}_0\".format(i): i * edge_len})\n\n # compute the absolute position\n for veh_id in data.keys():\n data[veh_id]['abs_pos'] = _get_abs_pos_1_edge(data[veh_id]['edge'],\n data[veh_id]['pos'],\n edge_starts)\n\n # track only vehicles that were around during this time period\n # create the output variables\n pos = np.zeros((all_time.shape[0], len(data.keys())))\n speed = np.zeros((all_time.shape[0], len(data.keys())))\n observed_row_list = []\n for i, veh_id in enumerate(sorted(data.keys())):\n for spd, abs_pos, ti, edge, lane in zip(data[veh_id]['vel'],\n data[veh_id]['abs_pos'],\n data[veh_id]['time'],\n data[veh_id]['edge'],\n data[veh_id]['lane']):\n # avoid vehicles not on the relevant edges. 
Also only check the second to\n # last lane\n if edge not in edge_starts.keys() or ti not in all_time:\n continue\n else:\n if i not in observed_row_list:\n observed_row_list.append(i)\n ind = np.where(ti == all_time)[0]\n pos[ind, i] = abs_pos\n speed[ind, i] = spd\n\n pos = pos[:, observed_row_list]\n speed = speed[:, observed_row_list]\n\n return pos, speed, all_time\n\n\ndef _ring_road(data, params, all_time):\n r\"\"\"Generate position and speed data for the ring road.\n\n Vehicles that reach the top of the plot simply return to the bottom and\n continue.\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters\n all_time : array_like\n a (n_steps,) vector representing the unique time steps in the\n simulation\n\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. Set to zero if the vehicle is not present in the network at\n that time step.\n \"\"\"\n # import network data from flow params\n ring_length = params['net'].additional_params[\"length\"]\n junction_length = 0.1 # length of inter-edge junctions\n\n edgestarts = {\n \"bottom\": 0,\n \":right_0\": 0.25 * ring_length,\n \"right\": 0.25 * ring_length + junction_length,\n \":top_0\": 0.5 * ring_length + junction_length,\n \"top\": 0.5 * ring_length + 2 * junction_length,\n \":left_0\": 0.75 * ring_length + 2 * junction_length,\n \"left\": 0.75 * ring_length + 3 * junction_length,\n \":bottom_0\": ring_length + 3 * junction_length\n }\n\n # compute the absolute position\n for veh_id in data.keys():\n data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],\n data[veh_id]['pos'], edgestarts)\n\n # create the output variables\n pos = np.zeros((all_time.shape[0], len(data.keys())))\n speed = np.zeros((all_time.shape[0], len(data.keys())))\n for i, veh_id in enumerate(sorted(data.keys())):\n for spd, abs_pos, ti in zip(data[veh_id]['vel'],\n data[veh_id]['abs_pos'],\n data[veh_id]['time']):\n ind = np.where(ti == all_time)[0]\n pos[ind, i] = abs_pos\n speed[ind, i] = spd\n\n return pos, speed, all_time\n\n\ndef _i210_subnetwork(data, params, all_time):\n r\"\"\"Generate position and speed data for the i210 subnetwork.\n\n We only look at the second to last lane of edge 119257908#1-AddedOnRampEdge\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters\n all_time : array_like\n a (n_steps,) vector representing the unique time steps in the\n simulation\n\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. 
Set to zero if the vehicle is not present in the network at\n that time step.\n \"\"\"\n # import network data from flow params\n #\n # edge_starts = {\"119257908#0\": 0,\n # \"119257908#1-AddedOnRampEdge\": 686.98}\n desired_lane = 1\n edge_starts = {\"119257914\": 0,\n \"119257908#0\": 61.58,\n \"119257908#1-AddedOnRampEdge\": 686.98 + 61.58}\n # edge_starts = {\"119257908#0\": 0}\n # edge_starts = {\"119257908#1-AddedOnRampEdge\": 0}\n # desired_lane = 5\n\n # compute the absolute position\n for veh_id in data.keys():\n data[veh_id]['abs_pos'] = _get_abs_pos_1_edge(data[veh_id]['edge'],\n data[veh_id]['pos'],\n edge_starts)\n\n # create the output variables\n # TODO(@ev) handle subsampling better than this\n low_time = int(0 / params['sim'].sim_step)\n high_time = int(1600 / params['sim'].sim_step)\n all_time = all_time[low_time:high_time]\n\n # track only vehicles that were around during this time period\n observed_row_list = []\n pos = np.zeros((all_time.shape[0], len(data.keys())))\n speed = np.zeros((all_time.shape[0], len(data.keys())))\n for i, veh_id in enumerate(sorted(data.keys())):\n for spd, abs_pos, ti, edge, lane in zip(data[veh_id]['vel'],\n data[veh_id]['abs_pos'],\n data[veh_id]['time'],\n data[veh_id]['edge'],\n data[veh_id]['lane']):\n # avoid vehicles not on the relevant edges. Also only check the second to\n # last lane\n if edge not in edge_starts.keys() or ti not in all_time or lane != desired_lane:\n continue\n else:\n if i not in observed_row_list:\n observed_row_list.append(i)\n ind = np.where(ti == all_time)[0]\n pos[ind, i] = abs_pos\n speed[ind, i] = spd\n\n pos = pos[:, observed_row_list]\n speed = speed[:, observed_row_list]\n\n return pos, speed, all_time\n\n\ndef _figure_eight(data, params, all_time):\n r\"\"\"Generate position and speed data for the figure eight.\n\n The vehicles traveling towards the intersection from one side will be\n plotted from the top downward, while the vehicles from the other side will\n be plotted from the bottom upward.\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters\n all_time : array_like\n a (n_steps,) vector representing the unique time steps in the\n simulation\n\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. 
Set to zero if the vehicle is not present in the network at\n that time step.\n \"\"\"\n # import network data from flow params\n net_params = params['net']\n ring_radius = net_params.additional_params['radius_ring']\n ring_edgelen = ring_radius * np.pi / 2.\n intersection = 2 * ring_radius\n junction = 2.9 + 3.3 * net_params.additional_params['lanes']\n inner = 0.28\n\n # generate edge starts\n edgestarts = {\n 'bottom': inner,\n 'top': intersection / 2 + junction + inner,\n 'upper_ring': intersection + junction + 2 * inner,\n 'right': intersection + 3 * ring_edgelen + junction + 3 * inner,\n 'left': 1.5 * intersection + 3 * ring_edgelen + 2 * junction + 3 * inner,\n 'lower_ring': 2 * intersection + 3 * ring_edgelen + 2 * junction + 4 * inner,\n ':bottom_0': 0,\n ':center_1': intersection / 2 + inner,\n ':top_0': intersection + junction + inner,\n ':right_0': intersection + 3 * ring_edgelen + junction + 2 * inner,\n ':center_0': 1.5 * intersection + 3 * ring_edgelen + junction + 3 * inner,\n ':left_0': 2 * intersection + 3 * ring_edgelen + 2 * junction + 3 * inner,\n # for aimsun\n 'bottom_to_top': intersection / 2 + inner,\n 'right_to_left': junction + 3 * inner,\n }\n\n # compute the absolute position\n for veh_id in data.keys():\n data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],\n data[veh_id]['pos'], edgestarts)\n\n # create the output variables\n pos = np.zeros((all_time.shape[0], len(data.keys())))\n speed = np.zeros((all_time.shape[0], len(data.keys())))\n for i, veh_id in enumerate(sorted(data.keys())):\n for spd, abs_pos, ti in zip(data[veh_id]['vel'],\n data[veh_id]['abs_pos'],\n data[veh_id]['time']):\n ind = np.where(ti == all_time)[0]\n pos[ind, i] = abs_pos\n speed[ind, i] = spd\n\n # reorganize data for space-time plot\n figure_eight_len = 6 * ring_edgelen + 2 * intersection + 2 * junction + 10 * inner\n intersection_loc = [edgestarts[':center_1'] + intersection / 2,\n edgestarts[':center_0'] + intersection / 2]\n pos[pos < intersection_loc[0]] += figure_eight_len\n pos[np.logical_and(pos > intersection_loc[0], pos < intersection_loc[1])] \\\n += - intersection_loc[1]\n pos[pos > intersection_loc[1]] = \\\n - pos[pos > intersection_loc[1]] + figure_eight_len + intersection_loc[0]\n\n return pos, speed, all_time\n\n\ndef _get_abs_pos(edge, rel_pos, edgestarts):\n \"\"\"Compute the absolute positions from edges and relative positions.\n\n This is the variable we will ultimately use to plot individual vehicles.\n\n Parameters\n ----------\n edge : list of str\n list of edges at every time step\n rel_pos : list of float\n list of relative positions at every time step\n edgestarts : dict\n the absolute starting position of every edge\n\n Returns\n -------\n list of float\n the absolute positive for every sample\n \"\"\"\n ret = []\n for edge_i, pos_i in zip(edge, rel_pos):\n ret.append(pos_i + edgestarts[edge_i])\n return ret\n\n\ndef _get_abs_pos_1_edge(edges, rel_pos, edge_starts):\n \"\"\"Compute the absolute positions from a subset of edges.\n\n This is the variable we will ultimately use to plot individual vehicles.\n\n Parameters\n ----------\n edges : list of str\n list of edges at every time step\n rel_pos : list of float\n list of relative positions at every time step\n edge_starts : dict\n the absolute starting position of every edge\n\n Returns\n -------\n list of float\n the absolute positive for every sample\n \"\"\"\n ret = []\n for edge_i, pos_i in zip(edges, rel_pos):\n if edge_i in edge_starts.keys():\n ret.append(pos_i + edge_starts[edge_i])\n 
else:\n ret.append(-1)\n return ret\n\n\ndef make_ts_diagram(flow_params, emission_path, min_speed, max_speed, start, stop, title):\n # flow_params is imported as a dictionary\n if '.json' in flow_params:\n flow_params = get_flow_params(flow_params)\n else:\n module = __import__(\"examples.exp_configs.non_rl\", fromlist=[flow_params])\n flow_params = getattr(module, flow_params).flow_params\n\n # import data from the emission.csv file\n emission_data = import_data_from_emission(emission_path)\n\n # compute the position and speed for all vehicles at all times\n pos, speed, time = get_time_space_data(emission_data, flow_params)\n\n # some plotting parameters\n cdict = {\n 'red': ((0, 0, 0), (0.2, 1, 1), (0.6, 1, 1), (1, 0, 0)),\n 'green': ((0, 0, 0), (0.2, 0, 0), (0.6, 1, 1), (1, 1, 1)),\n 'blue': ((0, 0, 0), (0.2, 0, 0), (0.6, 0, 0), (1, 0, 0))\n }\n my_cmap = colors.LinearSegmentedColormap('my_colormap', cdict, 1024)\n\n # perform plotting operation\n fig = plt.figure(figsize=(16, 9))\n ax = plt.axes()\n norm = plt.Normalize(min_speed, max_speed)\n cols = []\n\n xmin = max(time[0], start)\n xmax = min(time[-1], stop)\n xbuffer = (xmax - xmin) * 0.025 # 2.5% of range\n ymin, ymax = np.amin(pos), np.amax(pos)\n ybuffer = (ymax - ymin) * 0.025 # 2.5% of range\n\n ax.set_xlim(xmin - xbuffer, xmax + xbuffer)\n ax.set_ylim(ymin - ybuffer, ymax + ybuffer)\n\n for indx_car in range(pos.shape[1]):\n unique_car_pos = pos[:, indx_car]\n\n if flow_params['network'] == I210SubNetwork or flow_params['network'] == HighwayNetwork:\n indices = np.where(pos[:, indx_car] != 0)[0]\n unique_car_speed = speed[indices, indx_car]\n points = np.array([time[indices], pos[indices, indx_car]]).T.reshape(-1, 1, 2)\n else:\n\n # discontinuity from wraparound\n disc = np.where(np.abs(np.diff(unique_car_pos)) >= 10)[0] + 1\n unique_car_time = np.insert(time, disc, np.nan)\n unique_car_pos = np.insert(unique_car_pos, disc, np.nan)\n unique_car_speed = np.insert(speed[:, indx_car], disc, np.nan)\n #\n points = np.array(\n [unique_car_time, unique_car_pos]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lc = LineCollection(segments, cmap=my_cmap, norm=norm)\n\n # Set the values used for color mapping\n lc.set_array(unique_car_speed)\n lc.set_linewidth(1.75)\n cols.append(lc)\n\n plt.title(title, fontsize=25)\n plt.ylabel('Position (m)', fontsize=20)\n plt.xlabel('Time (s)', fontsize=20)\n\n for col in cols:\n line = ax.add_collection(col)\n cbar = plt.colorbar(line, ax=ax, norm=norm)\n cbar.set_label('Velocity (m/s)', fontsize=20)\n cbar.ax.tick_params(labelsize=18)\n\n plt.xticks(fontsize=18)\n plt.yticks(fontsize=18)\n\n ###########################################################################\n # Note: For MergeNetwork only #\n if flow_params['network'] == 'MergeNetwork': #\n plt.plot(time, [0] * pos.shape[0], linewidth=3, color=\"white\") #\n plt.plot(time, [-0.1] * pos.shape[0], linewidth=3, color=\"white\") #\n ###########################################################################\n\n plt.show()\n\n\nif __name__ == '__main__':\n # create the parser\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='[Flow] Generates time space diagrams for flow networks.',\n epilog='python time_space_diagram.py </path/to/emission>.csv '\n '</path/to/flow_params>.json')\n\n # required arguments\n parser.add_argument('emission_path', type=str,\n help='path to the csv file.')\n parser.add_argument('flow_params', type=str,\n help='path to 
the flow_params json file.')\n\n # optional arguments\n parser.add_argument('--steps', type=int, default=1,\n help='rate at which steps are plotted.')\n parser.add_argument('--title', type=str, default='Time Space Diagram',\n help='title for the time-space diagram.')\n parser.add_argument('--max_speed', type=int, default=8,\n help='The maximum speed in the color range.')\n parser.add_argument('--min_speed', type=int, default=0,\n help='The minimum speed in the color range.')\n parser.add_argument('--start', type=float, default=0,\n help='initial time (in sec) in the plot.')\n parser.add_argument('--stop', type=float, default=float('inf'),\n help='final time (in sec) in the plot.')\n\n args = parser.parse_args()\n\n make_ts_diagram(args.flow_params, args.emission_path, args.min_speed,\n args.max_speed, args.start, args.stop, args.title)\n" ]
[ [ "numpy.amax", "matplotlib.pyplot.axes", "numpy.concatenate", "matplotlib.pyplot.plot", "numpy.where", "numpy.unique", "matplotlib.colors.LinearSegmentedColormap", "numpy.diff", "numpy.insert", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.amin", "matplotlib.collections.LineCollection", "matplotlib.pyplot.Normalize", "numpy.logical_and", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "matplotlib.use", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jonassagild/Track-to-Track-Fusion
[ "6bb7fbe6a6e2d9a2713c47f211899226485eee79" ]
[ "scripts/plot_results_kf_dependence.py" ]
[ "\"\"\"plot_stuff script to plot things\n\nJust temporary code to plot things. Not for producing results, but for testing code.\n\"\"\"\nimport numpy as np\nimport scipy\nfrom stonesoup.types.state import GaussianState\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Ellipse\n\n\nfrom trackers.kalman_filter_dependent_fusion import kalman_filter_dependent_fusion\n\nfrom utils.scenario_generator import generate_scenario_2\nfrom utils import open_object\nfrom utils.save_figures import save_figure\n\n# run dependent fusion and plot\n\nseed = 1996\n\nsigma_process = 1\nsigma_meas_radar = 5\nsigma_meas_ais = 10\nnum_steps = 15\n\nsave_fig = True\n\ngenerate_scenario_2(seed=seed, permanent_save=False, sigma_process=sigma_process, sigma_meas_radar=sigma_meas_radar,\n sigma_meas_ais=sigma_meas_ais, timesteps=num_steps)\n\nfolder = \"temp\" # temp instead of seed, as it is not a permanent save\n\n# load ground truth and the measurements\ndata_folder = \"../scenarios/scenario2/\" + folder + \"/\"\nground_truth = open_object.open_object(data_folder + \"ground_truth.pk1\")\nmeasurements_radar = open_object.open_object(data_folder + \"measurements_radar.pk1\")\nmeasurements_ais = open_object.open_object(data_folder + \"measurements_ais.pk1\")\n\n# load start_time\nstart_time = open_object.open_object(data_folder + \"start_time.pk1\")\n\n# prior\nprior = GaussianState([0, 1, 0, 1], np.diag([1.5, 0.5, 1.5, 0.5]) ** 2, timestamp=start_time)\n\n# tracker\nkf_dependent_fusion = kalman_filter_dependent_fusion(measurements_radar, measurements_ais, start_time, prior,\n sigma_process_radar=sigma_process,\n sigma_process_ais=sigma_process,\n sigma_meas_radar=sigma_meas_radar,\n sigma_meas_ais=sigma_meas_ais)\n\n# hacky way; just so its easy to reuse code\nmeasurement_model_radar = kf_dependent_fusion.measurement_model_radar\nmeasurement_model_ais = measurement_model_radar\n\ntracks_fused, tracks_ais, tracks_radar = kf_dependent_fusion.track()\n\n# plot\nfig = plt.figure(figsize=(10, 6))\nax = fig.add_subplot(1, 1, 1)\nax.set_xlabel(\"$x$\")\nax.set_ylabel(\"$y$\")\nax.axis('equal')\nax.plot([state.state_vector[0] for state in ground_truth],\n [state.state_vector[2] for state in ground_truth],\n linestyle=\"--\",\n label='Ground truth')\nax.scatter([state.state_vector[0] for state in measurements_radar],\n [state.state_vector[1] for state in measurements_radar],\n color='b',\n label='Measurements Radar')\nax.scatter([state.state_vector[0] for state in measurements_ais],\n [state.state_vector[1] for state in measurements_ais],\n color='r',\n label='Measurements AIS')\n\n# add ellipses to the posteriors\nfor state in tracks_radar:\n w, v = np.linalg.eig(measurement_model_radar.matrix() @ state.covar @ measurement_model_radar.matrix().T)\n max_ind = np.argmax(w)\n min_ind = np.argmin(w)\n orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),\n width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n angle=np.rad2deg(orient),\n alpha=0.2,\n color='b')\n ax.add_artist(ellipse)\n\nfor state in tracks_ais:\n w, v = np.linalg.eig(measurement_model_ais.matrix() @ state.covar @ measurement_model_ais.matrix().T)\n max_ind = np.argmax(w)\n min_ind = np.argmin(w)\n orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),\n width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n angle=np.rad2deg(orient),\n alpha=0.2,\n color='r')\n ax.add_patch(ellipse)\n\nfor 
track_fused in tracks_fused:\n w, v = np.linalg.eig(measurement_model_ais.matrix() @ track_fused.covar @ measurement_model_ais.matrix().T)\n max_ind = np.argmax(w)\n min_ind = np.argmin(w)\n orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n ellipse = Ellipse(xy=(track_fused.state_vector[0], track_fused.state_vector[2]),\n width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n angle=np.rad2deg(orient),\n alpha=0.5,\n color='green')\n ax.add_patch(ellipse)\n\n# add ellipses to add legend todo do this less ugly\nellipse = Ellipse(xy=(0, 0),\n width=0,\n height=0,\n color='r',\n alpha=0.2,\n label='Posterior AIS')\nax.add_patch(ellipse)\nellipse = Ellipse(xy=(0, 0),\n width=0,\n height=0,\n color='b',\n alpha=0.2,\n label='Posterior Radar')\nax.add_patch(ellipse)\nellipse = Ellipse(xy=(0, 0),\n width=0,\n height=0,\n color='green',\n alpha=0.5,\n label='Posterior Fused')\nax.add_patch(ellipse)\n\nax.legend(prop={'size': 12})\ntitle = \"Scenario 1 with $\\sigma_{AIS} = \" + str(sigma_meas_ais) + \"$, $\\sigma_{radar} = \" + str(sigma_meas_radar) + \\\n \"$ and $\\sigma_{process} = \" + str(sigma_process) + \\\n \"$. \\n Fusion is performed accounting for the common process noise.\"\nax.set_title(title, fontsize=20)\nfig.show()\nif save_fig:\n save_figure(\"../results/final_results/scenario_examples\", \"scenario1_example.pdf\", fig)\n\n# # plot estimate for estimate\n# # plot\n# fig_2 = plt.figure(figsize=(10, 6))\n# ax = fig_2.add_subplot(1, 1, 1)\n# ax.set_xlabel(\"$x$\")\n# ax.set_ylabel(\"$y$\")\n# ax.axis('equal')\n# ax.plot([state.state_vector[0] for state in ground_truth],\n# [state.state_vector[2] for state in ground_truth],\n# linestyle=\"--\",\n# label='Ground truth')\n# # ax.scatter([state.state_vector[0] for state in measurements_radar],\n# # [state.state_vector[1] for state in measurements_radar],\n# # color='b',\n# # label='Measurements Radar')\n# # ax.scatter([state.state_vector[0] for state in measurements_ais],\n# # [state.state_vector[1] for state in measurements_ais],\n# # color='r',\n# # label='Measurements AIS')\n#\n# # for i in range(0, len(tracks_fused)):\n# # # plot measurements\n# # ax.scatter([measurements_radar[i + 1].state_vector[0]],\n# # [measurements_radar[i + 1].state_vector[1]],\n# # color='b',\n# # label='Measurements Radar')\n# # ax.scatter([measurements_ais[i + 1].state_vector[0]],\n# # [measurements_ais[i + 1].state_vector[1]],\n# # color='r',\n# # label='Measurements AIS')\n# #\n# # # plot one and one estimate\n# # state_radar = tracks_radar[i + 1]\n# # w, v = np.linalg.eig(measurement_model_radar.matrix() @ state_radar.covar @ measurement_model_radar.matrix().T)\n# # max_ind = np.argmax(w)\n# # min_ind = np.argmin(w)\n# # orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# # ellipse = Ellipse(xy=(state_radar.state_vector[0], state_radar.state_vector[2]),\n# # width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# # angle=np.rad2deg(orient),\n# # alpha=0.2,\n# # color='b')\n# # ax.add_artist(ellipse)\n# #\n# # state_ais = tracks_ais[i + 1]\n# # w, v = np.linalg.eig(measurement_model_ais.matrix() @ state_ais.covar @ measurement_model_ais.matrix().T)\n# # max_ind = np.argmax(w)\n# # min_ind = np.argmin(w)\n# # orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# # ellipse = Ellipse(xy=(state_ais.state_vector[0], state_ais.state_vector[2]),\n# # width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# # angle=np.rad2deg(orient),\n# # alpha=0.2,\n# # color='r')\n# # ax.add_patch(ellipse)\n# #\n# # state_fused = 
tracks_fused[i]\n# # w, v = np.linalg.eig(measurement_model_ais.matrix() @ state_fused.covar @ measurement_model_ais.matrix().T)\n# # max_ind = np.argmax(w)\n# # min_ind = np.argmin(w)\n# # orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# # ellipse = Ellipse(xy=(state_fused.state_vector[0], state_fused.state_vector[2]),\n# # width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# # angle=np.rad2deg(orient),\n# # alpha=0.5,\n# # color='green')\n# # ax.add_patch(ellipse)\n# #\n# # fig_2.show()\n# # input(\"Press Enter to continue...\")\n\n\n#\n# # add ellipses to the posteriors\n# for state in tracks_radar:\n# w, v = np.linalg.eig(measurement_model_radar.matrix() @ state.covar @ measurement_model_radar.matrix().T)\n# max_ind = np.argmax(w)\n# min_ind = np.argmin(w)\n# orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),\n# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# angle=np.rad2deg(orient),\n# alpha=0.2,\n# color='b')\n# ax.add_artist(ellipse)\n#\n# for state in tracks_ais:\n# w, v = np.linalg.eig(measurement_model_ais.matrix() @ state.covar @ measurement_model_ais.matrix().T)\n# max_ind = np.argmax(w)\n# min_ind = np.argmin(w)\n# orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),\n# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# angle=np.rad2deg(orient),\n# alpha=0.2,\n# color='r')\n# ax.add_patch(ellipse)\n#\n# for track_fused in tracks_fused:\n# w, v = np.linalg.eig(measurement_model_ais.matrix() @ track_fused[1] @ measurement_model_ais.matrix().T)\n# max_ind = np.argmax(w)\n# min_ind = np.argmin(w)\n# orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# ellipse = Ellipse(xy=(track_fused[0][0], track_fused[0][2]),\n# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# angle=np.rad2deg(orient),\n# alpha=0.5,\n# color='green')\n# ax.add_patch(ellipse)\n\n# fig_2.show()\n" ]
[ [ "numpy.diag", "matplotlib.patches.Ellipse", "numpy.sqrt", "numpy.rad2deg", "numpy.arctan2", "numpy.argmax", "numpy.argmin", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gkuling/BIRADS_BERT
[ "f218d05283df90e536b210efbb4fab1d6dff082d" ]
[ "examples/MLM_Training_transformers.py" ]
[ "'''\nCopyright (c) 2020, Martel Lab, Sunnybrook Research Institute\nCodes inspired by Hugging Face Transformers package code run_mlm.py\nhttps://github.com/huggingface/transformers/blob/main/examples/pytorch/\nlanguage-modeling/run_mlm.py\n\nDescription: Training code used to train a BERT embedding in Masked Language\nModeling for BERTFineTuning Codes.\n\nInput: train and test folders filled with .txt documents holding a list of\nsentences. These .txt files can be created with TextPReProcessingBERTModel.py\nfile.\nOutput: A saved Transformer model based on Huggingface Transformers package.\nThis includes a cnofig.json, eval_results.txt, pytorch_model.bin,\ntrianing_args.bin, and vocab.txt.\n'''\nimport sys\nsys.path.append('.')\n\nimport argparse\nimport os\nimport torch\nimport logging\nimport random\nimport numpy as np\nfrom transformers import BertConfig, BertForMaskedLM, AdamW, \\\n get_linear_schedule_with_warmup, BertTokenizer\nfrom torch.utils.data import DataLoader, SequentialSampler, RandomSampler\nfrom tqdm import tqdm, trange\nfrom tokenizers.implementations import BertWordPieceTokenizer\nfrom transformers.data.data_collator import DataCollatorForLanguageModeling\nfrom transformers.data.datasets import TextDataset\n\nfrom datetime import datetime as dt\n\ntic = dt.now()\n\nparser = argparse.ArgumentParser()\nlogger = logging.getLogger(__name__)\n# Required parameters\nparser.add_argument(\"--train_data_file\", default=None, type=str,\n required=True,\n help=\"The input training data in a .txt file\"\n \"files.\")\nparser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions \"\n \"and checkpoints will be written.\")\nparser.add_argument('--overwrite_output_dir', action='store_true',\n help=\"Overwrite the content of the output directory\")\nparser.add_argument(\"--per_gpu_train_batch_size\", default=16, type=int,\n help=\"Batch size per GPU/CPU for training.\")\nparser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\nparser.add_argument(\"--eval_data_file\", default=None, type=str,\n required=False,\n help=\"The input training data in a .txt file\"\n \"files.\")\nparser.add_argument(\"--num_train_epochs\", default=1.0, type=float,\n help=\"Total number of training epochs to perform.\")\nparser.add_argument(\"--warmup_steps\", default=2000, type=int,\n help=\"Linear warmup over warmup_steps.\")\nparser.add_argument('--save_steps', type=int, default=10000,\n help=\"Save checkpoint every X updates steps.\")\nparser.add_argument('--data_portion', type=float, default=1.0,\n help=\"The portion of the training data you wish to load. \"\n \"(1.0 for all data, >1.0 for a portion\")\nparser.add_argument('--logging_steps', type=int, default=10000,\n help=\"Log every X updates steps.\")\nparser.add_argument('--block_size', type=int, default=32,\n help=\"Max sequence length used in tokenizer and dataset.\")\nparser.add_argument(\"--start_from_checkpoint\", action='store_true',\n help=\"Start training from latest checkpoint.\")\nparser.add_argument(\"--preliminary_model\", type=str, default='fromScratch',\n help='Choice to start the model from a previously trained '\n 'model or start from scratch. Used with '\n 'model.from_pretrained(preliminary_model. 
')\nargs = parser.parse_args()\n\ndef set_seed(sd):\n random.seed(sd)\n np.random.seed(sd)\n torch.manual_seed(sd)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(sd)\n\ndef evaluate(args, model, eval_dataset, tokenizer, step, prefix=\"\"):\n \"\"\"\n Evaluation of model\n :param args: input arguments from parser\n :param model: pytorch model to be evaluated\n :param eval_dataset: dataset used for evaluation\n :param tokenizer: tokenizer used by the model\n :param step: the current step in training\n :param prefix: prescript to be added to the beginning of save file\n :return: results of evaluation\n \"\"\"\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n print('')\n eval_output_dir = args.output_dir\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n eval_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n data_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer,\n mlm=True,\n mlm_probability=0.15\n )\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,\n batch_size=eval_batch_size,\n collate_fn=data_collator\n )\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n\n for batch in tqdm(eval_dataloader,\n desc=\"Evaluating\",\n position=0,\n leave=True):\n\n with torch.no_grad():\n outputs = model(input_ids=batch['input_ids'].to(args.device),\n labels=batch['labels'].to(args.device))\n loss = outputs['loss']\n eval_loss += loss.mean().item()\n\n nb_eval_steps += 1\n\n eval_loss /= nb_eval_steps\n perplexity = torch.exp(torch.tensor(eval_loss))\n\n result = {\n \"perplexity\": perplexity,\n 'loss': eval_loss,\n \"Iteration\": str(step)\n }\n\n output_eval_file = os.path.join(eval_output_dir, prefix,\n \"eval_results.txt\")\n with open(output_eval_file, \"a\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n writer.write('\\n')\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s, \" % (key, str(result[key])))\n\n writer.close()\n\n return result\n\ndef train(args, train_dataset, model, tokenizer, eval_dataset=None):\n \"\"\"\n Train the model\n :param args: input arguments from parser\n :param train_dataset: dataset used for training\n :param model: pytorch model to be evaluated\n :param tokenizer: tokenizer used by the model\n :param eval_dataset: dataset used for evaluation\n :return:\n \"\"\"\n\n data_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer,\n mlm=True,\n mlm_probability=0.15\n )\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler,\n batch_size=args.train_batch_size,\n collate_fn=data_collator\n )\n\n init_total = len(\n train_dataloader) * args.num_train_epochs\n\n # loading a modle from a checkpoint if neccesary\n if args.start_from_checkpoint:\n chk_pt_fdlr = [fldr for fldr in os.listdir(args.output_dir) if\n fldr.startswith('checkpoint')]\n chk_pt_fdlr.sort()\n logger.info(\"***** Running training from checkpoint: \" + str(\n chk_pt_fdlr[-1]) + \"*****\")\n global_step = int(''.join([chr for chr in chk_pt_fdlr[-1]\n if 
chr.isdigit()]))\n it_total = init_total - global_step\n args.num_train_epochs = np.round(it_total / len(train_dataloader))\n # model = BertForMaskedLM(config=config)\n model = BertForMaskedLM.from_pretrained(args.output_dir + '/' +\n chk_pt_fdlr[-1])\n model.to(args.device)\n\n logger.info('Loaded checkpoint model. Beginning training.')\n else:\n global_step = 0\n it_total = init_total\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if\n not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01},\n {'params': [p for n, p in model.named_parameters() if\n any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5,\n eps=1e-8)\n if global_step > args.warmup_steps:\n scheduler = \\\n get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=init_total)\n for _ in range(global_step):\n scheduler.step()\n logger.info('Initialized LR Scheduler and brought it to current step.')\n else:\n scheduler = \\\n get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=it_total)\n # multi-gpu training\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\",\n args.per_gpu_train_batch_size)\n logger.info(\" Total optimization steps = %d\", it_total)\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\")\n set_seed(seed) # Added here for reproducibility (even between python 2\n # and 3)\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader,\n desc=\"Iteration\",\n position=0,\n leave=True)\n epoch_iterator.set_postfix({'loss': 'Initialized'})\n for step, batch in enumerate(epoch_iterator):\n model.train()\n outputs = model(input_ids=batch['input_ids'].to(args.device),\n labels=batch['labels'].to(args.device))\n # model outputs are always tuple in transformers (see doc)\n loss = outputs['loss']\n\n if args.n_gpu > 1:\n # mean() to average on multi-gpu parallel training\n loss = loss.mean()\n\n loss.backward()\n\n tr_loss += loss.item()\n epoch_iterator.set_postfix({'loss': loss.item()})\n\n torch.nn.utils.clip_grad_norm_(model.parameters(),\n 1.0)\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n results = evaluate(args, model, eval_dataset, tokenizer,\n step=global_step)\n\n if args.save_steps > 0 and global_step % args.save_steps == 0:\n checkpoint_prefix = 'checkpoint'\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir,\n '{}-{}'.format(checkpoint_prefix,\n global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = model.module \\\n if hasattr(model, 'module') \\\n else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n torch.save(args,\n os.path.join(output_dir, 'training_args.bin'))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n return global_step, tr_loss / global_step, model\n\n\nargs.mlm = True\n\nif 
os.path.exists(args.output_dir) and os.listdir(\n args.output_dir) and not args.overwrite_output_dir:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. Use \"\n \"--overwrite_output_dir to overcome.\".format(\n args.output_dir))\n\n# Setup CUDA, GPU & distributed training\ndevice = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\")\nargs.n_gpu = torch.cuda.device_count()\n\nargs.device = device\n\n# Setup logging\nlogging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger.info(\n \"Device: %s, n_gpu: %s\", device, args.n_gpu)\n\n# Set seed\nseed = 20210325\nset_seed(seed)\n\nlogger.info(\"Beginning Tokenizer Training on data in \" + args.train_data_file)\npaths = args.train_data_file\nargs.vocab_size = int(''.join([char for char in args.train_data_file.split(\n '/')[-1] if char.isnumeric()]))\nif not args.preliminary_model != 'fromScratch' and \\\n not args.start_from_checkpoint:\n # Building custom Tokenizer\n tokenizer = BertWordPieceTokenizer(\n clean_text=True,\n strip_accents=True,\n lowercase=True,\n )\n tokenizer.train(\n paths,\n vocab_size=args.vocab_size + 5,\n min_frequency=2,\n show_progress=True,\n special_tokens=[\"[PAD]\", \"[UNK]\", \"[CLS]\", \"[SEP]\", \"[MASK]\"],\n limit_alphabet=1000,\n wordpieces_prefix=\"##\",\n )\n tokenizer.save_model(args.output_dir)\n\nif args.preliminary_model != 'fromScratch':\n tokenizer = BertTokenizer.from_pretrained(args.preliminary_model)\nelse:\n tokenizer = BertTokenizer.from_pretrained(args.output_dir)\n\nconfig = BertConfig.from_pretrained('bert-base-cased')\nconfig.vocab_size = tokenizer.vocab_size\nif args.preliminary_model != 'fromScratch':\n model = BertForMaskedLM.from_pretrained(args.preliminary_model)\nelse:\n model = BertForMaskedLM(config=config)\nmodel.to(args.device)\n\ntrain_dataset = TextDataset(\n tokenizer=tokenizer,\n file_path=args.train_data_file,\n block_size=32,\n overwrite_cache=args.overwrite_output_dir\n)\n\neval_dataset = TextDataset(\n tokenizer=tokenizer,\n file_path=args.eval_data_file,\n block_size=32,\n overwrite_cache=args.overwrite_output_dir\n)\nif args.data_portion < 1.0:\n train_dataset.examples = train_dataset.examples[:int(len(\n train_dataset.examples)*args.data_portion)]\n eval_dataset.examples = eval_dataset.examples[:int(len(\n eval_dataset.examples)*args.data_portion)]\n logger.info(\"Training and validation set limited to \" + str(\n args.data_portion) + \" portion of original data.\")\n\nlogger.info(\"Training/evaluation parameters %s\", args)\n\nglobal_step, tr_loss, model = train(args,\n train_dataset,\n model,\n tokenizer,\n eval_dataset=eval_dataset)\nlogger.info(\" global_step = %s, average loss = %s\", global_step,\n tr_loss)\n\n# Do the saving\n# Create output directory if needed\nif not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\nlogger.info(\"Saving model checkpoint to %s\", args.output_dir)\n# Save a trained model, configuration and tokenizer using `save_pretrained()`.\n# They can then be reloaded using `from_pretrained()`\n# Take care of parallel training\nmodel_to_save = model.module if hasattr(model,\n 'module') else model\nmodel_to_save.save_pretrained(args.output_dir)\n\n# Good practice: save your training arguments together with the trained model\ntorch.save(args, os.path.join(args.output_dir, 'training_args.bin'))\n\n# Load a trained model and vocabulary that you have fine-tuned\nmodel = 
BertForMaskedLM.from_pretrained(args.output_dir)\nif args.preliminary_model != 'fromScratch':\n tokenizer = BertTokenizer.from_pretrained(args.preliminary_model)\nelse:\n tokenizer = BertTokenizer.from_pretrained(args.output_dir)\nmodel.to(args.device)\n\n# Evaluation\nresults = {}\nif args.do_eval:\n checkpoints = [args.output_dir]\n\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split('-')[-1] if len(\n checkpoints) > 1 else \"\"\n prefix = checkpoint.split('/')[-1] if checkpoint.find(\n 'checkpoint') != -1 else \"\"\n\n model = BertForMaskedLM.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, model, eval_dataset, tokenizer, step='TestSet')\n result = dict(\n (k + '_{}'.format(global_step), v) for k, v in result.items())\n results.update(result)\ntoc = dt.now()\nprint(\"End of MLM_Training_transformers.py Script.\")\nprint('Total Script Runtime: ' + str(toc-tic))\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.DataLoader", "torch.utils.data.RandomSampler", "torch.tensor", "torch.nn.DataParallel", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.cuda.device_count" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PurplePean/AIX360
[ "0a71cfe372b91078dd7887d7597371e09d84f968", "4037d6347c40405f342b07da5d341fcd21081cfa" ]
[ "aix360/data/ted_data/GenerateData.py", "tests/rbm/test_Logistic_Rule_Regression.py" ]
[ "# This file will generate a synthetic dataset to predict employee attrition\r\n# Like most datasets it will have a feature vector and a Y label for each instance.\r\n# However, unlike most datasets it will also have an Explanation (E) for each instance, encoded as an non-negative integer.\r\n# This is motivated by the TED framework, but can be used by other explainability algorithms as a metric for explainability\r\n# See the AIES'19 paper by Hind et al for more information on the TED framework.\r\n# See the tutorial notebook TED_Cartesian_test for information about how to use this dataset and the TED framework.\r\n# The comments in this code also provide some insight into how this dataset is generated\r\n\r\nimport random\r\nfrom random import choices\r\nimport pandas as pd\r\n\r\nAny = -99 # This is only applicable in the rule\r\nLow = -1 # These 3, Low, Med, High, can be values in the dataset and are used in the rules\r\nMed = -2\r\nHigh = -3\r\nYes = -10 # This is the positive Y label\r\nNo = -11 # This is the negative Y label\r\nRandom = -12 # This signfiies a random choice should be made for the Y label (either Yes or No) ]\r\n\r\n# Features, values, and distribution, details below\r\nfeatureThresholds = [\r\n # 1 Position: 4(5%), 3(20%), 2(30%), 1(45%)\r\n [4, [0.05, 0.20, 0.30, 0.45]],\r\n\r\n # 2 Organization \"Org\": 3(30%); 2(30%); 1(40%)\r\n [3, [0.30, 0.30, 0.40]],\r\n\r\n # 3 Potential \"Pot\": Yes (50%), No (50%)\r\n [2, [0.50, 0.50]],\r\n\r\n # 4 Rating value \"Rat\": High(15%), Med(80%), Low(5%)\r\n [3, [0.15, 0.80, 0.05]],\r\n\r\n # 5 Rating Slope \"Slope\": High (15%), Med(80%), Low(5%)\r\n [3, [0.15, 0.80, 0.05]],\r\n\r\n # 6 Salary Competitiveness \"Sal\": High (10%); Med(70%); Low(20%)\r\n [3, [0.10, 0.70, 0.20]],\r\n\r\n # 7 Tenure Low \"TenL\" & High Values \"TenH\": [0..360], 30% in 0..24; 30% in 25..60; 40% in 61..360\r\n [3, [0.30, 0.30, 0.40], [[0, 24], [25, 60], [61, 360]]],\r\n\r\n # 8 Position Tenure Low \"BTenL\" & High Values \"BTenH\": [0..360], 70% in 0..12; 20% in 13..24; 10% in 25..360\r\n # Position tenure needs to be lower than tenure, ensured in generation code below\r\n [3, [0.70, 0.20, 0.10], [[0, 12], [13, 24], [25, 360]]]\r\n]\r\n\r\n# Some convenient population lists\r\nHighMedLowPopulation = [High, Med, Low]\r\nYesNoPopulation = [Yes, No]\r\nIndex3Population = [0, 1, 2]\r\nInteger4Population = [4, 3, 2, 1]\r\nInteger3Population = [3, 2, 1]\r\n\r\n# Rules used to label a feature vector with a label and an explanation\r\n# Format: features, label, explanation #, Explanation String \r\nRetentionRules = [ \r\n #POS ORG Pot RAT Slope SALC TENL H BTEN LH \r\n [Any, 1, Any, High, Any,\tLow, Any, Any, Any, Any, #0\r\n Yes, 2, \"Seeking Higher Salary in Org 1\"],\r\n [1, 1,\t Any, Any, Any,\tAny, Any, Any, 15, Any,\t#1\r\n Yes, 3, \"Promotion Lag, Org 1, Position 1\"],\r\n [2, 1,\t Any, Any, Any,\tAny, Any, Any, 15, Any,\t#2\r\n Yes, 3, \"Promotion Lag, Org 1, Position 2\"],\r\n [3, 1,\t Any, Any, Any,\tAny, Any, Any, 15, Any,\t#3\r\n Yes, 3, \"Promotion Lag, Org 1, Position 3\"],\r\n [1, 2,\t Any, Any, Any,\tAny, Any, Any, 20, Any,\t#4\r\n Yes, 4, \"Promotion Lag, Org 2, Position 1\"],\r\n [2, 2,\t Any, Any, Any,\tAny, Any, Any, 20, Any,\t#5\r\n Yes, 4, \"Promotion Lag, Org 2, Position 2\"],\r\n [3, 2, Any, Any, Any,\tAny, Any, Any, 30, Any,\t#6\r\n Yes, 5, \"Promotion Lag, Org 2, Position 3\"],\r\n [1, 3, Any, Any, Any,\tAny, Any, Any, 20, Any,\t#7\r\n Yes, 6, \"Promotion Lag, Org 3, Position 1\"],\r\n [2, 3,\t Any, Any, Any,\tAny, Any, Any, 30, 
Any,\t#8\r\n Yes, 7, \"Promotion Lag, Org 3, Position 2\"],\r\n [3, 3,\t Any, Any, Any,\tAny, Any, Any, 30, Any,\t#9\r\n Yes, 7, \"Promotion Lag, Org 3, Position 3\"],\r\n [1, 1, Any, Any, Any,\tAny, 0, 12, Any, Any,\t#10\r\n Yes, 8, \"New employee, Org 1, Position 1\"],\r\n [2, 1, Any, Any, Any,\tAny, 0, 12, Any, Any,\t#11\r\n Yes, 8, \"New employee, Org 1, Position 2\"],\r\n [3, 1, Any, Any, Any,\tAny, 0, 30, Any, Any,\t#12\r\n Yes, 9, \"New employee, Org 1, Position 3\"],\r\n [1, 2, Any, Any, Any,\tAny, 0, 24, Any, Any,\t#13\r\n Yes, 10, \"New employee, Org 2, Position 1\"],\r\n [2, 2, Any, Any, Any,\tAny, 0, 30, Any, Any,\t#14\r\n Yes, 11, \"New employee, Org 2, Position 2\"],\r\n [Any, 1, Any, Low, High, Any, Any, Any, Any, Any,\t#15\r\n Yes, 13, \"Disappointing evaluation, Org 1\"],\r\n [Any, 2, Any, Low, High, Any, Any, Any, Any, Any,\t#16\r\n Yes, 14, \"Disappointing evaluation, Org 2\"],\r\n [Any, Any, Yes, Med, High, Low, Any, Any, Any, Any,\t#17\r\n Yes, 15, \"Compensation doesn't match evaluations, Med rating\"],\r\n [Any, Any, Yes, High, High, Low, Any, Any, Any, Any,\t#18\r\n Yes, 15, \"Compensation doesn't match evaluations, High rating\"],\r\n [Any, 1, Yes, Med, High, Med, Any, Any, Any, Any,\t#19\r\n\t Yes, 16, \"Compensation doesn't match evaluations, Org 1, Med rating\"],\r\n [Any, 2, Yes, Med, High, Med, Any, Any, Any, Any,\t#20\r\n\t Yes, 16, \"Compensation doesn't match evaluations, Org 2, Med rating\"],\r\n [Any, 1, Yes, High, High, Med, Any, Any, Any, Any,\t#21\r\n\t Yes, 16, \"Compensation doesn't match evaluations, Org 1, High rating\"],\r\n [Any, 2, Yes, High, High, Med, Any, Any, Any, Any,\t#22\r\n\t Yes, 16, \"Compensation doesn't match evaluations, Org 2, High rating\"],\r\n [Any, 1, Any, Any, Med,\tMed, 120, 180, Any, Any,\t#23\r\n\t Yes, 17, \"Mid-career crisis, Org 1\"],\r\n [Any, 2, Yes, Any, Any,\tMed, 130, 190, Any, Any,\t#24\r\n\t Yes, 18, \"Mid-career crisis, Org 2\"]\r\n]\r\n\r\ndef ruleValToString(val):\r\n \"\"\" Convert the value passed into a string \"\"\"\r\n if val == Any :\r\n return \"Any\"\r\n elif val == Low :\r\n return \"Low\"\r\n elif val == Med :\r\n return \"Med\"\r\n elif val == High :\r\n return \"High\"\r\n elif val == Yes :\r\n return \"Yes\"\r\n elif val == No :\r\n return \"No\"\r\n elif val == Random :\r\n return \"Random\"\r\n else :\r\n return str(val)\r\n\r\ndef printFeatureStringHeader() :\r\n \"\"\" Print the feature headings \"\"\"\r\n print(\" Feature Headings\")\r\n print(\"[Pos, Org, Pot, Rating, Slope, Salary Competitiveness, Tenure, Position Tenure]\")\r\n \r\ndef featuresToString(featureVector) :\r\n \"\"\" Convert a feature vector into is string format\"\"\"\r\n val = \"[\"\r\n for i in range(0, 2) : # These features are just ints, Position, Organization\r\n val += str(featureVector[i])\r\n val += \" \" \r\n for i in range(2, 6) : # show encoding for these: Potential, Rating, Rating Slope, Salary Competiveness\r\n val += ruleValToString(featureVector[i]) \r\n val += \" \"\r\n for i in range(6, 8) : # These features are just ints: Tenure and Position Tenure\r\n val += str(featureVector[i])\r\n val += \" \" \r\n val += \"]\"\r\n return val\r\n\r\ndef printRule(rule) :\r\n \"\"\" Print the passed rule \"\"\"\r\n print(\"Rule: \", end='')\r\n for i in rule[0:1]: # ints or Any: Position and Organization\r\n if i == Any:\r\n print(ruleValToString(i) + \", \", end='')\r\n\r\n for i in rule[2:5]: # encoded: Potentional, Rating, Rating Slope, Salary Competitiveness\r\n print(ruleValToString(i) + \", \", end='')\r\n\r\n for i 
in rule[6:9]: # next 4 are ints or ANY: Tenure Low, Tenure High, Position Tenure Low, Position Tenure High\r\n if i == Any :\r\n print(ruleValToString(i) + \", \", end='')\r\n else :\r\n print(str(i) + \", \", end='') \r\n print(\"==> \"+ ruleValToString(rule[10]) + \"[\" + str(rule[11]) + \"] \" + str(rule[12]))\r\n\r\ndef printRules(rules) :\r\n \"\"\" print all rules\"\"\"\r\n for r in rules:\r\n printRule(r)\r\n\r\n########################################################################\r\n\r\ndef chooseRangeValue(thresholds, rangeList):\r\n \"\"\" Generate a random value based on the probability weights (thresholds) and list of ranges passed\r\n Args: \r\n thresholds : list of probalities for each choice\r\n rangeList: a list of pair lists giving the lower and upper bounds to choose value from \r\n \"\"\"\r\n\r\n # pick a number 1..3 from weights\r\n rangeVal = choices(Index3Population, thresholds)\r\n\r\n # get the appropriate range given rangeVal\r\n interval = rangeList[rangeVal[0]]\r\n\r\n # construct a population list from the result\r\n intervalPopulation = list(range(interval[0], interval[1]))\r\n\r\n # construct a equally prob weights list\r\n numElements = interval[1] - interval[0]\r\n probVal = 1.0 / numElements\r\n probList = [probVal] * numElements\r\n\r\n # now choose the value from the population based on the weights\r\n val = choices(intervalPopulation, probList)\r\n return val[0]\r\n\r\n\r\ndef chooseValueAndAppend(instance, population, weights) :\r\n \"\"\" Choose a random value from the population using weights list and append it to the passed instance\r\n \"\"\"\r\n val = choices(population, weights)\r\n instance.append(val[0])\r\n\r\ndef generateFeatures(numInstances) :\r\n \"\"\" generate the features (X) values for the dataset\r\n Args:\r\n numInstances (int) : number of instances to genreate\r\n Returns:\r\n dataset (list of lists) : the dataset with features, but no labels or explanations yet\r\n \"\"\"\r\n assert(numInstances > 0)\r\n\r\n dataset = []\r\n for i in range(numInstances) :\r\n instance = []\r\n\r\n #POS ORG Pot Rating Slope SALC TENL H BTEN LH \r\n chooseValueAndAppend(instance, Integer4Population, featureThresholds[0][1]) # Position\r\n chooseValueAndAppend(instance, Integer3Population, featureThresholds[1][1]) # Org\r\n chooseValueAndAppend(instance, YesNoPopulation, featureThresholds[2][1]) # Potential\r\n chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[3][1]) # Rating\r\n chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[4][1]) # Rating slope\r\n chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[5][1]) # Sal competitiveness\r\n\r\n val1 = chooseRangeValue(featureThresholds[6][1], featureThresholds[6][2]) # Tenure\r\n instance.append(val1)\r\n\r\n # Position tenure needs to be <= Tenure\r\n val2 = chooseRangeValue(featureThresholds[7][1], featureThresholds[7][2]) # Pos Tenure\r\n if val2 > val1 :\r\n val2 = val1\r\n instance.append(val2)\r\n dataset.append(instance)\r\n \r\n return dataset\r\n\r\n#####################################################################################################\r\n\r\ndef match(ruleVal, featureVal) :\r\n \"\"\" Check if passed ruleVal matches the featureVal or if ruleVal is Any, which matches everything \r\n \"\"\"\r\n\r\n # print(\"Match called: \"+ ruleValToString(ruleVal) + \" \" + ruleValToString(featureVal))\r\n if ruleVal == Any :\r\n return True\r\n return (ruleVal == featureVal)\r\n\r\ndef intervalMatch(ruleValLower, ruleValUpper, 
featureVal) :\r\n \"\"\" Check to see if featureVal is in the interval defined by [ruleValLower, ruleValUpper)\r\n \"\"\"\r\n\r\n # Any in lower bound matches all values, (upper bound doesn't matter)\r\n if ruleValLower == Any :\r\n return True\r\n\r\n if ruleValLower <= featureVal :\r\n # Any in upper bound means infinitity\r\n if featureVal < ruleValUpper or ruleValUpper == Any :\r\n return True\r\n \r\n return False\r\n\r\ndef ruleMatch(rule, featureVector) :\r\n \"\"\" Determine if the passed featureVector matches the passed rule \r\n \"\"\"\r\n if (False) :\r\n print(\"ruleMatch called, \", end=\"\")\r\n printRule(rule)\r\n print(\" feature vector: \" + featuresToString(featureVector) )\r\n\r\n for i in range(0, 6) : # loop over first 6 features, 0..5\r\n if not match(rule[i], featureVector[i]) : # if we don't find a feature match, the rule doesn't match\r\n # print(\"Didn't match feature #\", i, ruleValToString(featureVector[i]))\r\n return False\r\n \r\n # These features are interval-based, so need a different matching routine\r\n if not intervalMatch(rule[6], rule[7], featureVector[6]) : # rule[6] and rule[7] have the lower and upper bounds of interval\r\n # print(\"Didn't match feature # 6: \", featureVector[6])\r\n return False\r\n if not intervalMatch(rule[8], rule[9], featureVector[7]) : # rule[8] and rule[9] have the lower and upper bounds of interval\r\n # print(\"Didn't match feature # 7: \", featureVector[7])\r\n return False\r\n \r\n # print(\"Matched all features\")\r\n return True # if we didn't find a non-match by now, we found a match\r\n\r\ndef findRule(instance, ruleSet) :\r\n \"\"\" find the rule(s) that matches the feture vector passed\r\n \"\"\"\r\n\r\n # print(\"*Looking for rule match for Feature vector: \" + featuresToString(instance))\r\n ruleNumber = 0 # counter to track rule number\r\n ruleMatches = [] # will hold all rule numbers that matched\r\n for rule in ruleSet :\r\n if (ruleMatch(rule, instance)) :\r\n ruleMatches.append(ruleNumber)\r\n counts[ruleNumber] += 1 # update global histogram of rule matches for stats reporting\r\n\r\n if (False) :\r\n print(\" ruleMatch found at rule #\" + str(ruleNumber))\r\n print(\" \", end=\"\")\r\n printRule(rule)\r\n\r\n ruleNumber += 1\r\n\r\n return ruleMatches\r\n\r\ndef countAnys(rule) :\r\n \"\"\" Count the number of Anys in the passed rule. 
An \"Any\" is a wildcard that matches all values\r\n \"\"\"\r\n count = 0\r\n for feature in RetentionRules[rule] :\r\n if feature == Any :\r\n count += 1\r\n\r\n return count\r\n\r\ndef pickBestRule(ruleList) :\r\n \"\"\" Choose the rule with the least number of Any's in it\r\n \"\"\"\r\n assert(len(ruleList) > 0)\r\n\r\n # print(\"ruleList: \", ruleList)\r\n minAnys = len(RetentionRules[0]) + 1 # initialize to a value larger than possible # of Anys in a rule\r\n bestRule = -1\r\n for rule in ruleList :\r\n # Count # of Any's in rule # rule\r\n count = countAnys(rule)\r\n if count < minAnys :\r\n minAnys = count\r\n bestRule = rule\r\n\r\n assert(bestRule != -1) # We should find a best rule\r\n return bestRule\r\n\r\ndef addLabelsAndExplanations(dataset, rules) :\r\n \"\"\" This function will use a ruleset to add labels (Y) and explanations/rules (E) to a passed dataset\r\n Arg:\r\n dataset (list of lists) : a list of feature vectors (list)\r\n rules (list of lists) : a list of rules\r\n \"\"\"\r\n\r\n noMatches = 0 # Counters to record how often there are no (Yes) matches, 1 (Yes) match, and multiple (Yes) matches\r\n multiMatches = 0\r\n oneMatches = 0\r\n for instance in dataset :\r\n ruleMatches = findRule(instance, rules)\r\n\r\n if len(ruleMatches) == 0 : # We didn't match a (Yes) rule, so this ia No situation\r\n rule = NoRiskRuleNum\r\n label = No\r\n noMatches +=1\r\n elif len(ruleMatches) > 1 : # Matched multiple Yes rules, need to pick one\r\n rule = pickBestRule(ruleMatches)\r\n assert(rule >= 0 and rule < len(rules)) # Ensure rule number is valid\r\n label = Yes\r\n multiMatches += 1\r\n else : # Found 1 Yes rule match, it's the winner\r\n rule = ruleMatches[0]\r\n label = Yes\r\n oneMatches += 1\r\n assert(rule >= 0 and rule < len(rules)) # Ensure rule number is valid\r\n\r\n # print(\"Label: \" + ruleValToString(label) + \", Rule: \" + ruleValToString(rule))\r\n\r\n instance.append(label)\r\n instance.append(rule) # add the label and explanation (rule #) to the featureVector\r\n\r\n if (True) :\r\n print(\"\\nRule matching statistics: \")\r\n totalYes = oneMatches + multiMatches\r\n total = oneMatches + multiMatches + noMatches\r\n print(\" Yes Labels: {}/{} ({:.2f}%)\".format(totalYes, total, totalYes/total*100))\r\n print(\" Matched 1 Yes rule: {}/{} ({:.2f}%)\".format(oneMatches, totalYes, oneMatches/totalYes*100))\r\n print(\" Matched multiple Yes rules: {}/{} ({:.2f}%)\".format(multiMatches, totalYes, multiMatches/totalYes*100))\r\n print(\" No Laels: {}/{} ({:.2f}%)\".format(noMatches, total, noMatches/total*100))\r\n\r\ndef printRuleUsage(counts, total) :\r\n print(\"\\nHistogram of rule usage:\")\r\n ruleNum = 0\r\n for num in counts :\r\n print(\" Rule {} was used {} times, {:.2f}%\".format(ruleNum, num, num/total*100))\r\n ruleNum += 1\r\n\r\n \r\nnumRentionRules = len(RetentionRules)\r\ncounts = [0]*numRentionRules\r\nNoRiskRuleNum = numRentionRules # the No Risk to leave rule is 1 more than than the total rules [0..]\r\n\r\nrandom.seed(1)\r\n# printFeatureStringHeader()\r\nnumInstances = 10000\r\ndataset = generateFeatures(numInstances)\r\n\r\naddLabelsAndExplanations(dataset, RetentionRules)\r\n\r\nprintRuleUsage(counts, numInstances)\r\n\r\n# insert TED headers\r\nNumFeatures = len(featureThresholds)\r\nheader = list(range(NumFeatures))\r\nheader.append(\"Y\")\r\nheader.append(\"E\")\r\ndataset.insert(0, header)\r\n\r\n# write to csv file\r\nmy_df = pd.DataFrame(dataset)\r\nmy_df.to_csv('Retention.csv', index=False, header=False)\r\n", "import json\n\nimport 
numpy as np\nfrom numpy.testing import assert_allclose\n\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\n\nimport unittest\n\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n\nfrom aix360.algorithms.rbm import FeatureBinarizer, GLRMExplainer, LogisticRuleRegression\n\n\nclass TestLogisticRuleRegression(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.bc = load_breast_cancer()\n\n def test_classification(self):\n bc_df = pd.DataFrame(self.bc.data, columns=self.bc.feature_names)\n X_train, X_test, Y_train, Y_test = train_test_split(bc_df, self.bc.target, test_size = 0.2, random_state = 31)\n fb = FeatureBinarizer(negations=True)\n X_train_fb = fb.fit_transform(X_train)\n X_test_fb = fb.transform(X_test)\n\n self.assertEqual(len(X_train_fb.columns), 540)\n self.assertEqual(len(X_test_fb.columns), 540)\n\n logistic_model = LogisticRuleRegression(maxSolverIter=2000)\n explainer = GLRMExplainer(logistic_model)\n explainer.fit(X_train_fb, Y_train)\n Y_pred = explainer.predict(X_test_fb)\n\n self.assertGreater(accuracy_score(Y_test, Y_pred), 0.85)\n self.assertGreater(precision_score(Y_test, Y_pred), 0.85)\n self.assertGreater(recall_score(Y_test, Y_pred), 0.85)\n self.assertGreater(f1_score(Y_test, Y_pred), 0.9)\n\n explanation = explainer.explain()\n expected = pd.DataFrame(columns=[\"rule\", \"coefficient\"], data=[\n ['(intercept)', -11.2],\n ['worst perimeter <= 116.46 AND worst concave points <= 0.15', -11.9],\n ['worst concave points <= 0.15', 10.1],\n ['worst perimeter <= 116.46 AND worst concave points <= 0.18', 9.8],\n ['worst area <= 930.88', 5.4],\n ['worst area > 680.60 AND worst concavity > 0.22', -3.3],\n ['worst perimeter <= 116.46 AND worst smoothness <= 0.16', 3.1],\n ['mean concave points <= 0.05', 1.5],\n ['worst concavity <= 0.27', 0.9],\n ['worst concave points <= 0.12', 0.63],\n ['worst perimeter <= 104.38', -0.02]\n ])\n assert_frame_equal(explanation, expected, check_dtype=False, check_exact=False, check_less_precise=1)\n\n figs, _ = explainer.visualize(bc_df, fb)\n with open('tests/rbm/logistic_plot_data.json') as fp:\n plot_data = json.load(fp)\n for k,v in plot_data.items():\n obtained_plot = figs[k].axes[0].lines[0].get_xydata()\n assert_allclose(np.array(v), obtained_plot, rtol=1e-2)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "pandas.DataFrame" ], [ "sklearn.datasets.load_breast_cancer", "sklearn.metrics.precision_score", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "sklearn.metrics.f1_score", "numpy.array", "sklearn.metrics.recall_score", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
asadziach/CarND-Semantic-Segmentation
[ "c3431ab5dc3878b82bfc66e7384005f7a93fcb16" ]
[ "main.py" ]
[ "import os.path\nimport tensorflow as tf\nimport helper\nimport ImageProcessor\nimport warnings\nfrom distutils.version import LooseVersion\nimport project_tests as tests\nimport scipy.misc\nfrom glob import glob\nfrom moviepy.editor import VideoFileClip\nimport time\nimport timeit\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion(\n '1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn(\n 'No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n \"\"\"\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing \"variables/\" and \"saved_model.pb\"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n \"\"\"\n # Use tf.saved_model.loader.load to load the model and weights\n vgg_tag = 'vgg16'\n vgg_input_tensor_name = 'image_input:0'\n vgg_keep_prob_tensor_name = 'keep_prob:0'\n vgg_layer3_out_tensor_name = 'layer3_out:0'\n vgg_layer4_out_tensor_name = 'layer4_out:0'\n vgg_layer7_out_tensor_name = 'layer7_out:0'\n\n tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)\n graph = tf.get_default_graph()\n image_input = graph.get_tensor_by_name(vgg_input_tensor_name)\n keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)\n layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)\n layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)\n layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)\n\n return image_input, keep_prob, layer3_out, layer4_out, layer7_out\n\n\ntests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n \"\"\"\n Create the layers for a fully convolutional network. 
Build skip-layers using the vgg layers.\n :param vgg_layer3_out: TF Tensor for VGG Layer 3 tensor\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 tensor\n :param vgg_layer7_out: TF Tensor for VGG Layer 7 tensor\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of tensor\n \"\"\"\n # Outputs of pooling layers 3 and 4 are scaled before they are fed into\n # the 1x1 convolutions.\n vgg_layer3_out = tf.multiply(vgg_layer3_out, 0.0001)\n vgg_layer4_out = tf.multiply(vgg_layer4_out, 0.01)\n\n regularizer = tf.contrib.layers.l2_regularizer(1e-3)\n conv_1x1_l3 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same',\n kernel_regularizer=regularizer)\n conv_1x1_l4 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same',\n kernel_regularizer=regularizer)\n conv_1x1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same',\n kernel_regularizer=regularizer)\n\n tensor = tf.layers.conv2d_transpose(\n conv_1x1_l7, num_classes, 4, strides=(2, 2), padding='same', kernel_regularizer=regularizer)\n tensor = tf.add(tensor, conv_1x1_l4)\n tensor = tf.layers.conv2d_transpose(\n tensor, num_classes, 4, strides=(2, 2), padding='same', kernel_regularizer=regularizer)\n tensor = tf.add(tensor, conv_1x1_l3)\n tensor = tf.layers.conv2d_transpose(\n tensor, num_classes, 16, strides=(8, 8), padding='same', kernel_regularizer=regularizer)\n\n return tensor\n\n\ntests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n \"\"\"\n Build the TensorFLow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n \"\"\"\n logits = tf.reshape(nn_last_layer, (-1, num_classes))\n labels = tf.reshape(correct_label, (-1, num_classes))\n\n cross_entropy_loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))\n\n '''\n When adding l2-regularization, setting a regularizer in the arguments of \n the tf.layers is not enough. Regularization loss terms must be manually \n added to your loss function. otherwise regularization is not implemented.\n '''\n regularization_losses = tf.get_collection(\n tf.GraphKeys.REGULARIZATION_LOSSES)\n\n cross_entropy_loss = tf.add(cross_entropy_loss, sum(regularization_losses))\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(cross_entropy_loss)\n\n return logits, train_op, cross_entropy_loss\n\n\ntests.test_optimize(optimize)\n\n\ndef train_nn(sess, step, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate, saver=None):\n \"\"\"\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. 
Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n \"\"\"\n\n for image, label in (get_batches_fn(batch_size)):\n _, loss = sess.run(\n [train_op, cross_entropy_loss], feed_dict={input_image: image, correct_label: label,\n keep_prob: 1.0, learning_rate: 1e-4})\n print('Epoch: {} loss: {:.3f}'.format(step + 1, loss))\n \n if saver:\n saver.save(sess, \"./ckpts/model.ckpt\", global_step=step)\n \n return loss\n\n\n#tests.test_train_nn(train_nn)\n\n\ndef run():\n batches = 13\n epochs = 80\n restore_model = True\n training = True\n compute_iou = True\n save_inference_samples = True\n do_exteranl_tests = False\n save_graph = True\n\n image_shape = (160, 576)\n data_dir = './data'\n runs_dir = './runs'\n # Change following to switch datasets\n dataset = helper.KittiDataset(data_dir, image_shape)\n num_classes = dataset.get_num_classes()\n tests.test_for_kitti_dataset(data_dir)\n\n # Download pretrained vgg model\n helper.maybe_download_pretrained_vgg(data_dir)\n\n with tf.Session() as sess:\n correct_label = tf.placeholder(\n tf.int32, [None, None, None, num_classes])\n learning_rate = tf.placeholder(tf.float32)\n\n # Path to vgg model\n vgg_path = os.path.join(data_dir, 'vgg')\n # Create function to get batches\n get_batches_fn = dataset.gen_batch_function()\n\n input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(\n sess, vgg_path)\n tensor = layers(layer3_out, layer4_out, layer7_out, num_classes)\n logits, optimizer, cross_entropy_loss = optimize(tensor, correct_label, learning_rate,\n num_classes)\n\n if compute_iou:\n predictions = tf.argmax(tf.nn.softmax(tensor), axis=-1)\n gt = tf.argmax(correct_label, axis=-1)\n mean_iou, iou_update_op = tf.metrics.mean_iou(\n gt, predictions, num_classes)\n\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n\n saver = tf.train.Saver(max_to_keep=2, keep_checkpoint_every_n_hours=1)\n restore_path = tf.train.latest_checkpoint('./ckpts/')\n if restore_path and restore_model:\n print(\"Resotring model from: %s \" % restore_path)\n saver.restore(sess, restore_path)\n\n for step in range(epochs):\n if training:\n print(\"Training...\")\n start_time = timeit.default_timer()\n loss = train_nn(sess, step, batches, get_batches_fn, optimizer, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate, saver)\n elapsed = timeit.default_timer() - start_time\n print('Epoch: {} loss: {:.3f} time: {:.3f}'.format(step + 1, loss, elapsed))\n \n if save_inference_samples:\n print(\"Saving inference samples...\")\n dataset.save_inference_samples(\n runs_dir, sess, logits, keep_prob, input_image)\n\n #compute mean_iou on traning images\n if compute_iou:\n print(\"Computing IOU...\")\n mean_ious = []\n for image, label in (get_batches_fn(batches)):\n sess.run([predictions, iou_update_op], feed_dict={\n input_image: image, correct_label: label, keep_prob: 1})\n # Avoiding headaches\n # http://ronny.rest/blog/post_2017_09_11_tf_metrics/\n mean_ious.append(sess.run(mean_iou))\n print(\"Mean IOU: {:.3f}\".format(sum(mean_ious) / len(mean_ious)))\n \n if do_exteranl_tests:\n print(\"Processing test images...\")\n processor = ImageProcessor.ImageProcessor(\n image_shape, 
sess, logits, keep_prob, input_image)\n for idx, image_file in enumerate(glob(\"./test_images/*.jpg\")):\n image = scipy.misc.imread(image_file)\n image = processor.process_image(image)\n scipy.misc.imsave(os.path.join(\n \"output_images\", str(idx) + \".png\"), image)\n\n print(\"Processing test video...\")\n videoname = 'test_video'\n output_file = videoname + '_output.mp4'\n input_file = videoname + '.mp4'\n\n clip = VideoFileClip(input_file)\n video_clip = clip.fl_image(processor.process_image)\n video_clip.write_videofile(output_file, audio=False)\n\n if save_graph:\n print(\"Saving graph...\")\n # Save GraphDef\n tf.train.write_graph(sess.graph_def,'.','graph.pb', as_text=False)\n \n print(\"Done.\")\n\n\nif __name__ == '__main__':\n run()\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.test.gpu_device_name", "tensorflow.layers.conv2d_transpose", "tensorflow.train.AdamOptimizer", "tensorflow.get_default_graph", "tensorflow.get_collection", "tensorflow.add", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.train.write_graph", "tensorflow.layers.conv2d", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.metrics.mean_iou", "tensorflow.multiply", "tensorflow.train.latest_checkpoint", "tensorflow.local_variables_initializer", "tensorflow.nn.softmax", "tensorflow.reshape", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.saved_model.loader.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
huilin16/PaddleRS
[ "ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a", "b6f7033f3c0ca7bc6952456c0a0f53eef6c1c07f", "b6f7033f3c0ca7bc6952456c0a0f53eef6c1c07f", "ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a" ]
[ "tools/mask2geojson.py", "paddlers/models/ppdet/utils/colormap.py", "paddlers/models/ppdet/modeling/mot/matching/jde_matching.py", "paddlers/models/ppgan/models/firstorder_model.py" ]
[ "# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport codecs\r\nimport argparse\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport geojson\r\nfrom geojson import Polygon, Feature, FeatureCollection\r\n\r\nfrom utils import Raster, Timer\r\n\r\n\r\ndef _gt_convert(x, y, geotf):\r\n x_geo = geotf[0] + x * geotf[1] + y * geotf[2]\r\n y_geo = geotf[3] + x * geotf[4] + y * geotf[5]\r\n return x_geo, y_geo\r\n\r\n\r\n@Timer\r\ndef convert_data(mask_path, save_path, epsilon=0):\r\n raster = Raster(mask_path)\r\n img = raster.getArray()\r\n ext = save_path.split(\".\")[-1]\r\n if ext != \"json\" and ext != \"geojson\":\r\n raise ValueError(\"The ext of `save_path` must be `json` or `geojson`, not {}.\".format(ext))\r\n geo_writer = codecs.open(save_path, \"w\", encoding=\"utf-8\")\r\n clas = np.unique(img)\r\n cv2_v = (cv2.__version__.split(\".\")[0] == \"3\")\r\n feats = []\r\n if not isinstance(epsilon, (int, float)):\r\n epsilon = 0\r\n for iclas in range(1, len(clas)):\r\n tmp = np.zeros_like(img).astype(\"uint8\")\r\n tmp[img == iclas] = 1\r\n # TODO: Detect internal and external contour\r\n results = cv2.findContours(tmp, cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_TC89_KCOS)\r\n contours = results[1] if cv2_v else results[0]\r\n # hierarchys = results[2] if cv2_v else results[1]\r\n if len(contours) == 0:\r\n continue\r\n for contour in contours:\r\n contour = cv2.approxPolyDP(contour, epsilon, True)\r\n polys = []\r\n for point in contour:\r\n x, y = point[0]\r\n xg, yg = _gt_convert(x, y, raster.geot)\r\n polys.append((xg, yg))\r\n polys.append(polys[0])\r\n feat = Feature(\r\n geometry=Polygon([polys]), properties={\"class\": int(iclas)})\r\n feats.append(feat)\r\n gjs = FeatureCollection(feats)\r\n geo_writer.write(geojson.dumps(gjs))\r\n geo_writer.close()\r\n\r\n\r\nparser = argparse.ArgumentParser(description=\"input parameters\")\r\nparser.add_argument(\"--mask_path\", type=str, required=True, \\\r\n help=\"The path of mask tif.\")\r\nparser.add_argument(\"--save_path\", type=str, required=True, \\\r\n help=\"The path to save the results, file suffix is `*.json/geojson`.\")\r\nparser.add_argument(\"--epsilon\", type=float, default=0, \\\r\n help=\"The CV2 simplified parameters, `0` is the default.\")\r\n\r\nif __name__ == \"__main__\":\r\n args = parser.parse_args()\r\n convert_data(args.mask_path, args.save_path, args.epsilon)\r\n", "# Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\n\ndef colormap(rgb=False):\n \"\"\"\n Get colormap\n\n The code of this function is copied from https://github.com/facebookresearch/Detectron/blob/main/detectron/utils/colormap.py\n \"\"\"\n color_list = np.array([\n 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,\n 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078,\n 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000,\n 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000,\n 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667,\n 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000,\n 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000,\n 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000,\n 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500,\n 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667,\n 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,\n 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000,\n 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333,\n 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000,\n 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000,\n 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167,\n 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000,\n 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000,\n 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,\n 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000,\n 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833,\n 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286,\n 0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714,\n 0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000\n ]).astype(np.float32)\n color_list = color_list.reshape((-1, 3)) * 255\n if not rgb:\n color_list = color_list[:, ::-1]\n return color_list\n", "# Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nThis code is based on https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/tracker/matching.py\r\n\"\"\"\r\n\r\nimport lap\r\nimport scipy\r\nimport numpy as np\r\nfrom scipy.spatial.distance import cdist\r\nfrom ..motion import kalman_filter\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n__all__ = [\r\n 'merge_matches',\r\n 'linear_assignment',\r\n 'cython_bbox_ious',\r\n 'iou_distance',\r\n 'embedding_distance',\r\n 'fuse_motion',\r\n]\r\n\r\n\r\ndef merge_matches(m1, m2, shape):\r\n O, P, Q = shape\r\n m1 = np.asarray(m1)\r\n m2 = np.asarray(m2)\r\n\r\n M1 = scipy.sparse.coo_matrix(\r\n (np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))\r\n M2 = scipy.sparse.coo_matrix(\r\n (np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))\r\n\r\n mask = M1 * M2\r\n match = mask.nonzero()\r\n match = list(zip(match[0], match[1]))\r\n unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))\r\n unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))\r\n\r\n return match, unmatched_O, unmatched_Q\r\n\r\n\r\ndef linear_assignment(cost_matrix, thresh):\r\n if cost_matrix.size == 0:\r\n return np.empty(\r\n (0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(\r\n range(cost_matrix.shape[1]))\r\n matches, unmatched_a, unmatched_b = [], [], []\r\n cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)\r\n for ix, mx in enumerate(x):\r\n if mx >= 0:\r\n matches.append([ix, mx])\r\n unmatched_a = np.where(x < 0)[0]\r\n unmatched_b = np.where(y < 0)[0]\r\n matches = np.asarray(matches)\r\n return matches, unmatched_a, unmatched_b\r\n\r\n\r\ndef cython_bbox_ious(atlbrs, btlbrs):\r\n ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)\r\n if ious.size == 0:\r\n return ious\r\n try:\r\n import cython_bbox\r\n except Exception as e:\r\n print('cython_bbox not found, please install cython_bbox.'\r\n 'for example: `pip install cython_bbox`.')\r\n raise e\r\n\r\n ious = cython_bbox.bbox_overlaps(\r\n np.ascontiguousarray(\r\n atlbrs, dtype=np.float),\r\n np.ascontiguousarray(\r\n btlbrs, dtype=np.float))\r\n return ious\r\n\r\n\r\ndef iou_distance(atracks, btracks):\r\n \"\"\"\r\n Compute cost based on IoU between two list[STrack].\r\n \"\"\"\r\n if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or (\r\n len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):\r\n atlbrs = atracks\r\n btlbrs = btracks\r\n else:\r\n atlbrs = [track.tlbr for track in atracks]\r\n btlbrs = [track.tlbr for track in btracks]\r\n _ious = cython_bbox_ious(atlbrs, btlbrs)\r\n cost_matrix = 1 - _ious\r\n\r\n return cost_matrix\r\n\r\n\r\ndef embedding_distance(tracks, detections, metric='euclidean'):\r\n \"\"\"\r\n Compute cost based on features between two list[STrack].\r\n \"\"\"\r\n cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)\r\n if cost_matrix.size == 0:\r\n return cost_matrix\r\n det_features = np.asarray(\r\n 
[track.curr_feat for track in detections], dtype=np.float)\r\n track_features = np.asarray(\r\n [track.smooth_feat for track in tracks], dtype=np.float)\r\n cost_matrix = np.maximum(0.0, cdist(track_features, det_features,\r\n metric)) # Nomalized features\r\n return cost_matrix\r\n\r\n\r\ndef fuse_motion(kf,\r\n cost_matrix,\r\n tracks,\r\n detections,\r\n only_position=False,\r\n lambda_=0.98):\r\n if cost_matrix.size == 0:\r\n return cost_matrix\r\n gating_dim = 2 if only_position else 4\r\n gating_threshold = kalman_filter.chi2inv95[gating_dim]\r\n measurements = np.asarray([det.to_xyah() for det in detections])\r\n for row, track in enumerate(tracks):\r\n gating_distance = kf.gating_distance(\r\n track.mean,\r\n track.covariance,\r\n measurements,\r\n only_position,\r\n metric='maha')\r\n cost_matrix[row, gating_distance > gating_threshold] = np.inf\r\n cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_\r\n ) * gating_distance\r\n return cost_matrix\r\n", "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# code was heavily based on https://github.com/AliaksandrSiarohin/first-order-model\n# Users should be careful about adopting these functions in any commercial matters.\n# https://github.com/AliaksandrSiarohin/first-order-model/blob/master/LICENSE.md\n\nimport paddle\n\nfrom .base_model import BaseModel\nfrom .builder import MODELS\nfrom .discriminators.builder import build_discriminator\nfrom .generators.builder import build_generator\nfrom ..modules.init import init_weights\nfrom ..solver import build_optimizer\nfrom paddle.optimizer.lr import MultiStepDecay\nfrom ..modules.init import reset_parameters, uniform_\nimport paddle.nn as nn\nimport numpy as np\nfrom paddle.utils import try_import\nimport paddle.nn.functional as F\nimport cv2\nimport os\n\n\ndef init_weight(net):\n def reset_func(m):\n if isinstance(m, (nn.BatchNorm, nn.BatchNorm2D, nn.SyncBatchNorm)):\n m.weight = uniform_(m.weight, 0, 1)\n elif hasattr(m, 'weight') and hasattr(m, 'bias'):\n reset_parameters(m)\n\n net.apply(reset_func)\n\n\[email protected]()\nclass FirstOrderModel(BaseModel):\n \"\"\" This class implements the FirstOrderMotion model, FirstOrderMotion paper:\n https://proceedings.neurips.cc/paper/2019/file/31c0b36aef265d9221af80872ceb62f9-Paper.pdf.\n \"\"\"\n\n def __init__(self,\n common_params,\n train_params,\n generator,\n discriminator=None):\n super(FirstOrderModel, self).__init__()\n\n # def local var\n self.input_data = None\n self.generated = None\n self.losses_generator = None\n self.train_params = train_params\n # define networks\n generator_cfg = generator\n generator_cfg.update({'common_params': common_params})\n generator_cfg.update({'train_params': train_params})\n generator_cfg.update({\n 'dis_scales': discriminator.discriminator_cfg.scales\n })\n self.nets['Gen_Full'] = build_generator(generator_cfg)\n discriminator_cfg = discriminator\n discriminator_cfg.update({'common_params': common_params})\n 
discriminator_cfg.update({'train_params': train_params})\n self.nets['Dis'] = build_discriminator(discriminator_cfg)\n self.visualizer = Visualizer()\n self.test_loss = []\n self.is_train = False\n\n def setup_lr_schedulers(self, lr_cfg):\n self.kp_lr = MultiStepDecay(\n learning_rate=lr_cfg['lr_kp_detector'],\n milestones=lr_cfg['epoch_milestones'],\n gamma=0.1)\n self.gen_lr = MultiStepDecay(\n learning_rate=lr_cfg['lr_generator'],\n milestones=lr_cfg['epoch_milestones'],\n gamma=0.1)\n self.dis_lr = MultiStepDecay(\n learning_rate=lr_cfg['lr_discriminator'],\n milestones=lr_cfg['epoch_milestones'],\n gamma=0.1)\n self.lr_scheduler = {\n \"kp_lr\": self.kp_lr,\n \"gen_lr\": self.gen_lr,\n \"dis_lr\": self.dis_lr\n }\n\n def setup_net_parallel(self):\n if isinstance(self.nets['Gen_Full'], paddle.DataParallel):\n self.nets['kp_detector'] = self.nets[\n 'Gen_Full']._layers.kp_extractor\n self.nets['generator'] = self.nets['Gen_Full']._layers.generator\n self.nets['discriminator'] = self.nets['Dis']._layers.discriminator\n else:\n self.nets['kp_detector'] = self.nets['Gen_Full'].kp_extractor\n self.nets['generator'] = self.nets['Gen_Full'].generator\n self.nets['discriminator'] = self.nets['Dis'].discriminator\n\n def setup_optimizers(self, lr_cfg, optimizer):\n self.setup_net_parallel()\n # init params\n init_weight(self.nets['kp_detector'])\n init_weight(self.nets['generator'])\n init_weight(self.nets['discriminator'])\n\n # define loss functions\n self.losses = {}\n\n self.optimizers['optimizer_KP'] = build_optimizer(\n optimizer,\n self.kp_lr,\n parameters=self.nets['kp_detector'].parameters())\n self.optimizers['optimizer_Gen'] = build_optimizer(\n optimizer,\n self.gen_lr,\n parameters=self.nets['generator'].parameters())\n self.optimizers['optimizer_Dis'] = build_optimizer(\n optimizer,\n self.dis_lr,\n parameters=self.nets['discriminator'].parameters())\n\n def setup_input(self, input):\n self.input_data = input\n\n def forward(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n self.losses_generator, self.generated = \\\n self.nets['Gen_Full'](self.input_data.copy(), self.nets['discriminator'])\n\n def backward_G(self):\n loss_values = [val.mean() for val in self.losses_generator.values()]\n loss = paddle.add_n(loss_values)\n self.losses = dict(zip(self.losses_generator.keys(), loss_values))\n loss.backward()\n\n def backward_D(self):\n losses_discriminator = self.nets['Dis'](self.input_data.copy(),\n self.generated)\n loss_values = [val.mean() for val in losses_discriminator.values()]\n loss = paddle.add_n(loss_values)\n loss.backward()\n self.losses.update(dict(zip(losses_discriminator.keys(), loss_values)))\n\n def train_iter(self, optimizers=None):\n self.train = True\n self.forward()\n # update G\n self.set_requires_grad(self.nets['discriminator'], False)\n self.optimizers['optimizer_KP'].clear_grad()\n self.optimizers['optimizer_Gen'].clear_grad()\n self.backward_G()\n self.optimizers['optimizer_KP'].step()\n self.optimizers['optimizer_Gen'].step()\n\n # update D\n if self.train_params['loss_weights']['generator_gan'] != 0:\n self.set_requires_grad(self.nets['discriminator'], True)\n self.optimizers['optimizer_Dis'].clear_grad()\n self.backward_D()\n self.optimizers['optimizer_Dis'].step()\n\n def test_iter(self, metrics=None):\n if not self.is_train:\n self.is_train = True\n self.setup_net_parallel()\n\n self.nets['kp_detector'].eval()\n self.nets['generator'].eval()\n with paddle.no_grad():\n kp_source = 
self.nets['kp_detector'](self.input_data['video'][:, :,\n 0])\n for frame_idx in range(self.input_data['video'].shape[2]):\n source = self.input_data['video'][:, :, 0]\n driving = self.input_data['video'][:, :, frame_idx]\n kp_driving = self.nets['kp_detector'](driving)\n out = self.nets['generator'](source,\n kp_source=kp_source,\n kp_driving=kp_driving)\n out.update({'kp_source': kp_source, 'kp_driving': kp_driving})\n loss = paddle.abs(out['prediction'] - driving).mean().cpu(\n ).numpy()\n self.test_loss.append(loss)\n self.visual_items['driving_source_gen'] = self.visualizer.visualize(\n driving, source, out)\n print(\"Reconstruction loss: %s\" % np.mean(self.test_loss))\n self.nets['kp_detector'].train()\n self.nets['generator'].train()\n\n class InferGenerator(paddle.nn.Layer):\n def set_generator(self, generator):\n self.generator = generator\n\n def forward(self, source, kp_source, kp_driving, kp_driving_initial):\n kp_norm = {k: v for k, v in kp_driving.items()}\n\n kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])\n kp_norm['value'] = kp_value_diff + kp_source['value']\n\n jacobian_diff = paddle.matmul(\n kp_driving['jacobian'],\n paddle.inverse(kp_driving_initial['jacobian']))\n kp_norm['jacobian'] = paddle.matmul(jacobian_diff,\n kp_source['jacobian'])\n out = self.generator(\n source, kp_source=kp_source, kp_driving=kp_norm)\n return out['prediction']\n\n def export_model(self, export_model=None, output_dir=None, inputs_size=[]):\n\n source = paddle.rand(shape=inputs_size[0], dtype='float32')\n driving = paddle.rand(shape=inputs_size[1], dtype='float32')\n value = paddle.rand(shape=inputs_size[2], dtype='float32')\n j = paddle.rand(shape=inputs_size[3], dtype='float32')\n value2 = paddle.rand(shape=inputs_size[2], dtype='float32')\n j2 = paddle.rand(shape=inputs_size[3], dtype='float32')\n driving1 = {'value': value, 'jacobian': j}\n driving2 = {'value': value2, 'jacobian': j2}\n driving3 = {'value': value, 'jacobian': j}\n\n if output_dir is None:\n output_dir = 'inference_model'\n outpath = os.path.join(output_dir, \"fom_dy2st\")\n if not os.path.exists(outpath):\n os.makedirs(outpath)\n paddle.jit.save(\n self.nets['Gen_Full'].kp_extractor,\n os.path.join(outpath, \"kp_detector\"),\n input_spec=[source])\n infer_generator = self.InferGenerator()\n infer_generator.set_generator(self.nets['Gen_Full'].generator)\n paddle.jit.save(\n infer_generator,\n os.path.join(outpath, \"generator\"),\n input_spec=[source, driving1, driving2, driving3])\n\n\[email protected]()\nclass FirstOrderModelMobile(FirstOrderModel):\n \"\"\" This class implements the FirstOrderMotionMobile model, modified according to the FirstOrderMotion paper:\n https://proceedings.neurips.cc/paper/2019/file/31c0b36aef265d9221af80872ceb62f9-Paper.pdf.\n \"\"\"\n\n def __init__(self,\n common_params,\n train_params,\n generator_ori,\n generator,\n mode,\n kp_weight_path=None,\n gen_weight_path=None,\n discriminator=None):\n super(FirstOrderModel, self).__init__()\n modes = [\"kp_detector\", \"generator\", \"both\"]\n assert mode in modes\n # def local var\n self.input_data = None\n self.generated = None\n self.losses_generator = None\n self.train_params = train_params\n\n # fix origin fom model for distill\n generator_ori_cfg = generator_ori\n generator_ori_cfg.update({'common_params': common_params})\n generator_ori_cfg.update({'train_params': train_params})\n generator_ori_cfg.update({\n 'dis_scales': discriminator.discriminator_cfg.scales\n })\n self.Gen_Full_ori = 
build_generator(generator_ori_cfg)\n discriminator_cfg = discriminator\n discriminator_cfg.update({'common_params': common_params})\n discriminator_cfg.update({'train_params': train_params})\n self.nets['Dis'] = build_discriminator(discriminator_cfg)\n\n # define networks\n generator_cfg = generator\n generator_cfg.update({'common_params': common_params})\n generator_cfg.update({'train_params': train_params})\n generator_cfg.update({\n 'dis_scales': discriminator.discriminator_cfg.scales\n })\n if (mode == \"kp_detector\"):\n print(\"just train kp_detector, fix generator\")\n generator_cfg.update({\n 'generator_cfg': generator_ori_cfg['generator_cfg']\n })\n elif mode == \"generator\":\n print(\"just train generator, fix kp_detector\")\n generator_cfg.update({\n 'kp_detector_cfg': generator_ori_cfg['kp_detector_cfg']\n })\n elif mode == \"both\":\n print(\"train both kp_detector and generator\")\n self.mode = mode\n self.nets['Gen_Full'] = build_generator(generator_cfg)\n self.kp_weight_path = kp_weight_path\n self.gen_weight_path = gen_weight_path\n self.visualizer = Visualizer()\n self.test_loss = []\n self.is_train = False\n\n def setup_net_parallel(self):\n if isinstance(self.nets['Gen_Full'], paddle.DataParallel):\n self.nets['kp_detector'] = self.nets[\n 'Gen_Full']._layers.kp_extractor\n self.nets['generator'] = self.nets['Gen_Full']._layers.generator\n self.nets['generator'] = self.nets['Gen_Full']._layers.generator\n self.nets['discriminator'] = self.nets['Dis']._layers.discriminator\n else:\n self.nets['kp_detector'] = self.nets['Gen_Full'].kp_extractor\n self.nets['generator'] = self.nets['Gen_Full'].generator\n self.nets['discriminator'] = self.nets['Dis'].discriminator\n self.kp_detector_ori = self.Gen_Full_ori.kp_extractor\n if self.is_train:\n return\n\n from ppgan.utils.download import get_path_from_url\n vox_cpk_weight_url = 'https://paddlegan.bj.bcebos.com/applications/first_order_model/vox-cpk.pdparams'\n weight_path = get_path_from_url(vox_cpk_weight_url)\n checkpoint = paddle.load(weight_path)\n if (self.mode == \"kp_detector\"):\n print(\"load pretrained generator... \")\n self.nets['generator'].set_state_dict(checkpoint['generator'])\n for param in self.nets['generator'].parameters():\n param.stop_gradient = True\n elif self.mode == \"generator\":\n print(\"load pretrained kp_detector... 
\")\n self.nets['kp_detector'].set_state_dict(checkpoint['kp_detector'])\n for param in self.nets['kp_detector'].parameters():\n param.stop_gradient = True\n\n def setup_optimizers(self, lr_cfg, optimizer):\n self.setup_net_parallel()\n # init params\n init_weight(self.nets['discriminator'])\n self.optimizers['optimizer_Dis'] = build_optimizer(\n optimizer,\n self.dis_lr,\n parameters=self.nets['discriminator'].parameters())\n\n if (self.mode == \"kp_detector\"):\n init_weight(self.nets['kp_detector'])\n self.optimizers['optimizer_KP'] = build_optimizer(\n optimizer,\n self.kp_lr,\n parameters=self.nets['kp_detector'].parameters())\n elif self.mode == \"generator\":\n init_weight(self.nets['generator'])\n self.optimizers['optimizer_Gen'] = build_optimizer(\n optimizer,\n self.gen_lr,\n parameters=self.nets['generator'].parameters())\n elif self.mode == \"both\":\n super(FirstOrderModelMobile,\n self).setup_optimizers(lr_cfg, optimizer)\n print(\"load both pretrained kp_detector and generator\")\n checkpoint = paddle.load(self.kp_weight_path)\n self.nets['kp_detector'].set_state_dict(checkpoint['kp_detector'])\n checkpoint = paddle.load(self.gen_weight_path)\n self.nets['generator'].set_state_dict(checkpoint['generator'])\n\n # define loss functions\n self.losses = {}\n\n def forward(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n if (self.mode == \"kp_detector_distill\"):\n self.losses_generator, self.generated = \\\n self.nets['Gen_Full'](self.input_data.copy(), self.nets['discriminator'], self.kp_detector_ori)\n else:\n self.losses_generator, self.generated = \\\n self.nets['Gen_Full'](self.input_data.copy(), self.nets['discriminator'])\n\n def train_iter(self, optimizers=None):\n self.is_train = True\n if (self.mode == \"both\"):\n super(FirstOrderModelMobile, self).train_iter(optimizers=optimizers)\n return\n self.forward()\n # update G\n self.set_requires_grad(self.nets['discriminator'], False)\n if (self.mode == \"kp_detector\"):\n self.optimizers['optimizer_KP'].clear_grad()\n self.backward_G()\n self.optimizers['optimizer_KP'].step()\n if (self.mode == \"generator\"):\n self.optimizers['optimizer_Gen'].clear_grad()\n self.backward_G()\n self.optimizers['optimizer_Gen'].step()\n\n # update D\n if self.train_params['loss_weights']['generator_gan'] != 0:\n self.set_requires_grad(self.nets['discriminator'], True)\n self.optimizers['optimizer_Dis'].clear_grad()\n self.backward_D()\n self.optimizers['optimizer_Dis'].step()\n\n\nclass Visualizer:\n def __init__(self, kp_size=3, draw_border=False, colormap='gist_rainbow'):\n plt = try_import('matplotlib.pyplot')\n self.kp_size = kp_size\n self.draw_border = draw_border\n self.colormap = plt.get_cmap(colormap)\n\n def draw_image_with_kp(self, image, kp_array):\n image = np.copy(image)\n spatial_size = np.array(image.shape[:2][::-1])[np.newaxis]\n kp_array = spatial_size * (kp_array + 1) / 2\n num_kp = kp_array.shape[0]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = (image * 255).astype(np.uint8)\n for kp_ind, kp in enumerate(kp_array):\n color = cv2.applyColorMap(\n np.array(kp_ind / num_kp * 255).astype(np.uint8),\n cv2.COLORMAP_JET)[0][0]\n color = (int(color[0]), int(color[1]), int(color[2]))\n image = cv2.circle(image, (int(kp[1]), int(kp[0])), self.kp_size,\n color, 3)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR).astype('float32') / 255.0\n return image\n\n def create_image_column_with_kp(self, images, kp):\n image_array = np.array(\n [self.draw_image_with_kp(v, k) for 
v, k in zip(images, kp)])\n return self.create_image_column(image_array)\n\n def create_image_column(self, images, draw_border=False):\n if draw_border:\n images = np.copy(images)\n images[:, :, [0, -1]] = (1, 1, 1)\n images[:, :, [0, -1]] = (1, 1, 1)\n return np.concatenate(list(images), axis=0)\n\n def create_image_grid(self, *args):\n out = []\n for arg in args:\n if type(arg) == tuple:\n out.append(self.create_image_column_with_kp(arg[0], arg[1]))\n else:\n out.append(self.create_image_column(arg))\n return np.concatenate(out, axis=1)\n\n def visualize(self, driving, source, out):\n images = []\n # Source image with keypoints\n source = source.cpu().numpy()\n kp_source = out['kp_source']['value'].cpu().numpy()\n source = np.transpose(source, [0, 2, 3, 1])\n images.append((source, kp_source))\n\n # Equivariance visualization\n if 'transformed_frame' in out:\n transformed = out['transformed_frame'].cpu().numpy()\n transformed = np.transpose(transformed, [0, 2, 3, 1])\n transformed_kp = out['transformed_kp']['value'].cpu().numpy()\n images.append((transformed, transformed_kp))\n\n # Driving image with keypoints\n kp_driving = out['kp_driving']['value'].cpu().numpy()\n driving = driving.cpu().numpy()\n driving = np.transpose(driving, [0, 2, 3, 1])\n images.append((driving, kp_driving))\n\n # Deformed image\n if 'deformed' in out:\n deformed = out['deformed'].cpu().numpy()\n deformed = np.transpose(deformed, [0, 2, 3, 1])\n images.append(deformed)\n\n # Result with and without keypoints\n prediction = out['prediction'].cpu().numpy()\n prediction = np.transpose(prediction, [0, 2, 3, 1])\n if 'kp_norm' in out:\n kp_norm = out['kp_norm']['value'].cpu().numpy()\n images.append((prediction, kp_norm))\n images.append(prediction)\n\n ## Occlusion map\n if 'occlusion_map' in out:\n occlusion_map = out['occlusion_map'].cpu().tile([1, 3, 1, 1])\n occlusion_map = F.interpolate(\n occlusion_map, size=source.shape[1:3]).numpy()\n occlusion_map = np.transpose(occlusion_map, [0, 2, 3, 1])\n images.append(occlusion_map)\n\n # Deformed images according to each individual transform\n if 'sparse_deformed' in out:\n full_mask = []\n for i in range(out['sparse_deformed'].shape[1]):\n image = out['sparse_deformed'][:, i].cpu()\n image = F.interpolate(image, size=source.shape[1:3])\n mask = out['mask'][:, i:(i + 1)].cpu().tile([1, 3, 1, 1])\n mask = F.interpolate(mask, size=source.shape[1:3])\n image = np.transpose(image.numpy(), (0, 2, 3, 1))\n mask = np.transpose(mask.numpy(), (0, 2, 3, 1))\n\n if i != 0:\n color = np.array(\n self.colormap((i - 1) / (out['sparse_deformed'].shape[1]\n - 1)))[:3]\n else:\n color = np.array((0, 0, 0))\n\n color = color.reshape((1, 1, 1, 3))\n\n images.append(image)\n if i != 0:\n images.append(mask * color)\n else:\n images.append(mask)\n\n full_mask.append(mask * color)\n\n images.append(sum(full_mask))\n\n image = self.create_image_grid(*images)\n image = (255 * image).astype(np.uint8)\n return image\n" ]
[ [ "numpy.zeros_like", "numpy.unique" ], [ "numpy.array" ], [ "numpy.asarray", "numpy.ascontiguousarray", "scipy.spatial.distance.cdist", "numpy.where", "numpy.empty" ], [ "numpy.concatenate", "numpy.copy", "numpy.mean", "numpy.transpose", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Lufeifeina/models
[ "d7d260d4c690e5163070e21d75df372ab559ea23", "d7d260d4c690e5163070e21d75df372ab559ea23", "d7d260d4c690e5163070e21d75df372ab559ea23", "d7d260d4c690e5163070e21d75df372ab559ea23", "d7d260d4c690e5163070e21d75df372ab559ea23" ]
[ "official/core/train_lib.py", "official/vision/tasks/maskrcnn.py", "official/nlp/modeling/layers/reuse_transformer.py", "official/vision/modeling/backbones/resnet_deeplab_test.py", "official/nlp/modeling/layers/relative_attention.py" ]
[ "# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TFM common training driver library.\"\"\"\n# pytype: disable=attribute-error\nimport os\nfrom typing import Any, Mapping, Optional, Tuple, List\n\n# Import libraries\n\nfrom absl import logging\nimport orbit\nimport tensorflow as tf\n\nfrom official.core import actions\nfrom official.core import base_task\nfrom official.core import base_trainer\nfrom official.core import config_definitions\nfrom official.core import train_utils\n\nmaybe_create_best_ckpt_exporter = train_utils.maybe_create_best_ckpt_exporter\n\n\ndef run_experiment(\n distribution_strategy: tf.distribute.Strategy,\n task: base_task.Task,\n mode: str,\n params: config_definitions.ExperimentConfig,\n model_dir: str,\n run_post_eval: bool = False,\n save_summary: bool = True,\n train_actions: Optional[List[orbit.Action]] = None,\n eval_actions: Optional[List[orbit.Action]] = None,\n trainer: Optional[base_trainer.Trainer] = None,\n controller_cls=orbit.Controller\n) -> Tuple[tf.keras.Model, Mapping[str, Any]]:\n \"\"\"Runs train/eval configured by the experiment params.\n\n Args:\n distribution_strategy: A distribution distribution_strategy.\n task: A Task instance.\n mode: A 'str', specifying the mode. Can be 'train', 'eval', 'train_and_eval'\n or 'continuous_eval'.\n params: ExperimentConfig instance.\n model_dir: A 'str', a path to store model checkpoints and summaries.\n run_post_eval: Whether to run post eval once after training, metrics logs\n are returned.\n save_summary: Whether to save train and validation summary.\n train_actions: Optional list of Orbit train actions.\n eval_actions: Optional list of Orbit eval actions.\n trainer: the base_trainer.Trainer instance. 
It should be created within the\n strategy.scope().\n controller_cls: The controller class to manage the train and eval process.\n Must be a orbit.Controller subclass.\n\n Returns:\n A 2-tuple of (model, eval_logs).\n model: `tf.keras.Model` instance.\n eval_logs: returns eval metrics logs when run_post_eval is set to True,\n otherwise, returns {}.\n \"\"\"\n\n with distribution_strategy.scope():\n if not trainer:\n trainer = train_utils.create_trainer(\n params,\n task,\n train='train' in mode,\n evaluate=('eval' in mode) or run_post_eval,\n checkpoint_exporter=maybe_create_best_ckpt_exporter(\n params, model_dir))\n\n if trainer.checkpoint:\n if model_dir is None:\n raise ValueError('model_dir must be specified, but got None')\n checkpoint_manager = tf.train.CheckpointManager(\n trainer.checkpoint,\n directory=model_dir,\n max_to_keep=params.trainer.max_to_keep,\n step_counter=trainer.global_step,\n checkpoint_interval=params.trainer.checkpoint_interval,\n init_fn=trainer.initialize)\n else:\n checkpoint_manager = None\n\n train_actions = [] if not train_actions else train_actions\n train_actions += actions.get_train_actions(\n params, trainer, model_dir, checkpoint_manager=checkpoint_manager)\n\n eval_actions = [] if not eval_actions else eval_actions\n eval_actions += actions.get_eval_actions(params, trainer, model_dir)\n\n controller = controller_cls(\n strategy=distribution_strategy,\n trainer=trainer if 'train' in mode else None,\n evaluator=trainer,\n global_step=trainer.global_step,\n steps_per_loop=params.trainer.steps_per_loop,\n checkpoint_manager=checkpoint_manager,\n summary_dir=os.path.join(model_dir, 'train') if (save_summary) else None,\n eval_summary_dir=os.path.join(model_dir,\n params.trainer.validation_summary_subdir) if\n (save_summary) else None,\n summary_interval=params.trainer.summary_interval if\n (save_summary) else None,\n train_actions=train_actions,\n eval_actions=eval_actions)\n\n logging.info('Starts to execute mode: %s', mode)\n with distribution_strategy.scope():\n if mode == 'train' or mode == 'train_and_post_eval':\n controller.train(steps=params.trainer.train_steps)\n elif mode == 'train_and_eval':\n controller.train_and_evaluate(\n train_steps=params.trainer.train_steps,\n eval_steps=params.trainer.validation_steps,\n eval_interval=params.trainer.validation_interval)\n elif mode == 'eval':\n controller.evaluate(steps=params.trainer.validation_steps)\n elif mode == 'continuous_eval':\n\n def timeout_fn():\n if trainer.global_step.numpy() >= params.trainer.train_steps:\n return True\n return False\n\n controller.evaluate_continuously(\n steps=params.trainer.validation_steps,\n timeout=params.trainer.continuous_eval_timeout,\n timeout_fn=timeout_fn)\n else:\n raise NotImplementedError('The mode is not implemented: %s' % mode)\n\n num_params = train_utils.try_count_params(trainer.model)\n if num_params is not None:\n logging.info('Number of trainable params in model: %f Millions.',\n num_params / 10.**6)\n\n flops = train_utils.try_count_flops(trainer.model)\n if flops is not None:\n logging.info('FLOPs (multi-adds) in model: %f Billions.',\n flops / 10.**9 / 2)\n\n if run_post_eval or mode == 'train_and_post_eval':\n with distribution_strategy.scope():\n return trainer.model, controller.evaluate(\n steps=params.trainer.validation_steps)\n else:\n return trainer.model, {}\n", "# Copyright 2022 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"MaskRCNN task definition.\"\"\"\n\nimport os\nfrom typing import Any, Dict, Optional, List, Tuple, Mapping\n\nfrom absl import logging\nimport tensorflow as tf\nfrom official.common import dataset_fn as dataset_fn_lib\nfrom official.core import base_task\nfrom official.core import task_factory\nfrom official.vision.configs import maskrcnn as exp_cfg\nfrom official.vision.dataloaders import input_reader_factory\nfrom official.vision.dataloaders import maskrcnn_input\nfrom official.vision.dataloaders import tf_example_decoder\nfrom official.vision.dataloaders import tf_example_label_map_decoder\nfrom official.vision.evaluation import coco_evaluator\nfrom official.vision.evaluation import coco_utils\nfrom official.vision.losses import maskrcnn_losses\nfrom official.vision.modeling import factory\n\n\ndef zero_out_disallowed_class_ids(batch_class_ids: tf.Tensor,\n allowed_class_ids: List[int]):\n \"\"\"Zero out IDs of classes not in allowed_class_ids.\n\n Args:\n batch_class_ids: A [batch_size, num_instances] int tensor of input\n class IDs.\n allowed_class_ids: A python list of class IDs which we want to allow.\n\n Returns:\n filtered_class_ids: A [batch_size, num_instances] int tensor with any\n class ID not in allowed_class_ids set to 0.\n \"\"\"\n\n allowed_class_ids = tf.constant(allowed_class_ids,\n dtype=batch_class_ids.dtype)\n\n match_ids = (batch_class_ids[:, :, tf.newaxis] ==\n allowed_class_ids[tf.newaxis, tf.newaxis, :])\n\n match_ids = tf.reduce_any(match_ids, axis=2)\n return tf.where(match_ids, batch_class_ids, tf.zeros_like(batch_class_ids))\n\n\n@task_factory.register_task_cls(exp_cfg.MaskRCNNTask)\nclass MaskRCNNTask(base_task.Task):\n \"\"\"A single-replica view of training procedure.\n\n Mask R-CNN task provides artifacts for training/evalution procedures,\n including loading/iterating over Datasets, initializing the model, calculating\n the loss, post-processing, and customized metrics with reduction.\n \"\"\"\n\n def build_model(self):\n \"\"\"Build Mask R-CNN model.\"\"\"\n\n input_specs = tf.keras.layers.InputSpec(\n shape=[None] + self.task_config.model.input_size)\n\n l2_weight_decay = self.task_config.losses.l2_weight_decay\n # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.\n # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)\n # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)\n l2_regularizer = (tf.keras.regularizers.l2(\n l2_weight_decay / 2.0) if l2_weight_decay else None)\n\n model = factory.build_maskrcnn(\n input_specs=input_specs,\n model_config=self.task_config.model,\n l2_regularizer=l2_regularizer)\n\n if self.task_config.freeze_backbone:\n model.backbone.trainable = False\n\n return model\n\n def initialize(self, model: tf.keras.Model):\n \"\"\"Loading pretrained checkpoint.\"\"\"\n\n if not self.task_config.init_checkpoint:\n return\n\n ckpt_dir_or_file = self.task_config.init_checkpoint\n if 
tf.io.gfile.isdir(ckpt_dir_or_file):\n ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)\n\n # Restoring checkpoint.\n if self.task_config.init_checkpoint_modules == 'all':\n ckpt = tf.train.Checkpoint(**model.checkpoint_items)\n status = ckpt.read(ckpt_dir_or_file)\n status.expect_partial().assert_existing_objects_matched()\n else:\n ckpt_items = {}\n if 'backbone' in self.task_config.init_checkpoint_modules:\n ckpt_items.update(backbone=model.backbone)\n if 'decoder' in self.task_config.init_checkpoint_modules:\n ckpt_items.update(decoder=model.decoder)\n\n ckpt = tf.train.Checkpoint(**ckpt_items)\n status = ckpt.read(ckpt_dir_or_file)\n status.expect_partial().assert_existing_objects_matched()\n\n logging.info('Finished loading pretrained checkpoint from %s',\n ckpt_dir_or_file)\n\n def build_inputs(\n self,\n params: exp_cfg.DataConfig,\n input_context: Optional[tf.distribute.InputContext] = None,\n dataset_fn: Optional[dataset_fn_lib.PossibleDatasetType] = None):\n \"\"\"Build input dataset.\"\"\"\n decoder_cfg = params.decoder.get()\n if params.decoder.type == 'simple_decoder':\n decoder = tf_example_decoder.TfExampleDecoder(\n include_mask=self._task_config.model.include_mask,\n regenerate_source_id=decoder_cfg.regenerate_source_id,\n mask_binarize_threshold=decoder_cfg.mask_binarize_threshold)\n elif params.decoder.type == 'label_map_decoder':\n decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(\n label_map=decoder_cfg.label_map,\n include_mask=self._task_config.model.include_mask,\n regenerate_source_id=decoder_cfg.regenerate_source_id,\n mask_binarize_threshold=decoder_cfg.mask_binarize_threshold)\n else:\n raise ValueError('Unknown decoder type: {}!'.format(params.decoder.type))\n\n parser = maskrcnn_input.Parser(\n output_size=self.task_config.model.input_size[:2],\n min_level=self.task_config.model.min_level,\n max_level=self.task_config.model.max_level,\n num_scales=self.task_config.model.anchor.num_scales,\n aspect_ratios=self.task_config.model.anchor.aspect_ratios,\n anchor_size=self.task_config.model.anchor.anchor_size,\n dtype=params.dtype,\n rpn_match_threshold=params.parser.rpn_match_threshold,\n rpn_unmatched_threshold=params.parser.rpn_unmatched_threshold,\n rpn_batch_size_per_im=params.parser.rpn_batch_size_per_im,\n rpn_fg_fraction=params.parser.rpn_fg_fraction,\n aug_rand_hflip=params.parser.aug_rand_hflip,\n aug_scale_min=params.parser.aug_scale_min,\n aug_scale_max=params.parser.aug_scale_max,\n skip_crowd_during_training=params.parser.skip_crowd_during_training,\n max_num_instances=params.parser.max_num_instances,\n include_mask=self._task_config.model.include_mask,\n mask_crop_size=params.parser.mask_crop_size)\n\n if not dataset_fn:\n dataset_fn = dataset_fn_lib.pick_dataset_fn(params.file_type)\n\n reader = input_reader_factory.input_reader_generator(\n params,\n dataset_fn=dataset_fn,\n decoder_fn=decoder.decode,\n parser_fn=parser.parse_fn(params.is_training))\n dataset = reader.read(input_context=input_context)\n\n return dataset\n\n def _build_rpn_losses(\n self, outputs: Mapping[str, Any],\n labels: Mapping[str, Any]) -> Tuple[tf.Tensor, tf.Tensor]:\n \"\"\"Build losses for Region Proposal Network (RPN).\"\"\"\n rpn_score_loss_fn = maskrcnn_losses.RpnScoreLoss(\n tf.shape(outputs['box_outputs'])[1])\n rpn_box_loss_fn = maskrcnn_losses.RpnBoxLoss(\n self.task_config.losses.rpn_huber_loss_delta)\n rpn_score_loss = tf.reduce_mean(\n rpn_score_loss_fn(outputs['rpn_scores'], labels['rpn_score_targets']))\n rpn_box_loss = 
tf.reduce_mean(\n rpn_box_loss_fn(outputs['rpn_boxes'], labels['rpn_box_targets']))\n return rpn_score_loss, rpn_box_loss\n\n def _build_frcnn_losses(\n self, outputs: Mapping[str, Any],\n labels: Mapping[str, Any]) -> Tuple[tf.Tensor, tf.Tensor]:\n \"\"\"Build losses for Fast R-CNN.\"\"\"\n cascade_ious = self.task_config.model.roi_sampler.cascade_iou_thresholds\n\n frcnn_cls_loss_fn = maskrcnn_losses.FastrcnnClassLoss()\n frcnn_box_loss_fn = maskrcnn_losses.FastrcnnBoxLoss(\n self.task_config.losses.frcnn_huber_loss_delta,\n self.task_config.model.detection_head.class_agnostic_bbox_pred)\n\n # Final cls/box losses are computed as an average of all detection heads.\n frcnn_cls_loss = 0.0\n frcnn_box_loss = 0.0\n num_det_heads = 1 if cascade_ious is None else 1 + len(cascade_ious)\n for cas_num in range(num_det_heads):\n frcnn_cls_loss_i = tf.reduce_mean(\n frcnn_cls_loss_fn(\n outputs['class_outputs_{}'\n .format(cas_num) if cas_num else 'class_outputs'],\n outputs['class_targets_{}'\n .format(cas_num) if cas_num else 'class_targets']))\n frcnn_box_loss_i = tf.reduce_mean(\n frcnn_box_loss_fn(\n outputs['box_outputs_{}'.format(cas_num\n ) if cas_num else 'box_outputs'],\n outputs['class_targets_{}'\n .format(cas_num) if cas_num else 'class_targets'],\n outputs['box_targets_{}'.format(cas_num\n ) if cas_num else 'box_targets']))\n frcnn_cls_loss += frcnn_cls_loss_i\n frcnn_box_loss += frcnn_box_loss_i\n frcnn_cls_loss /= num_det_heads\n frcnn_box_loss /= num_det_heads\n return frcnn_cls_loss, frcnn_box_loss\n\n def _build_mask_loss(self, outputs: Mapping[str, Any]) -> tf.Tensor:\n \"\"\"Build losses for the masks.\"\"\"\n mask_loss_fn = maskrcnn_losses.MaskrcnnLoss()\n mask_class_targets = outputs['mask_class_targets']\n if self.task_config.allowed_mask_class_ids is not None:\n # Classes with ID=0 are ignored by mask_loss_fn in loss computation.\n mask_class_targets = zero_out_disallowed_class_ids(\n mask_class_targets, self.task_config.allowed_mask_class_ids)\n return tf.reduce_mean(\n mask_loss_fn(outputs['mask_outputs'], outputs['mask_targets'],\n mask_class_targets))\n\n def build_losses(self,\n outputs: Mapping[str, Any],\n labels: Mapping[str, Any],\n aux_losses: Optional[Any] = None) -> Dict[str, tf.Tensor]:\n \"\"\"Build Mask R-CNN losses.\"\"\"\n rpn_score_loss, rpn_box_loss = self._build_rpn_losses(outputs, labels)\n frcnn_cls_loss, frcnn_box_loss = self._build_frcnn_losses(outputs, labels)\n if self.task_config.model.include_mask:\n mask_loss = self._build_mask_loss(outputs)\n else:\n mask_loss = tf.constant(0.0, dtype=tf.float32)\n\n params = self.task_config\n model_loss = (\n params.losses.rpn_score_weight * rpn_score_loss +\n params.losses.rpn_box_weight * rpn_box_loss +\n params.losses.frcnn_class_weight * frcnn_cls_loss +\n params.losses.frcnn_box_weight * frcnn_box_loss +\n params.losses.mask_weight * mask_loss)\n\n total_loss = model_loss\n if aux_losses:\n reg_loss = tf.reduce_sum(aux_losses)\n total_loss = model_loss + reg_loss\n\n total_loss = params.losses.loss_weight * total_loss\n losses = {\n 'total_loss': total_loss,\n 'rpn_score_loss': rpn_score_loss,\n 'rpn_box_loss': rpn_box_loss,\n 'frcnn_cls_loss': frcnn_cls_loss,\n 'frcnn_box_loss': frcnn_box_loss,\n 'mask_loss': mask_loss,\n 'model_loss': model_loss,\n }\n return losses\n\n def _build_coco_metrics(self):\n \"\"\"Build COCO metrics evaluator.\"\"\"\n if (not self._task_config.model.include_mask\n ) or self._task_config.annotation_file:\n self.coco_metric = coco_evaluator.COCOEvaluator(\n 
annotation_file=self._task_config.annotation_file,\n include_mask=self._task_config.model.include_mask,\n per_category_metrics=self._task_config.per_category_metrics)\n else:\n # Builds COCO-style annotation file if include_mask is True, and\n # annotation_file isn't provided.\n annotation_path = os.path.join(self._logging_dir, 'annotation.json')\n if tf.io.gfile.exists(annotation_path):\n logging.info(\n 'annotation.json file exists, skipping creating the annotation'\n ' file.')\n else:\n if self._task_config.validation_data.num_examples <= 0:\n logging.info('validation_data.num_examples needs to be > 0')\n if not self._task_config.validation_data.input_path:\n logging.info('Can not create annotation file for tfds.')\n logging.info(\n 'Creating coco-style annotation file: %s', annotation_path)\n coco_utils.scan_and_generator_annotation_file(\n self._task_config.validation_data.input_path,\n self._task_config.validation_data.file_type,\n self._task_config.validation_data.num_examples,\n self.task_config.model.include_mask, annotation_path,\n regenerate_source_id=self._task_config.validation_data.decoder\n .simple_decoder.regenerate_source_id)\n self.coco_metric = coco_evaluator.COCOEvaluator(\n annotation_file=annotation_path,\n include_mask=self._task_config.model.include_mask,\n per_category_metrics=self._task_config.per_category_metrics)\n\n def build_metrics(self, training: bool = True):\n \"\"\"Build detection metrics.\"\"\"\n metrics = []\n if training:\n metric_names = [\n 'total_loss',\n 'rpn_score_loss',\n 'rpn_box_loss',\n 'frcnn_cls_loss',\n 'frcnn_box_loss',\n 'mask_loss',\n 'model_loss'\n ]\n for name in metric_names:\n metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))\n\n else:\n if self._task_config.use_coco_metrics:\n self._build_coco_metrics()\n if self._task_config.use_wod_metrics:\n # To use Waymo open dataset metrics, please install one of the pip\n # package `waymo-open-dataset-tf-*` from\n # https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md#use-pre-compiled-pippip3-packages-for-linux\n # Note that the package is built with specific tensorflow version and\n # will produce error if it does not match the tf version that is\n # currently used.\n try:\n from official.vision.evaluation import wod_detection_evaluator # pylint: disable=g-import-not-at-top\n except ModuleNotFoundError:\n logging.error('waymo-open-dataset should be installed to enable Waymo'\n ' evaluator.')\n raise\n self.wod_metric = wod_detection_evaluator.WOD2dDetectionEvaluator()\n\n return metrics\n\n def train_step(self,\n inputs: Tuple[Any, Any],\n model: tf.keras.Model,\n optimizer: tf.keras.optimizers.Optimizer,\n metrics: Optional[List[Any]] = None):\n \"\"\"Does forward and backward.\n\n Args:\n inputs: a dictionary of input tensors.\n model: the model, forward pass definition.\n optimizer: the optimizer for this training step.\n metrics: a nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n \"\"\"\n images, labels = inputs\n num_replicas = tf.distribute.get_strategy().num_replicas_in_sync\n with tf.GradientTape() as tape:\n outputs = model(\n images,\n image_shape=labels['image_info'][:, 1, :],\n anchor_boxes=labels['anchor_boxes'],\n gt_boxes=labels['gt_boxes'],\n gt_classes=labels['gt_classes'],\n gt_masks=(labels['gt_masks'] if self.task_config.model.include_mask\n else None),\n training=True)\n outputs = tf.nest.map_structure(\n lambda x: tf.cast(x, tf.float32), outputs)\n\n # Computes per-replica loss.\n losses = 
self.build_losses(\n outputs=outputs, labels=labels, aux_losses=model.losses)\n scaled_loss = losses['total_loss'] / num_replicas\n\n # For mixed_precision policy, when LossScaleOptimizer is used, loss is\n # scaled for numerical stability.\n if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):\n scaled_loss = optimizer.get_scaled_loss(scaled_loss)\n\n tvars = model.trainable_variables\n grads = tape.gradient(scaled_loss, tvars)\n # Scales back gradient when LossScaleOptimizer is used.\n if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):\n grads = optimizer.get_unscaled_gradients(grads)\n optimizer.apply_gradients(list(zip(grads, tvars)))\n\n logs = {self.loss: losses['total_loss']}\n\n if metrics:\n for m in metrics:\n m.update_state(losses[m.name])\n\n return logs\n\n def validation_step(self,\n inputs: Tuple[Any, Any],\n model: tf.keras.Model,\n metrics: Optional[List[Any]] = None):\n \"\"\"Validatation step.\n\n Args:\n inputs: a dictionary of input tensors.\n model: the keras.Model.\n metrics: a nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n \"\"\"\n images, labels = inputs\n\n outputs = model(\n images,\n anchor_boxes=labels['anchor_boxes'],\n image_shape=labels['image_info'][:, 1, :],\n training=False)\n\n logs = {self.loss: 0}\n if self._task_config.use_coco_metrics:\n coco_model_outputs = {\n 'detection_boxes': outputs['detection_boxes'],\n 'detection_scores': outputs['detection_scores'],\n 'detection_classes': outputs['detection_classes'],\n 'num_detections': outputs['num_detections'],\n 'source_id': labels['groundtruths']['source_id'],\n 'image_info': labels['image_info']\n }\n if self.task_config.model.include_mask:\n coco_model_outputs.update({\n 'detection_masks': outputs['detection_masks'],\n })\n logs.update(\n {self.coco_metric.name: (labels['groundtruths'], coco_model_outputs)})\n\n if self.task_config.use_wod_metrics:\n wod_model_outputs = {\n 'detection_boxes': outputs['detection_boxes'],\n 'detection_scores': outputs['detection_scores'],\n 'detection_classes': outputs['detection_classes'],\n 'num_detections': outputs['num_detections'],\n 'source_id': labels['groundtruths']['source_id'],\n 'image_info': labels['image_info']\n }\n logs.update(\n {self.wod_metric.name: (labels['groundtruths'], wod_model_outputs)})\n return logs\n\n def aggregate_logs(self, state=None, step_outputs=None):\n if self._task_config.use_coco_metrics:\n if state is None:\n self.coco_metric.reset_states()\n self.coco_metric.update_state(\n step_outputs[self.coco_metric.name][0],\n step_outputs[self.coco_metric.name][1])\n if self._task_config.use_wod_metrics:\n if state is None:\n self.wod_metric.reset_states()\n self.wod_metric.update_state(\n step_outputs[self.wod_metric.name][0],\n step_outputs[self.wod_metric.name][1])\n if state is None:\n # Create an arbitrary state to indicate it's not the first step in the\n # following calls to this function.\n state = True\n return state\n\n def reduce_aggregated_logs(self, aggregated_logs, global_step=None):\n logs = {}\n if self._task_config.use_coco_metrics:\n logs.update(self.coco_metric.result())\n if self._task_config.use_wod_metrics:\n logs.update(self.wod_metric.result())\n return logs\n", "# Copyright 2022 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Keras-based TransformerEncoder block layer.\"\"\"\nimport tensorflow as tf\n\nfrom official.modeling import tf_utils\nfrom official.nlp.modeling.layers import reuse_attention as attention\n\n\nclass ReuseTransformer(tf.keras.layers.Layer):\n \"\"\"Transformer layer.\n\n This layer implements the ReuseTransformer Encoder from\n \"Leveraging redundancy in attention with Reuse Transformers\".\n (https://arxiv.org/abs/2110.06821)\n \"\"\"\n\n def __init__(self,\n num_attention_heads,\n inner_dim,\n inner_activation,\n head_size=None,\n output_range=None,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n use_bias=True,\n norm_first=False,\n norm_epsilon=1e-12,\n output_dropout=0.0,\n attention_dropout=0.0,\n inner_dropout=0.0,\n attention_initializer=None,\n attention_axes=None,\n reuse_attention=0,\n use_relative_pe=False,\n pe_max_seq_length=512,\n layer_idx=None,\n max_reuse_layer_idx=None,\n **kwargs):\n \"\"\"Initializes `ReuseTransformer`.\n\n Args:\n num_attention_heads: Number of attention heads.\n inner_dim: The output dimension of the first Dense layer in a two-layer\n feedforward network.\n inner_activation: The activation for the first Dense layer in a two-layer\n feedforward network.\n head_size: Projection size of heads.\n output_range: the sequence output range, [0, output_range) for slicing the\n target sequence. `None` means the target sequence is not sliced.\n kernel_initializer: Initializer for dense layer kernels.\n bias_initializer: Initializer for dense layer biases.\n kernel_regularizer: Regularizer for dense layer kernels.\n bias_regularizer: Regularizer for dense layer biases.\n activity_regularizer: Regularizer for dense layer activity.\n kernel_constraint: Constraint for dense layer kernels.\n bias_constraint: Constraint for dense layer kernels.\n use_bias: Whether to enable use_bias in attention layer. If set False,\n use_bias in attention layer is disabled.\n norm_first: Whether to normalize inputs to attention and intermediate\n dense layers. If set False, output of attention and intermediate dense\n layers is normalized.\n norm_epsilon: Epsilon value to initialize normalization layers.\n output_dropout: Dropout probability for the post-attention and output\n dropout.\n attention_dropout: Dropout probability for within the attention layer.\n inner_dropout: Dropout probability for the first Dense layer in a\n two-layer feedforward network.\n attention_initializer: Initializer for kernels of attention layers. If set\n `None`, attention layers use kernel_initializer as initializer for\n kernel.\n attention_axes: axes over which the attention is applied. `None` means\n attention over all axes, but batch, heads, and features.\n reuse_attention: reuse_attention: An integer specifying number of heads\n to reuse. 
-1 for all heads.\n use_relative_pe: whether to use relative position bias.\n pe_max_seq_length: used to set the size of the relative positin encodings.\n layer_idx: the idx of this layer.\n max_reuse_layer_idx: layer idx (if passed) greater than this value will\n not reuse attention scores from previous layers.\n **kwargs: keyword arguments.\n \"\"\"\n super().__init__(**kwargs)\n\n self._num_heads = num_attention_heads\n self._inner_dim = inner_dim\n self._inner_activation = inner_activation\n self._head_size = head_size\n self._attention_dropout = attention_dropout\n self._attention_dropout_rate = attention_dropout\n self._output_dropout = output_dropout\n self._output_dropout_rate = output_dropout\n self._output_range = output_range\n self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n self._bias_initializer = tf.keras.initializers.get(bias_initializer)\n self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)\n self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)\n self._activity_regularizer = tf.keras.regularizers.get(activity_regularizer)\n self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)\n self._bias_constraint = tf.keras.constraints.get(bias_constraint)\n self._use_bias = use_bias\n self._norm_first = norm_first\n self._norm_epsilon = norm_epsilon\n self._inner_dropout = inner_dropout\n self._reuse_attention = reuse_attention\n self._use_relative_pe = use_relative_pe\n self._pe_max_seq_length = pe_max_seq_length\n self._layer_idx = layer_idx\n self._max_reuse_layer_idx = max_reuse_layer_idx\n # Overwrite for the first layer and layers greater than max_reuse_layer_idx.\n if self._layer_idx is not None and (\n self._layer_idx == 0 or (self._max_reuse_layer_idx is not None and\n self._max_reuse_layer_idx < self._layer_idx)):\n self._reuse_attention = 0\n if attention_initializer:\n self._attention_initializer = tf.keras.initializers.get(\n attention_initializer)\n else:\n self._attention_initializer = tf_utils.clone_initializer(\n self._kernel_initializer)\n self._attention_axes = attention_axes\n\n def build(self, input_shape):\n if isinstance(input_shape, tf.TensorShape):\n input_tensor_shape = input_shape\n elif isinstance(input_shape, (list, tuple)):\n input_tensor_shape = tf.TensorShape(input_shape[0])\n else:\n raise ValueError(\n \"The type of input shape argument is not supported, got: %s\" %\n type(input_shape))\n einsum_equation = \"abc,cd->abd\"\n if len(input_tensor_shape.as_list()) > 3:\n einsum_equation = \"...bc,cd->...bd\"\n hidden_size = input_tensor_shape[-1]\n if self._head_size is None:\n if hidden_size % self._num_heads != 0:\n raise ValueError(\n \"The input size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, self._num_heads))\n self._attention_head_size = int(hidden_size // self._num_heads)\n else:\n self._attention_head_size = self._head_size\n common_kwargs = dict(\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer,\n activity_regularizer=self._activity_regularizer,\n kernel_constraint=self._kernel_constraint,\n bias_constraint=self._bias_constraint)\n self._attention_layer = attention.ReuseMultiHeadAttention(\n num_heads=self._num_heads,\n key_dim=self._attention_head_size,\n dropout=self._attention_dropout,\n use_bias=self._use_bias,\n kernel_initializer=self._attention_initializer,\n bias_initializer=tf_utils.clone_initializer(self._bias_initializer),\n attention_axes=self._attention_axes,\n 
reuse_attention=self._reuse_attention,\n use_relative_pe=self._use_relative_pe,\n pe_max_seq_length=self._pe_max_seq_length,\n name=\"self_attention\",\n **common_kwargs)\n self._attention_dropout = tf.keras.layers.Dropout(\n rate=self._output_dropout)\n # Use float32 in layernorm for numeric stability.\n # It is probably safe in mixed_float16, but we haven't validated this yet.\n self._attention_layer_norm = (\n tf.keras.layers.LayerNormalization(\n name=\"self_attention_layer_norm\",\n axis=-1,\n epsilon=self._norm_epsilon,\n dtype=tf.float32))\n self._intermediate_dense = tf.keras.layers.EinsumDense(\n einsum_equation,\n output_shape=(None, self._inner_dim),\n bias_axes=\"d\",\n kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),\n bias_initializer=tf_utils.clone_initializer(self._bias_initializer),\n name=\"intermediate\",\n **common_kwargs)\n policy = tf.keras.mixed_precision.global_policy()\n if policy.name == \"mixed_bfloat16\":\n # bfloat16 causes BERT with the LAMB optimizer to not converge\n # as well, so we use float32.\n # TODO(b/154538392): Investigate this.\n policy = tf.float32\n self._intermediate_activation_layer = tf.keras.layers.Activation(\n self._inner_activation, dtype=policy)\n self._inner_dropout_layer = tf.keras.layers.Dropout(\n rate=self._inner_dropout)\n self._output_dense = tf.keras.layers.EinsumDense(\n einsum_equation,\n output_shape=(None, hidden_size),\n bias_axes=\"d\",\n name=\"output\",\n kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),\n bias_initializer=tf_utils.clone_initializer(self._bias_initializer),\n **common_kwargs)\n self._output_dropout = tf.keras.layers.Dropout(rate=self._output_dropout)\n # Use float32 in layernorm for numeric stability.\n self._output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"output_layer_norm\",\n axis=-1,\n epsilon=self._norm_epsilon,\n dtype=tf.float32)\n\n super(ReuseTransformer, self).build(input_shape)\n\n def get_config(self):\n config = {\n \"num_attention_heads\":\n self._num_heads,\n \"inner_dim\":\n self._inner_dim,\n \"inner_activation\":\n self._inner_activation,\n \"head_size\":\n self._head_size,\n \"output_dropout\":\n self._output_dropout_rate,\n \"attention_dropout\":\n self._attention_dropout_rate,\n \"output_range\":\n self._output_range,\n \"reuse_attention\":\n self._reuse_attention,\n \"use_relative_pe\": self._use_relative_pe,\n \"pe_max_seq_length\": self._pe_max_seq_length,\n \"max_reuse_layer_idx\": self._max_reuse_layer_idx,\n \"kernel_initializer\":\n tf.keras.initializers.serialize(self._kernel_initializer),\n \"bias_initializer\":\n tf.keras.initializers.serialize(self._bias_initializer),\n \"kernel_regularizer\":\n tf.keras.regularizers.serialize(self._kernel_regularizer),\n \"bias_regularizer\":\n tf.keras.regularizers.serialize(self._bias_regularizer),\n \"activity_regularizer\":\n tf.keras.regularizers.serialize(self._activity_regularizer),\n \"kernel_constraint\":\n tf.keras.constraints.serialize(self._kernel_constraint),\n \"bias_constraint\":\n tf.keras.constraints.serialize(self._bias_constraint),\n \"use_bias\":\n self._use_bias,\n \"norm_first\":\n self._norm_first,\n \"norm_epsilon\":\n self._norm_epsilon,\n \"inner_dropout\":\n self._inner_dropout,\n \"attention_initializer\":\n tf.keras.initializers.serialize(self._attention_initializer),\n \"attention_axes\": self._attention_axes,\n }\n base_config = super(ReuseTransformer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def 
call(self, inputs):\n \"\"\"Transformer self-attention encoder block call.\n\n Args:\n inputs: a single tensor or a list of tensors.\n `input tensor` as the single sequence of embeddings.\n [`input tensor`, `attention mask`] to have the additional attention\n mask.\n [`query tensor`, `attention mask`, `attention scores`] to have\n additional attention scores for reuse computation. If `attention scores`\n is None, the reuse_attention flag will be ignored.\n Returns:\n An output tensor with the same dimensions as input/query tensor.\n Attention scores if return_attention_scores is true.\n \"\"\"\n if isinstance(inputs, (list, tuple)):\n if len(inputs) == 2:\n input_tensor, attention_mask = inputs\n reuse_attention_scores = None\n elif len(inputs) == 3:\n input_tensor, attention_mask, reuse_attention_scores = inputs\n else:\n raise ValueError(\"Unexpected inputs to %s with length at %d\" %\n (self.__class__, len(inputs)))\n else:\n input_tensor, attention_mask, reuse_attention_scores = (inputs, None,\n None)\n\n key_value = None\n\n if self._reuse_attention != 0 and reuse_attention_scores is None:\n raise ValueError(\n \"reuse_attention_scores cannot be None when reuse_attention != 0.\")\n\n if self._output_range:\n if self._norm_first:\n source_tensor = input_tensor[:, 0:self._output_range, :]\n input_tensor = self._attention_layer_norm(input_tensor)\n if key_value is not None:\n key_value = self._attention_layer_norm(key_value)\n target_tensor = input_tensor[:, 0:self._output_range, :]\n if attention_mask is not None:\n attention_mask = attention_mask[:, 0:self._output_range, :]\n if reuse_attention_scores is not None:\n reuse_attention_scores = reuse_attention_scores[:, :,\n 0:self._output_range, :]\n else:\n if self._norm_first:\n source_tensor = input_tensor\n input_tensor = self._attention_layer_norm(input_tensor)\n if key_value is not None:\n key_value = self._attention_layer_norm(key_value)\n target_tensor = input_tensor\n\n if key_value is None:\n key_value = input_tensor\n attention_output = self._attention_layer(\n query=target_tensor, value=key_value, attention_mask=attention_mask,\n reuse_attention_scores=reuse_attention_scores,\n return_attention_scores=True)\n attention_output, attention_scores = attention_output\n attention_output = self._attention_dropout(attention_output)\n if self._norm_first:\n attention_output = source_tensor + attention_output\n else:\n attention_output = self._attention_layer_norm(target_tensor +\n attention_output)\n if self._norm_first:\n source_attention_output = attention_output\n attention_output = self._output_layer_norm(attention_output)\n\n inner_output = self._intermediate_dense(attention_output)\n inner_output = self._intermediate_activation_layer(inner_output)\n inner_output = self._inner_dropout_layer(inner_output)\n layer_output = self._output_dense(inner_output)\n layer_output = self._output_dropout(layer_output)\n\n if self._norm_first:\n return source_attention_output + layer_output, attention_scores\n\n # During mixed precision training, layer norm output is always fp32 for now.\n # Casts fp32 for the subsequent add.\n layer_output = tf.cast(layer_output, tf.float32)\n layer_output = self._output_layer_norm(layer_output + attention_output)\n return layer_output, attention_scores\n", "# Copyright 2022 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for resnet_deeplab models.\"\"\"\n\n# Import libraries\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import strategy_combinations\nfrom official.vision.modeling.backbones import resnet_deeplab\n\n\nclass ResNetTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(\n (128, 50, 4, 8),\n (128, 101, 4, 8),\n (128, 152, 4, 8),\n (128, 200, 4, 8),\n (128, 50, 4, 16),\n (128, 101, 4, 16),\n (128, 152, 4, 16),\n (128, 200, 4, 16),\n )\n def test_network_creation(self, input_size, model_id,\n endpoint_filter_scale, output_stride):\n \"\"\"Test creation of ResNet models.\"\"\"\n tf.keras.backend.set_image_data_format('channels_last')\n\n network = resnet_deeplab.DilatedResNet(model_id=model_id,\n output_stride=output_stride)\n inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)\n endpoints = network(inputs)\n print(endpoints)\n self.assertAllEqual([\n 1, input_size / output_stride, input_size / output_stride,\n 512 * endpoint_filter_scale\n ], endpoints[str(int(np.math.log2(output_stride)))].shape.as_list())\n\n @parameterized.parameters(\n ('v0', None, 0.0),\n ('v1', None, 0.0),\n ('v1', 0.25, 0.0),\n ('v1', 0.25, 0.2),\n )\n def test_network_features(self, stem_type, se_ratio,\n init_stochastic_depth_rate):\n \"\"\"Test additional features of ResNet models.\"\"\"\n input_size = 128\n model_id = 50\n endpoint_filter_scale = 4\n output_stride = 8\n\n tf.keras.backend.set_image_data_format('channels_last')\n\n network = resnet_deeplab.DilatedResNet(\n model_id=model_id,\n output_stride=output_stride,\n stem_type=stem_type,\n se_ratio=se_ratio,\n init_stochastic_depth_rate=init_stochastic_depth_rate)\n inputs = tf.keras.Input(shape=(input_size, input_size, 3), batch_size=1)\n endpoints = network(inputs)\n print(endpoints)\n self.assertAllEqual([\n 1, input_size / output_stride, input_size / output_stride,\n 512 * endpoint_filter_scale\n ], endpoints[str(int(np.math.log2(output_stride)))].shape.as_list())\n\n @combinations.generate(\n combinations.combine(\n strategy=[\n strategy_combinations.cloud_tpu_strategy,\n strategy_combinations.one_device_strategy_gpu,\n ],\n use_sync_bn=[False, True],\n ))\n def test_sync_bn_multiple_devices(self, strategy, use_sync_bn):\n \"\"\"Test for sync bn on TPU and GPU devices.\"\"\"\n inputs = np.random.rand(64, 128, 128, 3)\n\n tf.keras.backend.set_image_data_format('channels_last')\n\n with strategy.scope():\n network = resnet_deeplab.DilatedResNet(\n model_id=50, output_stride=8, use_sync_bn=use_sync_bn)\n _ = network(inputs)\n\n @parameterized.parameters(1, 3, 4)\n def test_input_specs(self, input_dim):\n \"\"\"Test different input feature dimensions.\"\"\"\n tf.keras.backend.set_image_data_format('channels_last')\n\n input_specs = tf.keras.layers.InputSpec(shape=[None, None, None, input_dim])\n network = 
resnet_deeplab.DilatedResNet(\n model_id=50, output_stride=8, input_specs=input_specs)\n\n inputs = tf.keras.Input(shape=(128, 128, input_dim), batch_size=1)\n _ = network(inputs)\n\n def test_serialize_deserialize(self):\n # Create a network object that sets all of its config options.\n kwargs = dict(\n model_id=50,\n output_stride=8,\n stem_type='v0',\n se_ratio=0.25,\n init_stochastic_depth_rate=0.2,\n use_sync_bn=False,\n activation='relu',\n norm_momentum=0.99,\n norm_epsilon=0.001,\n kernel_initializer='VarianceScaling',\n kernel_regularizer=None,\n bias_regularizer=None,\n )\n network = resnet_deeplab.DilatedResNet(**kwargs)\n\n expected_config = dict(kwargs)\n self.assertEqual(network.get_config(), expected_config)\n\n # Create another network object from the first object's config.\n new_network = resnet_deeplab.DilatedResNet.from_config(network.get_config())\n\n # Validate that the config can be forced to JSON.\n _ = new_network.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(network.get_config(), new_network.get_config())\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Keras-based relative attention layers.\"\"\"\nimport math\nimport string\nimport tensorflow as tf\n\n_CHR_IDX = string.ascii_lowercase\n\n\ndef _build_proj_equation(free_dims, bound_dims, output_dims):\n \"\"\"Builds an einsum equation for projections inside multi-head attention.\"\"\"\n input_str = \"\"\n kernel_str = \"\"\n output_str = \"\"\n bias_axes = \"\"\n letter_offset = 0\n for i in range(free_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n output_str += char\n\n letter_offset += free_dims\n for i in range(bound_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n kernel_str += char\n\n letter_offset += bound_dims\n for i in range(output_dims):\n char = _CHR_IDX[i + letter_offset]\n kernel_str += char\n output_str += char\n bias_axes += char\n equation = \"%s,%s->%s\" % (input_str, kernel_str, output_str)\n\n return equation, bias_axes, len(output_str)\n\n\ndef _get_output_shape(output_rank, known_last_dims):\n return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)\n\n\ndef _rel_shift(x, klen=-1):\n \"\"\"Performs relative shift to form the relative attention score.\"\"\"\n\n x = tf.transpose(x, perm=[2, 3, 0, 1])\n x_size = tf.shape(x)\n\n x = tf.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]])\n x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])\n x = tf.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]])\n x = tf.slice(x, [0, 0, 0, 0], [-1, klen, -1, -1])\n\n x = tf.transpose(x, perm=[2, 3, 0, 1])\n\n return x\n\n\[email protected]_keras_serializable(package=\"Text\")\nclass MultiHeadRelativeAttention(tf.keras.layers.MultiHeadAttention):\n \"\"\"A multi-head attention layer with relative attention + position encoding.\n\n This layer shares the same 
input/output projections as the common\n `tf.keras.layers.MultiHeadAttention` layer.\n\n When it calculates attention logits, position encoding is projected to form\n relative keys. The logits are composed by shifted relative logits and content\n logits.\n\n **Note: This layer is currently experimental.\n\n Attributes:\n kernel_initializer: The kernel initializer. Defaults to variance_scaling.\n\n Call args:\n query: Query `Tensor` of shape `[B, T, dim]`.\n value: Value `Tensor` of shape `[B, S, dim]`.\n content_attention_bias: Bias `Tensor` for content based attention of shape\n `[num_heads, dim]`.\n positional_attention_bias: Bias `Tensor` for position based attention of\n shape `[num_heads, dim]`.\n key: Optional key `Tensor` of shape `[B, S, dim]`. If not given, will use\n `value` for both `key` and `value`, which is the most common case.\n relative_position_encoding: Relative positional encoding `Tensor` of shape\n `[B, L, dim]`.\n segment_matrix: Optional `Tensor` representing segmentation IDs used in\n XLNet of shape `[B, S, S + M]`.\n segment_encoding: Optional `Tensor` representing the segmentation encoding\n as used in XLNet of shape `[2, num_heads, dim]`.\n segment_attention_bias: Optional trainable bias parameter added to the query\n had when calculating the segment-based attention score used in XLNet of\n shape `[num_heads, dim]`.\n state: Optional `Tensor` of shape `[B, M, E]` where M is the length of the\n state or memory. If passed, this is also attended over as in Transformer\n XL.\n attention_mask: A boolean mask of shape `[B, T, S]` that prevents attention\n to certain positions.\n \"\"\"\n\n def __init__(self,\n kernel_initializer=\"variance_scaling\",\n **kwargs):\n super().__init__(kernel_initializer=kernel_initializer,\n **kwargs)\n\n def _build_from_signature(self, query, value, key=None):\n super(MultiHeadRelativeAttention, self)._build_from_signature(\n query=query,\n value=value,\n key=key)\n if hasattr(value, \"shape\"):\n value_shape = tf.TensorShape(value.shape)\n else:\n value_shape = value\n if key is None:\n key_shape = value_shape\n elif hasattr(key, \"shape\"):\n key_shape = tf.TensorShape(key.shape)\n else:\n key_shape = key\n\n common_kwargs = dict(\n kernel_initializer=self._kernel_initializer,\n bias_initializer=self._bias_initializer,\n kernel_regularizer=self._kernel_regularizer,\n bias_regularizer=self._bias_regularizer,\n activity_regularizer=self._activity_regularizer,\n kernel_constraint=self._kernel_constraint,\n bias_constraint=self._bias_constraint)\n\n with tf.init_scope():\n einsum_equation, _, output_rank = _build_proj_equation(\n key_shape.rank - 1, bound_dims=1, output_dims=2)\n self._encoding_dense = tf.keras.layers.EinsumDense(\n einsum_equation,\n output_shape=_get_output_shape(output_rank - 1,\n [self._num_heads, self._key_dim]),\n bias_axes=None,\n name=\"encoding\",\n **common_kwargs)\n\n def compute_attention(self,\n query,\n key,\n value,\n position,\n content_attention_bias,\n positional_attention_bias,\n segment_matrix=None,\n segment_encoding=None,\n segment_attention_bias=None,\n attention_mask=None):\n \"\"\"Computes the attention.\n\n This function defines the computation inside `call` with projected\n multihead Q, K, V, R inputs.\n\n Args:\n query: Projected query `Tensor` of shape `[B, T, N, key_dim]`.\n key: Projected key `Tensor` of shape `[B, S + M, N, key_dim]`.\n value: Projected value `Tensor` of shape `[B, S + M, N, key_dim]`.\n position: Projected position `Tensor` of shape `[B, L, N, key_dim]`.\n 
content_attention_bias: Trainable bias parameter added to the query head\n when calculating the content-based attention score.\n positional_attention_bias: Trainable bias parameter added to the query\n head when calculating the position-based attention score.\n segment_matrix: Optional `Tensor` representing segmentation IDs used in\n XLNet.\n segment_encoding: Optional trainable `Tensor` representing the\n segmentation encoding as used in XLNet.\n segment_attention_bias: Optional trainable bias parameter added to the\n query had when calculating the segment-based attention score used in\n XLNet.\n attention_mask: (default None) Optional mask that is added to attention\n logits. If state is not None, the mask source sequence dimension should\n extend M.\n\n Returns:\n attention_output: Multi-headed output of attention computation of shape\n `[B, S, N, key_dim]`.\n\n \"\"\"\n content_attention = tf.einsum(self._dot_product_equation,\n key,\n query + content_attention_bias)\n positional_attention = tf.einsum(self._dot_product_equation,\n position,\n query + positional_attention_bias)\n positional_attention = _rel_shift(\n positional_attention, klen=tf.shape(content_attention)[3])\n\n if segment_matrix is not None:\n segment_attention = tf.einsum(\"bind,snd->bnis\",\n query + segment_attention_bias,\n segment_encoding)\n target_shape = tf.shape(positional_attention)\n segment_attention = tf.where(\n tf.broadcast_to(tf.expand_dims(segment_matrix, 1), target_shape),\n tf.broadcast_to(segment_attention[:, :, :, 1:], target_shape),\n tf.broadcast_to(segment_attention[:, :, :, :1], target_shape))\n attention_sum = (\n content_attention + positional_attention + segment_attention)\n else:\n attention_sum = content_attention + positional_attention\n\n attention_scores = tf.multiply(\n attention_sum, 1.0 / math.sqrt(float(self._key_dim)))\n\n attention_scores = self._masked_softmax(attention_scores, attention_mask)\n\n attention_output = self._dropout_layer(attention_scores)\n\n attention_output = tf.einsum(self._combine_equation,\n attention_output,\n value)\n return attention_output\n\n def call(self,\n query,\n value,\n content_attention_bias,\n positional_attention_bias,\n key=None,\n relative_position_encoding=None,\n segment_matrix=None,\n segment_encoding=None,\n segment_attention_bias=None,\n state=None,\n attention_mask=None):\n \"\"\"Compute multi-head relative attention over inputs.\n\n Size glossary:\n * Number of heads (H): the number of attention heads.\n * Value size (V): the size of each value embedding per head.\n * Key size (K): the size of each key embedding per head. Equally, the size\n of each query embedding per head. 
Typically K <= V.\n * Batch dimensions (B).\n * Query (target) attention axes shape (T).\n * Value (source) attention axes shape (S), the rank must match the target.\n * Encoding length (L): The relative positional encoding length.\n\n Args:\n query: attention input.\n value: attention input.\n content_attention_bias: A trainable bias parameter added to the query head\n when calculating the content-based attention score.\n positional_attention_bias: A trainable bias parameter added to the query\n head when calculating the position-based attention score.\n key: attention input.\n relative_position_encoding: relative positional encoding for key and\n value.\n segment_matrix: Optional `Tensor` representing segmentation IDs used in\n XLNet.\n segment_encoding: Optional `Tensor` representing the segmentation encoding\n as used in XLNet.\n segment_attention_bias: Optional trainable bias parameter added to the\n query had when calculating the segment-based attention score used in\n XLNet.\n state: (default None) optional state. If passed, this is also attended\n over as in TransformerXL.\n attention_mask: (default None) Optional mask that is added to attention\n logits. If state is not None, the mask source sequence dimension should\n extend M.\n\n Returns:\n attention_output: The result of the computation, of shape [B, T, E],\n where `T` is for target sequence shapes and `E` is the query input last\n dimension if `output_shape` is `None`. Otherwise, the multi-head outputs\n are projected to the shape specified by `output_shape`.\n \"\"\"\n if not self._built_from_signature:\n self._build_from_signature(query, value, key=key)\n if key is None:\n key = value\n if state is not None and state.shape.ndims > 1:\n value = tf.concat([state, value], 1)\n key = tf.concat([state, key], 1)\n\n # `query` = [B, T, N ,H]\n query = self._query_dense(query)\n\n # `key` = [B, S + M, N, H]\n key = self._key_dense(key)\n\n # `value` = [B, S + M, N, H]\n value = self._value_dense(value)\n\n # `position` = [B, L, N, H]\n position = self._encoding_dense(relative_position_encoding)\n\n attention_output = self.compute_attention(\n query=query,\n key=key,\n value=value,\n position=position,\n content_attention_bias=content_attention_bias,\n positional_attention_bias=positional_attention_bias,\n segment_matrix=segment_matrix,\n segment_encoding=segment_encoding,\n segment_attention_bias=segment_attention_bias,\n attention_mask=attention_mask)\n\n # `attention_output` = [B, S, N, H]\n attention_output = self._output_dense(attention_output)\n\n return attention_output\n\n\[email protected]_keras_serializable(package=\"Text\")\nclass TwoStreamRelativeAttention(MultiHeadRelativeAttention):\n \"\"\"Two-stream relative self-attention for XLNet.\n\n In XLNet, each token has two associated vectors at each self-attention layer,\n the content stream (h) and the query stream (g).\n\n The content stream is the self-attention stream as in Transformer XL and\n represents the context and content (the token itself).\n\n The query stream only has access to contextual information and the position,\n but not the content.\n\n This layer shares the same build signature as\n `tf.keras.layers.MultiHeadAttention` but has different input/output\n projections.\n\n **Note: This layer is currently experimental.\n\n Call args:\n content_stream: `Tensor` of shape `[B, T, dim]`.\n content_attention_bias: Bias `Tensor` for content based attention of shape\n `[num_heads, dim]`.\n positional_attention_bias: Bias `Tensor` for position based attention of\n 
shape `[num_heads, dim]`.\n query_stream: `Tensor` of shape `[B, P, dim]`.\n target_mapping: `Tensor` of shape `[B, P, S]`.\n relative_position_encoding: Relative positional encoding `Tensor` of shape\n `[B, L, dim]`.\n segment_matrix: Optional `Tensor` representing segmentation IDs used in\n XLNet of shape `[B, S, S + M]`.\n segment_encoding: Optional `Tensor` representing the segmentation\n encoding as used in XLNet of shape `[2, num_heads, dim]`.\n segment_attention_bias: Optional trainable bias parameter added to the\n query had when calculating the segment-based attention score used in\n XLNet of shape `[num_heads, dim]`.\n state: Optional `Tensor` of shape [B, M, E] where M is the length of the\n state or memory.\n If passed, this is also attended over as in Transformer XL.\n content_attention_mask: a boolean mask of shape `[B, T, S]` that\n prevents attention to certain positions for content attention computation.\n query_attention_mask: a boolean mask of shape `[B, T, S]` that\n prevents attention to certain position for query attention computation.\n \"\"\"\n\n def call(self,\n content_stream,\n content_attention_bias,\n positional_attention_bias,\n query_stream,\n relative_position_encoding,\n target_mapping=None,\n segment_matrix=None,\n segment_encoding=None,\n segment_attention_bias=None,\n state=None,\n content_attention_mask=None,\n query_attention_mask=None):\n \"\"\"Compute multi-head relative attention over inputs.\n\n Size glossary:\n * Number of heads (H): the number of attention heads.\n * Value size (V): the size of each value embedding per head.\n * Key size (K): the size of each key embedding per head. Equally, the size\n of each query embedding per head. Typically K <= V.\n * Number of predictions (P): the number of predictions.\n * Batch dimensions (B).\n * Query (target) attention axes shape (T).\n * Value (source) attention axes shape (S), the rank must match the target.\n * Encoding length (L): The relative positional encoding length.\n\n Args:\n content_stream: The content representation, commonly referred to as h.\n This serves a similar role to the standard hidden states in\n Transformer-XL.\n content_attention_bias: A trainable bias parameter added to the query head\n when calculating the content-based attention score.\n positional_attention_bias: A trainable bias parameter added to the query\n head when calculating the position-based attention score.\n query_stream: The query representation, commonly referred to as g. This\n only has access to contextual information and position, but not content.\n If not provided, then this is MultiHeadRelativeAttention with\n self-attention.\n relative_position_encoding: relative positional encoding for key and\n value.\n target_mapping: Optional `Tensor` representing the target mapping used in\n partial prediction.\n segment_matrix: Optional `Tensor` representing segmentation IDs used in\n XLNet.\n segment_encoding: Optional `Tensor` representing the segmentation encoding\n as used in XLNet.\n segment_attention_bias: Optional trainable bias parameter added to the\n query head when calculating the segment-based attention score.\n state: (default None) optional state. If passed, this is also attended\n over as in TransformerXL and XLNet.\n content_attention_mask: (default None) Optional mask that is added to\n content attention logits. If state is not None, the mask source sequence\n dimension should extend M.\n query_attention_mask: (default None) Optional mask that is added to query\n attention logits. 
If state is not None, the mask source sequence\n dimension should extend M.\n\n Returns:\n content_attention_output, query_attention_output: the results of the\n computation, both of shape [B, T, E]. `T` is for target sequence shapes,\n `E` is the query input last dimension if `output_shape` is `None`.\n Otherwise, the multi-head outputs are projected to the shape specified\n by `output_shape`.\n \"\"\"\n if not self._built_from_signature:\n self._build_from_signature(content_stream, content_stream, content_stream)\n if state is not None and state.shape.ndims > 1:\n content_and_memory_stream = tf.concat([state, content_stream], 1)\n else:\n content_and_memory_stream = content_stream\n\n # `query` = [B, T, N, H]\n query = self._query_dense(content_stream)\n\n # `key` = [B, S + M, N, H]\n key = self._key_dense(content_and_memory_stream)\n\n # `value` = [B, S + M, N, H]\n value = self._value_dense(content_and_memory_stream)\n\n # `position` = [B, L, N, H]\n position = self._encoding_dense(relative_position_encoding)\n\n content_attention_output = self.compute_attention(\n query=query,\n key=key,\n value=value,\n position=position,\n content_attention_bias=content_attention_bias,\n positional_attention_bias=positional_attention_bias,\n segment_matrix=segment_matrix,\n segment_encoding=segment_encoding,\n segment_attention_bias=segment_attention_bias,\n attention_mask=content_attention_mask)\n\n # `content_attention_output` = [B, S, N, H]\n content_attention_output = self._output_dense(content_attention_output)\n\n query_attention_output = None\n if query_stream is not None:\n query = self._query_dense(query_stream)\n if target_mapping is not None:\n query = tf.einsum(\"bmnd,bml->blnd\", query, target_mapping)\n query_attention_output = self.compute_attention(\n query=query,\n key=key,\n value=value,\n position=position,\n content_attention_bias=content_attention_bias,\n positional_attention_bias=positional_attention_bias,\n segment_matrix=segment_matrix,\n segment_encoding=segment_encoding,\n segment_attention_bias=segment_attention_bias,\n attention_mask=query_attention_mask)\n query_attention_output = tf.einsum(\"blnd,bml->bmnd\",\n query_attention_output,\n target_mapping)\n else:\n query_attention_output = self.compute_attention(\n query=query,\n key=key,\n value=value,\n position=position,\n content_attention_bias=content_attention_bias,\n positional_attention_bias=positional_attention_bias,\n segment_matrix=segment_matrix,\n segment_encoding=segment_encoding,\n segment_attention_bias=segment_attention_bias,\n attention_mask=query_attention_mask)\n query_attention_output = self._output_dense(query_attention_output)\n\n return content_attention_output, query_attention_output\n" ]
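The `_build_proj_equation` helper in the relative-attention file above composes the einsum spec used to project `[B, L, dim]` encodings into per-head `[B, L, N, H]` tensors; for `free_dims=2, bound_dims=1, output_dims=2` it returns the equation `"abc,cde->abde"`, which `_encoding_dense` then hands to `EinsumDense`. A minimal, illustrative sketch of that projection with toy sizes (the head count and dimensions below are arbitrary and not taken from any config; only the equation and `bias_axes=None` mirror the file):

import tensorflow as tf

# Equation produced by _build_proj_equation(2, bound_dims=1, output_dims=2)
# in the file above: [B, L, dim] x [dim, N, H] -> [B, L, N, H].
num_heads, key_dim = 4, 16   # toy sizes
proj = tf.keras.layers.EinsumDense(
    "abc,cde->abde",
    output_shape=(None, num_heads, key_dim),
    bias_axes=None,
    name="encoding")
x = tf.zeros([2, 10, 64])    # [batch, encoding_length, hidden], toy values
print(proj(x).shape)         # (2, 10, 4, 16)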
[ [ "tensorflow.train.CheckpointManager" ], [ "tensorflow.io.gfile.isdir", "tensorflow.constant", "tensorflow.train.latest_checkpoint", "tensorflow.shape", "tensorflow.reduce_any", "tensorflow.train.Checkpoint", "tensorflow.keras.regularizers.l2", "tensorflow.reduce_sum", "tensorflow.io.gfile.exists", "tensorflow.cast", "tensorflow.zeros_like", "tensorflow.distribute.get_strategy", "tensorflow.keras.layers.InputSpec", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ], [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.TensorShape", "tensorflow.keras.layers.Activation", "tensorflow.keras.constraints.get", "tensorflow.keras.constraints.serialize", "tensorflow.keras.regularizers.get", "tensorflow.cast", "tensorflow.keras.initializers.serialize", "tensorflow.keras.regularizers.serialize", "tensorflow.keras.mixed_precision.global_policy", "tensorflow.keras.layers.Dropout", "tensorflow.keras.initializers.get" ], [ "tensorflow.keras.Input", "tensorflow.python.distribute.combinations.combine", "tensorflow.test.main", "numpy.random.rand", "tensorflow.keras.backend.set_image_data_format", "numpy.math.log2", "tensorflow.keras.layers.InputSpec" ], [ "tensorflow.TensorShape", "tensorflow.transpose", "tensorflow.concat", "tensorflow.shape", "tensorflow.slice", "tensorflow.broadcast_to", "tensorflow.reshape", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.expand_dims", "tensorflow.einsum", "tensorflow.init_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.4", "2.5", "2.6", "2.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.12" ] } ]
szabi-luxonis/openvino
[ "c8dd831fc3ba68a256ab47edb4f6bf3cb5e804be" ]
[ "model-optimizer/extensions/middle/UpsampleToResample.py" ]
[ "\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging as log\nimport math\nfrom typing import Dict\n\nimport numpy as np\n\nfrom extensions.ops.Cast import Cast\nfrom extensions.ops.elementwise import Mul\nfrom extensions.ops.interpolate import Interpolate\nfrom mo.front.common.layout import get_height_dim, get_width_dim, get_depth_dim\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.front.tf.graph_utils import create_op_with_const_inputs, create_op_node_with_second_input\nfrom mo.graph.graph import Graph, Node\nfrom mo.middle.replacement import MiddleReplacementPattern\nfrom mo.ops.shape import Shape\nfrom mo.ops.strided_slice import StridedSlice\n\n\nclass UpsampleToResample(MiddleReplacementPattern):\n enabled = True\n force_clean_up = True\n\n def run_after(self):\n from extensions.middle.pass_separator import MiddleStart\n return [MiddleStart]\n\n def run_before(self):\n from extensions.middle.pass_separator import MiddleFinish\n return [MiddleFinish]\n\n def pattern(self):\n return dict(\n nodes=[\n ('upsample', dict(kind='op', op='Upsample')),\n ('output', dict(kind='data'))],\n edges=[('upsample', 'output')]\n )\n\n def replace_pattern(self, graph: Graph, match: Dict[str, Node]):\n log.debug('UpsampleToResample is triggered')\n upsample = match['upsample']\n upsample_name = upsample.soft_get('name', upsample.id)\n input_shape = upsample.in_port(0).data.get_shape()\n input_shape_rank = len(input_shape)\n if input_shape_rank not in [4, 5]:\n log.warning('The input shape is not 4D or 5D for op {}'.format(upsample.soft_get('name')))\n return\n\n depth_scale = None\n if len(upsample.in_nodes()) == 2:\n if upsample.in_node(1).value is None:\n return\n scales = upsample.in_node(1).value\n assert len(scales) in (4, 5), 'Supported scales rank is 4 or 5, but it is {} for node {}'.format(\n len(scales), upsample_name)\n if not (math.isclose(scales[0], 1, rel_tol=1e-5) and math.isclose(scales[1], 1, rel_tol=1e-5)):\n return\n height_scale = scales[2]\n width_scale = scales[3]\n if len(scales) == 5:\n depth_scale = scales[4]\n else:\n height_scale = upsample['height_scale']\n width_scale = upsample['width_scale']\n\n if 1 in upsample.in_ports() and not upsample.in_port(1).disconnected():\n upsample.in_port(1).disconnect()\n\n shape = Shape(graph, {'name': upsample_name + '/0_port'}).create_node()\n\n layout = graph.graph['layout']\n\n if input_shape_rank == 4:\n begin_value = int64_array([get_height_dim(layout, input_shape_rank)])\n factor_value = np.array([height_scale, width_scale])\n else:\n begin_value = int64_array([get_depth_dim(layout, input_shape_rank)])\n factor_value = np.array([depth_scale, height_scale, width_scale])\n\n ss = create_op_with_const_inputs(graph, StridedSlice,\n {1: begin_value,\n 2: int64_array([get_width_dim(layout, input_shape_rank) + 1]),\n 3: int64_array([1])\n },\n {'name': upsample_name + '/ss_0_port',\n 'begin_mask': int64_array([1]),\n 'end_mask': int64_array([1]),\n 
'new_axis_mask': int64_array([0]),\n 'shrink_axis_mask': int64_array([0]),\n 'ellipsis_mask': int64_array([0])\n }\n )\n\n mul = create_op_node_with_second_input(graph, Mul, factor_value, {'name': upsample_name + '/factor_mul_'})\n\n source = upsample.in_port(0).get_connection().get_source()\n source.connect(shape.in_port(0))\n shape.out_port(0).connect(ss.in_port(0))\n\n ss.out_port(0).connect(mul.in_port(0))\n\n # Create Interpolate operation\n if input_shape_rank == 4:\n axes = int64_array([get_height_dim(layout, input_shape_rank),\n get_width_dim(layout, input_shape_rank)])\n else:\n axes = int64_array([get_depth_dim(layout, input_shape_rank),\n get_height_dim(layout, input_shape_rank),\n get_width_dim(layout, input_shape_rank)])\n\n resample_op = Interpolate(graph, dict(name=upsample_name + '/Interpolate',\n axes=axes, mode=upsample.attrs()['mode'],\n antialias=0, convert_to_resample=True)).create_node()\n\n upsample.add_input_port(1, skip_if_exist=True)\n assert upsample.in_port(1).disconnected()\n mul.out_port(0).connect(resample_op.in_port(1))\n\n upsample.in_port(0).get_connection().set_destination(resample_op.in_port(0))\n upsample.out_port(0).get_connection().set_source(resample_op.out_port(0))\n\n convert_to_float = Cast(graph, dict(dst_type=np.float32)).create_node()\n convert_to_int = Cast(graph, dict(dst_type=np.int64)).create_node()\n\n mul.in_port(0).get_connection().insert_node(convert_to_float)\n mul.out_port(0).get_connection().insert_node(convert_to_int)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Pandinosaurus/gala
[ "975ed783a6cb3c0afe24a921afdacf2f27184fcf", "975ed783a6cb3c0afe24a921afdacf2f27184fcf" ]
[ "tests/test_watershed.py", "gala/imio.py" ]
[ "import os\nimport time\nimport numpy as np\nfrom scipy import ndimage as nd\nfrom numpy.testing import assert_array_equal, assert_array_less\n\nfrom gala import morpho\n\nrundir = os.path.dirname(__file__)\n\ndef time_me(function):\n def wrapped(*args, **kwargs):\n start = time.time()\n r = function(*args, **kwargs)\n end = time.time()\n return r, (end-start)*1000\n return wrapped\n\n\ntest_idxs = list(range(4))\nnum_tests = len(test_idxs)\nfns = [os.path.join(rundir, 'toy-data/test-%02i-probabilities.txt' % i)\n for i in test_idxs]\nprobs = list(map(np.loadtxt, fns))\nfns = [os.path.join(rundir, 'toy-data/test-%02i-watershed.txt' % i)\n for i in test_idxs]\nresults = [np.loadtxt(fn, dtype=np.int32) for fn in fns]\nlandscape = np.array([1,0,1,2,1,3,2,0,2,4,1,0])\n\n\ndef test_watershed_images():\n wss = [morpho.watershed(probs[i], dams=(i == 0)) for i in range(2)]\n for i, (ws, res) in enumerate(zip(wss, results)):\n yield (assert_array_equal, ws, res,\n 'Image watershed test %i failed.' % i)\n\n\ndef test_watershed():\n regular_watershed_result = np.array([1,1,1,0,2,0,3,3,3,0,4,4])\n regular_watershed = morpho.watershed(landscape, dams=True)\n assert_array_equal(regular_watershed, regular_watershed_result)\n\n\ndef test_watershed_nodams():\n nodam_watershed_result = np.array([1,1,1,2,2,2,3,3,3,4,4,4])\n nodam_watershed = morpho.watershed(landscape, dams=False)\n assert_array_equal(nodam_watershed, nodam_watershed_result)\n\n\ndef test_watershed_seeded():\n seeds_bool = (landscape == 0)\n seeds_unique = nd.label(seeds_bool)[0]\n seeded_watershed_result = np.array([1,1,1,1,1,0,2,2,2,0,3,3])\n seeded_watershed1 = morpho.watershed(landscape, seeds_bool, dams=True)\n seeded_watershed2 = morpho.watershed(landscape, seeds_unique, dams=True)\n assert_array_equal(seeded_watershed1, seeded_watershed_result)\n assert_array_equal(seeded_watershed2, seeded_watershed_result)\n\n\ndef test_watershed_seeded_nodams():\n seeds_bool = landscape==0\n seeded_nodam_ws_result = np.array([1,1,1,1,1,1,2,2,2,3,3,3])\n seeded_nodam_ws = morpho.watershed(landscape,\n seeds=seeds_bool, override_skimage=True, dams=False)\n assert_array_equal(seeded_nodam_ws, seeded_nodam_ws_result)\n\n\ndef test_watershed_saddle_basin():\n saddle_landscape = np.array([[0,0,3],[2,1,2],[0,0,3]])\n saddle_result = np.array([[1,1,1],[0,0,0],[2,2,2]])\n saddle_ws = morpho.watershed(saddle_landscape, dams=True)\n assert_array_equal(saddle_ws, saddle_result)\n\n\ndef test_watershed_plateau_performance():\n \"\"\"Test time taken by watershed on plateaus is acceptable.\n \n Versions prior to 2d319e performed redundant computations in the\n idxs_adjacent_to_labels queue which resulted in an explosion in \n runtime on plateaus. 
This test checks against that behavior.\n \"\"\"\n plat = np.ones((11,11))\n plat[5,5] = 0\n timed_watershed = time_me(morpho.watershed)\n time_taken = timed_watershed(plat)[1]\n assert_array_less(time_taken, 100, 'watershed plateau too slow')\n\n\nif __name__ == '__main__':\n from numpy import testing\n testing.run_module_suite()\n", "# built-ins\nimport os\nimport json\nfrom os.path import split as split_path, join as join_path\nfrom fnmatch import filter as fnfilter\nimport logging\nimport itertools as it\nimport subprocess\nimport tempfile as tmp\n\n# libraries\nimport h5py\nfrom PIL import Image\n\nfrom scipy.ndimage.measurements import label\n\nfrom numpy import array, uint8, uint16, uint32, uint64, zeros, \\\n zeros_like, squeeze, fromstring, ndim, concatenate, newaxis, swapaxes, \\\n savetxt, unique, double, cumsum, ndarray\nimport numpy as np\n\nfrom skimage.io.collection import alphanumeric_key\nfrom skimage.io import imread\n\n# local files\nfrom . import evaluate\nfrom . import morpho\n\n### Auto-detect file format\n\nsupported_image_extensions = ['png', 'tif', 'tiff', 'jpg', 'jpeg']\n\ndef read_image_stack(fn, *args, **kwargs):\n \"\"\"Read a 3D volume of images in image or .h5 format into a numpy.ndarray.\n\n This function attempts to automatically determine input file types and\n wraps specific image-reading functions.\n\n Parameters\n ----------\n fn : filename (string)\n A file path or glob pattern specifying one or more valid image files.\n The file format is automatically determined from this argument.\n\n *args : filenames (string, optional)\n More than one positional argument will be interpreted as a list of\n filenames pointing to all the 2D images in the stack.\n\n **kwargs : keyword arguments (optional)\n Arguments to be passed to the underlying functions. A 'crop'\n keyword argument is supported, as a list of length 6:\n [xmin, xmax, ymin, ymax, zmin, zmax]. Use 'None' for no crop in\n that coordinate.\n\n Returns\n -------\n stack : 3-dimensional numpy ndarray\n\n Notes\n -----\n If reading in .h5 format, keyword arguments are passed through to\n read_h5_stack().\n\n Automatic file type detection may be deprecated in the future.\n \"\"\"\n # TODO: Refactor. 
Rather than have implicit designation of stack format\n # based on filenames (*_boundpred.h5, etc), require explicit parameters\n # in config JSON files.\n if os.path.isdir(fn):\n fn += '/'\n d, fn = split_path(os.path.expanduser(fn))\n if len(d) == 0: d = '.'\n crop = kwargs.get('crop', [None]*6)\n if crop is None:\n crop = [None]*6\n if len(crop) == 4: crop.extend([None]*2)\n elif len(crop) == 2: crop = [None]*4 + crop\n kwargs['crop'] = crop\n if any(fn.endswith(ext) for ext in supported_image_extensions):\n # image types, such as a set of pngs or a multi-page tiff\n xmin, xmax, ymin, ymax, zmin, zmax = crop\n if len(args) > 0 and type(args[0]) == str and args[0].endswith(fn[-3:]):\n # input is a list of filenames\n fns = [fn] + [split_path(f)[1] for f in args]\n else:\n # input is a filename pattern to match\n fns = fnfilter(os.listdir(d), fn)\n if len(fns) == 1 and fns[0].endswith('.tif'):\n stack = read_multi_page_tif(join_path(d,fns[0]), crop)\n else:\n fns.sort(key=alphanumeric_key) # sort filenames numerically\n fns = fns[zmin:zmax]\n im0 = imread(join_path(d, fns[0]))\n ars = (imread(join_path(d, fn)) for fn in fns)\n im0 = im0[xmin:xmax, ymin:ymax]\n dtype = im0.dtype\n stack = zeros((len(fns),)+im0.shape, dtype)\n for i, im in enumerate(ars):\n stack[i] = im[xmin:xmax,ymin:ymax]\n elif fn.endswith('_boundpred.h5') or fn.endswith('_processed.h5'):\n # Ilastik batch prediction output file\n stack = read_prediction_from_ilastik_batch(os.path.join(d,fn), **kwargs)\n elif fn.endswith('.h5'):\n # other HDF5 file\n stack = read_h5_stack(join_path(d,fn), *args, **kwargs)\n elif os.path.isfile(os.path.join(d, 'superpixel_to_segment_map.txt')):\n # Raveler export\n stack = raveler_to_labeled_volume(d, *args, **kwargs)\n return squeeze(stack)\n\ndef write_image_stack(npy_vol, fn, **kwargs):\n \"\"\"Write a numpy.ndarray 3D volume to a stack of images or an HDF5 file.\n \n Parameters\n ----------\n npy_vol : numpy ndarray\n The volume to be written to disk.\n \n fn : string\n The filename to be written, or a format string when writing a 3D\n stack to a 2D format (e.g. a png image stack).\n \n **kwargs : keyword arguments\n Keyword arguments to be passed to wrapped functions. See\n corresponding docs for valid arguments.\n \n Returns\n -------\n out : None\n\n Examples\n --------\n >>> import numpy as np\n >>> from gala.imio import write_image_stack\n >>> im = 255 * np.array([\n ... [[0, 1, 0], [1, 0, 1], [0, 1, 0]],\n ... 
[[1, 0, 1], [0, 1, 0], [1, 0, 1]]], dtype=uint8)\n >>> im.shape\n (2, 3, 3)\n >>> write_image_stack(im, 'image-example-%02i.png', axis=0)\n >>> import os\n >>> fns = sorted(filter(lambda x: x.endswith('.png'), os.listdir('.')))\n >>> fns # two 3x3 images\n ['image-example-00.png', 'image-example-01.png']\n >>> os.remove(fns[0]); os.remove(fns[1]) # doctest cleanup\n \"\"\"\n fn = os.path.expanduser(fn)\n if fn.endswith('.png'):\n write_png_image_stack(npy_vol, fn, **kwargs)\n elif fn.endswith('.h5'):\n write_h5_stack(npy_vol, fn, **kwargs)\n elif fn.endswith('.vtk'):\n write_vtk(npy_vol, fn, **kwargs)\n else:\n raise ValueError('Image format not supported: ' + fn + '\\n')\n\n### Standard image formats (png, tiff, etc.)\n\ndef pil_to_numpy(img):\n \"\"\"Convert an Image object to a numpy array.\n \n Parameters\n ----------\n img : Image object (from the Python Imaging Library)\n \n Returns\n -------\n ar : numpy ndarray\n The corresponding numpy array (same shape as the image)\n \"\"\"\n ar = squeeze(array(img.getdata()).reshape((img.size[1], img.size[0], -1)))\n return ar\n\ndef read_multi_page_tif(fn, crop=[None]*6):\n \"\"\"Read a multi-page tif file into a numpy array.\n \n Parameters\n ----------\n fn : string\n The filename of the image file being read.\n \n Returns\n -------\n ar : numpy ndarray\n The image stack in array format.\n\n Notes\n -----\n Currently, only grayscale images are supported.\n \"\"\"\n xmin, xmax, ymin, ymax, zmin, zmax = crop\n img = Image.open(fn)\n pages = []\n if zmin is not None and zmin > 0:\n img.seek(zmin)\n eof = False\n while not eof and img.tell() != zmax:\n pages.append(pil_to_numpy(img)[...,newaxis])\n try:\n img.seek(img.tell()+1)\n except EOFError:\n eof = True\n return concatenate(pages, axis=-1)\n\n\ndef write_png_image_stack(npy_vol, fn, axis=-1, bitdepth=None):\n \"\"\"Write a numpy.ndarray 3D volume to a stack of .png images.\n\n Parameters\n ----------\n npy_vol : numpy ndarray, shape (M, N, P)\n The volume to be written to disk.\n\n fn : format string\n The file pattern to which to write the volume.\n\n axis : int, optional (default = -1)\n The axis along which output the images. If the input array has shape\n (M, N, P), and axis is 1, the function will write N images of shape\n (M, P) to disk. 
In keeping with Python convention, -1 specifies the\n last axis.\n\n Returns\n -------\n None : None\n No value is returned.\n\n Notes\n -----\n Only 8-bit and 16-bit single-channel images are currently supported.\n \"\"\"\n npy_vol = swapaxes(npy_vol, 0, axis)\n fn = os.path.expanduser(fn)\n if 0 <= npy_vol.max() <= 1 and npy_vol.dtype == double:\n bitdepth = 16 if None else bitdepth\n imdtype = uint16 if bitdepth == 16 else uint8\n npy_vol = ((2**bitdepth-1)*npy_vol).astype(imdtype)\n if 1 < npy_vol.max() < 256 and bitdepth is None or bitdepth == 8:\n mode = 'L'\n mode_base = 'L'\n npy_vol = uint8(npy_vol)\n elif 256 <= np.max(npy_vol) < 2**16 and bitdepth is None or \\\n bitdepth == 16:\n mode = 'I;16'\n mode_base = 'I'\n npy_vol = uint16(npy_vol)\n else:\n mode = 'RGBA'\n mode_base = 'RGBA'\n npy_vol = uint32(npy_vol)\n for z, pl in enumerate(npy_vol):\n im = Image.new(mode_base, pl.T.shape)\n im.frombytes(pl.tostring(), 'raw', mode)\n im.save(fn % z)\n\n### VTK structured points array format\n\ndef extract_segments(seg, ids):\n \"\"\"Get a uint8 volume containing only the specified segment ids.\n\n Parameters\n ----------\n seg : array of int\n The input segmentation.\n ids : list of int, maximum length 255\n A list of segments to extract from `seg`.\n\n Returns\n -------\n segs : array of uint8\n A volume with 1, 2, ..., ``len(ids)`` labels where the required\n segments were, and 0 elsewhere.\n\n Notes\n -----\n This function is designed to output volumes to VTK format for\n viewing in ITK-SNAP\n\n Examples\n --------\n >>> segments = array([[45, 45, 51, 51],\n ... [45, 83, 83, 51]])\n >>> extract_segments(segments, [83, 45])\n array([[2, 2, 0, 0],\n [2, 1, 1, 0]], dtype=uint8)\n \"\"\"\n segs = np.zeros(seg.shape, dtype=np.uint8)\n for i, s in enumerate(ids):\n segs[seg == s] = i + 1\n return segs\n\n\nnumpy_type_to_vtk_string = {\n np.uint8:'unsigned_char', np.int8:'char', np.uint16:'unsigned_short',\n np.int16:'short', np.uint32:'unsigned_int', np.int32:'int',\n np.uint64:'unsigned_long', np.int64:'long', np.float32:'float',\n np.float64:'double'\n}\n\n\nvtk_string_to_numpy_type = \\\n dict([(v,k) for k, v in numpy_type_to_vtk_string.items()])\n\ndef write_vtk(ar, fn, spacing=[1.0, 1.0, 1.0]):\n \"\"\"Write 3D volume to VTK structured points format file.\n\n Code adapted from Erik Vidholm's writeVTK.m Matlab implementation.\n\n Parameters\n ----------\n ar : a numpy array, shape (M, N, P)\n The array to be written to disk.\n fn : string\n The desired output filename.\n spacing : iterable of float, optional (default: [1.0, 1.0, 1.0])\n The voxel spacing in x, y, and z.\n\n Returns\n -------\n None : None\n This function does not have a return value.\n \"\"\"\n # write header\n f = open(fn, 'wb')\n f.write(b'# vtk DataFile Version 3.0\\n')\n f.write(b'created by write_vtk (Python implementation by JNI)\\n')\n f.write(b'BINARY\\n')\n f.write(b'DATASET STRUCTURED_POINTS\\n')\n f.write(str.encode(' '.join(['DIMENSIONS'] +\n list(map(str, ar.shape[-1::-1]))) + '\\n'))\n f.write(str.encode(' '.join(['ORIGIN'] + list(map(str, zeros(3)))) + '\\n'))\n f.write(str.encode(' '.join(['SPACING'] + list(map(str, spacing))) + '\\n'))\n f.write(str.encode('POINT_DATA ' + str(ar.size) + '\\n'))\n f.write(str.encode('SCALARS image_data ' +\n numpy_type_to_vtk_string[ar.dtype.type] + '\\n'))\n f.write(b'LOOKUP_TABLE default\\n');\n f.close()\n\n # write data as binary\n f = open(fn, 'ab')\n f.write(ar.tobytes())\n f.close()\n\n\ndef read_vtk(fin):\n \"\"\"Read a numpy volume from a VTK structured 
points file.\n\n Code adapted from Erik Vidholm's readVTK.m Matlab implementation.\n\n Parameters\n ----------\n fin : string\n The input filename.\n\n Returns\n -------\n ar : numpy ndarray\n The array contained in the file.\n \"\"\"\n f = open(fin, 'rb')\n num_lines_in_header = 10\n lines = [bytes.decode(f.readline()) for i in range(num_lines_in_header)]\n shape_line = [line for line in lines if line.startswith('DIMENSIONS')][0]\n type_line = [line for line in lines \n if line.startswith('SCALARS') or line.startswith('VECTORS')][0]\n ar_shape = [int(b) for b in shape_line.rstrip().split(' ')[-1:0:-1]]\n ar_type = vtk_string_to_numpy_type[type_line.rstrip().split(' ')[2]]\n if type_line.startswith('VECTORS'):\n ar_shape.append(-1)\n ar = fromstring(f.read(), ar_type).reshape(ar_shape)\n return ar\n\n### HDF5 format\n\ndef read_h5_stack(fn, group='stack', crop=[None]*6, **kwargs):\n \"\"\"Read a volume in HDF5 format into numpy.ndarray.\n\n Parameters\n ----------\n fn : string\n The filename of the input HDF5 file.\n group : string, optional (default 'stack')\n The group within the HDF5 file containing the dataset.\n crop : list of int, optional (default '[None]*6', no crop)\n A crop to get of the volume of interest. Only available for 2D and 3D\n volumes.\n\n Returns\n -------\n stack : numpy ndarray\n The stack contained in fn, possibly cropped.\n \"\"\"\n fn = os.path.expanduser(fn)\n dset = h5py.File(fn, 'r')\n if group not in dset:\n raise ValueError(\"HDF5 file (%s) doesn't have group (%s)!\" % \n (fn, group))\n a = dset[group]\n if ndim(a) == 2:\n xmin, xmax, ymin, ymax = crop[:4]\n a = a[xmin:xmax, ymin:ymax]\n elif ndim(a) == 3:\n xmin, xmax, ymin, ymax, zmin, zmax = crop\n a = a[xmin:xmax, ymin:ymax, zmin:zmax]\n stack = array(a)\n dset.close()\n return stack\n\ndef compute_sp_to_body_map(sps, bodies):\n \"\"\"Return unique (sp, body) pairs from a superpixel map and segmentation.\n\n Parameters\n ----------\n sps : numpy ndarray, arbitrary shape\n The superpixel (supervoxel) map.\n bodies : numpy ndarray, same shape as sps\n The corresponding segmentation.\n\n Returns\n -------\n sp_to_body : numpy ndarray, shape (NUM_SPS, 2)\n\n Notes\n -----\n No checks are made for sane inputs. 
This means that incorrect input,\n such as non-matching shapes, or superpixels mapping to more than one\n segment, will result in undefined behavior downstream with no warning.\n \"\"\"\n sp_to_body = unique(list(zip(sps.ravel(), bodies.ravel()))).astype(uint64)\n return sp_to_body\n\ndef write_mapped_segmentation(superpixel_map, sp_to_body_map, fn, \n sp_group='stack', sp_to_body_group='transforms'):\n \"\"\"Write a mapped segmentation to an HDF5 file.\n\n Parameters\n ----------\n superpixel_map : numpy ndarray, arbitrary shape\n sp_to_body_map : numpy ndarray, shape (NUM_SPS, 2)\n A many-to-one map of superpixels to bodies (segments), specified as\n rows of (superpixel, body) pairs.\n fn : string\n The output filename.\n sp_group : string, optional (default 'stack')\n the group within the HDF5 file to store the superpixel map.\n sp_to_body_group : string, optional (default 'transforms')\n the group within the HDF5 file to store the superpixel to body map.\n\n Returns\n -------\n None\n \"\"\"\n fn = os.path.expanduser(fn)\n fout = h5py.File(fn, 'w')\n fout.create_dataset(sp_group, data=superpixel_map)\n fout.create_dataset(sp_to_body_group, data=sp_to_body_map)\n fout.close()\n\n\ndef read_mapped_segmentation(fn, \n sp_group='stack', sp_to_body_group='transforms'):\n \"\"\"Read a volume in mapped HDF5 format into a numpy.ndarray pair.\n\n Parameters\n ----------\n fn : string\n The filename to open.\n sp_group : string, optional (default 'stack')\n The group within the HDF5 file where the superpixel map is stored.\n sp_to_body_group : string, optional (default 'transforms')\n The group within the HDF5 file where the superpixel to body map is\n stored.\n\n Returns\n -------\n segmentation : numpy ndarray, same shape as 'superpixels', int type\n The segmentation induced by the superpixels and map.\n \"\"\"\n sps, sp2body = read_mapped_segmentation_raw(fn, sp_group, sp_to_body_group)\n segmentation = apply_segmentation_map(sps, sp2body)\n return segmentation\n\ndef apply_segmentation_map(superpixels, sp_to_body_map):\n \"\"\"Return a segmentation from superpixels and a superpixel to body map.\n\n Parameters\n ----------\n superpixels : numpy ndarray, arbitrary shape, int type\n A superpixel (or supervoxel) map (aka label field).\n sp_to_body_map : numpy ndarray, shape (NUM_SUPERPIXELS, 2), int type\n An array of (superpixel, body) map pairs.\n\n Returns\n -------\n segmentation : numpy ndarray, same shape as 'superpixels', int type\n The segmentation induced by the superpixels and map.\n \"\"\"\n forward_map = np.zeros(sp_to_body_map[:, 0].max() + 1,\n sp_to_body_map.dtype)\n forward_map[sp_to_body_map[:, 0]] = sp_to_body_map[:, 1]\n segmentation = forward_map[superpixels]\n return segmentation\n\ndef read_mapped_segmentation_raw(fn, \n sp_group='stack', sp_to_body_group='transforms'):\n \"\"\"Read a volume in mapped HDF5 format into a numpy.ndarray pair.\n\n Parameters\n ----------\n fn : string\n The filename to open.\n sp_group : string, optional (default 'stack')\n The group within the HDF5 file where the superpixel map is stored.\n sp_to_body_group : string, optional (default 'transforms')\n The group within the HDF5 file where the superpixel to body map is\n stored.\n\n Returns\n -------\n sp_map : numpy ndarray, arbitrary shape\n The superpixel (or supervoxel) map.\n sp_to_body_map : numpy ndarray, shape (NUM_SUPERPIXELS, 2)\n The superpixel to body (segment) map, as (superpixel, body) pairs.\n \"\"\"\n fn = os.path.expanduser(fn)\n dset = h5py.File(fn, 'r')\n if sp_group not in 
dset:\n raise ValueError(\n \"HDF5 file (%s) doesn't have group (%s)!\" % (fn, sp_group))\n if sp_to_body_group not in dset:\n raise ValueError(\n \"HDF5 file (%s) doesn't have group (%s)!\" % (fn, sp_to_body_group))\n sp_map = array(dset[sp_group])\n sp_to_body_map = array(dset[sp_to_body_group])\n dset.close()\n return sp_map, sp_to_body_map\n\n\ndef write_h5_stack(npy_vol, fn, group='stack', compression=None, chunks=None,\n shuffle=None, attrs=None):\n \"\"\"Write a numpy.ndarray 3D volume to an HDF5 file.\n\n Parameters\n ----------\n npy_vol : numpy ndarray\n The array to be saved to HDF5.\n fn : string\n The output filename.\n group : string, optional (default: 'stack')\n The group within the HDF5 file to write to.\n compression : {None, 'gzip', 'szip', 'lzf'}, optional (default: None)\n The compression to use, if any. Note that 'lzf' is only available\n through h5py, so implementations in other languages will not be able\n to read files created with this compression.\n chunks : tuple, True, or None (default: None)\n Whether to use chunking in the HDF5 dataset. Default is None. True\n lets h5py choose a chunk size automatically. Otherwise, use a tuple\n of int of the same length as `npy_vol.ndim`. From the h5py\n documentation: \"In the real world, chunks of size 10kB - 300kB work\n best, especially for compression. Very small chunks lead to lots of\n overhead in the file, while very large chunks can result in \n inefficient I/O.\"\n shuffle : bool, optional\n Shuffle the bytes on disk to improve compression efficiency.\n attrs : dict, optional\n A dictionary, keyed by string, of attributes to append to the dataset.\n \"\"\"\n fn = os.path.expanduser(fn)\n fout = h5py.File(fn, 'a')\n if group in fout:\n del fout[group]\n fout.create_dataset(group, data=npy_vol, compression=compression,\n chunks=chunks, shuffle=shuffle)\n if attrs is not None:\n for attr, value in attrs.items():\n fout[group].attrs[attr] = value\n fout.close()\n\n### Raveler format\n\ndef ucm_to_raveler(ucm, sp_threshold=0.0, body_threshold=0.1, **kwargs):\n \"\"\"Return Raveler map from a UCM.\n \n Parameters\n ----------\n ucm : numpy ndarray, shape (M, N, P)\n An ultrametric contour map. This is a map of scored segment boundaries\n such that if A, B, and C are segments, then \n score(A, B) = score(B, C) >= score(A, C), for some permutation of\n A, B, and C.\n A hierarchical agglomeration process produces a UCM.\n sp_threshold : float, optional (default: 0.0)\n The value for which to threshold the UCM to obtain the superpixels.\n body_threshold : float, optional (default: 0.1)\n The value for which to threshold the UCM to obtain the segments/bodies.\n The condition `body_threshold >= sp_threshold` should hold in order\n to obtain sensible results.\n **kwargs : dict, optional\n Keyword arguments to be passed through to `segs_to_raveler`.\n\n Returns\n -------\n superpixels : numpy ndarray, shape (M, N, P)\n The superpixel map. Non-zero superpixels are unique to each plane.\n That is, `np.unique(superpixels[i])` and `np.unique(superpixels[j])` \n have only 0 as their intersection.\n sp_to_segment : numpy ndarray, shape (Q, 3)\n The superpixel to segment map. Segments are unique to each plane. 
The\n first number on each line is the plane number.\n segment_to_body : numpy ndarray, shape (R, 2)\n The segment to body map.\n \"\"\"\n sps = label(ucm < sp_threshold)[0]\n bodies = label(ucm <= body_threshold)[0]\n return segs_to_raveler(sps, bodies, **kwargs)\n\ndef segs_to_raveler(sps, bodies, min_size=0, do_conn_comp=False, sps_out=None):\n \"\"\"Return a Raveler tuple from 3D superpixel and body maps.\n \n Parameters\n ----------\n sps : numpy ndarray, shape (M, N, P)\n The supervoxel map.\n bodies : numpy ndarray, shape (M, N, P)\n The body map. Superpixels should not map to more than one body.\n min_size : int, optional (default: 0)\n Superpixels smaller than this size on a particular plane are blacked\n out.\n do_conn_comp : bool (default: False)\n Whether to do a connected components operation on each plane. This is\n required if we want superpixels to be contiguous on each plane, since\n 3D-contiguous superpixels are not guaranteed to be contiguous along\n a slice.\n sps_out : numpy ndarray, shape (M, N, P), optional (default: None)\n A Raveler-compatible superpixel map, meaning that superpixels are\n unique to each plane along axis 0. (See `superpixels` in the return\n values.) If provided, this saves significant computation time.\n\n Returns\n -------\n superpixels : numpy ndarray, shape (M, N, P)\n The superpixel map. Non-zero superpixels are unique to each plane.\n That is, `np.unique(superpixels[i])` and `np.unique(superpixels[j])` \n have only 0 as their intersection.\n sp_to_segment : numpy ndarray, shape (Q, 3)\n The superpixel to segment map. Segments are unique to each plane. The\n first number on each line is the plane number.\n segment_to_body : numpy ndarray, shape (R, 2)\n The segment to body map.\n \"\"\"\n if sps_out is None:\n sps_out = raveler_serial_section_map(sps, min_size, do_conn_comp, False)\n segment_map = raveler_serial_section_map(bodies, min_size, do_conn_comp)\n segment_to_body = unique(list(zip(segment_map.ravel(), bodies.ravel())))\n segment_to_body = segment_to_body[segment_to_body[:,0] != 0]\n segment_to_body = concatenate((array([[0,0]]), segment_to_body), axis=0)\n sp_to_segment = []\n for i, (sp_map_i, segment_map_i, body_map_i) in \\\n enumerate(zip(sps_out, segment_map, bodies)):\n segment_map_i *= sp_map_i.astype(bool)\n valid = (sp_map_i != 0) + (segment_map_i == 0)\n sp_to_segment.append(\n unique(list(zip(it.repeat(i), sp_map_i[valid], segment_map_i[valid]))))\n valid = segment_map != 0\n logging.debug('plane %i done'%i)\n logging.info('total superpixels before: ' + str(len(unique(sps))) +\n ' total superpixels after: ' + str(len(unique(sps_out))))\n sp_to_segment = concatenate(sp_to_segment, axis=0)\n return sps_out, sp_to_segment, segment_to_body\n\ndef raveler_serial_section_map(nd_map, min_size=0, do_conn_comp=False, \n globally_unique_ids=True):\n \"\"\"Produce `serial_section_map` and label one corner of each plane as 0.\n\n Raveler chokes when there are no pixels with label 0 on a plane, so this\n function produces the serial section map as normal but then adds a 0 to\n the [0, 0] corner of each plane, IF the volume doesn't already have 0\n pixels.\n\n Notes\n -----\n See `serial_section_map` for more info.\n \"\"\"\n nd_map = serial_section_map(nd_map, min_size, do_conn_comp, \n globally_unique_ids)\n if not (nd_map == 0).any():\n nd_map[:,0,0] = 0\n return nd_map\n\ndef serial_section_map(nd_map, min_size=0, do_conn_comp=False, \n globally_unique_ids=True):\n \"\"\"Produce a plane-by-plane superpixel map with unique IDs.\n\n 
Raveler requires sps to be unique and different on each plane. This\n function converts a fully 3D superpixel map to a serial-2D superpixel\n map compatible with Raveler.\n\n Parameters\n ----------\n nd_map : np.ndarray, int, shape (M, N, P)\n The original superpixel map.\n min_size : int (optional, default 0)\n Remove superpixels smaller than this size (on each plane)\n do_conn_comp : bool (optional, default False)\n In some cases, a single supervoxel may result in two disconnected\n superpixels in 2D. Set to True to force these to have different IDs.\n globally_unique_ids : bool (optional, default True)\n If True, every plane has unique IDs, with plane n having IDs {i1, i2,\n ..., in} and plane n+1 having IDs {in+1, in+2, ..., in+ip}, and so on.\n\n Returns\n -------\n relabeled_planes : np.ndarray, int, shape (M, N, P)\n A volume equal to nd_map but with superpixels relabeled along axis 0.\n That is, the input volume is reinterpreted as M slices of shape (N, P).\n \"\"\"\n if do_conn_comp:\n label_fct = label\n else:\n def label_fct(a):\n relabeled, fmap, imap = evaluate.relabel_from_one(a)\n return relabeled, len(imap)\n def remove_small(a):\n return morpho.remove_small_connected_components(a, min_size)\n mplanes = map(remove_small, nd_map)\n relabeled_planes, nids_per_plane = zip(*map(label_fct, mplanes))\n start_ids = concatenate((array([0], int), cumsum(nids_per_plane)[:-1])) \\\n if globally_unique_ids else [0]*len(nids_per_plane)\n relabeled_planes = [(relabeled_plane + start_id)[newaxis, ...]\n for relabeled_plane, start_id in zip(relabeled_planes, start_ids)]\n return concatenate(relabeled_planes, axis=0)\n\ndef write_to_raveler(sps, sp_to_segment, segment_to_body, directory, gray=None,\n raveler_dir='/usr/local/raveler-hdf', nproc_contours=16,\n body_annot=None):\n \"\"\"Output a segmentation to Raveler format. \n\n Parameters\n ----------\n sps : np.ndarray, int, shape (nplanes, nx, ny)\n The superpixel map. Superpixels can only occur on one plane.\n sp_to_segment : np.ndarray, int, shape (nsps + nplanes, 3)\n Superpixel-to-segment map as a 3 column list of (plane number,\n superpixel id, segment id). Segments must be unique to a plane, and\n each plane must contain the map {0: 0}\n segment_to_body: np.ndarray, int, shape (nsegments, 2)\n The segment to body map.\n directory: string \n The directory in which to write the stack. 
This directory and all\n necessary subdirectories will be created.\n gray: np.ndarray, uint8 or uint16, shape (nplanes, nx, ny) (optional)\n The grayscale images corresponding to the superpixel maps.\n raveler dir: string (optional, default `/usr/local/raveler-hdf`)\n Where Raveler is installed.\n nproc_contours: int (optional, default 16) \n How many processes to use when generating the Raveler contours.\n body_annot: dict or np.ndarray (optional)\n Either a dictionary to write to JSON in Raveler body annotation\n format, or a numpy ndarray of the segmentation from which to compute\n orphans and non traversing bodies (which then get written out as body\n annotations).\n\n Returns\n -------\n None\n\n Notes\n -----\n Raveler is the EM segmentation proofreading tool developed in-house at\n Janelia for the FlyEM project.\n \"\"\"\n sp_path = os.path.join(directory, 'superpixel_maps')\n im_path = os.path.join(directory, 'grayscale_maps')\n tile_path = os.path.join(directory, 'tiles')\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # write superpixel->segment->body maps\n savetxt(os.path.join(directory, 'superpixel_to_segment_map.txt'),\n sp_to_segment, '%i') \n savetxt(os.path.join(directory, 'segment_to_body_map.txt'), \n segment_to_body, '%i')\n\n # write superpixels\n if not os.path.exists(sp_path): \n os.mkdir(sp_path)\n write_png_image_stack(sps, os.path.join(sp_path, 'sp_map.%05i.png'),\n bitdepth=16, axis=0)\n\n # write grayscale\n if gray is not None:\n if not os.path.exists(im_path): \n os.mkdir(im_path)\n write_png_image_stack(gray, \n os.path.join(im_path, 'img.%05d.png'), axis=0)\n\n # body annotations\n if body_annot is not None:\n if type(body_annot) == ndarray:\n orphans = morpho.orphans(body_annot)\n non_traversing = morpho.non_traversing_segments(body_annot)\n body_annot = raveler_body_annotations(orphans, non_traversing)\n write_json(body_annot, os.path.join(directory, 'annotations-body.json'))\n\n # make tiles, bounding boxes, and contours, and compile HDF5 stack info.\n with tmp.TemporaryFile() as tmp_stdout:\n try: \n def call(arglist):\n return subprocess.call(arglist, stdout=tmp_stdout)\n r1 = call(['createtiles', im_path, sp_path, tile_path])\n r2 = call(['bounds', directory])\n r3 = call(['compilestack', directory])\n except:\n logging.warning(\n 'Error during Raveler export post-processing step. ' +\n 'Possible causes are that you do not have Raveler installed ' +\n 'or you did not specify the correct installation path.')\n logging.warning('Return codes: %i, %i, %i' % (r1, r2, r3))\n# with sys.exc_info() as ex:\n# logging.warning('Exception info:\\n' + '\\n'.join(map(str, ex)))\n # make permissions friendly for proofreaders.\n try:\n subprocess.call(['chmod', '-R', 'go=u', directory])\n except:\n logging.warning('Could not change Raveler export permissions.')\n\ndef raveler_output_shortcut(svs, seg, gray, outdir, sps_out=None):\n \"\"\"Compute the Raveler format and write to directory, all at once.\n \n Parameters\n ----------\n svs : np.ndarray, int, shape (M, N, P)\n The supervoxel map.\n seg : np.ndarray, int, shape (M, N, P)\n The segmentation map. It is assumed that no supervoxel crosses\n any segment boundary.\n gray : np.ndarray, uint8, shape (M, N, P)\n The grayscale EM images corresponding to the above segmentations.\n outdir : string\n The export directory for the Raveler volume.\n sps_out : np.ndarray, int, shape (M, N, P) (optional)\n The precomputed serial section 2D superpixel map. 
Output will be\n much faster if this is provided.\n\n Returns\n -------\n sps_out : np.ndarray, int, shape (M, N, P)\n The computed serial section 2D superpixel map. Keep this when\n making multiple calls to `raveler_output_shortcut` with the\n same supervoxel map.\n \"\"\"\n sps_out, sp2seg, seg2body = segs_to_raveler(svs, seg, sps_out=sps_out)\n write_to_raveler(sps_out, sp2seg, seg2body, outdir, gray, body_annot=seg)\n return sps_out\n\ndef raveler_body_annotations(orphans, non_traversing=None):\n \"\"\"Return a Raveler body annotation dictionary of orphan segments.\n\n Orphans are labeled as body annotations with `not sure` status and\n a string indicating `orphan` in the comments field.\n\n Non-traversing segments have only one contact with the surface of\n the volume, and are labeled `does not traverse` in the comments.\n\n Parameters\n ----------\n orphans : iterable of int\n The ID numbers corresponding to orphan segments.\n non_traversing : iterable of int (optional, default None)\n The ID numbers of segments having only one exit point in the volume.\n\n Returns\n -------\n body_annotations : dict\n A dictionary containing entries for 'data' and 'metadata' as\n specified in the Raveler body annotations format [1, 2].\n\n References\n ----------\n [1] https://wiki.janelia.org/wiki/display/flyem/body+annotation+file+format\n and:\n [2] https://wiki.janelia.org/wiki/display/flyem/generic+file+format\n \"\"\"\n data = [{'status': 'not sure', 'comment': 'orphan', 'body ID': int(o)}\n for o in orphans]\n if non_traversing is not None:\n data.extend([{'status': 'not sure', 'comment': 'does not traverse',\n 'body ID': int(n)} for n in non_traversing])\n metadata = {'description': 'body annotations', 'file version': 2}\n return {'data': data, 'metadata': metadata}\n\ndef write_json(annot, fn='annotations-body.json', directory=None):\n \"\"\"Write an annotation dictionary in Raveler format to a JSON file.\n \n The annotation file format is described in:\n https://wiki.janelia.org/wiki/display/flyem/body+annotation+file+format\n and:\n https://wiki.janelia.org/wiki/display/flyem/generic+file+format\n\n Parameters\n ----------\n annot : dict\n A body annotations dictionary (described in pages above).\n fn : string (optional, default 'annotations-body.json')\n The filename to which to write the file.\n directory : string (optional, default None, or '.')\n A directory in which to write the file.\n\n Returns\n -------\n None\n \"\"\"\n if directory is not None:\n fn = join_path(directory, fn)\n with open(fn, 'w') as f:\n json.dump(annot, f, indent=2)\n\n\ndef raveler_rgba_to_int(im, ignore_alpha=True):\n \"\"\"Convert a volume using Raveler's RGBA encoding to int. [1]\n\n Parameters\n ----------\n im : np.ndarray, shape (M, N, P, 4)\n The image stack to be converted.\n ignore_alpha : bool, optional\n By default, the alpha channel does not encode anything. However, if\n we ever need 32 bits, it would be used. This function supports that\n with `ignore_alpha=False`. 
(default is True.)\n\n Returns\n -------\n im_int : np.ndarray, shape (M, N, P)\n The label volume.\n\n References\n ----------\n [1] https://wiki.janelia.org/wiki/display/flyem/Proofreading+data+and+formats\n \"\"\"\n if im.ndim == 4 and im.shape[3] == 4:\n if ignore_alpha:\n im = im[..., :3]\n im_int = (im * 255 ** np.arange(im.shape[3])).sum(axis=3)\n else:\n im_int = im\n return im_int\n\n\ndef raveler_to_labeled_volume(rav_export_dir, get_glia=False, \n use_watershed=False, probability_map=None, crop=None):\n \"\"\"Import a raveler export stack into a labeled segmented volume.\n \n Parameters\n ----------\n rav_export_dir : string\n The directory containing the Raveler stack.\n get_glia : bool (optional, default False)\n Return the segment numbers corresponding to glia, if available.\n use_watershed : bool (optional, default False)\n Fill in 0-labeled voxels using watershed.\n probability_map : np.ndarray, same shape as volume to be read (optional)\n If `use_watershed` is True, use `probability_map` as the landscape. If\n this is not provided, it uses a flat landscape.\n crop : tuple of int (optional, default None)\n A 6-tuple of [xmin, xmax, ymin, ymax, zmin, zmax].\n\n Returns\n -------\n output_volume : np.ndarray, shape (Z, X, Y)\n The segmentation in the Raveler volume.\n glia : list of int (optional, only returned if `get_glia` is True)\n The IDs in the segmentation corresponding to glial cells.\n \"\"\"\n from . import morpho\n spmap = read_image_stack(\n os.path.join(rav_export_dir, 'superpixel_maps', '*.png'), crop=crop)\n spmap = raveler_rgba_to_int(spmap)\n sp2seg_list = np.loadtxt(\n os.path.join(rav_export_dir, 'superpixel_to_segment_map.txt'), uint32)\n seg2bod_list = np.loadtxt(\n os.path.join(rav_export_dir, 'segment_to_body_map.txt'), uint32)\n sp2seg = {}\n max_sp = sp2seg_list[:,1].max()\n start_plane = sp2seg_list[:,0].min()\n for z, sp, seg in sp2seg_list:\n if z not in sp2seg:\n sp2seg[z] = zeros(max_sp+1, uint32)\n sp2seg[z][sp] = seg\n max_seg = seg2bod_list[:,0].max()\n seg2bod = zeros(max_seg+1, uint32)\n seg2bod[seg2bod_list[:,0]] = seg2bod_list[:,1]\n initial_output_volume = zeros_like(spmap)\n for i, m in enumerate(spmap):\n j = start_plane + i\n initial_output_volume[i] = seg2bod[sp2seg[j][m]]\n if use_watershed:\n probs = np.ones_like(spmap) if probability_map is None \\\n else probability_map\n output_volume = morpho.watershed(probs, seeds=initial_output_volume)\n else:\n output_volume = initial_output_volume\n if (output_volume[:, 0, 0] == 0).all() and \\\n (output_volume == 0).sum() == output_volume.shape[0]:\n output_volume[:, 0, 0] = output_volume[:, 0, 1]\n if get_glia:\n annots = json.load(\n open(os.path.join(rav_export_dir, 'annotations-body.json'), 'r'))\n glia = [a['body ID'] for a in annots['data'] \n if a.get('comment', None) == 'glia']\n return output_volume, glia\n else:\n return output_volume\n\n### Ilastik formats\n\n# obtained from Ilastik 0.5.4\nilastik_label_colors = \\\n [0xffff0000, 0xff00ff00, 0xffffff00, 0xff0000ff, \n 0xffff00ff, 0xff808000, 0xffc0c0c0, 0xfff2022d] \n\ndef write_ilastik_project(images, labels, fn, label_names=None):\n \"\"\"Write one or more image volumes and corresponding labels to Ilastik.\n \n Parameters\n ----------\n images : np.ndarray or list of np.ndarray, shapes (M_i, N_i[, P_i])\n The grayscale images to be saved.\n labels : np.ndarray or list of np.ndarray, same shapes as `images`\n The label maps corresponding to the images.\n fn : string\n The filename to save the project in.\n label_names : list of 
string (optional)\n The names corresponding to each label in `labels`. (Not implemented!)\n\n Returns\n -------\n None\n\n Notes\n -----\n Limitations:\n Assumes the same labels are used for all images.\n Supports only grayscale images and volumes, and a maximum of 8 labels.\n Requires at least one unlabeled voxel in the label field.\n \"\"\"\n f = h5py.File(fn, 'w')\n if type(images) != list:\n images = [images]\n labels = [labels]\n ulbs = unique(concatenate(list(map(unique, labels))))[1:]\n colors = array(ilastik_label_colors[:len(ulbs)])\n names = ['Label %i'%i for i in ulbs]\n names = array(names, '|S%i'%max(map(len, names)))\n label_attributes = {'color':colors, 'name':names, 'number':ulbs}\n for i, (im, lb) in enumerate(zip(images, labels)):\n if im.ndim == 2:\n new_shape = (1,1)+im.shape+(1,)\n elif im.ndim == 3:\n new_shape = (1,)+im.shape+(1,)\n else:\n raise ValueError('Unsupported number of dimensions in image.')\n im = im.reshape(new_shape)\n lb = lb.reshape(new_shape)\n root = 'DataSets/dataItem%02i/'%i\n f[root+'data'] = im\n f[root+'labels'] = lb\n for k, v in label_attributes.items():\n f[root+'labels'].attrs[k] = v\n f[root].attrs['Name'] = ''\n f[root].attrs['fileName'] = ''\n for subgroup in ['Description', 'Labeler', 'Name']:\n f['Project/%s'%subgroup] = array('', dtype='|S1')\n f['ilastikVersion'] = array(0.5)\n f.close()\n\ndef write_ilastik_batch_volume(im, fn):\n \"\"\"Write a volume to an HDF5 file for Ilastik batch processing.\n \n Parameters\n ----------\n im : np.ndarray, shape (M, N[, P])\n The image volume to be saved.\n fn : string\n The filename in which to save the volume.\n\n Returns\n -------\n None\n \"\"\"\n if im.ndim == 2:\n im = im.reshape((1,1)+im.shape+(1,))\n elif im.ndim == 3:\n im = im.reshape((1,)+im.shape+(1,))\n else:\n raise ValueError('Unsupported number of dimensions in image.')\n write_h5_stack(im, fn, group='/volume/data')\n\ndef read_prediction_from_ilastik_batch(fn, **kwargs):\n \"\"\"Read the prediction produced by Ilastik from batch processing.\n \n Parameters\n ----------\n fn : string\n The filename to read from.\n group : string (optional, default '/volume/prediction')\n Where to read from in the HDF5 file hierarchy.\n single_channel : bool (optional, default True)\n Read only the 0th channel (final dimension) from the volume.\n\n Returns\n -------\n None\n \"\"\"\n if 'group' not in kwargs:\n kwargs['group'] = '/volume/prediction'\n a = squeeze(read_h5_stack(fn, **kwargs))\n if kwargs.get('single_channel', True):\n a = a[..., 0]\n return a\n\n\ndef read_cremi(fn, datasets=['/volumes/raw', '/volumes/labels/neuron_ids']):\n \"\"\"Read volume formatted as described in CREMI data challenge [1]_.\n\n The format is HDF5, with:\n - raw image data (uint8) in: /volumes/raw\n - (optional) membrane prediction data (uint8, inverted) in:\n /volumes/membrane\n - synaptic cleft annotations in: /volumes/labels/clefts\n - neuron ids (uint64) in: /volumes/labels/neuron_ids\n - (optional) fragment data (uint64) in: /volumes/labels/fragments\n\n We currently ignore the synaptic cleft annotations, and return only\n the raw image and the neuron ids.\n\n Parameters\n ----------\n fn : string\n The input filename.\n\n Returns\n -------\n datasets : list of array\n The arrays corresponding to the requested datasets.\n\n References\n ----------\n .. 
[1]: https://cremi.org/data/\n \"\"\"\n out = [read_h5_stack(fn, group=ds) for ds in datasets]\n return out\n\n\ndef write_cremi(data_dict, fn, resolution=(40., 4., 4.)):\n \"\"\"Write a volume formatted as described in CREMI data challenge [1]_.\n\n Parameters\n ----------\n data_dict : dictionary of string to arrays\n The data dictionary mapping HDF groups to arrays.\n fn : string\n The filename to write to.\n resolution : tuple of float, optional\n The resolution along each axis of the datasets. Currently, this\n is the same for each dataset written.\n \"\"\"\n for group, data in data_dict.items():\n write_h5_stack(data, fn, group=group, compression='gzip',\n attrs={'resolution': resolution})\n" ]
[ [ "numpy.testing.run_module_suite", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.testing.assert_array_less", "scipy.ndimage.label", "numpy.array", "numpy.loadtxt" ], [ "numpy.swapaxes", "numpy.ones_like", "numpy.uint32", "numpy.unique", "numpy.uint8", "numpy.squeeze", "numpy.arange", "numpy.cumsum", "scipy.ndimage.measurements.label", "numpy.concatenate", "numpy.ndim", "numpy.max", "numpy.uint16", "numpy.zeros_like", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] } ]
xiangruhuang/OpenPCDet
[ "d82d9594a0629ffed0c457aedc304e0805e93221", "d82d9594a0629ffed0c457aedc304e0805e93221" ]
[ "pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py", "pcdet/ops/torch_hash/test_torch_hash.py" ]
[ "import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules\nfrom ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils\nfrom ....utils import common_utils\n\n\ndef bilinear_interpolate_torch(im, x, y):\n \"\"\"\n Args:\n im: (H, W, C) [y, x]\n x: (N)\n y: (N)\n\n Returns:\n\n \"\"\"\n x0 = torch.floor(x).long()\n x1 = x0 + 1\n\n y0 = torch.floor(y).long()\n y1 = y0 + 1\n\n x0 = torch.clamp(x0, 0, im.shape[1] - 1)\n x1 = torch.clamp(x1, 0, im.shape[1] - 1)\n y0 = torch.clamp(y0, 0, im.shape[0] - 1)\n y1 = torch.clamp(y1, 0, im.shape[0] - 1)\n\n Ia = im[y0, x0]\n Ib = im[y1, x0]\n Ic = im[y0, x1]\n Id = im[y1, x1]\n\n wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)\n wb = (x1.type_as(x) - x) * (y - y0.type_as(y))\n wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)\n wd = (x - x0.type_as(x)) * (y - y0.type_as(y))\n ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)\n return ans\n\n\ndef sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):\n \"\"\"\n Args:\n rois: (M, 7 + C)\n points: (N, 3)\n sample_radius_with_roi:\n num_max_points_of_part:\n\n Returns:\n sampled_points: (N_out, 3)\n \"\"\"\n if points.shape[0] < num_max_points_of_part:\n distance = (points[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)\n min_dis, min_dis_roi_idx = distance.min(dim=-1)\n roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)\n point_mask = min_dis < roi_max_dim + sample_radius_with_roi\n else:\n start_idx = 0\n point_mask_list = []\n while start_idx < points.shape[0]:\n distance = (points[start_idx:start_idx + num_max_points_of_part, None, :] - rois[None, :, 0:3]).norm(dim=-1)\n min_dis, min_dis_roi_idx = distance.min(dim=-1)\n roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)\n cur_point_mask = min_dis < roi_max_dim + sample_radius_with_roi\n point_mask_list.append(cur_point_mask)\n start_idx += num_max_points_of_part\n point_mask = torch.cat(point_mask_list, dim=0)\n\n if point_mask.sum() == 0:\n point_mask = torch.zeros(points.shape[0], dtype=torch.bool, device=points.device)\n point_mask[0] = True\n assert point_mask.sum() > 0\n sampled_points = points[point_mask, :]\n\n return sampled_points, point_mask\n\n\ndef sector_fps(points, num_sampled_points, num_sectors, seg_labels=None):\n \"\"\"\n Args:\n points: (N, 3)\n num_sampled_points: int\n num_sectors: int\n\n Returns:\n sampled_points: (N_out, 3)\n \"\"\"\n sector_size = np.pi * 2 / num_sectors\n point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi\n sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors)\n xyz_points_list = []\n xyz_batch_cnt = []\n num_sampled_points_list = []\n for k in range(num_sectors):\n mask = (sector_idx == k)\n cur_num_points = mask.sum().item()\n if cur_num_points > 0:\n xyz_points_list.append(points[mask])\n xyz_batch_cnt.append(cur_num_points)\n ratio = cur_num_points / points.shape[0]\n num_sampled_points_list.append(\n min(cur_num_points, math.ceil(ratio * num_sampled_points))\n )\n\n if len(xyz_batch_cnt) == 0:\n xyz_points_list.append(points)\n xyz_batch_cnt.append(len(points))\n num_sampled_points_list.append(num_sampled_points)\n print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')\n\n xyz = torch.cat(xyz_points_list, dim=0)\n xyz_batch_cnt = torch.tensor(xyz_batch_cnt, 
device=points.device).int()\n sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()\n\n sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(\n xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt\n ).long()\n\n sampled_points = xyz[sampled_pt_idxs]\n if seg_labels is not None:\n seg_labels = seg_labels[sampled_pt_idxs]\n return sampled_points, seg_labels\n else:\n return sampled_points\n\n\nclass VoxelSetAbstraction(nn.Module):\n def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,\n num_rawpoint_features=None, **kwargs):\n super().__init__()\n self.model_cfg = model_cfg\n self.voxel_size = voxel_size\n self.point_cloud_range = point_cloud_range\n self.on_seg = model_cfg.get(\"ON_SEG\", False)\n self.suffix = model_cfg.get(\"SUFFIX\", '')\n\n SA_cfg = self.model_cfg.SA_LAYER\n\n self.SA_layers = nn.ModuleList()\n self.SA_layer_names = []\n self.downsample_times_map = {}\n c_in = 0\n for src_name in self.model_cfg.FEATURES_SOURCE:\n if src_name in ['bev', 'raw_points']:\n continue\n self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR\n\n if SA_cfg[src_name].get('INPUT_CHANNELS', None) is None:\n input_channels = SA_cfg[src_name].MLPS[0][0] \\\n if isinstance(SA_cfg[src_name].MLPS[0], list) else SA_cfg[src_name].MLPS[0]\n else:\n input_channels = SA_cfg[src_name]['INPUT_CHANNELS']\n\n cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(\n input_channels=input_channels, config=SA_cfg[src_name]\n )\n self.SA_layers.append(cur_layer)\n self.SA_layer_names.append(src_name)\n\n c_in += cur_num_c_out\n \n\n if 'bev' in self.model_cfg.FEATURES_SOURCE:\n c_bev = num_bev_features\n c_in += c_bev\n\n if 'raw_points' in self.model_cfg.FEATURES_SOURCE:\n self.SA_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(\n input_channels=num_rawpoint_features - 3, config=SA_cfg['raw_points']\n )\n\n c_in += cur_num_c_out\n\n self.vsa_point_feature_fusion = nn.Sequential(\n nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),\n nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),\n nn.ReLU(),\n )\n self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES\n self.num_point_features_before_fusion = c_in\n\n def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):\n \"\"\"\n Args:\n keypoints: (N1 + N2 + ..., 4)\n bev_features: (B, C, H, W)\n batch_size:\n bev_stride:\n\n Returns:\n point_bev_features: (N1 + N2 + ..., C)\n \"\"\"\n x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]\n y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]\n\n x_idxs = x_idxs / bev_stride\n y_idxs = y_idxs / bev_stride\n\n point_bev_features_list = []\n for k in range(batch_size):\n bs_mask = (keypoints[:, 0] == k)\n\n cur_x_idxs = x_idxs[bs_mask]\n cur_y_idxs = y_idxs[bs_mask]\n cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)\n point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)\n point_bev_features_list.append(point_bev_features)\n\n point_bev_features = torch.cat(point_bev_features_list, dim=0) # (N1 + N2 + ..., C)\n return point_bev_features\n\n def sectorized_proposal_centric_sampling(self, roi_boxes, points, seg_labels=None):\n \"\"\"\n Args:\n roi_boxes: (M, 7 + C)\n points: (N, 3)\n\n Returns:\n sampled_points: (N_out, 3)\n \"\"\"\n\n sampled_points, point_mask = sample_points_with_roi(\n rois=roi_boxes, 
points=points,\n sample_radius_with_roi=self.model_cfg.SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI,\n num_max_points_of_part=self.model_cfg.SPC_SAMPLING.get('NUM_POINTS_OF_EACH_SAMPLE_PART', 200000)\n )\n if seg_labels is not None:\n seg_labels = seg_labels[point_mask]\n return sector_fps(\n points=sampled_points, num_sampled_points=self.model_cfg.NUM_KEYPOINTS,\n num_sectors=self.model_cfg.SPC_SAMPLING.NUM_SECTORS, seg_labels=seg_labels\n )\n\n def get_sampled_points(self, batch_dict):\n \"\"\"\n Args:\n batch_dict:\n\n Returns:\n keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z]\n \"\"\"\n batch_size = batch_dict['batch_size']\n on_seg = self.on_seg and (\"seg_labels\" in batch_dict)\n if self.model_cfg.POINT_SOURCE == 'raw_points':\n src_points = batch_dict['points'][:, 1:4]\n if on_seg:\n src_seg_labels = batch_dict['seg_labels'][:, 1] # segmentation labels\n batch_indices = batch_dict['points'][:, 0].long()\n elif self.model_cfg.POINT_SOURCE == 'voxel_centers':\n src_points = common_utils.get_voxel_centers(\n batch_dict['voxel_coords'][:, 1:4],\n downsample_times=1,\n voxel_size=self.voxel_size,\n point_cloud_range=self.point_cloud_range\n )\n batch_indices = batch_dict['voxel_coords'][:, 0].long()\n else:\n raise NotImplementedError\n keypoints_list = []\n if on_seg:\n keypoint_labels_list = []\n\n if isinstance(self.model_cfg.SAMPLE_METHOD, list):\n use_fps = 'FPS' in self.model_cfg.SAMPLE_METHOD\n use_spc = 'SPC' in self.model_cfg.SAMPLE_METHOD\n else:\n use_fps = 'FPS' == self.model_cfg.SAMPLE_METHOD\n use_spc = 'SPC' == self.model_cfg.SAMPLE_METHOD\n\n for bs_idx in range(batch_size):\n bs_mask = (batch_indices == bs_idx)\n sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)\n keypoints = []\n if on_seg:\n sampled_seg_labels = src_seg_labels[bs_mask].unsqueeze(dim=0) # (1, N, 3)\n keypoint_labels = []\n if use_fps:\n cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(\n sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS\n ).long()\n\n if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:\n times = int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1\n non_empty = cur_pt_idxs[0, :sampled_points.shape[1]]\n cur_pt_idxs[0] = non_empty.repeat(times)[:self.model_cfg.NUM_KEYPOINTS]\n\n cur_keypoints = sampled_points[0][cur_pt_idxs[0]]\n bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx\n keypoints.append(torch.cat((bs_idxs[:, None], cur_keypoints), dim=1))\n if on_seg:\n keypoint_labels.append(sampled_seg_labels[0][cur_pt_idxs[0]])\n\n if use_spc:\n if on_seg:\n cur_keypoints, cur_keypoint_labels = self.sectorized_proposal_centric_sampling(\n roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0],\n seg_labels=sampled_seg_labels[0]\n )\n bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx\n keypoints.append(torch.cat((bs_idxs[:, None], cur_keypoints), dim=1))\n keypoint_labels.append(cur_keypoint_labels)\n else:\n cur_keypoints = self.sectorized_proposal_centric_sampling(\n roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0]\n )\n bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx\n keypoints.append(torch.cat((bs_idxs[:, None], cur_keypoints), dim=1))\n\n keypoints = torch.cat(keypoints, axis=0) if isinstance(keypoints, list) else keypoints[0] # [x, N, 3]\n keypoints_list.append(keypoints)\n if on_seg:\n keypoint_labels = torch.cat(keypoint_labels, axis=0) if isinstance(keypoint_labels, list) else keypoint_labels[0] # [x, N, 3]\n 
keypoint_labels_list.append(keypoint_labels)\n\n keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3) or (N1 + N2 + ..., 4)\n\n if on_seg:\n keypoint_labels = torch.cat(keypoint_labels_list, dim=0).view(-1)\n return keypoints, keypoint_labels\n else:\n return keypoints\n\n @staticmethod\n def aggregate_keypoint_features_from_one_source(\n batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,\n filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None\n ):\n \"\"\"\n\n Args:\n aggregate_func:\n xyz: (N, 3)\n xyz_features: (N, C)\n xyz_bs_idxs: (N)\n new_xyz: (M, 3)\n new_xyz_batch_cnt: (batch_size), [N1, N2, ...]\n\n filter_neighbors_with_roi: True/False\n radius_of_neighbor: float\n num_max_points_of_part: int\n rois: (batch_size, num_rois, 7 + C)\n Returns:\n\n \"\"\"\n xyz_batch_cnt = xyz.new_zeros(batch_size).int()\n if filter_neighbors_with_roi:\n point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz\n point_features_list = []\n for bs_idx in range(batch_size):\n bs_mask = (xyz_bs_idxs == bs_idx)\n _, valid_mask = sample_points_with_roi(\n rois=rois[bs_idx], points=xyz[bs_mask],\n sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,\n )\n point_features_list.append(point_features[bs_mask][valid_mask])\n xyz_batch_cnt[bs_idx] = valid_mask.sum()\n\n valid_point_features = torch.cat(point_features_list, dim=0)\n xyz = valid_point_features[:, 0:3]\n xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None\n else:\n for bs_idx in range(batch_size):\n xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()\n\n pooled_points, pooled_features = aggregate_func(\n xyz=xyz.contiguous(),\n xyz_batch_cnt=xyz_batch_cnt,\n new_xyz=new_xyz,\n new_xyz_batch_cnt=new_xyz_batch_cnt,\n features=xyz_features.contiguous(),\n )\n return pooled_features\n\n def forward(self, batch_dict):\n \"\"\"\n Args:\n batch_dict:\n batch_size:\n keypoints: (B, num_keypoints, 3)\n multi_scale_3d_features: {\n 'x_conv4': ...\n }\n points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]\n spatial_features: optional\n spatial_features_stride: optional\n\n Returns:\n point_features: (N, C)\n point_coords: (N, 4)\n\n \"\"\"\n on_seg = self.on_seg and (\"seg_labels\" in batch_dict)\n if on_seg:\n keypoints, keypoint_labels = self.get_sampled_points(batch_dict)\n else:\n keypoints = self.get_sampled_points(batch_dict)\n\n point_features_list = []\n if 'bev' in self.model_cfg.FEATURES_SOURCE:\n point_bev_features = self.interpolate_from_bev_features(\n keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],\n bev_stride=batch_dict['spatial_features_stride']\n )\n point_features_list.append(point_bev_features)\n\n batch_size = batch_dict['batch_size']\n\n new_xyz = keypoints[:, 1:4].contiguous()\n new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()\n for k in range(batch_size):\n new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum()\n\n if 'raw_points' in self.model_cfg.FEATURES_SOURCE:\n raw_points = batch_dict['points']\n\n pooled_features = self.aggregate_keypoint_features_from_one_source(\n batch_size=batch_size, aggregate_func=self.SA_rawpoints,\n xyz=raw_points[:, 1:4],\n xyz_features=raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None,\n xyz_bs_idxs=raw_points[:, 0],\n new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,\n filter_neighbors_with_roi=self.model_cfg.SA_LAYER['raw_points'].get('FILTER_NEIGHBOR_WITH_ROI', 
False),\n radius_of_neighbor=self.model_cfg.SA_LAYER['raw_points'].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),\n rois=batch_dict.get('rois', None)\n )\n point_features_list.append(pooled_features)\n\n for k, src_name in enumerate(self.SA_layer_names):\n cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices\n cur_features = batch_dict['multi_scale_3d_features'][src_name].features.contiguous()\n\n xyz = common_utils.get_voxel_centers(\n cur_coords[:, 1:4], downsample_times=self.downsample_times_map[src_name],\n voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range\n )\n\n pooled_features = self.aggregate_keypoint_features_from_one_source(\n batch_size=batch_size, aggregate_func=self.SA_layers[k],\n xyz=xyz.contiguous(), xyz_features=cur_features, xyz_bs_idxs=cur_coords[:, 0],\n new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,\n filter_neighbors_with_roi=self.model_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),\n radius_of_neighbor=self.model_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),\n rois=batch_dict.get('rois', None)\n )\n\n point_features_list.append(pooled_features)\n\n point_features = torch.cat(point_features_list, dim=-1)\n\n if on_seg:\n batch_dict[f'point_seg_labels{self.suffix}'] = keypoint_labels # (BXN, 1)\n batch_dict[f'point_features_before_fusion{self.suffix}'] = point_features.view(-1, point_features.shape[-1])\n point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1]))\n\n batch_dict[f'point_features{self.suffix}'] = point_features # (BxN, C)\n batch_dict[f'point_coords{self.suffix}'] = keypoints # (BxN, 4)\n\n return batch_dict\n", "import torch\nfrom .torch_hash_cuda import (\n track_graphs_cpu\n)\n\nnum_graphs=5\npoints = torch.randn(50, 3)\ngraph_idx = torch.randint(0, num_graphs, [50]).long()\nprint(graph_idx)\ntrack_graphs_cpu(points, graph_idx, num_graphs)\nimport ipdb; ipdb.set_trace()\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.floor", "torch.zeros", "torch.cat", "torch.nn.ModuleList", "torch.nn.ReLU", "torch.tensor", "torch.nn.Linear", "torch.clamp", "torch.t", "torch.atan2" ], [ "torch.randn", "torch.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dxs/neighbour-analyser
[ "609c220b1352f9c3e64ea96ff43007584712efb0" ]
[ "testing/car_record.py" ]
[ "import cv2 as cv\nimport argparse\nimport sys\nimport numpy as np \nimport os.path \n\n#set constants\nFRONT_CAMERA = 1\nBACK_CAMERA = 0\ni = 0\n\nconfThreshold = 0.5 #Confidence threshold\nnmsThreshold = 0.4 #Non-maximum suppression threshold\ninpWidth = 416 #Width of network's input image\ninpHeight = 416 #Height of network's input image\n\n# Load names of classes\nclassesFile = 'coco.names'\nclasses = None\nwith open(classesFile, 'rt') as f:\n classes = f.read().rstrip('\\n').split('\\n')\n\n# LOAD MODEL AND CLASSES\n# Give the configuration and weight files for the model and load the network using them.\nmodelConfiguration = 'yolov3-tiny.cfg' # Network configuration\nmodelWeights = 'yolov3-tiny.weights' #Pre-trained network's weights\n\nnet = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\nnet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)\nnet.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)\n\n# Get the names of the output layers\ndef getOutputsNames(net):\n # Get the names of all the layers in the network\n layersNames = net.getLayerNames()\n # Get the names of the output layers, i.e. the layers with unconnected outputs\n return [layersNames[i[0] -1] for i in net.getUnconnectedOutLayers()]\n\n# Process inputs\noutputFile = 'cars.avi'\n\ncap = cv.VideoCapture(BACK_CAMERA)\n\n# Get the video writer initialized to save the output video when needed\nvideo_writer = cv.VideoWriter(outputFile, cv.VideoWriter_fourcc('M','J','P','G'), 30, (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)), round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))\n\ncounter_seen_car_ago = 0\n\nwhile cv.waitKey(1) < 0:\n # Get frame from the video\n hasFrame, frame = cap.read()\n\n if not hasFrame:\n print('Done processing !!!')\n cv.waitKey(3000)\n break\n\n if counter_seen_car_ago > 0:\n counter_seen_car_ago = counter_seen_car_ago-1\n video_writer.write(frame.astype(np.uint8))\n continue\n \n # Create a 4D blob from a frame\n blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)\n\n # Sets the input to the network\n net.setInput(blob)\n\n # Runs the forward pass to get output of the output layers\n outs = net.forward(getOutputsNames(net))\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > confThreshold:\n if(classes[classId] == 'car'):\n video_writer.write(frame.astype(np.uint8))\n i = i + 1\n counter_seen_car_ago = 10\n print('save img {0}'.format(i))\n \n" ]
[ [ "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Thomas-Schatz/scone-phobia
[ "55577d150ff71fd1f1c52073143c64e242b28600", "55577d150ff71fd1f1c52073143c64e242b28600" ]
[ "scone_phobia/utils/apply_analyses.py", "scone_phobia/analyses/error_sim.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 20 10:59:28 2018\n\n@author: Thomas Schatz\n\nCode for managing the analysis of a set of ABXpy minimal-pair scores \nfor ABX tasks with the following structure:\n ON phone BY speaker, previous and following phonetic context)\n\nThe main function is apply_analysis, see readme.md for usage example.\n\nIn the current implementation, the metadata associated with each minimal-pair\nfile can be stored directly in the name of the file, following a scheme described below.\nNote that this scheme is parameterized by the 'primary-metadata' section of\nthe config file.\nAt minima, the filename should be sufficient to deduce all relevant metadata for carrying\nout the analyses and plots.\nThe scone_phobia/metadata folder can be used to store information associating filename\ncomponents with further metadata. It can be useful to keep filenames from getting too long.\n\nThis code requires a folder where pickles containing the minimal-pair scores are stored.\nIf resampling of the scores is needed (e.g. to obtain estimate of variability for\nthe analysis results), this folder should also contain a 'resampling' subfolder\nwhere pickles containing resample of the minimal-pair scores are stored.\n\nThese minimal-pair scores pickles can be obtained with precompute_mp_scores.py\nand resample of those with resample_mp_scores.py. Note that both these scripts\nwill name pickles based on the name of the original ABXpy results filename,\nso it's probably a good idea to name those original results files in accordance\nwith the naming scheme described below.\n\nPart of this code could probably be generalized to analysing results from \nother ABX tasks. If we need to do that, Not sure if we should try to increase\nthe scope of the current library, if we should do two independent libraries with\nsome (a lot of?) redundant code or if we should have an independent abstract\nlibrary being called by several libraries applied to particular tasks.\n\"\"\"\n\nimport pandas\nimport os\nimport os.path as path\nimport scone_phobia.utils.mp_scores as mp_scores\nimport yaml\n\n\ndef load_cfg_from_file(f):\n # decorator that will load keyword cfg argument\n # from \"../config.yml\" unless it is specified explicitly\n def wrapper(*args, **kwargs):\n if not('cfg' in kwargs) or (kwargs['cfg'] is None):\n dir = path.dirname(path.realpath(__file__))\n cfg_file = path.join(dir, \"..\", \"config.yml\")\n with open(cfg_file, 'r') as ymlfile:\n kwargs['cfg'] = yaml.load(ymlfile, Loader=yaml.Loader)['primary-metadata']\n return f(*args, **kwargs)\n return wrapper\n\n\n\"\"\"\nFilename parsing utilities\n\nParse filenames for ABX results files and derivatives based on the 'primary-metadata'\nspecified in '../config.yml'.\n\nFilenames (without the extension) should be of the form:\n\n Property1valueProperty1key__Property2valueProperty2key__...___PropertyNvaluePropertyNkey.extension\n\nwhere the property values and keys should not contain any double underscores and\nwhere the property keys should correspond to the keys in the 'primary-metadata' \nsection of the '../config.yml' file. The extension can be whatever file extension\nis appropriate. 
For example the following would be valid filenames for the 'primary-metadata'\nsection of the config file template ('../config.yml.example'):\n\n HMM-GMMmodel__WSJtrain__CSJtest__KLdis.pickle\n HMM-GMMmodel__WSJtrain__CSJtest__KLdis.txt\n MFCCmodel__Nonetrain__CSJtest__COSdis.pickle\n\nThe first one, for example, would be parsed into the following list of pairs:\n\n [('model type', 'HMM-GMM'),\n ('training set', 'WSJ'),\n ('test set', 'CSJ'),\n ('dissimilarity', 'KL')]\n\nFor bootstrap related files, filenames will look like:\n\n HMM-GMMmodel__WSJtrain__CSJtest__KLdis__batchsize50__batch3.pickle\n\nwhere the number after 'batchsize' indicates the size of the resampling\nbatches and the number after 'batch' is the batch ID for this particular file.\nFor these files, the resampling batch size and batch ID are also\nreturned.\n\"\"\"\n\ndef suffix_split(token, cfg, err_message):\n \"\"\"\n Helper function for parsing filenames.\n Looking for a key from cfg that would be\n a suffix of token. There should be one\n and only one.\n \"\"\"\n matches = []\n for key in cfg:\n if len(token) >= len(key):\n if token[-len(key):] == key:\n matches.append((key, token[:-len(key)]))\n assert len(matches) == 1, err_message\n return matches[0]\n\n\n@load_cfg_from_file\ndef parse_res_fname(fpath, cfg=None):\n name, _ = path.splitext(path.split(fpath)[1])\n err_message = (\"Results filename {} is not correctly formatted.\"\n \" Check your config file and \"\n \"formatting instructions in analyze_mp_scores.py.\"\n ).format(name)\n N = len(cfg)\n tokens = name.split('__')\n assert len(tokens) == N, err_message\n used_keys = []\n res = []\n for token in tokens:\n key, value = suffix_split(token, cfg, err_message)\n assert not key in used_keys, err_message\n used_keys.append(key)\n res.append((cfg[key], value))\n return res\n\n\n@load_cfg_from_file\ndef parse_bootres_fname(fpath, cfg=None):\n name, _ = path.splitext(path.split(fpath)[1])\n err_message = (\"Bootstrap results filename filename {} is not correctly\"\n \" formatted. 
Check your config file and \"\n \"formatting instructions in analyze_mp_scores.py.\"\n ).format(name)\n N = len(cfg)\n tokens = name.split('__')\n assert len(tokens) == N+2, err_message\n properties = parse_res_fname('__'.join(tokens[:N]), cfg=cfg)\n batch = tokens[-1]\n assert len(batch) >= 5 and batch[:5] == 'batch', batch\n batch = int(batch[5:])\n batchsize = tokens[-2]\n assert len(batchsize) >= 9 and batchsize[:9] == 'batchsize', batchsize\n batchsize = int(batchsize[9:])\n properties.append(('batch ID', batch))\n properties.append(('batch size', batchsize))\n return properties\n\n\n\n############################\n## Fetch and analyse data #\n############################\n\ndef fetch_data(analysis, mp_folder, filt=None, encoding=None,\n add_metadata=None):\n \"\"\"Use the above to get just the right data\"\"\"\n get_metadata = lambda x, parse=parse_res_fname: parse(x)\n df = mp_scores.load_mp_errors(mp_folder,\n get_metadata,\n filt=filt,\n encoding=encoding) # load all mp scores in a big df\n if not(add_metadata is None):\n df = add_metadata(df)\n df = analysis(df)\n return df\n\n\ndef fetch_resampled_data(analysis,\n resampling_file=None,\n resampled_mp_folder=None,\n filt=None,\n encoding=None,\n add_metadata=None,\n verbose=0):\n # Getting resampled minimal-pair scores to estimate variability.\n # This can take time so if resampling_file is not None,\n # results are saved once they are computed\n get_metadata = lambda x, parse=parse_bootres_fname: parse(x)\n if resampling_file is None:\n boot_dfs = mp_scores.resample_analysis(analysis,\n resampled_mp_folder,\n get_metadata,\n filt=filt,\n encoding=encoding,\n add_metadata=add_metadata,\n verbose=verbose)\n else:\n boot_dfs = mp_scores.resample_analysis_cached(resampling_file,\n analysis,\n resampled_mp_folder,\n get_metadata,\n filt=filt,\n encoding=encoding,\n add_metadata=add_metadata,\n verbose=verbose)\n boot_df = pandas.concat(boot_dfs)\n return boot_df\n\n\ndef resampling_filts(resample_caching_scheme, mp_folder, user_filt=None):\n \"\"\"\n Function used to specify various way of caching resamples of analysis\n results.\n It is the responsibility of this function to ensure that all\n caching filters are consistent with user_filt.\n See apply_analysis below.\n TODO? 
Could add a scheme where caching is done by type of model.\n \"\"\"\n caching_filts = []\n mp_files = [path.splitext(e)[0] for e in os.listdir(mp_folder)\n if path.splitext(e)[1] == '.pickle']\n if resample_caching_scheme == 'mp_file':\n for mp_fname in mp_files:\n # only use caching filters useful given user-provided filt\n if (user_filt is None) or user_filt(mp_fname):\n # use second arg default value to avoid scope issues \n filt = lambda boot_mp_fname, mp_fname=mp_fname:\\\n mp_fname in boot_mp_fname\n caching_filts.append((mp_fname, filt))\n elif resample_caching_scheme == 'sametestset_mp_filepairs':\n # analysis not assumed symmetric, so we loop over all\n # pairs\n for mp_fname1 in mp_files:\n # only use caching filters useful given user-provided filt\n if user_filt(mp_fname1):\n metadata1 = dict(parse_res_fname(mp_fname1))\n for mp_fname2 in mp_files:\n # only use caching filters useful given user-provided filt\n if user_filt(mp_fname2):\n metadata2 = dict(parse_res_fname(mp_fname2))\n if metadata1['test set'] == metadata2['test set']:\n filt_name = mp_fname1 + '___' + mp_fname2 # hacky\n # use args default values to avoid scope issues\n filt = lambda bname, n1=mp_fname1, n2=mp_fname2: \\\n n1 in bname or n2 in bname\n caching_filts.append((filt_name, filt))\n else:\n raise ValueError(('Unsupported resample caching scheme '\n '{}'.format(resample_caching_scheme)))\n return caching_filts\n\n\ndef apply_analysis(analysis, mp_folder,\n filt=None,\n add_metadata=None,\n resampling=False,\n resample_caching_scheme=None,\n analysis_folder=None,\n pickle_encoding=None,\n resampled_pickle_encoding=\"latin1\",\n verbose=0):\n \"\"\"\n analysis: function that takes a pandas dataframe containing all\n required minimal-pair scores and returns the analysis results\n of interest (in a pandas dataframe if resampling=True).\n mp_folder: folder where the pickles containing minimal-pair scores are stored\n if resampling=True, mp_folder should also contain a 'resampling' subfolder\n where pickles containing resampled versions of the minimal-pair scores\n are stored.\n filt: string -> bool function, that takes the name of a file in mp_folder\n and returns True iff that file should be included in the analysis. If\n set to None, all available files are included.\n add_metadata: pandas.Dataframe -> pandas.Dataframe function, that takes a\n raw mp_scores Dataframe (containing only 'contrast', 'error' and primary\n metadata columns, where primary metadata is as specified in the\n config.yml file) and adds some additional metadata columns to it\n resampling: whether or not to use resampling. Currently, this is only\n supported for minimal pairs averaged on speaker first then on context.\n This returns the full bootstrapped data and also adds resampling-based\n standard deviation estimate to the analysis results.\n resample_caching_scheme: if resampling is True, determines whether and how\n to cache resampled analysis results. 
Caching results on disk is useful:\n - if applying the analysis on resamples takes too long (if there are\n N resamples, the duration required for the analysis will be\n multiplied by N compared to applying the analysis without\n resampling)\n - if loading resampled minimal pair scores at once for all relevant\n files in mp_folder (as determined by filt) exhausts the available\n memory\n Currently there is only three supported values for\n resample_caching_scheme:\n - None: no caching\n - 'mp_file': will create one cache file per (non-resampled)\n minimal-pair scores file.\n ** This should only be used for analyses which can be applied\n independently for each set of minimal pair scores obtained\n in the same ABX task with the same features\n and dissimilarity function **\n - 'sametestset_mp_filepairs': will create one cache file\n per (ordered) pair of (non-resampled) minimal-pair scores\n files sharing the same test set.\n ** This should only be used for analyses comparing patterns of\n discriminability in the same ABX task for pairs of \n (features/dissimilarity function couples). **\n analysis_folder: currently only used if resampling=True and\n resample_caching_scheme is not None, to specify where to store cached\n analysis resamples.\n pickle_encoding and resampled_pickle_encoding: useful to ensure pickles\n containing minimal pair scores, resp. resampled versions of those, will be\n read correctly, for example if they have been computed under a different \n python environment than the current one.\n \"\"\"\n if filt is None:\n filt = lambda mp_fname: True \n df = fetch_data(analysis, mp_folder, filt=filt, encoding=pickle_encoding,\n add_metadata=add_metadata)\n if resampling:\n boot_dfs = []\n resampled_mp_folder = path.join(mp_folder, 'resampling')\n if resample_caching_scheme is None:\n resampling_file = None\n boot_dfs.append(\n fetch_resampled_data(analysis, resampling_file,\n resampled_mp_folder,\n filt=filt,\n encoding=resampled_pickle_encoding,\n add_metadata=add_metadata,\n verbose=verbose))\n else:\n caching_filts = resampling_filts(resample_caching_scheme,\n mp_folder,\n user_filt=filt)\n assert not(analysis_folder is None)\n for filt_name, caching_filt in caching_filts:\n # keep and_filt in case we add other resampling caching schemes\n # where the caching filts are defined more coarsely than some\n # possible user-provided filters.\n and_filt = lambda mp_fname, f1=filt, f2=caching_filt:\\\n f1(mp_fname) and f2(mp_fname)\n resampling_file = path.join(analysis_folder,\n '{}.pickle'.format(filt_name))\n boot_dfs.append(\n fetch_resampled_data(analysis, resampling_file,\n resampled_mp_folder,\n filt=and_filt,\n encoding=resampled_pickle_encoding,\n add_metadata=add_metadata,\n verbose=verbose))\n boot_df = pandas.concat(boot_dfs)\n # Add resulting standard deviation estimates to main dataframe \n df = mp_scores.estimate_std(df, boot_df)\n # TODO: permutation tests\n return df, boot_df\n else:\n return df\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 27 14:13:48 2018\n\n@author: Thomas Schatz\n\nCompute similarity between patterns of discrimination errors obtained by\ndifferent models on a common test set.\n\nThe main function is error_sim.\n\nThe similarity used is 1 - angular distance,\nwhere the angular distance is defined as:\n \n 2*arccos(cosine similarity)/pi\n \nwhich defines a proper metric taking values between 0 and 1 (the 2 factor\ncan be used because the distance is computed between vectors of discrimination\nerrors which are always positive). 
See wikipedia for more details\n(https://en.wikipedia.org/wiki/Cosine_similarity)\n\n\nThis assumes:\n 1. That the pandas.DataFrame specified as argument contains at least\n a'contrast', an 'error' and a 'test set' column.\n 2. That individual error patterns are obtained by grouping all lines\n sharing the same values in all columns but 'contrast' and 'error'.\n\n\"\"\"\n\nimport numpy as np\nimport pandas\n\n\ndef cosine_sim(a, b):\n return np.dot(a, b)/(np.linalg.norm(a)*np.linalg.norm(b))\n\ndef angular_distance(a, b, eps=10**-9):\n s = cosine_sim(a, b)\n # deal with rounding errors\n if s>1 and (s-1) < eps:\n s=1\n if s<-1 and (-1-s) < eps:\n s=-1\n return 2*np.arccos(s)/np.pi\n\ndef angular_sim(a, b):\n return 1-angular_distance(a, b)\n\ndef compute_err_sim(df, sim=angular_sim):\n # compute similarity betwen 'error_A' and 'error_B' columns of df\n return pandas.Series({'err_sim' : sim(df['error A'], df['error B'])},\n index=['err_sim'])\n\n\ndef error_sim(df):\n \"\"\"\n Main function.\n \"\"\"\n assert 'test set' in df.columns, df.columns\n assert 'error' in df.columns, df.columns\n assert 'contrast' in df.columns, df.columns\n # First merge on contrast and test set\n merge_cols = ['contrast', 'test set']\n if 'test language' in df.columns:\n merge_cols.append('test language')\n if 'test register' in df.columns:\n merge_cols.append('test register')\n # if there is more metadata derived from test set, could be specified here,\n # but it's not a big deal if it isn't, there just will be some duplicated\n # columns. \n df = df.merge(df, on=merge_cols, suffixes=(' A', ' B'))\n # Second group by all columns but contrast and error_A/B and get cosine sim\n groupby_cols = list(df.columns)\n del groupby_cols[groupby_cols.index('contrast')]\n del groupby_cols[groupby_cols.index('error A')]\n del groupby_cols[groupby_cols.index('error B')]\n res_df = df.groupby(groupby_cols).apply(compute_err_sim)\n # multi-indices -> cols\n res_df.reset_index(level=res_df.index.names, inplace=True)\n return res_df\n\n\n\"\"\"\n\nPossible extension to consider:\n \n For each possible pair of models, ranking\n contrasts as a function of how much they contribute to the dissimilarity\n in error patterns between the two models.\n \n One way to operationalize: look at (signed) angle change when removing\n contrast of interest. Let's say angle is a0 and we add a contrast with\n associated errA, errB, leading to new angle a1, then:\n \n cos(a0) = np.dot(a, b)/(np.linalg.norm(a)*np.linalg.norm(b))\n cos(a1) = (np.dot(a, b) + errA*errB) / (sqrt(||a||^2 + errA^2) * same with b)\n \n Then assuming errA, errB << ||a||, ||b||, cos(a1) denom. is to first order:\n \n ||a||||b|| sqrt(1+errA^2/||a||^2+errB^2/||b||^2)\n \n Then applying developpement of 1/sqrt(1+x) en 0 (1 - x/2), we get:\n \n cos(a1) # (np.dot(a, b) + errA*errB) * [ 1 - .5(errA/||a||)^2 - .5(errB/||b||)^2 ] / (||a||||b||)\n \n i.e. to first order:\n \n cos(a1) # cos(a0) * [ 1 - .5(errA/||a||)^2 - .5(errB/||b||)^2 ] + errA*errB /(||a||||b||)\n \n We can rewrite ||a|| and ||b|| as resp. N*mA, N*mB where N is the number of\n contrasts (excluding the one we're adding) and mA, mB are the average\n errors for modelA, resp. model B, on these contrasts. Let us then define\n rA := errA/mA the ratio of errA to the average error on other contrasts for\n model A and similarly rB:= errB/mB. 
We get:\n\n cos(a1) # cos(a0) * [ 1 - rA^2/(2N^2) - rB^2/(2N^2) ] + rA*rB/N^2\n \n \n For small angles a0, a1, we get to first order:\n\n 1-a1 # (1-a0) * [ 1 - rA^2/(2N^2) - rB^2/(2N^2) ] + rA*rB/N^2\n a1-a0 # [1-a0] [rA^2/(2N^2) + rB^2/(2N^2)] - rA*rB/N^2\n a1-a0 # 1/(2N^2) * [(rA-rB)^2 - a0[rA^2+rB^2]]\n \n Note that the second term suggests there is an effect of adding dimensions\n (contrasts), maybe some sort of uniformisation effect related to the curse\n of dimensionality. Maybe there is some sort of principled way of correcting\n for this?\n\n We have, in particular: \n \n a1-a0 #< 1/(2N^2) * (rA-rB)^2\n\n This suggests to rank contrasts based on (rA-rB)^2. \n \n This might give uninformative results if the errors are not well estimated\n and is probably unstable for very small errors. It might be a good idea to\n do it on broad classes of interest to make sure estimates are stable and/or\n to plot the values obtained on a correlation plot with conservative\n confidence intervals.\n \n A completely different but perhaps easier approach to the same problem:\n do GLM modeling of the ABX errors and look for the regression factors\n driving cross-linguistic differences.\n \n\"\"\"" ]
[ [ "pandas.concat" ], [ "numpy.arccos", "numpy.dot", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
mwittgen/rogue
[ "4be0e9a4d17bdd3987a268f54ad195ee1093190d" ]
[ "tests/test_list_memory.py" ]
[ "#!/usr/bin/env python3\n#-----------------------------------------------------------------------------\n# This file is part of the rogue software platform. It is subject to\n# the license terms in the LICENSE.txt file found in the top-level directory\n# of this distribution and at:\n# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.\n# No part of the rogue software platform, including this file, may be\n# copied, modified, propagated, or distributed except according to the terms\n# contained in the LICENSE.txt file.\n#-----------------------------------------------------------------------------\n\n# Comment added by rherbst for demonstration purposes.\nimport pyrogue as pr\nimport pyrogue.interfaces.simulation\nimport rogue.interfaces.memory\nimport numpy as np\nimport random\nimport time\n\n#rogue.Logging.setLevel(rogue.Logging.Warning)\n#import logging\n#logger = logging.getLogger('pyrogue')\n#logger.setLevel(logging.DEBUG)\n\nclass ListDevice(pr.Device):\n\n # Last comment added by rherbst for demonstration.\n def __init__(\n self,\n name = 'ListDevice',\n description = 'List Device Test',\n **kwargs):\n\n super().__init__(\n name = name,\n description = description,\n **kwargs)\n\n ##############################\n # Variables\n ##############################\n\n self.add(pr.RemoteVariable(\n name = 'UInt32List',\n offset = 0x0000,\n bitSize = 32 * 32,\n bitOffset = 0x0000,\n base = pr.UInt,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 32,\n valueStride = 32\n ))\n\n self.add(pr.RemoteVariable(\n name = 'Int32List',\n offset = 0x1000,\n bitSize = 32 * 32,\n bitOffset = 0x0000,\n base = pr.Int,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 32,\n valueStride = 32\n ))\n\n self.add(pr.RemoteVariable(\n name = 'UInt48List',\n offset = 0x2000,\n bitSize = 48 * 32,\n bitOffset = 0x0000,\n base = pr.UInt,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 48,\n valueStride = 48\n ))\n\n self.add(pr.RemoteVariable(\n name = 'FloatList',\n offset = 0x3000,\n bitSize = 32 * 32,\n bitOffset = 0x0000,\n base = pr.Float,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 32,\n valueStride = 32\n ))\n\n self.add(pr.RemoteVariable(\n name = 'DoubleList',\n offset = 0x4000,\n bitSize = 64 * 32,\n bitOffset = 0x0000,\n base = pr.Double,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 64,\n valueStride = 64\n ))\n\n self.add(pr.RemoteVariable(\n name = 'UInt16List',\n offset = 0x5000,\n bitSize = 16 * 32,\n bitOffset = 0x0000,\n base = pr.UInt,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 16,\n valueStride = 16\n ))\n\n self.add(pr.RemoteVariable(\n name = 'UInt21List',\n offset = 0x6000,\n bitSize = 32 * 32,\n bitOffset = 0x0000,\n base = pr.UInt,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 21,\n valueStride = 32\n ))\n\n self.add(pr.RemoteVariable(\n name = 'BoolList',\n offset = 0x7000,\n bitSize = 32,\n bitOffset = 0x0000,\n base = pr.Bool,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 1,\n valueStride = 1\n ))\n\nclass DummyTree(pr.Root):\n\n def __init__(self):\n pr.Root.__init__(self,\n name='dummyTree',\n description=\"Dummy tree for example\",\n timeout=2.0,\n pollEn=False)\n #serverPort=None)\n\n # Use a memory space emulator\n sim = rogue.interfaces.memory.Emulate(4,0x1000)\n self.addInterface(sim)\n\n self.add(ListDevice(\n offset = 0,\n memBase = sim\n ))\n\ndef test_memory():\n\n UInt32ListARaw = [int(random.random()*1000) for i in range(32)]\n 
Int32ListARaw = [int(random.random()*1000) for i in range(32)]\n UInt48ListARaw = [int(random.random()*1000) for i in range(32)]\n FloatListARaw = [random.random()*1000 for i in range(32)]\n DoubleListARaw = [random.random()*1000 for i in range(32)]\n UInt16ListARaw = [int(random.random()*1000) for i in range(32)]\n UInt21ListARaw = [int(random.random()*1000) for i in range(32)]\n BoolListARaw = [int(random.random()*1000)%2==0 for i in range(32)]\n\n UInt32ListA = np.array(UInt32ListARaw,np.uint32)\n Int32ListA = np.array(Int32ListARaw,np.int32)\n UInt48ListA = np.array(UInt48ListARaw,np.uint64)\n FloatListA = np.array(FloatListARaw,np.float32)\n DoubleListA = np.array(DoubleListARaw,np.float64)\n UInt16ListA = np.array(UInt16ListARaw,np.uint32)\n UInt21ListA = np.array(UInt21ListARaw,np.uint32)\n BoolListA = np.array(BoolListARaw,bool)\n\n UInt32ListB = [int(random.random()*1000) for i in range(32)]\n Int32ListB = [int(random.random()*1000) for i in range(32)]\n UInt48ListB = [int(random.random()*1000) for i in range(32)]\n FloatListB = [random.random()*1000 for i in range(32)]\n DoubleListB = [random.random()*1000 for i in range(32)]\n UInt16ListB = [int(random.random()*1000) for i in range(32)]\n UInt21ListB = [int(random.random()*1000) for i in range(32)]\n BoolListB = [int(random.random()*1000)%2==0 for i in range(32)]\n\n with DummyTree() as root:\n\n with root.updateGroup():\n root.ListDevice.UInt32List.set(UInt32ListARaw)\n root.ListDevice.Int32List.set(Int32ListARaw)\n root.ListDevice.UInt48List.set(UInt48ListARaw)\n root.ListDevice.FloatList.set(FloatListARaw)\n root.ListDevice.DoubleList.set(DoubleListARaw)\n root.ListDevice.UInt16List.set(UInt16ListARaw)\n root.ListDevice.UInt21List.set(UInt21ListARaw)\n root.ListDevice.BoolList.set(BoolListARaw)\n\n UInt32ListAA = root.ListDevice.UInt32List.get()\n Int32ListAA = root.ListDevice.Int32List.get()\n UInt48ListAA = root.ListDevice.UInt48List.get()\n FloatListAA = root.ListDevice.FloatList.get()\n DoubleListAA = root.ListDevice.DoubleList.get()\n UInt16ListAA = root.ListDevice.UInt16List.get()\n UInt21ListAA = root.ListDevice.UInt21List.get()\n BoolListAA = root.ListDevice.BoolList.get()\n\n UInt32ListAB = np.array([0] * 32,np.uint32)\n Int32ListAB = np.array([0] * 32,np.int32)\n UInt48ListAB = np.array([0] * 32,np.uint64)\n FloatListAB = np.array([0] * 32,np.float32)\n DoubleListAB = np.array([0] * 32,np.float64)\n UInt16ListAB = np.array([0] * 32,np.uint32)\n UInt21ListAB = np.array([0] * 32,np.uint32)\n BoolListAB = np.array([0] * 32,bool)\n\n for i in range(32):\n UInt32ListAB[i] = root.ListDevice.UInt32List.get(index=i)\n Int32ListAB[i] = root.ListDevice.Int32List.get(index=i)\n UInt48ListAB[i] = root.ListDevice.UInt48List.get(index=i)\n FloatListAB[i] = root.ListDevice.FloatList.get(index=i)\n DoubleListAB[i] = root.ListDevice.DoubleList.get(index=i)\n UInt16ListAB[i] = root.ListDevice.UInt16List.get(index=i)\n UInt21ListAB[i] = root.ListDevice.UInt21List.get(index=i)\n BoolListAB[i] = root.ListDevice.BoolList.get(index=i)\n\n for i in range(32):\n if UInt32ListAA[i] != UInt32ListA[i]:\n raise AssertionError(f'Verification Failure for UInt32ListAA at position {i}')\n\n if Int32ListAA[i] != Int32ListA[i]:\n raise AssertionError(f'Verification Failure for Int32ListAA at position {i}')\n\n if UInt48ListAA[i] != UInt48ListA[i]:\n raise AssertionError(f'Verification Failure for UInt48ListAA at position {i}')\n\n if abs(FloatListAA[i] - FloatListA[i]) > 0.001:\n raise AssertionError(f'Verification Failure for FloatListAA at position 
{i}')\n\n if abs(DoubleListAA[i] - DoubleListA[i]) > 0.001:\n raise AssertionError(f'Verification Failure for DoubleListAA at position {i}')\n\n if UInt16ListAA[i] != UInt16ListA[i]:\n raise AssertionError(f'Verification Failure for UInt16ListAA at position {i}')\n\n if UInt21ListAA[i] != UInt21ListA[i]:\n raise AssertionError(f'Verification Failure for UInt21ListAA at position {i}')\n\n if BoolListAA[i] != BoolListA[i]:\n raise AssertionError(f'Verification Failure for BoolListAA at position {i}')\n\n if UInt32ListAB[i] != UInt32ListA[i]:\n raise AssertionError(f'Verification Failure for UInt32ListAB at position {i}')\n\n if UInt48ListAB[i] != UInt48ListA[i]:\n raise AssertionError(f'Verification Failure for UInt48ListAB at position {i}')\n\n if abs(FloatListAB[i] - FloatListA[i]) > 0.001:\n raise AssertionError(f'Verification Failure for FloatListAB at position {i}')\n\n if abs(DoubleListAB[i] - DoubleListA[i]) > 0.001:\n raise AssertionError(f'Verification Failure for DoubleListAB at position {i}')\n\n if UInt16ListAB[i] != UInt16ListA[i]:\n raise AssertionError(f'Verification Failure for UInt16ListAB at position {i}')\n\n if UInt21ListAB[i] != UInt21ListA[i]:\n raise AssertionError(f'Verification Failure for UInt21ListAB at position {i}')\n\n if BoolListAB[i] != BoolListA[i]:\n raise AssertionError(f'Verification Failure for BoolListAB at position {i}')\n\n for i in range(32):\n root.ListDevice.UInt32List.set(UInt32ListB[i],index=i)\n root.ListDevice.Int32List.set(Int32ListB[i],index=i)\n root.ListDevice.UInt48List.set(UInt48ListB[i],index=i)\n root.ListDevice.FloatList.set(FloatListB[i],index=i)\n root.ListDevice.DoubleList.set(DoubleListB[i],index=i)\n root.ListDevice.UInt16List.set(UInt16ListB[i],index=i)\n root.ListDevice.UInt21List.set(UInt21ListB[i],index=i)\n root.ListDevice.BoolList.set(BoolListB[i],index=i)\n\n UInt32ListBA = root.ListDevice.UInt32List.get()\n Int32ListBA = root.ListDevice.Int32List.get()\n UInt48ListBA = root.ListDevice.UInt48List.get()\n FloatListBA = root.ListDevice.FloatList.get()\n DoubleListBA = root.ListDevice.DoubleList.get()\n UInt16ListBA = root.ListDevice.UInt16List.get()\n UInt21ListBA = root.ListDevice.UInt21List.get()\n BoolListBA = root.ListDevice.BoolList.get()\n\n UInt32ListBB = np.array([0] * 32,np.uint32)\n Int32ListBB = np.array([0] * 32,np.int32)\n UInt48ListBB = np.array([0] * 32,np.uint64)\n FloatListBB = np.array([0] * 32,np.float32)\n DoubleListBB = np.array([0] * 32,np.float64)\n UInt16ListBB = np.array([0] * 32,np.uint32)\n UInt21ListBB = np.array([0] * 32,np.uint32)\n BoolListBB = np.array([0] * 32,bool)\n\n for i in range(32):\n UInt32ListBB[i] = root.ListDevice.UInt32List.get(index=i)\n Int32ListBB[i] = root.ListDevice.Int32List.get(index=i)\n UInt48ListBB[i] = root.ListDevice.UInt48List.get(index=i)\n FloatListBB[i] = root.ListDevice.FloatList.get(index=i)\n DoubleListBB[i] = root.ListDevice.DoubleList.get(index=i)\n UInt16ListBB[i] = root.ListDevice.UInt16List.get(index=i)\n UInt21ListBB[i] = root.ListDevice.UInt21List.get(index=i)\n BoolListBB[i] = root.ListDevice.BoolList.get(index=i)\n\n for i in range(32):\n if UInt32ListBA[i] != UInt32ListB[i]:\n raise AssertionError(f'Verification Failure for UInt32ListBA at position {i}')\n\n if Int32ListBA[i] != Int32ListB[i]:\n raise AssertionError(f'Verification Failure for Int32ListBA at position {i}')\n\n if UInt48ListBA[i] != UInt48ListB[i]:\n raise AssertionError(f'Verification Failure for UInt48ListBA at position {i}')\n\n if abs(FloatListBA[i] - FloatListB[i]) > 0.001:\n raise 
AssertionError(f'Verification Failure for FloatListBA at position {i}')\n\n if abs(DoubleListBA[i] != DoubleListB[i]) > 0.001:\n raise AssertionError(f'Verification Failure for DoubleListBA at position {i}')\n\n if UInt16ListBA[i] != UInt16ListB[i]:\n raise AssertionError(f'Verification Failure for UInt16ListBA at position {i}')\n\n if UInt21ListBA[i] != UInt21ListB[i]:\n raise AssertionError(f'Verification Failure for UInt21ListBA at position {i}')\n\n if BoolListBA[i] != BoolListB[i]:\n raise AssertionError(f'Verification Failure for BoolListBA at position {i}')\n\n if UInt32ListBB[i] != UInt32ListB[i]:\n raise AssertionError(f'Verification Failure for UInt32ListBB at position {i}')\n\n if abs(FloatListBB[i] - FloatListB[i]) > 0.001:\n raise AssertionError(f'Verification Failure for FloatListBB at position {i}')\n\n if abs(DoubleListBB[i] - DoubleListB[i]) > 0.001:\n raise AssertionError(f'Verification Failure for DoubleListBB at position {i}')\n\n if UInt16ListBB[i] != UInt16ListB[i]:\n raise AssertionError(f'Verification Failure for UInt16ListBB at position {i}')\n\n if UInt21ListBB[i] != UInt21ListB[i]:\n raise AssertionError(f'Verification Failure for UInt21ListBB at position {i}')\n\n if BoolListBB[i] != BoolListB[i]:\n raise AssertionError(f'Verification Failure for BoolListBB at position {i}')\n\n\n root.ListDevice.UInt32List.set(UInt32ListA)\n root.ListDevice.Int32List.set(Int32ListA)\n\n root.ListDevice.UInt32List.set(np.array([1,2,3],np.uint32),index=7)\n root.ListDevice.Int32List.set([1,-22,-33],index=5)\n\n resA = root.ListDevice.UInt32List.get()\n resB = root.ListDevice.Int32List.get()\n\n UInt32ListA[7:10] = [1,2,3]\n Int32ListA[5:8] = [1,-22,-33]\n\n # Verify update\n for i in range(32):\n\n if resA[i] != UInt32ListA[i]:\n raise AssertionError(f'Stripe Verification Failure for UInt32ListA at position {i}')\n\n if resB[i] != Int32ListA[i]:\n raise AssertionError(f'Stripe Verification Failure for Int32ListA at position {i}')\n\n # Test value shift\n _ = resA[0] >> 5\n\ndef run_gui():\n import pyrogue.pydm\n\n with DummyTree() as root:\n pyrogue.pydm.runPyDM(root=root,title='test123',sizeX=1000,sizeY=500)\n\nif __name__ == \"__main__\":\n test_memory()\n #run_gui()\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
event-driven-robotics/models
[ "a8b6e2a83d4842eb99878d3fa53cd92f4c6b3db8" ]
[ "nxsdk_modules_ncl/dnn/composable/composable_dnn.py" ]
[ "# \n# Copyright © 2020 Intel Corporation.\n# \n# This software and the related documents are Intel copyrighted\n# materials, and your use of them is governed by the express \n# license under which they were provided to you (License). Unless\n# the License provides otherwise, you may not use, modify, copy, \n# publish, distribute, disclose or transmit this software or the\n# related documents without Intel's prior written permission.\n# \n# This software and the related documents are provided as is, with\n# no express or implied warranties, other than those that are \n# expressly stated in the License.\n\n\"\"\"A wrapper around NxModel to make it a composable\"\"\"\n\nimport os\nfrom typing import List\n\nimport numpy as np\nfrom jinja2 import Environment, FileSystemLoader\nimport atexit\n\nfrom nxsdk import get_logger\nfrom nxsdk.composable.abstract_composable import AbstractComposable\nfrom nxsdk.composable.collections import Processes\nfrom nxsdk.composable.interfaces.composable_enums import ResourceMapType\nfrom nxsdk.composable.interfaces.process import Process\nfrom nxsdk.composable.interfaces.process_aggregator_interface import AbstractProcessAggregator\nfrom nxsdk.composable.port_impl import StateInputPort\nfrom nxsdk.composable.resource_map import ResourceMapFactory\nfrom nxsdk.graph.graph import Graph\nfrom nxsdk.graph.monitor.probes import SpikeProbeCondition\nfrom nxsdk.graph.processes.phase_enums import Phase\nfrom nxsdk_modules_ncl.dnn.src.dnn_layers import ProbableStates, InputModes\nfrom nxsdk_modules_ncl.dnn.tests.test_softreset import printLayerMappings, \\\n printLayers\n\n\nclass ComposableDNN(AbstractComposable):\n \"\"\"A DNN that is composable. See nxsdk_modules_ncl.dnn.src.dnn_layers.NxModel which is the underlying DNN Model\"\"\"\n def __init__(self, model: 'NxModel', num_steps_per_img: int, enable_reset: bool = True):\n \"\"\"\n Wraps a DNNModel and makes it composable\n\n :param model (nxsdk_modules_ncl.dnn.src.dnn_layers.NxModel): The underlying DNN Model created from NxTF Layers\n :param num_steps_per_img: Number of steps to run for each image\n :param enable_reset: Whether to reset states after ``num_steps_per_img``.\n \"\"\"\n super().__init__()\n\n self._logger = get_logger(\"NET.DNN\")\n\n self._build(model=model, num_steps_per_img=num_steps_per_img, enableReset=enable_reset)\n\n def _build(self, *args, **kwargs):\n \"\"\"Builds the ports, probes and snips for the composable. This method is called from base class constructor\"\"\"\n # Stores a reference to the underlying model\n self._dnn = kwargs[\"model\"]\n self._addPorts()\n self._addProcesses()\n self._num_steps_per_img = kwargs[\"num_steps_per_img\"]\n self._enableReset = kwargs['enableReset']\n\n def _addPorts(self):\n \"\"\"Adds ports to the composable\"\"\"\n # Create and add input port. 
This will be delegated to the input layer\n self.addPort(StateInputPort(name=\"input\"))\n\n def _addProcesses(self):\n \"\"\"Adds processes/snips associated with DNN Composable\"\"\"\n snipDir = os.path.join(os.path.dirname(__file__), '..', 'snips', 'reset_model_states')\n\n # Init snip to populate number of cores and reset interval\n init = Process(\n name='init',\n cFilePath=snipDir + \"/snip_init.c\",\n includeDir=snipDir,\n funcName='init_1',\n phase=Phase.EMBEDDED_INIT,\n lmtId=0)\n self.addProcess(init)\n\n # Todo : Profile and measure to see if spreading readout and/or reset across lmts helps.\n # Reset SNIP\n reset_snip = Process(\n name='reset',\n cFilePath=snipDir + \"/snip_reset.c\",\n includeDir=snipDir,\n guardName='do_reset',\n funcName='reset',\n phase=Phase.EMBEDDED_MGMT,\n lmtId=0)\n self.addProcess(reset_snip)\n\n readout_spike_activity_snip_dir = os.path.join(os.path.dirname(__file__),\n '..', 'snips', 'readout_spike_activity')\n\n # This is an example of lazily creating a process. The C file does not exist yet and will\n # only be generated post map phase when output layer has been mapped to neurocores.\n\n # Class Readout SNIP\n readout_snip = Process(\n name='readout',\n cFilePath=readout_spike_activity_snip_dir + \"/snip_class_readout.c\",\n includeDir=readout_spike_activity_snip_dir,\n guardName='do_readout',\n funcName='readout',\n phase=Phase.EMBEDDED_MGMT,\n lmtId=0)\n self.addProcess(readout_snip)\n\n def partition(self, board: Graph) -> AbstractComposable:\n \"\"\"Partition the dnn model. We ignore this step and delegate it to map which invokes compileModel\"\"\"\n return self\n\n def map(self, board: Graph) -> AbstractComposable:\n \"\"\"Invoke partition and mapping of the dnn model\"\"\"\n mapper = self._dnn.compileModel(board)\n\n printLayerInfo = False\n if printLayerInfo:\n printLayerMappings(self._dnn.layers, mapper, synapses=True, inputAxons=True)\n printLayers(self._dnn.layers)\n\n self._createSnips(board)\n self._createReadoutSnip()\n return self\n\n def updatePorts(self, board: Graph) -> AbstractComposable:\n \"\"\"Updates resourceMap to input and output ports\"\"\"\n inputLayer = self._dnn.layers[0]\n\n if inputLayer.inputMode == InputModes.AEDAT:\n self.ports.input.resourceMap = ResourceMapFactory.createExplicit(\n ResourceMapType.INPUT_AXON, inputLayer.inputAxonResourceMap)\n else:\n # Return input compartments for multi-compartment neurons\n neuronSize = 2 if inputLayer.resetMode == 'soft' else 1\n cxResourceMap = inputLayer.cxResourceMap[::neuronSize]\n self.ports.input.resourceMap = ResourceMapFactory.createExplicit(\n ResourceMapType.COMPARTMENT, cxResourceMap)\n # self.ports.output.resourceMap = CompartmentResourceMap(self._dnn.layers[-1].cxResourceMap)\n return self\n\n def completeConnectivity(self, board: Graph, processAggregator: AbstractProcessAggregator) -> AbstractComposable:\n \"\"\"Create channel to communicate data to init snip\"\"\"\n # Should pipe to resourceMap indices for output layer\n self._createInitializationChannel(board, processAggregator)\n self._createReadoutChannel(board, processAggregator)\n return self\n\n def _createSnips(self, board: Graph):\n \"\"\"Create clones of reset and init snips based on number of chips used by input layer.\"\"\"\n processes = Processes()\n for chip_id in range(board.numChips):\n # init snip\n initProcess = self.processes.init\n initProcessWithChipId = initProcess.clone(name=initProcess.name + str(chip_id),\n params={'chipId': chip_id})\n processes.add(initProcessWithChipId)\n\n # reset 
snip\n resetProcess = self.processes.reset\n resetProcessWithChipId = resetProcess.clone(name=resetProcess.name + str(chip_id),\n params={'chipId': chip_id})\n processes.add(resetProcessWithChipId)\n\n # Todo : Enable readout for output layers distributed across multiple chips.\n # readout\n chip_id = self._dnn.layers[-1].cxResourceMap[0, 0]\n assert len(np.unique(self._dnn.layers[-1].cxResourceMap[:, 0])) == 1\n\n readoutProcess = self.processes.readout\n readoutProcessWithChipId = readoutProcess.clone(name=readoutProcess.name,\n params={'chipId': chip_id})\n processes.add(readoutProcessWithChipId)\n self.processes = processes\n\n def _createReadoutSnip(self):\n \"\"\"Create readout snip for compartment of the output layer.\n\n The voltage is readout when using an output layer with a softmax\n activation, otherwise, spikes are readout by creating spike counters\n at the lakemonts.\n \"\"\"\n probeDt = 1\n probeStart = 100000000\n\n # Get the output layer from the spiking model\n output_layer = self._dnn.layers[-1]\n\n NUM_CLASSES = int(np.prod(output_layer.output_shape[1:]))\n\n # Return output compartments for multi-compartment neurons.\n neuronSize = 2 if output_layer.resetMode == 'soft' else 1\n offset = 1 if output_layer.resetMode == 'soft' else 0\n\n # Determine whether to read spikes or voltages based on activation.\n readSpikes = True\n if hasattr(output_layer, 'activation') and \\\n output_layer.activation.__name__ == 'softmax':\n offset = 0\n readSpikes = False\n\n lmt_spike_counters = []\n\n if readSpikes:\n for i in range(NUM_CLASSES):\n spike_probe = output_layer[i * neuronSize + offset].probe(\n state=ProbableStates.SPIKE,\n probeCondition=SpikeProbeCondition(dt=probeDt, tStart=probeStart))\n lmt_spike_counters.append(spike_probe.counterId)\n cores = cxIds = np.zeros_like(lmt_spike_counters).tolist()\n else:\n rm = output_layer.cxResourceMap\n cores = rm[offset::neuronSize, 1].tolist()\n cxIds = rm[offset::neuronSize, 2].tolist()\n lmt_spike_counters = np.zeros_like(cxIds).tolist()\n\n # Now that lmt_spike_counters are known, generate the snip_class_readout.c\n self._generateReadOutSnipCFileFromJinjaTemplate(readSpikes=readSpikes,\n num_classes=NUM_CLASSES,\n lmt_output_spike_counter_ids=lmt_spike_counters,\n cores=cores,\n cxIds=cxIds)\n\n @staticmethod\n def _cleanup():\n readout_spike_activity_snip_dir = os.path.join(os.path.dirname(__file__),\n '..', 'snips', 'readout_spike_activity')\n cFilePath = os.path.join(readout_spike_activity_snip_dir, \"snip_class_readout.c\")\n if os.path.exists(cFilePath):\n os.remove(cFilePath)\n\n def _generateReadOutSnipCFileFromJinjaTemplate(self,\n readSpikes: bool,\n num_classes: int,\n lmt_output_spike_counter_ids: List[int],\n cores: List[int],\n cxIds: List[int]):\n atexit.register(ComposableDNN._cleanup)\n\n readout_spike_activity_snip_dir = os.path.join(os.path.dirname(__file__),\n '..', 'snips', 'readout_spike_activity')\n\n context = {\n \"READ_SPIKES\": int(readSpikes),\n \"NUM_CLASSES\": num_classes,\n \"NUM_STEPS_PER_IMG\": self._num_steps_per_img,\n \"LMT_OUTPUT_SPIKE_COUNTER_IDS\": \"{\" + str(lmt_output_spike_counter_ids)[1:-1] + \"}\",\n \"CORE_IDS\": \"{\" + str(cores)[1:-1] + \"}\",\n \"CX_IDS\": \"{\" + str(cxIds)[1:-1] + \"}\"\n }\n\n env = Environment(loader=FileSystemLoader(os.path.join(readout_spike_activity_snip_dir, \"templates\")),\n trim_blocks=True)\n\n c_template = env.get_template(\"snip_class_readout.c.template\")\n c_contents = c_template.render(context)\n with 
open(os.path.join(readout_spike_activity_snip_dir, \"snip_class_readout.c\"), 'w') as cFile:\n cFile.write(c_contents)\n\n def _createInitializationChannel(self, board: Graph, processAggregator: AbstractProcessAggregator):\n \"\"\"Creates a channel and connects it to init snip\"\"\"\n\n for chip_id in range(board.numChips):\n init_process = self.processes['init' + str(chip_id)]\n processKey = init_process.getProcessKey()\n snip_init_1 = processAggregator.getEmbeddedSnipForProcessKey(processKey)\n name = 'channel_init_ch{}_lmt0'.format(chip_id)\n setattr(self,\n name,\n board.createChannel(bytes(name, 'utf-8'), \"int\", 3))\n\n getattr(self, name).connect(None, snip_init_1)\n\n def _createReadoutChannel(self, board: Graph, processAggregator: AbstractProcessAggregator):\n \"\"\"Create a readout channel to read the classification values from spike counters\"\"\"\n readout_process = self.processes.readout\n processKey = readout_process.getProcessKey()\n snip_readout = processAggregator.getEmbeddedSnipForProcessKey(processKey)\n self.readout_channel = board.createChannel(bytes('readout', 'utf-8'), \"int\", numElements=100000)\n self.readout_channel.connect(snip_readout, None)\n\n @staticmethod\n def load(path: str, board: Graph = None) -> 'AbstractComposable':\n \"\"\"Not Implemented\"\"\"\n raise NotImplementedError\n\n def save(self, path: str):\n \"\"\"Not Implemented\"\"\"\n raise NotImplementedError\n\n def start(self, board: Graph, *args, **kwargs):\n \"\"\"Writes initial configuration settings (num_cores_per_chip, num_steps_per_img, enableReset) to init channel\"\"\"\n num_cores_per_chip = [board.n2Chips[i].numCores for i in range(board.numChips)]\n for chip_id in range(board.numChips):\n name = 'channel_init_ch{}_lmt0'.format(chip_id)\n getattr(self, name).write(3, [num_cores_per_chip[chip_id], self._num_steps_per_img, self._enableReset])\n" ]
[ [ "numpy.zeros_like", "numpy.prod", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sixhobbits/prefect
[ "bf7a6b95ab592ad4808415f295163a64e38f1419" ]
[ "src/prefect/engine/serializers.py" ]
[ "import base64\nimport bz2\nimport gzip\nimport io\nimport json\nimport lzma\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Tuple\nimport zlib\n\nimport cloudpickle\nimport pendulum\n\nif TYPE_CHECKING:\n import pandas as pd\n\n__all__ = (\n \"Serializer\",\n \"PickleSerializer\",\n \"JSONSerializer\",\n \"DateTimeSerializer\",\n \"PandasSerializer\",\n \"CompressedSerializer\",\n)\n\nCOMPRESSION_FORMATS: Dict[str, Tuple[Callable[..., bytes], Callable[..., bytes]]] = {\n \"bz2\": (bz2.compress, bz2.decompress),\n \"gzip\": (gzip.compress, gzip.decompress),\n \"lzma\": (lzma.compress, lzma.decompress),\n \"zlib\": (zlib.compress, zlib.decompress),\n}\n\n\nclass Serializer:\n \"\"\"\n Serializers are used by Results to handle the transformation of Python\n objects to and from bytes.\n\n Subclasses should implement `serialize` and `deserialize`.\n \"\"\"\n\n def __eq__(self, other: Any) -> bool:\n return type(self) == type(other)\n\n def serialize(self, value: Any) -> bytes:\n \"\"\"\n Serialize an object to bytes.\n\n Args:\n - value (Any): the value to serialize\n\n Returns:\n - bytes: the serialized value\n \"\"\"\n raise NotImplementedError\n\n def deserialize(self, value: bytes) -> Any:\n \"\"\"\n Deserialize an object from bytes.\n\n Args:\n - value (bytes): the value to deserialize\n\n Returns:\n - Any: the deserialized value\n \"\"\"\n raise NotImplementedError\n\n\nclass PickleSerializer(Serializer):\n \"\"\"A `Serializer` that uses cloudpickle to serialize Python objects.\"\"\"\n\n def serialize(self, value: Any) -> bytes:\n \"\"\"\n Serialize an object to bytes using cloudpickle.\n\n Args:\n - value (Any): the value to serialize\n\n Returns:\n - bytes: the serialized value\n \"\"\"\n return cloudpickle.dumps(value)\n\n def deserialize(self, value: bytes) -> Any:\n \"\"\"\n Deserialize an object from bytes using cloudpickle.\n\n Args:\n - value (bytes): the value to deserialize\n\n Returns:\n - Any: the deserialized value\n \"\"\"\n try:\n return cloudpickle.loads(value)\n except Exception as exc:\n try:\n # old versions of Core encoded pickles with base64\n return cloudpickle.loads(base64.b64decode(value))\n except Exception:\n # if there's an error with the backwards-compatible step,\n # reraise the original exception\n raise exc\n\n\nclass JSONSerializer(Serializer):\n \"\"\"A Serializer that uses JSON to serialize objects\"\"\"\n\n def serialize(self, value: Any) -> bytes:\n \"\"\"\n Serialize an object to JSON\n\n Args:\n - value (Any): the value to serialize\n\n Returns:\n - bytes: the serialized value\n \"\"\"\n return json.dumps(value).encode()\n\n def deserialize(self, value: bytes) -> Any:\n \"\"\"\n Deserialize an object from JSON\n\n Args:\n - value (bytes): the value to deserialize\n\n Returns:\n - Any: the deserialized value\n \"\"\"\n return json.loads(value)\n\n\nclass DateTimeSerializer(Serializer):\n \"\"\"A Serializer for working with human-readable datetimes\"\"\"\n\n def serialize(self, value: Any) -> bytes:\n \"\"\"\n Serialize a datetime to human-readable bytes\n\n Args:\n - value (Any): the value to serialize\n\n Returns:\n - bytes: the serialized value\n \"\"\"\n return pendulum.instance(value).to_iso8601_string().encode()\n\n def deserialize(self, value: bytes) -> Any:\n \"\"\"\n Deserialize an datetime from human-readable bytes\n\n Args:\n - value (bytes): the value to deserialize\n\n Returns:\n - Any: the deserialized value\n \"\"\"\n return pendulum.parse(value.decode())\n\n\nclass PandasSerializer(Serializer):\n \"\"\"A Serializer for Pandas 
DataFrames.\n\n Args:\n - file_type (str): The type you want the resulting file to be\n saved as, e.g. \"csv\" or \"parquet\". Must match a type used\n in a `DataFrame.to_` method and a `pd.read_` function.\n - deserialize_kwargs (dict, optional): Keyword arguments to pass to the\n serialization method.\n - serialize_kwargs (dict, optional): Keyword arguments to pass to the\n deserialization method.\n \"\"\"\n\n def __init__(\n self,\n file_type: str,\n deserialize_kwargs: dict = None,\n serialize_kwargs: dict = None,\n ) -> None:\n self.file_type = file_type\n\n # Fails fast if user specifies a format that Pandas can't deal with.\n self._get_deserialize_method()\n self._get_serialize_method()\n\n self.deserialize_kwargs = (\n {} if deserialize_kwargs is None else deserialize_kwargs\n )\n self.serialize_kwargs = {} if serialize_kwargs is None else serialize_kwargs\n\n def serialize(self, value: \"pd.DataFrame\") -> bytes: # noqa: F821\n \"\"\"\n Serialize a Pandas DataFrame to bytes.\n\n Args:\n - value (DataFrame): the DataFrame to serialize\n\n Returns:\n - bytes: the serialized value\n \"\"\"\n serialization_method = self._get_serialize_method(dataframe=value)\n buffer = io.BytesIO()\n try:\n serialization_method(buffer, **self.serialize_kwargs)\n return buffer.getvalue()\n except TypeError:\n # there are some weird bugs with several of the Pandas serialization\n # methods when trying to serialize to bytes directly. This is a\n # workaround. See https://github.com/pandas-dev/pandas/pull/35129\n string_buffer = io.StringIO()\n serialization_method(string_buffer, **self.serialize_kwargs)\n return string_buffer.getvalue().encode()\n\n def deserialize(self, value: bytes) -> \"pd.DataFrame\": # noqa: F821\n \"\"\"\n Deserialize an object to a Pandas DataFrame\n\n Args:\n - value (bytes): the value to deserialize\n\n Returns:\n - DataFrame: the deserialized DataFrame\n \"\"\"\n deserialization_method = self._get_deserialize_method()\n buffer = io.BytesIO(value)\n deserialized_data = deserialization_method(buffer, **self.deserialize_kwargs)\n return deserialized_data\n\n def __eq__(self, other: Any) -> bool:\n if type(self) == type(other):\n return (\n self.file_type == other.file_type\n and self.serialize_kwargs == other.serialize_kwargs\n and self.deserialize_kwargs == other.deserialize_kwargs\n )\n return False\n\n # _get_read_method and _get_write_method are constructed as they are both to\n # limit copy/paste but also to make it easier for potential future extension to serialization\n # methods that do not map to the \"to_{}/read_{}\" interface.\n def _get_deserialize_method(self) -> Callable:\n import pandas as pd\n\n try:\n return getattr(pd, \"read_{}\".format(self.file_type))\n except AttributeError as exc:\n raise ValueError(\n \"Could not find deserialization methods for {}\".format(self.file_type)\n ) from exc\n\n def _get_serialize_method(self, dataframe: \"pd.DataFrame\" = None) -> Callable:\n import pandas as pd\n\n if dataframe is None:\n # If you just want to test if the method exists, create an empty dataframe\n dataframe = pd.DataFrame()\n try:\n return getattr(dataframe, \"to_{}\".format(self.file_type))\n except AttributeError as exc:\n raise ValueError(\n \"Could not find serialization methods for {}\".format(self.file_type)\n ) from exc\n\n\nclass CompressedSerializer(Serializer):\n \"\"\"\n A Serializer that wraps another Serializer and a compression function to serialize\n Python objects with compression.\n\n Args:\n - serializer (Serializer): the serializer that this 
serializer wraps\n - format (str): name of the selected pre-defined compression format (bz2, gzip,\n lzma, or zlib)\n - compress (Callable[..., bytes]): the custom compression function\n - decompress (Callable[..., bytes]): the custom decompression function\n - compress_kwargs (Dict[str, Any]): keyword arguments to be passed to the\n compression function\n - decompress_kwargs (Dict[str, Any]): keyword arguments to be passed to the\n decompression function\n \"\"\"\n\n def __init__(\n self,\n serializer: Serializer,\n format: str = None,\n compress: Callable[..., bytes] = None,\n decompress: Callable[..., bytes] = None,\n compress_kwargs: Dict[str, Any] = None,\n decompress_kwargs: Dict[str, Any] = None,\n ):\n self._serializer = serializer\n\n if format and (compress or decompress):\n raise ValueError(\n \"You must specify either `format` or `compress`/`decompress`, \"\n \"but not both.\"\n )\n elif format:\n try:\n self._compress, self._decompress = COMPRESSION_FORMATS[format]\n except KeyError as e:\n raise ValueError(\n \"`format` must be one of: {}\".format(\", \".join(COMPRESSION_FORMATS))\n ) from e\n elif compress and decompress:\n self._compress = compress\n self._decompress = decompress\n else:\n raise ValueError(\n \"You must specify either `format` or `compress`/`decompress`.\"\n )\n\n self._compress_kwargs = compress_kwargs or {}\n self._decompress_kwargs = decompress_kwargs or {}\n\n def __eq__(self, other: Any) -> bool:\n return (\n type(self) == type(other)\n and self._serializer == other._serializer\n and self._compress == other._compress\n and self._decompress == other._decompress\n and self._compress_kwargs == other._compress_kwargs\n and self._decompress_kwargs == other._decompress_kwargs\n )\n\n def serialize(self, value: Any) -> bytes:\n \"\"\"\n Serialize an object to compressed bytes.\n\n Args:\n - value (Any): the value to serialize\n\n Returns:\n - bytes: the compressed serialized value\n \"\"\"\n return self._compress(\n self._serializer.serialize(value), **self._compress_kwargs\n )\n\n def deserialize(self, value: bytes) -> Any:\n \"\"\"\n Deserialize an object from compressed bytes.\n\n Args:\n - value (bytes): the compressed value to deserialize\n\n Returns:\n - Any: the deserialized value\n \"\"\"\n return self._serializer.deserialize(\n self._decompress(value, **self._decompress_kwargs)\n )\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
seekindark/helloworld
[ "00fe439fdbd98add53f3bec7eac2b1ba1dc817a7" ]
[ "python/matplotlib/ee.py" ]
[ "import matplotlib.pyplot as plt\r\nx = [1, 2, 3, 4, 5]\r\ny = [2.3, 3.4, 1.2, 6.6, 7.0]\r\nplt.scatter(x, y, color='r', marker='+')\r\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.scatter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Adib234/AugLy
[ "35a6a5de07e64f465b8979e3257218551929e57a" ]
[ "augly/video/helpers/ffmpeg.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport io\nimport math\nimport os\nimport shutil\nfrom typing import Any, Dict, Optional, Union\n\nimport augly.audio.utils as audutils\nimport ffmpeg\nimport numpy as np\nfrom augly.utils import pathmgr, SILENT_AUDIO_PATH\nfrom augly.utils.ffmpeg import FFMPEG_PATH, FFPROBE_PATH\nfrom ffmpeg.nodes import FilterableStream\n\n\ndef combine_frames_and_audio_to_file(\n raw_frames: str,\n audio: Optional[Union[str, io.BytesIO]],\n output_path: str,\n framerate: float,\n) -> None:\n frame_dir = os.path.dirname(raw_frames)\n if not os.path.isdir(frame_dir):\n raise RuntimeError(\n f\"Got raw frames glob path of {raw_frames}, but {frame_dir} is not \"\n \"a directory\"\n )\n\n video_stream = ffmpeg.input(raw_frames, pattern_type=\"glob\", framerate=framerate)\n video_stream = video_stream.filter(\n \"pad\", **{\"width\": \"ceil(iw/2)*2\", \"height\": \"ceil(ih/2)*2\"}\n )\n merge_video_and_audio(video_stream, audio, output_path)\n\n\ndef extract_audio_to_file(video_path: str, output_audio_path: str) -> None:\n audio_info = get_audio_info(video_path)\n sample_rate = str(audio_info[\"sample_rate\"])\n codec = audio_info[\"codec_name\"]\n\n if os.path.splitext(output_audio_path)[-1] == \".aac\":\n (\n ffmpeg.input(video_path, loglevel=\"quiet\")\n .output(output_audio_path, acodec=codec, ac=1)\n .overwrite_output()\n .run(cmd=FFMPEG_PATH)\n )\n else:\n out, err = (\n ffmpeg.input(video_path, loglevel=\"quiet\")\n .output(\"-\", format=\"f32le\", acodec=\"pcm_f32le\", ac=1, ar=sample_rate)\n .run(cmd=FFMPEG_PATH, capture_stdout=True, capture_stderr=True)\n )\n audio = np.frombuffer(out, np.float32)\n audutils.ret_and_save_audio(audio, output_audio_path, int(sample_rate))\n\n\ndef extract_frames_to_dir(\n video_path: str,\n output_dir: str,\n output_pattern: str = \"raw_frame%08d.jpg\",\n quality: int = 0,\n scale: float = 1,\n) -> None:\n video_info = get_video_info(video_path)\n\n (\n ffmpeg.input(video_path, ss=0, loglevel=\"quiet\")\n .filter(\"scale\", f\"iw*{scale}\", f\"ih*{scale}\")\n .output(\n os.path.join(output_dir, output_pattern),\n vframes=video_info[\"nb_frames\"],\n **{\"qscale:v\": quality},\n )\n .overwrite_output()\n .run(cmd=FFMPEG_PATH)\n )\n\n\ndef get_audio_info(media_path: str) -> Dict[str, Any]:\n \"\"\"\n Returns whatever ffprobe returns. Of particular use are things such as the\n encoder (\"codec_name\") used for audio encoding, the sample rate (\"sample_rate\"),\n and length in seconds (\"duration\")\n\n Accepts as input either an audio or video path.\n \"\"\"\n try:\n local_media_path = pathmgr.get_local_path(media_path)\n except RuntimeError:\n raise FileNotFoundError(f\"Provided media path {media_path} does not exist\")\n\n probe = ffmpeg.probe(local_media_path, cmd=FFPROBE_PATH)\n audio_info = next(\n (stream for stream in probe[\"streams\"] if stream[\"codec_type\"] == \"audio\"),\n None,\n )\n\n assert (\n audio_info is not None\n ), \"Error retrieving audio metadata, please verify that an audio stream exists\"\n\n return audio_info\n\n\ndef get_video_fps(video_path: str) -> Optional[float]:\n video_info = get_video_info(video_path)\n\n try:\n frame_rate = video_info[\"avg_frame_rate\"]\n # ffmpeg often returns fractional framerates, e.g. 
225480/7523\n if \"/\" in frame_rate:\n num, denom = (float(f) for f in frame_rate.split(\"/\"))\n return num / denom\n else:\n return float(frame_rate)\n except Exception:\n return None\n\n\ndef get_video_info(video_path: str) -> Dict[str, Any]:\n \"\"\"\n Returns whatever ffprobe returns. Of particular use are things such as the FPS\n (\"avg_frame_rate\"), number of raw frames (\"nb_frames\"), height and width of each\n frame (\"height\", \"width\") and length in seconds (\"duration\")\n \"\"\"\n try:\n local_video_path = pathmgr.get_local_path(video_path)\n except RuntimeError:\n raise FileNotFoundError(f\"Provided video path {video_path} does not exist\")\n\n probe = ffmpeg.probe(local_video_path, cmd=FFPROBE_PATH)\n video_info = next(\n (stream for stream in probe[\"streams\"] if stream[\"codec_type\"] == \"video\"),\n None,\n )\n\n assert (\n video_info is not None\n ), \"Error retrieving video metadata, please verify that the video file exists\"\n\n return video_info\n\n\ndef has_audio_stream(video_path: str) -> bool:\n streams = ffmpeg.probe(video_path, cmd=FFPROBE_PATH)[\"streams\"]\n for stream in streams:\n if stream[\"codec_type\"] == \"audio\":\n return True\n return False\n\n\ndef add_silent_audio(\n video_path: str,\n output_path: Optional[str] = None,\n duration: Optional[float] = None,\n) -> None:\n local_video_path = pathmgr.get_local_path(video_path)\n if local_video_path != video_path:\n assert (\n output_path is not None\n ), \"If remote video_path is provided, an output_path must be provided\"\n video_path = local_video_path\n output_path = output_path or video_path\n\n if has_audio_stream(video_path):\n if video_path != output_path:\n shutil.copy(video_path, output_path)\n return\n\n duration = duration or float(get_video_info(video_path)[\"duration\"])\n video = ffmpeg.input(video_path).video\n silent_audio_path = pathmgr.get_local_path(SILENT_AUDIO_PATH)\n audio = ffmpeg.input(silent_audio_path, stream_loop=math.ceil(duration)).audio\n output = ffmpeg.output(video, audio, output_path, pix_fmt=\"yuv420p\", t=duration)\n output.overwrite_output().run(cmd=FFMPEG_PATH)\n\n\ndef merge_video_and_audio(\n video_stream: FilterableStream,\n audio: Optional[Union[str, io.BytesIO]],\n output_path: str,\n) -> None:\n kwargs = {\"c:v\": \"libx264\", \"c:a\": \"copy\", \"bsf:a\": \"aac_adtstoasc\"}\n if audio:\n audio_stream = ffmpeg.input(audio, loglevel=\"quiet\")\n output = ffmpeg.output(\n video_stream, audio_stream, output_path, pix_fmt=\"yuv420p\", **kwargs\n ).overwrite_output()\n else:\n output = ffmpeg.output(\n video_stream, output_path, pix_fmt=\"yuv420p\", **kwargs\n ).overwrite_output()\n\n output.run(cmd=FFMPEG_PATH)\n" ]
[ [ "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ueshin/mars
[ "0b542974243be4e0ff239eaf49ab0fb2935f3361", "0b542974243be4e0ff239eaf49ab0fb2935f3361", "0b542974243be4e0ff239eaf49ab0fb2935f3361", "0b542974243be4e0ff239eaf49ab0fb2935f3361", "0b542974243be4e0ff239eaf49ab0fb2935f3361" ]
[ "mars/lib/sparse/matrix.py", "mars/dataframe/arithmetic/tests/test_arithmetic.py", "mars/learn/decomposition/pca.py", "mars/learn/neighbors/tests/test_nearest_neighbors.py", "mars/tensor/linalg/norm.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom collections.abc import Iterable\n\nfrom .core import issparse, get_array_module, cp, cps, \\\n get_sparse_module, naked, sps, splinalg\nfrom .array import SparseNDArray, SparseArray\n\n\ndef zeros_sparse_matrix(shape, dtype=float, gpu=False):\n m = sps if not gpu else cps\n return SparseMatrix(m.csr_matrix(shape, dtype=np.dtype(dtype)))\n\n\ndef diag_sparse_matrix(v, k=0, gpu=False):\n v = naked(v)\n if gpu and get_array_module(v) is not cp:\n v = cp.asarray(v)\n if not gpu and get_array_module(v) is not np:\n v = v.get()\n\n if v.ndim == 1:\n sparse_m = sps if not gpu else cps\n m = n = v.size + k\n mat = sparse_m.spdiags(v[None], [k], m, n, format='csr')\n return SparseMatrix(mat)\n else:\n assert v.ndim == 2\n sparse_m = sps if not gpu else cps\n sparse_eye = sparse_m.eye(v.shape[0], v.shape[1], k=k)\n mat = sparse_eye.multiply(v).tocoo()\n size = sparse_eye.nnz\n col = mat.col - max(k, 0)\n row = get_array_module(col).zeros((len(col),))\n return SparseNDArray(sparse_m.csr_matrix((mat.data, (row, col)), shape=(1, size)),\n shape=(size,))\n\n\ndef eye_sparse_matrix(N, M=None, k=0, dtype=float, gpu=False):\n m = sps if not gpu else cps\n return SparseMatrix(m.eye(N, n=M, k=k, dtype=dtype, format='csr'))\n\n\ndef triu_sparse_matrix(m, k=0, gpu=False):\n m = naked(m)\n if gpu and get_array_module(m) is not cp:\n m = cp.asarray(m)\n if not gpu and get_array_module(m) is not np:\n m = m.get()\n\n sparse_m = sps if not gpu else cps\n mat = sparse_m.triu(m, k=k)\n return SparseMatrix(mat)\n\n\ndef tril_sparse_matrix(m, k=0, gpu=False):\n m = naked(m)\n if gpu and get_array_module(m) is not cp:\n m = cp.asarray(m)\n if not gpu and get_array_module(m) is not np:\n m = m.get()\n\n sparse_m = sps if not gpu else cps\n mat = sparse_m.tril(m, k=k)\n return SparseMatrix(mat)\n\n\ndef where(cond, x, y):\n cond, x, y = [SparseMatrix(i) if issparse(i) else i\n for i in (cond, x, y)]\n return cond * x + (cond * (-y) + y)\n\n\ndef lu_sparse_matrix(a):\n a = naked(a)\n a = a.tocsc()\n super_lu = splinalg.splu(a, permc_spec=\"NATURAL\", diag_pivot_thresh=0, options={\"SymmetricMode\": True})\n l = super_lu.L\n u = super_lu.U\n p = sps.lil_matrix(a.shape)\n p[super_lu.perm_r.copy(), np.arange(a.shape[1])] = 1\n return SparseMatrix(p), SparseMatrix(l), SparseMatrix(u),\n\n\ndef solve_triangular_sparse_matrix(a, b, lower=False, sparse=True):\n a = naked(a)\n b = b.toarray() if issparse(b) else b\n\n x = splinalg.spsolve_triangular(a, b, lower=lower)\n if sparse:\n spx = sps.csr_matrix(x).reshape(x.shape[0], 1) if len(x.shape) == 1 else sps.csr_matrix(x)\n return SparseNDArray(spx, shape=x.shape)\n else:\n return x\n\n\nclass SparseMatrix(SparseArray):\n __slots__ = 'spmatrix',\n\n def __init__(self, spmatrix, shape=()):\n if shape and len(shape) != 2:\n raise ValueError('Only accept 2-d array')\n if isinstance(spmatrix, SparseMatrix):\n 
self.spmatrix = spmatrix.spmatrix\n else:\n self.spmatrix = spmatrix.tocsr()\n\n @property\n def shape(self):\n return self.spmatrix.shape\n\n def transpose(self, axes=None):\n assert axes is None or tuple(axes) == (1, 0)\n return SparseMatrix(self.spmatrix.transpose())\n\n @property\n def T(self):\n return SparseMatrix(self.spmatrix.T)\n\n def dot(self, other, sparse=True):\n other_shape = other.shape\n try:\n other = naked(other)\n except TypeError:\n return NotImplemented\n\n if sparse:\n if len(other_shape) == 1:\n x = self.spmatrix.dot(other.T)\n else:\n x = self.spmatrix.dot(other)\n else:\n a = self.spmatrix.toarray()\n if issparse(other):\n other = other.toarray().reshape(other_shape)\n x = a.dot(other)\n if issparse(x):\n shape = (x.shape[0],) if len(other_shape) == 1 else x.shape\n return SparseNDArray(x, shape=shape)\n return get_array_module(x).asarray(x)\n\n def concatenate(self, other, axis=0):\n try:\n other = naked(other)\n except TypeError:\n return NotImplemented\n\n if issparse(other):\n xps = get_sparse_module(self.spmatrix)\n if axis not in (0, 1):\n raise ValueError('axis can only be 0 or 1')\n method = xps.vstack if axis == 0 else xps.hstack\n x = method((self.spmatrix, other))\n else:\n xp = get_array_module(self.spmatrix)\n x = xp.concatenate((self.spmatrix.toarray(), other), axis=axis)\n\n if issparse(x):\n return SparseMatrix(x)\n return get_array_module(x).asarray(x)\n\n def _reduction(self, method_name, axis=None, dtype=None, keepdims=None, todense=False, **kw):\n # TODO: support keepdims\n if isinstance(axis, tuple):\n if sorted(axis) != [0, 1]:\n assert len(axis) == 1\n axis = axis[0]\n else:\n axis = None\n\n if todense:\n x = self.spmatrix.toarray()\n x = getattr(get_array_module(x), method_name)(x, axis=axis, **kw)\n else:\n x = getattr(self.spmatrix, method_name)(axis=axis, **kw)\n if not isinstance(axis, Iterable):\n axis = (axis,)\n axis = list(range(len(self.shape))) if axis is None else axis\n shape = tuple(s if i not in axis else 1 for i, s in enumerate(self.shape)\n if keepdims or i not in axis)\n m = get_array_module(x)\n if issparse(x):\n return SparseNDArray(x, shape=shape)\n if m.isscalar(x):\n if keepdims:\n return m.array([x])[0].reshape((1,) * self.ndim)\n else:\n return m.array([x])[0]\n else:\n return m.asarray(x).reshape(shape)\n", "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport operator\nimport unittest\n\nimport numpy as np\nimport pandas as pd\n\nfrom mars.operands import OperandStage\nfrom mars.dataframe.core import IndexValue\nfrom mars.dataframe.operands import ObjectType\nfrom mars.dataframe.utils import hash_dtypes\nfrom mars.dataframe.utils import split_monotonic_index_min_max, \\\n build_split_idx_to_origin_idx, filter_index_value\nfrom mars.dataframe.datasource.dataframe import from_pandas, DataFrameDataSource\nfrom mars.dataframe.datasource.series import from_pandas as from_pandas_series, SeriesDataSource\nfrom mars.dataframe.arithmetic import 
DataFrameAbs, DataFrameAdd, DataFrameSubtract, \\\n DataFrameMul, DataFrameFloorDiv, DataFrameTrueDiv, DataFramePower, \\\n DataFrameEqual, DataFrameNotEqual, DataFrameGreater, DataFrameLess, \\\n DataFrameGreaterEqual, DataFrameLessEqual, DataFrameNot, \\\n DataFrameAnd, DataFrameOr, DataFrameXor\nfrom mars.dataframe.align import DataFrameIndexAlign, DataFrameShuffleProxy\nfrom mars.tiles import get_tiled\nfrom mars.tests.core import TestBase, parameterized\n\n\ndef comp_func(name, reverse_name):\n def inner(_, lhs, rhs):\n try:\n return getattr(lhs, name)(rhs)\n except AttributeError:\n return getattr(rhs, reverse_name)(lhs)\n return inner\n\n\nbinary_functions = dict(\n add=dict(func=operator.add, op=DataFrameAdd, func_name='add', rfunc_name='radd'),\n subtract=dict(func=operator.sub, op=DataFrameSubtract, func_name='sub', rfunc_name='rsub'),\n multiply=dict(func=operator.mul, op=DataFrameMul, func_name='mul', rfunc_name='rmul'),\n floordiv=dict(func=operator.floordiv, op=DataFrameFloorDiv,\n func_name='floordiv', rfunc_name='rfloordiv'),\n truediv=dict(func=operator.truediv, op=DataFrameTrueDiv,\n func_name='truediv', rfunc_name='rtruediv'),\n power=dict(func=operator.pow, op=DataFramePower,\n func_name='pow', rfunc_name='rpow'),\n equal=dict(func=comp_func('eq', 'eq'), op=DataFrameEqual,\n func_name='eq', rfunc_name='eq'),\n not_equal=dict(func=comp_func('ne', 'ne'), op=DataFrameNotEqual,\n func_name='ne', rfunc_name='ne'),\n greater=dict(func=comp_func('gt', 'lt'), op=DataFrameGreater,\n func_name='gt', rfunc_name='lt'),\n less=dict(func=comp_func('lt', 'gt'), op=DataFrameLess,\n func_name='lt', rfunc_name='gt'),\n greater_equal=dict(func=comp_func('ge', 'le'), op=DataFrameGreaterEqual,\n func_name='ge', rfunc_name='le'),\n less_equal=dict(func=comp_func('le', 'ge'), op=DataFrameLessEqual,\n func_name='le', rfunc_name='ge'),\n logical_and=dict(func=operator.and_, op=DataFrameAnd,\n func_name='__and__', rfunc_name='and'),\n logical_or=dict(func=operator.or_, op=DataFrameOr,\n func_name='__or__', rfunc_name='__ror__'),\n logical_xor=dict(func=operator.xor, op=DataFrameXor,\n func_name='__xor__', rfunc_name='__rxor__'),\n)\n\n\n@parameterized(**binary_functions)\nclass TestBinary(TestBase):\n def to_boolean_if_needed(self, value, split_value=0.5):\n if self.func_name in ['__and__', '__or__', '__xor__']:\n return value > split_value\n else:\n return value\n\n def testWithoutShuffle(self):\n # all the axes are monotonic\n # data1 with index split into [0...4], [5...9],\n # columns [3...7], [8...12]\n data1 = pd.DataFrame(np.random.rand(10, 10), index=np.arange(10),\n columns=np.arange(3, 13))\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=5)\n # data2 with index split into [6...11], [2, 5],\n # columns [4...9], [10, 13]\n data2 = pd.DataFrame(np.random.rand(10, 10), index=np.arange(11, 1, -1),\n columns=np.arange(4, 14))\n data2 = self.to_boolean_if_needed(data2)\n df2 = from_pandas(data2, chunk_size=6)\n\n df3 = self.func(df1, df2)\n\n # test df3's index and columns\n pd.testing.assert_index_equal(df3.columns_value.to_pandas(), self.func(data1, data2).columns)\n self.assertTrue(df3.columns_value.should_be_monotonic)\n self.assertIsInstance(df3.index_value.value, IndexValue.Int64Index)\n self.assertTrue(df3.index_value.should_be_monotonic)\n pd.testing.assert_index_equal(df3.index_value.to_pandas(), pd.Int64Index([]))\n self.assertNotEqual(df3.index_value.key, df1.index_value.key)\n self.assertNotEqual(df3.index_value.key, df2.index_value.key)\n 
self.assertEqual(df3.shape[1], 11) # columns is recorded, so we can get it\n\n df3 = df3.tiles()\n df1, df2 = get_tiled(df1), get_tiled(df2)\n\n # test df3's index and columns after tiling\n pd.testing.assert_index_equal(df3.columns_value.to_pandas(), self.func(data1, data2).columns)\n self.assertTrue(df3.columns_value.should_be_monotonic)\n self.assertIsInstance(df3.index_value.value, IndexValue.Int64Index)\n self.assertTrue(df3.index_value.should_be_monotonic)\n pd.testing.assert_index_equal(df3.index_value.to_pandas(), pd.Int64Index([]))\n self.assertNotEqual(df3.index_value.key, df1.index_value.key)\n self.assertNotEqual(df3.index_value.key, df2.index_value.key)\n self.assertEqual(df3.shape[1], 11) # columns is recorded, so we can get it\n\n data1_index_min_max = [(0, True, 4, True), (5, True, 9, True)]\n data1_columns_min_max = [[3, True, 7, True], [8, True, 12, True]]\n data2_index_min_max = [(2, True, 5, True), (6, True, 11, True)]\n data2_columns_min_max = [(4, True, 9, True), (10, True, 13, True)]\n\n left_index_splits, right_index_splits = split_monotonic_index_min_max(\n data1_index_min_max, True, data2_index_min_max, False)\n left_columns_splits, right_columns_splits = split_monotonic_index_min_max(\n data1_columns_min_max, True, data2_columns_min_max, True)\n\n left_index_idx_to_original_idx = build_split_idx_to_origin_idx(left_index_splits)\n right_index_idx_to_original_idx = build_split_idx_to_origin_idx(right_index_splits, False)\n left_columns_idx_to_original_idx = build_split_idx_to_origin_idx(left_columns_splits)\n right_columns_idx_to_original_idx = build_split_idx_to_origin_idx(right_columns_splits)\n\n self.assertEqual(df3.chunk_shape, (7, 7))\n for c in df3.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n # test shape\n idx = c.index\n # test the left side\n self.assertIsInstance(c.inputs[0].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[0].op.stage, OperandStage.map)\n left_row_idx, left_row_inner_idx = left_index_idx_to_original_idx[idx[0]]\n left_col_idx, left_col_inner_idx = left_columns_idx_to_original_idx[idx[1]]\n expect_df1_input = df1.cix[left_row_idx, left_col_idx].data\n self.assertIs(c.inputs[0].inputs[0], expect_df1_input)\n left_index_min_max = left_index_splits[left_row_idx][left_row_inner_idx]\n self.assertEqual(c.inputs[0].op.index_min, left_index_min_max[0])\n self.assertEqual(c.inputs[0].op.index_min_close, left_index_min_max[1])\n self.assertEqual(c.inputs[0].op.index_max, left_index_min_max[2])\n self.assertEqual(c.inputs[0].op.index_max_close, left_index_min_max[3])\n self.assertIsInstance(c.inputs[0].index_value.to_pandas(), type(data1.index))\n left_column_min_max = left_columns_splits[left_col_idx][left_col_inner_idx]\n self.assertEqual(c.inputs[0].op.column_min, left_column_min_max[0])\n self.assertEqual(c.inputs[0].op.column_min_close, left_column_min_max[1])\n self.assertEqual(c.inputs[0].op.column_max, left_column_min_max[2])\n self.assertEqual(c.inputs[0].op.column_max_close, left_column_min_max[3])\n expect_left_columns = filter_index_value(expect_df1_input.columns_value, left_column_min_max,\n store_data=True)\n pd.testing.assert_index_equal(c.inputs[0].columns_value.to_pandas(), expect_left_columns.to_pandas())\n pd.testing.assert_index_equal(c.inputs[0].dtypes.index, expect_left_columns.to_pandas())\n # test the right side\n self.assertIsInstance(c.inputs[1].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[1].op.stage, OperandStage.map)\n right_row_idx, right_row_inner_idx = 
right_index_idx_to_original_idx[idx[0]]\n right_col_idx, right_col_inner_idx = right_columns_idx_to_original_idx[idx[1]]\n expect_df2_input = df2.cix[right_row_idx, right_col_idx].data\n self.assertIs(c.inputs[1].inputs[0], expect_df2_input)\n right_index_min_max = right_index_splits[right_row_idx][right_row_inner_idx]\n self.assertEqual(c.inputs[1].op.index_min, right_index_min_max[0])\n self.assertEqual(c.inputs[1].op.index_min_close, right_index_min_max[1])\n self.assertEqual(c.inputs[1].op.index_max, right_index_min_max[2])\n self.assertEqual(c.inputs[1].op.index_max_close, right_index_min_max[3])\n self.assertIsInstance(c.inputs[1].index_value.to_pandas(), type(data2.index))\n right_column_min_max = right_columns_splits[right_col_idx][right_col_inner_idx]\n self.assertEqual(c.inputs[1].op.column_min, right_column_min_max[0])\n self.assertEqual(c.inputs[1].op.column_min_close, right_column_min_max[1])\n self.assertEqual(c.inputs[1].op.column_max, right_column_min_max[2])\n self.assertEqual(c.inputs[1].op.column_max_close, right_column_min_max[3])\n expect_right_columns = filter_index_value(expect_df2_input.columns_value, left_column_min_max,\n store_data=True)\n pd.testing.assert_index_equal(c.inputs[1].columns_value.to_pandas(), expect_right_columns.to_pandas())\n pd.testing.assert_index_equal(c.inputs[1].dtypes.index, expect_right_columns.to_pandas())\n\n def testDataFrameAndSeriesWithAlignMap(self):\n data1 = pd.DataFrame(np.random.rand(10, 10), index=np.arange(10),\n columns=np.arange(3, 13))\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=5)\n s1 = df1[3]\n\n df2 = self.func(df1, s1)\n df2 = df2.tiles()\n df1, s1 = get_tiled(df1), get_tiled(s1)\n\n self.assertEqual(df2.shape, (df1.shape[0], np.nan))\n self.assertEqual(df2.index_value.key, df1.index_value.key)\n\n data1_columns_min_max = [[3, True, 7, True], [8, True, 12, True]]\n data2_index_min_max = [(0, True, 4, True), (5, True, 9, True)]\n\n left_columns_splits, right_index_splits = split_monotonic_index_min_max(\n data1_columns_min_max, True, data2_index_min_max, True)\n\n left_columns_idx_to_original_idx = build_split_idx_to_origin_idx(left_columns_splits)\n right_index_idx_to_original_idx = build_split_idx_to_origin_idx(right_index_splits)\n\n self.assertEqual(df2.chunk_shape, (2, 7))\n for c in df2.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n # test shape\n idx = c.index\n # test the left side (dataframe)\n self.assertIsInstance(c.inputs[0].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[0].op.stage, OperandStage.map)\n left_col_idx, left_col_inner_idx = left_columns_idx_to_original_idx[idx[1]]\n expect_df1_input = df1.cix[idx[0], left_col_idx].data\n self.assertIs(c.inputs[0].inputs[0], expect_df1_input)\n left_column_min_max = left_columns_splits[left_col_idx][left_col_inner_idx]\n self.assertEqual(c.inputs[0].op.column_min, left_column_min_max[0])\n self.assertEqual(c.inputs[0].op.column_min_close, left_column_min_max[1])\n self.assertEqual(c.inputs[0].op.column_max, left_column_min_max[2])\n self.assertEqual(c.inputs[0].op.column_max_close, left_column_min_max[3])\n expect_left_columns = filter_index_value(expect_df1_input.columns_value, left_column_min_max,\n store_data=True)\n pd.testing.assert_index_equal(c.inputs[0].columns_value.to_pandas(), expect_left_columns.to_pandas())\n pd.testing.assert_index_equal(c.inputs[0].dtypes.index, expect_left_columns.to_pandas())\n\n # test the right side (series)\n self.assertIsInstance(c.inputs[1].op, 
DataFrameIndexAlign)\n self.assertEqual(c.inputs[1].op.stage, OperandStage.map)\n right_row_idx, right_row_inner_idx = right_index_idx_to_original_idx[idx[1]]\n expect_s1_input = s1.cix[(right_row_idx,)].data\n self.assertIs(c.inputs[1].inputs[0], expect_s1_input)\n right_index_min_max = right_index_splits[right_row_idx][right_row_inner_idx]\n self.assertEqual(c.inputs[1].op.index_min, right_index_min_max[0])\n self.assertEqual(c.inputs[1].op.index_min_close, right_index_min_max[1])\n self.assertEqual(c.inputs[1].op.index_max, right_index_min_max[2])\n self.assertEqual(c.inputs[1].op.index_max_close, right_index_min_max[3])\n self.assertIsInstance(c.inputs[1].index_value.to_pandas(), type(data1[3].index))\n\n def testDataFrameAndSeriesIdentical(self):\n data1 = pd.DataFrame(np.random.rand(10, 10), index=np.arange(10),\n columns=np.arange(10))\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=5)\n s1 = from_pandas_series(data1[3], chunk_size=5)\n\n df2 = self.func(df1, s1)\n df2 = df2.tiles()\n df1, s1 = get_tiled(df1), get_tiled(s1)\n\n self.assertEqual(df2.shape, (10, 10))\n self.assertEqual(df2.index_value.key, df1.index_value.key)\n self.assertEqual(df2.columns_value.key, df1.columns_value.key)\n self.assertEqual(df2.columns_value.key, s1.index_value.key)\n\n self.assertEqual(df2.chunk_shape, (2, 2))\n for c in df2.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n self.assertEqual(c.shape, (5, 5))\n self.assertEqual(c.index_value.key, df1.cix[c.index].index_value.key)\n self.assertEqual(c.index_value.key, df2.cix[c.index].index_value.key)\n self.assertEqual(c.columns_value.key, df1.cix[c.index].columns_value.key)\n self.assertEqual(c.columns_value.key, df2.cix[c.index].columns_value.key)\n pd.testing.assert_index_equal(c.columns_value.to_pandas(), df1.cix[c.index].columns_value.to_pandas())\n pd.testing.assert_index_equal(c.columns_value.to_pandas(), df2.cix[c.index].columns_value.to_pandas())\n pd.testing.assert_index_equal(c.dtypes.index, df1.cix[c.index].columns_value.to_pandas())\n\n # test the left side\n self.assertIsInstance(c.inputs[0].op, DataFrameDataSource)\n self.assertIs(c.inputs[0], df1.cix[c.index].data)\n # test the right side\n self.assertIsInstance(c.inputs[1].op, SeriesDataSource)\n self.assertIs(c.inputs[1], s1.cix[(c.index[1],)].data)\n\n def testDataFrameAndSeriesWithShuffle(self):\n data1 = pd.DataFrame(np.random.rand(10, 10),\n index=[4, 9, 3, 2, 1, 5, 8, 6, 7, 10],\n columns=[4, 1, 3, 2, 10, 5, 9, 8, 6, 7])\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=5)\n s1 = from_pandas_series(data1[10], chunk_size=6)\n\n df2 = self.func(df1, s1)\n\n # test df2's index and columns\n self.assertEqual(df2.shape, (df1.shape[0], np.nan))\n self.assertEqual(df2.index_value.key, df1.index_value.key)\n pd.testing.assert_index_equal(df2.columns_value.to_pandas(), pd.Int64Index([]))\n self.assertNotEqual(df2.columns_value.key, df1.columns_value.key)\n self.assertTrue(df2.columns_value.should_be_monotonic)\n\n df2 = df2.tiles()\n df1, s1 = get_tiled(df1), get_tiled(s1)\n\n self.assertEqual(df2.chunk_shape, (2, 2))\n for c in df2.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n idx = c.index\n # test the left side\n self.assertIsInstance(c.inputs[0].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[0].op.stage, OperandStage.reduce)\n expect_dtypes = pd.concat([hash_dtypes(ic.inputs[0].op.data.dtypes, 2)[c.index[1]]\n for ic in 
c.inputs[0].inputs[0].inputs])\n pd.testing.assert_series_equal(c.inputs[0].dtypes, expect_dtypes)\n pd.testing.assert_index_equal(c.inputs[0].columns_value.to_pandas(), c.inputs[0].dtypes.index)\n pd.testing.assert_index_equal(c.inputs[0].index_value.to_pandas(), c.index_value.to_pandas())\n self.assertIsInstance(c.inputs[0].inputs[0].op, DataFrameShuffleProxy)\n for j, ci, ic in zip(itertools.count(0), c.inputs[0].inputs[0].inputs, df1.cix[idx[0], :]):\n self.assertIsInstance(ci.op, DataFrameIndexAlign)\n self.assertEqual(ci.op.stage, OperandStage.map)\n self.assertEqual(ci.index, (idx[0], j))\n self.assertTrue(ci.op.column_shuffle_size, 2)\n shuffle_segments = ci.op.column_shuffle_segments\n expected_shuffle_segments = hash_dtypes(ic.data.dtypes, 2)\n self.assertEqual(len(shuffle_segments), len(expected_shuffle_segments))\n for ss, ess in zip(shuffle_segments, expected_shuffle_segments):\n pd.testing.assert_series_equal(ss, ess)\n self.assertIs(ci.inputs[0], ic.data)\n\n # test the right side\n self.assertIsInstance(c.inputs[1].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[1].op.stage, OperandStage.reduce)\n self.assertEqual(c.inputs[1].op.object_type, ObjectType.series)\n self.assertIsInstance(c.inputs[1].inputs[0].op, DataFrameShuffleProxy)\n for j, ci, ic in zip(itertools.count(0), c.inputs[1].inputs[0].inputs, s1.chunks):\n self.assertIsInstance(ci.op, DataFrameIndexAlign)\n self.assertEqual(ci.op.stage, OperandStage.map)\n self.assertEqual(ci.index, (j,))\n self.assertTrue(ci.op.index_shuffle_size, 2)\n self.assertIs(ci.inputs[0], ic.data)\n\n # make sure shuffle proxies' key are different\n proxy_keys = set()\n for i in range(df2.chunk_shape[0]):\n cs = [c for c in df2.chunks if c.index[0] == i]\n lps = {c.inputs[0].inputs[0].op.key for c in cs}\n self.assertEqual(len(lps), 1)\n proxy_keys.add(lps.pop())\n rps = {c.inputs[1].inputs[0].op.key for c in cs}\n self.assertEqual(len(rps), 1)\n proxy_keys.add(rps.pop())\n self.assertEqual(len(proxy_keys), df2.chunk_shape[0] + 1)\n\n def testSeriesAndSeriesWithAlignMap(self):\n data1 = pd.DataFrame(np.random.rand(10, 10), index=np.arange(10),\n columns=np.arange(3, 13))\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=5)\n\n s1 = df1.iloc[4]\n s2 = df1[3]\n\n s3 = self.func(s1, s2)\n s3 = s3.tiles()\n s1, s2 = get_tiled(s1), get_tiled(s2)\n\n self.assertEqual(s3.shape, (np.nan,))\n\n s1_index_min_max = [[3, True, 7, True], [8, True, 12, True]]\n s2_index_min_max = [(0, True, 4, True), (5, True, 9, True)]\n\n left_index_splits, right_index_splits = split_monotonic_index_min_max(\n s1_index_min_max, True, s2_index_min_max, True)\n\n left_index_idx_to_original_idx = build_split_idx_to_origin_idx(left_index_splits)\n right_index_idx_to_original_idx = build_split_idx_to_origin_idx(right_index_splits)\n\n self.assertEqual(s3.chunk_shape, (7,))\n for c in s3.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n # test shape\n idx = c.index\n # test the left side (series)\n self.assertIsInstance(c.inputs[0].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[0].op.stage, OperandStage.map)\n left_col_idx, left_col_inner_idx = left_index_idx_to_original_idx[idx[0]]\n expect_s1_input = s1.cix[(left_col_idx,)].data\n self.assertIs(c.inputs[0].inputs[0], expect_s1_input)\n left_index_min_max = left_index_splits[left_col_idx][left_col_inner_idx]\n self.assertEqual(c.inputs[0].op.index_min, left_index_min_max[0])\n self.assertEqual(c.inputs[0].op.index_min_close, 
left_index_min_max[1])\n self.assertEqual(c.inputs[0].op.index_max, left_index_min_max[2])\n self.assertEqual(c.inputs[0].op.index_max_close, left_index_min_max[3])\n self.assertIsInstance(c.inputs[0].index_value.to_pandas(), type(data1.iloc[4].index))\n expect_left_index = filter_index_value(expect_s1_input.index_value, left_index_min_max,\n store_data=True)\n pd.testing.assert_index_equal(c.inputs[0].index_value.to_pandas(), expect_left_index.to_pandas())\n\n # test the right side (series)\n self.assertIsInstance(c.inputs[1].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[1].op.stage, OperandStage.map)\n right_row_idx, right_row_inner_idx = right_index_idx_to_original_idx[idx[0]]\n expect_s2_input = s2.cix[(right_row_idx,)].data\n self.assertIs(c.inputs[1].inputs[0], expect_s2_input)\n right_index_min_max = right_index_splits[right_row_idx][right_row_inner_idx]\n self.assertEqual(c.inputs[1].op.index_min, right_index_min_max[0])\n self.assertEqual(c.inputs[1].op.index_min_close, right_index_min_max[1])\n self.assertEqual(c.inputs[1].op.index_max, right_index_min_max[2])\n self.assertEqual(c.inputs[1].op.index_max_close, right_index_min_max[3])\n self.assertIsInstance(c.inputs[1].index_value.to_pandas(), type(data1[3].index))\n expect_right_index = filter_index_value(expect_s2_input.index_value, right_index_min_max,\n store_data=True)\n pd.testing.assert_index_equal(c.inputs[1].index_value.to_pandas(), expect_right_index.to_pandas())\n\n def testSeriesAndSeriesIdentical(self):\n data1 = pd.DataFrame(np.random.rand(10, 10), index=np.arange(10),\n columns=np.arange(10))\n data1 = self.to_boolean_if_needed(data1)\n s1 = from_pandas_series(data1[1], chunk_size=5)\n s2 = from_pandas_series(data1[3], chunk_size=5)\n\n s3 = self.func(s1, s2)\n s3 = s3.tiles()\n s1, s2 = get_tiled(s1), get_tiled(s2)\n\n self.assertEqual(s3.shape, (10,))\n self.assertEqual(s3.index_value.key, s1.index_value.key)\n self.assertEqual(s3.index_value.key, s2.index_value.key)\n\n self.assertEqual(s3.chunk_shape, (2,))\n for c in s3.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(c.op.object_type, ObjectType.series)\n self.assertEqual(len(c.inputs), 2)\n self.assertEqual(c.shape, (5,))\n self.assertEqual(c.index_value.key, s1.cix[c.index].index_value.key)\n self.assertEqual(c.index_value.key, s2.cix[c.index].index_value.key)\n\n # test the left side\n self.assertIsInstance(c.inputs[0].op, SeriesDataSource)\n self.assertIs(c.inputs[0], s1.cix[c.index].data)\n # test the right side\n self.assertIsInstance(c.inputs[1].op, SeriesDataSource)\n self.assertIs(c.inputs[1], s2.cix[c.index].data)\n\n def testSeriesAndSeriesWithShuffle(self):\n data1 = pd.DataFrame(np.random.rand(10, 10),\n index=[4, 9, 3, 2, 1, 5, 8, 6, 7, 10],\n columns=[4, 1, 3, 2, 10, 5, 9, 8, 6, 7])\n data1 = self.to_boolean_if_needed(data1)\n s1 = from_pandas_series(data1.iloc[4], chunk_size=5)\n s2 = from_pandas_series(data1[10], chunk_size=6)\n\n s3 = self.func(s1, s2)\n\n # test s3's index\n self.assertEqual(s3.shape, (np.nan,))\n self.assertNotEqual(s3.index_value.key, s1.index_value.key)\n self.assertNotEqual(s3.index_value.key, s2.index_value.key)\n pd.testing.assert_index_equal(s3.index_value.to_pandas(), pd.Int64Index([]))\n self.assertTrue(s3.index_value.should_be_monotonic)\n\n s3 = s3.tiles()\n s1, s2 = get_tiled(s1), get_tiled(s2)\n\n self.assertEqual(s3.chunk_shape, (2,))\n for c in s3.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n # test the left side\n 
self.assertIsInstance(c.inputs[0].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[0].op.stage, OperandStage.reduce)\n self.assertEqual(c.inputs[0].op.object_type, ObjectType.series)\n self.assertIsInstance(c.inputs[0].inputs[0].op, DataFrameShuffleProxy)\n for j, ci, ic in zip(itertools.count(0), c.inputs[0].inputs[0].inputs, s1.chunks):\n self.assertIsInstance(ci.op, DataFrameIndexAlign)\n self.assertEqual(ci.op.stage, OperandStage.map)\n self.assertEqual(ci.index, (j,))\n self.assertTrue(ci.op.index_shuffle_size, 2)\n self.assertIs(ci.inputs[0], ic.data)\n\n # test the right side\n self.assertIsInstance(c.inputs[1].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[1].op.stage, OperandStage.reduce)\n self.assertEqual(c.inputs[1].op.object_type, ObjectType.series)\n self.assertIsInstance(c.inputs[1].inputs[0].op, DataFrameShuffleProxy)\n for j, ci, ic in zip(itertools.count(0), c.inputs[1].inputs[0].inputs, s2.chunks):\n self.assertIsInstance(ci.op, DataFrameIndexAlign)\n self.assertEqual(ci.op.stage, OperandStage.map)\n self.assertEqual(ci.index, (j,))\n self.assertTrue(ci.op.index_shuffle_size, 2)\n self.assertIs(ci.inputs[0], ic.data)\n\n # make sure shuffle proxies' key are different\n proxy_keys = set()\n for c in s3.chunks:\n proxy_keys.add(c.inputs[0].inputs[0].op.key)\n proxy_keys.add(c.inputs[1].inputs[0].op.key)\n self.assertEqual(len(proxy_keys), 2)\n\n def testIdenticalIndexAndColumns(self):\n data1 = pd.DataFrame(np.random.rand(10, 10),\n columns=np.arange(3, 13))\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=5)\n data2 = pd.DataFrame(np.random.rand(10, 10),\n columns=np.arange(3, 13))\n data2 = self.to_boolean_if_needed(data2)\n df2 = from_pandas(data2, chunk_size=5)\n\n df3 = self.func(df1, df2)\n\n # test df3's index and columns\n pd.testing.assert_index_equal(df3.columns_value.to_pandas(), self.func(data1, data2).columns)\n self.assertFalse(df3.columns_value.should_be_monotonic)\n self.assertIsInstance(df3.index_value.value, IndexValue.RangeIndex)\n self.assertFalse(df3.index_value.should_be_monotonic)\n pd.testing.assert_index_equal(df3.index_value.to_pandas(), pd.RangeIndex(0, 10))\n self.assertEqual(df3.index_value.key, df1.index_value.key)\n self.assertEqual(df3.index_value.key, df2.index_value.key)\n self.assertEqual(df3.shape, (10, 10)) # columns is recorded, so we can get it\n\n df3 = df3.tiles()\n df1, df2 = get_tiled(df1), get_tiled(df2)\n\n self.assertEqual(df3.chunk_shape, (2, 2))\n for c in df3.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n self.assertEqual(c.shape, (5, 5))\n self.assertEqual(c.index_value.key, df1.cix[c.index].index_value.key)\n self.assertEqual(c.index_value.key, df2.cix[c.index].index_value.key)\n self.assertEqual(c.columns_value.key, df1.cix[c.index].columns_value.key)\n self.assertEqual(c.columns_value.key, df2.cix[c.index].columns_value.key)\n pd.testing.assert_index_equal(c.columns_value.to_pandas(), df1.cix[c.index].columns_value.to_pandas())\n pd.testing.assert_index_equal(c.columns_value.to_pandas(), df2.cix[c.index].columns_value.to_pandas())\n pd.testing.assert_index_equal(c.dtypes.index, df1.cix[c.index].columns_value.to_pandas())\n\n # test the left side\n self.assertIs(c.inputs[0], df1.cix[c.index].data)\n # test the right side\n self.assertIs(c.inputs[1], df2.cix[c.index].data)\n\n def testWithOneShuffle(self):\n # only 1 axis is monotonic\n # data1 with index split into [0...4], [5...9],\n data1 = pd.DataFrame(np.random.rand(10, 10), 
index=np.arange(10),\n columns=[4, 1, 3, 2, 10, 5, 9, 8, 6, 7])\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=5)\n # data2 with index split into [6...11], [2, 5],\n data2 = pd.DataFrame(np.random.rand(10, 10), index=np.arange(11, 1, -1),\n columns=[5, 9, 12, 3, 11, 10, 6, 4, 1, 2])\n data2 = self.to_boolean_if_needed(data2)\n df2 = from_pandas(data2, chunk_size=6)\n\n df3 = self.func(df1, df2)\n\n # test df3's index and columns\n pd.testing.assert_index_equal(df3.columns_value.to_pandas(), self.func(data1, data2).columns)\n self.assertTrue(df3.columns_value.should_be_monotonic)\n self.assertIsInstance(df3.index_value.value, IndexValue.Int64Index)\n self.assertTrue(df3.index_value.should_be_monotonic)\n pd.testing.assert_index_equal(df3.index_value.to_pandas(), pd.Int64Index([]))\n self.assertNotEqual(df3.index_value.key, df1.index_value.key)\n self.assertNotEqual(df3.index_value.key, df2.index_value.key)\n self.assertEqual(df3.shape[1], 12) # columns is recorded, so we can get it\n\n df3 = df3.tiles()\n df1, df2 = get_tiled(df1), get_tiled(df2)\n\n data1_index_min_max = [(0, True, 4, True), (5, True, 9, True)]\n data2_index_min_max = [(2, True, 5, True), (6, True, 11, True)]\n\n left_index_splits, right_index_splits = split_monotonic_index_min_max(\n data1_index_min_max, True, data2_index_min_max, False)\n\n left_index_idx_to_original_idx = build_split_idx_to_origin_idx(left_index_splits)\n right_index_idx_to_original_idx = build_split_idx_to_origin_idx(right_index_splits, False)\n\n self.assertEqual(df3.chunk_shape, (7, 2))\n for c in df3.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n idx = c.index\n # test the left side\n self.assertIsInstance(c.inputs[0].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[0].op.stage, OperandStage.reduce)\n expect_dtypes = pd.concat([hash_dtypes(ic.inputs[0].op.data.dtypes, 2)[c.index[1]]\n for ic in c.inputs[0].inputs[0].inputs])\n pd.testing.assert_series_equal(c.inputs[0].dtypes, expect_dtypes)\n pd.testing.assert_index_equal(c.inputs[0].columns_value.to_pandas(), c.inputs[0].dtypes.index)\n self.assertIsInstance(c.inputs[0].index_value.to_pandas(), type(data1.index))\n self.assertIsInstance(c.inputs[0].inputs[0].op, DataFrameShuffleProxy)\n left_row_idx, left_row_inner_idx = left_index_idx_to_original_idx[idx[0]]\n left_index_min_max = left_index_splits[left_row_idx][left_row_inner_idx]\n ics = [ic for ic in df1.chunks if ic.index[0] == left_row_idx]\n for j, ci, ic in zip(itertools.count(0), c.inputs[0].inputs[0].inputs, ics):\n self.assertIsInstance(ci.op, DataFrameIndexAlign)\n self.assertEqual(ci.op.stage, OperandStage.map)\n self.assertEqual(ci.index, (idx[0], j))\n self.assertEqual(ci.op.index_min, left_index_min_max[0])\n self.assertEqual(ci.op.index_min_close, left_index_min_max[1])\n self.assertEqual(ci.op.index_max, left_index_min_max[2])\n self.assertEqual(ci.op.index_max_close, left_index_min_max[3])\n self.assertIsInstance(ci.index_value.to_pandas(), type(data1.index))\n self.assertTrue(ci.op.column_shuffle_size, 2)\n shuffle_segments = ci.op.column_shuffle_segments\n expected_shuffle_segments = hash_dtypes(ic.data.dtypes, 2)\n self.assertEqual(len(shuffle_segments), len(expected_shuffle_segments))\n for ss, ess in zip(shuffle_segments, expected_shuffle_segments):\n pd.testing.assert_series_equal(ss, ess)\n self.assertIs(ci.inputs[0], ic.data)\n # test the right side\n self.assertIsInstance(c.inputs[1].op, DataFrameIndexAlign)\n 
self.assertEqual(c.inputs[1].op.stage, OperandStage.reduce)\n expect_dtypes = pd.concat([hash_dtypes(ic.inputs[0].op.data.dtypes, 2)[c.index[1]]\n for ic in c.inputs[1].inputs[0].inputs])\n pd.testing.assert_series_equal(c.inputs[1].dtypes, expect_dtypes)\n pd.testing.assert_index_equal(c.inputs[1].columns_value.to_pandas(), c.inputs[1].dtypes.index)\n self.assertIsInstance(c.inputs[1].index_value.to_pandas(), type(data1.index))\n self.assertIsInstance(c.inputs[1].inputs[0].op, DataFrameShuffleProxy)\n right_row_idx, right_row_inner_idx = right_index_idx_to_original_idx[idx[0]]\n right_index_min_max = right_index_splits[right_row_idx][right_row_inner_idx]\n ics = [ic for ic in df2.chunks if ic.index[0] == right_row_idx]\n for j, ci, ic in zip(itertools.count(0), c.inputs[1].inputs[0].inputs, ics):\n self.assertIsInstance(ci.op, DataFrameIndexAlign)\n self.assertEqual(ci.op.stage, OperandStage.map)\n self.assertEqual(ci.index, (idx[0], j))\n self.assertEqual(ci.op.index_min, right_index_min_max[0])\n self.assertEqual(ci.op.index_min_close, right_index_min_max[1])\n self.assertEqual(ci.op.index_max, right_index_min_max[2])\n self.assertEqual(ci.op.index_max_close, right_index_min_max[3])\n self.assertTrue(ci.op.column_shuffle_size, 2)\n shuffle_segments = ci.op.column_shuffle_segments\n expected_shuffle_segments = hash_dtypes(ic.data.dtypes, 2)\n self.assertEqual(len(shuffle_segments), len(expected_shuffle_segments))\n for ss, ess in zip(shuffle_segments, expected_shuffle_segments):\n pd.testing.assert_series_equal(ss, ess)\n self.assertIs(ci.inputs[0], ic.data)\n\n # make sure shuffle proxies' key are different\n proxy_keys = set()\n for i in range(df3.chunk_shape[0]):\n cs = [c for c in df3.chunks if c.index[0] == i]\n lps = {c.inputs[0].inputs[0].op.key for c in cs}\n self.assertEqual(len(lps), 1)\n proxy_keys.add(lps.pop())\n rps = {c.inputs[1].inputs[0].op.key for c in cs}\n self.assertEqual(len(rps), 1)\n proxy_keys.add(rps.pop())\n self.assertEqual(len(proxy_keys), 2 * df3.chunk_shape[0])\n\n def testWithAllShuffle(self):\n # no axis is monotonic\n data1 = pd.DataFrame(np.random.rand(10, 10), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9],\n columns=[4, 1, 3, 2, 10, 5, 9, 8, 6, 7])\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=5)\n data2 = pd.DataFrame(np.random.rand(10, 10), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3],\n columns=[5, 9, 12, 3, 11, 10, 6, 4, 1, 2])\n data2 = self.to_boolean_if_needed(data2)\n df2 = from_pandas(data2, chunk_size=6)\n\n df3 = self.func(df1, df2)\n\n # test df3's index and columns\n pd.testing.assert_index_equal(df3.columns_value.to_pandas(), self.func(data1, data2).columns)\n self.assertTrue(df3.columns_value.should_be_monotonic)\n self.assertIsInstance(df3.index_value.value, IndexValue.Int64Index)\n self.assertTrue(df3.index_value.should_be_monotonic)\n pd.testing.assert_index_equal(df3.index_value.to_pandas(), pd.Int64Index([]))\n self.assertNotEqual(df3.index_value.key, df1.index_value.key)\n self.assertNotEqual(df3.index_value.key, df2.index_value.key)\n self.assertEqual(df3.shape[1], 12) # columns is recorded, so we can get it\n\n df3 = df3.tiles()\n df1, df2 = get_tiled(df1), get_tiled(df2)\n\n self.assertEqual(df3.chunk_shape, (2, 2))\n proxy_keys = set()\n for c in df3.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n # test left side\n self.assertIsInstance(c.inputs[0].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[0].op.stage, OperandStage.reduce)\n expect_dtypes = 
pd.concat([hash_dtypes(ic.inputs[0].op.data.dtypes, 2)[c.index[1]]\n for ic in c.inputs[0].inputs[0].inputs if ic.index[0] == 0])\n pd.testing.assert_series_equal(c.inputs[0].dtypes, expect_dtypes)\n pd.testing.assert_index_equal(c.inputs[0].columns_value.to_pandas(), c.inputs[0].dtypes.index)\n self.assertIsInstance(c.inputs[0].index_value.to_pandas(), type(data1.index))\n self.assertIsInstance(c.inputs[0].inputs[0].op, DataFrameShuffleProxy)\n proxy_keys.add(c.inputs[0].inputs[0].op.key)\n for ic, ci in zip(c.inputs[0].inputs[0].inputs, df1.chunks):\n self.assertIsInstance(ic.op, DataFrameIndexAlign)\n self.assertEqual(ic.op.stage, OperandStage.map)\n self.assertEqual(ic.op.index_shuffle_size, 2)\n self.assertIsInstance(ic.index_value.to_pandas(), type(data1.index))\n self.assertEqual(ic.op.column_shuffle_size, 2)\n self.assertIsNotNone(ic.columns_value)\n shuffle_segments = ic.op.column_shuffle_segments\n expected_shuffle_segments = hash_dtypes(ci.data.dtypes, 2)\n self.assertEqual(len(shuffle_segments), len(expected_shuffle_segments))\n for ss, ess in zip(shuffle_segments, expected_shuffle_segments):\n pd.testing.assert_series_equal(ss, ess)\n self.assertIs(ic.inputs[0], ci.data)\n # test right side\n self.assertIsInstance(c.inputs[1].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[1].op.stage, OperandStage.reduce)\n expect_dtypes = pd.concat([hash_dtypes(ic.inputs[0].op.data.dtypes, 2)[c.index[1]]\n for ic in c.inputs[1].inputs[0].inputs if ic.index[0] == 0])\n pd.testing.assert_series_equal(c.inputs[1].dtypes, expect_dtypes)\n pd.testing.assert_index_equal(c.inputs[1].columns_value.to_pandas(), c.inputs[1].dtypes.index)\n self.assertIsInstance(c.inputs[0].index_value.to_pandas(), type(data1.index))\n self.assertIsInstance(c.inputs[1].inputs[0].op, DataFrameShuffleProxy)\n proxy_keys.add(c.inputs[1].inputs[0].op.key)\n for ic, ci in zip(c.inputs[1].inputs[0].inputs, df2.chunks):\n self.assertIsInstance(ic.op, DataFrameIndexAlign)\n self.assertEqual(ic.op.stage, OperandStage.map)\n self.assertEqual(ic.op.index_shuffle_size, 2)\n self.assertIsInstance(ic.index_value.to_pandas(), type(data1.index))\n self.assertEqual(ic.op.column_shuffle_size, 2)\n self.assertIsNotNone(ic.columns_value)\n shuffle_segments = ic.op.column_shuffle_segments\n expected_shuffle_segments = hash_dtypes(ci.data.dtypes, 2)\n self.assertEqual(len(shuffle_segments), len(expected_shuffle_segments))\n for ss, ess in zip(shuffle_segments, expected_shuffle_segments):\n pd.testing.assert_series_equal(ss, ess)\n self.assertIs(ic.inputs[0], ci.data)\n\n self.assertEqual(len(proxy_keys), 2)\n\n data4 = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),\n columns=[np.random.bytes(10) for _ in range(10)])\n data4 = self.to_boolean_if_needed(data4)\n df4 = from_pandas(data4, chunk_size=3)\n\n data5 = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),\n columns=[np.random.bytes(10) for _ in range(10)])\n data5 = self.to_boolean_if_needed(data5)\n df5 = from_pandas(data5, chunk_size=3)\n\n df6 = self.func(df4, df5)\n\n # test df6's index and columns\n pd.testing.assert_index_equal(df6.columns_value.to_pandas(), self.func(data4, data5).columns)\n self.assertTrue(df6.columns_value.should_be_monotonic)\n self.assertIsInstance(df6.index_value.value, IndexValue.Int64Index)\n self.assertTrue(df6.index_value.should_be_monotonic)\n pd.testing.assert_index_equal(df6.index_value.to_pandas(), pd.Int64Index([]))\n self.assertNotEqual(df6.index_value.key, 
df4.index_value.key)\n self.assertNotEqual(df6.index_value.key, df5.index_value.key)\n self.assertEqual(df6.shape[1], 20) # columns is recorded, so we can get it\n\n df6 = df6.tiles()\n df4, df5 = get_tiled(df4), get_tiled(df5)\n\n self.assertEqual(df6.chunk_shape, (4, 4))\n proxy_keys = set()\n for c in df6.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n # test left side\n self.assertIsInstance(c.inputs[0].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[0].op.stage, OperandStage.reduce)\n expect_dtypes = pd.concat([hash_dtypes(ic.inputs[0].op.data.dtypes, 4)[c.index[1]]\n for ic in c.inputs[0].inputs[0].inputs if ic.index[0] == 0])\n pd.testing.assert_series_equal(c.inputs[0].dtypes, expect_dtypes)\n pd.testing.assert_index_equal(c.inputs[0].columns_value.to_pandas(), c.inputs[0].dtypes.index)\n self.assertIsInstance(c.inputs[0].index_value.to_pandas(), type(data1.index))\n self.assertIsInstance(c.inputs[0].inputs[0].op, DataFrameShuffleProxy)\n proxy_keys.add(c.inputs[0].inputs[0].op.key)\n for ic, ci in zip(c.inputs[0].inputs[0].inputs, df4.chunks):\n self.assertIsInstance(ic.op, DataFrameIndexAlign)\n self.assertEqual(ic.op.stage, OperandStage.map)\n self.assertEqual(ic.op.index_shuffle_size, 4)\n self.assertIsInstance(ic.index_value.to_pandas(), type(data1.index))\n self.assertEqual(ic.op.column_shuffle_size, 4)\n self.assertIsNotNone(ic.columns_value)\n shuffle_segments = ic.op.column_shuffle_segments\n expected_shuffle_segments = hash_dtypes(ci.data.dtypes, 4)\n self.assertEqual(len(shuffle_segments), len(expected_shuffle_segments))\n for ss, ess in zip(shuffle_segments, expected_shuffle_segments):\n pd.testing.assert_series_equal(ss, ess)\n self.assertIs(ic.inputs[0], ci.data)\n # test right side\n self.assertIsInstance(c.inputs[1].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[1].op.stage, OperandStage.reduce)\n expect_dtypes = pd.concat([hash_dtypes(ic.inputs[0].op.data.dtypes, 4)[c.index[1]]\n for ic in c.inputs[1].inputs[0].inputs if ic.index[0] == 0])\n pd.testing.assert_series_equal(c.inputs[1].dtypes, expect_dtypes)\n pd.testing.assert_index_equal(c.inputs[1].columns_value.to_pandas(), c.inputs[1].dtypes.index)\n self.assertIsInstance(c.inputs[0].index_value.to_pandas(), type(data1.index))\n self.assertIsInstance(c.inputs[1].inputs[0].op, DataFrameShuffleProxy)\n proxy_keys.add(c.inputs[1].inputs[0].op.key)\n for ic, ci in zip(c.inputs[1].inputs[0].inputs, df5.chunks):\n self.assertIsInstance(ic.op, DataFrameIndexAlign)\n self.assertEqual(ic.op.stage, OperandStage.map)\n self.assertEqual(ic.op.index_shuffle_size, 4)\n self.assertIsInstance(ic.index_value.to_pandas(), type(data1.index))\n self.assertEqual(ic.op.column_shuffle_size, 4)\n self.assertIsNotNone(ic.columns_value)\n shuffle_segments = ic.op.column_shuffle_segments\n expected_shuffle_segments = hash_dtypes(ci.data.dtypes, 4)\n self.assertEqual(len(shuffle_segments), len(expected_shuffle_segments))\n for ss, ess in zip(shuffle_segments, expected_shuffle_segments):\n pd.testing.assert_series_equal(ss, ess)\n self.assertIs(ic.inputs[0], ci.data)\n\n self.assertEqual(len(proxy_keys), 2)\n\n def testWithoutShuffleAndWithOneChunk(self):\n # only 1 axis is monotonic\n # data1 with index split into [0...4], [5...9],\n data1 = pd.DataFrame(np.random.rand(10, 10), index=np.arange(10),\n columns=[4, 1, 3, 2, 10, 5, 9, 8, 6, 7])\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=(5, 10))\n # data2 with index split into [6...11], [2, 5],\n data2 = 
pd.DataFrame(np.random.rand(10, 10), index=np.arange(11, 1, -1),\n columns=[5, 9, 12, 3, 11, 10, 6, 4, 1, 2])\n data2 = self.to_boolean_if_needed(data2)\n df2 = from_pandas(data2, chunk_size=(6, 10))\n\n df3 = self.func(df1, df2)\n\n # test df3's index and columns\n pd.testing.assert_index_equal(df3.columns_value.to_pandas(), self.func(data1, data2).columns)\n self.assertTrue(df3.columns_value.should_be_monotonic)\n self.assertIsInstance(df3.index_value.value, IndexValue.Int64Index)\n self.assertTrue(df3.index_value.should_be_monotonic)\n pd.testing.assert_index_equal(df3.index_value.to_pandas(), pd.Int64Index([]))\n self.assertNotEqual(df3.index_value.key, df1.index_value.key)\n self.assertNotEqual(df3.index_value.key, df2.index_value.key)\n self.assertEqual(df3.shape[1], 12) # columns is recorded, so we can get it\n\n df3 = df3.tiles()\n df1, df2 = get_tiled(df1), get_tiled(df2)\n\n data1_index_min_max = [(0, True, 4, True), (5, True, 9, True)]\n data2_index_min_max = [(2, True, 5, True), (6, True, 11, True)]\n\n left_index_splits, right_index_splits = split_monotonic_index_min_max(\n data1_index_min_max, True, data2_index_min_max, False)\n\n left_index_idx_to_original_idx = build_split_idx_to_origin_idx(left_index_splits)\n right_index_idx_to_original_idx = build_split_idx_to_origin_idx(right_index_splits, False)\n\n self.assertEqual(df3.chunk_shape, (7, 1))\n for c in df3.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n # test shape\n idx = c.index\n # test the left side\n self.assertIsInstance(c.inputs[0].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[0].op.stage, OperandStage.map)\n left_row_idx, left_row_inner_idx = left_index_idx_to_original_idx[idx[0]]\n expect_df1_input = df1.cix[left_row_idx, 0].data\n self.assertIs(c.inputs[0].inputs[0], expect_df1_input)\n left_index_min_max = left_index_splits[left_row_idx][left_row_inner_idx]\n self.assertEqual(c.inputs[0].op.index_min, left_index_min_max[0])\n self.assertEqual(c.inputs[0].op.index_min_close, left_index_min_max[1])\n self.assertEqual(c.inputs[0].op.index_max, left_index_min_max[2])\n self.assertEqual(c.inputs[0].op.index_max_close, left_index_min_max[3])\n self.assertIsInstance(c.inputs[0].index_value.to_pandas(), type(data1.index))\n self.assertEqual(c.inputs[0].op.column_min, expect_df1_input.columns_value.min_val)\n self.assertEqual(c.inputs[0].op.column_min_close, expect_df1_input.columns_value.min_val_close)\n self.assertEqual(c.inputs[0].op.column_max, expect_df1_input.columns_value.max_val)\n self.assertEqual(c.inputs[0].op.column_max_close, expect_df1_input.columns_value.max_val_close)\n expect_left_columns = expect_df1_input.columns_value\n pd.testing.assert_index_equal(c.inputs[0].columns_value.to_pandas(), expect_left_columns.to_pandas())\n pd.testing.assert_index_equal(c.inputs[0].dtypes.index, expect_left_columns.to_pandas())\n # test the right side\n self.assertIsInstance(c.inputs[1].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[1].op.stage, OperandStage.map)\n right_row_idx, right_row_inner_idx = right_index_idx_to_original_idx[idx[0]]\n expect_df2_input = df2.cix[right_row_idx, 0].data\n self.assertIs(c.inputs[1].inputs[0], expect_df2_input)\n right_index_min_max = right_index_splits[right_row_idx][right_row_inner_idx]\n self.assertEqual(c.inputs[1].op.index_min, right_index_min_max[0])\n self.assertEqual(c.inputs[1].op.index_min_close, right_index_min_max[1])\n self.assertEqual(c.inputs[1].op.index_max, right_index_min_max[2])\n 
self.assertEqual(c.inputs[1].op.index_max_close, right_index_min_max[3])\n self.assertIsInstance(c.inputs[1].index_value.to_pandas(), type(data2.index))\n self.assertEqual(c.inputs[1].op.column_min, expect_df2_input.columns_value.min_val)\n self.assertEqual(c.inputs[1].op.column_min_close, expect_df2_input.columns_value.min_val_close)\n self.assertEqual(c.inputs[1].op.column_max, expect_df2_input.columns_value.max_val)\n self.assertEqual(c.inputs[1].op.column_max_close, expect_df2_input.columns_value.max_val_close)\n expect_right_columns = expect_df2_input.columns_value\n pd.testing.assert_index_equal(c.inputs[1].columns_value.to_pandas(), expect_right_columns.to_pandas())\n pd.testing.assert_index_equal(c.inputs[1].dtypes.index, expect_right_columns.to_pandas())\n\n def testBothOneChunk(self):\n # no axis is monotonic, but 1 chunk for all axes\n data1 = pd.DataFrame(np.random.rand(10, 10), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9],\n columns=[4, 1, 3, 2, 10, 5, 9, 8, 6, 7])\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=10)\n data2 = pd.DataFrame(np.random.rand(10, 10), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3],\n columns=[5, 9, 12, 3, 11, 10, 6, 4, 1, 2])\n data2 = self.to_boolean_if_needed(data2)\n df2 = from_pandas(data2, chunk_size=10)\n\n df3 = self.func(df1, df2)\n\n # test df3's index and columns\n pd.testing.assert_index_equal(df3.columns_value.to_pandas(), self.func(data1, data2).columns)\n self.assertTrue(df3.columns_value.should_be_monotonic)\n self.assertIsInstance(df3.index_value.value, IndexValue.Int64Index)\n self.assertTrue(df3.index_value.should_be_monotonic)\n pd.testing.assert_index_equal(df3.index_value.to_pandas(), pd.Int64Index([]))\n self.assertNotEqual(df3.index_value.key, df1.index_value.key)\n self.assertNotEqual(df3.index_value.key, df2.index_value.key)\n self.assertEqual(df3.shape[1], 12) # columns is recorded, so we can get it\n\n df3 = df3.tiles()\n df1, df2 = get_tiled(df1), get_tiled(df2)\n\n self.assertEqual(df3.chunk_shape, (1, 1))\n for c in df3.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n # test the left side\n self.assertIs(c.inputs[0], df1.chunks[0].data)\n # test the right side\n self.assertIs(c.inputs[1], df2.chunks[0].data)\n\n def testWithShuffleAndOneChunk(self):\n # no axis is monotonic\n data1 = pd.DataFrame(np.random.rand(10, 10), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9],\n columns=[4, 1, 3, 2, 10, 5, 9, 8, 6, 7])\n data1 = self.to_boolean_if_needed(data1)\n df1 = from_pandas(data1, chunk_size=(5, 10))\n data2 = pd.DataFrame(np.random.rand(10, 10), index=[11, 1, 2, 5, 7, 6, 8, 9, 10, 3],\n columns=[5, 9, 12, 3, 11, 10, 6, 4, 1, 2])\n data2 = self.to_boolean_if_needed(data2)\n df2 = from_pandas(data2, chunk_size=(6, 10))\n\n df3 = self.func(df1, df2)\n\n # test df3's index and columns\n pd.testing.assert_index_equal(df3.columns_value.to_pandas(), self.func(data1, data2).columns)\n self.assertTrue(df3.columns_value.should_be_monotonic)\n self.assertIsInstance(df3.index_value.value, IndexValue.Int64Index)\n self.assertTrue(df3.index_value.should_be_monotonic)\n pd.testing.assert_index_equal(df3.index_value.to_pandas(), pd.Int64Index([]))\n self.assertNotEqual(df3.index_value.key, df1.index_value.key)\n self.assertNotEqual(df3.index_value.key, df2.index_value.key)\n self.assertEqual(df3.shape[1], 12) # columns is recorded, so we can get it\n\n df3 = df3.tiles()\n df1, df2 = get_tiled(df1), get_tiled(df2)\n\n self.assertEqual(df3.chunk_shape, (2, 1))\n proxy_keys = set()\n for c in 
df3.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n # test left side\n self.assertIsInstance(c.inputs[0].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[0].op.stage, OperandStage.reduce)\n expect_dtypes = pd.concat([ic.inputs[0].op.data.dtypes\n for ic in c.inputs[0].inputs[0].inputs if ic.index[0] == 0])\n pd.testing.assert_series_equal(c.inputs[0].dtypes, expect_dtypes)\n pd.testing.assert_index_equal(c.inputs[0].columns_value.to_pandas(), c.inputs[0].dtypes.index)\n self.assertIsInstance(c.inputs[0].index_value.to_pandas(), type(data1.index))\n self.assertIsInstance(c.inputs[0].inputs[0].op, DataFrameShuffleProxy)\n proxy_keys.add(c.inputs[0].inputs[0].op.key)\n for ic, ci in zip(c.inputs[0].inputs[0].inputs, df1.chunks):\n self.assertIsInstance(ic.op, DataFrameIndexAlign)\n self.assertEqual(ic.op.stage, OperandStage.map)\n self.assertEqual(ic.op.index_shuffle_size, 2)\n self.assertIsInstance(ic.index_value.to_pandas(), type(data1.index))\n self.assertEqual(ic.op.column_min, ci.columns_value.min_val)\n self.assertEqual(ic.op.column_min_close, ci.columns_value.min_val_close)\n self.assertEqual(ic.op.column_max, ci.columns_value.max_val)\n self.assertEqual(ic.op.column_max_close, ci.columns_value.max_val_close)\n self.assertIsNone(ic.op.column_shuffle_size, None)\n self.assertIsNotNone(ic.columns_value)\n self.assertIs(ic.inputs[0], ci.data)\n # test right side\n self.assertIsInstance(c.inputs[1].op, DataFrameIndexAlign)\n self.assertEqual(c.inputs[1].op.stage, OperandStage.reduce)\n expect_dtypes = pd.concat([ic.inputs[0].op.data.dtypes\n for ic in c.inputs[1].inputs[0].inputs if ic.index[0] == 0])\n pd.testing.assert_series_equal(c.inputs[1].dtypes, expect_dtypes)\n pd.testing.assert_index_equal(c.inputs[1].columns_value.to_pandas(), c.inputs[1].dtypes.index)\n self.assertIsInstance(c.inputs[0].index_value.to_pandas(), type(data1.index))\n self.assertIsInstance(c.inputs[1].inputs[0].op, DataFrameShuffleProxy)\n proxy_keys.add(c.inputs[1].inputs[0].op.key)\n for ic, ci in zip(c.inputs[1].inputs[0].inputs, df2.chunks):\n self.assertIsInstance(ic.op, DataFrameIndexAlign)\n self.assertEqual(ic.op.stage, OperandStage.map)\n self.assertEqual(ic.op.index_shuffle_size, 2)\n self.assertIsInstance(ic.index_value.to_pandas(), type(data1.index))\n self.assertIsNone(ic.op.column_shuffle_size)\n self.assertEqual(ic.op.column_min, ci.columns_value.min_val)\n self.assertEqual(ic.op.column_min_close, ci.columns_value.min_val_close)\n self.assertEqual(ic.op.column_max, ci.columns_value.max_val)\n self.assertEqual(ic.op.column_max_close, ci.columns_value.max_val_close)\n self.assertIsNone(ic.op.column_shuffle_size, None)\n self.assertIsNotNone(ic.columns_value)\n self.assertIs(ic.inputs[0], ci.data)\n\n self.assertEqual(len(proxy_keys), 2)\n\n def testOnSameDataFrame(self):\n data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),\n columns=[np.random.bytes(10) for _ in range(10)])\n data = self.to_boolean_if_needed(data)\n df = from_pandas(data, chunk_size=3)\n df2 = self.func(df, df)\n\n # test df2's index and columns\n pd.testing.assert_index_equal(df2.columns_value.to_pandas(), self.func(data, data).columns)\n self.assertFalse(df2.columns_value.should_be_monotonic)\n self.assertIsInstance(df2.index_value.value, IndexValue.Int64Index)\n self.assertFalse(df2.index_value.should_be_monotonic)\n pd.testing.assert_index_equal(df2.index_value.to_pandas(), pd.Int64Index([]))\n self.assertEqual(df2.index_value.key, 
df.index_value.key)\n self.assertEqual(df2.columns_value.key, df.columns_value.key)\n self.assertEqual(df2.shape[1], 10)\n\n df2 = df2.tiles()\n df = get_tiled(df)\n\n self.assertEqual(df2.chunk_shape, df.chunk_shape)\n for c in df2.chunks:\n self.assertIsInstance(c.op, self.op)\n self.assertEqual(len(c.inputs), 2)\n # test the left side\n self.assertIs(c.inputs[0], df.cix[c.index].data)\n # test the right side\n self.assertIs(c.inputs[1], df.cix[c.index].data)\n\n def testDataFrameAndScalar(self):\n if self.func_name in ['__and__', '__or__', '__xor__']:\n # bitwise logical operators doesn\\'t support floating point scalars\n return\n\n data = pd.DataFrame(np.random.rand(10, 10), index=np.arange(10),\n columns=np.arange(3, 13))\n df = from_pandas(data, chunk_size=5)\n # test operator with scalar\n result = self.func(df, 1)\n result2 = getattr(df, self.func_name)(1)\n\n # test reverse operator with scalar\n result3 = getattr(df, self.rfunc_name)(1)\n result4 = self.func(df, 1)\n result5 = self.func(1, df)\n pd.testing.assert_index_equal(result.columns_value.to_pandas(), data.columns)\n self.assertIsInstance(result.index_value.value, IndexValue.Int64Index)\n\n pd.testing.assert_index_equal(result2.columns_value.to_pandas(), data.columns)\n self.assertIsInstance(result2.index_value.value, IndexValue.Int64Index)\n\n pd.testing.assert_index_equal(result3.columns_value.to_pandas(), data.columns)\n self.assertIsInstance(result3.index_value.value, IndexValue.Int64Index)\n\n pd.testing.assert_index_equal(result4.columns_value.to_pandas(), data.columns)\n self.assertIsInstance(result4.index_value.value, IndexValue.Int64Index)\n\n pd.testing.assert_index_equal(result5.columns_value.to_pandas(), data.columns)\n self.assertIsInstance(result5.index_value.value, IndexValue.Int64Index)\n\n if 'builtin_function_or_method' not in str(type(self.func)):\n # skip NotImplemented test for comparison function\n return\n\n # test NotImplemented, use other's rfunc instead\n class TestRFunc:\n pass\n\n setattr(TestRFunc, '__%s__' % self.rfunc_name, lambda *_: 1)\n other = TestRFunc()\n ret = self.func(df, other)\n self.assertEqual(ret, 1)\n\n def testSeriesAndScalar(self):\n if self.func_name in ['__and__', '__or__', '__xor__']:\n # bitwise logical operators doesn\\'t support floating point scalars\n return\n\n data = pd.Series(range(10), index=[1, 3, 4, 2, 9, 10, 33, 23, 999, 123])\n s1 = from_pandas_series(data, chunk_size=3)\n r = getattr(s1, self.func_name)(456)\n r = r.tiles()\n s1 = get_tiled(s1)\n\n self.assertEqual(r.index_value.key, s1.index_value.key)\n self.assertEqual(r.chunk_shape, s1.chunk_shape)\n self.assertEqual(r.dtype, getattr(data, self.func_name)(456).dtype)\n\n for cr in r.chunks:\n cs = s1.cix[cr.index]\n self.assertEqual(cr.index_value.key, cs.index_value.key)\n self.assertIsInstance(cr.op, self.op)\n self.assertEqual(len(cr.inputs), 1)\n self.assertIsInstance(cr.inputs[0].op, SeriesDataSource)\n self.assertEqual(cr.op.rhs, 456)\n\n if 'builtin_function_or_method' not in str(type(self.func)):\n # skip rfunc test for comparison function\n return\n\n r = getattr(s1, self.rfunc_name)(789)\n r = r.tiles()\n s1 = get_tiled(s1)\n\n self.assertEqual(r.index_value.key, s1.index_value.key)\n self.assertEqual(r.chunk_shape, s1.chunk_shape)\n\n for cr in r.chunks:\n cs = s1.cix[cr.index]\n self.assertEqual(cr.index_value.key, cs.index_value.key)\n self.assertIsInstance(cr.op, self.op)\n self.assertEqual(len(cr.inputs), 1)\n self.assertIsInstance(cr.inputs[0].op, SeriesDataSource)\n 
self.assertEqual(cr.op.lhs, 789)\n\n def testCheckInputs(self):\n data = pd.DataFrame(np.random.rand(10, 3))\n data = self.to_boolean_if_needed(data)\n df = from_pandas(data)\n\n with self.assertRaises(ValueError):\n _ = df + np.random.rand(5, 3)\n\n with self.assertRaises(ValueError):\n _ = df + np.random.rand(10)\n\n with self.assertRaises(ValueError):\n _ = df + np.random.rand(10, 3, 2)\n\n data = pd.Series(np.random.rand(10))\n series = from_pandas_series(data)\n\n with self.assertRaises(ValueError):\n _ = series + np.random.rand(5, 3)\n\n with self.assertRaises(ValueError):\n _ = series + np.random.rand(5)\n\n\nclass TestUnary(TestBase):\n def testAbs(self):\n data1 = pd.DataFrame(np.random.rand(10, 10), index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9],\n columns=[4, 1, 3, 2, 10, 5, 9, 8, 6, 7])\n df1 = from_pandas(data1, chunk_size=(5, 10))\n\n df2 = df1.abs()\n\n # test df2's index and columns\n pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df1.columns_value.to_pandas())\n self.assertIsInstance(df2.index_value.value, IndexValue.Int64Index)\n self.assertEqual(df2.shape, (10, 10))\n\n df2 = df2.tiles()\n df1 = get_tiled(df1)\n\n self.assertEqual(df2.chunk_shape, (2, 1))\n for c2, c1 in zip(df2.chunks, df1.chunks):\n self.assertIsInstance(c2.op, DataFrameAbs)\n self.assertEqual(len(c2.inputs), 1)\n # compare with input chunks\n self.assertEqual(c2.index, c1.index)\n pd.testing.assert_index_equal(c2.columns_value.to_pandas(), c1.columns_value.to_pandas())\n pd.testing.assert_index_equal(c2.index_value.to_pandas(), c1.index_value.to_pandas())\n\n def testNot(self):\n data1 = pd.DataFrame(np.random.rand(10, 10) > 0.5, index=[0, 10, 2, 3, 4, 5, 6, 7, 8, 9],\n columns=[4, 1, 3, 2, 10, 5, 9, 8, 6, 7])\n df1 = from_pandas(data1, chunk_size=(5, 10))\n\n df2 = ~df1\n\n # test df2's index and columns\n pd.testing.assert_index_equal(df2.columns_value.to_pandas(), df1.columns_value.to_pandas())\n self.assertIsInstance(df2.index_value.value, IndexValue.Int64Index)\n self.assertEqual(df2.shape, (10, 10))\n\n df2 = df2.tiles()\n df1 = get_tiled(df1)\n\n self.assertEqual(df2.chunk_shape, (2, 1))\n for c2, c1 in zip(df2.chunks, df1.chunks):\n self.assertIsInstance(c2.op, DataFrameNot)\n self.assertEqual(len(c2.inputs), 1)\n # compare with input chunks\n self.assertEqual(c2.index, c1.index)\n pd.testing.assert_index_equal(c2.columns_value.to_pandas(), c1.columns_value.to_pandas())\n pd.testing.assert_index_equal(c2.index_value.to_pandas(), c1.index_value.to_pandas())\n\n\nif __name__ == '__main__': # pragma: no cover\n unittest.main()\n", "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom math import log, sqrt\nimport numbers\n\nimport numpy as np\nfrom scipy.special import gammaln\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.utils.extmath import fast_logdet\n\nfrom ... 
import tensor as mt\nfrom ...tensor.core import TENSOR_TYPE\nfrom ...tensor.utils import check_random_state\nfrom ...tensor.linalg import randomized_svd\nfrom ...tensor.linalg.randomized_svd import svd_flip\nfrom ...lib.sparse import issparse\nfrom ...core import ExecutableTuple\nfrom ..utils import check_array\nfrom .base import _BasePCA\n\n\ndef _assess_dimension_(spectrum, rank, n_samples, n_features):\n \"\"\"Compute the likelihood of a rank ``rank`` dataset\n\n The dataset is assumed to be embedded in gaussian noise of shape(n,\n dimf) having spectrum ``spectrum``.\n\n Parameters\n ----------\n spectrum : array of shape (n)\n Data spectrum.\n rank : int\n Tested rank value.\n n_samples : int\n Number of samples.\n n_features : int\n Number of features.\n\n Returns\n -------\n ll : float,\n The log-likelihood\n\n Notes\n -----\n This implements the method of `Thomas P. Minka:\n Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`\n \"\"\"\n if rank > len(spectrum):\n raise ValueError(\"The tested rank cannot exceed the rank of the\"\n \" dataset\")\n\n pu = -rank * log(2.)\n for i in range(rank):\n pu += (gammaln((n_features - i) / 2.) -\n log(mt.pi) * (n_features - i) / 2.)\n\n pl = mt.sum(mt.log(spectrum[:rank]))\n pl = -pl * n_samples / 2.\n\n if rank == n_features:\n pv = 0\n v = 1\n else:\n v = mt.sum(spectrum[rank:]) / (n_features - rank)\n pv = -mt.log(v) * n_samples * (n_features - rank) / 2.\n\n m = n_features * rank - rank * (rank + 1.) / 2.\n pp = log(2. * mt.pi) * (m + rank + 1.) / 2.\n\n pa = 0.\n spectrum_ = spectrum.copy()\n spectrum_[rank:n_features] = v\n for i in range(rank):\n for j in range(i + 1, len(spectrum)):\n pa += mt.log((spectrum[i] - spectrum[j]) *\n (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)\n\n ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.\n return ll\n\n\ndef _infer_dimension_(spectrum, n_samples, n_features):\n \"\"\"Infers the dimension of a dataset of shape (n_samples, n_features)\n\n The dataset is described by its spectrum `spectrum`.\n \"\"\"\n n_spectrum = len(spectrum)\n ll = mt.empty(n_spectrum)\n for rank in range(n_spectrum):\n ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)\n return ll.argmax()\n\n\nclass PCA(_BasePCA):\n \"\"\"Principal component analysis (PCA)\n\n Linear dimensionality reduction using Singular Value Decomposition of the\n data to project it to a lower dimensional space. The input data is centered\n but not scaled for each feature before applying the SVD.\n\n It uses the LAPACK implementation of the full SVD or a randomized truncated\n SVD by the method of Halko et al. 2009, depending on the shape of the input\n data and the number of components to extract.\n\n It can also use the scipy.sparse.linalg ARPACK implementation of the\n truncated SVD.\n\n Notice that this class does not support sparse input. See\n :class:`TruncatedSVD` for an alternative with sparse data.\n\n Read more in the :ref:`User Guide <PCA>`.\n\n Parameters\n ----------\n n_components : int, float, None or string\n Number of components to keep.\n if n_components is not set all components are kept::\n\n n_components == min(n_samples, n_features)\n\n If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's\n MLE is used to guess the dimension. 
Use of ``n_components == 'mle'``\n will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.\n\n If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the\n number of components such that the amount of variance that needs to be\n explained is greater than the percentage specified by n_components.\n\n If ``svd_solver == 'arpack'``, the number of components must be\n strictly less than the minimum of n_features and n_samples.\n\n Hence, the None case results in::\n\n n_components == min(n_samples, n_features) - 1\n\n copy : bool (default True)\n If False, data passed to fit are overwritten and running\n fit(X).transform(X) will not yield the expected results,\n use fit_transform(X) instead.\n\n whiten : bool, optional (default False)\n When True (False by default) the `components_` vectors are multiplied\n by the square root of n_samples and then divided by the singular values\n to ensure uncorrelated outputs with unit component-wise variances.\n\n Whitening will remove some information from the transformed signal\n (the relative variance scales of the components) but can sometime\n improve the predictive accuracy of the downstream estimators by\n making their data respect some hard-wired assumptions.\n\n svd_solver : string {'auto', 'full', 'arpack', 'randomized'}\n auto :\n the solver is selected by a default policy based on `X.shape` and\n `n_components`: if the input data is larger than 500x500 and the\n number of components to extract is lower than 80% of the smallest\n dimension of the data, then the more efficient 'randomized'\n method is enabled. Otherwise the exact full SVD is computed and\n optionally truncated afterwards.\n full :\n run exact full SVD calling the standard LAPACK solver via\n `scipy.linalg.svd` and select the components by postprocessing\n arpack :\n run SVD truncated to n_components calling ARPACK solver via\n `scipy.sparse.linalg.svds`. It requires strictly\n 0 < n_components < min(X.shape)\n randomized :\n run randomized SVD by the method of Halko et al.\n\n tol : float >= 0, optional (default .0)\n Tolerance for singular values computed by svd_solver == 'arpack'.\n\n iterated_power : int >= 0, or 'auto', (default 'auto')\n Number of iterations for the power method computed by\n svd_solver == 'randomized'.\n\n random_state : int, RandomState instance or None, optional (default None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`. Used when ``svd_solver`` == 'arpack' or 'randomized'.\n\n Attributes\n ----------\n components_ : tensor, shape (n_components, n_features)\n Principal axes in feature space, representing the directions of\n maximum variance in the data. 
The components are sorted by\n ``explained_variance_``.\n\n explained_variance_ : tensor, shape (n_components,)\n The amount of variance explained by each of the selected components.\n\n Equal to n_components largest eigenvalues\n of the covariance matrix of X.\n\n explained_variance_ratio_ : tensor, shape (n_components,)\n Percentage of variance explained by each of the selected components.\n\n If ``n_components`` is not set then all components are stored and the\n sum of the ratios is equal to 1.0.\n\n singular_values_ : tensor, shape (n_components,)\n The singular values corresponding to each of the selected components.\n The singular values are equal to the 2-norms of the ``n_components``\n variables in the lower-dimensional space.\n\n mean_ : tensor, shape (n_features,)\n Per-feature empirical mean, estimated from the training set.\n\n Equal to `X.mean(axis=0)`.\n\n n_components_ : int\n The estimated number of components. When n_components is set\n to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this\n number is estimated from input data. Otherwise it equals the parameter\n n_components, or the lesser value of n_features and n_samples\n if n_components is None.\n\n noise_variance_ : float\n The estimated noise covariance following the Probabilistic PCA model\n from Tipping and Bishop 1999. See \"Pattern Recognition and\n Machine Learning\" by C. Bishop, 12.2.1 p. 574 or\n http://www.miketipping.com/papers/met-mppca.pdf. It is required to\n compute the estimated data covariance and score samples.\n\n Equal to the average of (min(n_features, n_samples) - n_components)\n smallest eigenvalues of the covariance matrix of X.\n\n References\n ----------\n For n_components == 'mle', this class uses the method of *Minka, T. P.\n \"Automatic choice of dimensionality for PCA\". In NIPS, pp. 598-604*\n\n Implements the probabilistic PCA model from:\n Tipping, M. E., and Bishop, C. M. (1999). \"Probabilistic principal\n component analysis\". Journal of the Royal Statistical Society:\n Series B (Statistical Methodology), 61(3), 611-622.\n via the score and score_samples methods.\n See http://www.miketipping.com/papers/met-mppca.pdf\n\n For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.\n\n For svd_solver == 'randomized', see:\n *Halko, N., Martinsson, P. G., and Tropp, J. A. (2011).\n \"Finding structure with randomness: Probabilistic algorithms for\n constructing approximate matrix decompositions\".\n SIAM review, 53(2), 217-288.* and also\n *Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011).\n \"A randomized algorithm for the decomposition of matrices\".\n Applied and Computational Harmonic Analysis, 30(1), 47-68.*\n\n\n Examples\n --------\n >>> import mars.tensor as mt\n >>> from mars.learn.decomposition import PCA\n >>> X = mt.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n >>> pca = PCA(n_components=2)\n >>> pca.fit(X) # doctest: +NORMALIZE_WHITESPACE\n PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,\n svd_solver='auto', tol=0.0, whiten=False)\n >>> print(pca.explained_variance_ratio_.execute()) # doctest: +ELLIPSIS\n [0.9924... 0.0075...]\n >>> print(pca.singular_values_.execute()) # doctest: +ELLIPSIS\n [6.30061... 
0.54980...]\n\n >>> pca = PCA(n_components=2, svd_solver='full')\n >>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,\n svd_solver='full', tol=0.0, whiten=False)\n >>> print(pca.explained_variance_ratio_.execute()) # doctest: +ELLIPSIS\n [0.9924... 0.00755...]\n >>> print(pca.singular_values_.execute()) # doctest: +ELLIPSIS\n [6.30061... 0.54980...]\n\n See also\n --------\n KernelPCA\n SparsePCA\n TruncatedSVD\n IncrementalPCA\n \"\"\"\n\n def __init__(self, n_components=None, copy=True, whiten=False,\n svd_solver='auto', tol=0.0, iterated_power='auto',\n random_state=None):\n self.n_components = n_components\n self.copy = copy\n self.whiten = whiten\n self.svd_solver = svd_solver\n self.tol = tol\n self.iterated_power = iterated_power\n self.random_state = random_state\n\n def fit(self, X, y=None, session=None, run_kwargs=None):\n \"\"\"Fit the model with X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : Ignored\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n self._fit(X, session=session, run=True, run_kwargs=run_kwargs)\n return self\n\n def fit_transform(self, X, y=None, session=None):\n \"\"\"Fit the model with X and apply the dimensionality reduction on X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : Ignored\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n\n \"\"\"\n U, S, _ = self._fit(X, session=session, run=False)\n U = U[:, :self.n_components_]\n\n if self.whiten:\n # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)\n U *= sqrt(X.shape[0] - 1)\n else:\n # X_new = X * V = U * S * V^T * V = U * S\n U *= S[:self.n_components_]\n\n self._run([U], session=session)\n return U\n\n def _run(self, result, session=None, run_kwargs=None):\n to_run_tensors = list(result)\n if isinstance(self.noise_variance_, TENSOR_TYPE):\n to_run_tensors.append(self.noise_variance_)\n to_run_tensors.append(self.components_)\n to_run_tensors.append(self.explained_variance_)\n to_run_tensors.append(self.explained_variance_ratio_)\n to_run_tensors.append(self.singular_values_)\n\n ExecutableTuple(to_run_tensors).execute(session=session, fetch=False, **(run_kwargs or {}))\n\n def _fit(self, X, session=None, run=True, run_kwargs=None):\n \"\"\"Dispatch to the right submethod depending on the chosen solver.\"\"\"\n\n # Raise an error for sparse input.\n # This is more informative than the generic one raised by check_array.\n if (hasattr(X, 'issparse') and X.issparse()) or issparse(X):\n raise TypeError('PCA does not support sparse input. 
See '\n 'TruncatedSVD for a possible alternative.')\n\n X = check_array(X, dtype=[mt.float64, mt.float32], ensure_2d=True,\n copy=self.copy)\n\n # Handle n_components==None\n if self.n_components is None:\n if self.svd_solver != 'arpack':\n n_components = min(X.shape)\n else:\n n_components = min(X.shape) - 1\n else:\n n_components = self.n_components\n\n # Handle svd_solver\n self._fit_svd_solver = self.svd_solver\n if self._fit_svd_solver == 'auto':\n # Small problem or n_components == 'mle', just call full PCA\n if max(X.shape) <= 500 or n_components == 'mle':\n self._fit_svd_solver = 'full'\n elif n_components >= 1 and n_components < .8 * min(X.shape):\n self._fit_svd_solver = 'randomized'\n # This is also the case of n_components in (0,1)\n else:\n self._fit_svd_solver = 'full'\n\n # Call different fits for either full or truncated SVD\n if self._fit_svd_solver == 'full':\n ret = self._fit_full(X, n_components, session=session)\n elif self._fit_svd_solver in ['arpack', 'randomized']:\n ret = self._fit_truncated(X, n_components, self._fit_svd_solver)\n else:\n raise ValueError(\"Unrecognized svd_solver='{0}'\"\n \"\".format(self._fit_svd_solver))\n\n if run:\n self._run(ret, session=session, run_kwargs=run_kwargs)\n return ret\n\n def _fit_full(self, X, n_components, session=None):\n \"\"\"Fit the model by computing full SVD on X\"\"\"\n n_samples, n_features = X.shape\n\n if n_components == 'mle':\n if n_samples < n_features:\n raise ValueError(\"n_components='mle' is only supported \"\n \"if n_samples >= n_features\")\n elif not 0 <= n_components <= min(n_samples, n_features):\n raise ValueError(\"n_components=%r must be between 0 and \"\n \"min(n_samples, n_features)=%r with \"\n \"svd_solver='full'\"\n % (n_components, min(n_samples, n_features)))\n elif n_components >= 1:\n if not isinstance(n_components, (numbers.Integral, np.integer)):\n raise ValueError(\"n_components=%r must be of type int \"\n \"when greater than or equal to 1, \"\n \"was of type=%r\"\n % (n_components, type(n_components)))\n\n # Center data\n self.mean_ = mt.mean(X, axis=0)\n X -= self.mean_\n\n U, S, V = mt.linalg.svd(X)\n # flip eigenvectors' sign to enforce deterministic output\n U, V = svd_flip(U, V)\n\n components_ = V\n\n # Get variance explained by singular values\n explained_variance_ = (S ** 2) / (n_samples - 1)\n total_var = explained_variance_.sum()\n explained_variance_ratio_ = explained_variance_ / total_var\n singular_values_ = S.copy() # Store the singular values.\n\n # Postprocess the number of components required\n if n_components == 'mle':\n n_components = \\\n _infer_dimension_(explained_variance_, n_samples, n_features)\\\n .execute(session=session)\n elif 0 < n_components < 1.0:\n # number of components for which the cumulated explained\n # variance percentage is superior to the desired threshold\n # ratio_cumsum = stable_cumsum(explained_variance_ratio_)\n ratio_cumsum = explained_variance_ratio_.cumsum()\n n_components = (mt.searchsorted(ratio_cumsum, n_components) + 1)\\\n .execute(session=session)\n\n # Compute noise covariance using Probabilistic PCA model\n # The sigma2 maximum likelihood (cf. eq. 
12.46)\n if n_components < min(n_features, n_samples):\n self.noise_variance_ = explained_variance_[n_components:].mean()\n else:\n self.noise_variance_ = 0.\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = components_[:n_components]\n self.n_components_ = n_components\n self.explained_variance_ = explained_variance_[:n_components]\n self.explained_variance_ratio_ = \\\n explained_variance_ratio_[:n_components]\n self.singular_values_ = singular_values_[:n_components]\n\n return U, S, V\n\n def _fit_truncated(self, X, n_components, svd_solver):\n \"\"\"Fit the model by computing truncated SVD (by ARPACK or randomized)\n on X\n \"\"\"\n n_samples, n_features = X.shape\n\n if isinstance(n_components, str):\n raise ValueError(\"n_components=%r cannot be a string \"\n \"with svd_solver='%s'\"\n % (n_components, svd_solver))\n elif not 1 <= n_components <= min(n_samples, n_features):\n raise ValueError(\"n_components=%r must be between 1 and \"\n \"min(n_samples, n_features)=%r with \"\n \"svd_solver='%s'\"\n % (n_components, min(n_samples, n_features),\n svd_solver))\n elif not isinstance(n_components, (numbers.Integral, np.integer)):\n raise ValueError(\"n_components=%r must be of type int \"\n \"when greater than or equal to 1, was of type=%r\"\n % (n_components, type(n_components)))\n elif svd_solver == 'arpack' and n_components == min(n_samples,\n n_features):\n raise ValueError(\"n_components=%r must be strictly less than \"\n \"min(n_samples, n_features)=%r with \"\n \"svd_solver='%s'\"\n % (n_components, min(n_samples, n_features),\n svd_solver))\n\n random_state = check_random_state(self.random_state)\n\n # Center data\n self.mean_ = mt.mean(X, axis=0)\n X -= self.mean_\n\n if svd_solver == 'arpack':\n # # random init solution, as ARPACK does it internally\n # v0 = random_state.uniform(-1, 1, size=min(X.shape))\n # U, S, V = svds(X, k=n_components, tol=self.tol, v0=v0)\n # # svds doesn't abide by scipy.linalg.svd/randomized_svd\n # # conventions, so reverse its outputs.\n # S = S[::-1]\n # # flip eigenvectors' sign to enforce deterministic output\n # U, V = svd_flip(U[:, ::-1], V[::-1])\n raise NotImplementedError('Does not support arpack svd_resolver')\n\n elif svd_solver == 'randomized':\n # sign flipping is done inside\n U, S, V = randomized_svd(X, n_components=n_components,\n n_iter=self.iterated_power,\n flip_sign=True,\n random_state=random_state)\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = V\n self.n_components_ = n_components\n\n # Get variance explained by singular values\n self.explained_variance_ = (S ** 2) / (n_samples - 1)\n total_var = mt.var(X, ddof=1, axis=0)\n self.explained_variance_ratio_ = \\\n self.explained_variance_ / total_var.sum()\n self.singular_values_ = S.copy() # Store the singular values.\n\n if self.n_components_ < min(n_features, n_samples):\n self.noise_variance_ = (total_var.sum() -\n self.explained_variance_.sum())\n self.noise_variance_ /= min(n_features, n_samples) - n_components\n else:\n self.noise_variance_ = 0.\n\n return U, S, V\n\n def _score_samples(self, X):\n check_is_fitted(self, 'mean_')\n\n X = check_array(X)\n Xr = X - self.mean_\n n_features = X.shape[1]\n precision = self.get_precision().fetch()\n log_like = -.5 * (Xr * (mt.dot(Xr, precision))).sum(axis=1)\n log_like -= .5 * (n_features * log(2. * mt.pi) -\n fast_logdet(precision))\n return log_like\n\n def score_samples(self, X, session=None):\n \"\"\"Return the log-likelihood of each sample.\n\n See. 
\"Pattern Recognition and Machine Learning\"\n by C. Bishop, 12.2.1 p. 574\n or http://www.miketipping.com/papers/met-mppca.pdf\n\n Parameters\n ----------\n X : tensor, shape(n_samples, n_features)\n The data.\n\n Returns\n -------\n ll : tensor, shape (n_samples,)\n Log-likelihood of each sample under the current model\n \"\"\"\n log_like = self._score_samples(X)\n log_like.execute(session=session, fetch=False)\n return log_like\n\n def score(self, X, y=None, session=None):\n \"\"\"Return the average log-likelihood of all samples.\n\n See. \"Pattern Recognition and Machine Learning\"\n by C. Bishop, 12.2.1 p. 574\n or http://www.miketipping.com/papers/met-mppca.pdf\n\n Parameters\n ----------\n X : tensor, shape(n_samples, n_features)\n The data.\n\n y : Ignored\n\n Returns\n -------\n ll : float\n Average log-likelihood of the samples under the current model\n \"\"\"\n ret = mt.mean(self._score_samples(X))\n ret.execute(session=session, fetch=False)\n return ret\n", "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport scipy.sparse as sps\ntry:\n import faiss\nexcept ImportError: # pragma: no cover\n faiss = None\ntry:\n import cupy\nexcept ImportError:\n cupy = None\ntry:\n from sklearn.neighbors import NearestNeighbors as SkNearestNeighbors\n from sklearn.neighbors import BallTree as SkBallTree\n from sklearn.neighbors import KDTree as SkKDTree\n from sklearn.utils.testing import assert_warns\nexcept ImportError: # pragma: no cover\n SkNearestNeighbors = None\n\nimport mars.tensor as mt\nfrom mars.lib.sparse import SparseNDArray\nfrom mars.tiles import get_tiled\nfrom mars.learn.neighbors import NearestNeighbors\n\n\[email protected](SkNearestNeighbors is None, 'scikit-learn not installed')\nclass Test(unittest.TestCase):\n def testNearestNeighbors(self):\n rs = np.random.RandomState(0)\n raw_X = rs.rand(10, 5)\n raw_Y = rs.rand(8, 5)\n\n X = mt.tensor(raw_X)\n Y = mt.tensor(raw_Y)\n\n raw_sparse_x = sps.random(10, 5, density=0.5, format='csr', random_state=rs)\n raw_sparse_y = sps.random(8, 5, density=0.4, format='csr', random_state=rs)\n\n X_sparse = mt.tensor(raw_sparse_x)\n Y_sparse = mt.tensor(raw_sparse_y)\n\n metric_func = lambda u, v: np.sqrt(((u-v)**2).sum())\n\n _ = NearestNeighbors(algorithm='auto', metric='precomputed', metric_params={})\n\n with self.assertRaises(ValueError):\n _ = NearestNeighbors(algorithm='unknown')\n\n with self.assertRaises(ValueError):\n _ = NearestNeighbors(algorithm='kd_tree', metric=metric_func)\n\n with self.assertRaises(ValueError):\n _ = NearestNeighbors(algorithm='auto', metric='unknown')\n\n assert_warns(SyntaxWarning, NearestNeighbors, metric_params={'p': 1})\n\n with self.assertRaises(ValueError):\n _ = NearestNeighbors(metric='wminkowski', p=0)\n\n with self.assertRaises(ValueError):\n _ = NearestNeighbors(algorithm='auto', metric='minkowski', p=0)\n\n nn = NearestNeighbors(algorithm='auto', metric='minkowski', p=1)\n nn.fit(X)\n 
self.assertEqual(nn.effective_metric_, 'manhattan')\n\n nn = NearestNeighbors(algorithm='auto', metric='minkowski', p=2)\n nn.fit(X)\n self.assertEqual(nn.effective_metric_, 'euclidean')\n\n nn = NearestNeighbors(algorithm='auto', metric='minkowski', p=np.inf)\n nn.fit(X)\n self.assertEqual(nn.effective_metric_, 'chebyshev')\n\n nn2 = NearestNeighbors(algorithm='auto', metric='minkowski')\n nn2.fit(nn)\n self.assertEqual(nn2._fit_method, nn._fit_method)\n\n nn = NearestNeighbors(algorithm='auto', metric='minkowski')\n ball_tree = SkBallTree(raw_X)\n nn.fit(ball_tree)\n self.assertEqual(nn._fit_method, 'ball_tree')\n\n nn = NearestNeighbors(algorithm='auto', metric='minkowski')\n kd_tree = SkKDTree(raw_X)\n nn.fit(kd_tree)\n self.assertEqual(nn._fit_method, 'kd_tree')\n\n with self.assertRaises(ValueError):\n nn = NearestNeighbors()\n nn.fit(np.random.rand(0, 10))\n\n nn = NearestNeighbors(algorithm='ball_tree')\n assert_warns(UserWarning, nn.fit, X_sparse)\n\n nn = NearestNeighbors(metric='haversine')\n with self.assertRaises(ValueError):\n nn.fit(X_sparse)\n\n nn = NearestNeighbors(metric=metric_func, n_neighbors=1)\n nn.fit(X)\n self.assertEqual(nn._fit_method, 'ball_tree')\n\n nn = NearestNeighbors(metric='sqeuclidean', n_neighbors=1)\n nn.fit(X)\n self.assertEqual(nn._fit_method, 'brute')\n\n with self.assertRaises(ValueError):\n nn = NearestNeighbors(n_neighbors=-1)\n nn.fit(X)\n\n with self.assertRaises(TypeError):\n nn = NearestNeighbors(n_neighbors=1.3)\n nn.fit(X)\n\n nn = NearestNeighbors()\n nn.fit(X)\n with self.assertRaises(ValueError):\n nn.kneighbors(Y, n_neighbors=-1)\n with self.assertRaises(TypeError):\n nn.kneighbors(Y, n_neighbors=1.3)\n with self.assertRaises(ValueError):\n nn.kneighbors(Y, n_neighbors=11)\n\n nn = NearestNeighbors(algorithm='ball_tree')\n nn.fit(X)\n with self.assertRaises(ValueError):\n nn.kneighbors(Y_sparse)\n\n def testNearestNeighborsExecution(self):\n rs = np.random.RandomState(0)\n raw_X = rs.rand(10, 5)\n raw_Y = rs.rand(8, 5)\n\n X = mt.tensor(raw_X, chunk_size=7)\n Y = mt.tensor(raw_Y, chunk_size=(5, 3))\n\n for algo in ['brute', 'ball_tree', 'kd_tree', 'auto']:\n for metric in ['minkowski', 'manhattan']:\n nn = NearestNeighbors(n_neighbors=3,\n algorithm=algo,\n metric=metric)\n nn.fit(X)\n\n ret = nn.kneighbors(Y)\n\n snn = SkNearestNeighbors(n_neighbors=3,\n algorithm=algo,\n metric=metric)\n snn.fit(raw_X)\n expected = snn.kneighbors(raw_Y)\n\n result = [r.fetch() for r in ret]\n np.testing.assert_almost_equal(result[0], expected[0])\n np.testing.assert_almost_equal(result[1], expected[1])\n\n if nn._tree is not None:\n self.assertIsInstance(nn._tree.fetch(), type(snn._tree))\n\n # test return_distance=False\n ret = nn.kneighbors(Y, return_distance=False)\n\n result = ret.fetch()\n np.testing.assert_almost_equal(result, expected[1])\n\n # test y is x\n ret = nn.kneighbors()\n\n expected = snn.kneighbors()\n\n result = [r.fetch() for r in ret]\n np.testing.assert_almost_equal(result[0], expected[0])\n np.testing.assert_almost_equal(result[1], expected[1])\n\n # test y is x, and return_distance=False\n ret = nn.kneighbors(return_distance=False)\n\n result = ret.fetch()\n np.testing.assert_almost_equal(result, expected[1])\n\n # test callable metric\n metric = lambda u, v: np.sqrt(((u-v)**2).sum())\n for algo in ['brute', 'ball_tree']:\n nn = NearestNeighbors(n_neighbors=3,\n algorithm=algo,\n metric=metric)\n nn.fit(X)\n\n ret = nn.kneighbors(Y)\n\n snn = SkNearestNeighbors(n_neighbors=3,\n algorithm=algo,\n metric=metric)\n 
snn.fit(raw_X)\n expected = snn.kneighbors(raw_Y)\n\n result = [r.fetch() for r in ret]\n np.testing.assert_almost_equal(result[0], expected[0])\n np.testing.assert_almost_equal(result[1], expected[1])\n\n # test sparse\n raw_sparse_x = sps.random(10, 5, density=0.5, format='csr', random_state=rs)\n raw_sparse_y = sps.random(8, 5, density=0.4, format='csr', random_state=rs)\n\n X = mt.tensor(raw_sparse_x, chunk_size=7)\n Y = mt.tensor(raw_sparse_y, chunk_size=5)\n\n nn = NearestNeighbors(n_neighbors=3)\n nn.fit(X)\n\n ret = nn.kneighbors(Y)\n\n snn = SkNearestNeighbors(n_neighbors=3)\n snn.fit(raw_sparse_x)\n expected = snn.kneighbors(raw_sparse_y)\n\n result = [r.fetch() for r in ret]\n np.testing.assert_almost_equal(result[0], expected[0])\n np.testing.assert_almost_equal(result[1], expected[1])\n\n # test input with unknown shape\n X = mt.tensor(raw_X, chunk_size=7)\n X = X[X[:, 0] > 0.1]\n Y = mt.tensor(raw_Y, chunk_size=(5, 3))\n Y = Y[Y[:, 0] > 0.1]\n\n nn = NearestNeighbors(n_neighbors=3)\n nn.fit(X)\n\n ret = nn.kneighbors(Y)\n\n x2 = raw_X[raw_X[:, 0] > 0.1]\n y2 = raw_Y[raw_Y[:, 0] > 0.1]\n snn = SkNearestNeighbors(n_neighbors=3)\n snn.fit(x2)\n expected = snn.kneighbors(y2)\n\n result = ret.fetch()\n self.assertEqual(nn._fit_method, snn._fit_method)\n np.testing.assert_almost_equal(result[0], expected[0])\n np.testing.assert_almost_equal(result[1], expected[1])\n\n # test serialization\n graph = ret[0].build_graph()\n self.assertEqual(len(graph.from_pb(graph.to_pb())), len(graph))\n self.assertEqual(len(graph.from_json(graph.to_json())), len(graph))\n\n # test fit a sklearn tree\n nn = NearestNeighbors(n_neighbors=3)\n nn.fit(snn._tree)\n\n ret = nn.kneighbors(Y)\n result = ret.fetch()\n self.assertEqual(nn._fit_method, snn._fit_method)\n np.testing.assert_almost_equal(result[0], expected[0])\n np.testing.assert_almost_equal(result[1], expected[1])\n\n # test serialization\n graph = ret[0].build_graph()\n self.assertEqual(len(graph.from_pb(graph.to_pb())), len(graph))\n self.assertEqual(len(graph.from_json(graph.to_json())), len(graph))\n\n def testKNeighborsGraphExecution(self):\n rs = np.random.RandomState(0)\n raw_X = rs.rand(10, 5)\n raw_Y = rs.rand(8, 5)\n\n X = mt.tensor(raw_X, chunk_size=7)\n Y = mt.tensor(raw_Y, chunk_size=(5, 3))\n\n neigh = NearestNeighbors(n_neighbors=3)\n neigh.fit(X)\n sklearn_neigh = SkNearestNeighbors(n_neighbors=3)\n sklearn_neigh.fit(raw_X)\n\n for mode in ['connectivity', 'distance']:\n graph = neigh.kneighbors_graph(Y, mode=mode)\n result = graph.fetch()\n\n self.assertIsInstance(result, SparseNDArray)\n self.assertGreater(len(get_tiled(graph).chunks), 1)\n\n expected = sklearn_neigh.kneighbors_graph(raw_Y, mode=mode)\n\n np.testing.assert_array_equal(result.toarray(),\n expected.toarray())\n\n graph2 = neigh.kneighbors_graph(mode=mode)\n result2 = graph2.fetch()\n\n self.assertIsInstance(result2, SparseNDArray)\n self.assertGreater(len(get_tiled(graph2).chunks), 1)\n\n expected2 = sklearn_neigh.kneighbors_graph(mode=mode)\n\n np.testing.assert_array_equal(result2.toarray(),\n expected2.toarray())\n\n X = [[0], [3], [1]]\n\n neigh = NearestNeighbors(n_neighbors=2)\n sklearn_neigh = SkNearestNeighbors(n_neighbors=2)\n neigh.fit(X)\n sklearn_neigh.fit(X)\n\n A = neigh.kneighbors_graph(X).fetch()\n expected_A = sklearn_neigh.kneighbors_graph(X)\n np.testing.assert_array_equal(A.toarray(), expected_A.toarray())\n\n # test wrong mode\n with self.assertRaises(ValueError):\n _ = neigh.kneighbors_graph(mode='unknown')\n\n @unittest.skipIf(faiss is 
None, 'faiss not installed')\n def testFaissNearestNeighborsExecution(self):\n rs = np.random.RandomState(0)\n raw_X = rs.rand(10, 5)\n raw_Y = rs.rand(8, 5)\n\n # test faiss execution\n X = mt.tensor(raw_X, chunk_size=7)\n Y = mt.tensor(raw_Y, chunk_size=(5, 3))\n\n nn = NearestNeighbors(n_neighbors=3, algorithm='faiss', metric='l2')\n nn.fit(X)\n\n ret = nn.kneighbors(Y)\n\n snn = SkNearestNeighbors(n_neighbors=3, algorithm='auto', metric='l2')\n snn.fit(raw_X)\n expected = snn.kneighbors(raw_Y)\n\n result = [r.fetch() for r in ret]\n np.testing.assert_almost_equal(result[0], expected[0], decimal=6)\n np.testing.assert_almost_equal(result[1], expected[1])\n\n # test return_distance=False\n ret = nn.kneighbors(Y, return_distance=False)\n\n result = ret.fetch()\n np.testing.assert_almost_equal(result, expected[1])\n\n # test y is x\n ret = nn.kneighbors()\n\n expected = snn.kneighbors()\n\n result = [r.fetch() for r in ret]\n np.testing.assert_almost_equal(result[0], expected[0], decimal=5)\n np.testing.assert_almost_equal(result[1], expected[1])\n\n @unittest.skipIf(cupy is None or faiss is None, 'either cupy or faiss not installed')\n def testGPUFaissNearestNeighborsExecution(self):\n rs = np.random.RandomState(0)\n\n raw_X = rs.rand(10, 5)\n raw_Y = rs.rand(8, 5)\n\n # test faiss execution\n X = mt.tensor(raw_X, chunk_size=7).to_gpu()\n Y = mt.tensor(raw_Y, chunk_size=8).to_gpu()\n\n nn = NearestNeighbors(n_neighbors=3, algorithm='faiss', metric='l2')\n nn.fit(X)\n\n ret = nn.kneighbors(Y)\n\n snn = SkNearestNeighbors(n_neighbors=3, algorithm='auto', metric='l2')\n snn.fit(raw_X)\n expected = snn.kneighbors(raw_Y)\n\n result = [r.fetch() for r in ret]\n np.testing.assert_almost_equal(result[0].get(), expected[0], decimal=6)\n np.testing.assert_almost_equal(result[1].get(), expected[1])\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nfrom collections.abc import Iterable\n\nimport numpy as np\n\nfrom ... 
import opcodes as OperandDef\nfrom ...serialize import ValueType, KeyField, AnyField, TupleField, BoolField\nfrom ..utils import recursive_tile\nfrom ..array_utils import device, as_same_device\nfrom ..operands import TensorHasInput, TensorOperandMixin\nfrom ..arithmetic import sqrt\nfrom ..datasource import tensor as astensor\nfrom .svd import svd\n\n\nclass TensorNorm(TensorHasInput, TensorOperandMixin):\n _op_type_ = OperandDef.NORM\n\n _input = KeyField('input')\n _ord = AnyField('ord')\n _axis = TupleField('axis', ValueType.int32)\n _keepdims = BoolField('keepdims')\n\n def __init__(self, ord=None, axis=None, keepdims=None, dtype=None, sparse=False, **kw):\n super().__init__(_ord=ord, _axis=axis, _keepdims=keepdims, _dtype=dtype,\n _sparse=sparse, **kw)\n\n @property\n def ord(self):\n return getattr(self, '_ord', None)\n\n @property\n def axis(self):\n return self._axis\n\n @property\n def keepdims(self):\n return self._keepdims\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def __call__(self, x):\n r = x.astype(self.dtype)\n shape = self._norm(r, self._ord, self._axis, self._keepdims).shape\n return self.new_tensor([x], shape)\n\n @classmethod\n def tile(cls, op):\n x = op.input\n axis = op.axis\n ord = op.ord\n keepdims = op.keepdims\n\n axis_chunk_shapes = tuple(x.chunk_shape[i] for i in axis)\n can_apply_norm = all(s == 1 for s in axis_chunk_shapes)\n\n if can_apply_norm:\n axis_set = set(axis)\n get_shape = lambda shape: tuple(s if i not in axis_set else 1 for i, s in enumerate(shape)\n if i not in axis_set or keepdims)\n\n out_chunk_shape = get_shape(x.chunk_shape)\n out_chunks = []\n for idx in itertools.product(*[range(s) for s in out_chunk_shape]):\n idx_iter = iter(idx)\n in_idx = tuple(0 if i in axis_set and not keepdims else next(idx_iter)\n for i in range(x.ndim))\n\n c = x.cix[in_idx]\n chunk_op = op.copy().reset_key()\n out_chunk = chunk_op.new_chunk([c], shape=get_shape(c.shape), index=idx)\n out_chunks.append(out_chunk)\n\n nsplits = [tuple(c.shape[i] for c in out_chunks\n if all(idx == 0 for j, idx in enumerate(c.index) if j != i))\n for i in range(len(out_chunks[0].shape))]\n new_op = op.copy()\n return new_op.new_tensors(op.inputs, op.outputs[0].shape, chunks=out_chunks, nsplits=nsplits)\n\n r = cls._norm(x.astype(op.outputs[0].dtype), ord, axis, keepdims)\n recursive_tile(r)\n new_op = op.copy()\n return new_op.new_tensors(op.inputs, op.outputs[0].shape, chunks=r.chunks, nsplits=r.nsplits)\n\n @staticmethod\n def _norm(r, ord, axis, keepdims):\n if ord is None:\n return sqrt((abs(r) ** 2).sum(axis=axis, keepdims=keepdims))\n elif ord == 'nuc':\n if len(axis) == 1:\n raise ValueError('Invalid norm order for vectors.')\n return svd(r)[1][np.newaxis].sum(keepdims=keepdims)\n elif ord == np.inf:\n if r.ndim > 2:\n raise ValueError('Improper number of dimensions to norm.')\n r = abs(r)\n if len(axis) == 1:\n return r.max(axis=axis, keepdims=keepdims)\n else:\n return r.sum(axis=axis[1], keepdims=keepdims).max(keepdims=keepdims)\n elif ord == -np.inf:\n if r.ndim > 2:\n raise ValueError('Improper number of dimensions to norm.')\n r = abs(r)\n if len(axis) == 1:\n return r.min(axis=axis, keepdims=keepdims)\n else:\n return r.sum(axis=axis[1], keepdims=keepdims).min(keepdims=keepdims)\n elif ord == 0:\n if r.ndim > 2:\n raise ValueError('Improper number of dimensions to norm.')\n if len(axis) == 2:\n raise ValueError('Invalid norm order for matrices.')\n return (r != 0).astype(r.dtype).sum(axis=axis, 
keepdims=keepdims)\n elif ord == 1:\n if r.ndim > 2:\n raise ValueError('Improper number of dimensions to norm.')\n r = abs(r)\n if len(axis) == 1:\n return r.sum(axis=axis, keepdims=keepdims)\n else:\n return r.sum(axis=axis[0], keepdims=keepdims).max(keepdims=keepdims)\n elif ord == -1 and len(axis) == 2:\n if r.ndim > 2:\n raise ValueError('Improper number of dimensions to norm.')\n return abs(r).sum(axis=axis[0], keepdims=keepdims).min(keepdims=keepdims)\n elif ord == 2 and len(axis) == 2:\n return svd(r)[1][np.newaxis].max(keepdims=keepdims)\n elif ord == -2 and len(axis) == 2:\n return svd(r)[1][np.newaxis].min(keepdims=keepdims)\n else:\n if len(axis) == 2:\n raise ValueError('Invalid norm order for matrices.')\n\n return (abs(r) ** ord).sum(axis=axis, keepdims=keepdims) ** (1.0 / ord)\n\n @classmethod\n def execute(cls, ctx, op):\n (x,), device_id, xp = as_same_device(\n [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)\n\n with device(device_id):\n ctx[op.outputs[0].key] = xp.linalg.norm(x, ord=op.ord, axis=op.axis,\n keepdims=op.keepdims)\n\n\ndef norm(x, ord=None, axis=None, keepdims=False):\n r\"\"\"\n Matrix or vector norm.\n\n This function is able to return one of eight different matrix norms,\n or one of an infinite number of vector norms (described below), depending\n on the value of the ``ord`` parameter.\n\n Parameters\n ----------\n x : array_like\n Input tensor. If `axis` is None, `x` must be 1-D or 2-D.\n ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional\n Order of the norm (see table under ``Notes``). inf means mars tensor's\n `inf` object.\n axis : {int, 2-tuple of ints, None}, optional\n If `axis` is an integer, it specifies the axis of `x` along which to\n compute the vector norms. If `axis` is a 2-tuple, it specifies the\n axes that hold 2-D matrices, and the matrix norms of these matrices\n are computed. If `axis` is None then either a vector norm (when `x`\n is 1-D) or a matrix norm (when `x` is 2-D) is returned.\n keepdims : bool, optional\n If this is set to True, the axes which are normed over are left in the\n result as dimensions with size one. With this option the result will\n broadcast correctly against the original `x`.\n\n Returns\n -------\n n : float or Tensor\n Norm of the matrix or vector(s).\n\n Notes\n -----\n For values of ``ord <= 0``, the result is, strictly speaking, not a\n mathematical 'norm', but it may still be useful for various numerical\n purposes.\n\n The following norms can be calculated:\n\n ===== ============================ ==========================\n ord norm for matrices norm for vectors\n ===== ============================ ==========================\n None Frobenius norm 2-norm\n 'fro' Frobenius norm --\n 'nuc' nuclear norm --\n inf max(sum(abs(x), axis=1)) max(abs(x))\n -inf min(sum(abs(x), axis=1)) min(abs(x))\n 0 -- sum(x != 0)\n 1 max(sum(abs(x), axis=0)) as below\n -1 min(sum(abs(x), axis=0)) as below\n 2 2-norm (largest sing. value) as below\n -2 smallest singular value as below\n other -- sum(abs(x)**ord)**(1./ord)\n ===== ============================ ==========================\n\n The Frobenius norm is given by [1]_:\n\n :math:`||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`\n\n The nuclear norm is the sum of the singular values.\n\n References\n ----------\n .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,\n Baltimore, MD, Johns Hopkins University Press, 1985, pg. 
15\n\n Examples\n --------\n >>> from mars.tensor import linalg as LA\n >>> import mars.tensor as mt\n >>> a = mt.arange(9) - 4\n >>> a.execute()\n array([-4, -3, -2, -1, 0, 1, 2, 3, 4])\n >>> b = a.reshape((3, 3))\n >>> b.execute()\n array([[-4, -3, -2],\n [-1, 0, 1],\n [ 2, 3, 4]])\n\n >>> LA.norm(a).execute()\n 7.745966692414834\n >>> LA.norm(b).execute()\n 7.745966692414834\n >>> LA.norm(b, 'fro').execute()\n 7.745966692414834\n >>> LA.norm(a, mt.inf).execute()\n 4.0\n >>> LA.norm(b, mt.inf).execute()\n 9.0\n >>> LA.norm(a, -mt.inf).execute()\n 0.0\n >>> LA.norm(b, -mt.inf).execute()\n 2.0\n\n >>> LA.norm(a, 1).execute()\n 20.0\n >>> LA.norm(b, 1).execute()\n 7.0\n >>> LA.norm(a, -1).execute()\n 0.0\n >>> LA.norm(b, -1).execute()\n 6.0\n >>> LA.norm(a, 2).execute()\n 7.745966692414834\n >>> LA.norm(b, 2).execute()\n 7.3484692283495345\n\n >>> LA.norm(a, -2).execute()\n 0.0\n >>> LA.norm(b, -2).execute()\n 4.351066026358965e-18\n >>> LA.norm(a, 3).execute()\n 5.8480354764257312\n >>> LA.norm(a, -3).execute()\n 0.0\n\n Using the `axis` argument to compute vector norms:\n\n >>> c = mt.array([[ 1, 2, 3],\n ... [-1, 1, 4]])\n >>> LA.norm(c, axis=0).execute()\n array([ 1.41421356, 2.23606798, 5. ])\n >>> LA.norm(c, axis=1).execute()\n array([ 3.74165739, 4.24264069])\n >>> LA.norm(c, ord=1, axis=1).execute()\n array([ 6., 6.])\n\n Using the `axis` argument to compute matrix norms:\n\n >>> m = mt.arange(8).reshape(2,2,2)\n >>> LA.norm(m, axis=(1,2)).execute()\n array([ 3.74165739, 11.22497216])\n >>> LA.norm(m[0, :, :]).execute(), LA.norm(m[1, :, :]).execute()\n (3.7416573867739413, 11.224972160321824)\n\n \"\"\"\n x = astensor(x)\n\n if ord == 'fro':\n ord = None\n if axis is not None:\n if isinstance(axis, Iterable):\n axis = tuple(axis)\n else:\n axis = (axis,)\n else:\n axis = tuple(range(x.ndim))\n\n op = TensorNorm(ord=ord, axis=axis, keepdims=keepdims,\n dtype=np.result_type(x.dtype, np.float_), sparse=x.issparse())\n return op(x)\n" ]
[ [ "numpy.arange", "numpy.dtype" ], [ "pandas.concat", "pandas.testing.assert_series_equal", "pandas.RangeIndex", "numpy.arange", "numpy.random.bytes", "pandas.Int64Index", "numpy.random.rand", "numpy.random.randint" ], [ "sklearn.utils.extmath.fast_logdet", "sklearn.utils.validation.check_is_fitted", "scipy.special.gammaln" ], [ "sklearn.neighbors.BallTree", "sklearn.neighbors.KDTree", "scipy.sparse.random", "numpy.testing.assert_almost_equal", "sklearn.neighbors.NearestNeighbors", "sklearn.utils.testing.assert_warns", "numpy.random.rand", "numpy.random.RandomState" ], [ "numpy.result_type" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.3", "1.1", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.18", "0.19" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iwangjian/ByteCup2018
[ "348bdee3215c146ef7d6e4fe1fecbe4598798c8a", "348bdee3215c146ef7d6e4fe1fecbe4598798c8a", "348bdee3215c146ef7d6e4fe1fecbe4598798c8a" ]
[ "model/dropout.py", "model/copy_summ.py", "model/extract.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\nclass LockedDropout(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x, dropout=0.5, seq_lens=None):\n if not self.training or not dropout:\n return x\n if seq_lens == None:\n m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)\n mask = Variable(m, requires_grad=False) / (1 - dropout)\n mask = mask.expand_as(x)\n return mask * x\n else:\n x, _ = nn.utils.rnn.pad_packed_sequence(x)\n m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)\n mask = Variable(m, requires_grad=False) / (1 - dropout)\n mask = mask.expand_as(x)\n x = mask * x\n return nn.utils.rnn.pack_padded_sequence(x, seq_lens)\n\n\nclass WeightDropout(nn.Module):\n def __init__(self, module, weights, dropout=0, variational=False):\n super(WeightDropout, self).__init__()\n self.module = module\n self.weights = weights\n self.dropout = dropout\n self.variational = variational\n if hasattr(module, 'batch_first'):\n self.batch_first = module.batch_first\n else:\n self.batch_first = False\n self._setup()\n\n def widget_demagnetizer_y2k_edition(*args, **kwargs):\n # We need to replace flatten_parameters with a nothing function\n # It must be a function rather than a lambda as otherwise pickling explodes\n # We can't write boring code though, so ... WIDGET DEMAGNETIZER Y2K EDITION!\n return\n\n def _setup(self):\n # Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN\n if issubclass(type(self.module), nn.RNNBase):\n self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition\n\n for name_w in self.weights:\n print('Applying weight drop of {} to {}'.format(self.dropout, name_w))\n w = getattr(self.module, name_w)\n del self.module._parameters[name_w]\n self.module.register_parameter(name_w + '_raw', nn.Parameter(w.data))\n\n def _setweights(self):\n for name_w in self.weights:\n raw_w = getattr(self.module, name_w + '_raw')\n w = None\n if self.variational:\n mask = Variable(torch.ones(raw_w.size(0), 1))\n if raw_w.is_cuda: mask = mask.cuda()\n mask = nn.functional.dropout(mask, p=self.dropout, training=True)\n w = mask.expand_as(raw_w) * raw_w\n else:\n w = nn.functional.dropout(raw_w, p=self.dropout, training=self.training)\n setattr(self.module, name_w, w)\n\n def forward(self, *args):\n self._setweights()\n return self.module.forward(*args)\n", "import torch\nfrom torch import nn\nfrom torch.nn import init\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nfrom .attention import step_attention\nfrom .util import len_mask\nfrom .summ import Seq2SeqSumm, AttentionalLSTMDecoder\nfrom . 
import beam_search as bs\n\n\nINIT = 1e-2\n\n\nclass _CopyLinear(nn.Module):\n def __init__(self, context_dim, state_dim, input_dim, bias=True):\n super().__init__()\n self._v_c = nn.Parameter(torch.Tensor(context_dim))\n self._v_s = nn.Parameter(torch.Tensor(state_dim))\n self._v_i = nn.Parameter(torch.Tensor(input_dim))\n init.uniform_(self._v_c, -INIT, INIT)\n init.uniform_(self._v_s, -INIT, INIT)\n init.uniform_(self._v_i, -INIT, INIT)\n if bias:\n self._b = nn.Parameter(torch.zeros(1))\n else:\n self.regiser_module(None, '_b')\n\n def forward(self, context, state, input_):\n output = (torch.matmul(context, self._v_c.unsqueeze(1))\n + torch.matmul(state, self._v_s.unsqueeze(1))\n + torch.matmul(input_, self._v_i.unsqueeze(1)))\n if self._b is not None:\n output = output + self._b.unsqueeze(0)\n return output\n\n\nclass CopySumm(Seq2SeqSumm):\n def __init__(self, vocab_size, emb_dim,\n n_hidden, bidirectional, n_layer, dropoute, dropouti, dropout, wdrop, dropouth):\n super().__init__(vocab_size, emb_dim,\n n_hidden, bidirectional, n_layer, dropoute, dropouti, dropout, wdrop, dropouth)\n self._copy = _CopyLinear(n_hidden, n_hidden, 2*emb_dim)\n self._decoder = CopyLSTMDecoder(\n self._copy, self._embedding, self._dec_lstm,\n self._attn_wq, self._projection, dropouth=dropouth\n )\n\n def forward(self, article, art_lens, abstract, extend_art, extend_vsize):\n attention, init_dec_states = self.encode(article, art_lens)\n mask = len_mask(art_lens, attention.device).unsqueeze(-2)\n logit = self._decoder(\n (attention, mask, extend_art, extend_vsize),\n init_dec_states, abstract\n )\n return logit\n\n def batch_decode(self, article, art_lens, extend_art, extend_vsize,\n go, eos, unk, max_len):\n \"\"\" greedy decode support batching\"\"\"\n batch_size = len(art_lens)\n vsize = self._embedding.num_embeddings\n attention, init_dec_states = self.encode(article, art_lens)\n mask = len_mask(art_lens, attention.device).unsqueeze(-2)\n attention = (attention, mask, extend_art, extend_vsize)\n tok = torch.LongTensor([go]*batch_size).to(article.device)\n outputs = []\n attns = []\n states = init_dec_states\n\n # start of locked dropout\n m = init_dec_states[0][0].data.new(init_dec_states[0][0].size(0),\n init_dec_states[0][0].size(1),\n self.emb_size * 2\n ).bernoulli_(1 - self.dropouth)\n dropout_mask = Variable(m, requires_grad=False) / (1 - self.dropouth)\n # end of locked dropout\n\n for i in range(max_len):\n tok, states, attn_score = self._decoder.decode_step(\n tok, states, attention, dropout_mask)\n attns.append(attn_score)\n outputs.append(tok[:, 0].clone())\n tok.masked_fill_(tok >= vsize, unk)\n return outputs, attns\n\n def decode(self, article, extend_art, extend_vsize, go, eos, unk, max_len):\n vsize = self._embedding.num_embeddings\n attention, init_dec_states = self.encode(article)\n attention = (attention, None, extend_art, extend_vsize)\n tok = torch.LongTensor([go]).to(article.device)\n outputs = []\n attns = []\n states = init_dec_states\n\n # start of locked dropout\n m = init_dec_states[0][0].data.new(init_dec_states[0][0].size(0),\n init_dec_states[0][0].size(1),\n self.emb_size * 2\n ).bernoulli_(1 - self.dropouth)\n dropout_mask = Variable(m, requires_grad=False) / (1 - self.dropouth)\n # end of locked dropout\n\n for i in range(max_len):\n tok, states, attn_score = self._decoder.decode_step(\n tok, states, attention, dropout_mask)\n if tok[0, 0].item() == eos:\n break\n outputs.append(tok[0, 0].item())\n attns.append(attn_score.squeeze(0))\n if tok[0, 0].item() >= vsize:\n 
tok[0, 0] = unk\n return outputs, attns\n\n def batched_beamsearch(self, article, art_lens,\n extend_art, extend_vsize,\n go, eos, unk, max_len, beam_size, diverse=1.0):\n batch_size = len(art_lens)\n vsize = self._embedding.num_embeddings\n attention, init_dec_states = self.encode(article, art_lens)\n mask = len_mask(art_lens, attention.device).unsqueeze(-2)\n all_attention = (attention, mask, extend_art, extend_vsize)\n attention = all_attention\n (h, c), prev = init_dec_states\n all_beams = [bs.init_beam(go, (h[:, i, :], c[:, i, :], prev[i]))\n for i in range(batch_size)]\n finished_beams = [[] for _ in range(batch_size)]\n outputs = [None for _ in range(batch_size)]\n\n # start of locked dropout\n m = h.data.new(h.size(0), h.size(1), self.emb_size * 2).bernoulli_(1 - self.dropouth)\n dropout_mask = Variable(m, requires_grad=False) / (1 - self.dropouth)\n # end of locked dropout\n\n for t in range(max_len):\n toks = []\n all_states = []\n for beam in filter(bool, all_beams):\n token, states = bs.pack_beam(beam, article.device)\n toks.append(token)\n all_states.append(states)\n token = torch.stack(toks, dim=1)\n states = ((torch.stack([h for (h, _), _ in all_states], dim=2),\n torch.stack([c for (_, c), _ in all_states], dim=2)),\n torch.stack([prev for _, prev in all_states], dim=1))\n token.masked_fill_(token >= vsize, unk)\n\n topk, lp, states, attn_score = self._decoder.topk_step(\n token, states, attention, beam_size, dropout_mask)\n\n batch_i = 0\n for i, (beam, finished) in enumerate(zip(all_beams,\n finished_beams)):\n if not beam:\n continue\n finished, new_beam = bs.next_search_beam(\n beam, beam_size, finished, eos,\n topk[:, batch_i, :], lp[:, batch_i, :],\n (states[0][0][:, :, batch_i, :],\n states[0][1][:, :, batch_i, :],\n states[1][:, batch_i, :]),\n attn_score[:, batch_i, :],\n diverse\n )\n batch_i += 1\n if len(finished) >= beam_size:\n all_beams[i] = []\n outputs[i] = finished[:beam_size]\n # exclude finished inputs\n (attention, mask, extend_art, extend_vsize\n ) = all_attention\n masks = [mask[j] for j, o in enumerate(outputs)\n if o is None]\n ind = [j for j, o in enumerate(outputs) if o is None]\n ind = torch.LongTensor(ind).to(attention.device)\n attention, extend_art = map(\n lambda v: v.index_select(dim=0, index=ind),\n [attention, extend_art]\n )\n if masks:\n mask = torch.stack(masks, dim=0)\n else:\n mask = None\n attention = (\n attention, mask, extend_art, extend_vsize)\n else:\n all_beams[i] = new_beam\n finished_beams[i] = finished\n if all(outputs):\n break\n else:\n for i, (o, f, b) in enumerate(zip(outputs,\n finished_beams, all_beams)):\n if o is None:\n outputs[i] = (f+b)[:beam_size]\n return outputs\n\n\nclass CopyLSTMDecoder(AttentionalLSTMDecoder):\n def __init__(self, copy, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._copy = copy\n\n def _step(self, tok, states, attention, dropout_mask):\n prev_states, prev_out = states\n lstm_in = torch.cat(\n [self._embedding(tok).squeeze(1), prev_out],\n dim=1\n )\n states = self._lstm(lstm_in, prev_states, dropout_mask)\n lstm_out = states[0][-1]\n query = torch.mm(lstm_out, self._attn_w)\n attention, attn_mask, extend_src, extend_vsize = attention\n context, score = step_attention(\n query, attention, attention, attn_mask)\n dec_out = self._projection(torch.cat([lstm_out, context], dim=1))\n\n # extend generation prob to extended vocabulary\n gen_prob = self._compute_gen_prob(dec_out, extend_vsize)\n # compute the probabilty of each copying\n copy_prob = torch.sigmoid(self._copy(context, 
states[0][-1], lstm_in))\n # add the copy prob to existing vocab distribution\n lp = torch.log(\n ((-copy_prob + 1) * gen_prob\n ).scatter_add(\n dim=1,\n index=extend_src.expand_as(score),\n source=score * copy_prob\n ) + 1e-8) # numerical stability for log\n return lp, (states, dec_out), score\n\n\n def topk_step(self, tok, states, attention, k, dropout_mask):\n \"\"\"tok:[BB, B], states ([L, BB, B, D]*2, [BB, B, D])\"\"\"\n (h, c), prev_out = states\n\n # lstm is not bemable\n nl, _, _, d = h.size()\n beam, batch = tok.size()\n lstm_in_beamable = torch.cat(\n [self._embedding(tok), prev_out], dim=-1)\n lstm_in = lstm_in_beamable.contiguous().view(beam*batch, -1)\n prev_states = (h.contiguous().view(nl, -1, d),\n c.contiguous().view(nl, -1, d))\n h, c = self._lstm(lstm_in, prev_states, dropout_mask)\n states = (h.contiguous().view(nl, beam, batch, -1),\n c.contiguous().view(nl, beam, batch, -1))\n lstm_out = states[0][-1]\n\n # attention is beamable\n query = torch.matmul(lstm_out, self._attn_w)\n attention, attn_mask, extend_src, extend_vsize = attention\n context, score = step_attention(\n query, attention, attention, attn_mask)\n dec_out = self._projection(torch.cat([lstm_out, context], dim=-1))\n\n # copy mechanism is not beamable\n gen_prob = self._compute_gen_prob(\n dec_out.contiguous().view(batch*beam, -1), extend_vsize)\n copy_prob = torch.sigmoid(\n self._copy(context, lstm_out, lstm_in_beamable)\n ).contiguous().view(-1, 1)\n lp = torch.log(\n ((-copy_prob + 1) * gen_prob\n ).scatter_add(\n dim=1,\n index=extend_src.expand_as(score).contiguous().view(\n beam*batch, -1),\n source=score.contiguous().view(beam*batch, -1) * copy_prob\n ) + 1e-8).contiguous().view(beam, batch, -1)\n\n k_lp, k_tok = lp.topk(k=k, dim=-1)\n return k_tok, k_lp, (states, dec_out), score\n\n def _compute_gen_prob(self, dec_out, extend_vsize, eps=1e-6):\n logit = torch.mm(dec_out, self._embedding.weight.t())\n bsize, vsize = logit.size()\n if extend_vsize > vsize:\n ext_logit = torch.Tensor(bsize, extend_vsize-vsize\n ).to(logit.device)\n ext_logit.fill_(eps)\n gen_logit = torch.cat([logit, ext_logit], dim=1)\n else:\n gen_logit = logit\n gen_prob = F.softmax(gen_logit, dim=-1)\n return gen_prob\n\n def _compute_copy_activation(self, context, state, input_, score):\n copy = self._copy(context, state, input_) * score\n return copy\n", "import torch\nimport numpy as np\nfrom torch import nn\nfrom torch.nn import init\nfrom torch.nn import functional as F\n\nfrom .rnn import MultiLayerLSTMCells\nfrom .rnn import lstm_encoder\nfrom .util import sequence_mean, len_mask\nfrom .attention import prob_normalize\nfrom .embed_regularize import embedded_dropout\nfrom .rnn import MultiLayerLSTMCells_abs_enc\nfrom .dropout import LockedDropout\n\nINI = 1e-2\n\n\nclass ConvSentEncoder(nn.Module):\n \"\"\"\n Convolutional word-level sentence encoder\n w/ max-over-time pooling, [3, 4, 5] kernel sizes, ReLU activation\n \"\"\"\n def __init__(self, vocab_size, emb_dim, n_hidden, dropout, dropoute):\n super().__init__()\n self._embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)\n self._convs = nn.ModuleList([nn.Conv1d(emb_dim, n_hidden, i)\n for i in range(3, 6)])\n self._dropout = dropout\n self._dropoute = dropoute\n self._grad_handle = None\n\n def forward(self, input_):\n # emb_input = self._embedding(input_)\n emb_input = embedded_dropout(self._embedding, input_, dropout=self._dropoute if self.training else 0)\n\n conv_in = F.dropout(emb_input.transpose(1, 2), self._dropout, training=self.training)\n if 
conv_in.size(2) < 6:\n print(\"conv: \", conv_in.size())\n if conv_in.is_cuda:\n conv_in = torch.cat((conv_in,\n torch.autograd.Variable(\n torch.cuda.FloatTensor(np.zeros([conv_in.size(0), conv_in.size(1), 6 - conv_in.size(2)])))\n ),2)\n else:\n conv_in = torch.cat((conv_in,\n torch.autograd.Variable(torch.zeros(conv_in.size(0), conv_in.size(1), 6 - conv_in.size(2)))\n ), 2)\n print(\"af-conv: \", conv_in.size())\n output = torch.cat([F.relu(conv(conv_in)).max(dim=2)[0]\n for conv in self._convs], dim=1)\n return output\n\n def set_embedding(self, embedding):\n \"\"\"embedding is the weight matrix\"\"\"\n assert self._embedding.weight.size() == embedding.size()\n self._embedding.weight.data.copy_(embedding)\n #self._embedding.weight.requires_grad = False\n\n\nclass LSTMEncoder(nn.Module):\n def __init__(self, input_dim, n_hidden, n_layer, dropout, wdrop, dropouth, bidirectional):\n super().__init__()\n self._init_h = nn.Parameter(\n torch.Tensor(n_layer*(2 if bidirectional else 1), n_hidden))\n self._init_c = nn.Parameter(\n torch.Tensor(n_layer*(2 if bidirectional else 1), n_hidden))\n init.uniform_(self._init_h, -INI, INI)\n init.uniform_(self._init_c, -INI, INI)\n\n # weight_dropoutput\n # self._lstm = nn.LSTM(input_dim, n_hidden, n_layer,\n # dropout=dropout, bidirectional=bidirectional)\n self.lockdrop = LockedDropout()\n self._lstm = MultiLayerLSTMCells_abs_enc(\n input_dim, n_hidden, n_layer,\n dropout=dropout, wdrop=wdrop, dropouth=dropouth, bidirectional=bidirectional,\n lockdrop=self.lockdrop\n )\n\n def forward(self, input_, in_lens=None):\n \"\"\" [batch_size, max_num_sent, input_dim] Tensor\"\"\"\n size = (self._init_h.size(0), input_.size(0), self._init_h.size(1))\n init_states = (self._init_h.unsqueeze(1).expand(*size),\n self._init_c.unsqueeze(1).expand(*size))\n lstm_out, _ = lstm_encoder(\n input_, self._lstm, in_lens, init_states)\n return lstm_out.transpose(0, 1)\n\n @property\n def input_size(self):\n return self._lstm.input_size\n\n @property\n def hidden_size(self):\n return self._lstm.hidden_size\n\n @property\n def num_layers(self):\n return self._lstm.num_layers\n\n @property\n def bidirectional(self):\n return self._lstm.bidirectional\n\n\nclass ExtractSumm(nn.Module):\n \"\"\" ff-ext \"\"\"\n def __init__(self, vocab_size, emb_dim,\n conv_hidden, lstm_hidden, lstm_layer,\n bidirectional, dropout=0.0, dropoute=0.1, wdrop=0.5, dropouth=0.3):\n super().__init__()\n self._sent_enc = ConvSentEncoder(\n vocab_size, emb_dim, conv_hidden, dropout, dropoute)\n self._art_enc = LSTMEncoder(\n 3*conv_hidden, lstm_hidden, lstm_layer,\n dropout=dropout, wdrop=wdrop, dropouth=dropouth, bidirectional=bidirectional\n )\n\n lstm_out_dim = lstm_hidden * (2 if bidirectional else 1)\n self._sent_linear = nn.Linear(lstm_out_dim, 1)\n self._art_linear = nn.Linear(lstm_out_dim, lstm_out_dim)\n\n def forward(self, article_sents, sent_nums):\n enc_sent, enc_art = self._encode(article_sents, sent_nums)\n saliency = torch.matmul(enc_sent, enc_art.unsqueeze(2))\n saliency = torch.cat(\n [s[:n] for s, n in zip(saliency, sent_nums)], dim=0)\n content = self._sent_linear(\n torch.cat([s[:n] for s, n in zip(enc_sent, sent_nums)], dim=0)\n )\n logit = (content + saliency).squeeze(1)\n return logit\n\n def extract(self, article_sents, sent_nums=None, k=4):\n \"\"\" extract top-k scored sentences from article (eval only)\"\"\"\n enc_sent, enc_art = self._encode(article_sents, sent_nums)\n saliency = torch.matmul(enc_sent, enc_art.unsqueeze(2))\n content = self._sent_linear(enc_sent)\n logit = 
(content + saliency).squeeze(2)\n if sent_nums is None: # test-time extract only\n assert len(article_sents) == 1\n n_sent = logit.size(1)\n extracted = logit[0].topk(\n k if k < n_sent else n_sent, sorted=False # original order\n )[1].tolist()\n else:\n extracted = [l[:n].topk(k if k < n else n)[1].tolist()\n for n, l in zip(sent_nums, logit)]\n return extracted\n\n def _encode(self, article_sents, sent_nums):\n if sent_nums is None: # test-time extract only\n enc_sent = self._sent_enc(article_sents[0]).unsqueeze(0)\n else:\n max_n = max(sent_nums)\n enc_sents = [self._sent_enc(art_sent)\n for art_sent in article_sents]\n def zero(n, device):\n z = torch.zeros(n, self._art_enc.input_size).to(device)\n return z\n enc_sent = torch.stack(\n [torch.cat([s, zero(max_n-n, s.device)],\n dim=0) if n != max_n\n else s\n for s, n in zip(enc_sents, sent_nums)],\n dim=0\n )\n lstm_out = self._art_enc(enc_sent, sent_nums)\n enc_art = F.tanh(\n self._art_linear(sequence_mean(lstm_out, sent_nums, dim=1)))\n return lstm_out, enc_art\n\n def set_embedding(self, embedding):\n self._sent_enc.set_embedding(embedding)\n\n\nclass LSTMPointerNet(nn.Module):\n \"\"\"Pointer network as in Vinyals et al \"\"\"\n def __init__(self, input_dim, n_hidden, n_layer,\n dropout, n_hop):\n super().__init__()\n self._init_h = nn.Parameter(torch.Tensor(n_layer, n_hidden))\n self._init_c = nn.Parameter(torch.Tensor(n_layer, n_hidden))\n self._init_i = nn.Parameter(torch.Tensor(input_dim))\n init.uniform_(self._init_h, -INI, INI)\n init.uniform_(self._init_c, -INI, INI)\n init.uniform_(self._init_i, -0.1, 0.1)\n\n self._lstm = nn.LSTM(\n input_dim, n_hidden, n_layer,\n bidirectional=False, dropout=dropout\n )\n self._lstm_cell = None\n\n # attention parameters\n self._attn_wm = nn.Parameter(torch.Tensor(input_dim, n_hidden))\n self._attn_wq = nn.Parameter(torch.Tensor(n_hidden, n_hidden))\n self._attn_v = nn.Parameter(torch.Tensor(n_hidden))\n init.xavier_normal_(self._attn_wm)\n init.xavier_normal_(self._attn_wq)\n init.uniform_(self._attn_v, -INI, INI)\n\n # hop parameters\n self._hop_wm = nn.Parameter(torch.Tensor(input_dim, n_hidden))\n self._hop_wq = nn.Parameter(torch.Tensor(n_hidden, n_hidden))\n self._hop_v = nn.Parameter(torch.Tensor(n_hidden))\n init.xavier_normal_(self._hop_wm)\n init.xavier_normal_(self._hop_wq)\n init.uniform_(self._hop_v, -INI, INI)\n self._n_hop = n_hop\n\n def forward(self, attn_mem, mem_sizes, lstm_in):\n \"\"\"atten_mem: Tensor of size [batch_size, max_sent_num, input_dim]\"\"\"\n attn_feat, hop_feat, lstm_states, init_i = self._prepare(attn_mem)\n # lstm_in = torch.cat([init_i, lstm_in], dim=1).transpose(0, 1)\n lstm_in[:,0,:] = init_i.squeeze(1)\n lstm_in = lstm_in.transpose(0, 1)\n query, final_states = self._lstm(lstm_in, lstm_states)\n query = query.transpose(0, 1)\n for _ in range(self._n_hop):\n query = LSTMPointerNet.attention(\n hop_feat, query, self._hop_v, self._hop_wq, mem_sizes)\n output = LSTMPointerNet.attention_score(\n attn_feat, query, self._attn_v, self._attn_wq)\n return output # unormalized extraction logit\n\n def extract(self, attn_mem, mem_sizes, k):\n \"\"\"extract k sentences, decode only, batch_size==1\"\"\"\n attn_feat, hop_feat, lstm_states, lstm_in = self._prepare(attn_mem)\n lstm_in = lstm_in.squeeze(1)\n if self._lstm_cell is None:\n self._lstm_cell = MultiLayerLSTMCells.convert(\n self._lstm).to(attn_mem.device)\n extracts = []\n for _ in range(k):\n h, c = self._lstm_cell(lstm_in, lstm_states)\n query = h[-1]\n for _ in range(self._n_hop):\n query = 
LSTMPointerNet.attention(\n hop_feat, query, self._hop_v, self._hop_wq, mem_sizes)\n score = LSTMPointerNet.attention_score(\n attn_feat, query, self._attn_v, self._attn_wq)\n score = score.squeeze()\n for e in extracts:\n score[e] = -1e6\n ext = score.max(dim=0)[1].item()\n extracts.append(ext)\n lstm_states = (h, c)\n lstm_in = attn_mem[:, ext, :]\n return extracts\n\n def _prepare(self, attn_mem):\n attn_feat = torch.matmul(attn_mem, self._attn_wm.unsqueeze(0))\n hop_feat = torch.matmul(attn_mem, self._hop_wm.unsqueeze(0))\n bs = attn_mem.size(0)\n n_l, d = self._init_h.size()\n size = (n_l, bs, d)\n lstm_states = (self._init_h.unsqueeze(1).expand(*size).contiguous(),\n self._init_c.unsqueeze(1).expand(*size).contiguous())\n d = self._init_i.size(0)\n init_i = self._init_i.unsqueeze(0).unsqueeze(1).expand(bs, 1, d)\n return attn_feat, hop_feat, lstm_states, init_i\n\n @staticmethod\n def attention_score(attention, query, v, w):\n \"\"\" unnormalized attention score\"\"\"\n sum_ = attention.unsqueeze(1) + torch.matmul(\n query, w.unsqueeze(0)\n ).unsqueeze(2) # [B, Nq, Ns, D]\n score = torch.matmul(\n F.tanh(sum_), v.unsqueeze(0).unsqueeze(1).unsqueeze(3)\n ).squeeze(3) # [B, Nq, Ns]\n return score\n\n @staticmethod\n def attention(attention, query, v, w, mem_sizes):\n \"\"\" attention context vector\"\"\"\n score = LSTMPointerNet.attention_score(attention, query, v, w)\n if mem_sizes is None:\n norm_score = F.softmax(score, dim=-1)\n else:\n mask = len_mask(mem_sizes, score.device).unsqueeze(-2)\n norm_score = prob_normalize(score, mask)\n output = torch.matmul(norm_score, attention)\n return output\n\n\nclass PtrExtractSumm(nn.Module):\n \"\"\" rnn-ext\"\"\"\n def __init__(self, emb_dim, vocab_size, conv_hidden,\n lstm_hidden, lstm_layer, bidirectional,\n n_hop=1, dropout=0.0, dropoute=0.1, wdrop=0.5, dropouth=0.3):\n super().__init__()\n self._sent_enc = ConvSentEncoder(\n vocab_size, emb_dim, conv_hidden, dropout, dropoute)\n self._art_enc = LSTMEncoder(\n 3*conv_hidden, lstm_hidden, lstm_layer,\n dropout=dropout, wdrop=wdrop, dropouth=dropouth, bidirectional=bidirectional\n )\n enc_out_dim = lstm_hidden * (2 if bidirectional else 1)\n self._extractor = LSTMPointerNet(\n enc_out_dim, lstm_hidden, lstm_layer,\n dropout, n_hop\n )\n\n def forward(self, article_sents, sent_nums, target):\n enc_out = self._encode(article_sents, sent_nums)\n bs, nt = target.size()\n d = enc_out.size(2)\n ptr_in = torch.gather(\n enc_out, dim=1, index=target.unsqueeze(2).expand(bs, nt, d)\n )\n output = self._extractor(enc_out, sent_nums, ptr_in)\n return output\n\n def extract(self, article_sents, sent_nums=None, k=4):\n enc_out = self._encode(article_sents, sent_nums)\n output = self._extractor.extract(enc_out, sent_nums, k)\n return output\n\n def _encode(self, article_sents, sent_nums):\n if sent_nums is None: # test-time excode only\n enc_sent = self._sent_enc(article_sents[0]).unsqueeze(0)\n else:\n max_n = max(sent_nums)\n enc_sents = [self._sent_enc(art_sent)\n for art_sent in article_sents]\n def zero(n, device):\n z = torch.zeros(n, self._art_enc.input_size).to(device)\n return z\n enc_sent = torch.stack(\n [torch.cat([s, zero(max_n-n, s.device)], dim=0)\n if n != max_n\n else s\n for s, n in zip(enc_sents, sent_nums)],\n dim=0\n )\n lstm_out = self._art_enc(enc_sent, sent_nums)\n return lstm_out\n\n def set_embedding(self, embedding):\n self._sent_enc.set_embedding(embedding)\n" ]
[ [ "torch.nn.Parameter", "torch.nn.functional.dropout", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.utils.rnn.pad_packed_sequence", "torch.autograd.Variable" ], [ "torch.nn.init.uniform_", "torch.mm", "torch.nn.functional.softmax", "torch.LongTensor", "torch.Tensor", "torch.cat", "torch.zeros", "torch.matmul", "torch.stack", "torch.autograd.Variable" ], [ "torch.nn.init.uniform_", "torch.nn.functional.softmax", "torch.Tensor", "torch.nn.LSTM", "torch.zeros", "torch.nn.init.xavier_normal_", "torch.nn.Embedding", "torch.nn.Linear", "torch.matmul", "torch.nn.Conv1d", "torch.nn.functional.tanh" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kkelchte/pilot
[ "e3c3b753351efac30323af57465abe360973653a" ]
[ "pilot/models/alex_net_v4.py" ]
[ "\"\"\"\nVersion of Alexnet with smaller input size and less weights\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# input downscaled to 128x128x1\ndef alexnet(inputs,\n num_outputs=1,\n dropout_rate=0,\n reuse=None,\n is_training=False,\n verbose=False):\n \"\"\"A basic alex net.\"\"\"\n end_points={}\n \n # TOWER ONE\n end_point = 'conv_1'\n l1 = tf.layers.conv2d(inputs, 32, kernel_size=[11,11], strides=4, padding='valid', activation=None, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l1: {}\".format(l1.shape))\n end_points[end_point]=l1\n end_point='bn_1'\n bn1 = tf.layers.batch_normalization(l1, axis=-1, momentum=0.999, epsilon=0.00001, center=True, scale=False, training=is_training, name=end_point, reuse=reuse)\n end_points[end_point]=bn1\n end_point='relu_1'\n relu1 = tf.nn.relu(bn1, name=end_point)\n end_points[end_point]=relu1 \n end_point = 'pool_1'\n p1=tf.layers.max_pooling2d(relu1, pool_size=3, strides=2, padding='valid',name=end_point)\n if verbose: print(\"shape p1: {}\".format(p1.shape))\n end_points[end_point]=p1\n \n # TOWER TWO\n end_point = 'conv_2'\n l2=tf.layers.conv2d(p1, 64, kernel_size=[5,5], strides=1, padding='same', activation=None, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l2: {}\".format(l2.shape))\n end_points[end_point]=l2\n end_point='bn_2'\n bn2 = tf.layers.batch_normalization(l2, axis=-1, momentum=0.999, epsilon=0.00001, center=True, scale=False, training=is_training, name=end_point, reuse=reuse)\n end_points[end_point]=bn2\n end_point='relu_2'\n relu2 = tf.nn.relu(bn2, name=end_point)\n end_points[end_point]=relu2 \n end_point = 'pool_2'\n p2=tf.layers.max_pooling2d(relu2, pool_size=3, strides=2, padding='valid',name=end_point)\n if verbose: print(\"shape p2: {}\".format(p2.shape))\n end_points[end_point]=p2\n\n # TOWER THREE\n end_point = 'conv_3'\n l3=tf.layers.conv2d(p2, 64, kernel_size=[3,3], strides=1, padding='same', activation=tf.nn.relu, use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l3: {}\".format(l3.shape))\n end_points[end_point]=l3\n end_point = 'conv_4'\n l4=tf.layers.conv2d(l3, 64, kernel_size=[3,3], strides=1, padding='same', activation=tf.nn.relu, use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l4: {}\".format(l4.shape))\n end_points[end_point]=l4\n end_point = 'conv_5'\n l5=tf.layers.conv2d(l4, 64, kernel_size=[3,3], strides=1, padding='same', activation=tf.nn.relu, use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l5: {}\".format(l5.shape))\n end_points[end_point]=l5\n end_point = 'pool_5'\n p5=tf.layers.max_pooling2d(l5, pool_size=3, strides=1 , padding='valid', name=end_point)\n if verbose: print(\"shape p5: {}\".format(p5.shape))\n end_points[end_point]=p5\n p5 = tf.reshape(p5, (-1,1,4*4*64))\n \n if dropout_rate != 0:\n end_point = 'dropout_5'\n p5 = tf.layers.dropout(p5, dropout_rate)\n end_points[end_point]=p5\n \n end_point = 'fc_6'\n l6=tf.layers.conv1d(p5, filters=1024, kernel_size=1, strides=1, padding='valid', activation=tf.nn.relu, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l6: {}\".format(l6.shape))\n 
end_points[end_point]=l6\n \n if dropout_rate != 0:\n end_point = 'dropout_6'\n l6 = tf.layers.dropout(l6, dropout_rate)\n end_points[end_point]=l6\n \n end_point = 'fc_7'\n l7=tf.layers.conv1d(l6, filters=1024, kernel_size=1, strides=1, padding='valid', activation=tf.nn.relu, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l7: {}\".format(l7.shape))\n end_points[end_point]=l7\n\n end_point = 'fc_8'\n l8=tf.layers.conv1d(l7, filters=num_outputs, kernel_size=1, strides=1, padding='valid', activation=tf.nn.tanh if num_outputs == 1 else None, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n # l8=tf.layers.conv1d(l7, filters=num_outputs, kernel_size=1, strides=1, padding='valid', activation=tf.nn.tanh if num_outputs == 1 else tf.nn.relu, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l8: {}\".format(l8.shape))\n end_points[end_point]=l8\n end_point = 'outputs'\n outputs = tf.squeeze(l8, [1], name=end_point)\n if verbose: print(\"shape outputs: {}\".format(outputs.shape))\n end_points[end_point]=outputs\n \n return end_points\n\n# default_image_size=[227,227,3]\n# default_image_size=[127,127,3]\ndefault_image_size=[127,127,1]\n" ]
[ [ "tensorflow.nn.relu", "tensorflow.layers.batch_normalization", "tensorflow.layers.dropout", "tensorflow.reshape", "tensorflow.layers.max_pooling2d", "tensorflow.squeeze", "tensorflow.contrib.layers.xavier_initializer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
goldnimrod/IML.HUJI
[ "4fe39f597e1fc9eb188ca12daa2b3111bae92ee9", "4fe39f597e1fc9eb188ca12daa2b3111bae92ee9" ]
[ "IMLearn/learners/classifiers/decision_stump.py", "IMLearn/learners/classifiers/gaussian_naive_bayes.py" ]
[ "from __future__ import annotations\nfrom typing import Tuple, NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\nfrom ...metrics import misclassification_error\nfrom itertools import product\n\n\nclass DecisionStump(BaseEstimator):\n \"\"\"\n A decision stump classifier for {-1,1} labels according to the CART algorithm\n\n Attributes\n ----------\n self.threshold_ : float\n The threshold by which the data is split\n\n self.j_ : int\n The index of the feature by which to split the data\n\n self.sign_: int\n The label to predict for samples where the value of the j'th feature is about the threshold\n \"\"\"\n\n def __init__(self) -> DecisionStump:\n \"\"\"\n Instantiate a Decision stump classifier\n \"\"\"\n super().__init__()\n self.threshold_, self.j_, self.sign_ = None, None, None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n fits a decision stump to the given data\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n \"\"\"\n min_error = 1\n # Run on all sign combinations instead of determining the majority\n for feature_index, sign in product(range(X.shape[1]),\n np.unique(np.sign(y))):\n threshold, error = self._find_threshold(X[:, feature_index], y,\n sign)\n if error <= min_error:\n min_error = error\n self.threshold_ = threshold\n self.sign_ = sign\n self.j_ = feature_index\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n\n Notes\n -----\n Feature values strictly below threshold are predicted as `-sign` whereas values which equal\n to or above the threshold are predicted as `sign`\n \"\"\"\n return np.where(X[:, self.j_] < self.threshold_, -self.sign_,\n self.sign_)\n\n def _find_threshold(self, values: np.ndarray, labels: np.ndarray,\n sign: int) -> Tuple[float, float]:\n \"\"\"\n Given a feature vector and labels, find a threshold by which to perform a split\n The threshold is found according to the value minimizing the misclassification\n error along this feature\n\n Parameters\n ----------\n values: ndarray of shape (n_samples,)\n A feature vector to find a splitting threshold for\n\n labels: ndarray of shape (n_samples,)\n The labels to compare against\n\n sign: int\n Predicted label assigned to values equal to or above threshold\n\n Returns\n -------\n thr: float\n Threshold by which to perform split\n\n thr_err: float between 0 and 1\n Misclassificaiton error of returned threshold\n\n Notes\n -----\n For every tested threshold, values strictly below threshold are predicted as `-sign` whereas values\n which equal to or above the threshold are predicted as `sign`\n \"\"\"\n sorted_values = values[values.argsort()]\n sorted_labels = labels[values.argsort()]\n error_count = np.sum(np.abs(sorted_labels[\n np.not_equal(np.sign(sorted_labels),\n np.ones(\n sorted_values.shape[\n 0]) * sign)]))\n\n def calc_thr_value_error(i):\n \"\"\"\n Calculates the misclassificaiton error of the threshold with\n The value in index i\n\n Parameters\n ----------\n i: int\n The index of the value in the sorted_values array\n\n Returns\n -------\n thr_err: float between 0 and 1\n Misclassificaiton error of the threshold\n\n \"\"\"\n 
nonlocal error_count\n if i == 0:\n return error_count\n if np.sign(sorted_labels[i - 1]) == -sign:\n error_count -= np.abs(sorted_labels[i - 1])\n else:\n error_count += np.abs(sorted_labels[i - 1])\n return error_count\n\n errors = np.vectorize(calc_thr_value_error)(\n np.arange(sorted_values.shape[0]))\n min_error_index = np.argmin(errors)\n return sorted_values[min_error_index], errors[min_error_index]\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under misclassification loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under missclassification loss function\n \"\"\"\n return misclassification_error(np.sign(y), np.sign(self.predict(X)))\n", "from typing import NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\nfrom numpy.linalg import det, inv\n\n\nclass GaussianNaiveBayes(BaseEstimator):\n \"\"\"\n Gaussian Naive-Bayes classifier\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Instantiate a Gaussian Naive Bayes classifier\n\n Attributes\n ----------\n self.classes_ : np.ndarray of shape (n_classes,)\n The different labels classes. To be set in `GaussianNaiveBayes.fit`\n\n self.mu_ : np.ndarray of shape (n_classes,n_features)\n The estimated features means for each class. To be set in `GaussianNaiveBayes.fit`\n\n self.vars_ : np.ndarray of shape (n_classes, n_features)\n The estimated features variances for each class. To be set in `GaussianNaiveBayes.fit`\n\n self.pi_: np.ndarray of shape (n_classes)\n The estimated class probabilities. To be set in `GaussianNaiveBayes.fit`\n \"\"\"\n super().__init__()\n self.classes_, self.mu_, self.vars_, self.pi_ = None, None, None, None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n fits a gaussian naive bayes model\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n \"\"\"\n self.classes_ = np.unique(y)\n self.pi_ = np.vectorize(\n lambda k: np.count_nonzero(y == k) / y.shape[0])(self.classes_)\n self.mu_ = np.array(\n [np.sum(X[np.where(y == k)], axis=0) / np.count_nonzero(\n y == k) for k in self.classes_])\n mu_yi = np.array([self.mu_[yi] for yi in y])\n self.vars_ = np.array(\n [np.sum(\n [np.diag(np.outer(row, row)) for row in\n (X - mu_yi)[np.where(y == k)]],\n axis=0) / np.count_nonzero(y == k)\n for k in self.classes_])\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n\n def calc_predict(x: np.ndarray, k: int):\n ak = inv(np.diag(self.vars_[k])) @ self.mu_[k]\n bk = np.log(self.pi_[k]) - 0.5 * self.mu_[k] @ ak\n ck = -0.5 * inv(np.diag(self.vars_[k])) @ x\n return ak.T @ x + bk + ck.T @ x\n\n def predict_x(x: np.ndarray):\n class_predicts = np.vectorize(lambda k: calc_predict(x, k))(\n self.classes_)\n return self.classes_[np.argmax(class_predicts)]\n\n return np.apply_along_axis(predict_x, 1, X)\n\n def likelihood(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculate the likelihood of a given data over the estimated model\n\n Parameters\n ----------\n 
X : np.ndarray of shape (n_samples, n_features)\n Input data to calculate its likelihood over the different classes.\n\n Returns\n -------\n likelihoods : np.ndarray of shape (n_samples, n_classes)\n The likelihood for each sample under each of the classes\n\n \"\"\"\n if not self.fitted_:\n raise ValueError(\n \"Estimator must first be fitted before calling `likelihood` function\")\n\n def calc_pdf(x, k):\n cov_k = np.diag(self.vars_[k])\n return np.exp(-0.5 * (x - self.mu_[k]).T @ np.inv(cov_k) @ (\n (x - self.mu_[k]))) / np.sqrt(\n det(cov_k) * (2 * np.pi) ** x.shape[0])\n\n return np.array([[calc_pdf(x, k) for k in self.classes_] for x in X])\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under misclassification loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under missclassification loss function\n \"\"\"\n from ...metrics import misclassification_error\n return misclassification_error(y, self.predict(X))\n" ]
[ [ "numpy.abs", "numpy.arange", "numpy.ones", "numpy.sign", "numpy.vectorize", "numpy.argmin", "numpy.where" ], [ "numpy.diag", "numpy.log", "numpy.unique", "numpy.linalg.det", "numpy.inv", "numpy.apply_along_axis", "numpy.argmax", "numpy.count_nonzero", "numpy.outer", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dataubc/number_-of_clicks_prediction
[ "738c2e15dc627b247ad5692ab80d3713fe5c8f3e" ]
[ "xgboost_model.py" ]
[ "import pandas as pd\nimport numpy as np\nimport xgboost\n\n# reading data\nhotel_data = pd.read_csv('cleaned_train.csv')\nX = hotel_data.drop(columns=['n_clicks', 'hotel_id'])\n# let's also add the new feature avg_saving_cash\nX['avg_saving_cash'] = X['avg_price'] * X['avg_saving_percent']\ny = hotel_data['n_clicks']\n\n# let's create trained data for xgboost\ndtrain = xgboost.DMatrix(X, label=y)\n\nparams = {'max_depth': 6, 'min_child_weight': 3, 'eta': .1, 'subsample': 1, 'colsample_bytree': 0.7,\n 'objective': 'reg:squarederror', 'eval_metric': \"rmse\"}\nnum_boost_round = 999\nprint('Training phase has started')\n\n# training best model on the optimized hyper-parameters.\nbest_model = xgboost.train(\n params,\n dtrain,\n num_boost_round=num_boost_round,\n)\nprint('Saving the model as best_model.model')\nbest_model.save_model(\"best_model.model\")\nprint('Reading test data')\n\n# reading test data\nX_test = pd.read_csv('cleaned_test.csv')\ndtest = xgboost.DMatrix(X_test.drop(columns=['hotel_id']))\npredicted_y = best_model.predict(dtest)\nX_test['n_clicks'] = predicted_y\n# getting all negative prediction to 0\nX_test['n_clicks'] = np.where(X_test['n_clicks'] < 0, 0, X_test['n_clicks'])\nfinal_result = X_test[['hotel_id', 'n_clicks']]\nprint('Saving the prediction as predictions.csv')\n# saving the result\nfinal_result.to_csv('predictions.csv')\n" ]
[ [ "pandas.read_csv", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
PacktPublishing/Machine-Learning-Algorithms-Second-Edition
[ "b25d3607e9d5cc388bcf5f1a029bae39bb2b837b", "b25d3607e9d5cc388bcf5f1a029bae39bb2b837b", "b25d3607e9d5cc388bcf5f1a029bae39bb2b837b" ]
[ "Chapter10/birch.py", "Chapter16/convolution.py", "Chapter09/gaussian_mixture.py" ]
[ "from __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_blobs\nfrom sklearn.cluster import Birch\nfrom sklearn.metrics import adjusted_rand_score\n\n\n# Set random seed for reproducibility\nnp.random.seed(1000)\n\n\nnb_samples = 2000\nbatch_size = 80\n\n\nif __name__ == '__main__':\n # Create the dataset\n X, Y = make_blobs(n_samples=nb_samples, n_features=2, centers=5, cluster_std=1.5, random_state=1000)\n\n # Create an instance of BIRCH\n birch = Birch(n_clusters=5, threshold=0.15, branching_factor=100)\n\n # Train the model\n X_batch = []\n Y_preds = []\n\n for i in range(0, nb_samples, batch_size):\n birch.partial_fit(X[i:i + batch_size])\n X_batch.append(X[:i + batch_size])\n Y_preds.append(birch.predict(X[:i + batch_size]))\n\n print(adjusted_rand_score(birch.predict(X), Y))\n\n # Show the training steps\n fig, ax = plt.subplots(5, 5, figsize=(20, 12))\n\n for i in range(5):\n for j in range(5):\n idx = (i * 5) + j\n\n for k in range(5):\n ax[i][j].scatter(X_batch[idx][Y_preds[idx] == k, 0], X_batch[idx][Y_preds[idx] == k, 1], s=3)\n\n ax[i][j].set_xticks([])\n ax[i][j].set_yticks([])\n ax[i][j].set_title('{} samples'.format(batch_size * (idx + 1)))\n\n plt.show()\n\n\n", "from __future__ import print_function\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nfrom scipy.misc import face\n\n# For reproducibility\nnp.random.seed(1000)\n\nif __name__ == '__main__':\n # Load the image\n img = face(gray=True)\n\n # Show the original image\n plt.imshow(img, cmap='gray')\n plt.show()\n\n # Define the kernel\n kernel = np.array(\n [[0, 1, 0],\n [1, -4, 0],\n [0, 1, 0]],\n dtype=np.float32)\n\n cfilter = np.zeros((3, 3, 1, 1), dtype=np.float32)\n cfilter[:, :, 0, 0] = kernel\n\n # Create the graph\n graph = tf.Graph()\n\n with graph.as_default():\n x = tf.placeholder(tf.float32, shape=(None, 768, 1024, 1), name='image')\n f = tf.constant(cfilter)\n\n # In case of errors, please use padding='SAME'\n y = tf.nn.conv2d(x, f, strides=[1, 1, 1, 1], padding='same')\n\n session = tf.InteractiveSession(graph=graph)\n\n # Compute the convolution\n c_img = session.run([y], feed_dict={x: img.reshape((1, 768, 1024, 1))})\n n_img = np.array(c_img).reshape((768, 1024))\n\n # Show the final image\n plt.imshow(n_img, cmap='gray')\n plt.show()\n\n\n\n", "from __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.patches import Ellipse\n\nfrom sklearn.datasets import make_blobs\nfrom sklearn.mixture import GaussianMixture\n\n\n# Set random seed for reproducibility\nnp.random.seed(1000)\n\n\n# Total number of samples\nnb_samples = 800\n\n\nif __name__ == '__main__':\n # Create the dataset\n X, Y = make_blobs(n_samples=nb_samples, n_features=2, centers=3, cluster_std=2.2, random_state=1000)\n\n # Show the original dataset\n fig, ax = plt.subplots(figsize=(15, 8))\n\n ax.scatter(X[Y == 0, 0], X[Y == 0, 1], c='r', s=20, marker='p', label='Class 0')\n ax.scatter(X[Y == 1, 0], X[Y == 1, 1], c='g', s=20, marker='d', label='Class 1')\n ax.scatter(X[Y == 2, 0], X[Y == 2, 1], c='b', s=20, marker='s', label='Class 2')\n ax.set_xlabel(r'$x_0$')\n ax.set_ylabel(r'$x_1$')\n ax.legend()\n ax.grid()\n\n plt.show()\n\n # Create a fit a Gaussian Mixture model\n gm = GaussianMixture(n_components=3, max_iter=1000, random_state=1000)\n gm.fit(X)\n\n # Print means, covariances, and weights\n print('Means:\\n')\n print(gm.means_)\n\n print('\\nCovariances:\\n')\n 
print(gm.covariances_)\n\n print('\\nWeights:\\n')\n print(gm.weights_)\n\n # Show the clustered dataset with the final Gaussian distributions\n fig, ax = plt.subplots(figsize=(15, 8))\n\n c = gm.covariances_\n m = gm.means_\n\n g1 = Ellipse(xy=m[0], width=4 * np.sqrt(c[0][0, 0]), height=4 * np.sqrt(c[0][1, 1]), fill=False, linestyle='dashed',\n linewidth=2)\n g1_1 = Ellipse(xy=m[0], width=3 * np.sqrt(c[0][0, 0]), height=3 * np.sqrt(c[0][1, 1]), fill=False,\n linestyle='dashed', linewidth=3)\n g1_2 = Ellipse(xy=m[0], width=1.5 * np.sqrt(c[0][0, 0]), height=1.5 * np.sqrt(c[0][1, 1]), fill=False,\n linestyle='dashed', linewidth=4)\n\n g2 = Ellipse(xy=m[1], width=4 * np.sqrt(c[1][0, 0]), height=4 * np.sqrt(c[1][1, 1]), fill=False, linestyle='dashed',\n linewidth=2)\n g2_1 = Ellipse(xy=m[1], width=3 * np.sqrt(c[1][0, 0]), height=3 * np.sqrt(c[1][1, 1]), fill=False,\n linestyle='dashed', linewidth=3)\n g2_2 = Ellipse(xy=m[1], width=1.5 * np.sqrt(c[1][0, 0]), height=1.5 * np.sqrt(c[1][1, 1]), fill=False,\n linestyle='dashed', linewidth=4)\n\n g3 = Ellipse(xy=m[2], width=4 * np.sqrt(c[2][0, 0]), height=4 * np.sqrt(c[2][1, 1]), fill=False, linestyle='dashed',\n linewidth=2)\n g3_1 = Ellipse(xy=m[2], width=3 * np.sqrt(c[2][0, 0]), height=3 * np.sqrt(c[2][1, 1]), fill=False,\n linestyle='dashed', linewidth=3)\n g3_2 = Ellipse(xy=m[2], width=1.5 * np.sqrt(c[2][0, 0]), height=1.5 * np.sqrt(c[2][1, 1]), fill=False,\n linestyle='dashed', linewidth=4)\n\n ax.add_artist(g1)\n ax.add_artist(g1_1)\n ax.add_artist(g1_2)\n ax.add_artist(g2)\n ax.add_artist(g2_1)\n ax.add_artist(g2_2)\n ax.add_artist(g3)\n ax.add_artist(g3_1)\n ax.add_artist(g3_2)\n\n ax.scatter(X[Y == 0, 0], X[Y == 0, 1], c='r', s=20, marker='p', label='Class 0')\n ax.scatter(X[Y == 1, 0], X[Y == 1, 1], c='g', s=20, marker='d', label='Class 1')\n ax.scatter(X[Y == 2, 0], X[Y == 2, 1], c='b', s=20, marker='s', label='Class 2')\n ax.set_xlabel(r'$x_0$')\n ax.set_ylabel(r'$x_1$')\n ax.legend()\n ax.grid()\n\n plt.show()\n\n # Compute AICs and BICs\n nb_components = [2, 3, 4, 5, 6, 7, 8]\n\n aics = []\n bics = []\n\n for n in nb_components:\n gm = GaussianMixture(n_components=n, max_iter=1000, random_state=1000)\n gm.fit(X)\n aics.append(gm.aic(X))\n bics.append(gm.bic(X))\n\n fig, ax = plt.subplots(2, 1, figsize=(15, 8))\n\n ax[0].plot(nb_components, aics)\n ax[0].set_ylabel('AIC')\n ax[0].grid()\n\n ax[1].plot(nb_components, bics)\n ax[1].set_xlabel('Number of components')\n ax[1].set_ylabel('BIC')\n ax[1].grid()\n\n plt.show()" ]
[ [ "numpy.random.seed", "matplotlib.pyplot.subplots", "sklearn.cluster.Birch", "matplotlib.pyplot.show", "sklearn.datasets.make_blobs" ], [ "matplotlib.pyplot.imshow", "tensorflow.Graph", "tensorflow.constant", "tensorflow.InteractiveSession", "numpy.random.seed", "scipy.misc.face", "tensorflow.placeholder", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show", "tensorflow.nn.conv2d" ], [ "numpy.sqrt", "numpy.random.seed", "matplotlib.pyplot.subplots", "sklearn.mixture.GaussianMixture", "matplotlib.pyplot.show", "sklearn.datasets.make_blobs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rniranjan93/tensorflow
[ "2d22f93b04cd137d2480528a80b45ea5306ca9b3" ]
[ "tensorflow/python/compat/compat.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for API compatibility between TensorFlow release versions.\n\nSee [Version\nCompatibility](https://tensorflow.org/guide/version_compat#backward_forward)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\n\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# This value changes every day with an automatic CL. It can be modified in code\n# via `forward_compatibility_horizon()` or with the environment variable\n# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.\n_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 3, 25)\n_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = \"TF_FORWARD_COMPATIBILITY_DELTA_DAYS\"\n_FORWARD_COMPATIBILITY_DATE_NUMBER = None\n\n\ndef _date_to_date_number(year, month, day):\n return (year << 9) | (month << 5) | day\n\n\ndef _update_forward_compatibility_date_number(date_to_override=None):\n \"\"\"Update the base date to compare in forward_compatible function.\"\"\"\n\n global _FORWARD_COMPATIBILITY_DATE_NUMBER\n\n if date_to_override:\n date = date_to_override\n else:\n date = _FORWARD_COMPATIBILITY_HORIZON\n delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)\n if delta_days:\n date += datetime.timedelta(days=int(delta_days))\n\n if date < _FORWARD_COMPATIBILITY_HORIZON:\n logging.warning(\"Trying to set the forward compatibility date to the past\"\n \" date %s. This will be ignored by TensorFlow.\" % (date))\n return\n _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(\n date.year, date.month, date.day)\n\n\n_update_forward_compatibility_date_number()\n\n\n@tf_export(\"compat.forward_compatible\")\ndef forward_compatible(year, month, day):\n \"\"\"Return true if the forward compatibility window has expired.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n Forward-compatibility refers to scenarios where the producer of a TensorFlow\n model (a GraphDef or SavedModel) is compiled against a version of the\n TensorFlow library newer than what the consumer was compiled against. The\n \"producer\" is typically a Python program that constructs and trains a model\n while the \"consumer\" is typically another program that loads and serves the\n model.\n\n TensorFlow has been supporting a 3 week forward-compatibility window for\n programs compiled from source at HEAD.\n\n For example, consider the case where a new operation `MyNewAwesomeAdd` is\n created with the intent of replacing the implementation of an existing Python\n wrapper - `tf.add`. 
The Python wrapper implementation should change from\n something like:\n\n ```python\n def add(inputs, name=None):\n return gen_math_ops.add(inputs, name)\n ```\n\n to:\n\n ```python\n from tensorflow.python.compat import compat\n\n def add(inputs, name=None):\n if compat.forward_compatible(year, month, day):\n # Can use the awesome new implementation.\n return gen_math_ops.my_new_awesome_add(inputs, name)\n # To maintain forward compatibility, use the old implementation.\n return gen_math_ops.add(inputs, name)\n ```\n\n Where `year`, `month`, and `day` specify the date beyond which binaries\n that consume a model are expected to have been updated to include the\n new operations. This date is typically at least 3 weeks beyond the date\n the code that adds the new operation is committed.\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Returns:\n True if the caller can expect that serialized TensorFlow graphs produced\n can be consumed by programs that are compiled with the TensorFlow library\n source code after (year, month, day).\n \"\"\"\n return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(\n year, month, day)\n\n\n@tf_export(\"compat.forward_compatibility_horizon\")\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n \"\"\"Context manager for testing forward compatibility of generated graphs.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n To ensure forward compatibility of generated graphs (see `forward_compatible`)\n with older binaries, new features can be gated with:\n\n ```python\n if compat.forward_compatible(year=2018, month=08, date=01):\n generate_graph_with_new_features()\n else:\n generate_graph_so_older_binaries_can_consume_it()\n ```\n\n However, when adding new features, one may want to unittest it before\n the forward compatibility window expires. This context manager enables\n such tests. For example:\n\n ```python\n from tensorflow.python.compat import compat\n\n def testMyNewFeature(self):\n with compat.forward_compatibility_horizon(2018, 08, 02):\n # Test that generate_graph_with_new_features() has an effect\n ```\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Yields:\n Nothing.\n \"\"\"\n try:\n _update_forward_compatibility_date_number(datetime.date(year, month, day))\n yield\n finally:\n _update_forward_compatibility_date_number()\n" ]
[ [ "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.util.tf_export.tf_export" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7", "1.4" ] } ]
quantify-os/quantify-scheduler
[ "9dee17ca9345560b998b52f956c23b79a9ab287f" ]
[ "tests/scheduler/test_waveforms.py" ]
[ "# pylint: disable=missing-function-docstring\n\n\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\nfrom quantify_scheduler.waveforms import (\n square,\n drag,\n staircase,\n modulate_wave,\n rotate_wave,\n)\n\n\ndef test_square_wave():\n amped_sq = square(np.arange(50), 2.44)\n npt.assert_array_equal(amped_sq, np.linspace(2.44, 2.44, 50))\n\n amped_sq_iq = square(np.arange(20), 6.88)\n npt.assert_array_equal(amped_sq_iq.real, np.linspace(6.88, 6.88, 20))\n npt.assert_array_equal(amped_sq_iq.imag, np.linspace(0, 0, 20))\n\n\ndef test_staircase():\n t = np.linspace(0, 1e-6, 20)\n sig = staircase(t, -1, 2, 4)\n answer = np.array(\n [\n -1.0,\n -1.0,\n -1.0,\n -1.0,\n -1.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 1.0,\n 1.0,\n 1.0,\n 1.0,\n 1.0,\n 2.0,\n 2.0,\n 2.0,\n 2.0,\n 2.0,\n ]\n )\n npt.assert_array_equal(sig, answer)\n\n\ndef test_drag_ns():\n duration = 20e-9\n nr_sigma = 3\n G_amp = 0.5\n D_amp = 1\n\n times = np.arange(0, duration, 1e-9) # sampling rate set to 1 GSPs\n mu = times[0] + duration / 2\n sigma = duration / (2 * nr_sigma)\n gauss_env = G_amp * np.exp(-(0.5 * ((times - mu) ** 2) / sigma ** 2))\n deriv_gauss_env = D_amp * -1 * (times - mu) / (sigma ** 1) * gauss_env\n exp_waveform = gauss_env + 1j * deriv_gauss_env\n\n # quantify\n waveform = drag(\n times,\n G_amp=G_amp,\n D_amp=D_amp,\n duration=duration,\n nr_sigma=nr_sigma,\n subtract_offset=\"none\",\n )\n\n np.testing.assert_array_almost_equal(waveform, exp_waveform, decimal=3)\n assert pytest.approx(np.max(waveform), 0.5)\n\n with pytest.raises(ValueError):\n drag(times, 0.5, D_amp, duration, subtract_offset=\"bad!\")\n\n waveform = drag(\n times,\n G_amp=G_amp,\n D_amp=D_amp,\n duration=duration,\n nr_sigma=nr_sigma,\n subtract_offset=\"average\",\n )\n exp_waveform.real -= np.mean([exp_waveform.real[0], exp_waveform.real[-1]])\n exp_waveform.imag -= np.mean([exp_waveform.imag[0], exp_waveform.imag[-1]])\n np.testing.assert_array_almost_equal(waveform, exp_waveform, decimal=3)\n\n\ndef test_rotate_wave():\n\n I = np.ones(10) # noqa # Q component is zero\n Q = np.zeros(10) # noqa # not used as input, only used for testing\n\n rot_wf = rotate_wave(I, 0)\n\n npt.assert_array_almost_equal(I, rot_wf.real)\n npt.assert_array_almost_equal(I.imag, rot_wf.imag)\n\n rot_wf = rotate_wave(I, 90)\n\n npt.assert_array_almost_equal(I, rot_wf.imag)\n npt.assert_array_almost_equal(Q, -rot_wf.real)\n\n rot_wf = rotate_wave(I, 180)\n\n npt.assert_array_almost_equal(I, -rot_wf.real)\n npt.assert_array_almost_equal(Q, -rot_wf.imag)\n\n rot_wf = rotate_wave(I, 360)\n\n npt.assert_array_almost_equal(I, rot_wf.real)\n npt.assert_array_almost_equal(Q, rot_wf.imag)\n\n\ndef test_modulate():\n fs = 100\n f = 4\n t = np.arange(fs)\n I = np.sin(2 * np.pi * f * (t / fs)) # noqa\n Q = np.sin(2 * np.pi * f * (t / fs) + (np.pi / 2)) # noqa\n wf = I + 1j * Q\n\n mod_wf = modulate_wave(np.linspace(0, 1, fs), wf, 2)\n npt.assert_array_almost_equal(\n mod_wf.real, np.sin(2 * np.pi * (f + 2) * (t / fs)), decimal=1\n )\n\n mod_wf = modulate_wave(np.linspace(0, 1, fs), wf, -2)\n npt.assert_array_almost_equal(\n mod_wf.imag, np.sin(2 * np.pi * (f - 2) * (t / fs) + (np.pi / 2)), decimal=1\n )\n" ]
[ [ "numpy.linspace", "numpy.arange", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.sin", "numpy.max", "numpy.mean", "numpy.exp", "numpy.array", "numpy.zeros", "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
surchs/lab-documentation
[ "9d71a4710b66da5341e7c3c67108d175f8a9fe0d" ]
[ "source/tutorials/files/mnist.py" ]
[ "import tensorflow as tf\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(),\n tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(),\n tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(),\n tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\nloss_fn = tf.keras.losses.SparseCategoricalCrossentropy()\nmodel.compile(optimizer='adam',\n loss=loss_fn,\n metrics=['accuracy'])\nmodel.fit(x_train, y_train, epochs=5)\n\nmodel.evaluate(x_test, y_test, verbose=2)" ]
[ [ "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.datasets.cifar10.load_data", "tensorflow.keras.layers.Flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
kahilah/hpc-python
[ "5d2efa08076ed2706c81ca255c7e4574c937557c" ]
[ "demos/mpi-collective.py" ]
[ "from mpi4py import MPI\nfrom numpy import arange, empty\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nn = 10\ndata = empty(n, float)\nif rank == 0:\n data = arange(n, dtype=float)\n\ncomm.Bcast(data, 0)\n\nif rank == 1:\n print(\"Received: \" + str(data))\n\n" ]
[ [ "numpy.arange", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yangyuethz/qutip
[ "7f5682b5edfd4c906b2e89f69cf0a8be4bfd529b", "7f5682b5edfd4c906b2e89f69cf0a8be4bfd529b", "7f5682b5edfd4c906b2e89f69cf0a8be4bfd529b" ]
[ "qutip/propagator.py", "qutip/tests/test_qobjevo.py", "qutip/control/dynamics.py" ]
[ "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n__all__ = ['propagator', 'propagator_steadystate']\n\nimport types\nimport numpy as np\nimport scipy.linalg as la\nimport functools\nimport scipy.sparse as sp\nfrom qutip.qobj import Qobj\nfrom qutip.tensor import tensor\nfrom qutip.operators import qeye\nfrom qutip.rhs_generate import (rhs_generate, rhs_clear, _td_format_check)\nfrom qutip.superoperator import (vec2mat, mat2vec,\n vector_to_operator, operator_to_vector)\nfrom qutip.sparse import sp_reshape\nfrom qutip.cy.sparse_utils import unit_row_norm\nfrom qutip.mesolve import mesolve\nfrom qutip.sesolve import sesolve\nfrom qutip.states import basis\nfrom qutip.solver import Options, _solver_safety_check, config\nfrom qutip.parallel import parallel_map, _default_kwargs\nfrom qutip.ui.progressbar import BaseProgressBar, TextProgressBar\n\n\ndef propagator(H, t, c_op_list=[], args={}, options=None,\n unitary_mode='batch', parallel=False,\n progress_bar=None, _safe_mode=True,\n **kwargs):\n r\"\"\"\n Calculate the propagator U(t) for the density matrix or wave function such\n that :math:`\\psi(t) = U(t)\\psi(0)` or\n :math:`\\rho_{\\mathrm vec}(t) = U(t) \\rho_{\\mathrm vec}(0)`\n where :math:`\\rho_{\\mathrm vec}` is the vector representation of the\n density matrix.\n\n Parameters\n ----------\n H : qobj or list\n Hamiltonian as a Qobj instance of a nested list of Qobjs and\n coefficients in the list-string or list-function format for\n time-dependent Hamiltonians (see description in :func:`qutip.mesolve`).\n\n t : float or array-like\n Time or list of times for which to evaluate the propagator.\n\n c_op_list : list\n List of qobj collapse operators.\n\n args : list/array/dictionary\n Parameters to callback functions for time-dependent Hamiltonians and\n collapse operators.\n\n options : 
:class:`qutip.Options`\n with options for the ODE solver.\n\n unitary_mode = str ('batch', 'single')\n Solve all basis vectors simulaneously ('batch') or individually\n ('single').\n\n parallel : bool {False, True}\n Run the propagator in parallel mode. This will override the\n unitary_mode settings if set to True.\n\n progress_bar: BaseProgressBar\n Optional instance of BaseProgressBar, or a subclass thereof, for\n showing the progress of the simulation. By default no progress bar\n is used, and if set to True a TextProgressBar will be used.\n\n Returns\n -------\n a : qobj\n Instance representing the propagator :math:`U(t)`.\n\n \"\"\"\n kw = _default_kwargs()\n if 'num_cpus' in kwargs:\n num_cpus = kwargs['num_cpus']\n else:\n num_cpus = kw['num_cpus']\n\n if progress_bar is None:\n progress_bar = BaseProgressBar()\n elif progress_bar is True:\n progress_bar = TextProgressBar()\n\n if options is None:\n options = Options()\n options.rhs_reuse = True\n rhs_clear()\n\n if isinstance(t, (int, float, np.integer, np.floating)):\n tlist = [0, t]\n else:\n tlist = t\n\n if _safe_mode:\n _solver_safety_check(H, None, c_ops=c_op_list, e_ops=[], args=args)\n\n td_type = _td_format_check(H, c_op_list, solver='me')\n\n if isinstance(H, (types.FunctionType, types.BuiltinFunctionType,\n functools.partial)):\n H0 = H(0.0, args)\n if unitary_mode =='batch':\n # batch don't work with function Hamiltonian\n unitary_mode = 'single'\n elif isinstance(H, list):\n H0 = H[0][0] if isinstance(H[0], list) else H[0]\n else:\n H0 = H\n\n if len(c_op_list) == 0 and H0.isoper:\n # calculate propagator for the wave function\n\n N = H0.shape[0]\n dims = H0.dims\n\n if parallel:\n unitary_mode = 'single'\n u = np.zeros([N, N, len(tlist)], dtype=complex)\n output = parallel_map(_parallel_sesolve, range(N),\n task_args=(N, H, tlist, args, options),\n progress_bar=progress_bar, num_cpus=num_cpus)\n for n in range(N):\n for k, t in enumerate(tlist):\n u[:, n, k] = output[n].states[k].full().T\n else:\n if unitary_mode == 'single':\n output = sesolve(H, qeye(dims[0]), tlist, [], args, options,\n _safe_mode=False)\n if len(tlist) == 2:\n return output.states[-1]\n else:\n return output.states\n\n elif unitary_mode =='batch':\n u = np.zeros(len(tlist), dtype=object)\n _rows = np.array([(N+1)*m for m in range(N)])\n _cols = np.zeros_like(_rows)\n _data = np.ones_like(_rows, dtype=complex)\n psi0 = Qobj(sp.coo_matrix((_data, (_rows, _cols))).tocsr())\n if td_type[1] > 0 or td_type[2] > 0:\n H2 = []\n for k in range(len(H)):\n if isinstance(H[k], list):\n H2.append([tensor(qeye(N), H[k][0]), H[k][1]])\n else:\n H2.append(tensor(qeye(N), H[k]))\n else:\n H2 = tensor(qeye(N), H)\n options.normalize_output = False\n output = sesolve(H2, psi0, tlist, [],\n args=args, options=options,\n _safe_mode=False)\n for k, t in enumerate(tlist):\n u[k] = sp_reshape(output.states[k].data, (N, N))\n unit_row_norm(u[k].data, u[k].indptr, u[k].shape[0])\n u[k] = u[k].T.tocsr()\n\n else:\n raise Exception('Invalid unitary mode.')\n\n\n elif len(c_op_list) == 0 and H0.issuper:\n # calculate the propagator for the vector representation of the\n # density matrix (a superoperator propagator)\n unitary_mode = 'single'\n N = H0.shape[0]\n sqrt_N = int(np.sqrt(N))\n dims = H0.dims\n\n u = np.zeros([N, N, len(tlist)], dtype=complex)\n\n if parallel:\n output = parallel_map(_parallel_mesolve,range(N * N),\n task_args=(\n sqrt_N, H, tlist, c_op_list, args,\n options),\n progress_bar=progress_bar, num_cpus=num_cpus)\n for n in range(N * N):\n for k, t in 
enumerate(tlist):\n u[:, n, k] = mat2vec(output[n].states[k].full()).T\n else:\n rho0 = qeye(N,N)\n rho0.dims = [[sqrt_N, sqrt_N], [sqrt_N, sqrt_N]]\n output = mesolve(H, psi0, tlist, [], args, options,\n _safe_mode=False)\n if len(tlist) == 2:\n return output.states[-1]\n else:\n return output.states\n\n else:\n # calculate the propagator for the vector representation of the\n # density matrix (a superoperator propagator)\n unitary_mode = 'single'\n N = H0.shape[0]\n dims = [H0.dims, H0.dims]\n\n u = np.zeros([N * N, N * N, len(tlist)], dtype=complex)\n\n if parallel:\n output = parallel_map(_parallel_mesolve, range(N * N),\n task_args=(\n N, H, tlist, c_op_list, args, options),\n progress_bar=progress_bar, num_cpus=num_cpus)\n for n in range(N * N):\n for k, t in enumerate(tlist):\n u[:, n, k] = mat2vec(output[n].states[k].full()).T\n else:\n progress_bar.start(N * N)\n for n in range(N * N):\n progress_bar.update(n)\n col_idx, row_idx = np.unravel_index(n, (N, N))\n rho0 = Qobj(sp.csr_matrix(([1], ([row_idx], [col_idx])),\n shape=(N,N), dtype=complex))\n output = mesolve(H, rho0, tlist, c_op_list, [], args, options,\n _safe_mode=False)\n for k, t in enumerate(tlist):\n u[:, n, k] = mat2vec(output.states[k].full()).T\n progress_bar.finished()\n\n if len(tlist) == 2:\n if unitary_mode == 'batch':\n return Qobj(u[-1], dims=dims)\n else:\n return Qobj(u[:, :, 1], dims=dims)\n else:\n if unitary_mode == 'batch':\n return np.array([Qobj(u[k], dims=dims)\n for k in range(len(tlist))], dtype=object)\n else:\n return np.array([Qobj(u[:, :, k], dims=dims)\n for k in range(len(tlist))], dtype=object)\n\n\ndef _get_min_and_index(lst):\n \"\"\"\n Private function for obtaining min and max indicies.\n \"\"\"\n minval, minidx = lst[0], 0\n for i, v in enumerate(lst[1:]):\n if v < minval:\n minval, minidx = v, i + 1\n return minval, minidx\n\n\ndef propagator_steadystate(U):\n \"\"\"Find the steady state for successive applications of the propagator\n :math:`U`.\n\n Parameters\n ----------\n U : qobj\n Operator representing the propagator.\n\n Returns\n -------\n a : qobj\n Instance representing the steady-state density matrix.\n\n \"\"\"\n\n evals, evecs = la.eig(U.full())\n\n shifted_vals = np.abs(evals - 1.0)\n ev_idx = np.argmin(shifted_vals)\n ev_min = shifted_vals[ev_idx]\n evecs = evecs.T\n rho = Qobj(vec2mat(evecs[ev_idx]), dims=U.dims[0])\n rho = rho * (1.0 / rho.tr())\n rho = 0.5 * (rho + rho.dag()) # make sure rho is herm\n rho.isherm = True\n return rho\n\n\ndef _parallel_sesolve(n, N, H, tlist, args, options):\n psi0 = basis(N, n)\n output = sesolve(H, psi0, tlist, [], args, options, _safe_mode=False)\n return output\n\ndef _parallel_mesolve(n, N, H, tlist, c_op_list, args, options):\n col_idx, row_idx = np.unravel_index(n, (N, N))\n rho0 = Qobj(sp.csr_matrix(([1], ([row_idx], [col_idx])),\n shape=(N,N), dtype=complex))\n output = mesolve(H, rho0, tlist, c_op_list, [], args, options,\n _safe_mode=False)\n return output\n", "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. 
Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\nimport pytest\nfrom qutip import *\nimport numpy as np\nfrom numpy.testing import (assert_equal, assert_, assert_almost_equal,\n run_module_suite, assert_allclose)\nfrom functools import partial\nfrom types import FunctionType, BuiltinFunctionType\nfrom qutip.interpolate import Cubic_Spline\nfrom qutip.cy.spmatfuncs import (cy_expect_rho_vec, cy_expect_psi, spmv)\n\ndef _f1(t,args):\n return np.sin(t*args['w1'])\n\ndef _f2(t,args):\n return np.cos(t*args['w2'])\n\ndef _f3(t,args):\n return np.exp(1j*t*args['w3'])\n\n\ndef _rand_cqobjevo(N=5):\n tlist=np.linspace(0,10,10001)\n tlistlog=np.logspace(-3,1,10001)\n O0, O1, O2 = rand_herm(N), rand_herm(N), rand_herm(N)\n cte = [QobjEvo([O0])]\n wargs = [QobjEvo([O0,[O1,_f1],[O2,_f2]], args={\"w1\":1,\"w2\":2}),\n QobjEvo([O0,[O1,\"sin(w1*t)\"],[O2,\"cos(w2*t)\"]],\n args={\"w1\":1,\"w2\":2})]\n nargs = [QobjEvo([O0,[O1,np.sin(tlist)],[O2,np.cos(2*tlist)]],tlist=tlist),\n QobjEvo([O0,[O1,np.sin(tlistlog)],[O2,np.cos(2*tlistlog)]],\n tlist=tlistlog),\n QobjEvo([O0,[O1, Cubic_Spline(0,10,np.sin(tlist)) ],\n [O2, Cubic_Spline(0,10,np.cos(2*tlist))]])]\n cqobjevos = cte + wargs + nargs\n base_qobjs = [O0, O1, O2]\n return cqobjevos, base_qobjs\n\n\ndef _sp_eq(sp1, sp2):\n return not np.any(np.abs( (sp1 -sp2).data)> 1e-4)\n\n\ndef _random_QobjEvo(shape=(1,1), ops=[0,0,0], cte=True, tlist=None):\n \"\"\"Create a list to make a QobjEvo with up to 3 coefficients\"\"\"\n if tlist is None:\n tlist = np.linspace(0,1,301)\n Qobj_list = []\n if cte:\n Qobj_list.append(Qobj(np.random.random(shape) + \\\n 1j*np.random.random(shape)))\n coeff = [[_f1, \"sin(w1*t)\", np.sin(tlist),\n Cubic_Spline(0,1,np.sin(tlist))],\n [_f2, \"cos(w2*t)\", np.cos(tlist),\n Cubic_Spline(0,1,np.cos(tlist))],\n [_f3, \"exp(w3*t*1j)\", np.exp(tlist*1j),\n Cubic_Spline(0,1,np.exp(tlist*1j))]]\n for i,form in enumerate(ops):\n if form:\n Qobj_list.append([Qobj(np.random.random(shape)),coeff[i][form-1]])\n return Qobj_list\n\n\ndef _assert_qobj_almost_eq(obj1, obj2, tol=1e-10):\n diff_data = (obj1 - obj2).tidyup(tol).data\n assert_equal(len(diff_data.data),0)\n\n\ndef test_QobjEvo_call():\n \"QobjEvo call\"\n N = 5\n t = np.random.rand()+1\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n O0, O1, O2 = 
base_qobjs\n O_target1 = O0+np.sin(t)*O1+np.cos(2*t)*O2\n\n # Check the constant flag\n assert_equal(cqobjevos[0].const, True)\n # Check that the call return the Qobj\n assert_equal(cqobjevos[0](t) == O0, True)\n # Check that the call for the data return the data\n assert_equal(_sp_eq(cqobjevos[0](t, data=True), O0.data), True)\n\n for op in cqobjevos[1:]:\n assert_equal(_sp_eq(op(t, data=1) , O_target1.data), True)\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n\n op.compile()\n assert_equal(_sp_eq(op(t, data=1) , O_target1.data), True)\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n\n\[email protected]\ndef test_QobjEvo_call_full():\n \"QobjEvo call\"\n N = 5\n t = np.random.rand()+1\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n O0, O1, O2 = base_qobjs\n O_target1 = O0+np.sin(t)*O1+np.cos(2*t)*O2\n\n # Check the constant flag\n assert_equal(cqobjevos[0].const, True)\n # Check that the call return the Qobj\n assert_equal(cqobjevos[0](t) == O0, True)\n # Check that the call for the data return the data\n assert_equal(_sp_eq(cqobjevos[0](t, data=True), O0.data), True)\n\n for op in cqobjevos[1:]:\n op.compile(dense=1)\n assert_equal(_sp_eq(op(t, data=1) , O_target1.data), True)\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n op.compiled = \"\"\n\n op.compile(matched=1)\n assert_equal(_sp_eq(op(t, data=1) , O_target1.data), True)\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n op.compiled = \"\"\n\n op.compile(omp=2)\n assert_equal(_sp_eq(op(t, data=1) , O_target1.data), True)\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n op.compiled = \"\"\n\n op.compile(matched=1, omp=2)\n assert_equal(_sp_eq(op(t, data=1) , O_target1.data), True)\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n op.compiled = \"\"\n\n op.use_cython = False\n op.compile()\n assert_equal(_sp_eq(op(t, data=1) , O_target1.data), True)\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n op.compiled = \"\"\n\n\ndef test_QobjEvo_call_args():\n \"QobjEvo with_args\"\n N = 5\n t = np.random.rand()+1\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n O0, O1, O2 = base_qobjs\n O_target1 = O0+np.sin(t)*O1+np.cos(2*t)*O2\n O_target2 = O0+np.sin(t)*O1+np.cos(4*t)*O2\n\n for op in cqobjevos[1:3]:\n assert_equal(len((op(t, args={\"w2\":4})\n - O_target2).tidyup(1e-10).data.data), 0)\n op.arguments({\"w2\":4})\n assert_equal(len((op(t) - O_target2).tidyup(1e-10).data.data), 0)\n op.arguments({\"w2\":2})\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data), 0)\n\n op.compile()\n assert_equal(len((op(t, args={\"w2\":4})\n - O_target2).tidyup(1e-10).data.data),0)\n op.arguments({\"w2\":4})\n assert_equal(len((op(t) - O_target2).tidyup(1e-10).data.data),0)\n op.arguments({\"w2\":2})\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n\n\[email protected]\ndef test_QobjEvo_call_args_full():\n \"QobjEvo with_args\"\n N = 5\n t = np.random.rand()+1\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n O0, O1, O2 = base_qobjs\n O_target1 = O0+np.sin(t)*O1+np.cos(2*t)*O2\n O_target2 = O0+np.sin(t)*O1+np.cos(4*t)*O2\n\n for op in cqobjevos[1:3]:\n op.compile(dense=1)\n assert_equal(len((op(t, args={\"w2\":4})\n - O_target2).tidyup(1e-10).data.data),0)\n op.arguments({\"w2\":4})\n assert_equal(len((op(t) - O_target2).tidyup(1e-10).data.data),0)\n op.arguments({\"w2\":2})\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n\n op.compiled = \"\"\n op.compile(matched=1)\n 
assert_equal(len((op(t, args={\"w2\":4})\n - O_target2).tidyup(1e-10).data.data),0)\n op.arguments({\"w2\":4})\n assert_equal(len((op(t) - O_target2).tidyup(1e-10).data.data),0)\n op.arguments({\"w2\":2})\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n\n op.compiled = \"\"\n op.compile(omp=2)\n assert_equal(len((op(t, args={\"w2\":4})\n -O_target2).tidyup(1e-10).data.data),0)\n op.arguments({\"w2\":4})\n assert_equal(len((op(t) - O_target2).tidyup(1e-10).data.data),0)\n op.arguments({\"w2\":2})\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n\n op.compiled = \"\"\n op.compile(matched=1, omp=2)\n assert_equal(len((op(t, args={\"w2\":4})\n -O_target2).tidyup(1e-10).data.data),0)\n op.arguments({\"w2\":4})\n assert_equal(len((op(t) - O_target2).tidyup(1e-10).data.data),0)\n op.arguments({\"w2\":2})\n assert_equal(len((op(t) - O_target1).tidyup(1e-10).data.data),0)\n\n\ndef test_QobjEvo_step_coeff():\n \"QobjEvo step interpolation\"\n coeff1 = np.random.rand(6)\n coeff2 = np.random.rand(6) + np.random.rand(6) * 1.j\n # uniform t\n tlist = np.array([2, 3, 4, 5, 6, 7], dtype=float)\n qobjevo = QobjEvo([[sigmaz(), coeff1], [sigmax(), coeff2]],\n tlist=tlist, args={\"_step_func_coeff\":True})\n assert_equal(qobjevo.ops[0].get_coeff(2.0), coeff1[0])\n assert_equal(qobjevo.ops[0].get_coeff(7.0), coeff1[5])\n assert_equal(qobjevo.ops[0].get_coeff(5.0001), coeff1[3])\n assert_equal(qobjevo.ops[0].get_coeff(3.9999), coeff1[1])\n\n assert_equal(qobjevo.ops[1].get_coeff(2.0), coeff2[0])\n assert_equal(qobjevo.ops[1].get_coeff(7.0), coeff2[5])\n assert_equal(qobjevo.ops[1].get_coeff(5.0001), coeff2[3])\n assert_equal(qobjevo.ops[1].get_coeff(3.9999), coeff2[1])\n\n qobjevo.compile()\n assert_equal(qobjevo.coeff_get(2.0), [coeff1[0], coeff2[0]])\n assert_equal(qobjevo.coeff_get(7.0), [coeff1[5], coeff2[5]])\n assert_equal(qobjevo.coeff_get(4.0001), [coeff1[2], coeff2[2]])\n assert_equal(qobjevo.coeff_get(3.9999), [coeff1[1], coeff2[1]])\n\n # non-uniform t\n tlist = np.array([1, 2, 4, 5, 6, 8], dtype=float)\n qobjevo = QobjEvo([[sigmaz(), coeff1], [sigmax(), coeff2]],\n tlist=tlist, args={\"_step_func_coeff\":True})\n assert_equal(qobjevo.ops[0].get_coeff(1.0), coeff1[0])\n assert_equal(qobjevo.ops[0].get_coeff(8.0), coeff1[5])\n assert_equal(qobjevo.ops[0].get_coeff(3.9999), coeff1[1])\n assert_equal(qobjevo.ops[0].get_coeff(4.23), coeff1[2])\n assert_equal(qobjevo.ops[0].get_coeff(1.23), coeff1[0])\n\n assert_equal(qobjevo.ops[1].get_coeff(1.0), coeff2[0])\n assert_equal(qobjevo.ops[1].get_coeff(8.0), coeff2[5])\n assert_equal(qobjevo.ops[1].get_coeff(6.7), coeff2[4])\n assert_equal(qobjevo.ops[1].get_coeff(7.9999), coeff2[4])\n assert_equal(qobjevo.ops[1].get_coeff(3.9999), coeff2[1])\n\n qobjevo.compile()\n assert_equal(qobjevo.coeff_get(1.0), [coeff1[0], coeff2[0]])\n assert_equal(qobjevo.coeff_get(3.999), [coeff1[1], coeff2[1]])\n assert_equal(qobjevo.coeff_get(6.3), [coeff1[4], coeff2[4]])\n assert_equal(qobjevo.coeff_get(1.0001), [coeff1[0], coeff2[0]])\n\n\ndef test_QobjEvo_copy():\n \"QobjEvo copy\"\n tlist = np.linspace(0,1,300)\n td_obj_1 = QobjEvo(_random_QobjEvo((5,5), [1,2,3], tlist=tlist),\n args={\"w1\":1, \"w2\":2}, tlist=tlist)\n t = np.random.random()\n td_obj_copy = td_obj_1.copy()\n #Check that the copy is independent\n assert_equal(td_obj_1 is td_obj_copy, False)\n #Check that the copy has the same data\n assert_equal(td_obj_1(t) == td_obj_copy(t), True)\n td_obj_copy = QobjEvo(td_obj_1)\n #Check that the copy is independent\n 
assert_equal(td_obj_1 is td_obj_copy, False)\n #Check that the copy has the same data\n assert_equal(td_obj_1(t) == td_obj_copy(t), True)\n\n\ndef test_QobjEvo_to_list():\n \"QobjEvo to_list\"\n td_as_list_1 = _random_QobjEvo((5,5), [0,2,3], tlist=np.linspace(0,1,100))\n td_as_list_2 = _random_QobjEvo((5,5), [1,0,0], tlist=np.linspace(0,1,100))\n args={\"w1\":1, \"w2\":2}\n td_obj_1 = QobjEvo(td_as_list_1, args=args, tlist=np.linspace(0,1,100))\n td_obj_2 = QobjEvo(td_as_list_2, args=args, tlist=np.linspace(0,1,100))\n td_as_list_back = (td_obj_1 + td_obj_2).to_list()\n\n all_match = True\n for part in td_as_list_back:\n if isinstance(part, Qobj):\n all_match = all_match and td_as_list_1[0] + td_as_list_2[0] == part\n elif isinstance(part[1], (FunctionType, BuiltinFunctionType, partial)):\n all_match = all_match and td_as_list_2[1][1] == part[1]\n all_match = all_match and td_as_list_2[1][0] == part[0]\n elif isinstance(part[1], str):\n all_match = all_match and td_as_list_1[1][1] == part[1]\n all_match = all_match and td_as_list_1[1][0] == part[0]\n elif isinstance(part[1], np.ndarray):\n all_match = all_match and (td_as_list_1[2][1] == part[1]).all()\n all_match = all_match and td_as_list_1[2][0] == part[0]\n else:\n all_match = False\n # Check that the list get the object back\n assert_equal(all_match, True)\n\n\ndef test_QobjEvo_math_arithmetic():\n \"QobjEvo arithmetic\"\n N = 5\n t = np.random.rand()+1\n cqobjevos1, base_qobjs1 = _rand_cqobjevo(N)\n cqobjevos2, base_qobjs2 = _rand_cqobjevo(N)\n cte = cqobjevos2[0]\n O1 = base_qobjs2[0]\n\n for op, op_2 in zip(cqobjevos1, cqobjevos2):\n _assert_qobj_almost_eq(op(t)*-1, (-op)(t) )\n\n _assert_qobj_almost_eq(op(t) +O1, (op +O1)(t))\n _assert_qobj_almost_eq(op(t) +cte(t), (op +cte)(t))\n _assert_qobj_almost_eq(op(t) +op_2(t), (op +op_2)(t))\n opp = op.copy()\n opp += O1\n _assert_qobj_almost_eq(op(t) +O1, opp(t))\n opp = op.copy()\n opp += op_2\n _assert_qobj_almost_eq(op(t) +op_2(t), opp(t))\n\n _assert_qobj_almost_eq(op(t) -O1, (op -O1)(t))\n _assert_qobj_almost_eq(op(t) -cte(t), (op -cte)(t))\n _assert_qobj_almost_eq(O1 -op(t), (O1 -op)(t))\n _assert_qobj_almost_eq(cte(t) -op(t), (cte -op)(t))\n _assert_qobj_almost_eq(op(t) -op_2(t), (op -op_2)(t))\n opp = op.copy()\n opp -= O1\n _assert_qobj_almost_eq(op(t) -O1, opp(t))\n opp = op.copy()\n opp -= op_2\n _assert_qobj_almost_eq(op(t) -op_2(t), opp(t))\n\n _assert_qobj_almost_eq(op(t) * O1, (op * O1)(t))\n _assert_qobj_almost_eq(O1 * op(t), (O1 * op)(t))\n _assert_qobj_almost_eq(2 * op(t), (2 * op)(t))\n _assert_qobj_almost_eq(op(t) * cte(t), (op * cte)(t))\n _assert_qobj_almost_eq(cte(t) * op(t), (cte * op)(t))\n _assert_qobj_almost_eq(op(t) * op_2(t), (op * op_2)(t))\n _assert_qobj_almost_eq(op_2(t) * op(t), (op_2 * op)(t))\n opp = op.copy()\n opp *= 2\n _assert_qobj_almost_eq(2 * op(t), opp(t))\n opp = op.copy()\n opp *= O1\n _assert_qobj_almost_eq(op(t) * O1, opp(t))\n opp = op.copy()\n opp *= op_2\n _assert_qobj_almost_eq(op(t) * op_2(t), opp(t))\n\n _assert_qobj_almost_eq(op(t)/2, (op/2)(t))\n opp = op.copy()\n opp /= 2\n _assert_qobj_almost_eq(op(t)/2, opp(t))\n\n\ndef test_QobjEvo_unitary():\n \"QobjEvo trans, dag, conj, _cdc\"\n N = 5\n t = np.random.rand()+1\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n\n for op in cqobjevos:\n _assert_qobj_almost_eq((op.trans())(t), op(t).trans())\n _assert_qobj_almost_eq((op.dag())(t), op(t).dag())\n _assert_qobj_almost_eq((op.conj())(t), op(t).conj())\n _assert_qobj_almost_eq((op._cdc())(t), op(t).dag()*op(t))\n\n\ndef 
test_QobjEvo_tidyup():\n \"QobjEvo tidyup\"\n tlist = np.linspace(0,1,300)\n args={\"w1\":1}\n td_obj = QobjEvo(_random_QobjEvo((5,5), [1,0,0], tlist=tlist),\n args=args, tlist=tlist)\n td_obj *= 1e-10 * np.random.random()\n td_obj.tidyup(atol=1e-8)\n t = np.random.random()\n # check that the Qobj are cleaned\n assert_equal(np.max(td_obj(t, data=True)), 0.)\n\n\ndef test_QobjEvo_compress():\n \"QobjEvo compress\"\n tlist = np.linspace(0, 1, 300)\n td_obj_1 = QobjEvo(_random_QobjEvo((5,5), [1,2,3], tlist=tlist),\n args={\"w1\":1, \"w2\":2}, tlist=tlist)\n td_obj_2 = (td_obj_1 + td_obj_1)/2.\n t = np.random.random()\n td_obj_2.compress()\n # check that the number of part is decreased\n assert_equal(len(td_obj_2.to_list()), 4)\n # check that data is still valid\n _assert_qobj_almost_eq(td_obj_2(t), td_obj_1(t))\n\n\ndef test_QobjEvo_shift():\n \"\"\"QobjEvo _shift time\"\"\"\n tlist = np.linspace(0, 1, 300)\n td_obj_1 = QobjEvo(_random_QobjEvo((1,1), [0,0], tlist=tlist),\n args={\"w1\":1, \"w2\":2}, tlist=tlist)\n td_obj_1s = td_obj_1.copy()\n td_obj_1s._shift()\n td_obj_2 = QobjEvo(_random_QobjEvo((1,1), [1,1], tlist=tlist),\n args={\"w1\":1, \"w2\":2}, tlist=tlist)\n td_obj_2s = td_obj_2.copy()\n td_obj_2s._shift()\n\n _assert_qobj_almost_eq(td_obj_1(0),td_obj_1s(1, args={\"_t0\":-1}))\n _assert_qobj_almost_eq(td_obj_2(0),td_obj_2s(1, args={\"_t0\":-1}))\n td_obj_1s.arguments({\"_t0\":-1})\n td_obj_2s.arguments({\"_t0\":-1})\n _assert_qobj_almost_eq(td_obj_1(0),td_obj_1s(1))\n _assert_qobj_almost_eq(td_obj_2(0),td_obj_2s(1))\n td_obj_1s.compile()\n td_obj_2s.compile()\n _assert_qobj_almost_eq(td_obj_1(0),td_obj_1s(1))\n _assert_qobj_almost_eq(td_obj_2(0),td_obj_2s(1))\n _assert_qobj_almost_eq(td_obj_1(0),td_obj_1s(2, args={\"_t0\":-2}))\n _assert_qobj_almost_eq(td_obj_2(0),td_obj_2s(2, args={\"_t0\":-2}))\n td_obj_1s.arguments({\"_t0\":-2})\n td_obj_2s.arguments({\"_t0\":-2})\n _assert_qobj_almost_eq(td_obj_1(0),td_obj_1s(2))\n _assert_qobj_almost_eq(td_obj_2(0),td_obj_2s(2))\n\n\ndef test_QobjEvo_apply():\n \"QobjEvo apply\"\n def multiply(qobj,b,factor = 3.):\n return qobj*b*factor\n tlist = np.linspace(0, 1, 300)\n td_obj = QobjEvo(_random_QobjEvo((5,5), [1,2,3], tlist=tlist),\n args={\"w1\":1, \"w2\":2}, tlist=tlist)\n t = np.random.random()\n # check that the number of part is decreased\n assert_equal(td_obj.apply(multiply,2)(t) == td_obj(t)*6, True)\n # check that data is still valid\n assert_equal(td_obj.apply(multiply,2,factor=2)(t) == td_obj(t)*4, True)\n\n\ndef test_QobjEvo_apply_decorator():\n \"QobjEvo apply_decorator\"\n def rescale_time_and_scale(f_original, time_scale, factor=1.):\n def f(t, *args, **kwargs):\n return f_original(time_scale*t, *args, **kwargs)*factor\n return f\n\n tlist = np.linspace(0, 1, 501)\n td_obj = QobjEvo(_random_QobjEvo((5,5), [1,2,3], tlist=tlist, cte=False),\n args={\"w1\":1, \"w2\":2}, tlist=tlist)\n t = 0.10 + np.random.random() * 0.80\n # cubicspline interpolation can be less precise\n # at the limit of the time range.\n td_obj_scaled = td_obj.apply_decorator(rescale_time_and_scale,2)\n # check that the decorated took effect mixed\n assert_equal(td_obj_scaled(t) == td_obj(2*t), True)\n for op in td_obj_scaled.ops:\n assert_equal(op[3], \"func\")\n\n def square_f(f_original):\n def f(t, *args, **kwargs):\n return f_original(t, *args, **kwargs)**2\n return f\n td_list = _random_QobjEvo((5,5), [2,0,0], tlist=tlist, cte=False)\n td_obj_str = QobjEvo(td_list, args={\"w1\":1, \"w2\":2}, tlist=tlist)\n td_obj_str_2 = 
td_obj_str.apply_decorator(square_f, str_mod=[\"(\",\")**2\"])\n _assert_qobj_almost_eq(td_obj_str_2(t), td_list[0][0] * np.sin(t)**2)\n assert_equal(td_obj_str_2.ops[0][3], \"string\")\n\n td_list = _random_QobjEvo((5,5), [3,0,0], tlist=tlist, cte=False)\n td_obj_array = QobjEvo(td_list, tlist=tlist)\n td_obj_array_2 = td_obj_array.apply_decorator(square_f, inplace_np=True)\n _assert_qobj_almost_eq(td_obj_array_2(t),\n td_list[0][0] * np.sin(t)**2, tol=3e-7)\n assert_equal(td_obj_array_2.ops[0][3], \"array\")\n\n\ndef test_QobjEvo_mul_vec():\n \"QobjEvo mul_vec\"\n N = 5\n t = np.random.rand()+1\n vec = np.arange(N)*.5+.5j\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n\n for op in cqobjevos:\n assert_allclose(spmv(op(t,data=1), vec), op.mul_vec(t, vec))\n op.compile()\n assert_allclose(spmv(op(t,data=1), vec), op.mul_vec(t, vec))\n\n\[email protected]\ndef test_QobjEvo_mul_vec_full():\n \"QobjEvo mul_vec\"\n N = 5\n t = np.random.rand()+1\n vec = np.arange(N)*.5+.5j\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n\n for op in cqobjevos:\n op.compile(dense=1)\n assert_allclose(spmv(op(t,data=1), vec), op.mul_vec(t, vec))\n op.compiled = \"\"\n op.compile(matched=1)\n assert_allclose(spmv(op(t,data=1), vec), op.mul_vec(t, vec))\n op.compiled = \"\"\n op.compile(omp=2)\n assert_allclose(spmv(op(t,data=1), vec), op.mul_vec(t, vec))\n op.compiled = \"\"\n op.compile(matched=1,omp=2)\n assert_allclose(spmv(op(t,data=1), vec), op.mul_vec(t, vec))\n\n\ndef test_QobjEvo_mul_mat():\n \"QobjEvo mul_mat\"\n N = 5\n t = np.random.rand()+1\n mat = np.random.rand(N,N)+1 + 1j*np.random.rand(N,N)\n matF = np.asfortranarray(mat)\n matV = mat2vec(mat).flatten()\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n\n for op in cqobjevos:\n Qo1 = op(t)\n assert_allclose(Qo1.data * mat, op.mul_mat(t,mat))\n assert_allclose(Qo1.data * matF, op.mul_mat(t,matF))\n op.compile()\n assert_allclose(Qo1.data * mat, op.mul_mat(t,mat))\n assert_allclose(Qo1.data * matF, op.mul_mat(t,matF))\n assert_allclose(mat2vec(Qo1.data * mat).flatten(),\n op.compiled_qobjevo.ode_mul_mat_f_vec(t,matV))\n\n\[email protected]\ndef test_QobjEvo_mul_mat_full():\n \"QobjEvo mul_mat\"\n N = 5\n t = np.random.rand()+1\n mat = np.random.rand(N,N)+1 + 1j*np.random.rand(N,N)\n matF = np.asfortranarray(mat)\n matV = mat2vec(mat).flatten()\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n\n for op in cqobjevos:\n Qo1 = op(t)\n op.compile(dense=1)\n assert_allclose(Qo1.data * mat, op.mul_mat(t,mat))\n assert_allclose(Qo1.data * matF, op.mul_mat(t,matF))\n assert_allclose(mat2vec(Qo1.data * mat).flatten(),\n op.compiled_qobjevo.ode_mul_mat_f_vec(t,matV))\n op.compiled = \"\"\n op.compile(matched=1)\n assert_allclose(Qo1.data * mat, op.mul_mat(t,mat))\n assert_allclose(Qo1.data * matF, op.mul_mat(t,matF))\n assert_allclose(mat2vec(Qo1.data * mat).flatten(),\n op.compiled_qobjevo.ode_mul_mat_f_vec(t,matV))\n op.compiled = \"\"\n op.compile(omp=2)\n assert_allclose(Qo1.data * mat, op.mul_mat(t,mat))\n assert_allclose(Qo1.data * matF, op.mul_mat(t,matF))\n assert_allclose(mat2vec(Qo1.data * mat).flatten(),\n op.compiled_qobjevo.ode_mul_mat_f_vec(t,matV))\n op.compiled = \"\"\n op.compile(matched=1,omp=2)\n assert_allclose(Qo1.data * mat, op.mul_mat(t,mat))\n assert_allclose(Qo1.data * matF, op.mul_mat(t,matF))\n assert_allclose(mat2vec(Qo1.data * mat).flatten(),\n op.compiled_qobjevo.ode_mul_mat_f_vec(t,matV))\n\n\ndef test_QobjEvo_expect_psi():\n \"QobjEvo expect psi\"\n N = 5\n t = np.random.rand()+1\n vec = np.arange(N)*.5+.5j\n cqobjevos, base_qobjs = 
_rand_cqobjevo(N)\n\n for op in cqobjevos:\n Qo1 = op(t)\n assert_allclose(cy_expect_psi(Qo1.data, vec, 0), op.expect(t,vec,0))\n op.compile()\n assert_allclose(cy_expect_psi(Qo1.data, vec, 0), op.expect(t,vec,0))\n\n\[email protected]\ndef test_QobjEvo_expect_psi_full():\n \"QobjEvo expect psi\"\n N = 5\n t = np.random.rand()+1\n vec = np.arange(N)*.5+.5j\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n\n for op in cqobjevos:\n Qo1 = op(t)\n op.compile(dense=1)\n assert_allclose(cy_expect_psi(Qo1.data, vec, 0), op.expect(t,vec,0))\n op.compiled = \"\"\n op.compile(matched=1)\n assert_allclose(cy_expect_psi(Qo1.data, vec, 0), op.expect(t,vec,0))\n op.compiled = \"\"\n op.compile(omp=2)\n assert_allclose(cy_expect_psi(Qo1.data, vec, 0), op.expect(t,vec,0))\n op.compiled = \"\"\n op.compile(matched=1,omp=2)\n assert_allclose(cy_expect_psi(Qo1.data, vec, 0), op.expect(t,vec,0))\n\n\ndef test_QobjEvo_expect_rho():\n \"QobjEvo expect rho\"\n N = 5\n t = np.random.rand()+1\n vec = np.random.rand(N*N)+1 + 1j*np.random.rand(N*N)\n mat = vec2mat(vec)\n qobj = Qobj(mat)\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n\n for op_ in cqobjevos:\n op = liouvillian(op_)\n Qo1 = op(t)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,vec,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,mat,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,qobj,0), atol=1e-14)\n\n op.compile()\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,vec,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,mat,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,qobj,0), atol=1e-14)\n\n tlist = np.linspace(0,1,300)\n args={\"w1\":1, \"w2\":2, \"w3\":3}\n data1 = np.random.random((3,3))\n data2 = np.random.random((3,3))\n td_obj_sa = QobjEvo(_random_QobjEvo((3,3), [0,3,2], tlist=tlist),\n args=args, tlist=tlist)\n td_obj_m = QobjEvo(_random_QobjEvo((3,3), [1,2,3], tlist=tlist),\n args=args, tlist=tlist)\n t = np.random.random()\n td_obj_sa = td_obj_sa.apply(spre)\n td_obj_m = td_obj_m.apply(spre)\n rho = np.arange(3*3)*0.25+.25j\n td_obj_sac = td_obj_sa.copy()\n td_obj_sac.compile()\n v1 = td_obj_sa.expect(t, rho, 0)\n v2 = td_obj_sac.expect(t, rho, 0)\n v3 = cy_expect_rho_vec(td_obj_sa(t, data=True), rho, 0)\n # check not compiled rhs const\n assert_allclose(v1, v3, rtol=1e-6)\n # check compiled rhs\n assert_allclose(v3, v2, rtol=1e-6)\n\n td_obj_mc = td_obj_m.copy()\n td_obj_mc.compile()\n v1 = td_obj_m.expect(t, rho, 1)\n v2 = td_obj_mc.expect(t, rho, 1)\n v3 = cy_expect_rho_vec(td_obj_m(t, data=True), rho, 1)\n # check not compiled rhs func\n assert_allclose(v1, v3, rtol=1e-6)\n # check compiled rhs func\n assert_allclose(v3, v2, rtol=1e-6)\n\n\[email protected]\ndef test_QobjEvo_expect_rho_full():\n \"QobjEvo expect rho\"\n N = 5\n t = np.random.rand()+1\n vec = np.random.rand(N*N)+1 + 1j*np.random.rand(N*N)\n mat = vec2mat(vec)\n qobj = Qobj(mat)\n cqobjevos, base_qobjs = _rand_cqobjevo(N)\n\n for op_ in cqobjevos:\n op = liouvillian(op_)\n Qo1 = op(t)\n op.compile(dense=1)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,vec,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,mat,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,qobj,0), atol=1e-14)\n op.compiled = \"\"\n op.compile(matched=1)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,vec,0), 
atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,mat,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,qobj,0), atol=1e-14)\n op.compiled = \"\"\n op.compile(omp=2)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,vec,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,mat,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,qobj,0), atol=1e-14)\n op.compiled = \"\"\n op.compile(matched=1,omp=2)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,vec,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,mat,0), atol=1e-14)\n assert_allclose(cy_expect_rho_vec(Qo1.data, vec, 0),\n op.expect(t,qobj,0), atol=1e-14)\n\n tlist = np.linspace(0,1,300)\n args={\"w1\":1, \"w2\":2, \"w3\":3}\n data1 = np.random.random((3,3))\n data2 = np.random.random((3,3))\n td_obj_sa = QobjEvo(_random_QobjEvo((3,3), [0,3,2], tlist=tlist),\n args=args, tlist=tlist)\n td_obj_m = QobjEvo(_random_QobjEvo((3,3), [1,2,3], tlist=tlist),\n args=args, tlist=tlist)\n t = np.random.random()\n td_obj_sa = td_obj_sa.apply(spre)\n td_obj_m = td_obj_m.apply(spre)\n rho = np.arange(3*3)*0.25+.25j\n td_obj_sac = td_obj_sa.copy()\n td_obj_sac.compile()\n v1 = td_obj_sa.expect(t, rho, 0)\n v2 = td_obj_sac.expect(t, rho, 0)\n v3 = cy_expect_rho_vec(td_obj_sa(t, data=True), rho, 0)\n # check not compiled rhs const\n assert_allclose(v1, v3, rtol=1e-6)\n # check compiled rhs\n assert_allclose(v3, v2, rtol=1e-6)\n\n td_obj_mc = td_obj_m.copy()\n td_obj_mc.compile()\n v1 = td_obj_m.expect(t, rho, 1)\n v2 = td_obj_mc.expect(t, rho, 1)\n v3 = cy_expect_rho_vec(td_obj_m(t, data=True), rho, 1)\n # check not compiled rhs func\n assert_allclose(v1, v3, rtol=1e-6)\n # check compiled rhs func\n assert_allclose(v3, v2, rtol=1e-6)\n\n\ndef test_QobjEvo_with_state():\n \"QobjEvo dynamics_args\"\n def coeff_state(t, args):\n return np.mean(args[\"state_vec\"]) * args[\"w\"] * args[\"expect_op_0\"]\n N = 5\n vec = np.arange(N)*.5+.5j\n t = np.random.random()\n data1 = np.random.random((N, N))\n data2 = np.random.random((N, N))\n q1 = Qobj(data1)\n q2 = Qobj(data2)\n args={\"w\":5, \"state_vec\":None, \"expect_op_0\":2*qeye(N)}\n\n td_data = QobjEvo([q1, [q2, coeff_state]], args=args, e_ops=[2*qeye(N)])\n q_at_t = q1 + np.mean(vec) * args[\"w\"] * expect(2*qeye(N), Qobj(vec.T)) * q2\n # Check that the with_state call\n assert_allclose(td_data.mul_vec(t, vec), q_at_t * vec)\n td_data.compile()\n # Check that the with_state call compiled\n assert_allclose(td_data.mul_vec(t, vec), q_at_t * vec)\n\n td_data = QobjEvo([q1, [q2, \"state_vec[0] * cos(w*expect_op_0*t)\"]],\n args=args, e_ops=[2*qeye(N)])\n data_at_t = q1 + q2 * vec[0] * np.cos(10 * t * expect(qeye(N), Qobj(vec.T)))\n # Check that the with_state call for str format\n assert_allclose(td_data.mul_vec(t, vec), data_at_t * vec)\n td_data.compile()\n # Check that the with_state call for str format and compiled\n assert_allclose(td_data.mul_vec(t, vec), data_at_t * vec)\n\n args={\"state_mat\":None, \"state_vec\":None, \"state_qobj\":None}\n mat = np.arange(N*N).reshape((N,N))\n def check_dyn_args(t, args):\n if not isinstance(args[\"state_qobj\"], Qobj):\n raise TypeError(\"args['state_qobj'], Qobj\")\n if not isinstance(args[\"state_vec\"], np.ndarray):\n raise TypeError(\"args['state_vec'], np.ndarray\")\n if not isinstance(args[\"state_mat\"], np.ndarray):\n raise TypeError(\"args['state_mat'], 
np.ndarray\")\n\n if len(args[\"state_vec\"].shape) != 1:\n raise TypeError\n if len(args[\"state_mat\"].shape) != 2:\n raise TypeError\n\n if not np.all(args[\"state_vec\"] == args[\"state_qobj\"].full().ravel(\"F\")):\n raise Exception\n if not np.all(args[\"state_vec\"] == args[\"state_mat\"].ravel(\"F\")):\n raise Exception\n if not np.all(args[\"state_mat\"] == mat):\n raise Exception\n return 1\n td_data = QobjEvo([q1, check_dyn_args], args=args)\n td_data.mul_mat(0, mat)\n\n\ndef test_QobjEvo_pickle():\n \"QobjEvo pickle\"\n #used in parallel_map\n import pickle\n tlist = np.linspace(0,1,300)\n args={\"w1\":1, \"w2\":2}\n t = np.random.random()\n\n td_obj_c = QobjEvo(_random_QobjEvo((5,5), [0,0,0]))\n td_obj_c.compile()\n pickled = pickle.dumps(td_obj_c)\n td_pick = pickle.loads(pickled)\n # Check for const case\n assert_equal(td_obj_c(t) == td_pick(t), True)\n\n td_obj_sa = QobjEvo(_random_QobjEvo((5,5), [2,3,0], tlist=tlist),\n args=args, tlist=tlist)\n td_obj_sa.compile()\n td_obj_m = QobjEvo(_random_QobjEvo((5,5), [1,2,3], tlist=tlist),\n args=args, tlist=tlist)\n\n pickled = pickle.dumps(td_obj_sa, -1)\n td_pick = pickle.loads(pickled)\n # Check for cython compiled coeff\n assert_equal(td_obj_sa(t) == td_pick(t), True)\n\n pickled = pickle.dumps(td_obj_m, -1)\n td_pick = pickle.loads(pickled)\n # Check for not compiled mix\n assert_equal(td_obj_m(t) == td_pick(t), True)\n td_obj_m.compile()\n pickled = pickle.dumps(td_obj_m, -1)\n td_pick = pickle.loads(pickled)\n # Check for ct_cqobjevo\n assert_equal(td_obj_m(t) == td_pick(t), True)\n\n\ndef test_QobjEvo_safepickle():\n \"QobjEvo with safe pickle\"\n #used in parallel_map\n import pickle\n from qutip.qobjevo import safePickle\n old_set = safePickle[0]\n safePickle[0] = True\n tlist = np.linspace(0,1,300)\n args={\"w1\":1, \"w2\":2}\n t = np.random.random()\n\n td_obj_c = QobjEvo(_random_QobjEvo((5,5), [0,0,0]))\n td_obj_c.compile()\n pickled = pickle.dumps(td_obj_c)\n td_pick = pickle.loads(pickled)\n # Check for const case\n assert_equal(td_obj_c(t) == td_pick(t), True)\n\n td_obj_sa = QobjEvo(_random_QobjEvo((5,5), [2,3,0], tlist=tlist),\n args=args, tlist=tlist)\n td_obj_sa.compile()\n td_obj_m = QobjEvo(_random_QobjEvo((5,5), [1,2,3], tlist=tlist),\n args=args, tlist=tlist)\n\n pickled = pickle.dumps(td_obj_sa, -1)\n td_pick = pickle.loads(pickled)\n # Check for cython compiled coeff\n assert_equal(td_obj_sa(t) == td_pick(t), True)\n\n pickled = pickle.dumps(td_obj_m, -1)\n td_pick = pickle.loads(pickled)\n # Check for not compiled mix\n assert_equal(td_obj_m(t) == td_pick(t), True)\n td_obj_m.compile()\n pickled = pickle.dumps(td_obj_m, -1)\n td_pick = pickle.loads(pickled)\n # Check for ct_cqobjevo\n assert_equal(td_obj_m(t) == td_pick(t), True)\n safePickle[0] = old_set\n\n\ndef test_QobjEvo_superoperator():\n \"QobjEvo superoperator\"\n cqobjevos1, _ = _rand_cqobjevo(3)\n cqobjevos2, _ = _rand_cqobjevo(3)\n cqobjevos3, _ = _rand_cqobjevo(3)\n t = np.random.rand()+1\n for op1, op2 in zip(cqobjevos1, cqobjevos2):\n Q1 = op1(t)\n Q2 = op2(t)\n _assert_qobj_almost_eq(lindblad_dissipator(Q1, Q2, chi=0.5),\n lindblad_dissipator(op1, op2, chi=0.5)(t))\n _assert_qobj_almost_eq(sprepost(Q1, Q2),\n sprepost(op1, op2)(t))\n _assert_qobj_almost_eq(liouvillian(Q1, [Q2]),\n liouvillian(op1, [op2])(t))\n", "# -*- coding: utf-8 -*-\n# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2014 and later, Alexander J G Pitchford\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with 
or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n# @author: Alexander Pitchford\n# @email1: [email protected]\n# @email2: [email protected]\n# @organization: Aberystwyth University\n# @supervisor: Daniel Burgarth\n\n\"\"\"\nClasses that define the dynamics of the (quantum) system and target evolution\nto be optimised.\nThe contols are also defined here, i.e. the dynamics generators (Hamiltonians,\nLimbladians etc). The dynamics for the time slices are calculated here, along\nwith the evolution as determined by the control amplitudes.\n\nSee the subclass descriptions and choose the appropriate class for the\napplication. 
The choice depends on the type of matrix used to define\nthe dynamics.\n\nThese class implement functions for getting the dynamics generators for\nthe combined (drift + ctrls) dynamics with the approriate operator applied\n\nNote the methods in these classes were inspired by:\nDYNAMO - Dynamic Framework for Quantum Optimal Control\nSee Machnes et.al., arXiv.1011.4874\n\"\"\"\nimport warnings\nimport numpy as np\nimport scipy.linalg as la\nimport scipy.sparse as sp\n# QuTiP\nfrom qutip.qobj import Qobj\nfrom qutip.sparse import sp_eigs, eigh\nimport qutip.settings as settings\n# QuTiP logging\nimport qutip.logging_utils as logging\nlogger = logging.get_logger()\n# QuTiP control modules\nimport qutip.control.errors as errors\nimport qutip.control.tslotcomp as tslotcomp\nimport qutip.control.fidcomp as fidcomp\nimport qutip.control.propcomp as propcomp\nimport qutip.control.symplectic as sympl\nimport qutip.control.dump as qtrldump\n\nDEF_NUM_TSLOTS = 10\nDEF_EVO_TIME = 1.0\n\ndef _is_string(var):\n try:\n if isinstance(var, basestring):\n return True\n except NameError:\n try:\n if isinstance(var, str):\n return True\n except:\n return False\n except:\n return False\n\n return False\n\ndef _check_ctrls_container(ctrls):\n \"\"\"\n Check through the controls container.\n Convert to an array if its a list of lists\n return the processed container\n raise type error if the container structure is invalid\n \"\"\"\n if isinstance(ctrls, (list, tuple)):\n # Check to see if list of lists\n try:\n if isinstance(ctrls[0], (list, tuple)):\n ctrls = np.array(ctrls, dtype=object)\n except:\n pass\n\n if isinstance(ctrls, np.ndarray):\n if len(ctrls.shape) != 2:\n raise TypeError(\"Incorrect shape for ctrl dyn gen array\")\n for k in range(ctrls.shape[0]):\n for j in range(ctrls.shape[1]):\n if not isinstance(ctrls[k, j], Qobj):\n raise TypeError(\"All control dyn gen must be Qobj\")\n elif isinstance(ctrls, (list, tuple)):\n for ctrl in ctrls:\n if not isinstance(ctrl, Qobj):\n raise TypeError(\"All control dyn gen must be Qobj\")\n else:\n raise TypeError(\"Controls list or array not set correctly\")\n\n return ctrls\n\ndef _check_drift_dyn_gen(drift):\n if not isinstance(drift, Qobj):\n if not isinstance(drift, (list, tuple)):\n raise TypeError(\"drift should be a Qobj or a list of Qobj\")\n else:\n for d in drift:\n if not isinstance(d, Qobj):\n raise TypeError(\n \"drift should be a Qobj or a list of Qobj\")\n\nwarnings.simplefilter('always', DeprecationWarning) #turn off filter\ndef _attrib_deprecation(message, stacklevel=3):\n \"\"\"\n Issue deprecation warning\n Using stacklevel=3 will ensure message refers the function\n calling with the deprecated parameter,\n \"\"\"\n warnings.warn(message, DeprecationWarning, stacklevel=stacklevel)\n\ndef _func_deprecation(message, stacklevel=3):\n \"\"\"\n Issue deprecation warning\n Using stacklevel=3 will ensure message refers the function\n calling with the deprecated parameter,\n \"\"\"\n warnings.warn(message, DeprecationWarning, stacklevel=stacklevel)\n\nclass Dynamics(object):\n \"\"\"\n This is a base class only. See subclass descriptions and choose an\n appropriate one for the application.\n\n Note that initialize_controls must be called before most of the methods\n can be used. init_timeslots can be called sometimes earlier in order\n to access timeslot related attributes\n\n This acts as a container for the operators that are used to calculate\n time evolution of the system under study. 
That is the dynamics generators\n (Hamiltonians, Lindbladians etc), the propagators from one timeslot to\n the next, and the evolution operators. Due to the large number of matrix\n additions and multiplications, for small systems at least, the optimisation\n performance is much better using ndarrays to represent these operators.\n However\n\n Attributes\n ----------\n log_level : integer\n level of messaging output from the logger.\n Options are attributes of qutip.logging_utils,\n in decreasing levels of messaging, are:\n DEBUG_INTENSE, DEBUG_VERBOSE, DEBUG, INFO, WARN, ERROR, CRITICAL\n Anything WARN or above is effectively 'quiet' execution,\n assuming everything runs as expected.\n The default NOTSET implies that the level will be taken from\n the QuTiP settings file, which by default is WARN\n\n params: Dictionary\n The key value pairs are the attribute name and value\n Note: attributes are created if they do not exist already,\n and are overwritten if they do.\n\n stats : Stats\n Attributes of which give performance stats for the optimisation\n set to None to reduce overhead of calculating stats.\n Note it is (usually) shared with the Optimizer object\n\n tslot_computer : TimeslotComputer (subclass instance)\n Used to manage when the timeslot dynamics\n generators, propagators, gradients etc are updated\n\n prop_computer : PropagatorComputer (subclass instance)\n Used to compute the propagators and their gradients\n\n fid_computer : FidelityComputer (subclass instance)\n Used to computer the fidelity error and the fidelity error\n gradient.\n\n memory_optimization : int\n Level of memory optimisation. Setting to 0 (default) means that\n execution speed is prioritized over memory.\n Setting to 1 means that some memory prioritisation steps will be\n taken, for instance using Qobj (and hence sparse arrays) as the\n the internal operator data type, and not caching some operators\n Potentially further memory saving maybe made with\n memory_optimization > 1.\n The options are processed in _set_memory_optimizations, see\n this for more information. Individual memory saving options can be\n switched by settting them directly (see below)\n\n oper_dtype : type\n Data type for internal dynamics generators, propagators and time\n evolution operators. This can be ndarray or Qobj, or (in theory) any\n other representaion that supports typical matrix methods (e.g. dot)\n ndarray performs best for smaller quantum systems.\n Qobj may perform better for larger systems, and will also\n perform better when (custom) fidelity measures use Qobj methods\n such as partial trace.\n See _choose_oper_dtype for how this is chosen when not specified\n\n cache_phased_dyn_gen : bool\n If True then the dynamics generators will be saved with and\n without the propagation prefactor (if there is one)\n Defaults to True when memory_optimization=0, otherwise False\n\n cache_prop_grad : bool\n If the True then the propagator gradients (for exact gradients) will\n be computed when the propagator are computed and cache until\n the are used by the fidelity computer. 
If False then the\n fidelity computer will calculate them as needed.\n Defaults to True when memory_optimization=0, otherwise False\n\n cache_dyn_gen_eigenvectors_adj: bool\n If True then DynamicsUnitary will cached the adjoint of\n the Hamiltion eignvector matrix\n Defaults to True when memory_optimization=0, otherwise False\n\n sparse_eigen_decomp: bool\n If True then DynamicsUnitary will use the sparse eigenvalue\n decomposition.\n Defaults to True when memory_optimization<=1, otherwise False\n\n num_tslots : integer\n Number of timeslots (aka timeslices)\n\n num_ctrls : integer\n Number of controls.\n Note this is calculated as the length of ctrl_dyn_gen when first used.\n And is recalculated during initialise_controls only.\n\n evo_time : float\n Total time for the evolution\n\n tau : array[num_tslots] of float\n Duration of each timeslot\n Note that if this is set before initialize_controls is called\n then num_tslots and evo_time are calculated from tau, otherwise\n tau is generated from num_tslots and evo_time, that is\n equal size time slices\n\n time : array[num_tslots+1] of float\n Cumulative time for the evolution, that is the time at the start\n of each time slice\n\n drift_dyn_gen : Qobj or list of Qobj\n Drift or system dynamics generator (Hamiltonian)\n Matrix defining the underlying dynamics of the system\n Can also be a list of Qobj (length num_tslots) for time varying\n drift dynamics\n\n ctrl_dyn_gen : List of Qobj\n Control dynamics generator (Hamiltonians)\n List of matrices defining the control dynamics\n\n initial : Qobj\n Starting state / gate\n The matrix giving the initial state / gate, i.e. at time 0\n Typically the identity for gate evolution\n\n target : Qobj\n Target state / gate:\n The matrix giving the desired state / gate for the evolution\n\n ctrl_amps : array[num_tslots, num_ctrls] of float\n Control amplitudes\n The amplitude (scale factor) for each control in each timeslot\n\n initial_ctrl_scaling : float\n Scale factor applied to be applied the control amplitudes\n when they are initialised\n This is used by the PulseGens rather than in any fucntions in\n this class\n\n initial_ctrl_offset : float\n Linear offset applied to be applied the control amplitudes\n when they are initialised\n This is used by the PulseGens rather than in any fucntions in\n this class\n\n dyn_gen : List of Qobj\n Dynamics generators\n the combined drift and control dynamics generators\n for each timeslot\n\n prop : list of Qobj\n Propagators - used to calculate time evolution from one\n timeslot to the next\n\n prop_grad : array[num_tslots, num_ctrls] of Qobj\n Propagator gradient (exact gradients only)\n Array of Qobj that give the gradient\n with respect to the control amplitudes in a timeslot\n Note this attribute is only created when the selected\n PropagatorComputer is an exact gradient type.\n\n fwd_evo : List of Qobj\n Forward evolution (or propagation)\n the time evolution operator from the initial state / gate to the\n specified timeslot as generated by the dyn_gen\n\n onwd_evo : List of Qobj\n Onward evolution (or propagation)\n the time evolution operator from the specified timeslot to\n end of the evolution time as generated by the dyn_gen\n\n onto_evo : List of Qobj\n 'Backward' List of Qobj propagation\n the overlap of the onward propagation with the inverse of the\n target.\n Note this is only used (so far) by the unitary dynamics fidelity\n\n evo_current : Boolean\n Used to flag that the dynamics used to calculate the evolution\n operators is current. 
It is set to False when the amplitudes\n change\n\n fact_mat_round_prec : float\n Rounding precision used when calculating the factor matrix\n to determine if two eigenvalues are equivalent\n Only used when the PropagatorComputer uses diagonalisation\n\n def_amps_fname : string\n Default name for the output used when save_amps is called\n\n unitarity_check_level : int\n If > 0 then unitarity of the system evolution is checked at at\n evolution recomputation.\n level 1 checks all propagators\n level 2 checks eigen basis as well\n Default is 0\n\n unitarity_tol :\n Tolerance used in checking if operator is unitary\n Default is 1e-10\n\n dump : :class:`dump.DynamicsDump`\n Store of historical calculation data.\n Set to None (Default) for no storing of historical data\n Use dumping property to set level of data dumping\n\n dumping : string\n level of data dumping: NONE, SUMMARY, FULL or CUSTOM\n See property docstring for details\n\n dump_to_file : bool\n If set True then data will be dumped to file during the calculations\n dumping will be set to SUMMARY during init_evo if dump_to_file is True\n and dumping not set.\n Default is False\n\n dump_dir : string\n Basically a link to dump.dump_dir. Exists so that it can be set through\n dyn_params.\n If dump is None then will return None or will set dumping to SUMMARY\n when setting a path\n\n \"\"\"\n def __init__(self, optimconfig, params=None):\n self.config = optimconfig\n self.params = params\n self.reset()\n\n def reset(self):\n # Link to optimiser object if self is linked to one\n self.parent = None\n # Main functional attributes\n self.time = None\n self.initial = None\n self.target = None\n self.ctrl_amps = None\n self.initial_ctrl_scaling = 1.0\n self.initial_ctrl_offset = 0.0\n self.drift_dyn_gen = None\n self.ctrl_dyn_gen = None\n self._tau = None\n self._evo_time = None\n self._num_ctrls = None\n self._num_tslots = None\n # attributes used for processing evolution\n self.memory_optimization = 0\n self.oper_dtype = None\n self.cache_phased_dyn_gen = None\n self.cache_prop_grad = None\n self.cache_dyn_gen_eigenvectors_adj = None\n self.sparse_eigen_decomp = None\n self.dyn_dims = None\n self.dyn_shape = None\n self.sys_dims = None\n self.sys_shape = None\n self.time_depend_drift = False\n self.time_depend_ctrl_dyn_gen = False\n # These internal attributes will be of the internal operator data type\n # used to compute the evolution\n # Note this maybe ndarray, Qobj or some other depending on oper_dtype\n self._drift_dyn_gen = None\n self._ctrl_dyn_gen = None\n self._phased_ctrl_dyn_gen = None\n self._dyn_gen_phase = None\n self._phase_application = None\n self._initial = None\n self._target = None\n self._onto_evo_target = None\n self._dyn_gen = None\n self._phased_dyn_gen = None\n self._prop = None\n self._prop_grad = None\n self._fwd_evo = None\n self._onwd_evo = None\n self._onto_evo = None\n # The _qobj attribs are Qobj representations of the equivalent\n # internal attribute. 
They are only set when the extenal accessors\n # are used\n self._onto_evo_target_qobj = None\n self._dyn_gen_qobj = None\n self._prop_qobj = None\n self._prop_grad_qobj = None\n self._fwd_evo_qobj = None\n self._onwd_evo_qobj = None\n self._onto_evo_qobj = None\n # Atrributes used in diagonalisation\n # again in internal operator data type (see above)\n self._decomp_curr = None\n self._prop_eigen = None\n self._dyn_gen_eigenvectors = None\n self._dyn_gen_eigenvectors_adj = None\n self._dyn_gen_factormatrix = None\n self.fact_mat_round_prec = 1e-10\n\n # Debug and information attribs\n self.stats = None\n self.id_text = 'DYN_BASE'\n self.def_amps_fname = \"ctrl_amps.txt\"\n self.log_level = self.config.log_level\n # Internal flags\n self._dyn_gen_mapped = False\n self._evo_initialized = False\n self._timeslots_initialized = False\n self._ctrls_initialized = False\n self._ctrl_dyn_gen_checked = False\n self._drift_dyn_gen_checked = False\n # Unitary checking\n self.unitarity_check_level = 0\n self.unitarity_tol = 1e-10\n # Data dumping\n self.dump = None\n self.dump_to_file = False\n\n self.apply_params()\n\n # Create the computing objects\n self._create_computers()\n\n self.clear()\n\n def apply_params(self, params=None):\n \"\"\"\n Set object attributes based on the dictionary (if any) passed in the\n instantiation, or passed as a parameter\n This is called during the instantiation automatically.\n The key value pairs are the attribute name and value\n Note: attributes are created if they do not exist already,\n and are overwritten if they do.\n \"\"\"\n if not params:\n params = self.params\n\n if isinstance(params, dict):\n self.params = params\n for key in params:\n setattr(self, key, params[key])\n\n @property\n def log_level(self):\n return logger.level\n\n @log_level.setter\n def log_level(self, lvl):\n \"\"\"\n Set the log_level attribute and set the level of the logger\n that is call logger.setLevel(lvl)\n \"\"\"\n logger.setLevel(lvl)\n\n @property\n def dumping(self):\n \"\"\"\n The level of data dumping that will occur during the time evolution\n calculation.\n - NONE : No processing data dumped (Default)\n - SUMMARY : A summary of each time evolution will be recorded\n - FULL : All operators used or created in the calculation dumped\n - CUSTOM : Some customised level of dumping\n When first set to CUSTOM this is equivalent to SUMMARY. It is then up\n to the user to specify which operators are dumped\n WARNING: FULL could consume a lot of memory!\n \"\"\"\n if self.dump is None:\n lvl = 'NONE'\n else:\n lvl = self.dump.level\n\n return lvl\n\n @dumping.setter\n def dumping(self, value):\n if value is None:\n self.dump = None\n else:\n if not _is_string(value):\n raise TypeError(\"Value must be string value\")\n lvl = value.upper()\n if lvl == 'NONE':\n self.dump = None\n else:\n if not isinstance(self.dump, qtrldump.DynamicsDump):\n self.dump = qtrldump.DynamicsDump(self, level=lvl)\n else:\n self.dump.level = lvl\n\n @property\n def dump_dir(self):\n if self.dump:\n return self.dump.dump_dir\n else:\n return None\n\n @dump_dir.setter\n def dump_dir(self, value):\n if not self.dump:\n self.dumping = 'SUMMARY'\n self.dump.dump_dir = value\n\n def _create_computers(self):\n \"\"\"\n Create the default timeslot, fidelity and propagator computers\n \"\"\"\n # The time slot computer. 
By default it is set to UpdateAll\n # can be set to DynUpdate in the configuration\n # (see class file for details)\n if self.config.tslot_type == 'DYNAMIC':\n self.tslot_computer = tslotcomp.TSlotCompDynUpdate(self)\n else:\n self.tslot_computer = tslotcomp.TSlotCompUpdateAll(self)\n\n self.prop_computer = propcomp.PropCompFrechet(self)\n self.fid_computer = fidcomp.FidCompTraceDiff(self)\n\n def clear(self):\n self.ctrl_amps = None\n self.evo_current = False\n if self.fid_computer is not None:\n self.fid_computer.clear()\n\n @property\n def num_tslots(self):\n if not self._timeslots_initialized:\n self.init_timeslots()\n return self._num_tslots\n\n @num_tslots.setter\n def num_tslots(self, value):\n self._num_tslots = value\n if self._timeslots_initialized:\n self._tau = None\n self.init_timeslots()\n\n @property\n def evo_time(self):\n if not self._timeslots_initialized:\n self.init_timeslots()\n return self._evo_time\n\n @evo_time.setter\n def evo_time(self, value):\n self._evo_time = value\n if self._timeslots_initialized:\n self._tau = None\n self.init_timeslots()\n\n @property\n def tau(self):\n if not self._timeslots_initialized:\n self.init_timeslots()\n return self._tau\n\n @tau.setter\n def tau(self, value):\n self._tau = value\n self.init_timeslots()\n\n def init_timeslots(self):\n \"\"\"\n Generate the timeslot duration array 'tau' based on the evo_time\n and num_tslots attributes, unless the tau attribute is already set\n in which case this step in ignored\n Generate the cumulative time array 'time' based on the tau values\n \"\"\"\n # set the time intervals to be equal timeslices of the total if\n # the have not been set already (as part of user config)\n if self._num_tslots is None:\n self._num_tslots = DEF_NUM_TSLOTS\n if self._evo_time is None:\n self._evo_time = DEF_EVO_TIME\n\n if self._tau is None:\n self._tau = np.ones(self._num_tslots, dtype='f') * \\\n self._evo_time/self._num_tslots\n else:\n self._num_tslots = len(self._tau)\n self._evo_time = np.sum(self._tau)\n\n self.time = np.zeros(self._num_tslots+1, dtype=float)\n # set the cumulative time by summing the time intervals\n for t in range(self._num_tslots):\n self.time[t+1] = self.time[t] + self._tau[t]\n\n self._timeslots_initialized = True\n\n def _set_memory_optimizations(self):\n \"\"\"\n Set various memory optimisation attributes based on the\n memory_optimization attribute\n If they have been set already, e.g. 
in apply_params\n then they will not be overridden here\n \"\"\"\n logger.info(\"Setting memory optimisations for level {}\".format(\n self.memory_optimization))\n\n if self.oper_dtype is None:\n self._choose_oper_dtype()\n logger.info(\"Internal operator data type choosen to be {}\".format(\n self.oper_dtype))\n else:\n logger.info(\"Using operator data type {}\".format(\n self.oper_dtype))\n\n if self.cache_phased_dyn_gen is None:\n if self.memory_optimization > 0:\n self.cache_phased_dyn_gen = False\n else:\n self.cache_phased_dyn_gen = True\n logger.info(\"phased dynamics generator caching {}\".format(\n self.cache_phased_dyn_gen))\n\n if self.cache_prop_grad is None:\n if self.memory_optimization > 0:\n self.cache_prop_grad = False\n else:\n self.cache_prop_grad = True\n logger.info(\"propagator gradient caching {}\".format(\n self.cache_prop_grad))\n\n if self.cache_dyn_gen_eigenvectors_adj is None:\n if self.memory_optimization > 0:\n self.cache_dyn_gen_eigenvectors_adj = False\n else:\n self.cache_dyn_gen_eigenvectors_adj = True\n logger.info(\"eigenvector adjoint caching {}\".format(\n self.cache_dyn_gen_eigenvectors_adj))\n\n if self.sparse_eigen_decomp is None:\n if self.memory_optimization > 1:\n self.sparse_eigen_decomp = True\n else:\n self.sparse_eigen_decomp = False\n logger.info(\"use sparse eigen decomp {}\".format(\n self.sparse_eigen_decomp))\n\n def _choose_oper_dtype(self):\n \"\"\"\n Attempt select most efficient internal operator data type\n \"\"\"\n\n if self.memory_optimization > 0:\n self.oper_dtype = Qobj\n else:\n # Method taken from Qobj.expm()\n # if method is not explicitly given, try to make a good choice\n # between sparse and dense solvers by considering the size of the\n # system and the number of non-zero elements.\n if self.time_depend_drift:\n dg = self.drift_dyn_gen[0]\n else:\n dg = self.drift_dyn_gen\n if self.time_depend_ctrl_dyn_gen:\n ctrls = self.ctrl_dyn_gen[0, :]\n else:\n ctrls = self.ctrl_dyn_gen\n for c in ctrls:\n dg = dg + c\n\n N = dg.data.shape[0]\n n = dg.data.nnz\n\n if N ** 2 < 100 * n:\n # large number of nonzero elements, revert to dense solver\n self.oper_dtype = np.ndarray\n elif N > 400:\n # large system, and quite sparse -> qutips sparse method\n self.oper_dtype = Qobj\n else:\n # small system, but quite sparse -> qutips sparse/dense method\n self.oper_dtype = np.ndarray\n\n return self.oper_dtype\n\n def _init_evo(self):\n \"\"\"\n Create the container lists / arrays for the:\n dynamics generations, propagators, and evolutions etc\n Set the time slices and cumulative time\n \"\"\"\n # check evolution operators\n if not self._drift_dyn_gen_checked:\n _check_drift_dyn_gen(self.drift_dyn_gen)\n if not self._ctrl_dyn_gen_checked:\n self.ctrl_dyn_gen = _check_ctrls_container(self.ctrl_dyn_gen)\n\n if not isinstance(self.initial, Qobj):\n raise TypeError(\"initial must be a Qobj\")\n\n if not isinstance(self.target, Qobj):\n raise TypeError(\"target must be a Qobj\")\n\n self.refresh_drift_attribs()\n self.sys_dims = self.initial.dims\n self.sys_shape = self.initial.shape\n # Set the phase application method\n self._init_phase()\n self._set_memory_optimizations()\n n_ts = self.num_tslots\n n_ctrls = self.num_ctrls\n if self.oper_dtype == Qobj:\n self._initial = self.initial\n self._target = self.target\n self._drift_dyn_gen = self.drift_dyn_gen\n self._ctrl_dyn_gen = self.ctrl_dyn_gen\n elif self.oper_dtype == np.ndarray:\n self._initial = self.initial.full()\n self._target = self.target.full()\n if self.time_depend_drift:\n 
self._drift_dyn_gen = [d.full() for d in self.drift_dyn_gen]\n else:\n self._drift_dyn_gen = self.drift_dyn_gen.full()\n if self.time_depend_ctrl_dyn_gen:\n self._ctrl_dyn_gen = np.empty([n_ts, n_ctrls], dtype=object)\n for k in range(n_ts):\n for j in range(n_ctrls):\n self._ctrl_dyn_gen[k, j] = \\\n self.ctrl_dyn_gen[k, j].full()\n else:\n self._ctrl_dyn_gen = [ctrl.full()\n for ctrl in self.ctrl_dyn_gen]\n elif self.oper_dtype == sp.csr_matrix:\n self._initial = self.initial.data\n self._target = self.target.data\n if self.time_depend_drift:\n self._drift_dyn_gen = [d.data for d in self.drift_dyn_gen]\n else:\n self._drift_dyn_gen = self.drift_dyn_gen.data\n\n if self.time_depend_ctrl_dyn_gen:\n self._ctrl_dyn_gen = np.empty([n_ts, n_ctrls], dtype=object)\n for k in range(n_ts):\n for j in range(n_ctrls):\n self._ctrl_dyn_gen[k, j] = \\\n self.ctrl_dyn_gen[k, j].data\n else:\n self._ctrl_dyn_gen = [ctrl.data for ctrl in self.ctrl_dyn_gen]\n else:\n logger.warn(\"Unknown option '{}' for oper_dtype. \"\n \"Assuming that internal drift, ctrls, initial and target \"\n \"have been set correctly\".format(self.oper_dtype))\n\n if self.cache_phased_dyn_gen:\n if self.time_depend_ctrl_dyn_gen:\n self._phased_ctrl_dyn_gen = np.empty([n_ts, n_ctrls],\n dtype=object)\n for k in range(n_ts):\n for j in range(n_ctrls):\n self._phased_ctrl_dyn_gen[k, j] = self._apply_phase(\n self._ctrl_dyn_gen[k, j])\n else:\n self._phased_ctrl_dyn_gen = [self._apply_phase(ctrl)\n for ctrl in self._ctrl_dyn_gen]\n\n self._dyn_gen = [object for x in range(self.num_tslots)]\n if self.cache_phased_dyn_gen:\n self._phased_dyn_gen = [object for x in range(self.num_tslots)]\n self._prop = [object for x in range(self.num_tslots)]\n if self.prop_computer.grad_exact and self.cache_prop_grad:\n self._prop_grad = np.empty([self.num_tslots, self.num_ctrls],\n dtype=object)\n # Time evolution operator (forward propagation)\n self._fwd_evo = [object for x in range(self.num_tslots+1)]\n self._fwd_evo[0] = self._initial\n if self.fid_computer.uses_onwd_evo:\n # Time evolution operator (onward propagation)\n self._onwd_evo = [object for x in range(self.num_tslots)]\n if self.fid_computer.uses_onto_evo:\n # Onward propagation overlap with inverse target\n self._onto_evo = [object for x in range(self.num_tslots+1)]\n self._onto_evo[self.num_tslots] = self._get_onto_evo_target()\n\n if isinstance(self.prop_computer, propcomp.PropCompDiag):\n self._create_decomp_lists()\n\n if (self.log_level <= logging.DEBUG\n and isinstance(self, DynamicsUnitary)):\n self.unitarity_check_level = 1\n\n if self.dump_to_file:\n if self.dump is None:\n self.dumping = 'SUMMARY'\n self.dump.write_to_file = True\n self.dump.create_dump_dir()\n logger.info(\"Dynamics dump will be written to:\\n{}\".format(\n self.dump.dump_dir))\n\n self._evo_initialized = True\n\n @property\n def dyn_gen_phase(self):\n \"\"\"\n Some op that is applied to the dyn_gen before expontiating to\n get the propagator.\n See `phase_application` for how this is applied\n \"\"\"\n # Note that if this returns None then _apply_phase will never be\n # called\n return self._dyn_gen_phase\n\n @dyn_gen_phase.setter\n def dyn_gen_phase(self, value):\n self._dyn_gen_phase = value\n\n @property\n def phase_application(self):\n \"\"\"\n phase_application : scalar(string), default='preop'\n Determines how the phase is applied to the dynamics generators\n - 'preop' : P = expm(phase*dyn_gen)\n - 'postop' : P = expm(dyn_gen*phase)\n - 'custom' : Customised phase application\n The 'custom' option 
assumes that the _apply_phase method has been\n set to a custom function\n \"\"\"\n return self._phase_application\n\n @phase_application.setter\n def phase_application(self, value):\n self._set_phase_application(value)\n\n def _set_phase_application(self, value):\n self._config_phase_application(value)\n self._phase_application = value\n\n def _config_phase_application(self, ph_app=None):\n \"\"\"\n Set the appropriate function for the phase application\n \"\"\"\n err_msg = (\"Invalid value '{}' for phase application. Must be either \"\n \"'preop', 'postop' or 'custom'\".format(ph_app))\n\n if ph_app is None:\n ph_app = self._phase_application\n\n try:\n ph_app = ph_app.lower()\n except:\n raise ValueError(err_msg)\n\n if ph_app == 'preop':\n self._apply_phase = self._apply_phase_preop\n elif ph_app == 'postop':\n self._apply_phase = self._apply_phase_postop\n elif ph_app == 'custom':\n # Do nothing, assume _apply_phase set elsewhere\n pass\n else:\n raise ValueError(err_msg)\n\n def _init_phase(self):\n if self.dyn_gen_phase is not None:\n self._config_phase_application()\n else:\n self.cache_phased_dyn_gen = False\n\n def _apply_phase(self, dg):\n \"\"\"\n This default method does nothing.\n It will be set to another method automatically if `phase_application`\n is 'preop' or 'postop'. It should be overridden repointed if\n `phase_application` is 'custom'\n It will never be called if `dyn_gen_phase` is None\n \"\"\"\n return dg\n\n def _apply_phase_preop(self, dg):\n \"\"\"\n Apply phasing operator to dynamics generator.\n This called during the propagator calculation.\n In this case it will be applied as phase*dg\n \"\"\"\n if hasattr(self.dyn_gen_phase, 'dot'):\n phased_dg = self._dyn_gen_phase.dot(dg)\n else:\n phased_dg = self._dyn_gen_phase*dg\n return phased_dg\n\n def _apply_phase_postop(self, dg):\n \"\"\"\n Apply phasing operator to dynamics generator.\n This called during the propagator calculation.\n In this case it will be applied as dg*phase\n \"\"\"\n if hasattr(self.dyn_gen_phase, 'dot'):\n phased_dg = dg.dot(self._dyn_gen_phase)\n else:\n phased_dg = dg*self._dyn_gen_phase\n return phased_dg\n\n def _create_decomp_lists(self):\n \"\"\"\n Create lists that will hold the eigen decomposition\n used in calculating propagators and gradients\n Note: used with PropCompDiag propagator calcs\n \"\"\"\n n_ts = self.num_tslots\n self._decomp_curr = [False for x in range(n_ts)]\n self._prop_eigen = [object for x in range(n_ts)]\n self._dyn_gen_eigenvectors = [object for x in range(n_ts)]\n if self.cache_dyn_gen_eigenvectors_adj:\n self._dyn_gen_eigenvectors_adj = [object for x in range(n_ts)]\n self._dyn_gen_factormatrix = [object for x in range(n_ts)]\n\n def initialize_controls(self, amps, init_tslots=True):\n \"\"\"\n Set the initial control amplitudes and time slices\n Note this must be called after the configuration is complete\n before any dynamics can be calculated\n \"\"\"\n if not isinstance(self.prop_computer, propcomp.PropagatorComputer):\n raise errors.UsageError(\n \"No prop_computer (propagator computer) \"\n \"set. A default should be assigned by the Dynamics subclass\")\n\n if not isinstance(self.tslot_computer, tslotcomp.TimeslotComputer):\n raise errors.UsageError(\n \"No tslot_computer (Timeslot computer)\"\n \" set. A default should be assigned by the Dynamics class\")\n\n if not isinstance(self.fid_computer, fidcomp.FidelityComputer):\n raise errors.UsageError(\n \"No fid_computer (Fidelity computer)\"\n \" set. 
A default should be assigned by the Dynamics subclass\")\n\n self.ctrl_amps = None\n if not self._timeslots_initialized:\n init_tslots = True\n if init_tslots:\n self.init_timeslots()\n self._init_evo()\n self.tslot_computer.init_comp()\n self.fid_computer.init_comp()\n self._ctrls_initialized = True\n self.update_ctrl_amps(amps)\n\n def check_ctrls_initialized(self):\n if not self._ctrls_initialized:\n raise errors.UsageError(\n \"Controls not initialised. \"\n \"Ensure Dynamics.initialize_controls has been \"\n \"executed with the initial control amplitudes.\")\n\n def get_amp_times(self):\n return self.time[:self.num_tslots]\n\n def save_amps(self, file_name=None, times=None, amps=None, verbose=False):\n \"\"\"\n Save a file with the current control amplitudes in each timeslot\n The first column in the file will be the start time of the slot\n\n Parameters\n ----------\n file_name : string\n Name of the file\n If None given the def_amps_fname attribuite will be used\n\n times : List type (or string)\n List / array of the start times for each slot\n If None given this will be retrieved through get_amp_times()\n If 'exclude' then times will not be saved in the file, just\n the amplitudes\n\n amps : Array[num_tslots, num_ctrls]\n Amplitudes to be saved\n If None given the ctrl_amps attribute will be used\n\n verbose : Boolean\n If True then an info message will be logged\n \"\"\"\n self.check_ctrls_initialized()\n\n inctimes = True\n if file_name is None:\n file_name = self.def_amps_fname\n if amps is None:\n amps = self.ctrl_amps\n if times is None:\n times = self.get_amp_times()\n else:\n if _is_string(times):\n if times.lower() == 'exclude':\n inctimes = False\n else:\n logger.warn(\"Unknown option for times '{}' \"\n \"when saving amplitudes\".format(times))\n times = self.get_amp_times()\n\n try:\n if inctimes:\n shp = amps.shape\n data = np.empty([shp[0], shp[1] + 1], dtype=float)\n data[:, 0] = times\n data[:, 1:] = amps\n else:\n data = amps\n\n np.savetxt(file_name, data, delimiter='\\t', fmt='%14.6g')\n\n if verbose:\n logger.info(\"Amplitudes saved to file: \" + file_name)\n except Exception as e:\n logger.error(\"Failed to save amplitudes due to underling \"\n \"error: {}\".format(e))\n\n def update_ctrl_amps(self, new_amps):\n \"\"\"\n Determine if any amplitudes have changed. 
If so, then mark the\n timeslots as needing recalculation\n The actual work is completed by the compare_amps method of the\n timeslot computer\n \"\"\"\n\n if self.log_level <= logging.DEBUG_INTENSE:\n logger.log(logging.DEBUG_INTENSE, \"Updating amplitudes...\\n\"\n \"Current control amplitudes:\\n\" + str(self.ctrl_amps) +\n \"\\n(potenially) new amplitudes:\\n\" + str(new_amps))\n\n self.tslot_computer.compare_amps(new_amps)\n\n def flag_system_changed(self):\n \"\"\"\n Flag evolution, fidelity and gradients as needing recalculation\n \"\"\"\n self.evo_current = False\n self.fid_computer.flag_system_changed()\n\n def get_drift_dim(self):\n \"\"\"\n Returns the size of the matrix that defines the drift dynamics\n that is assuming the drift is NxN, then this returns N\n \"\"\"\n if self.dyn_shape is None:\n self.refresh_drift_attribs()\n return self.dyn_shape[0]\n\n def refresh_drift_attribs(self):\n \"\"\"Reset the dyn_shape, dyn_dims and time_depend_drift attribs\"\"\"\n\n if isinstance(self.drift_dyn_gen, (list, tuple)):\n d0 = self.drift_dyn_gen[0]\n self.time_depend_drift = True\n else:\n d0 = self.drift_dyn_gen\n self.time_depend_drift = False\n\n if not isinstance(d0, Qobj):\n raise TypeError(\"Unable to determine drift attributes, \"\n \"because drift_dyn_gen is not Qobj (nor list of)\")\n\n self.dyn_shape = d0.shape\n self.dyn_dims = d0.dims\n\n def get_num_ctrls(self):\n \"\"\"\n calculate the of controls from the length of the control list\n sets the num_ctrls property, which can be used alternatively\n subsequently\n \"\"\"\n _func_deprecation(\"'get_num_ctrls' has been replaced by \"\n \"'num_ctrls' property\")\n return self.num_ctrls\n\n def _get_num_ctrls(self):\n if not self._ctrl_dyn_gen_checked:\n self.ctrl_dyn_gen = _check_ctrls_container(self.ctrl_dyn_gen)\n self._ctrl_dyn_gen_checked = True\n if isinstance(self.ctrl_dyn_gen, np.ndarray):\n self._num_ctrls = self.ctrl_dyn_gen.shape[1]\n self.time_depend_ctrl_dyn_gen = True\n else:\n self._num_ctrls = len(self.ctrl_dyn_gen)\n\n return self._num_ctrls\n\n @property\n def num_ctrls(self):\n \"\"\"\n calculate the of controls from the length of the control list\n sets the num_ctrls property, which can be used alternatively\n subsequently\n \"\"\"\n if self._num_ctrls is None:\n self._num_ctrls = self._get_num_ctrls()\n return self._num_ctrls\n\n @property\n def onto_evo_target(self):\n if self._onto_evo_target is None:\n self._get_onto_evo_target()\n\n if self._onto_evo_target_qobj is None:\n if isinstance(self._onto_evo_target, Qobj):\n self._onto_evo_target_qobj = self._onto_evo_target\n else:\n rev_dims = [self.sys_dims[1], self.sys_dims[0]]\n self._onto_evo_target_qobj = Qobj(self._onto_evo_target,\n dims=rev_dims)\n\n return self._onto_evo_target_qobj\n\n def get_owd_evo_target(self):\n _func_deprecation(\"'get_owd_evo_target' has been replaced by \"\n \"'onto_evo_target' property\")\n return self.onto_evo_target\n\n def _get_onto_evo_target(self):\n \"\"\"\n Get the inverse of the target.\n Used for calculating the 'onto target' evolution\n This is actually only relevant for unitary dynamics where\n the target.dag() is what is required\n However, for completeness, in general the inverse of the target\n operator is is required\n For state-to-state, the bra corresponding to the is required ket\n \"\"\"\n if self.target.shape[0] == self.target.shape[1]:\n #Target is operator\n targ = la.inv(self.target.full())\n if self.oper_dtype == Qobj:\n self._onto_evo_target = Qobj(targ)\n elif self.oper_dtype == np.ndarray:\n 
self._onto_evo_target = targ\n elif self.oper_dtype == sp.csr_matrix:\n self._onto_evo_target = sp.csr_matrix(targ)\n else:\n targ_cls = self._target.__class__\n self._onto_evo_target = targ_cls(targ)\n else:\n if self.oper_dtype == Qobj:\n self._onto_evo_target = self.target.dag()\n elif self.oper_dtype == np.ndarray:\n self._onto_evo_target = self.target.dag().full()\n elif self.oper_dtype == sp.csr_matrix:\n self._onto_evo_target = self.target.dag().data\n else:\n targ_cls = self._target.__class__\n self._onto_evo_target = targ_cls(self.target.dag().full())\n\n return self._onto_evo_target\n\n def combine_dyn_gen(self, k):\n \"\"\"\n Computes the dynamics generator for a given timeslot\n The is the combined Hamiltion for unitary systems\n \"\"\"\n _func_deprecation(\"'combine_dyn_gen' has been replaced by \"\n \"'_combine_dyn_gen'\")\n self._combine_dyn_gen(k)\n return self._dyn_gen(k)\n\n def _combine_dyn_gen(self, k):\n \"\"\"\n Computes the dynamics generator for a given timeslot\n The is the combined Hamiltion for unitary systems\n Also applies the phase (if any required by the propagation)\n \"\"\"\n if self.time_depend_drift:\n dg = self._drift_dyn_gen[k]\n else:\n dg = self._drift_dyn_gen\n for j in range(self._num_ctrls):\n if self.time_depend_ctrl_dyn_gen:\n dg = dg + self.ctrl_amps[k, j]*self._ctrl_dyn_gen[k, j]\n else:\n dg = dg + self.ctrl_amps[k, j]*self._ctrl_dyn_gen[j]\n\n self._dyn_gen[k] = dg\n if self.cache_phased_dyn_gen:\n self._phased_dyn_gen[k] = self._apply_phase(dg)\n\n def get_dyn_gen(self, k):\n \"\"\"\n Get the combined dynamics generator for the timeslot\n Not implemented in the base class. Choose a subclass\n \"\"\"\n _func_deprecation(\"'get_dyn_gen' has been replaced by \"\n \"'_get_phased_dyn_gen'\")\n return self._get_phased_dyn_gen(k)\n\n def _get_phased_dyn_gen(self, k):\n if self.dyn_gen_phase is None:\n return self._dyn_gen[k]\n else:\n if self._phased_dyn_gen is None:\n return self._apply_phase(self._dyn_gen[k])\n else:\n return self._phased_dyn_gen[k]\n\n def get_ctrl_dyn_gen(self, j):\n \"\"\"\n Get the dynamics generator for the control\n Not implemented in the base class. 
Choose a subclass\n \"\"\"\n _func_deprecation(\"'get_ctrl_dyn_gen' has been replaced by \"\n \"'_get_phased_ctrl_dyn_gen'\")\n return self._get_phased_ctrl_dyn_gen(0, j)\n\n def _get_phased_ctrl_dyn_gen(self, k, j):\n if self._phased_ctrl_dyn_gen is not None:\n if self.time_depend_ctrl_dyn_gen:\n return self._phased_ctrl_dyn_gen[k, j]\n else:\n return self._phased_ctrl_dyn_gen[j]\n else:\n if self.time_depend_ctrl_dyn_gen:\n if self.dyn_gen_phase is None:\n return self._ctrl_dyn_gen[k, j]\n else:\n return self._apply_phase(self._ctrl_dyn_gen[k, j])\n else:\n if self.dyn_gen_phase is None:\n return self._ctrl_dyn_gen[j]\n else:\n return self._apply_phase(self._ctrl_dyn_gen[j])\n\n @property\n def dyn_gen(self):\n \"\"\"\n List of combined dynamics generators (Qobj) for each timeslot\n \"\"\"\n if self._dyn_gen is not None:\n if self._dyn_gen_qobj is None:\n if self.oper_dtype == Qobj:\n self._dyn_gen_qobj = self._dyn_gen\n else:\n self._dyn_gen_qobj = [Qobj(dg, dims=self.dyn_dims)\n for dg in self._dyn_gen]\n return self._dyn_gen_qobj\n\n @property\n def prop(self):\n \"\"\"\n List of propagators (Qobj) for each timeslot\n \"\"\"\n if self._prop is not None:\n if self._prop_qobj is None:\n if self.oper_dtype == Qobj:\n self._prop_qobj = self._prop\n else:\n self._prop_qobj = [Qobj(dg, dims=self.dyn_dims)\n for dg in self._prop]\n return self._prop_qobj\n\n @property\n def prop_grad(self):\n \"\"\"\n Array of propagator gradients (Qobj) for each timeslot, control\n \"\"\"\n if self._prop_grad is not None:\n if self._prop_grad_qobj is None:\n if self.oper_dtype == Qobj:\n self._prop_grad_qobj = self._prop_grad\n else:\n self._prop_grad_qobj = np.empty(\n [self.num_tslots, self.num_ctrls],\n dtype=object)\n for k in range(self.num_tslots):\n for j in range(self.num_ctrls):\n self._prop_grad_qobj[k, j] = Qobj(\n self._prop_grad[k, j],\n dims=self.dyn_dims)\n return self._prop_grad_qobj\n\n def _get_prop_grad(self, k, j):\n if self.cache_prop_grad:\n prop_grad = self._prop_grad[k, j]\n else:\n prop_grad = self.prop_computer._compute_prop_grad(k, j,\n compute_prop = False)\n return prop_grad\n\n @property\n def evo_init2t(self):\n _attrib_deprecation(\n \"'evo_init2t' has been replaced by '_fwd_evo'\")\n return self._fwd_evo\n\n @property\n def fwd_evo(self):\n \"\"\"\n List of evolution operators (Qobj) from the initial to the given\n timeslot\n \"\"\"\n if self._fwd_evo is not None:\n if self._fwd_evo_qobj is None:\n if self.oper_dtype == Qobj:\n self._fwd_evo_qobj = self._fwd_evo\n else:\n self._fwd_evo_qobj = [self.initial]\n for k in range(1, self.num_tslots+1):\n self._fwd_evo_qobj.append(Qobj(self._fwd_evo[k],\n dims=self.sys_dims))\n return self._fwd_evo_qobj\n\n def _get_full_evo(self):\n return self._fwd_evo[self._num_tslots]\n\n @property\n def full_evo(self):\n \"\"\"Full evolution - time evolution at final time slot\"\"\"\n return self.fwd_evo[self.num_tslots]\n\n @property\n def evo_t2end(self):\n _attrib_deprecation(\n \"'evo_t2end' has been replaced by '_onwd_evo'\")\n return self._onwd_evo\n\n @property\n def onwd_evo(self):\n \"\"\"\n List of evolution operators (Qobj) from the initial to the given\n timeslot\n \"\"\"\n if self._onwd_evo is not None:\n if self._onwd_evo_qobj is None:\n if self.oper_dtype == Qobj:\n self._onwd_evo_qobj = self._fwd_evo\n else:\n self._onwd_evo_qobj = [Qobj(dg, dims=self.sys_dims)\n for dg in self._onwd_evo]\n return self._onwd_evo_qobj\n\n @property\n def evo_t2targ(self):\n _attrib_deprecation(\n \"'evo_t2targ' has been replaced by 
'_onto_evo'\")\n return self._onto_evo\n\n @property\n def onto_evo(self):\n \"\"\"\n List of evolution operators (Qobj) from the initial to the given\n timeslot\n \"\"\"\n if self._onto_evo is not None:\n if self._onto_evo_qobj is None:\n if self.oper_dtype == Qobj:\n self._onto_evo_qobj = self._onto_evo\n else:\n self._onto_evo_qobj = []\n for k in range(0, self.num_tslots):\n self._onto_evo_qobj.append(Qobj(self._onto_evo[k],\n dims=self.sys_dims))\n self._onto_evo_qobj.append(self.onto_evo_target)\n\n return self._onto_evo_qobj\n\n def compute_evolution(self):\n \"\"\"\n Recalculate the time evolution operators\n Dynamics generators (e.g. Hamiltonian) and\n prop (propagators) are calculated as necessary\n Actual work is completed by the recompute_evolution method\n of the timeslot computer\n \"\"\"\n\n # Check if values are already current, otherwise calculate all values\n if not self.evo_current:\n if self.log_level <= logging.DEBUG_VERBOSE:\n logger.log(logging.DEBUG_VERBOSE, \"Computing evolution\")\n self.tslot_computer.recompute_evolution()\n self.evo_current = True\n return True\n else:\n return False\n\n def _ensure_decomp_curr(self, k):\n \"\"\"\n Checks to see if the diagonalisation has been completed since\n the last update of the dynamics generators\n (after the amplitude update)\n If not then the diagonlisation is completed\n \"\"\"\n if self._decomp_curr is None:\n raise errors.UsageError(\"Decomp lists have not been created\")\n if not self._decomp_curr[k]:\n self._spectral_decomp(k)\n\n def _spectral_decomp(self, k):\n \"\"\"\n Calculate the diagonalization of the dynamics generator\n generating lists of eigenvectors, propagators in the diagonalised\n basis, and the 'factormatrix' used in calculating the propagator\n gradient\n Not implemented in this base class, because the method is specific\n to the matrix type\n \"\"\"\n raise errors.UsageError(\"Decomposition cannot be completed by \"\n \"this class. Try a(nother) subclass\")\n\n def _is_unitary(self, A):\n \"\"\"\n Checks whether operator A is unitary\n A can be either Qobj or ndarray\n \"\"\"\n if isinstance(A, Qobj):\n unitary = np.allclose(np.eye(A.shape[0]), A*A.dag().full(),\n atol=self.unitarity_tol)\n else:\n unitary = np.allclose(np.eye(len(A)), A.dot(A.T.conj()),\n atol=self.unitarity_tol)\n\n return unitary\n\n def _calc_unitary_err(self, A):\n if isinstance(A, Qobj):\n err = np.sum(abs(np.eye(A.shape[0]) - A*A.dag().full()))\n else:\n err = np.sum(abs(np.eye(len(A)) - A.dot(A.T.conj())))\n\n return err\n\n def unitarity_check(self):\n \"\"\"\n Checks whether all propagators are unitary\n \"\"\"\n for k in range(self.num_tslots):\n if not self._is_unitary(self._prop[k]):\n logger.warning(\n \"Progator of timeslot {} is not unitary\".format(k))\n\n\nclass DynamicsGenMat(Dynamics):\n \"\"\"\n This sub class can be used for any system where no additional\n operator is applied to the dynamics generator before calculating\n the propagator, e.g. classical dynamics, Lindbladian\n \"\"\"\n def reset(self):\n Dynamics.reset(self)\n self.id_text = 'GEN_MAT'\n self.apply_params()\n\nclass DynamicsUnitary(Dynamics):\n \"\"\"\n This is the subclass to use for systems with dynamics described by\n unitary matrices. E.g. 
closed systems with Hermitian Hamiltonians\n Note a matrix diagonalisation is used to compute the exponent\n The eigen decomposition is also used to calculate the propagator gradient.\n The method is taken from DYNAMO (see file header)\n\n Attributes\n ----------\n drift_ham : Qobj\n This is the drift Hamiltonian for unitary dynamics\n It is mapped to drift_dyn_gen during initialize_controls\n\n ctrl_ham : List of Qobj\n These are the control Hamiltonians for unitary dynamics\n It is mapped to ctrl_dyn_gen during initialize_controls\n\n H : List of Qobj\n The combined drift and control Hamiltonians for each timeslot\n These are the dynamics generators for unitary dynamics.\n It is mapped to dyn_gen during initialize_controls\n \"\"\"\n\n def reset(self):\n Dynamics.reset(self)\n self.id_text = 'UNIT'\n self.drift_ham = None\n self.ctrl_ham = None\n self.H = None\n self._dyn_gen_phase = -1j\n self._phase_application = 'preop'\n self.apply_params()\n\n def _create_computers(self):\n \"\"\"\n Create the default timeslot, fidelity and propagator computers\n \"\"\"\n # The time slot computer. By default it is set to _UpdateAll\n # can be set to _DynUpdate in the configuration\n # (see class file for details)\n if self.config.tslot_type == 'DYNAMIC':\n self.tslot_computer = tslotcomp.TSlotCompDynUpdate(self)\n else:\n self.tslot_computer = tslotcomp.TSlotCompUpdateAll(self)\n\n # set the default fidelity computer\n self.fid_computer = fidcomp.FidCompUnitary(self)\n # set the default propagator computer\n self.prop_computer = propcomp.PropCompDiag(self)\n\n def initialize_controls(self, amplitudes, init_tslots=True):\n # Either the _dyn_gen or _ham names can be used\n # This assumes that one or other has been set in the configuration\n\n self._map_dyn_gen_to_ham()\n Dynamics.initialize_controls(self, amplitudes, init_tslots=init_tslots)\n #self.H = self._dyn_gen\n\n def _map_dyn_gen_to_ham(self):\n if self.drift_dyn_gen is None:\n self.drift_dyn_gen = self.drift_ham\n else:\n self.drift_ham = self.drift_dyn_gen\n\n if self.ctrl_dyn_gen is None:\n self.ctrl_dyn_gen = self.ctrl_ham\n else:\n self.ctrl_ham = self.ctrl_dyn_gen\n\n self._dyn_gen_mapped = True\n\n @property\n def num_ctrls(self):\n if not self._dyn_gen_mapped:\n self._map_dyn_gen_to_ham()\n if self._num_ctrls is None:\n self._num_ctrls = self._get_num_ctrls()\n return self._num_ctrls\n\n def _get_onto_evo_target(self):\n \"\"\"\n Get the adjoint of the target.\n Used for calculating the 'backward' evolution\n \"\"\"\n if self.oper_dtype == Qobj:\n self._onto_evo_target = self.target.dag()\n else:\n self._onto_evo_target = self._target.T.conj()\n return self._onto_evo_target\n\n def _spectral_decomp(self, k):\n \"\"\"\n Calculates the diagonalization of the dynamics generator\n generating lists of eigenvectors, propagators in the diagonalised\n basis, and the 'factormatrix' used in calculating the propagator\n gradient\n \"\"\"\n\n if self.oper_dtype == Qobj:\n H = self._dyn_gen[k]\n # Returns eigenvalues as array (row)\n # and eigenvectors as rows of an array\n eig_val, eig_vec = sp_eigs(H.data, H.isherm,\n sparse=self.sparse_eigen_decomp)\n eig_vec = eig_vec.T\n\n elif self.oper_dtype == np.ndarray:\n H = self._dyn_gen[k]\n # returns row vector of eigenvals, columns with the eigenvecs\n eig_val, eig_vec = eigh(H)\n else:\n if sparse:\n H = self._dyn_gen[k].toarray()\n else:\n H = self._dyn_gen[k]\n # returns row vector of eigenvals, columns with the eigenvecs\n eig_val, eig_vec = eigh(H)\n\n # assuming H is an nxn matrix, find n\n n = 
self.get_drift_dim()\n\n # Calculate the propagator in the diagonalised basis\n eig_val_tau = -1j*eig_val*self.tau[k]\n prop_eig = np.exp(eig_val_tau)\n\n # Generate the factor matrix through the differences\n # between each of the eigenvectors and the exponentiations\n # create nxn matrix where each eigen val is repeated n times\n # down the columns\n o = np.ones([n, n])\n eig_val_cols = eig_val_tau*o\n # calculate all the differences by subtracting it from its transpose\n eig_val_diffs = eig_val_cols - eig_val_cols.T\n # repeat for the propagator\n prop_eig_cols = prop_eig*o\n prop_eig_diffs = prop_eig_cols - prop_eig_cols.T\n # the factor matrix is the elementwise quotient of the\n # differeneces between the exponentiated eigen vals and the\n # differences between the eigen vals\n # need to avoid division by zero that would arise due to denegerate\n # eigenvalues and the diagonals\n degen_mask = np.abs(eig_val_diffs) < self.fact_mat_round_prec\n eig_val_diffs[degen_mask] = 1\n factors = prop_eig_diffs / eig_val_diffs\n # for degenerate eigenvalues the factor is just the exponent\n factors[degen_mask] = prop_eig_cols[degen_mask]\n\n # Store eigenvectors, propagator and factor matric\n # for use in propagator computations\n self._decomp_curr[k] = True\n if isinstance(factors, np.ndarray):\n self._dyn_gen_factormatrix[k] = factors\n else:\n self._dyn_gen_factormatrix[k] = np.array(factors)\n\n if self.oper_dtype == Qobj:\n self._prop_eigen[k] = Qobj(np.diagflat(prop_eig),\n dims=self.dyn_dims)\n self._dyn_gen_eigenvectors[k] = Qobj(eig_vec,\n dims=self.dyn_dims)\n # The _dyn_gen_eigenvectors_adj list is not used in\n # memory optimised modes\n if self._dyn_gen_eigenvectors_adj is not None:\n self._dyn_gen_eigenvectors_adj[k] = \\\n self._dyn_gen_eigenvectors[k].dag()\n else:\n self._prop_eigen[k] = np.diagflat(prop_eig)\n self._dyn_gen_eigenvectors[k] = eig_vec\n # The _dyn_gen_eigenvectors_adj list is not used in\n # memory optimised modes\n if self._dyn_gen_eigenvectors_adj is not None:\n self._dyn_gen_eigenvectors_adj[k] = \\\n self._dyn_gen_eigenvectors[k].conj().T\n\n def _get_dyn_gen_eigenvectors_adj(self, k):\n # The _dyn_gen_eigenvectors_adj list is not used in\n # memory optimised modes\n if self._dyn_gen_eigenvectors_adj is not None:\n return self._dyn_gen_eigenvectors_adj[k]\n else:\n if self.oper_dtype == Qobj:\n return self._dyn_gen_eigenvectors[k].dag()\n else:\n return self._dyn_gen_eigenvectors[k].conj().T\n\n def check_unitarity(self):\n \"\"\"\n Checks whether all propagators are unitary\n For propagators found not to be unitary, the potential underlying\n causes are investigated.\n \"\"\"\n for k in range(self.num_tslots):\n prop_unit = self._is_unitary(self._prop[k])\n if not prop_unit:\n logger.warning(\n \"Progator of timeslot {} is not unitary\".format(k))\n if not prop_unit or self.unitarity_check_level > 1:\n # Check Hamiltonian\n H = self._dyn_gen[k]\n if isinstance(H, Qobj):\n herm = H.isherm\n else:\n diff = np.abs(H.T.conj() - H)\n herm = False if np.any(diff > settings.atol) else True\n eigval_unit = self._is_unitary(self._prop_eigen[k])\n eigvec_unit = self._is_unitary(self._dyn_gen_eigenvectors[k])\n if self._dyn_gen_eigenvectors_adj is not None:\n eigvecadj_unit = self._is_unitary(\n self._dyn_gen_eigenvectors_adj[k])\n else:\n eigvecadj_unit = None\n msg = (\"prop unit: {}; H herm: {}; \"\n \"eigval unit: {}; eigvec unit: {}; \"\n \"eigvecadj_unit: {}\".format(\n prop_unit, herm, eigval_unit, eigvec_unit,\n eigvecadj_unit))\n logger.info(msg)\n\nclass 
DynamicsSymplectic(Dynamics):\n \"\"\"\n Symplectic systems\n This is the subclass to use for systems where the dynamics is described\n by symplectic matrices, e.g. coupled oscillators, quantum optics\n\n Attributes\n ----------\n omega : array[drift_dyn_gen.shape]\n matrix used in the calculation of propagators (time evolution)\n with symplectic systems.\n\n \"\"\"\n\n def reset(self):\n Dynamics.reset(self)\n self.id_text = 'SYMPL'\n self._omega = None\n self._omega_qobj = None\n self._phase_application = 'postop'\n self.grad_exact = True\n self.apply_params()\n\n def _create_computers(self):\n \"\"\"\n Create the default timeslot, fidelity and propagator computers\n \"\"\"\n # The time slot computer. By default it is set to _UpdateAll\n # can be set to _DynUpdate in the configuration\n # (see class file for details)\n if self.config.tslot_type == 'DYNAMIC':\n self.tslot_computer = tslotcomp.TSlotCompDynUpdate(self)\n else:\n self.tslot_computer = tslotcomp.TSlotCompUpdateAll(self)\n\n self.prop_computer = propcomp.PropCompFrechet(self)\n self.fid_computer = fidcomp.FidCompTraceDiff(self)\n\n @property\n def omega(self):\n if self._omega is None:\n self._get_omega()\n if self._omega_qobj is None:\n self._omega_qobj = Qobj(self._omega, dims=self.dyn_dims)\n return self._omega_qobj\n\n def _get_omega(self):\n if self._omega is None:\n n = self.get_drift_dim() // 2\n omg = sympl.calc_omega(n)\n if self.oper_dtype == Qobj:\n self._omega = Qobj(omg, dims=self.dyn_dims)\n self._omega_qobj = self._omega\n elif self.oper_dtype == sp.csr_matrix:\n self._omega = sp.csr_matrix(omg)\n else:\n self._omega = omg\n return self._omega\n\n def _set_phase_application(self, value):\n Dynamics._set_phase_application(self, value)\n if self._evo_initialized:\n phase = self._get_dyn_gen_phase()\n if phase is not None:\n self._dyn_gen_phase = phase\n\n def _get_dyn_gen_phase(self):\n if self._phase_application == 'postop':\n phase = -self._get_omega()\n elif self._phase_application == 'preop':\n phase = self._get_omega()\n elif self._phase_application == 'custom':\n phase = None\n # Assume phase set by user\n else:\n raise ValueError(\"No option for phase_application \"\n \"'{}'\".format(self._phase_application))\n return phase\n\n @property\n def dyn_gen_phase(self):\n r\"\"\"\n The phasing operator for the symplectic group generators\n usually refered to as \\Omega\n By default this is applied as 'postop' dyn_gen*-\\Omega\n If phase_application is 'preop' it is applied as \\Omega*dyn_gen\n \"\"\"\n # Cannot be calculated until the dyn_shape is set\n # that is after the drift dyn gen has been set.\n if self._dyn_gen_phase is None:\n self._dyn_gen_phase = self._get_dyn_gen_phase()\n return self._dyn_gen_phase\n" ]
[ [ "scipy.sparse.coo_matrix", "numpy.ones_like", "numpy.sqrt", "numpy.abs", "scipy.sparse.csr_matrix", "numpy.argmin", "numpy.zeros_like", "numpy.unravel_index" ], [ "numpy.testing.assert_equal", "numpy.random.random", "numpy.abs", "numpy.linspace", "numpy.logspace", "numpy.arange", "numpy.asfortranarray", "numpy.cos", "numpy.sin", "numpy.all", "numpy.mean", "numpy.random.rand", "numpy.testing.assert_allclose", "numpy.array", "numpy.exp" ], [ "numpy.abs", "numpy.diagflat", "numpy.eye", "scipy.sparse.csr_matrix", "numpy.ones", "numpy.any", "numpy.savetxt", "numpy.array", "numpy.exp", "numpy.zeros", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
dfarrow0/flu-contest
[ "8356cf48910a76d2643d105651342288076a9377" ]
[ "src/epicast/fc_epicast_analysis.py" ]
[ "from statistics import median_low\nimport mysql.connector\nimport numpy as np\nimport scipy.stats\nfrom ..forecasters.fc_abstract import Forecaster\nfrom delphi.epidata.client.delphi_epidata import Epidata\nimport delphi.operations.secrets as secrets\nimport delphi.utils.epiweek as flu\nfrom ..utils.forecast_type import ForecastType\n\n\nclass Epicast(Forecaster):\n\n def __init__(self, test_season, locations, forecast_type, verbose=False, users=None):\n super().__init__('epicast', test_season, locations, forecast_type, smooth_weeks_bw=0, smooth_wili_bw=0)\n self.verbose = verbose\n self.users = users\n\n @staticmethod\n def fit_distribution(values, num_bins, bin_size, first_value, unbounded, num_users):\n values = [v for v in values if v is not None]\n if len(values) == 0:\n values = [0]\n mu = np.median(values)\n if len(values) == 1:\n sigma = 0\n else:\n sigma = np.std(values, ddof=1)\n sigma = max(sigma, 1e-3)\n df = max(1, num_users - 1)\n cdf = scipy.stats.t(df, mu, sigma).cdf\n dist = []\n for i in range(num_bins):\n a = first_value + i * bin_size\n if unbounded and i == num_bins - 1:\n b = float('inf')\n else:\n b = a + bin_size\n dist.append(cdf(b) - cdf(a))\n dist = np.array(dist)\n mass = sum(dist)\n if mass > 0:\n dist /= mass\n return dist\n\n @staticmethod\n def get_week_forecast(num_users):\n def _forecast(first_epiweek, num_bins, indices, uniform_weight, smooth_bw, allow_none):\n if smooth_bw > 0:\n print(' [EC] warning: epicast doesnt smooth week bins, but smooth_bw = %.3f' % smooth_bw)\n num_none = indices.count(None)\n if num_none > 0 and not allow_none:\n raise Exception('target does not allow None, but None given')\n dist = Epicast.fit_distribution(indices, num_bins, 1, -0.5, False, num_users)\n dist *= len(indices) - num_none\n extra = [num_none] if allow_none else []\n dist = Forecaster.Utils.normalize(list(dist) + extra)\n dist = Forecaster.Utils.blend(dist, uniform_weight)\n if allow_none:\n dist, none = dist[:-1], dist[-1]\n else:\n none = None\n possibilities = [i for i in indices if i is not None]\n if len(possibilities) == 0:\n possibilities = [0]\n point = flu.add_epiweeks(first_epiweek, int(np.median(possibilities)))\n return (dist, none, point)\n return _forecast\n\n @staticmethod\n def get_wili_forecast(num_users):\n def _forecast(bin_size, num_bins, wili, uniform_weight, smooth_bw):\n if smooth_bw > 0:\n print(' [EC] warning: epicast doesnt smooth wILI bins, but smooth_bw = %.3f' % smooth_bw)\n dist = Epicast.fit_distribution(wili, num_bins, bin_size, 0, True, num_users)\n dist = Forecaster.Utils.normalize(dist)\n dist = Forecaster.Utils.blend(dist, uniform_weight)\n point = np.median(wili)\n return (dist, point)\n return _forecast\n\n\n def extractUsers(self, region, epiweek_now):\n self.cur = self.cnx.cursor(buffered=True)\n\n # 1. 
load forecast, with dimensions [location, user, ew2 (+1, 2, 3, 4)]\n # Get all user_id\n self.cur.execute(\"select distinct(user_id) from ec_fluv_forecast_mturk where epiweek_now = %d\" % epiweek_now)\n num_users = 0\n user_ids = []\n for user_id in self.cur:\n user_id = user_id[0]\n if user_id not in [45, 312, 539, 670, 145, 410, 411, 1, 2, 3, 4, 5, 6, 7, 8]:\n num_users += 1\n user_ids.append(user_id)\n\n # Get forecasts\n forecast = {}\n region_ids = [i for i in range(1, 24)] + [i for i in range(25, 30)] + [i for i in range(31, 62)]\n region_user_map = {}\n for r in region_ids:\n forecast[r] = {}\n region_user_map[r] = {}\n for ew2 in range(epiweek_now + 1, epiweek_now + 5):\n forecast[r][ew2] = {}\n\n self.cur.execute(\"\"\"\n select f.user_id, f.region_id, f.epiweek_now, f.epiweek, f.wili from ec_fluv_forecast_mturk f \n JOIN ec_fluv_submissions_mturk s ON f.user_id = s.user_id AND f.region_id = s.region_id AND\n f.epiweek_now = s.epiweek_now where f.epiweek_now = %d and f.epiweek <= 201920\"\"\" % epiweek_now)\n\n num_predictions = 0\n for (u, r, ew1, ew2, wili) in self.cur:\n if ew1 == epiweek_now:\n try:\n forecast[r][ew2][u] = wili\n region_user_map[r][u] = 1\n num_predictions += 1\n except:\n pass\n\n # 2. for each location and epiweek, compute the median\n medians = {}\n for r in region_ids:\n medians[r] = {}\n for ew2 in range(epiweek_now + 1, epiweek_now + 5):\n # print('forecast for this region and ew2: ', list(forecast[r][ew2].keys()))\n medians[r][ew2] = np.median(list(forecast[r][ew2].values()))\n\n # 3. for each location, for each user, get the sum of distance of the 4 weeks' forecasts\n errors = {}\n for r in region_ids:\n errors[r] = {}\n for user_id in region_user_map[r]:\n errors[r][user_id] = 0\n for ew2 in range(epiweek_now + 1, epiweek_now + 5):\n errors[r][user_id] += abs(medians[r][ew2] - forecast[r][ew2][user_id])\n\n # 4. 
for each region, rank the users and take the upper half\n topWorkers = {}\n for r in region_ids:\n ranks = []\n topWorkers[r] = []\n for user_id in region_user_map[r]:\n error = errors[r][user_id]\n ranks.append({'user_id': user_id, 'error': error})\n sorted_users = sorted(ranks, key=lambda x: x['error'])\n numTopHalf = len(sorted_users) // 2\n tmp = sorted_users[:numTopHalf]\n for worker in tmp:\n topWorkers[r].append(worker['user_id'])\n\n # get region id from region (which is fluview_name)\n region = \"'\" + region + \"'\"\n self.cur.execute(\"select id from ec_fluv_regions where fluview_name = %s\" % region)\n print(self.cur)\n for id in self.cur:\n region = id[0]\n return topWorkers[region]\n\n\n def fetch_submissions(self, region, epiweek_now):\n topUsers = self.extractUsers(region, epiweek_now)\n print(topUsers)\n final_week = flu.join_epiweek(self.test_season + 1, 20)\n self.cur = self.cnx.cursor()\n self.cur.execute(\"\"\"\n SELECT\n u.`id` `user_id`, f.`epiweek`, f.`wili`\n FROM (\n SELECT\n u.*\n FROM\n `ec_fluv_users_mturk_2019` u\n JOIN\n `ec_fluv_defaults` d\n ON\n TRUE\n LEFT JOIN\n `ec_fluv_user_preferences` p\n ON\n p.`user_id` = u.`id` AND p.`name` = d.`name`\n WHERE\n d.`name` = '_debug' AND coalesce(p.`value`, d.`value`) = '0'\n ) u\n JOIN\n `ec_fluv_submissions_mturk` s\n ON\n s.`user_id` = u.`id`\n JOIN\n `ec_fluv_forecast_mturk` f\n ON\n f.`user_id` = u.`id` AND f.`region_id` = s.`region_id` AND f.`epiweek_now` = s.`epiweek_now`\n JOIN\n `ec_fluv_regions` r\n ON\n r.`id` = s.`region_id`\n WHERE\n r.`fluview_name` = %s AND s.`epiweek_now` = %s AND f.`epiweek` <= %s AND f.`wili` > 0\n ORDER BY\n u.`id` ASC, f.`epiweek` ASC\n \"\"\", (region, epiweek_now, final_week))\n submissions = {}\n for (user, epiweek, wili) in self.cur:\n if self.users is not None and user not in self.users:\n continue\n # only get performance from top users\n if user in topUsers:\n if user not in submissions:\n submissions[user] = []\n submissions[user].append(wili)\n self.cur.close()\n curves = []\n expected_weeks = flu.delta_epiweeks(epiweek_now, final_week)\n for user in submissions:\n if len(submissions[user]) != expected_weeks:\n print(' [EC] warning: missing data in user submission [%d|%s|%d]' % (user, region, epiweek_now))\n else:\n curves.append(submissions[user])\n\n print(region, curves)\n return curves\n\n def _init(self):\n if self.test_season == 2014:\n db = 'epicast'\n elif self.test_season >= 2015:\n db = 'epicast2'\n else:\n raise Exception('invalid epicast season [%d]' % self.test_season)\n u, p = secrets.db.epi\n self.cnx = mysql.connector.connect(user=u, password=p, database=db)\n\n def _fini(self):\n self.cnx.commit()\n self.cnx.close()\n\n def _train(self, region):\n pass\n\n def _forecast(self, region, epiweek):\n # season setup and sanity check\n ew1 = flu.join_epiweek(self.test_season, 40)\n ew2 = flu.join_epiweek(self.test_season + 1, 20)\n if not ew1 <= epiweek <= ew2:\n raise Exception('`epiweek` outside of `test_season`')\n # get past values (left half) from the Epidata API\n epidata = Forecaster.Utils.decode(Epidata.fluview(region, Epidata.range(ew1, epiweek), issues=epiweek))\n pinned = [row['wili'] for row in epidata]\n if len(pinned) != flu.delta_epiweeks(ew1, epiweek) + 1:\n raise Exception('missing ILINet data')\n # get the user submissions (right half) from the database\n submissions = self.fetch_submissions(region, epiweek)\n self._num_users = len(submissions)\n print(' [EC] %d users found for %s on %d' % (len(submissions), region, epiweek))\n # concatenate 
observed data and user submissions\n return [pinned + sub for sub in submissions]\n" ]
[ [ "numpy.std", "numpy.median", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
C3RV1/LaytonEditor
[ "51e1a9a372a8acdaa4183ae008235a721dc56cdc" ]
[ "formats/sound/sample_transform.py" ]
[ "import math\nimport numpy as np\n\n\ndef change_sample_rate(buffer: np.ndarray, current, target) -> np.ndarray:\n shape = [0, 0]\n shape[0] = buffer.shape[0]\n\n # RATEo = SAMPLESo\n # RATEm = (SAMPLESo / RATEo) * RATEm\n extend = target / current\n shape[1] = int(math.ceil(buffer.shape[1] * extend))\n converted = np.zeros(shape, dtype=buffer.dtype)\n\n for channel in range(shape[0]):\n for dst_i in range(shape[1]):\n converted[channel][dst_i] = buffer[channel][int(dst_i // extend)]\n\n return converted\n\ndef change_channels(buffer: np.ndarray, target: int) -> np.ndarray:\n converted = np.ndarray(shape=(target, buffer.shape[1]), dtype=buffer.dtype)\n for i in range(target):\n if i < buffer.shape[0]:\n converted[i] = buffer[i]\n else:\n converted[i] = buffer[-1]\n return converted\n" ]
[ [ "numpy.zeros", "numpy.ndarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sunmengnan/city_brain
[ "478f0b974f4491b4201956f37b83ce6860712bc8" ]
[ "algorithms/02-edge-subdivision/osmnx_hz/district.py" ]
[ "import pandas as pd\nimport osmnx\nimport numpy as np\n\nfix = {'西湖区,杭州市,浙江省,中国': 2}\ncity_query = [\n '杭州市,浙江省,中国',\n]\ndistrict_query = [\n '上城区,杭州市,浙江省,中国',\n '下城区,杭州市,浙江省,中国',\n '江干区,杭州市,浙江省,中国',\n '西湖区,杭州市,浙江省,中国',\n '拱墅区,杭州市,浙江省,中国',\n '滨江区,杭州市,浙江省,中国',\n]\n\n\ndef query_str_to_dic(query_str):\n result = query_str.split(',')\n if len(result) == 3:\n result.insert(0, '')\n query_dic = {\n 'district': result[0],\n 'city': result[1],\n 'province': result[2],\n }\n return query_dic\n\n\ndef process_query(q):\n query_dic = query_str_to_dic(q)\n limit = fix.get(q, 1)\n nominatim_response = osmnx.osm_polygon_download(q, limit=limit)\n response_json = nominatim_response[limit - 1]\n result_dic = {}\n result_dic.update(response_json)\n result_dic.update(query_dic)\n result_dic['q'] = q\n return result_dic\n\n\ndistrict_df = pd.DataFrame()\nq_result_list = []\nfor q in district_query:\n q_result = process_query(q)\n q_result_list.append(q_result)\ndistrict_df = pd.DataFrame(q_result_list)\nprint(district_df)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
shun60s/impulse-response
[ "4bdf8ef671ed0b55afd452a12b43f6fde6cdf3ac" ]
[ "ola_convolve.py" ]
[ "#coding:utf-8\r\n\r\n# overlap-add convolve with impulse response waveform\r\n\r\n\r\nimport sys\r\nimport os\r\nimport argparse\r\nimport numpy as np\r\nfrom scipy import signal\r\nfrom scipy.io.wavfile import read as wavread\r\nfrom scipy.io.wavfile import write as wavwrite\r\n\r\n# Check version\r\n# Python 3.6.4 on win32 (Windows 10)\r\n# numpy 1.16.3\r\n# scipy 1.4.1\r\n\r\ndef load_wav( path0, force_mono=False):\r\n # return \r\n # yg: wav data\r\n # sr: sampling rate\r\n try:\r\n sr, y = wavread(path0)\r\n except:\r\n print ('error: wavread ', path0)\r\n sys.exit()\r\n else:\r\n yg= y / (2 ** 15)\r\n if force_mono :\r\n if yg.ndim == 2: # if stereo\r\n yg= np.average(yg, axis=1)\r\n \r\n print ('file ', path0)\r\n print ('sampling rate ', sr)\r\n print ('length ', yg.shape)\r\n print ('yg.max', np.amax( np.abs(yg)))\r\n return yg,sr\r\n \r\ndef load_wav32( path0, wave_len, yg_in):\r\n #\r\n # wave_len: impluse effective length time [sec]\r\n # return \r\n # yg: wav data (stereo)\r\n # sr: sampling rate\r\n try:\r\n sr, y = wavread(path0)\r\n except:\r\n print ('error: wavread ', path0)\r\n sys.exit()\r\n else:\r\n len0= int(wave_len * sr)\r\n yg= y[sr : sr+len0] # / (2 ** 31)\r\n \r\n if yg_in.ndim == 2:\r\n yg2=np.hstack((yg,yg)).reshape( 2, len(yg) ).T\r\n else:\r\n yg2=yg.copy()\r\n \r\n print ('file ', path0)\r\n print ('sampling rate ', sr)\r\n print ('yg2.shape', yg2.shape)\r\n print ('yg.max', np.amax( np.abs(yg)), yg[0],yg[-1])\r\n return yg2, sr\r\n\r\ndef save_wav( path0, data, sr=44100, normalize=False):\r\n #\r\n print ('file ', path0)\r\n \r\n amplitude = np.iinfo(np.int16).max\r\n max_data = np.amax(np.abs(data)) # normalize, max level is 16bit full bit\r\n if max_data < (1.0 / amplitude):\r\n max_data=1.0\r\n \r\n try:\r\n if normalize :\r\n wavwrite(path0, sr, np.array( (amplitude / max_data) * data , dtype=np.int16))\r\n else:\r\n wavwrite(path0, sr, np.array( amplitude * data , dtype=np.int16))\r\n except:\r\n print ('error: wavwrite ', path0)\r\n sys.exit()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n #\r\n parser = argparse.ArgumentParser(description='overlap-add convolve with impulse response waveform')\r\n parser.add_argument('--wav_file', '-w', default='test.wav', help='wav file name(16bit)')\r\n parser.add_argument('--wav_32_file', '-i', default='impulse_1sec_100_1sec_44100-TwoTube-output-rtwdf.wav', help='impulse response wav file name (mono 32bit)')\r\n args = parser.parse_args()\r\n \r\n \r\n path0= args.wav_file\r\n # overwrite path0\r\n # path0='test_882.wav'\r\n yg,sr= load_wav(path0)\r\n \r\n path2= args.wav_32_file\r\n # overwrite path2\r\n # path2='impulse_1sec_10_1sec_88200-output-rtwdf.wav\r\n yg2,sr2= load_wav32(path2, 0.150, yg)\r\n \r\n # overlap-add convolve with impulse response waveform\r\n out1= signal.oaconvolve( yg, yg2, axes=0) # need scipy > 1.4.1\r\n # set output file name\r\n path_out0= os.path.splitext(os.path.basename(path0))[0] + '_overlapadd_out.wav'\r\n save_wav( path_out0, out1, sr, normalize=True)\r\n" ]
[ [ "numpy.hstack", "numpy.abs", "numpy.iinfo", "scipy.signal.oaconvolve", "numpy.array", "numpy.average", "scipy.io.wavfile.read" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.7", "1.8" ], "tensorflow": [] } ]
erikolofsson/scrypted
[ "39016a617464003cac13719a426eefcc2421e51a" ]
[ "plugins/opencv/src/opencv/__init__.py" ]
[ "from __future__ import annotations\nfrom time import sleep\nfrom detect import DetectionSession, DetectPlugin\nfrom typing import Any, List\nimport numpy as np\nimport cv2\nimport imutils\nfrom gi.repository import GLib, Gst\nfrom scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected\n\nclass OpenCVDetectionSession(DetectionSession):\n cap: cv2.VideoCapture\n previous_frame: Any\n\n def __init__(self) -> None:\n super().__init__()\n self.previous_frame = None\n self.cap = None\n\ndefaultThreshold = 25\ndefaultArea = 2000\ndefaultInterval = 250\n\nclass OpenCVPlugin(DetectPlugin):\n def __init__(self, nativeId: str | None = None):\n super().__init__(nativeId=nativeId)\n self.color2Gray = None\n self.pixelFormat = \"I420\"\n self.pixelFormatChannelCount = 1\n\n if True:\n self.retainAspectRatio = False\n self.color2Gray = None\n self.pixelFormat = \"I420\"\n self.pixelFormatChannelCount = 1\n else:\n self.retainAspectRatio = True\n self.color2Gray = cv2.COLOR_BGRA2GRAY\n self.pixelFormat = \"BGRA\"\n self.pixelFormatChannelCount = 4\n\n async def getDetectionModel(self) -> ObjectDetectionModel:\n d: ObjectDetectionModel = {\n 'name': '@scrypted/opencv',\n 'classes': ['motion'],\n }\n settings = [\n {\n 'title': \"Motion Area\",\n 'description': \"The area size required to trigger motion. Higher values (larger areas) are less sensitive. Setting this to 0 will output all matches into the console.\",\n 'value': defaultArea,\n 'key': 'area',\n 'placeholder': defaultArea,\n 'type': 'number',\n },\n {\n 'title': \"Motion Threshold\",\n 'description': \"The threshold required to consider a pixel changed. Higher values (larger changes) are less sensitive.\",\n 'value': defaultThreshold,\n 'key': 'threshold',\n 'placeholder': defaultThreshold,\n 'type': 'number',\n },\n {\n 'title': \"Frame Analysis Interval\",\n 'description': \"The number of milliseconds to wait between motion analysis.\",\n 'value': defaultInterval,\n 'key': 'interval',\n 'placeholder': defaultInterval,\n 'type': 'number',\n },\n ]\n d['settings'] = settings\n return d\n\n def get_pixel_format(self):\n return self.pixelFormat\n\n def parse_settings(self, settings: Any):\n area = defaultArea\n threshold = defaultThreshold\n interval = defaultInterval\n if settings:\n area = float(settings.get('area', area))\n threshold = int(settings.get('threshold', threshold))\n interval = float(settings.get('interval', interval))\n return area, threshold, interval\n\n def detect(self, detection_session: OpenCVDetectionSession, frame, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:\n area, threshold, interval = self.parse_settings(settings)\n\n # see get_detection_input_size on undocumented size requirements for GRAY8\n if self.color2Gray != None:\n gray = cv2.cvtColor(frame, self.color2Gray)\n else:\n gray = frame\n curFrame = cv2.GaussianBlur(gray, (21,21), 0)\n\n if detection_session.previous_frame is None:\n detection_session.previous_frame = curFrame\n return\n\n frameDelta = cv2.absdiff(detection_session.previous_frame, curFrame)\n detection_session.previous_frame = curFrame\n\n _, thresh = cv2.threshold(frameDelta, threshold, 255, cv2.THRESH_BINARY)\n dilated = cv2.dilate(thresh, None, iterations=2)\n fcontours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(fcontours)\n\n detections: List[ObjectDetectionResult] = []\n detection_result: ObjectsDetected = {}\n detection_result['detections'] = detections\n 
detection_result['inputDimensions'] = src_size\n \n for c in contours:\n x, y, w, h = cv2.boundingRect(c)\n # if w * h != contour_area:\n # print(\"mismatch w/h\", contour_area - w * h)\n\n x2, y2, _ = convert_to_src_size((x + w, y + h))\n x, y, _ = convert_to_src_size((x, y))\n w = x2 - x + 1\n h = y2 - y + 1\n\n contour_area = w * h\n\n if not area or contour_area > area:\n detection: ObjectDetectionResult = {}\n detection['boundingBox'] = (x, y, w, h)\n detection['className'] = 'motion'\n detection['score'] = 1 if area else contour_area\n detections.append(detection)\n\n return detection_result \n\n def run_detection_jpeg(self, detection_session: DetectionSession, image_bytes: bytes, min_score: float) -> ObjectsDetected:\n raise Exception('cannot run motion detection on jpeg')\n\n def get_detection_input_size(self, src_size):\n # The initial implementation of this plugin used BGRA\n # because it seemed impossible to pull the Y frame out of I420 without corruption.\n # This is because while 318x174 is aspect ratio correct,\n # it seems to cause strange issues with stride and the image is skewed.\n # By using 300x300, this seems to avoid some undocumented minimum size\n # requirement in gst-videoscale or opencv. Unclear which.\n\n # This is the same input size as tensorflow-lite. Allows for better pipelining.\n if not self.retainAspectRatio:\n return (300, 300)\n\n width, height = src_size\n if (width > height):\n if (width > 318):\n height = height / width * 318\n width = 318\n else:\n if (height > 318):\n width = width / height * 318\n height = 318\n\n width = int(np.floor(width / 6) * 6)\n height = int(np.floor(height / 6) * 6)\n\n return width, height\n\n def end_session(self, detection_session: OpenCVDetectionSession):\n if detection_session and detection_session.cap:\n detection_session.cap.release()\n detection_session.cap = None\n return super().end_session(detection_session)\n\n def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size)-> ObjectsDetected:\n buf = gst_sample.get_buffer()\n caps = gst_sample.get_caps()\n # can't trust the width value, compute the stride\n height = caps.get_structure(0).get_value('height')\n width = caps.get_structure(0).get_value('width')\n result, info = buf.map(Gst.MapFlags.READ)\n if not result:\n return\n try:\n mat = np.ndarray(\n (height,\n width,\n self.pixelFormatChannelCount),\n buffer=info.data,\n dtype= np.uint8)\n return self.detect(detection_session, mat, settings, src_size, convert_to_src_size)\n finally:\n buf.unmap(info)\n\n def create_detection_session(self):\n return OpenCVDetectionSession()\n\n def detection_event_notified(self, settings: Any):\n area, threshold, interval = self.parse_settings(settings)\n # it is safe to block here because gstreamer creates a queue thread\n sleep(interval / 1000)\n return super().detection_event_notified(settings)\n" ]
[ [ "numpy.ndarray", "numpy.floor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
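The detect() method in the opencv row above reduces to a standard frame-differencing pipeline: blur, absolute difference, binary threshold, dilation, contour extraction, then an area filter. A minimal standalone sketch of that pipeline follows. The function name detect_motion_regions, the choice to blur both frames in one call, and the synthetic test frames are illustrative assumptions; it also assumes the OpenCV 4.x findContours return convention instead of the imutils.grab_contours shim the plugin uses.

import cv2
import numpy as np

def detect_motion_regions(prev_gray, cur_gray, threshold=25, area=2000):
    # Defaults mirror defaultThreshold/defaultArea from the plugin above.
    prev_blur = cv2.GaussianBlur(prev_gray, (21, 21), 0)
    cur_blur = cv2.GaussianBlur(cur_gray, (21, 21), 0)
    delta = cv2.absdiff(prev_blur, cur_blur)
    _, thresh = cv2.threshold(delta, threshold, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=2)
    # OpenCV 4.x returns (contours, hierarchy).
    contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if not area or w * h > area:
            boxes.append((x, y, w, h))
    return boxes

# Synthetic check: a 60x60 bright square appearing in the second frame
# comfortably exceeds the 2000-pixel default area.
prev = np.zeros((300, 300), dtype=np.uint8)
cur = prev.copy()
cur[100:160, 100:160] = 255
print(detect_motion_regions(prev, cur))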
tbhuwan14/ga-learner-dsb-repo
[ "1d2271037214e6203a0ff92bae75aff32964263e" ]
[ "Banking-Inference/code.py" ]
[ "# --------------\nimport pandas as pd\r\nimport scipy.stats as stats\r\nimport math\r\nimport numpy as np\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n#Sample_Size\r\nsample_size=2000\r\n\r\n#Z_Critical Score\r\nz_critical = stats.norm.ppf(q = 0.95) \r\n\r\n\r\n# path [File location variable]\r\ndata=pd.read_csv(path)\r\n\r\n#Code starts here\r\ndata_sample=data.sample(n=sample_size,random_state=0)\r\n\r\nsample_mean=data_sample['installment'].mean()\r\nprint(sample_mean)\r\n\r\nsample_std=data_sample['installment'].std()\r\nprint(sample_std)\r\n\r\nmargin_of_error=z_critical*sample_std/math.sqrt(sample_size)\r\nprint(margin_of_error)\r\n\r\nconfidence_interval=(sample_mean-margin_of_error,sample_mean+margin_of_error)\r\nprint(confidence_interval)\r\n\r\ntrue_mean=data['installment'].mean()\r\nprint(true_mean)\n\n\n# --------------\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n#Different sample sizes to take\r\nsample_size=np.array([20,50,100])\r\n\r\n#Code starts here\r\nfig ,axes=plt.subplots(1,3,figsize=(20,10))\r\nfor i in range(len(sample_size)):\r\n m=[]\r\n for j in range(1000):\r\n m.append(data['installment'].sample(n=sample_size[i]).mean())\r\n mean_series=pd.Series(m)\r\n axes[i].hist(mean_series)\r\nplt.show()\n\n\n# --------------\n#Importing header files\r\n\r\nfrom statsmodels.stats.weightstats import ztest\r\n\r\n#Code starts here\r\ndata['int.rate'] = (data['int.rate'].str.replace('%', '')).astype(float)/100\r\n\r\nz_statistic , p_value=ztest(data[data['purpose']=='small_business']['int.rate'],value=data['int.rate'].mean(),alternative='larger')\r\nprint(z_statistic,p_value)\r\nif p_value<0.05:\r\n a='reject'\r\nelse:\r\n a='accept'\r\n\r\nprint('We',a,'The Null Hypothesis')\r\n\n\n\n# --------------\n#Importing header files\r\nfrom statsmodels.stats.weightstats import ztest\r\n\r\n#Code starts here\r\nz_statistic, p_value=ztest(x1=data[data['paid.back.loan']=='No']['installment'],x2=data[data['paid.back.loan']=='Yes']['installment'])\r\nprint(z_statistic,p_value)\r\n\r\nif p_value<0.05:\r\n a='Reject'\r\nelse:\r\n a='Accept'\r\n\r\nprint('We',a,'The Null Hypothesis')\n\n\n# --------------\n#Importing header files\r\nfrom scipy.stats import chi2_contingency\r\n\r\n#Critical value \r\ncritical_value = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*\r\n df = 6) # Df = number of variable categories(in purpose) - 1\r\n\r\n#Code starts here\r\nyes=data[data['paid.back.loan']=='Yes']['purpose'].value_counts()\r\nno=data[data['paid.back.loan']=='No']['purpose'].value_counts()\r\nobserved=pd.concat((yes.transpose(), no.transpose()),axis=1, keys=['Yes', 'No'])\r\nchi2, p, dof, ex=chi2_contingency(observed)\r\n\r\nif chi2>critical_value:\r\n a='Reject'\r\nelse:\r\n a='Accept'\r\nprint('We',a,'The Null Hypothesis')\r\n\n\n\n" ]
[ [ "scipy.stats.chi2.ppf", "scipy.stats.norm.ppf", "pandas.read_csv", "pandas.Series", "scipy.stats.chi2_contingency", "matplotlib.pyplot.subplots", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
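The first cell of the Banking-Inference row above hand-rolls a z-critical confidence interval for the mean installment. The sketch below repeats that arithmetic on synthetic data so it can run without the course CSV; the installments array and the function name mean_confidence_interval are made up. Note that stats.norm.ppf(q=0.95) is the one-tailed critical value (about 1.645); a conventional two-sided 95% interval would pass q=0.975 (about 1.96).

import math
import numpy as np
from scipy import stats

def mean_confidence_interval(sample, q=0.95):
    # Same steps as the row above: critical value, margin of error, interval.
    z_critical = stats.norm.ppf(q=q)
    sample_mean = sample.mean()
    margin_of_error = z_critical * sample.std() / math.sqrt(len(sample))
    return sample_mean - margin_of_error, sample_mean + margin_of_error

rng = np.random.default_rng(0)
installments = rng.normal(loc=320.0, scale=200.0, size=2000)  # stand-in for data['installment']
print(mean_confidence_interval(installments))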
And1210/SRGAN
[ "200731d6249c674d0ed556ba287ad7a7698f88b5" ]
[ "datasets/Pedestron_dataset.py" ]
[ "import os\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom torchvision.transforms import transforms\nfrom torch.utils.data import Dataset\nfrom datasets.base_dataset import BaseDataset\nfrom utils.augmenters.augment import seg\nimport xml.etree.ElementTree as ET\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport random\n\nclass CARLADataset(BaseDataset):\n \"\"\"\n Input params:\n stage: The stage of training.\n configuration: Configuration dictionary.\n \"\"\"\n def __init__(self, configuration):\n super().__init__(configuration)\n\n self._stage = configuration[\"stage\"]\n\n self._image_size = tuple(configuration[\"input_size\"])\n self._downsample_size = tuple(configuration[\"downsample_size\"])\n\n self.dataset_path = os.path.join(configuration[\"dataset_path\"])#, \"{}\".format(self._stage))\n\n #-----------------------------------------------------------------------\n #Here is where you can do things like preload data and labels or do image preprocessing\n\n self.sim_img_paths = []\n for i in os.listdir(os.path.join(self.dataset_path, configuration[\"sim_data_folder\"])):\n for j in os.listdir(os.path.join(self.dataset_path, configuration[\"sim_data_folder\"], i)):\n self.sim_img_paths.append(os.path.join(self.dataset_path, configuration[\"sim_data_folder\"], i, j))\n\n #-----------------------------------------------------------------------\n\n\n self._transform = transforms.Compose(\n [\n transforms.ToPILImage(),\n transforms.ToTensor(),\n ]\n )\n\n #This function returns a data, label pair. All data processing and modification should be done by the end of this function\n def __getitem__(self, index):\n sim_filename = self.sim_img_paths[index]\n\n #Image loading assuming the images are in the 'images' folder in the dataset root path\n sim_image = Image.open(sim_filename)\n sim_image = np.asarray(sim_image)\n sim_image = sim_image.astype(np.uint8)\n\n #Image resizing\n sim_image = cv2.resize(sim_image, self._image_size)\n downsample_image = cv2.resize(sim_image, self._downsample_size)\n\n #Image formatting\n sim_image = np.dstack([sim_image] * 1)\n downsample_image = np.dstack([downsample_image] * 1)\n\n #Some image augmentation\n # image = seg(image=image)\n\n #Apply defined transforms to image from constructor (will convert to tensor)\n sim_image = self._transform(sim_image)\n downsample_image = self._transform(downsample_image)\n\n #image should be the image data, target should be the label\n return sim_image, downsample_image\n\n def __len__(self):\n # return the size of the dataset, replace with len of labels array\n return len(self.sim_img_paths)\n" ]
[ [ "numpy.asarray", "numpy.dstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
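__getitem__ in the SRGAN row above turns each frame into a (high-resolution, low-resolution) tensor pair by resizing twice and applying the same ToPILImage/ToTensor transform to both. The sketch below isolates just that pairing step; IMAGE_SIZE, DOWNSAMPLE_SIZE, the function name and the random dummy frame stand in for values the real class reads from its configuration dict.

import cv2
import numpy as np
from torchvision.transforms import transforms

IMAGE_SIZE = (256, 256)        # placeholder for configuration["input_size"]
DOWNSAMPLE_SIZE = (64, 64)     # placeholder for configuration["downsample_size"]

_to_tensor = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])

def make_hr_lr_pair(image):
    # Resize to the training resolution, then derive the low-res copy from it,
    # matching the order used in the dataset class above.
    hr = cv2.resize(image, IMAGE_SIZE)
    lr = cv2.resize(hr, DOWNSAMPLE_SIZE)
    return _to_tensor(hr), _to_tensor(lr)

dummy = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
hr_t, lr_t = make_hr_lr_pair(dummy)
print(hr_t.shape, lr_t.shape)  # expect [3, 256, 256] and [3, 64, 64]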
ORNL-BSEC/morph-net
[ "eb1a493ca07ba4992af1f91ab3b73a6c4fb9cca8" ]
[ "morph_net/network_regularizers/cost_calculator.py" ]
[ "\"\"\"CostCalculator that computes network cost or regularization loss.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\nCONV2D_OPS = ('Conv2D', 'Conv2DBackpropInput', 'DepthwiseConv2dNative')\nFLOP_OPS = CONV2D_OPS + ('MatMul',)\nSUPPORTED_OPS = FLOP_OPS + (\n 'Add', 'AddN', 'ConcatV2', 'FusedBatchNorm', 'Mul', 'Relu', 'Relu6', 'Sum')\n\n\nclass CostCalculator(object):\n \"\"\"CostCalculator that calculates resource cost/loss for a network.\"\"\"\n\n def __init__(self, op_regularizer_manager, resource_function):\n \"\"\"Creates an instance.\n\n Args:\n op_regularizer_manager: OpRegularizerManager that contains the\n OpRegularizer for each op in the network.\n resource_function: Callable that returns the resource (e.g. FLOP) cost or\n loss for an op. The function signature is:\n op; A tf.Operation.\n is_regularization; Boolean indicating whether to calculate\n regularization loss. If False, calculate cost instead.\n num_alive_inputs; Scalar Tensor indicating how many input channels are\n considered alive.\n num_alive_outputs; Scalar Tensor indicating how many output channels\n are considered alive.\n reg_inputs; Scalar Tensor which is the sum over the input\n regularization vector.\n reg_outputs; Scalar Tensor which is the sum over the output\n regularization vector.\n batch_size; Integer batch size to calculate cost/loss for.\n \"\"\"\n self._manager = op_regularizer_manager\n self._resource_function = resource_function\n\n def _get_cost_or_regularization_term(self, is_regularization, ops=None):\n \"\"\"Returns cost or regularization term for ops.\n\n Args:\n is_regularization: Boolean indicating whether to calculate regularization\n loss. If False, calculate cost instead.\n ops: List of tf.Operation. If None, calculates cost/regularization for\n all ops found by OpRegularizerManager.\n\n Returns:\n Cost or regularization term for ops as a tensor or float.\n \"\"\"\n total = 0.0\n if not ops:\n ops = self._manager.ops\n for op in ops:\n if op.type not in SUPPORTED_OPS:\n continue\n\n # Get regularization and alive terms for input and output.\n input_tensor = _get_input(op)\n if op.type == 'ConcatV2':\n # For concat, the input and output regularization are identical but the\n # input is composed of multiple concatenated regularizers. Thus, just\n # use the output regularizer as the input regularizer for simpler cost\n # calculation.\n input_tensor = op.outputs[0]\n input_op_reg = self._manager.get_regularizer(input_tensor.op)\n output_op_reg = self._manager.get_regularizer(op)\n num_alive_inputs = _count_alive(input_tensor, input_op_reg)\n num_alive_outputs = _count_alive(op.outputs[0], output_op_reg)\n reg_inputs = _sum_of_reg_vector(input_op_reg)\n reg_outputs = _sum_of_reg_vector(output_op_reg)\n\n total += self._resource_function(\n op, is_regularization, num_alive_inputs, num_alive_outputs,\n reg_inputs, reg_outputs)\n\n # If at least one supported op is present, type would be tensor, not float.\n if isinstance(total, float):\n # Tests rely on this function not raising exception in this case.\n tf.logging.warning('No supported ops found.')\n return total\n\n def get_cost(self, ops=None):\n \"\"\"Returns cost for ops.\n\n Args:\n ops: List of tf.Operation. 
If None, calculates cost/regularization for\n all ops found by OpRegularizerManager.\n\n Returns:\n Cost of ops as a tensor or float.\n \"\"\"\n\n return self._get_cost_or_regularization_term(False, ops)\n\n def get_regularization_term(self, ops=None):\n \"\"\"Returns regularization for ops.\n\n Args:\n ops: List of tf.Operation. If None, calculates cost/regularization for\n all ops found by OpRegularizerManager.\n\n Returns:\n Regularization term of ops as a tensor or float.\n \"\"\"\n return self._get_cost_or_regularization_term(True, ops)\n\n\ndef _get_input(op):\n \"\"\"Returns the input to that op that represents the activations.\n\n (as opposed to e.g. weights.)\n\n Args:\n op: A tf.Operation object with type in SUPPORTED_OPS.\n\n Returns:\n A tf.Tensor representing the input activations.\n\n Raises:\n ValueError: MatMul is used with transposition (unsupported).\n \"\"\"\n assert op.type in SUPPORTED_OPS, 'Op type %s is not supported.' % op.type\n if op.type == 'Conv2D' or op.type == 'DepthwiseConv2dNative':\n return op.inputs[0]\n if op.type == 'Conv2DBackpropInput':\n return op.inputs[2]\n if op.type == 'MatMul':\n if op.get_attr('transpose_a') or op.get_attr('transpose_b'):\n raise ValueError('MatMul with transposition is not yet supported.')\n return op.inputs[0]\n return op.inputs[0]\n\n\ndef _count_alive(tensor, opreg):\n if opreg:\n return tf.reduce_sum(tf.cast(opreg.alive_vector, tf.float32))\n shape = tensor.shape.as_list()\n if shape:\n num_outputs = tensor.shape.as_list()[-1]\n if num_outputs is not None:\n return tf.constant(num_outputs, tf.float32)\n tf.logging.info('Unknown channel count in tensor %s', tensor)\n return tf.constant(0, tf.float32)\n\n\ndef _sum_of_reg_vector(opreg):\n if opreg:\n return tf.reduce_sum(opreg.regularization_vector)\n else:\n return tf.constant(0.0, tf.float32)\n" ]
[ [ "tensorflow.logging.warning", "tensorflow.constant", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.logging.info" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
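The CostCalculator docstring above fixes the signature that any resource_function must satisfy. The toy stand-in below shows how such a callable could be wired up for a convolution-like FLOP cost: a plain product of alive channel counts for the cost, and a bilinear surrogate in the regularization vectors for the loss. ConvSpec, the 2*k_h*k_w*H*W coefficient and the surrogate itself are illustrative choices, not MorphNet's actual flop function, and the final print assumes TF2 eager execution.

import collections
import tensorflow as tf

ConvSpec = collections.namedtuple('ConvSpec', ['kernel_h', 'kernel_w', 'out_h', 'out_w'])

def toy_flop_function(spec, is_regularization,
                      num_alive_inputs, num_alive_outputs,
                      reg_inputs, reg_outputs, batch_size=1):
    # Roughly 2 * k_h * k_w * H * W multiply-adds per (input, output) channel pair.
    coeff = 2.0 * spec.kernel_h * spec.kernel_w * spec.out_h * spec.out_w * batch_size
    if is_regularization:
        # Differentiable surrogate: each side's alive count weights the other
        # side's summed regularization vector.
        return coeff * (num_alive_inputs * reg_outputs + reg_inputs * num_alive_outputs)
    return coeff * num_alive_inputs * num_alive_outputs

spec = ConvSpec(kernel_h=3, kernel_w=3, out_h=32, out_w=32)
cost = toy_flop_function(spec, False,
                         num_alive_inputs=tf.constant(16.0),
                         num_alive_outputs=tf.constant(32.0),
                         reg_inputs=tf.constant(4.0),
                         reg_outputs=tf.constant(7.5))
print(float(cost))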
timwillhack/dm-haikuBah2
[ "b76a3db3a39b82c8a1ae5a81a8a0173c23c252e5" ]
[ "haiku/_src/layer_norm_test.py" ]
[ "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for haiku._src.layer_norm.\"\"\"\n\nimport itertools\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom haiku._src import initializers\nfrom haiku._src import layer_norm\nfrom haiku._src import test_utils\nfrom haiku._src import transform\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\n\nclass LayerNormTest(parameterized.TestCase):\n\n @test_utils.transform_and_run\n def test_connection(self):\n data = jnp.zeros([2, 3, 4, 5])\n norms = []\n for axis in range(4):\n norms.append(layer_norm.LayerNorm(axis=axis, create_scale=True,\n create_offset=True)(data))\n\n norms.append(layer_norm.LayerNorm(axis=slice(1, None), create_scale=True,\n create_offset=True)(data))\n norms.append(layer_norm.LayerNorm(axis=slice(2, None), create_scale=True,\n create_offset=True)(data))\n norms.append(layer_norm.LayerNorm(axis=slice(1, -1), create_scale=True,\n create_offset=True)(data))\n\n return norms\n\n @parameterized.parameters(itertools.product([True, False], repeat=3))\n def test_bf16(self, create_scale, create_offset, use_fast_variance):\n \"\"\"For all configurations, ensure bf16 outputs from bf16 inputs.\"\"\"\n def f(x):\n ln = layer_norm.LayerNorm(\n axis=-1, create_scale=create_scale, create_offset=create_offset,\n use_fast_variance=use_fast_variance)\n return ln(x)\n\n fwd = transform.transform(f)\n data = jnp.zeros([2, 3, 4, 5], dtype=jnp.bfloat16)\n params = fwd.init(jax.random.PRNGKey(428), data)\n bf16_params = jax.tree_map(lambda t: t.astype(jnp.bfloat16), params)\n self.assertEqual(fwd.apply(bf16_params, None, data).dtype, jnp.bfloat16)\n\n @parameterized.parameters(True, False)\n @test_utils.transform_and_run\n def test_simple_case(self, use_fast_variance):\n layer = layer_norm.LayerNorm([1, 2],\n create_scale=False,\n create_offset=False,\n use_fast_variance=use_fast_variance)\n inputs = np.ones([2, 3, 3, 5])\n\n outputs = layer(inputs)\n for x in np.nditer(outputs):\n self.assertEqual(x, 0.0)\n\n @parameterized.parameters(True, False)\n @test_utils.transform_and_run\n def test_simple_case_var(self, use_fast_variance):\n layer = layer_norm.LayerNorm([1, 2],\n create_scale=True,\n create_offset=True,\n scale_init=initializers.Constant(0.5),\n offset_init=initializers.Constant(2.0),\n use_fast_variance=use_fast_variance)\n\n inputs = np.ones([2, 3, 3, 5])\n\n outputs = layer(inputs)\n for x in np.nditer(outputs):\n self.assertEqual(x, 2.0)\n\n @test_utils.transform_and_run\n def test_simple_case_tensor(self):\n layer = layer_norm.LayerNorm([1, 2],\n create_scale=False,\n create_offset=False)\n\n inputs = np.ones([2, 3, 3, 5])\n scale = np.full((5,), 0.5)\n offset = np.full((5,), 2.0)\n\n outputs = layer(inputs, scale, offset)\n for x in np.nditer(outputs):\n self.assertEqual(x, 2.0)\n\n @parameterized.named_parameters((\"String\", 
\"foo\"), (\"ListString\", [\"foo\"]))\n @test_utils.transform_and_run\n def test_invalid_axis(self, axis):\n with self.assertRaisesRegex(\n ValueError, \"`axis` should be an int, slice or iterable of ints.\"):\n layer_norm.LayerNorm(axis, create_scale=False, create_offset=False)\n\n @test_utils.transform_and_run\n def test_no_scale_and_init_provided(self):\n with self.assertRaisesRegex(\n ValueError, \"Cannot set `scale_init` if `create_scale=False`.\"):\n layer_norm.LayerNorm(\n 3, create_scale=False, create_offset=True, scale_init=np.ones)\n\n @test_utils.transform_and_run\n def test_no_offset_beta_init_provided(self):\n with self.assertRaisesRegex(\n ValueError, \"Cannot set `offset_init` if `create_offset=False`.\"):\n layer_norm.LayerNorm(\n 3, create_scale=True, create_offset=False, offset_init=np.zeros)\n\n @test_utils.transform_and_run\n def test_create_scale_and_scale_provided(self):\n layer = layer_norm.LayerNorm([2], create_scale=True, create_offset=False)\n\n with self.assertRaisesRegex(\n ValueError, \"Cannot pass `scale` at call time if `create_scale=True`.\"):\n layer(np.ones([2, 3, 4]), scale=np.ones([4]))\n\n @test_utils.transform_and_run\n def test_create_offset_and_offset_provided(self):\n layer = layer_norm.LayerNorm([2], create_offset=True, create_scale=False)\n\n with self.assertRaisesRegex(\n ValueError,\n \"Cannot pass `offset` at call time if `create_offset=True`.\"):\n layer(np.ones([2, 3, 4]), offset=np.ones([4]))\n\n @parameterized.parameters(True, False)\n @test_utils.transform_and_run\n def test_slice_axis(self, use_fast_variance):\n slice_layer = layer_norm.LayerNorm(\n slice(1, -1),\n create_scale=False,\n create_offset=False,\n use_fast_variance=use_fast_variance)\n axis_layer = layer_norm.LayerNorm((1, 2),\n create_scale=False,\n create_offset=False,\n use_fast_variance=use_fast_variance)\n\n inputs = np.random.uniform(size=[3, 4, 4, 5], low=0, high=10)\n scale = np.random.normal(size=(5,), loc=1.0)\n offset = np.random.normal(size=(5,))\n\n slice_outputs = slice_layer(inputs, scale, offset)\n axis_outputs = axis_layer(inputs, scale, offset)\n\n np.testing.assert_array_equal(slice_outputs, axis_outputs)\n\n @test_utils.transform_and_run\n def test_connection_instance_norm(self):\n layer = layer_norm.InstanceNorm(create_scale=True, create_offset=True)\n\n inputs = np.ones([3, 4, 5, 6])\n result = layer(inputs)\n\n self.assertEqual(result.shape, (3, 4, 5, 6))\n\n\nif __name__ == \"__main__\":\n absltest.main()\n" ]
[ [ "numpy.nditer", "numpy.full", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.random.normal", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
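The haiku tests above construct LayerNorm through the internal haiku._src modules; through the public API the same forward pass looks roughly like the sketch below (import haiku as hk). The input shape and PRNG seed are arbitrary, and None is passed as the apply-time RNG because layer norm is deterministic.

import haiku as hk
import jax
import jax.numpy as jnp

def f(x):
    ln = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)
    return ln(x)

fwd = hk.transform(f)
x = jnp.ones([2, 3, 4, 5])
params = fwd.init(jax.random.PRNGKey(428), x)
y = fwd.apply(params, None, x)
print(y.shape, y.dtype)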
unbun/snake.ai
[ "0c017357608dc7c06af0ca3ca57d870641461207", "0c017357608dc7c06af0ca3ca57d870641461207", "0c017357608dc7c06af0ca3ca57d870641461207", "0c017357608dc7c06af0ca3ca57d870641461207", "0c017357608dc7c06af0ca3ca57d870641461207", "0c017357608dc7c06af0ca3ca57d870641461207", "0c017357608dc7c06af0ca3ca57d870641461207", "0c017357608dc7c06af0ca3ca57d870641461207" ]
[ "venv/Lib/site-packages/scipy/fftpack/tests/test_basic.py", "venv/Lib/site-packages/numpy/core/tests/test_indexing.py", "venv/Lib/site-packages/scipy/interpolate/interpolate.py", "venv/Lib/site-packages/numpy/distutils/tests/test_system_info.py", "venv/Lib/site-packages/numpy/core/records.py", "venv/Lib/site-packages/numpy/core/tests/test_dtype.py", "venv/Lib/site-packages/numpy/core/_dtype_ctypes.py", "venv/Lib/site-packages/numpy/lib/tests/test_function_base.py" ]
[ "# Created by Pearu Peterson, September 2002\n\nfrom __future__ import division, print_function, absolute_import\n\n__usage__ = \"\"\"\nBuild fftpack:\n python setup_fftpack.py build\nRun tests if scipy is installed:\n python -c 'import scipy;scipy.fftpack.test()'\nRun tests if fftpack is not installed:\n python tests/test_basic.py\n\"\"\"\n\nfrom numpy.testing import (assert_, assert_equal, assert_array_almost_equal,\n assert_array_almost_equal_nulp, assert_array_less)\nimport pytest\nfrom pytest import raises as assert_raises\nfrom scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2\nfrom scipy.fftpack import _fftpack as fftpack\nfrom scipy.fftpack.basic import _is_safe_size\n\nfrom numpy import (arange, add, array, asarray, zeros, dot, exp, pi,\n swapaxes, double, cdouble)\nimport numpy as np\nimport numpy.fft\nfrom numpy.random import rand\n\n# \"large\" composite numbers supported by FFTPACK\nLARGE_COMPOSITE_SIZES = [\n 2**13,\n 2**5 * 3**5,\n 2**3 * 3**3 * 5**2,\n]\nSMALL_COMPOSITE_SIZES = [\n 2,\n 2*3*5,\n 2*2*3*3,\n]\n# prime\nLARGE_PRIME_SIZES = [\n 2011\n]\nSMALL_PRIME_SIZES = [\n 29\n]\n\n\ndef _assert_close_in_norm(x, y, rtol, size, rdt):\n # helper function for testing\n err_msg = \"size: %s rdt: %s\" % (size, rdt)\n assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)\n\n\ndef random(size):\n return rand(*size)\n\n\ndef get_mat(n):\n data = arange(n)\n data = add.outer(data, data)\n return data\n\n\ndef direct_dft(x):\n x = asarray(x)\n n = len(x)\n y = zeros(n, dtype=cdouble)\n w = -arange(n)*(2j*pi/n)\n for i in range(n):\n y[i] = dot(exp(i*w), x)\n return y\n\n\ndef direct_idft(x):\n x = asarray(x)\n n = len(x)\n y = zeros(n, dtype=cdouble)\n w = arange(n)*(2j*pi/n)\n for i in range(n):\n y[i] = dot(exp(i*w), x)/n\n return y\n\n\ndef direct_dftn(x):\n x = asarray(x)\n for axis in range(len(x.shape)):\n x = fft(x, axis=axis)\n return x\n\n\ndef direct_idftn(x):\n x = asarray(x)\n for axis in range(len(x.shape)):\n x = ifft(x, axis=axis)\n return x\n\n\ndef direct_rdft(x):\n x = asarray(x)\n n = len(x)\n w = -arange(n)*(2j*pi/n)\n r = zeros(n, dtype=double)\n for i in range(n//2+1):\n y = dot(exp(i*w), x)\n if i:\n r[2*i-1] = y.real\n if 2*i < n:\n r[2*i] = y.imag\n else:\n r[0] = y.real\n return r\n\n\ndef direct_irdft(x):\n x = asarray(x)\n n = len(x)\n x1 = zeros(n, dtype=cdouble)\n for i in range(n//2+1):\n if i:\n if 2*i < n:\n x1[i] = x[2*i-1] + 1j*x[2*i]\n x1[n-i] = x[2*i-1] - 1j*x[2*i]\n else:\n x1[i] = x[2*i-1]\n else:\n x1[0] = x[0]\n return direct_idft(x1).real\n\n\nclass _TestFFTBase(object):\n def setup_method(self):\n self.cdt = None\n self.rdt = None\n np.random.seed(1234)\n\n def test_definition(self):\n x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)\n y = fft(x)\n assert_equal(y.dtype, self.cdt)\n y1 = direct_dft(x)\n assert_array_almost_equal(y,y1)\n x = np.array([1,2,3,4+0j,5], dtype=self.cdt)\n assert_array_almost_equal(fft(x),direct_dft(x))\n\n def test_n_argument_real(self):\n x1 = np.array([1,2,3,4], dtype=self.rdt)\n x2 = np.array([1,2,3,4], dtype=self.rdt)\n y = fft([x1,x2],n=4)\n assert_equal(y.dtype, self.cdt)\n assert_equal(y.shape,(2,4))\n assert_array_almost_equal(y[0],direct_dft(x1))\n assert_array_almost_equal(y[1],direct_dft(x2))\n\n def _test_n_argument_complex(self):\n x1 = np.array([1,2,3,4+1j], dtype=self.cdt)\n x2 = np.array([1,2,3,4+1j], dtype=self.cdt)\n y = fft([x1,x2],n=4)\n assert_equal(y.dtype, self.cdt)\n assert_equal(y.shape,(2,4))\n assert_array_almost_equal(y[0],direct_dft(x1))\n 
assert_array_almost_equal(y[1],direct_dft(x2))\n\n def test_djbfft(self):\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n y = fftpack.zfft(x)\n y2 = numpy.fft.fft(x)\n assert_array_almost_equal(y,y2)\n y = fftpack.zrfft(x)\n assert_array_almost_equal(y,y2)\n\n def test_invalid_sizes(self):\n assert_raises(ValueError, fft, [])\n assert_raises(ValueError, fft, [[1,1],[2,2]], -5)\n\n def test__is_safe_size(self):\n vals = [(0, True), (1, True), (2, True), (3, True), (4, True), (5, True), (6, True), (7, False),\n (15, True), (16, True), (17, False), (18, True), (21, False), (25, True), (50, True),\n (120, True), (210, False)]\n for n, is_safe in vals:\n assert_equal(_is_safe_size(n), is_safe)\n\n\nclass TestDoubleFFT(_TestFFTBase):\n def setup_method(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n\n\nclass TestSingleFFT(_TestFFTBase):\n def setup_method(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n\n @pytest.mark.xfail(run=False, reason=\"single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved\")\n def test_notice(self):\n pass\n\n\nclass TestFloat16FFT(object):\n\n def test_1_argument_real(self):\n x1 = np.array([1, 2, 3, 4], dtype=np.float16)\n y = fft(x1, n=4)\n assert_equal(y.dtype, np.complex64)\n assert_equal(y.shape, (4, ))\n assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))\n\n def test_n_argument_real(self):\n x1 = np.array([1, 2, 3, 4], dtype=np.float16)\n x2 = np.array([1, 2, 3, 4], dtype=np.float16)\n y = fft([x1, x2], n=4)\n assert_equal(y.dtype, np.complex64)\n assert_equal(y.shape, (2, 4))\n assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))\n assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))\n\n\nclass _TestIFFTBase(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)\n y = ifft(x)\n y1 = direct_idft(x)\n assert_equal(y.dtype, self.cdt)\n assert_array_almost_equal(y,y1)\n\n x = np.array([1,2,3,4+0j,5], self.cdt)\n assert_array_almost_equal(ifft(x),direct_idft(x))\n\n def test_definition_real(self):\n x = np.array([1,2,3,4,1,2,3,4], self.rdt)\n y = ifft(x)\n assert_equal(y.dtype, self.cdt)\n y1 = direct_idft(x)\n assert_array_almost_equal(y,y1)\n\n x = np.array([1,2,3,4,5], dtype=self.rdt)\n assert_equal(y.dtype, self.cdt)\n assert_array_almost_equal(ifft(x),direct_idft(x))\n\n def test_djbfft(self):\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n y = fftpack.zfft(x,direction=-1)\n y2 = numpy.fft.ifft(x)\n assert_array_almost_equal(y,y2)\n y = fftpack.zrfft(x,direction=-1)\n assert_array_almost_equal(y,y2)\n\n def test_random_complex(self):\n for size in [1,51,111,100,200,64,128,256,1024]:\n x = random([size]).astype(self.cdt)\n x = random([size]).astype(self.cdt) + 1j*x\n y1 = ifft(fft(x))\n y2 = fft(ifft(x))\n assert_equal(y1.dtype, self.cdt)\n assert_equal(y2.dtype, self.cdt)\n assert_array_almost_equal(y1, x)\n assert_array_almost_equal(y2, x)\n\n def test_random_real(self):\n for size in [1,51,111,100,200,64,128,256,1024]:\n x = random([size]).astype(self.rdt)\n y1 = ifft(fft(x))\n y2 = fft(ifft(x))\n assert_equal(y1.dtype, self.cdt)\n assert_equal(y2.dtype, self.cdt)\n assert_array_almost_equal(y1, x)\n assert_array_almost_equal(y2, x)\n\n def test_size_accuracy(self):\n # Sanity check for the accuracy for prime and non-prime sized inputs\n if self.rdt == np.float32:\n rtol = 1e-5\n elif self.rdt == np.float64:\n rtol = 1e-10\n\n for size in 
LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:\n np.random.seed(1234)\n x = np.random.rand(size).astype(self.rdt)\n y = ifft(fft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n y = fft(ifft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n\n x = (x + 1j*np.random.rand(size)).astype(self.cdt)\n y = ifft(fft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n y = fft(ifft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n\n def test_invalid_sizes(self):\n assert_raises(ValueError, ifft, [])\n assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)\n\n\nclass TestDoubleIFFT(_TestIFFTBase):\n def setup_method(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n\n\nclass TestSingleIFFT(_TestIFFTBase):\n def setup_method(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n\n\nclass _TestRFFTBase(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_definition(self):\n for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:\n x = np.array(t, dtype=self.rdt)\n y = rfft(x)\n y1 = direct_rdft(x)\n assert_array_almost_equal(y,y1)\n assert_equal(y.dtype, self.rdt)\n\n def test_djbfft(self):\n from numpy.fft import fft as numpy_fft\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n y2 = numpy_fft(x)\n y1 = zeros((n,),dtype=double)\n y1[0] = y2[0].real\n y1[-1] = y2[n//2].real\n for k in range(1, n//2):\n y1[2*k-1] = y2[k].real\n y1[2*k] = y2[k].imag\n y = fftpack.drfft(x)\n assert_array_almost_equal(y,y1)\n\n def test_invalid_sizes(self):\n assert_raises(ValueError, rfft, [])\n assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)\n\n # See gh-5790\n class MockSeries(object):\n def __init__(self, data):\n self.data = np.asarray(data)\n\n def __getattr__(self, item):\n try:\n return getattr(self.data, item)\n except AttributeError:\n raise AttributeError((\"'MockSeries' object \"\n \"has no attribute '{attr}'\".\n format(attr=item)))\n\n def test_non_ndarray_with_dtype(self):\n x = np.array([1., 2., 3., 4., 5.])\n xs = _TestRFFTBase.MockSeries(x)\n\n expected = [1, 2, 3, 4, 5]\n out = rfft(xs)\n\n # Data should not have been overwritten\n assert_equal(x, expected)\n assert_equal(xs.data, expected)\n\nclass TestRFFTDouble(_TestRFFTBase):\n def setup_method(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n\n\nclass TestRFFTSingle(_TestRFFTBase):\n def setup_method(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n\n\nclass _TestIRFFTBase(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x1 = [1,2,3,4,1,2,3,4]\n x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]\n x2 = [1,2,3,4,1,2,3,4,5]\n x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]\n\n def _test(x, xr):\n y = irfft(np.array(x, dtype=self.rdt))\n y1 = direct_irdft(x)\n assert_equal(y.dtype, self.rdt)\n assert_array_almost_equal(y,y1, decimal=self.ndec)\n assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)\n\n _test(x1, x1_1)\n _test(x2, x2_1)\n\n def test_djbfft(self):\n from numpy.fft import ifft as numpy_ifft\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n x1 = zeros((n,),dtype=cdouble)\n x1[0] = x[0]\n for k in range(1, n//2):\n x1[k] = x[2*k-1]+1j*x[2*k]\n x1[n-k] = x[2*k-1]-1j*x[2*k]\n x1[n//2] = x[-1]\n y1 = numpy_ifft(x1)\n y = fftpack.drfft(x,direction=-1)\n assert_array_almost_equal(y,y1)\n\n def test_random_real(self):\n for size in [1,51,111,100,200,64,128,256,1024]:\n x = random([size]).astype(self.rdt)\n y1 = irfft(rfft(x))\n y2 = rfft(irfft(x))\n assert_equal(y1.dtype, self.rdt)\n assert_equal(y2.dtype, 
self.rdt)\n assert_array_almost_equal(y1, x, decimal=self.ndec,\n err_msg=\"size=%d\" % size)\n assert_array_almost_equal(y2, x, decimal=self.ndec,\n err_msg=\"size=%d\" % size)\n\n def test_size_accuracy(self):\n # Sanity check for the accuracy for prime and non-prime sized inputs\n if self.rdt == np.float32:\n rtol = 1e-5\n elif self.rdt == np.float64:\n rtol = 1e-10\n\n for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:\n np.random.seed(1234)\n x = np.random.rand(size).astype(self.rdt)\n y = irfft(rfft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n y = rfft(irfft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n\n def test_invalid_sizes(self):\n assert_raises(ValueError, irfft, [])\n assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)\n\n\n# self.ndec is bogus; we should have a assert_array_approx_equal for number of\n# significant digits\n\nclass TestIRFFTDouble(_TestIRFFTBase):\n def setup_method(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n self.ndec = 14\n\n\nclass TestIRFFTSingle(_TestIRFFTBase):\n def setup_method(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n self.ndec = 5\n\n\nclass Testfft2(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_regression_244(self):\n \"\"\"FFT returns wrong result with axes parameter.\"\"\"\n # fftn (and hence fft2) used to break when both axes and shape were\n # used\n x = numpy.ones((4, 4, 2))\n y = fft2(x, shape=(8, 8), axes=(-3, -2))\n y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))\n assert_array_almost_equal(y, y_r)\n\n def test_invalid_sizes(self):\n assert_raises(ValueError, fft2, [[]])\n assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))\n\n\nclass TestFftnSingle(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n y = fftn(np.array(x, np.float32))\n assert_(y.dtype == np.complex64,\n msg=\"double precision output with single precision\")\n\n y_r = np.array(fftn(x), np.complex64)\n assert_array_almost_equal_nulp(y, y_r)\n\n @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)\n def test_size_accuracy_small(self, size):\n x = np.random.rand(size, size) + 1j*np.random.rand(size, size)\n y1 = fftn(x.real.astype(np.float32))\n y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)\n\n assert_equal(y1.dtype, np.complex64)\n assert_array_almost_equal_nulp(y1, y2, 2000)\n\n @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)\n def test_size_accuracy_large(self, size):\n x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)\n y1 = fftn(x.real.astype(np.float32))\n y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)\n\n assert_equal(y1.dtype, np.complex64)\n assert_array_almost_equal_nulp(y1, y2, 2000)\n\n def test_definition_float16(self):\n x = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n y = fftn(np.array(x, np.float16))\n assert_equal(y.dtype, np.complex64)\n y_r = np.array(fftn(x), np.complex64)\n assert_array_almost_equal_nulp(y, y_r)\n\n @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)\n def test_float16_input_small(self, size):\n x = np.random.rand(size, size) + 1j*np.random.rand(size, size)\n y1 = fftn(x.real.astype(np.float16))\n y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)\n\n assert_equal(y1.dtype, np.complex64)\n assert_array_almost_equal_nulp(y1, y2, 5e5)\n\n @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)\n def test_float16_input_large(self, size):\n x = 
np.random.rand(size, 3) + 1j*np.random.rand(size, 3)\n y1 = fftn(x.real.astype(np.float16))\n y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)\n\n assert_equal(y1.dtype, np.complex64)\n assert_array_almost_equal_nulp(y1, y2, 2e6)\n\n\nclass TestFftn(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n y = fftn(x)\n assert_array_almost_equal(y, direct_dftn(x))\n\n x = random((20, 26))\n assert_array_almost_equal(fftn(x), direct_dftn(x))\n\n x = random((5, 4, 3, 20))\n assert_array_almost_equal(fftn(x), direct_dftn(x))\n\n def test_axes_argument(self):\n # plane == ji_plane, x== kji_space\n plane1 = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n plane2 = [[10, 11, 12],\n [13, 14, 15],\n [16, 17, 18]]\n plane3 = [[19, 20, 21],\n [22, 23, 24],\n [25, 26, 27]]\n ki_plane1 = [[1, 2, 3],\n [10, 11, 12],\n [19, 20, 21]]\n ki_plane2 = [[4, 5, 6],\n [13, 14, 15],\n [22, 23, 24]]\n ki_plane3 = [[7, 8, 9],\n [16, 17, 18],\n [25, 26, 27]]\n jk_plane1 = [[1, 10, 19],\n [4, 13, 22],\n [7, 16, 25]]\n jk_plane2 = [[2, 11, 20],\n [5, 14, 23],\n [8, 17, 26]]\n jk_plane3 = [[3, 12, 21],\n [6, 15, 24],\n [9, 18, 27]]\n kj_plane1 = [[1, 4, 7],\n [10, 13, 16], [19, 22, 25]]\n kj_plane2 = [[2, 5, 8],\n [11, 14, 17], [20, 23, 26]]\n kj_plane3 = [[3, 6, 9],\n [12, 15, 18], [21, 24, 27]]\n ij_plane1 = [[1, 4, 7],\n [2, 5, 8],\n [3, 6, 9]]\n ij_plane2 = [[10, 13, 16],\n [11, 14, 17],\n [12, 15, 18]]\n ij_plane3 = [[19, 22, 25],\n [20, 23, 26],\n [21, 24, 27]]\n ik_plane1 = [[1, 10, 19],\n [2, 11, 20],\n [3, 12, 21]]\n ik_plane2 = [[4, 13, 22],\n [5, 14, 23],\n [6, 15, 24]]\n ik_plane3 = [[7, 16, 25],\n [8, 17, 26],\n [9, 18, 27]]\n ijk_space = [jk_plane1, jk_plane2, jk_plane3]\n ikj_space = [kj_plane1, kj_plane2, kj_plane3]\n jik_space = [ik_plane1, ik_plane2, ik_plane3]\n jki_space = [ki_plane1, ki_plane2, ki_plane3]\n kij_space = [ij_plane1, ij_plane2, ij_plane3]\n x = array([plane1, plane2, plane3])\n\n assert_array_almost_equal(fftn(x),\n fftn(x, axes=(-3, -2, -1))) # kji_space\n assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))\n assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))\n y = fftn(x, axes=(2, 1, 0)) # ijk_space\n assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))\n y = fftn(x, axes=(2, 0, 1)) # ikj_space\n assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),\n fftn(ikj_space))\n y = fftn(x, axes=(1, 2, 0)) # jik_space\n assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),\n fftn(jik_space))\n y = fftn(x, axes=(1, 0, 2)) # jki_space\n assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))\n y = fftn(x, axes=(0, 2, 1)) # kij_space\n assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))\n\n y = fftn(x, axes=(-2, -1)) # ji_plane\n assert_array_almost_equal(fftn(plane1), y[0])\n assert_array_almost_equal(fftn(plane2), y[1])\n assert_array_almost_equal(fftn(plane3), y[2])\n\n y = fftn(x, axes=(1, 2)) # ji_plane\n assert_array_almost_equal(fftn(plane1), y[0])\n assert_array_almost_equal(fftn(plane2), y[1])\n assert_array_almost_equal(fftn(plane3), y[2])\n\n y = fftn(x, axes=(-3, -2)) # kj_plane\n assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])\n assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])\n assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])\n\n y = fftn(x, axes=(-3, -1)) # ki_plane\n assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])\n assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])\n 
assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])\n\n y = fftn(x, axes=(-1, -2)) # ij_plane\n assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))\n assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))\n assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))\n\n y = fftn(x, axes=(-1, -3)) # ik_plane\n assert_array_almost_equal(fftn(ik_plane1),\n swapaxes(y[:, 0, :], -1, -2))\n assert_array_almost_equal(fftn(ik_plane2),\n swapaxes(y[:, 1, :], -1, -2))\n assert_array_almost_equal(fftn(ik_plane3),\n swapaxes(y[:, 2, :], -1, -2))\n\n y = fftn(x, axes=(-2, -3)) # jk_plane\n assert_array_almost_equal(fftn(jk_plane1),\n swapaxes(y[:, :, 0], -1, -2))\n assert_array_almost_equal(fftn(jk_plane2),\n swapaxes(y[:, :, 1], -1, -2))\n assert_array_almost_equal(fftn(jk_plane3),\n swapaxes(y[:, :, 2], -1, -2))\n\n y = fftn(x, axes=(-1,)) # i_line\n for i in range(3):\n for j in range(3):\n assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])\n y = fftn(x, axes=(-2,)) # j_line\n for i in range(3):\n for j in range(3):\n assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])\n y = fftn(x, axes=(0,)) # k_line\n for i in range(3):\n for j in range(3):\n assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])\n\n y = fftn(x, axes=()) # point\n assert_array_almost_equal(y, x)\n\n def test_shape_argument(self):\n small_x = [[1, 2, 3],\n [4, 5, 6]]\n large_x1 = [[1, 2, 3, 0],\n [4, 5, 6, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]]\n\n y = fftn(small_x, shape=(4, 4))\n assert_array_almost_equal(y, fftn(large_x1))\n\n y = fftn(small_x, shape=(3, 4))\n assert_array_almost_equal(y, fftn(large_x1[:-1]))\n\n def test_shape_axes_argument(self):\n small_x = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n large_x1 = array([[1, 2, 3, 0],\n [4, 5, 6, 0],\n [7, 8, 9, 0],\n [0, 0, 0, 0]])\n y = fftn(small_x, shape=(4, 4), axes=(-2, -1))\n assert_array_almost_equal(y, fftn(large_x1))\n y = fftn(small_x, shape=(4, 4), axes=(-1, -2))\n\n assert_array_almost_equal(y, swapaxes(\n fftn(swapaxes(large_x1, -1, -2)), -1, -2))\n\n def test_shape_axes_argument2(self):\n # Change shape of the last axis\n x = numpy.random.random((10, 5, 3, 7))\n y = fftn(x, axes=(-1,), shape=(8,))\n assert_array_almost_equal(y, fft(x, axis=-1, n=8))\n\n # Change shape of an arbitrary axis which is not the last one\n x = numpy.random.random((10, 5, 3, 7))\n y = fftn(x, axes=(-2,), shape=(8,))\n assert_array_almost_equal(y, fft(x, axis=-2, n=8))\n\n # Change shape of axes: cf #244, where shape and axes were mixed up\n x = numpy.random.random((4, 4, 2))\n y = fftn(x, axes=(-3, -2), shape=(8, 8))\n assert_array_almost_equal(y,\n numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))\n\n def test_shape_argument_more(self):\n x = zeros((4, 4, 2))\n with assert_raises(ValueError,\n match=\"when given, axes and shape arguments\"\n \" have to be of the same length\"):\n fftn(x, shape=(8, 8, 2, 1))\n\n def test_invalid_sizes(self):\n with assert_raises(ValueError,\n match=\"invalid number of data points\"\n r\" \\(\\[1 0\\]\\) specified\"):\n fftn([[]])\n\n with assert_raises(ValueError,\n match=\"invalid number of data points\"\n r\" \\(\\[ 4 -3\\]\\) specified\"):\n fftn([[1, 1], [2, 2]], (4, -3))\n\n\nclass TestIfftn(object):\n dtype = None\n cdtype = None\n\n def setup_method(self):\n np.random.seed(1234)\n\n @pytest.mark.parametrize('dtype,cdtype,maxnlp',\n [(np.float64, np.complex128, 2000),\n (np.float32, np.complex64, 3500)])\n def test_definition(self, dtype, cdtype, maxnlp):\n x = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]], 
dtype=dtype)\n y = ifftn(x)\n assert_equal(y.dtype, cdtype)\n assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)\n\n x = random((20, 26))\n assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)\n\n x = random((5, 4, 3, 20))\n assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)\n\n @pytest.mark.parametrize('maxnlp', [2000, 3500])\n @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])\n def test_random_complex(self, maxnlp, size):\n x = random([size, size]) + 1j*random([size, size])\n assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)\n assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)\n\n def test_invalid_sizes(self):\n with assert_raises(ValueError,\n match=\"invalid number of data points\"\n r\" \\(\\[1 0\\]\\) specified\"):\n ifftn([[]])\n\n with assert_raises(ValueError,\n match=\"invalid number of data points\"\n r\" \\(\\[ 4 -3\\]\\) specified\"):\n ifftn([[1, 1], [2, 2]], (4, -3))\n\n\nclass TestLongDoubleFailure(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_complex(self):\n if np.dtype(np.longcomplex).itemsize == np.dtype(complex).itemsize:\n # longdouble == double; so fft is supported\n return\n\n x = np.random.randn(10).astype(np.longdouble) + \\\n 1j * np.random.randn(10).astype(np.longdouble)\n\n for f in [fft, ifft]:\n try:\n f(x)\n raise AssertionError(\"Type {0} not supported but does not fail\" %\n np.longcomplex)\n except ValueError:\n pass\n\n def test_real(self):\n if np.dtype(np.longdouble).itemsize == np.dtype(np.double).itemsize:\n # longdouble == double; so fft is supported\n return\n\n x = np.random.randn(10).astype(np.longcomplex)\n\n for f in [fft, ifft]:\n try:\n f(x)\n raise AssertionError(\"Type %r not supported but does not fail\" %\n np.longcomplex)\n except ValueError:\n pass\n\n\nclass FakeArray(object):\n def __init__(self, data):\n self._data = data\n self.__array_interface__ = data.__array_interface__\n\n\nclass FakeArray2(object):\n def __init__(self, data):\n self._data = data\n\n def __array__(self):\n return self._data\n\n\nclass TestOverwrite(object):\n \"\"\"Check input overwrite behavior of the FFT functions.\"\"\"\n\n real_dtypes = [np.float32, np.float64]\n dtypes = real_dtypes + [np.complex64, np.complex128]\n fftsizes = [8, 16, 32]\n\n def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):\n x2 = x.copy()\n for fake in [lambda x: x, FakeArray, FakeArray2]:\n routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)\n\n sig = \"%s(%s%r, %r, axis=%r, overwrite_x=%r)\" % (\n routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)\n if not should_overwrite:\n assert_equal(x2, x, err_msg=\"spurious overwrite in %s\" % sig)\n\n def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,\n fftsize, overwrite_x):\n np.random.seed(1234)\n if np.issubdtype(dtype, np.complexfloating):\n data = np.random.randn(*shape) + 1j*np.random.randn(*shape)\n else:\n data = np.random.randn(*shape)\n data = data.astype(dtype)\n\n should_overwrite = (overwrite_x\n and dtype in overwritable_dtypes\n and fftsize <= shape[axis]\n and (len(shape) == 1 or\n (axis % len(shape) == len(shape)-1\n and fftsize == shape[axis])))\n self._check(data, routine, fftsize, axis,\n overwrite_x=overwrite_x,\n should_overwrite=should_overwrite)\n\n @pytest.mark.parametrize('dtype', dtypes)\n @pytest.mark.parametrize('fftsize', fftsizes)\n @pytest.mark.parametrize('overwrite_x', [True, False])\n @pytest.mark.parametrize('shape,axes', [((16,), -1),\n ((16, 2), 0),\n ((2, 16), 
1)])\n def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):\n overwritable = (np.complex128, np.complex64)\n self._check_1d(fft, dtype, shape, axes, overwritable,\n fftsize, overwrite_x)\n self._check_1d(ifft, dtype, shape, axes, overwritable,\n fftsize, overwrite_x)\n\n @pytest.mark.parametrize('dtype', real_dtypes)\n @pytest.mark.parametrize('fftsize', fftsizes)\n @pytest.mark.parametrize('overwrite_x', [True, False])\n @pytest.mark.parametrize('shape,axes', [((16,), -1),\n ((16, 2), 0),\n ((2, 16), 1)])\n def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):\n overwritable = self.real_dtypes\n self._check_1d(irfft, dtype, shape, axes, overwritable,\n fftsize, overwrite_x)\n self._check_1d(rfft, dtype, shape, axes, overwritable,\n fftsize, overwrite_x)\n\n def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,\n overwrite_x):\n np.random.seed(1234)\n if np.issubdtype(dtype, np.complexfloating):\n data = np.random.randn(*shape) + 1j*np.random.randn(*shape)\n else:\n data = np.random.randn(*shape)\n data = data.astype(dtype)\n\n def fftshape_iter(shp):\n if len(shp) <= 0:\n yield ()\n else:\n for j in (shp[0]//2, shp[0], shp[0]*2):\n for rest in fftshape_iter(shp[1:]):\n yield (j,) + rest\n\n if axes is None:\n part_shape = shape\n else:\n part_shape = tuple(np.take(shape, axes))\n\n for fftshape in fftshape_iter(part_shape):\n should_overwrite = (overwrite_x\n and data.ndim == 1\n and np.all([x < y for x, y in zip(fftshape,\n part_shape)])\n and dtype in overwritable_dtypes)\n self._check(data, routine, fftshape, axes,\n overwrite_x=overwrite_x,\n should_overwrite=should_overwrite)\n if data.ndim > 1:\n # check fortran order: it never overwrites\n self._check(data.T, routine, fftshape, axes,\n overwrite_x=overwrite_x,\n should_overwrite=False)\n\n @pytest.mark.parametrize('dtype', dtypes)\n @pytest.mark.parametrize('overwrite_x', [True, False])\n @pytest.mark.parametrize('shape,axes', [((16,), None),\n ((16,), (0,)),\n ((16, 2), (0,)),\n ((2, 16), (1,)),\n ((8, 16), None),\n ((8, 16), (0, 1)),\n ((8, 16, 2), (0, 1)),\n ((8, 16, 2), (1, 2)),\n ((8, 16, 2), (0,)),\n ((8, 16, 2), (1,)),\n ((8, 16, 2), (2,)),\n ((8, 16, 2), None),\n ((8, 16, 2), (0, 1, 2))])\n def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):\n overwritable = (np.complex128, np.complex64)\n self._check_nd_one(fftn, dtype, shape, axes, overwritable,\n overwrite_x)\n self._check_nd_one(ifftn, dtype, shape, axes, overwritable,\n overwrite_x)\n", "from __future__ import division, absolute_import, print_function\n\nimport sys\nimport warnings\nimport functools\nimport operator\nimport pytest\n\nimport numpy as np\nfrom numpy.core._multiarray_tests import array_indexing\nfrom itertools import product\nfrom numpy.testing import (\n assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,\n HAS_REFCOUNT, suppress_warnings,\n )\n\n\nclass TestIndexing(object):\n def test_index_no_floats(self):\n a = np.array([[[5]]])\n\n assert_raises(IndexError, lambda: a[0.0])\n assert_raises(IndexError, lambda: a[0, 0.0])\n assert_raises(IndexError, lambda: a[0.0, 0])\n assert_raises(IndexError, lambda: a[0.0,:])\n assert_raises(IndexError, lambda: a[:, 0.0])\n assert_raises(IndexError, lambda: a[:, 0.0,:])\n assert_raises(IndexError, lambda: a[0.0,:,:])\n assert_raises(IndexError, lambda: a[0, 0, 0.0])\n assert_raises(IndexError, lambda: a[0.0, 0, 0])\n assert_raises(IndexError, lambda: a[0, 0.0, 0])\n assert_raises(IndexError, lambda: a[-1.4])\n assert_raises(IndexError, 
lambda: a[0, -1.4])\n assert_raises(IndexError, lambda: a[-1.4, 0])\n assert_raises(IndexError, lambda: a[-1.4,:])\n assert_raises(IndexError, lambda: a[:, -1.4])\n assert_raises(IndexError, lambda: a[:, -1.4,:])\n assert_raises(IndexError, lambda: a[-1.4,:,:])\n assert_raises(IndexError, lambda: a[0, 0, -1.4])\n assert_raises(IndexError, lambda: a[-1.4, 0, 0])\n assert_raises(IndexError, lambda: a[0, -1.4, 0])\n assert_raises(IndexError, lambda: a[0.0:, 0.0])\n assert_raises(IndexError, lambda: a[0.0:, 0.0,:])\n\n def test_slicing_no_floats(self):\n a = np.array([[5]])\n\n # start as float.\n assert_raises(TypeError, lambda: a[0.0:])\n assert_raises(TypeError, lambda: a[0:, 0.0:2])\n assert_raises(TypeError, lambda: a[0.0::2, :0])\n assert_raises(TypeError, lambda: a[0.0:1:2,:])\n assert_raises(TypeError, lambda: a[:, 0.0:])\n # stop as float.\n assert_raises(TypeError, lambda: a[:0.0])\n assert_raises(TypeError, lambda: a[:0, 1:2.0])\n assert_raises(TypeError, lambda: a[:0.0:2, :0])\n assert_raises(TypeError, lambda: a[:0.0,:])\n assert_raises(TypeError, lambda: a[:, 0:4.0:2])\n # step as float.\n assert_raises(TypeError, lambda: a[::1.0])\n assert_raises(TypeError, lambda: a[0:, :2:2.0])\n assert_raises(TypeError, lambda: a[1::4.0, :0])\n assert_raises(TypeError, lambda: a[::5.0,:])\n assert_raises(TypeError, lambda: a[:, 0:4:2.0])\n # mixed.\n assert_raises(TypeError, lambda: a[1.0:2:2.0])\n assert_raises(TypeError, lambda: a[1.0::2.0])\n assert_raises(TypeError, lambda: a[0:, :2.0:2.0])\n assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])\n assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])\n assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])\n # should still get the DeprecationWarning if step = 0.\n assert_raises(TypeError, lambda: a[::0.0])\n\n def test_index_no_array_to_index(self):\n # No non-scalar arrays.\n a = np.array([[[1]]])\n\n assert_raises(TypeError, lambda: a[a:a:a])\n\n def test_none_index(self):\n # `None` index adds newaxis\n a = np.array([1, 2, 3])\n assert_equal(a[None], a[np.newaxis])\n assert_equal(a[None].ndim, a.ndim + 1)\n\n def test_empty_tuple_index(self):\n # Empty tuple index creates a view\n a = np.array([1, 2, 3])\n assert_equal(a[()], a)\n assert_(a[()].base is a)\n a = np.array(0)\n assert_(isinstance(a[()], np.int_))\n\n def test_void_scalar_empty_tuple(self):\n s = np.zeros((), dtype='V4')\n assert_equal(s[()].dtype, s.dtype)\n assert_equal(s[()], s)\n assert_equal(type(s[...]), np.ndarray)\n\n def test_same_kind_index_casting(self):\n # Indexes should be cast with same-kind and not safe, even if that\n # is somewhat unsafe. 
So test various different code paths.\n index = np.arange(5)\n u_index = index.astype(np.uintp)\n arr = np.arange(10)\n\n assert_array_equal(arr[index], arr[u_index])\n arr[u_index] = np.arange(5)\n assert_array_equal(arr, np.arange(10))\n\n arr = np.arange(10).reshape(5, 2)\n assert_array_equal(arr[index], arr[u_index])\n\n arr[u_index] = np.arange(5)[:,None]\n assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))\n\n arr = np.arange(25).reshape(5, 5)\n assert_array_equal(arr[u_index, u_index], arr[index, index])\n\n def test_empty_fancy_index(self):\n # Empty list index creates an empty array\n # with the same dtype (but with weird shape)\n a = np.array([1, 2, 3])\n assert_equal(a[[]], [])\n assert_equal(a[[]].dtype, a.dtype)\n\n b = np.array([], dtype=np.intp)\n assert_equal(a[[]], [])\n assert_equal(a[[]].dtype, a.dtype)\n\n b = np.array([])\n assert_raises(IndexError, a.__getitem__, b)\n\n def test_ellipsis_index(self):\n a = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n assert_(a[...] is not a)\n assert_equal(a[...], a)\n # `a[...]` was `a` in numpy <1.9.\n assert_(a[...].base is a)\n\n # Slicing with ellipsis can skip an\n # arbitrary number of dimensions\n assert_equal(a[0, ...], a[0])\n assert_equal(a[0, ...], a[0,:])\n assert_equal(a[..., 0], a[:, 0])\n\n # Slicing with ellipsis always results\n # in an array, not a scalar\n assert_equal(a[0, ..., 1], np.array(2))\n\n # Assignment with `(Ellipsis,)` on 0-d arrays\n b = np.array(1)\n b[(Ellipsis,)] = 2\n assert_equal(b, 2)\n\n def test_single_int_index(self):\n # Single integer index selects one row\n a = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n\n assert_equal(a[0], [1, 2, 3])\n assert_equal(a[-1], [7, 8, 9])\n\n # Index out of bounds produces IndexError\n assert_raises(IndexError, a.__getitem__, 1 << 30)\n # Index overflow produces IndexError\n assert_raises(IndexError, a.__getitem__, 1 << 64)\n\n def test_single_bool_index(self):\n # Single boolean index\n a = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n\n assert_equal(a[np.array(True)], a[None])\n assert_equal(a[np.array(False)], a[None][0:0])\n\n def test_boolean_shape_mismatch(self):\n arr = np.ones((5, 4, 3))\n\n index = np.array([True])\n assert_raises(IndexError, arr.__getitem__, index)\n\n index = np.array([False] * 6)\n assert_raises(IndexError, arr.__getitem__, index)\n\n index = np.zeros((4, 4), dtype=bool)\n assert_raises(IndexError, arr.__getitem__, index)\n\n assert_raises(IndexError, arr.__getitem__, (slice(None), index))\n\n def test_boolean_indexing_onedim(self):\n # Indexing a 2-dimensional array with\n # boolean array of length one\n a = np.array([[ 0., 0., 0.]])\n b = np.array([ True], dtype=bool)\n assert_equal(a[b], a)\n # boolean assignment\n a[b] = 1.\n assert_equal(a, [[1., 1., 1.]])\n\n def test_boolean_assignment_value_mismatch(self):\n # A boolean assignment should fail when the shape of the values\n # cannot be broadcast to the subscription. 
(see also gh-3458)\n a = np.arange(4)\n\n def f(a, v):\n a[a > -1] = v\n\n assert_raises(ValueError, f, a, [])\n assert_raises(ValueError, f, a, [1, 2, 3])\n assert_raises(ValueError, f, a[:1], [1, 2, 3])\n\n def test_boolean_assignment_needs_api(self):\n # See also gh-7666\n # This caused a segfault on Python 2 due to the GIL not being\n # held when the iterator does not need it, but the transfer function\n # does\n arr = np.zeros(1000)\n indx = np.zeros(1000, dtype=bool)\n indx[:100] = True\n arr[indx] = np.ones(100, dtype=object)\n\n expected = np.zeros(1000)\n expected[:100] = 1\n assert_array_equal(arr, expected)\n\n def test_boolean_indexing_twodim(self):\n # Indexing a 2-dimensional array with\n # 2-dimensional boolean array\n a = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n b = np.array([[ True, False, True],\n [False, True, False],\n [ True, False, True]])\n assert_equal(a[b], [1, 3, 5, 7, 9])\n assert_equal(a[b[1]], [[4, 5, 6]])\n assert_equal(a[b[0]], a[b[2]])\n\n # boolean assignment\n a[b] = 0\n assert_equal(a, [[0, 2, 0],\n [4, 0, 6],\n [0, 8, 0]])\n\n def test_boolean_indexing_list(self):\n # Regression test for #13715. It's a use-after-free bug which the\n # test won't directly catch, but it will show up in valgrind.\n a = np.array([1, 2, 3])\n b = [True, False, True]\n # Two variants of the test because the first takes a fast path\n assert_equal(a[b], [1, 3])\n assert_equal(a[None, b], [[1, 3]])\n\n def test_reverse_strides_and_subspace_bufferinit(self):\n # This tests that the strides are not reversed for simple and\n # subspace fancy indexing.\n a = np.ones(5)\n b = np.zeros(5, dtype=np.intp)[::-1]\n c = np.arange(5)[::-1]\n\n a[b] = c\n # If the strides are not reversed, the 0 in the arange comes last.\n assert_equal(a[0], 0)\n\n # This also tests that the subspace buffer is initialized:\n a = np.ones((5, 2))\n c = np.arange(10).reshape(5, 2)[::-1]\n a[b, :] = c\n assert_equal(a[0], [0, 1])\n\n def test_reversed_strides_result_allocation(self):\n # Test a bug when calculating the output strides for a result array\n # when the subspace size was 1 (and test other cases as well)\n a = np.arange(10)[:, None]\n i = np.arange(10)[::-1]\n assert_array_equal(a[i], a[i.copy('C')])\n\n a = np.arange(20).reshape(-1, 2)\n\n def test_uncontiguous_subspace_assignment(self):\n # During development there was a bug activating a skip logic\n # based on ndim instead of size.\n a = np.full((3, 4, 2), -1)\n b = np.full((3, 4, 2), -1)\n\n a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T\n b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()\n\n assert_equal(a, b)\n\n def test_too_many_fancy_indices_special_case(self):\n # Just documents behaviour, this is a small limitation.\n a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS\n assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)\n\n def test_scalar_array_bool(self):\n # NumPy bools can be used as boolean index (python ones as of yet not)\n a = np.array(1)\n assert_equal(a[np.bool_(True)], a[np.array(True)])\n assert_equal(a[np.bool_(False)], a[np.array(False)])\n\n # After deprecating bools as integers:\n #a = np.array([0,1,2])\n #assert_equal(a[True, :], a[None, :])\n #assert_equal(a[:, True], a[:, None])\n #\n #assert_(not np.may_share_memory(a, a[True, :]))\n\n def test_everything_returns_views(self):\n # Before `...` would return a itself.\n a = np.arange(5)\n\n assert_(a is not a[()])\n assert_(a is not a[...])\n assert_(a is not a[:])\n\n def test_broaderrors_indexing(self):\n a = np.zeros((5, 5))\n 
assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))\n assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)\n\n def test_trivial_fancy_out_of_bounds(self):\n a = np.zeros(5)\n ind = np.ones(20, dtype=np.intp)\n ind[-1] = 10\n assert_raises(IndexError, a.__getitem__, ind)\n assert_raises(IndexError, a.__setitem__, ind, 0)\n ind = np.ones(20, dtype=np.intp)\n ind[0] = 11\n assert_raises(IndexError, a.__getitem__, ind)\n assert_raises(IndexError, a.__setitem__, ind, 0)\n\n def test_trivial_fancy_not_possible(self):\n # Test that the fast path for trivial assignment is not incorrectly\n # used when the index is not contiguous or 1D, see also gh-11467.\n a = np.arange(6)\n idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]\n assert_array_equal(a[idx], idx)\n\n # this case must not go into the fast path, note that idx is\n # a non-contiuguous none 1D array here.\n a[idx] = -1\n res = np.arange(6)\n res[0] = -1\n res[3] = -1\n assert_array_equal(a, res)\n\n def test_nonbaseclass_values(self):\n class SubClass(np.ndarray):\n def __array_finalize__(self, old):\n # Have array finalize do funny things\n self.fill(99)\n\n a = np.zeros((5, 5))\n s = a.copy().view(type=SubClass)\n s.fill(1)\n\n a[[0, 1, 2, 3, 4], :] = s\n assert_((a == 1).all())\n\n # Subspace is last, so transposing might want to finalize\n a[:, [0, 1, 2, 3, 4]] = s\n assert_((a == 1).all())\n\n a.fill(0)\n a[...] = s\n assert_((a == 1).all())\n\n def test_subclass_writeable(self):\n d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],\n dtype=[('target', 'S20'), ('V_mag', '>f4')])\n ind = np.array([False, True, True], dtype=bool)\n assert_(d[ind].flags.writeable)\n ind = np.array([0, 1])\n assert_(d[ind].flags.writeable)\n assert_(d[...].flags.writeable)\n assert_(d[0].flags.writeable)\n\n def test_memory_order(self):\n # This is not necessary to preserve. 
Memory layouts for\n # more complex indices are not as simple.\n a = np.arange(10)\n b = np.arange(10).reshape(5,2).T\n assert_(a[b].flags.f_contiguous)\n\n # Takes a different implementation branch:\n a = a.reshape(-1, 1)\n assert_(a[b, 0].flags.f_contiguous)\n\n def test_scalar_return_type(self):\n # Full scalar indices should return scalars and object\n # arrays should not call PyArray_Return on their items\n class Zero(object):\n # The most basic valid indexing\n def __index__(self):\n return 0\n\n z = Zero()\n\n class ArrayLike(object):\n # Simple array, should behave like the array\n def __array__(self):\n return np.array(0)\n\n a = np.zeros(())\n assert_(isinstance(a[()], np.float_))\n a = np.zeros(1)\n assert_(isinstance(a[z], np.float_))\n a = np.zeros((1, 1))\n assert_(isinstance(a[z, np.array(0)], np.float_))\n assert_(isinstance(a[z, ArrayLike()], np.float_))\n\n # And object arrays do not call it too often:\n b = np.array(0)\n a = np.array(0, dtype=object)\n a[()] = b\n assert_(isinstance(a[()], np.ndarray))\n a = np.array([b, None])\n assert_(isinstance(a[z], np.ndarray))\n a = np.array([[b, None]])\n assert_(isinstance(a[z, np.array(0)], np.ndarray))\n assert_(isinstance(a[z, ArrayLike()], np.ndarray))\n\n def test_small_regressions(self):\n # Reference count of intp for index checks\n a = np.array([0])\n if HAS_REFCOUNT:\n refcount = sys.getrefcount(np.dtype(np.intp))\n # item setting always checks indices in separate function:\n a[np.array([0], dtype=np.intp)] = 1\n a[np.array([0], dtype=np.uint8)] = 1\n assert_raises(IndexError, a.__setitem__,\n np.array([1], dtype=np.intp), 1)\n assert_raises(IndexError, a.__setitem__,\n np.array([1], dtype=np.uint8), 1)\n\n if HAS_REFCOUNT:\n assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)\n\n def test_unaligned(self):\n v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]\n d = v.view(np.dtype(\"S8\"))\n # unaligned source\n x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]\n x = x.view(np.dtype(\"S8\"))\n x[...] = np.array(\"b\" * 8, dtype=\"S\")\n b = np.arange(d.size)\n #trivial\n assert_equal(d[b], d)\n d[b] = x\n # nontrivial\n # unaligned index array\n b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]\n b = b.view(np.intp)[:d.size]\n b[...] 
= np.arange(d.size)\n assert_equal(d[b.astype(np.int16)], d)\n d[b.astype(np.int16)] = x\n # boolean\n d[b % 2 == 0]\n d[b % 2 == 0] = x[::2]\n\n def test_tuple_subclass(self):\n arr = np.ones((5, 5))\n\n # A tuple subclass should also be an nd-index\n class TupleSubclass(tuple):\n pass\n index = ([1], [1])\n index = TupleSubclass(index)\n assert_(arr[index].shape == (1,))\n # Unlike the non nd-index:\n assert_(arr[index,].shape != (1,))\n\n def test_broken_sequence_not_nd_index(self):\n # See gh-5063:\n # If we have an object which claims to be a sequence, but fails\n # on item getting, this should not be converted to an nd-index (tuple)\n # If this object happens to be a valid index otherwise, it should work\n # This object here is very dubious and probably bad though:\n class SequenceLike(object):\n def __index__(self):\n return 0\n\n def __len__(self):\n return 1\n\n def __getitem__(self, item):\n raise IndexError('Not possible')\n\n arr = np.arange(10)\n assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])\n\n # also test that field indexing does not segfault\n # for a similar reason, by indexing a structured array\n arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])\n assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])\n\n def test_indexing_array_weird_strides(self):\n # See also gh-6221\n # the shapes used here come from the issue and create the correct\n # size for the iterator buffering size.\n x = np.ones(10)\n x2 = np.ones((10, 2))\n ind = np.arange(10)[:, None, None, None]\n ind = np.broadcast_to(ind, (10, 55, 4, 4))\n\n # single advanced index case\n assert_array_equal(x[ind], x[ind.copy()])\n # higher dimensional advanced index\n zind = np.zeros(4, dtype=np.intp)\n assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])\n\n def test_indexing_array_negative_strides(self):\n # From gh-8264,\n # core dumps if negative strides are used in iteration\n arro = np.zeros((4, 4))\n arr = arro[::-1, ::-1]\n\n slices = (slice(None), [0, 1, 2, 3])\n arr[slices] = 10\n assert_array_equal(arr, 10.)\n\nclass TestFieldIndexing(object):\n def test_scalar_return_type(self):\n # Field access on an array should return an array, even if it\n # is 0-d.\n a = np.zeros((), [('a','f8')])\n assert_(isinstance(a['a'], np.ndarray))\n assert_(isinstance(a[['a']], np.ndarray))\n\n\nclass TestBroadcastedAssignments(object):\n def assign(self, a, ind, val):\n a[ind] = val\n return a\n\n def test_prepending_ones(self):\n a = np.zeros((3, 2))\n\n a[...] 
= np.ones((1, 3, 2))\n # Fancy with subspace with and without transpose\n a[[0, 1, 2], :] = np.ones((1, 3, 2))\n a[:, [0, 1]] = np.ones((1, 3, 2))\n # Fancy without subspace (with broadcasting)\n a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))\n\n def test_prepend_not_one(self):\n assign = self.assign\n s_ = np.s_\n a = np.zeros(5)\n\n # Too large and not only ones.\n assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))\n assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))\n assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))\n\n def test_simple_broadcasting_errors(self):\n assign = self.assign\n s_ = np.s_\n a = np.zeros((5, 1))\n\n assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))\n assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))\n assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))\n assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))\n assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))\n\n def test_index_is_larger(self):\n # Simple case of fancy index broadcasting of the index.\n a = np.zeros((5, 5))\n a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]\n\n assert_((a[:3, :3] == [2, 3, 4]).all())\n\n def test_broadcast_subspace(self):\n a = np.zeros((100, 100))\n v = np.arange(100)[:,None]\n b = np.arange(100)[::-1]\n a[b] = v\n assert_((a[::-1] == v).all())\n\n\nclass TestSubclasses(object):\n def test_basic(self):\n # Test that indexing in various ways produces SubClass instances,\n # and that the base is set up correctly: the original subclass\n # instance for views, and a new ndarray for advanced/boolean indexing\n # where a copy was made (latter a regression test for gh-11983).\n class SubClass(np.ndarray):\n pass\n\n a = np.arange(5)\n s = a.view(SubClass)\n s_slice = s[:3]\n assert_(type(s_slice) is SubClass)\n assert_(s_slice.base is s)\n assert_array_equal(s_slice, a[:3])\n\n s_fancy = s[[0, 1, 2]]\n assert_(type(s_fancy) is SubClass)\n assert_(s_fancy.base is not s)\n assert_(type(s_fancy.base) is np.ndarray)\n assert_array_equal(s_fancy, a[[0, 1, 2]])\n assert_array_equal(s_fancy.base, a[[0, 1, 2]])\n\n s_bool = s[s > 0]\n assert_(type(s_bool) is SubClass)\n assert_(s_bool.base is not s)\n assert_(type(s_bool.base) is np.ndarray)\n assert_array_equal(s_bool, a[a > 0])\n assert_array_equal(s_bool.base, a[a > 0])\n\n def test_finalize_gets_full_info(self):\n # Array finalize should be called on the filled array.\n class SubClass(np.ndarray):\n def __array_finalize__(self, old):\n self.finalize_status = np.array(self)\n self.old = old\n\n s = np.arange(10).view(SubClass)\n new_s = s[:3]\n assert_array_equal(new_s.finalize_status, new_s)\n assert_array_equal(new_s.old, s)\n\n new_s = s[[0,1,2,3]]\n assert_array_equal(new_s.finalize_status, new_s)\n assert_array_equal(new_s.old, s)\n\n new_s = s[s > 0]\n assert_array_equal(new_s.finalize_status, new_s)\n assert_array_equal(new_s.old, s)\n\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\n def test_slice_decref_getsetslice(self):\n # See gh-10066, a temporary slice object should be discarted.\n # This test is only really interesting on Python 2 since\n # it goes through `__set/getslice__` here and can probably be\n # removed. 
Use 0:7 to make sure it is never None:7.\n class KeepIndexObject(np.ndarray):\n def __getitem__(self, indx):\n self.indx = indx\n if indx == slice(0, 7):\n raise ValueError\n\n def __setitem__(self, indx, val):\n self.indx = indx\n if indx == slice(0, 4):\n raise ValueError\n\n k = np.array([1]).view(KeepIndexObject)\n k[0:5]\n assert_equal(k.indx, slice(0, 5))\n assert_equal(sys.getrefcount(k.indx), 2)\n try:\n k[0:7]\n raise AssertionError\n except ValueError:\n # The exception holds a reference to the slice so clear on Py2\n if hasattr(sys, 'exc_clear'):\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning)\n sys.exc_clear()\n assert_equal(k.indx, slice(0, 7))\n assert_equal(sys.getrefcount(k.indx), 2)\n\n k[0:3] = 6\n assert_equal(k.indx, slice(0, 3))\n assert_equal(sys.getrefcount(k.indx), 2)\n try:\n k[0:4] = 2\n raise AssertionError\n except ValueError:\n # The exception holds a reference to the slice so clear on Py2\n if hasattr(sys, 'exc_clear'):\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning)\n sys.exc_clear()\n assert_equal(k.indx, slice(0, 4))\n assert_equal(sys.getrefcount(k.indx), 2)\n\n\nclass TestFancyIndexingCast(object):\n def test_boolean_index_cast_assign(self):\n # Setup the boolean index and float arrays.\n shape = (8, 63)\n bool_index = np.zeros(shape).astype(bool)\n bool_index[0, 1] = True\n zero_array = np.zeros(shape)\n\n # Assigning float is fine.\n zero_array[bool_index] = np.array([1])\n assert_equal(zero_array[0, 1], 1)\n\n # Fancy indexing works, although we get a cast warning.\n assert_warns(np.ComplexWarning,\n zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))\n assert_equal(zero_array[0, 1], 2) # No complex part\n\n # Cast complex to float, throwing away the imaginary portion.\n assert_warns(np.ComplexWarning,\n zero_array.__setitem__, bool_index, np.array([1j]))\n assert_equal(zero_array[0, 1], 0)\n\nclass TestFancyIndexingEquivalence(object):\n def test_object_assign(self):\n # Check that the field and object special case using copyto is active.\n # The right hand side cannot be converted to an array here.\n a = np.arange(5, dtype=object)\n b = a.copy()\n a[:3] = [1, (1,2), 3]\n b[[0, 1, 2]] = [1, (1,2), 3]\n assert_array_equal(a, b)\n\n # test same for subspace fancy indexing\n b = np.arange(5, dtype=object)[None, :]\n b[[0], :3] = [[1, (1,2), 3]]\n assert_array_equal(a, b[0])\n\n # Check that swapping of axes works.\n # There was a bug that made the later assignment throw a ValueError\n # do to an incorrectly transposed temporary right hand side (gh-5714)\n b = b.T\n b[:3, [0]] = [[1], [(1,2)], [3]]\n assert_array_equal(a, b[:, 0])\n\n # Another test for the memory order of the subspace\n arr = np.ones((3, 4, 5), dtype=object)\n # Equivalent slicing assignment for comparison\n cmp_arr = arr.copy()\n cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]\n arr[[0], ...] = [[[1], [2], [3], [4]]]\n assert_array_equal(arr, cmp_arr)\n arr = arr.copy('F')\n arr[[0], ...] 
= [[[1], [2], [3], [4]]]\n assert_array_equal(arr, cmp_arr)\n\n def test_cast_equivalence(self):\n # Yes, normal slicing uses unsafe casting.\n a = np.arange(5)\n b = a.copy()\n\n a[:3] = np.array(['2', '-3', '-1'])\n b[[0, 2, 1]] = np.array(['2', '-1', '-3'])\n assert_array_equal(a, b)\n\n # test the same for subspace fancy indexing\n b = np.arange(5)[None, :]\n b[[0], :3] = np.array([['2', '-3', '-1']])\n assert_array_equal(a, b[0])\n\n\nclass TestMultiIndexingAutomated(object):\n \"\"\"\n These tests use code to mimic the C-Code indexing for selection.\n\n NOTE:\n\n * This still lacks tests for complex item setting.\n * If you change behavior of indexing, you might want to modify\n these tests to try more combinations.\n * Behavior was written to match numpy version 1.8. (though a\n first version matched 1.7.)\n * Only tuple indices are supported by the mimicking code.\n (and tested as of writing this)\n * Error types should match most of the time as long as there\n is only one error. For multiple errors, what gets raised\n will usually not be the same one. They are *not* tested.\n\n Update 2016-11-30: It is probably not worth maintaining this test\n indefinitely and it can be dropped if maintenance becomes a burden.\n\n \"\"\"\n\n def setup(self):\n self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)\n self.b = np.empty((3, 0, 5, 6))\n self.complex_indices = ['skip', Ellipsis,\n 0,\n # Boolean indices, up to 3-d for some special cases of eating up\n # dimensions, also need to test all False\n np.array([True, False, False]),\n np.array([[True, False], [False, True]]),\n np.array([[[False, False], [False, False]]]),\n # Some slices:\n slice(-5, 5, 2),\n slice(1, 1, 100),\n slice(4, -1, -2),\n slice(None, None, -3),\n # Some Fancy indexes:\n np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast\n np.array([0, 1, -2]),\n np.array([[2], [0], [1]]),\n np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),\n np.array([2, -1], dtype=np.int8),\n np.zeros([1]*31, dtype=int), # trigger too large array.\n np.array([0., 1.])] # invalid datatype\n # Some simpler indices that still cover a bit more\n self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),\n 'skip']\n # Very simple ones to fill the rest:\n self.fill_indices = [slice(None, None), 0]\n\n def _get_multi_index(self, arr, indices):\n \"\"\"Mimic multi dimensional indexing.\n\n Parameters\n ----------\n arr : ndarray\n Array to be indexed.\n indices : tuple of index objects\n\n Returns\n -------\n out : ndarray\n An array equivalent to the indexing operation (but always a copy).\n `arr[indices]` should be identical.\n no_copy : bool\n Whether the indexing operation requires a copy. 
If this is `True`,\n `np.may_share_memory(arr, arr[indices])` should be `True` (with\n some exceptions for scalars and possibly 0-d arrays).\n\n Notes\n -----\n While the function may mostly match the errors of normal indexing this\n is generally not the case.\n \"\"\"\n in_indices = list(indices)\n indices = []\n # if False, this is a fancy or boolean index\n no_copy = True\n # number of fancy/scalar indexes that are not consecutive\n num_fancy = 0\n # number of dimensions indexed by a \"fancy\" index\n fancy_dim = 0\n # NOTE: This is a funny twist (and probably OK to change).\n # The boolean array has illegal indexes, but this is\n # allowed if the broadcast fancy-indices are 0-sized.\n # This variable is to catch that case.\n error_unless_broadcast_to_empty = False\n\n # We need to handle Ellipsis and make arrays from indices, also\n # check if this is fancy indexing (set no_copy).\n ndim = 0\n ellipsis_pos = None # define here mostly to replace all but first.\n for i, indx in enumerate(in_indices):\n if indx is None:\n continue\n if isinstance(indx, np.ndarray) and indx.dtype == bool:\n no_copy = False\n if indx.ndim == 0:\n raise IndexError\n # boolean indices can have higher dimensions\n ndim += indx.ndim\n fancy_dim += indx.ndim\n continue\n if indx is Ellipsis:\n if ellipsis_pos is None:\n ellipsis_pos = i\n continue # do not increment ndim counter\n raise IndexError\n if isinstance(indx, slice):\n ndim += 1\n continue\n if not isinstance(indx, np.ndarray):\n # This could be open for changes in numpy.\n # numpy should maybe raise an error if casting to intp\n # is not safe. It rejects np.array([1., 2.]) but not\n # [1., 2.] as index (same for ie. np.take).\n # (Note the importance of empty lists if changing this here)\n try:\n indx = np.array(indx, dtype=np.intp)\n except ValueError:\n raise IndexError\n in_indices[i] = indx\n elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':\n raise IndexError('arrays used as indices must be of '\n 'integer (or boolean) type')\n if indx.ndim != 0:\n no_copy = False\n ndim += 1\n fancy_dim += 1\n\n if arr.ndim - ndim < 0:\n # we can't take more dimensions then we have, not even for 0-d\n # arrays. since a[()] makes sense, but not a[(),]. We will\n # raise an error later on, unless a broadcasting error occurs\n # first.\n raise IndexError\n\n if ndim == 0 and None not in in_indices:\n # Well we have no indexes or one Ellipsis. 
This is legal.\n return arr.copy(), no_copy\n\n if ellipsis_pos is not None:\n in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *\n (arr.ndim - ndim))\n\n for ax, indx in enumerate(in_indices):\n if isinstance(indx, slice):\n # convert to an index array\n indx = np.arange(*indx.indices(arr.shape[ax]))\n indices.append(['s', indx])\n continue\n elif indx is None:\n # this is like taking a slice with one element from a new axis:\n indices.append(['n', np.array([0], dtype=np.intp)])\n arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))\n continue\n if isinstance(indx, np.ndarray) and indx.dtype == bool:\n if indx.shape != arr.shape[ax:ax+indx.ndim]:\n raise IndexError\n\n try:\n flat_indx = np.ravel_multi_index(np.nonzero(indx),\n arr.shape[ax:ax+indx.ndim], mode='raise')\n except Exception:\n error_unless_broadcast_to_empty = True\n # fill with 0s instead, and raise error later\n flat_indx = np.array([0]*indx.sum(), dtype=np.intp)\n # concatenate axis into a single one:\n if indx.ndim != 0:\n arr = arr.reshape((arr.shape[:ax]\n + (np.prod(arr.shape[ax:ax+indx.ndim]),)\n + arr.shape[ax+indx.ndim:]))\n indx = flat_indx\n else:\n # This could be changed, a 0-d boolean index can\n # make sense (even outside the 0-d indexed array case)\n # Note that originally this is could be interpreted as\n # integer in the full integer special case.\n raise IndexError\n else:\n # If the index is a singleton, the bounds check is done\n # before the broadcasting. This used to be different in <1.9\n if indx.ndim == 0:\n if indx >= arr.shape[ax] or indx < -arr.shape[ax]:\n raise IndexError\n if indx.ndim == 0:\n # The index is a scalar. This used to be two fold, but if\n # fancy indexing was active, the check was done later,\n # possibly after broadcasting it away (1.7. or earlier).\n # Now it is always done.\n if indx >= arr.shape[ax] or indx < - arr.shape[ax]:\n raise IndexError\n if (len(indices) > 0 and\n indices[-1][0] == 'f' and\n ax != ellipsis_pos):\n # NOTE: There could still have been a 0-sized Ellipsis\n # between them. Checked that with ellipsis_pos.\n indices[-1].append(indx)\n else:\n # We have a fancy index that is not after an existing one.\n # NOTE: A 0-d array triggers this as well, while one may\n # expect it to not trigger it, since a scalar would not be\n # considered fancy indexing.\n num_fancy += 1\n indices.append(['f', indx])\n\n if num_fancy > 1 and not no_copy:\n # We have to flush the fancy indexes left\n new_indices = indices[:]\n axes = list(range(arr.ndim))\n fancy_axes = []\n new_indices.insert(0, ['f'])\n ni = 0\n ai = 0\n for indx in indices:\n ni += 1\n if indx[0] == 'f':\n new_indices[0].extend(indx[1:])\n del new_indices[ni]\n ni -= 1\n for ax in range(ai, ai + len(indx[1:])):\n fancy_axes.append(ax)\n axes.remove(ax)\n ai += len(indx) - 1 # axis we are at\n indices = new_indices\n # and now we need to transpose arr:\n arr = arr.transpose(*(fancy_axes + axes))\n\n # We only have one 'f' index now and arr is transposed accordingly.\n # Now handle newaxis by reshaping...\n ax = 0\n for indx in indices:\n if indx[0] == 'f':\n if len(indx) == 1:\n continue\n # First of all, reshape arr to combine fancy axes into one:\n orig_shape = arr.shape\n orig_slice = orig_shape[ax:ax + len(indx[1:])]\n arr = arr.reshape((arr.shape[:ax]\n + (np.prod(orig_slice).astype(int),)\n + arr.shape[ax + len(indx[1:]):]))\n\n # Check if broadcasting works\n res = np.broadcast(*indx[1:])\n # unfortunately the indices might be out of bounds. 
So check\n # that first, and use mode='wrap' then. However only if\n # there are any indices...\n if res.size != 0:\n if error_unless_broadcast_to_empty:\n raise IndexError\n for _indx, _size in zip(indx[1:], orig_slice):\n if _indx.size == 0:\n continue\n if np.any(_indx >= _size) or np.any(_indx < -_size):\n raise IndexError\n if len(indx[1:]) == len(orig_slice):\n if np.product(orig_slice) == 0:\n # Work around for a crash or IndexError with 'wrap'\n # in some 0-sized cases.\n try:\n mi = np.ravel_multi_index(indx[1:], orig_slice,\n mode='raise')\n except Exception:\n # This happens with 0-sized orig_slice (sometimes?)\n # here it is a ValueError, but indexing gives a:\n raise IndexError('invalid index into 0-sized')\n else:\n mi = np.ravel_multi_index(indx[1:], orig_slice,\n mode='wrap')\n else:\n # Maybe never happens...\n raise ValueError\n arr = arr.take(mi.ravel(), axis=ax)\n try:\n arr = arr.reshape((arr.shape[:ax]\n + mi.shape\n + arr.shape[ax+1:]))\n except ValueError:\n # too many dimensions, probably\n raise IndexError\n ax += mi.ndim\n continue\n\n # If we are here, we have a 1D array for take:\n arr = arr.take(indx[1], axis=ax)\n ax += 1\n\n return arr, no_copy\n\n def _check_multi_index(self, arr, index):\n \"\"\"Check a multi index item getting and simple setting.\n\n Parameters\n ----------\n arr : ndarray\n Array to be indexed, must be a reshaped arange.\n index : tuple of indexing objects\n Index being tested.\n \"\"\"\n # Test item getting\n try:\n mimic_get, no_copy = self._get_multi_index(arr, index)\n except Exception as e:\n if HAS_REFCOUNT:\n prev_refcount = sys.getrefcount(arr)\n assert_raises(type(e), arr.__getitem__, index)\n assert_raises(type(e), arr.__setitem__, index, 0)\n if HAS_REFCOUNT:\n assert_equal(prev_refcount, sys.getrefcount(arr))\n return\n\n self._compare_index_result(arr, index, mimic_get, no_copy)\n\n def _check_single_index(self, arr, index):\n \"\"\"Check a single index item getting and simple setting.\n\n Parameters\n ----------\n arr : ndarray\n Array to be indexed, must be an arange.\n index : indexing object\n Index being tested. 
Must be a single index and not a tuple\n of indexing objects (see also `_check_multi_index`).\n \"\"\"\n try:\n mimic_get, no_copy = self._get_multi_index(arr, (index,))\n except Exception as e:\n if HAS_REFCOUNT:\n prev_refcount = sys.getrefcount(arr)\n assert_raises(type(e), arr.__getitem__, index)\n assert_raises(type(e), arr.__setitem__, index, 0)\n if HAS_REFCOUNT:\n assert_equal(prev_refcount, sys.getrefcount(arr))\n return\n\n self._compare_index_result(arr, index, mimic_get, no_copy)\n\n def _compare_index_result(self, arr, index, mimic_get, no_copy):\n \"\"\"Compare mimicked result to indexing result.\n \"\"\"\n arr = arr.copy()\n indexed_arr = arr[index]\n assert_array_equal(indexed_arr, mimic_get)\n # Check if we got a view, unless its a 0-sized or 0-d array.\n # (then its not a view, and that does not matter)\n if indexed_arr.size != 0 and indexed_arr.ndim != 0:\n assert_(np.may_share_memory(indexed_arr, arr) == no_copy)\n # Check reference count of the original array\n if HAS_REFCOUNT:\n if no_copy:\n # refcount increases by one:\n assert_equal(sys.getrefcount(arr), 3)\n else:\n assert_equal(sys.getrefcount(arr), 2)\n\n # Test non-broadcast setitem:\n b = arr.copy()\n b[index] = mimic_get + 1000\n if b.size == 0:\n return # nothing to compare here...\n if no_copy and indexed_arr.ndim != 0:\n # change indexed_arr in-place to manipulate original:\n indexed_arr += 1000\n assert_array_equal(arr, b)\n return\n # Use the fact that the array is originally an arange:\n arr.flat[indexed_arr.ravel()] += 1000\n assert_array_equal(arr, b)\n\n def test_boolean(self):\n a = np.array(5)\n assert_equal(a[np.array(True)], 5)\n a[np.array(True)] = 1\n assert_equal(a, 1)\n # NOTE: This is different from normal broadcasting, as\n # arr[boolean_array] works like in a multi index. Which means\n # it is aligned to the left. 
This is probably correct for\n # consistency with arr[boolean_array,] also no broadcasting\n # is done at all\n self._check_multi_index(\n self.a, (np.zeros_like(self.a, dtype=bool),))\n self._check_multi_index(\n self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))\n self._check_multi_index(\n self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))\n\n def test_multidim(self):\n # Automatically test combinations with complex indexes on 2nd (or 1st)\n # spot and the simple ones in one other spot.\n with warnings.catch_warnings():\n # This is so that np.array(True) is not accepted in a full integer\n # index, when running the file separately.\n warnings.filterwarnings('error', '', DeprecationWarning)\n warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)\n\n def isskip(idx):\n return isinstance(idx, str) and idx == \"skip\"\n\n for simple_pos in [0, 2, 3]:\n tocheck = [self.fill_indices, self.complex_indices,\n self.fill_indices, self.fill_indices]\n tocheck[simple_pos] = self.simple_indices\n for index in product(*tocheck):\n index = tuple(i for i in index if not isskip(i))\n self._check_multi_index(self.a, index)\n self._check_multi_index(self.b, index)\n\n # Check very simple item getting:\n self._check_multi_index(self.a, (0, 0, 0, 0))\n self._check_multi_index(self.b, (0, 0, 0, 0))\n # Also check (simple cases of) too many indices:\n assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))\n assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)\n assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))\n assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)\n\n def test_1d(self):\n a = np.arange(10)\n for index in self.complex_indices:\n self._check_single_index(a, index)\n\nclass TestFloatNonIntegerArgument(object):\n \"\"\"\n These test that ``TypeError`` is raised when you try to use\n non-integers as arguments to for indexing and slicing e.g. 
``a[0.0:5]``\n and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.\n\n \"\"\"\n def test_valid_indexing(self):\n # These should raise no errors.\n a = np.array([[[5]]])\n\n a[np.array([0])]\n a[[0, 0]]\n a[:, [0, 0]]\n a[:, 0,:]\n a[:,:,:]\n\n def test_valid_slicing(self):\n # These should raise no errors.\n a = np.array([[[5]]])\n\n a[::]\n a[0:]\n a[:2]\n a[0:2]\n a[::2]\n a[1::2]\n a[:2:2]\n a[1:2:2]\n\n def test_non_integer_argument_errors(self):\n a = np.array([[5]])\n\n assert_raises(TypeError, np.reshape, a, (1., 1., -1))\n assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))\n assert_raises(TypeError, np.take, a, [0], 1.)\n assert_raises(TypeError, np.take, a, [0], np.float64(1.))\n\n def test_non_integer_sequence_multiplication(self):\n # NumPy scalar sequence multiply should not work with non-integers\n def mult(a, b):\n return a * b\n\n assert_raises(TypeError, mult, [1], np.float_(3))\n # following should be OK\n mult([1], np.int_(3))\n\n def test_reduce_axis_float_index(self):\n d = np.zeros((3,3,3))\n assert_raises(TypeError, np.min, d, 0.5)\n assert_raises(TypeError, np.min, d, (0.5, 1))\n assert_raises(TypeError, np.min, d, (1, 2.2))\n assert_raises(TypeError, np.min, d, (.2, 1.2))\n\n\nclass TestBooleanIndexing(object):\n # Using a boolean as integer argument/indexing is an error.\n def test_bool_as_int_argument_errors(self):\n a = np.array([[[1]]])\n\n assert_raises(TypeError, np.reshape, a, (True, -1))\n assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))\n # Note that operator.index(np.array(True)) does not work, a boolean\n # array is thus also deprecated, but not with the same message:\n assert_raises(TypeError, operator.index, np.array(True))\n assert_warns(DeprecationWarning, operator.index, np.True_)\n assert_raises(TypeError, np.take, args=(a, [0], False))\n\n def test_boolean_indexing_weirdness(self):\n # Weird boolean indexing things\n a = np.ones((2, 3, 4))\n a[False, True, ...].shape == (0, 2, 3, 4)\n a[True, [0, 1], True, True, [1], [[2]]] == (1, 2)\n assert_raises(IndexError, lambda: a[False, [0, 1], ...])\n\n\nclass TestArrayToIndexDeprecation(object):\n \"\"\"Creating an an index from array not 0-D is an error.\n\n \"\"\"\n def test_array_to_index_error(self):\n # so no exception is expected. 
The raising is effectively tested above.\n a = np.array([[[1]]])\n\n assert_raises(TypeError, operator.index, np.array([1]))\n assert_raises(TypeError, np.reshape, a, (a, -1))\n assert_raises(TypeError, np.take, a, [0], a)\n\n\nclass TestNonIntegerArrayLike(object):\n \"\"\"Tests that array_likes only valid if can safely cast to integer.\n\n For instance, lists give IndexError when they cannot be safely cast to\n an integer.\n\n \"\"\"\n def test_basic(self):\n a = np.arange(10)\n\n assert_raises(IndexError, a.__getitem__, [0.5, 1.5])\n assert_raises(IndexError, a.__getitem__, (['1', '2'],))\n\n # The following is valid\n a.__getitem__([])\n\n\nclass TestMultipleEllipsisError(object):\n \"\"\"An index can only have a single ellipsis.\n\n \"\"\"\n def test_basic(self):\n a = np.arange(10)\n assert_raises(IndexError, lambda: a[..., ...])\n assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))\n assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))\n\n\nclass TestCApiAccess(object):\n def test_getitem(self):\n subscript = functools.partial(array_indexing, 0)\n\n # 0-d arrays don't work:\n assert_raises(IndexError, subscript, np.ones(()), 0)\n # Out of bound values:\n assert_raises(IndexError, subscript, np.ones(10), 11)\n assert_raises(IndexError, subscript, np.ones(10), -11)\n assert_raises(IndexError, subscript, np.ones((10, 10)), 11)\n assert_raises(IndexError, subscript, np.ones((10, 10)), -11)\n\n a = np.arange(10)\n assert_array_equal(a[4], subscript(a, 4))\n a = a.reshape(5, 2)\n assert_array_equal(a[-4], subscript(a, -4))\n\n def test_setitem(self):\n assign = functools.partial(array_indexing, 1)\n\n # Deletion is impossible:\n assert_raises(ValueError, assign, np.ones(10), 0)\n # 0-d arrays don't work:\n assert_raises(IndexError, assign, np.ones(()), 0, 0)\n # Out of bound values:\n assert_raises(IndexError, assign, np.ones(10), 11, 0)\n assert_raises(IndexError, assign, np.ones(10), -11, 0)\n assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)\n assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)\n\n a = np.arange(10)\n assign(a, 4, 10)\n assert_(a[4] == 10)\n\n a = a.reshape(5, 2)\n assign(a, 4, 10)\n assert_array_equal(a[-1], [10, 10])\n", "\"\"\" Classes for interpolating values.\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\n\n__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',\n 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',\n 'RegularGridInterpolator', 'interpn']\n\n\nimport itertools\nimport warnings\nimport functools\nimport operator\n\nimport numpy as np\nfrom numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,\n dot, ravel, poly1d, asarray, intp)\n\nimport scipy.linalg\nimport scipy.special as spec\nfrom scipy.special import comb\n\nfrom scipy._lib.six import xrange, integer_types, string_types\n\nfrom . import fitpack\nfrom . import dfitpack\nfrom . import _fitpack\nfrom .polyint import _Interpolator1D\nfrom . 
import _ppoly\nfrom .fitpack2 import RectBivariateSpline\nfrom .interpnd import _ndim_coords_from_arrays\nfrom ._bsplines import make_interp_spline, BSpline\n\n\ndef prod(x):\n \"\"\"Product of a list of numbers; ~40x faster vs np.prod for Python tuples\"\"\"\n if len(x) == 0:\n return 1\n return functools.reduce(operator.mul, x)\n\n\ndef lagrange(x, w):\n r\"\"\"\n Return a Lagrange interpolating polynomial.\n\n Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating\n polynomial through the points ``(x, w)``.\n\n Warning: This implementation is numerically unstable. Do not expect to\n be able to use more than about 20 points even if they are chosen optimally.\n\n Parameters\n ----------\n x : array_like\n `x` represents the x-coordinates of a set of datapoints.\n w : array_like\n `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).\n\n Returns\n -------\n lagrange : `numpy.poly1d` instance\n The Lagrange interpolating polynomial.\n\n Examples\n --------\n Interpolate :math:`f(x) = x^3` by 3 points.\n\n >>> from scipy.interpolate import lagrange\n >>> x = np.array([0, 1, 2])\n >>> y = x**3\n >>> poly = lagrange(x, y)\n\n Since there are only 3 points, Lagrange polynomial has degree 2. Explicitly,\n it is given by\n\n .. math::\n\n \\begin{aligned}\n L(x) &= 1\\times \\frac{x (x - 2)}{-1} + 8\\times \\frac{x (x-1)}{2} \\\\\n &= x (-2 + 3x)\n \\end{aligned}\n\n >>> from numpy.polynomial.polynomial import Polynomial\n >>> Polynomial(poly).coef\n array([ 3., -2., 0.])\n\n \"\"\"\n\n M = len(x)\n p = poly1d(0.0)\n for j in xrange(M):\n pt = poly1d(w[j])\n for k in xrange(M):\n if k == j:\n continue\n fac = x[j]-x[k]\n pt *= poly1d([1.0, -x[k]])/fac\n p += pt\n return p\n\n\n# !! Need to find argument for keeping initialize. If it isn't\n# !! found, get rid of it!\n\n\nclass interp2d(object):\n \"\"\"\n interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,\n fill_value=nan)\n\n Interpolate over a 2-D grid.\n\n `x`, `y` and `z` are arrays of values used to approximate some function\n f: ``z = f(x, y)``. This class returns a function whose call method uses\n spline interpolation to find the value of new points.\n\n If `x` and `y` represent a regular grid, consider using\n RectBivariateSpline.\n\n Note that calling `interp2d` with NaNs present in input values results in\n undefined behaviour.\n\n Methods\n -------\n __call__\n\n Parameters\n ----------\n x, y : array_like\n Arrays defining the data point coordinates.\n\n If the points lie on a regular grid, `x` can specify the column\n coordinates and `y` the row coordinates, for example::\n\n >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]\n\n Otherwise, `x` and `y` must specify the full coordinates for each\n point, for example::\n\n >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]\n\n If `x` and `y` are multi-dimensional, they are flattened before use.\n z : array_like\n The values of the function to interpolate at the data points. If\n `z` is a multi-dimensional array, it is flattened before use. The\n length of a flattened `z` array is either\n len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates\n or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates\n for each point.\n kind : {'linear', 'cubic', 'quintic'}, optional\n The kind of spline interpolation to use. Default is 'linear'.\n copy : bool, optional\n If True, the class makes internal copies of x, y and z.\n If False, references may be used. 
The default is to copy.\n bounds_error : bool, optional\n If True, when interpolated values are requested outside of the\n domain of the input data (x,y), a ValueError is raised.\n If False, then `fill_value` is used.\n fill_value : number, optional\n If provided, the value to use for points outside of the\n interpolation domain. If omitted (None), values outside\n the domain are extrapolated.\n\n See Also\n --------\n RectBivariateSpline :\n Much faster 2D interpolation if your input data is on a grid\n bisplrep, bisplev :\n Spline interpolation based on FITPACK\n BivariateSpline : a more recent wrapper of the FITPACK routines\n interp1d : one dimension version of this function\n\n Notes\n -----\n The minimum number of data points required along the interpolation\n axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for\n quintic interpolation.\n\n The interpolator is constructed by `bisplrep`, with a smoothing factor\n of 0. If more control over smoothing is needed, `bisplrep` should be\n used directly.\n\n Examples\n --------\n Construct a 2-D grid and interpolate on it:\n\n >>> from scipy import interpolate\n >>> x = np.arange(-5.01, 5.01, 0.25)\n >>> y = np.arange(-5.01, 5.01, 0.25)\n >>> xx, yy = np.meshgrid(x, y)\n >>> z = np.sin(xx**2+yy**2)\n >>> f = interpolate.interp2d(x, y, z, kind='cubic')\n\n Now use the obtained interpolation function and plot the result:\n\n >>> import matplotlib.pyplot as plt\n >>> xnew = np.arange(-5.01, 5.01, 1e-2)\n >>> ynew = np.arange(-5.01, 5.01, 1e-2)\n >>> znew = f(xnew, ynew)\n >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')\n >>> plt.show()\n \"\"\"\n\n def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,\n fill_value=None):\n x = ravel(x)\n y = ravel(y)\n z = asarray(z)\n\n rectangular_grid = (z.size == len(x) * len(y))\n if rectangular_grid:\n if z.ndim == 2:\n if z.shape != (len(y), len(x)):\n raise ValueError(\"When on a regular grid with x.size = m \"\n \"and y.size = n, if z.ndim == 2, then z \"\n \"must have shape (n, m)\")\n if not np.all(x[1:] >= x[:-1]):\n j = np.argsort(x)\n x = x[j]\n z = z[:, j]\n if not np.all(y[1:] >= y[:-1]):\n j = np.argsort(y)\n y = y[j]\n z = z[j, :]\n z = ravel(z.T)\n else:\n z = ravel(z)\n if len(x) != len(y):\n raise ValueError(\n \"x and y must have equal lengths for non rectangular grid\")\n if len(z) != len(x):\n raise ValueError(\n \"Invalid length for input z for non rectangular grid\")\n\n try:\n kx = ky = {'linear': 1,\n 'cubic': 3,\n 'quintic': 5}[kind]\n except KeyError:\n raise ValueError(\"Unsupported interpolation type.\")\n\n if not rectangular_grid:\n # TODO: surfit is really not meant for interpolation!\n self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)\n else:\n nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(\n x, y, z, None, None, None, None,\n kx=kx, ky=ky, s=0.0)\n self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],\n kx, ky)\n\n self.bounds_error = bounds_error\n self.fill_value = fill_value\n self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]\n\n self.x_min, self.x_max = np.amin(x), np.amax(x)\n self.y_min, self.y_max = np.amin(y), np.amax(y)\n\n def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):\n \"\"\"Interpolate the function.\n\n Parameters\n ----------\n x : 1D array\n x-coordinates of the mesh on which to interpolate.\n y : 1D array\n y-coordinates of the mesh on which to interpolate.\n dx : int >= 0, < kx\n Order of partial derivatives in x.\n dy : int >= 0, < ky\n Order of partial 
derivatives in y.\n assume_sorted : bool, optional\n If False, values of `x` and `y` can be in any order and they are\n sorted first.\n If True, `x` and `y` have to be arrays of monotonically\n increasing values.\n\n Returns\n -------\n z : 2D array with shape (len(y), len(x))\n The interpolated values.\n \"\"\"\n\n x = atleast_1d(x)\n y = atleast_1d(y)\n\n if x.ndim != 1 or y.ndim != 1:\n raise ValueError(\"x and y should both be 1-D arrays\")\n\n if not assume_sorted:\n x = np.sort(x)\n y = np.sort(y)\n\n if self.bounds_error or self.fill_value is not None:\n out_of_bounds_x = (x < self.x_min) | (x > self.x_max)\n out_of_bounds_y = (y < self.y_min) | (y > self.y_max)\n\n any_out_of_bounds_x = np.any(out_of_bounds_x)\n any_out_of_bounds_y = np.any(out_of_bounds_y)\n\n if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):\n raise ValueError(\"Values out of range; x must be in %r, y in %r\"\n % ((self.x_min, self.x_max),\n (self.y_min, self.y_max)))\n\n z = fitpack.bisplev(x, y, self.tck, dx, dy)\n z = atleast_2d(z)\n z = transpose(z)\n\n if self.fill_value is not None:\n if any_out_of_bounds_x:\n z[:, out_of_bounds_x] = self.fill_value\n if any_out_of_bounds_y:\n z[out_of_bounds_y, :] = self.fill_value\n\n if len(z) == 1:\n z = z[0]\n return array(z)\n\n\ndef _check_broadcast_up_to(arr_from, shape_to, name):\n \"\"\"Helper to check that arr_from broadcasts up to shape_to\"\"\"\n shape_from = arr_from.shape\n if len(shape_to) >= len(shape_from):\n for t, f in zip(shape_to[::-1], shape_from[::-1]):\n if f != 1 and f != t:\n break\n else: # all checks pass, do the upcasting that we need later\n if arr_from.size != 1 and arr_from.shape != shape_to:\n arr_from = np.ones(shape_to, arr_from.dtype) * arr_from\n return arr_from.ravel()\n # at least one check failed\n raise ValueError('%s argument must be able to broadcast up '\n 'to shape %s but had shape %s'\n % (name, shape_to, shape_from))\n\n\ndef _do_extrapolate(fill_value):\n \"\"\"Helper to check if fill_value == \"extrapolate\" without warnings\"\"\"\n return (isinstance(fill_value, string_types) and\n fill_value == 'extrapolate')\n\n\nclass interp1d(_Interpolator1D):\n \"\"\"\n Interpolate a 1-D function.\n\n `x` and `y` are arrays of values used to approximate some function f:\n ``y = f(x)``. This class returns a function whose call method uses\n interpolation to find the value of new points.\n\n Note that calling `interp1d` with NaNs present in input values results in\n undefined behaviour.\n\n Parameters\n ----------\n x : (N,) array_like\n A 1-D array of real values.\n y : (...,N,...) array_like\n A N-D array of real values. The length of `y` along the interpolation\n axis must be equal to the length of `x`.\n kind : str or int, optional\n Specifies the kind of interpolation as a string\n ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic'\n refer to a spline interpolation of zeroth, first, second or third\n order; 'previous' and 'next' simply return the previous or next value\n of the point) or as an integer specifying the order of the spline\n interpolator to use.\n Default is 'linear'.\n axis : int, optional\n Specifies the axis of `y` along which to interpolate.\n Interpolation defaults to the last axis of `y`.\n copy : bool, optional\n If True, the class makes internal copies of x and y.\n If False, references to `x` and `y` are used. 
The default is to copy.\n bounds_error : bool, optional\n If True, a ValueError is raised any time interpolation is attempted on\n a value outside of the range of x (where extrapolation is\n necessary). If False, out of bounds values are assigned `fill_value`.\n By default, an error is raised unless `fill_value=\"extrapolate\"`.\n fill_value : array-like or (array-like, array_like) or \"extrapolate\", optional\n - if a ndarray (or float), this value will be used to fill in for\n requested points outside of the data range. If not provided, then\n the default is NaN. The array-like must broadcast properly to the\n dimensions of the non-interpolation axes.\n - If a two-element tuple, then the first element is used as a\n fill value for ``x_new < x[0]`` and the second element is used for\n ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,\n list or ndarray, regardless of shape) is taken to be a single\n array-like argument meant to be used for both bounds as\n ``below, above = fill_value, fill_value``.\n\n .. versionadded:: 0.17.0\n - If \"extrapolate\", then points outside the data range will be\n extrapolated.\n\n .. versionadded:: 0.17.0\n assume_sorted : bool, optional\n If False, values of `x` can be in any order and they are sorted first.\n If True, `x` has to be an array of monotonically increasing values.\n\n Methods\n -------\n __call__\n\n See Also\n --------\n splrep, splev\n Spline interpolation/smoothing based on FITPACK.\n UnivariateSpline : An object-oriented wrapper of the FITPACK routines.\n interp2d : 2-D interpolation\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from scipy import interpolate\n >>> x = np.arange(0, 10)\n >>> y = np.exp(-x/3.0)\n >>> f = interpolate.interp1d(x, y)\n\n >>> xnew = np.arange(0, 9, 0.1)\n >>> ynew = f(xnew) # use interpolation function returned by `interp1d`\n >>> plt.plot(x, y, 'o', xnew, ynew, '-')\n >>> plt.show()\n \"\"\"\n\n def __init__(self, x, y, kind='linear', axis=-1,\n copy=True, bounds_error=None, fill_value=np.nan,\n assume_sorted=False):\n \"\"\" Initialize a 1D linear interpolation class.\"\"\"\n _Interpolator1D.__init__(self, x, y, axis=axis)\n\n self.bounds_error = bounds_error # used by fill_value setter\n self.copy = copy\n\n if kind in ['zero', 'slinear', 'quadratic', 'cubic']:\n order = {'zero': 0, 'slinear': 1,\n 'quadratic': 2, 'cubic': 3}[kind]\n kind = 'spline'\n elif isinstance(kind, int):\n order = kind\n kind = 'spline'\n elif kind not in ('linear', 'nearest', 'previous', 'next'):\n raise NotImplementedError(\"%s is unsupported: Use fitpack \"\n \"routines for other types.\" % kind)\n x = array(x, copy=self.copy)\n y = array(y, copy=self.copy)\n\n if not assume_sorted:\n ind = np.argsort(x)\n x = x[ind]\n y = np.take(y, ind, axis=axis)\n\n if x.ndim != 1:\n raise ValueError(\"the x array must have exactly one dimension.\")\n if y.ndim == 0:\n raise ValueError(\"the y array must have at least one dimension.\")\n\n # Force-cast y to a floating-point type, if it's not yet one\n if not issubclass(y.dtype.type, np.inexact):\n y = y.astype(np.float_)\n\n # Backward compatibility\n self.axis = axis % y.ndim\n\n # Interpolation goes internally along the first axis\n self.y = y\n self._y = self._reshape_yi(self.y)\n self.x = x\n del y, x # clean up namespace to prevent misuse; use attributes\n self._kind = kind\n self.fill_value = fill_value # calls the setter, can modify bounds_err\n\n # Adjust to interpolation kind; store reference to *unbound*\n # interpolation methods, in order to avoid 
circular references to self\n # stored in the bound instance methods, and therefore delayed garbage\n # collection. See: https://docs.python.org/reference/datamodel.html\n if kind in ('linear', 'nearest', 'previous', 'next'):\n # Make a \"view\" of the y array that is rotated to the interpolation\n # axis.\n minval = 2\n if kind == 'nearest':\n # Do division before addition to prevent possible integer\n # overflow\n self.x_bds = self.x / 2.0\n self.x_bds = self.x_bds[1:] + self.x_bds[:-1]\n\n self._call = self.__class__._call_nearest\n elif kind == 'previous':\n # Side for np.searchsorted and index for clipping\n self._side = 'left'\n self._ind = 0\n # Move x by one floating point value to the left\n self._x_shift = np.nextafter(self.x, -np.inf)\n self._call = self.__class__._call_previousnext\n elif kind == 'next':\n self._side = 'right'\n self._ind = 1\n # Move x by one floating point value to the right\n self._x_shift = np.nextafter(self.x, np.inf)\n self._call = self.__class__._call_previousnext\n else:\n # Check if we can delegate to numpy.interp (2x-10x faster).\n cond = self.x.dtype == np.float_ and self.y.dtype == np.float_\n cond = cond and self.y.ndim == 1\n cond = cond and not _do_extrapolate(fill_value)\n\n if cond:\n self._call = self.__class__._call_linear_np\n else:\n self._call = self.__class__._call_linear\n else:\n minval = order + 1\n\n rewrite_nan = False\n xx, yy = self.x, self._y\n if order > 1:\n # Quadratic or cubic spline. If input contains even a single\n # nan, then the output is all nans. We cannot just feed data\n # with nans to make_interp_spline because it calls LAPACK.\n # So, we make up a bogus x and y with no nans and use it\n # to get the correct shape of the output, which we then fill\n # with nans.\n # For slinear or zero order spline, we just pass nans through.\n if np.isnan(self.x).any():\n xx = np.linspace(min(self.x), max(self.x), len(self.x))\n rewrite_nan = True\n if np.isnan(self._y).any():\n yy = np.ones_like(self._y)\n rewrite_nan = True\n\n self._spline = make_interp_spline(xx, yy, k=order,\n check_finite=False)\n if rewrite_nan:\n self._call = self.__class__._call_nan_spline\n else:\n self._call = self.__class__._call_spline\n\n if len(self.x) < minval:\n raise ValueError(\"x and y arrays must have at \"\n \"least %d entries\" % minval)\n\n @property\n def fill_value(self):\n # backwards compat: mimic a public attribute\n return self._fill_value_orig\n\n @fill_value.setter\n def fill_value(self, fill_value):\n # extrapolation only works for nearest neighbor and linear methods\n if _do_extrapolate(fill_value):\n if self.bounds_error:\n raise ValueError(\"Cannot extrapolate and raise \"\n \"at the same time.\")\n self.bounds_error = False\n self._extrapolate = True\n else:\n broadcast_shape = (self.y.shape[:self.axis] +\n self.y.shape[self.axis + 1:])\n if len(broadcast_shape) == 0:\n broadcast_shape = (1,)\n # it's either a pair (_below_range, _above_range) or a single value\n # for both above and below range\n if isinstance(fill_value, tuple) and len(fill_value) == 2:\n below_above = [np.asarray(fill_value[0]),\n np.asarray(fill_value[1])]\n names = ('fill_value (below)', 'fill_value (above)')\n for ii in range(2):\n below_above[ii] = _check_broadcast_up_to(\n below_above[ii], broadcast_shape, names[ii])\n else:\n fill_value = np.asarray(fill_value)\n below_above = [_check_broadcast_up_to(\n fill_value, broadcast_shape, 'fill_value')] * 2\n self._fill_value_below, self._fill_value_above = below_above\n self._extrapolate = False\n if 
self.bounds_error is None:\n self.bounds_error = True\n # backwards compat: fill_value was a public attr; make it writeable\n self._fill_value_orig = fill_value\n\n def _call_linear_np(self, x_new):\n # Note that out-of-bounds values are taken care of in self._evaluate\n return np.interp(x_new, self.x, self.y)\n\n def _call_linear(self, x_new):\n # 2. Find where in the original data, the values to interpolate\n # would be inserted.\n # Note: If x_new[n] == x[m], then m is returned by searchsorted.\n x_new_indices = searchsorted(self.x, x_new)\n\n # 3. Clip x_new_indices so that they are within the range of\n # self.x indices and at least 1. Removes mis-interpolation\n # of x_new[n] = x[0]\n x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)\n\n # 4. Calculate the slope of regions that each x_new value falls in.\n lo = x_new_indices - 1\n hi = x_new_indices\n\n x_lo = self.x[lo]\n x_hi = self.x[hi]\n y_lo = self._y[lo]\n y_hi = self._y[hi]\n\n # Note that the following two expressions rely on the specifics of the\n # broadcasting semantics.\n slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]\n\n # 5. Calculate the actual value for each entry in x_new.\n y_new = slope*(x_new - x_lo)[:, None] + y_lo\n\n return y_new\n\n def _call_nearest(self, x_new):\n \"\"\" Find nearest neighbour interpolated y_new = f(x_new).\"\"\"\n\n # 2. Find where in the averaged data the values to interpolate\n # would be inserted.\n # Note: use side='left' (right) to searchsorted() to define the\n # halfway point to be nearest to the left (right) neighbour\n x_new_indices = searchsorted(self.x_bds, x_new, side='left')\n\n # 3. Clip x_new_indices so that they are within the range of x indices.\n x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)\n\n # 4. Calculate the actual value for each entry in x_new.\n y_new = self._y[x_new_indices]\n\n return y_new\n\n def _call_previousnext(self, x_new):\n \"\"\"Use previous/next neighbour of x_new, y_new = f(x_new).\"\"\"\n\n # 1. Get index of left/right value\n x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)\n\n # 2. Clip x_new_indices so that they are within the range of x indices.\n x_new_indices = x_new_indices.clip(1-self._ind,\n len(self.x)-self._ind).astype(intp)\n\n # 3. Calculate the actual value for each entry in x_new.\n y_new = self._y[x_new_indices+self._ind-1]\n\n return y_new\n\n def _call_spline(self, x_new):\n return self._spline(x_new)\n\n def _call_nan_spline(self, x_new):\n out = self._spline(x_new)\n out[...] = np.nan\n return out\n\n def _evaluate(self, x_new):\n # 1. Handle values in x_new that are outside of x. Throw error,\n # or return a list of mask array indicating the outofbounds values.\n # The behavior is set by the bounds_error variable.\n x_new = asarray(x_new)\n y_new = self._call(self, x_new)\n if not self._extrapolate:\n below_bounds, above_bounds = self._check_bounds(x_new)\n if len(y_new) > 0:\n # Note fill_value must be broadcast up to the proper size\n # and flattened to work here\n y_new[below_bounds] = self._fill_value_below\n y_new[above_bounds] = self._fill_value_above\n return y_new\n\n def _check_bounds(self, x_new):\n \"\"\"Check the inputs for being in the bounds of the interpolated data.\n\n Parameters\n ----------\n x_new : array\n\n Returns\n -------\n out_of_bounds : bool array\n The mask on x_new of values that are out of the bounds.\n \"\"\"\n\n # If self.bounds_error is True, we raise an error if any x_new values\n # fall outside the range of x. 
Otherwise, we return an array indicating\n # which values are outside the boundary region.\n below_bounds = x_new < self.x[0]\n above_bounds = x_new > self.x[-1]\n\n # !! Could provide more information about which values are out of bounds\n if self.bounds_error and below_bounds.any():\n raise ValueError(\"A value in x_new is below the interpolation \"\n \"range.\")\n if self.bounds_error and above_bounds.any():\n raise ValueError(\"A value in x_new is above the interpolation \"\n \"range.\")\n\n # !! Should we emit a warning if some values are out of bounds?\n # !! matlab does not.\n return below_bounds, above_bounds\n\n\nclass _PPolyBase(object):\n \"\"\"Base class for piecewise polynomials.\"\"\"\n __slots__ = ('c', 'x', 'extrapolate', 'axis')\n\n def __init__(self, c, x, extrapolate=None, axis=0):\n self.c = np.asarray(c)\n self.x = np.ascontiguousarray(x, dtype=np.float64)\n\n if extrapolate is None:\n extrapolate = True\n elif extrapolate != 'periodic':\n extrapolate = bool(extrapolate)\n self.extrapolate = extrapolate\n\n if self.c.ndim < 2:\n raise ValueError(\"Coefficients array must be at least \"\n \"2-dimensional.\")\n\n if not (0 <= axis < self.c.ndim - 1):\n raise ValueError(\"axis=%s must be between 0 and %s\" %\n (axis, self.c.ndim-1))\n\n self.axis = axis\n if axis != 0:\n # roll the interpolation axis to be the first one in self.c\n # More specifically, the target shape for self.c is (k, m, ...),\n # and axis !=0 means that we have c.shape (..., k, m, ...)\n # ^\n # axis\n # So we roll two of them.\n self.c = np.rollaxis(self.c, axis+1)\n self.c = np.rollaxis(self.c, axis+1)\n\n if self.x.ndim != 1:\n raise ValueError(\"x must be 1-dimensional\")\n if self.x.size < 2:\n raise ValueError(\"at least 2 breakpoints are needed\")\n if self.c.ndim < 2:\n raise ValueError(\"c must have at least 2 dimensions\")\n if self.c.shape[0] == 0:\n raise ValueError(\"polynomial must be at least of order 0\")\n if self.c.shape[1] != self.x.size-1:\n raise ValueError(\"number of coefficients != len(x)-1\")\n dx = np.diff(self.x)\n if not (np.all(dx >= 0) or np.all(dx <= 0)):\n raise ValueError(\"`x` must be strictly increasing or decreasing.\")\n\n dtype = self._get_dtype(self.c.dtype)\n self.c = np.ascontiguousarray(self.c, dtype=dtype)\n\n def _get_dtype(self, dtype):\n if np.issubdtype(dtype, np.complexfloating) \\\n or np.issubdtype(self.c.dtype, np.complexfloating):\n return np.complex_\n else:\n return np.float_\n\n @classmethod\n def construct_fast(cls, c, x, extrapolate=None, axis=0):\n \"\"\"\n Construct the piecewise polynomial without making checks.\n\n Takes the same parameters as the constructor. Input arguments\n `c` and `x` must be arrays of the correct shape and type. The\n `c` array can only be of dtypes float and complex, and `x`\n array must have dtype float.\n \"\"\"\n self = object.__new__(cls)\n self.c = c\n self.x = x\n self.axis = axis\n if extrapolate is None:\n extrapolate = True\n self.extrapolate = extrapolate\n return self\n\n def _ensure_c_contiguous(self):\n \"\"\"\n c and x may be modified by the user. The Cython code expects\n that they are C contiguous.\n \"\"\"\n if not self.x.flags.c_contiguous:\n self.x = self.x.copy()\n if not self.c.flags.c_contiguous:\n self.c = self.c.copy()\n\n def extend(self, c, x, right=None):\n \"\"\"\n Add additional breakpoints and coefficients to the polynomial.\n\n Parameters\n ----------\n c : ndarray, size (k, m, ...)\n Additional coefficients for polynomials in intervals. 
Note that\n the first additional interval will be formed using one of the\n `self.x` end points.\n x : ndarray, size (m,)\n Additional breakpoints. Must be sorted in the same order as\n `self.x` and either to the right or to the left of the current\n breakpoints.\n right\n Deprecated argument. Has no effect.\n\n .. deprecated:: 0.19\n \"\"\"\n if right is not None:\n warnings.warn(\"`right` is deprecated and will be removed.\")\n\n c = np.asarray(c)\n x = np.asarray(x)\n\n if c.ndim < 2:\n raise ValueError(\"invalid dimensions for c\")\n if x.ndim != 1:\n raise ValueError(\"invalid dimensions for x\")\n if x.shape[0] != c.shape[1]:\n raise ValueError(\"x and c have incompatible sizes\")\n if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:\n raise ValueError(\"c and self.c have incompatible shapes\")\n\n if c.size == 0:\n return\n\n dx = np.diff(x)\n if not (np.all(dx >= 0) or np.all(dx <= 0)):\n raise ValueError(\"`x` is not sorted.\")\n\n if self.x[-1] >= self.x[0]:\n if not x[-1] >= x[0]:\n raise ValueError(\"`x` is in the different order \"\n \"than `self.x`.\")\n\n if x[0] >= self.x[-1]:\n action = 'append'\n elif x[-1] <= self.x[0]:\n action = 'prepend'\n else:\n raise ValueError(\"`x` is neither on the left or on the right \"\n \"from `self.x`.\")\n else:\n if not x[-1] <= x[0]:\n raise ValueError(\"`x` is in the different order \"\n \"than `self.x`.\")\n\n if x[0] <= self.x[-1]:\n action = 'append'\n elif x[-1] >= self.x[0]:\n action = 'prepend'\n else:\n raise ValueError(\"`x` is neither on the left or on the right \"\n \"from `self.x`.\")\n\n dtype = self._get_dtype(c.dtype)\n\n k2 = max(c.shape[0], self.c.shape[0])\n c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],\n dtype=dtype)\n\n if action == 'append':\n c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c\n c2[k2-c.shape[0]:, self.c.shape[1]:] = c\n self.x = np.r_[self.x, x]\n elif action == 'prepend':\n c2[k2-self.c.shape[0]:, :c.shape[1]] = c\n c2[k2-c.shape[0]:, c.shape[1]:] = self.c\n self.x = np.r_[x, self.x]\n\n self.c = c2\n\n def __call__(self, x, nu=0, extrapolate=None):\n \"\"\"\n Evaluate the piecewise polynomial or its derivative.\n\n Parameters\n ----------\n x : array_like\n Points to evaluate the interpolant at.\n nu : int, optional\n Order of derivative to evaluate. Must be non-negative.\n extrapolate : {bool, 'periodic', None}, optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs.\n If 'periodic', periodic extrapolation is used.\n If None (default), use `self.extrapolate`.\n\n Returns\n -------\n y : array_like\n Interpolated values. Shape is determined by replacing\n the interpolation axis in the original array with the shape of x.\n\n Notes\n -----\n Derivatives are evaluated piecewise for each polynomial\n segment, even if the polynomial is not differentiable at the\n breakpoints. 
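# A short, hedged sketch of the coefficient layout checked in __init__ and of
# extend(), using the public scipy.interpolate.PPoly class; the numbers are
# assumptions chosen only for illustration.
import numpy as np
from scipy.interpolate import PPoly
p = PPoly(np.array([[1.0]]), np.array([0.0, 1.0]))   # constant 1 on [0, 1)
p.extend(np.array([[2.0]]), np.array([2.0]))         # append constant 2 on [1, 2]
p.x                                                  # -> array([0., 1., 2.])
p(np.array([0.5, 1.5]))                              # -> array([1., 2.])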
The polynomial intervals are considered half-open,\n ``[a, b)``, except for the last interval which is closed\n ``[a, b]``.\n \"\"\"\n if extrapolate is None:\n extrapolate = self.extrapolate\n x = np.asarray(x)\n x_shape, x_ndim = x.shape, x.ndim\n x = np.ascontiguousarray(x.ravel(), dtype=np.float_)\n\n # With periodic extrapolation we map x to the segment\n # [self.x[0], self.x[-1]].\n if extrapolate == 'periodic':\n x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])\n extrapolate = False\n\n out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)\n self._ensure_c_contiguous()\n self._evaluate(x, nu, extrapolate, out)\n out = out.reshape(x_shape + self.c.shape[2:])\n if self.axis != 0:\n # transpose to move the calculated values to the interpolation axis\n l = list(range(out.ndim))\n l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]\n out = out.transpose(l)\n return out\n\n\nclass PPoly(_PPolyBase):\n \"\"\"\n Piecewise polynomial in terms of coefficients and breakpoints\n\n The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the\n local power basis::\n\n S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))\n\n where ``k`` is the degree of the polynomial.\n\n Parameters\n ----------\n c : ndarray, shape (k, m, ...)\n Polynomial coefficients, order `k` and `m` intervals\n x : ndarray, shape (m+1,)\n Polynomial breakpoints. Must be sorted in either increasing or\n decreasing order.\n extrapolate : bool or 'periodic', optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs. If 'periodic',\n periodic extrapolation is used. Default is True.\n axis : int, optional\n Interpolation axis. Default is zero.\n\n Attributes\n ----------\n x : ndarray\n Breakpoints.\n c : ndarray\n Coefficients of the polynomials. They are reshaped\n to a 3-dimensional array with the last dimension representing\n the trailing dimensions of the original coefficient array.\n axis : int\n Interpolation axis.\n\n Methods\n -------\n __call__\n derivative\n antiderivative\n integrate\n solve\n roots\n extend\n from_spline\n from_bernstein_basis\n construct_fast\n\n See also\n --------\n BPoly : piecewise polynomials in the Bernstein basis\n\n Notes\n -----\n High-order polynomials in the power basis can be numerically\n unstable. Precision problems can start to appear for orders\n larger than 20-30.\n \"\"\"\n def _evaluate(self, x, nu, extrapolate, out):\n _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),\n self.x, x, nu, bool(extrapolate), out)\n\n def derivative(self, nu=1):\n \"\"\"\n Construct a new piecewise polynomial representing the derivative.\n\n Parameters\n ----------\n nu : int, optional\n Order of derivative to evaluate. Default is 1, i.e. compute the\n first derivative. If negative, the antiderivative is returned.\n\n Returns\n -------\n pp : PPoly\n Piecewise polynomial of order k2 = k - n representing the derivative\n of this polynomial.\n\n Notes\n -----\n Derivatives are evaluated piecewise for each polynomial\n segment, even if the polynomial is not differentiable at the\n breakpoints. 
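# Illustrative evaluation sketch (assumed coefficients): a single-interval
# PPoly representing (x - 0)**2 on [0, 2], called directly and through
# derivative().
import numpy as np
from scipy.interpolate import PPoly
p = PPoly(np.array([[1.0], [0.0], [0.0]]), np.array([0.0, 2.0]))
p(1.5)                 # -> 2.25
p.derivative()(1.5)    # -> 3.0, since d/dx x**2 = 2*x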
The polynomial intervals are considered half-open,\n ``[a, b)``, except for the last interval which is closed\n ``[a, b]``.\n \"\"\"\n if nu < 0:\n return self.antiderivative(-nu)\n\n # reduce order\n if nu == 0:\n c2 = self.c.copy()\n else:\n c2 = self.c[:-nu, :].copy()\n\n if c2.shape[0] == 0:\n # derivative of order 0 is zero\n c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)\n\n # multiply by the correct rising factorials\n factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)\n c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]\n\n # construct a compatible polynomial\n return self.construct_fast(c2, self.x, self.extrapolate, self.axis)\n\n def antiderivative(self, nu=1):\n \"\"\"\n Construct a new piecewise polynomial representing the antiderivative.\n\n Antiderivative is also the indefinite integral of the function,\n and derivative is its inverse operation.\n\n Parameters\n ----------\n nu : int, optional\n Order of antiderivative to evaluate. Default is 1, i.e. compute\n the first integral. If negative, the derivative is returned.\n\n Returns\n -------\n pp : PPoly\n Piecewise polynomial of order k2 = k + n representing\n the antiderivative of this polynomial.\n\n Notes\n -----\n The antiderivative returned by this function is continuous and\n continuously differentiable to order n-1, up to floating point\n rounding error.\n\n If antiderivative is computed and ``self.extrapolate='periodic'``,\n it will be set to False for the returned instance. This is done because\n the antiderivative is no longer periodic and its correct evaluation\n outside of the initially given x interval is difficult.\n \"\"\"\n if nu <= 0:\n return self.derivative(-nu)\n\n c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],\n dtype=self.c.dtype)\n c[:-nu] = self.c\n\n # divide by the correct rising factorials\n factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)\n c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]\n\n # fix continuity of added degrees of freedom\n self._ensure_c_contiguous()\n _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),\n self.x, nu - 1)\n\n if self.extrapolate == 'periodic':\n extrapolate = False\n else:\n extrapolate = self.extrapolate\n\n # construct a compatible polynomial\n return self.construct_fast(c, self.x, extrapolate, self.axis)\n\n def integrate(self, a, b, extrapolate=None):\n \"\"\"\n Compute a definite integral over a piecewise polynomial.\n\n Parameters\n ----------\n a : float\n Lower integration bound\n b : float\n Upper integration bound\n extrapolate : {bool, 'periodic', None}, optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs.\n If 'periodic', periodic extrapolation is used.\n If None (default), use `self.extrapolate`.\n\n Returns\n -------\n ig : array_like\n Definite integral of the piecewise polynomial over [a, b]\n \"\"\"\n if extrapolate is None:\n extrapolate = self.extrapolate\n\n # Swap integration bounds if needed\n sign = 1\n if b < a:\n a, b = b, a\n sign = -1\n\n range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)\n self._ensure_c_contiguous()\n\n # Compute the integral.\n if extrapolate == 'periodic':\n # Split the integral into the part over period (can be several\n # of them) and the remaining part.\n\n xs, xe = self.x[0], self.x[-1]\n period = xe - xs\n interval = b - a\n n_periods, left = divmod(interval, period)\n\n if n_periods > 0:\n _ppoly.integrate(\n self.c.reshape(self.c.shape[0], self.c.shape[1], 
-1),\n self.x, xs, xe, False, out=range_int)\n range_int *= n_periods\n else:\n range_int.fill(0)\n\n # Map a to [xs, xe], b is always a + left.\n a = xs + (a - xs) % period\n b = a + left\n\n # If b <= xe then we need to integrate over [a, b], otherwise\n # over [a, xe] and from xs to what is remained.\n remainder_int = np.empty_like(range_int)\n if b <= xe:\n _ppoly.integrate(\n self.c.reshape(self.c.shape[0], self.c.shape[1], -1),\n self.x, a, b, False, out=remainder_int)\n range_int += remainder_int\n else:\n _ppoly.integrate(\n self.c.reshape(self.c.shape[0], self.c.shape[1], -1),\n self.x, a, xe, False, out=remainder_int)\n range_int += remainder_int\n\n _ppoly.integrate(\n self.c.reshape(self.c.shape[0], self.c.shape[1], -1),\n self.x, xs, xs + left + a - xe, False, out=remainder_int)\n range_int += remainder_int\n else:\n _ppoly.integrate(\n self.c.reshape(self.c.shape[0], self.c.shape[1], -1),\n self.x, a, b, bool(extrapolate), out=range_int)\n\n # Return\n range_int *= sign\n return range_int.reshape(self.c.shape[2:])\n\n def solve(self, y=0., discontinuity=True, extrapolate=None):\n \"\"\"\n Find real solutions of the the equation ``pp(x) == y``.\n\n Parameters\n ----------\n y : float, optional\n Right-hand side. Default is zero.\n discontinuity : bool, optional\n Whether to report sign changes across discontinuities at\n breakpoints as roots.\n extrapolate : {bool, 'periodic', None}, optional\n If bool, determines whether to return roots from the polynomial\n extrapolated based on first and last intervals, 'periodic' works\n the same as False. If None (default), use `self.extrapolate`.\n\n Returns\n -------\n roots : ndarray\n Roots of the polynomial(s).\n\n If the PPoly object describes multiple polynomials, the\n return value is an object array whose each element is an\n ndarray containing the roots.\n\n Notes\n -----\n This routine works only on real-valued polynomials.\n\n If the piecewise polynomial contains sections that are\n identically zero, the root list will contain the start point\n of the corresponding interval, followed by a ``nan`` value.\n\n If the polynomial is discontinuous across a breakpoint, and\n there is a sign change across the breakpoint, this is reported\n if the `discont` parameter is True.\n\n Examples\n --------\n\n Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals\n ``[-2, 1], [1, 2]``:\n\n >>> from scipy.interpolate import PPoly\n >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])\n >>> pp.roots()\n array([-1., 1.])\n \"\"\"\n if extrapolate is None:\n extrapolate = self.extrapolate\n\n self._ensure_c_contiguous()\n\n if np.issubdtype(self.c.dtype, np.complexfloating):\n raise ValueError(\"Root finding is only for \"\n \"real-valued polynomials\")\n\n y = float(y)\n r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),\n self.x, y, bool(discontinuity),\n bool(extrapolate))\n if self.c.ndim == 2:\n return r[0]\n else:\n r2 = np.empty(prod(self.c.shape[2:]), dtype=object)\n # this for-loop is equivalent to ``r2[...] 
= r``, but that's broken\n # in numpy 1.6.0\n for ii, root in enumerate(r):\n r2[ii] = root\n\n return r2.reshape(self.c.shape[2:])\n\n def roots(self, discontinuity=True, extrapolate=None):\n \"\"\"\n Find real roots of the the piecewise polynomial.\n\n Parameters\n ----------\n discontinuity : bool, optional\n Whether to report sign changes across discontinuities at\n breakpoints as roots.\n extrapolate : {bool, 'periodic', None}, optional\n If bool, determines whether to return roots from the polynomial\n extrapolated based on first and last intervals, 'periodic' works\n the same as False. If None (default), use `self.extrapolate`.\n\n Returns\n -------\n roots : ndarray\n Roots of the polynomial(s).\n\n If the PPoly object describes multiple polynomials, the\n return value is an object array whose each element is an\n ndarray containing the roots.\n\n See Also\n --------\n PPoly.solve\n \"\"\"\n return self.solve(0, discontinuity, extrapolate)\n\n @classmethod\n def from_spline(cls, tck, extrapolate=None):\n \"\"\"\n Construct a piecewise polynomial from a spline\n\n Parameters\n ----------\n tck\n A spline, as returned by `splrep` or a BSpline object.\n extrapolate : bool or 'periodic', optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs.\n If 'periodic', periodic extrapolation is used. Default is True.\n \"\"\"\n if isinstance(tck, BSpline):\n t, c, k = tck.tck\n if extrapolate is None:\n extrapolate = tck.extrapolate\n else:\n t, c, k = tck\n\n cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)\n for m in xrange(k, -1, -1):\n y = fitpack.splev(t[:-1], tck, der=m)\n cvals[k - m, :] = y/spec.gamma(m+1)\n\n return cls.construct_fast(cvals, t, extrapolate)\n\n @classmethod\n def from_bernstein_basis(cls, bp, extrapolate=None):\n \"\"\"\n Construct a piecewise polynomial in the power basis\n from a polynomial in Bernstein basis.\n\n Parameters\n ----------\n bp : BPoly\n A Bernstein basis polynomial, as created by BPoly\n extrapolate : bool or 'periodic', optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs.\n If 'periodic', periodic extrapolation is used. Default is True.\n \"\"\"\n dx = np.diff(bp.x)\n k = bp.c.shape[0] - 1 # polynomial order\n\n rest = (None,)*(bp.c.ndim-2)\n\n c = np.zeros_like(bp.c)\n for a in range(k+1):\n factor = (-1)**a * comb(k, a) * bp.c[a]\n for s in range(a, k+1):\n val = comb(k-a, s-a) * (-1)**s\n c[k-s] += factor * val / dx[(slice(None),)+rest]**s\n\n if extrapolate is None:\n extrapolate = bp.extrapolate\n\n return cls.construct_fast(c, bp.x, extrapolate, bp.axis)\n\n\nclass BPoly(_PPolyBase):\n \"\"\"Piecewise polynomial in terms of coefficients and breakpoints.\n\n The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the\n Bernstein polynomial basis::\n\n S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),\n\n where ``k`` is the degree of the polynomial, and::\n\n b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),\n\n with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial\n coefficient.\n\n Parameters\n ----------\n c : ndarray, shape (k, m, ...)\n Polynomial coefficients, order `k` and `m` intervals\n x : ndarray, shape (m+1,)\n Polynomial breakpoints. Must be sorted in either increasing or\n decreasing order.\n extrapolate : bool, optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs. 
If 'periodic',\n periodic extrapolation is used. Default is True.\n axis : int, optional\n Interpolation axis. Default is zero.\n\n Attributes\n ----------\n x : ndarray\n Breakpoints.\n c : ndarray\n Coefficients of the polynomials. They are reshaped\n to a 3-dimensional array with the last dimension representing\n the trailing dimensions of the original coefficient array.\n axis : int\n Interpolation axis.\n\n Methods\n -------\n __call__\n extend\n derivative\n antiderivative\n integrate\n construct_fast\n from_power_basis\n from_derivatives\n\n See also\n --------\n PPoly : piecewise polynomials in the power basis\n\n Notes\n -----\n Properties of Bernstein polynomials are well documented in the literature.\n Here's a non-exhaustive list:\n\n .. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial\n\n .. [2] Kenneth I. Joy, Bernstein polynomials,\n http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf\n\n .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,\n vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.\n\n Examples\n --------\n >>> from scipy.interpolate import BPoly\n >>> x = [0, 1]\n >>> c = [[1], [2], [3]]\n >>> bp = BPoly(c, x)\n\n This creates a 2nd order polynomial\n\n .. math::\n\n B(x) = 1 \\\\times b_{0, 2}(x) + 2 \\\\times b_{1, 2}(x) + 3 \\\\times b_{2, 2}(x) \\\\\\\\\n = 1 \\\\times (1-x)^2 + 2 \\\\times 2 x (1 - x) + 3 \\\\times x^2\n\n \"\"\"\n\n def _evaluate(self, x, nu, extrapolate, out):\n _ppoly.evaluate_bernstein(\n self.c.reshape(self.c.shape[0], self.c.shape[1], -1),\n self.x, x, nu, bool(extrapolate), out)\n\n def derivative(self, nu=1):\n \"\"\"\n Construct a new piecewise polynomial representing the derivative.\n\n Parameters\n ----------\n nu : int, optional\n Order of derivative to evaluate. Default is 1, i.e. compute the\n first derivative. If negative, the antiderivative is returned.\n\n Returns\n -------\n bp : BPoly\n Piecewise polynomial of order k - nu representing the derivative of\n this polynomial.\n\n \"\"\"\n if nu < 0:\n return self.antiderivative(-nu)\n\n if nu > 1:\n bp = self\n for k in range(nu):\n bp = bp.derivative()\n return bp\n\n # reduce order\n if nu == 0:\n c2 = self.c.copy()\n else:\n # For a polynomial\n # B(x) = \\sum_{a=0}^{k} c_a b_{a, k}(x),\n # we use the fact that\n # b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),\n # which leads to\n # B'(x) = \\sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}\n #\n # finally, for an interval [y, y + dy] with dy != 1,\n # we need to correct for an extra power of dy\n\n rest = (None,)*(self.c.ndim-2)\n\n k = self.c.shape[0] - 1\n dx = np.diff(self.x)[(None, slice(None))+rest]\n c2 = k * np.diff(self.c, axis=0) / dx\n\n if c2.shape[0] == 0:\n # derivative of order 0 is zero\n c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)\n\n # construct a compatible polynomial\n return self.construct_fast(c2, self.x, self.extrapolate, self.axis)\n\n def antiderivative(self, nu=1):\n \"\"\"\n Construct a new piecewise polynomial representing the antiderivative.\n\n Parameters\n ----------\n nu : int, optional\n Order of antiderivative to evaluate. Default is 1, i.e. compute\n the first integral. If negative, the derivative is returned.\n\n Returns\n -------\n bp : BPoly\n Piecewise polynomial of order k + nu representing the\n antiderivative of this polynomial.\n\n Notes\n -----\n If antiderivative is computed and ``self.extrapolate='periodic'``,\n it will be set to False for the returned instance. 
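# A small sketch tying antiderivative() and integrate() together, reusing the
# quadratic from the BPoly class example above; the expected value 2.0 is the
# integral of (1-x)**2 + 4*x*(1-x) + 3*x**2 over [0, 1].
from scipy.interpolate import BPoly
bp = BPoly([[1.0], [2.0], [3.0]], [0, 1])
ib = bp.antiderivative()
ib(1.0) - ib(0.0)      # -> 2.0
bp.integrate(0, 1)     # -> 2.0 as well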
This is done because\n the antiderivative is no longer periodic and its correct evaluation\n outside of the initially given x interval is difficult.\n \"\"\"\n if nu <= 0:\n return self.derivative(-nu)\n\n if nu > 1:\n bp = self\n for k in range(nu):\n bp = bp.antiderivative()\n return bp\n\n # Construct the indefinite integrals on individual intervals\n c, x = self.c, self.x\n k = c.shape[0]\n c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)\n\n c2[1:, ...] = np.cumsum(c, axis=0) / k\n delta = x[1:] - x[:-1]\n c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]\n\n # Now fix continuity: on the very first interval, take the integration\n # constant to be zero; on an interval [x_j, x_{j+1}) with j>0,\n # the integration constant is then equal to the jump of the `bp` at x_j.\n # The latter is given by the coefficient of B_{n+1, n+1}\n # *on the previous interval* (other B. polynomials are zero at the\n # breakpoint). Finally, use the fact that BPs form a partition of unity.\n c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]\n\n if self.extrapolate == 'periodic':\n extrapolate = False\n else:\n extrapolate = self.extrapolate\n\n return self.construct_fast(c2, x, extrapolate, axis=self.axis)\n\n def integrate(self, a, b, extrapolate=None):\n \"\"\"\n Compute a definite integral over a piecewise polynomial.\n\n Parameters\n ----------\n a : float\n Lower integration bound\n b : float\n Upper integration bound\n extrapolate : {bool, 'periodic', None}, optional\n Whether to extrapolate to out-of-bounds points based on first\n and last intervals, or to return NaNs. If 'periodic', periodic\n extrapolation is used. If None (default), use `self.extrapolate`.\n\n Returns\n -------\n array_like\n Definite integral of the piecewise polynomial over [a, b]\n\n \"\"\"\n # XXX: can probably use instead the fact that\n # \\int_0^{1} B_{j, n}(x) \\dx = 1/(n+1)\n ib = self.antiderivative()\n if extrapolate is None:\n extrapolate = self.extrapolate\n\n # ib.extrapolate shouldn't be 'periodic', it is converted to\n # False for 'periodic. 
in antiderivative() call.\n if extrapolate != 'periodic':\n ib.extrapolate = extrapolate\n\n if extrapolate == 'periodic':\n # Split the integral into the part over period (can be several\n # of them) and the remaining part.\n\n # For simplicity and clarity convert to a <= b case.\n if a <= b:\n sign = 1\n else:\n a, b = b, a\n sign = -1\n\n xs, xe = self.x[0], self.x[-1]\n period = xe - xs\n interval = b - a\n n_periods, left = divmod(interval, period)\n res = n_periods * (ib(xe) - ib(xs))\n\n # Map a and b to [xs, xe].\n a = xs + (a - xs) % period\n b = a + left\n\n # If b <= xe then we need to integrate over [a, b], otherwise\n # over [a, xe] and from xs to what is remained.\n if b <= xe:\n res += ib(b) - ib(a)\n else:\n res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)\n\n return sign * res\n else:\n return ib(b) - ib(a)\n\n def extend(self, c, x, right=None):\n k = max(self.c.shape[0], c.shape[0])\n self.c = self._raise_degree(self.c, k - self.c.shape[0])\n c = self._raise_degree(c, k - c.shape[0])\n return _PPolyBase.extend(self, c, x, right)\n extend.__doc__ = _PPolyBase.extend.__doc__\n\n @classmethod\n def from_power_basis(cls, pp, extrapolate=None):\n \"\"\"\n Construct a piecewise polynomial in Bernstein basis\n from a power basis polynomial.\n\n Parameters\n ----------\n pp : PPoly\n A piecewise polynomial in the power basis\n extrapolate : bool or 'periodic', optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs.\n If 'periodic', periodic extrapolation is used. Default is True.\n \"\"\"\n dx = np.diff(pp.x)\n k = pp.c.shape[0] - 1 # polynomial order\n\n rest = (None,)*(pp.c.ndim-2)\n\n c = np.zeros_like(pp.c)\n for a in range(k+1):\n factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)\n for j in range(k-a, k+1):\n c[j] += factor * comb(j, k-a)\n\n if extrapolate is None:\n extrapolate = pp.extrapolate\n\n return cls.construct_fast(c, pp.x, extrapolate, pp.axis)\n\n @classmethod\n def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):\n \"\"\"Construct a piecewise polynomial in the Bernstein basis,\n compatible with the specified values and derivatives at breakpoints.\n\n Parameters\n ----------\n xi : array_like\n sorted 1D array of x-coordinates\n yi : array_like or list of array_likes\n ``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``\n orders : None or int or array_like of ints. Default: None.\n Specifies the degree of local polynomials. If not None, some\n derivatives are ignored.\n extrapolate : bool or 'periodic', optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs.\n If 'periodic', periodic extrapolation is used. Default is True.\n\n Notes\n -----\n If ``k`` derivatives are specified at a breakpoint ``x``, the\n constructed polynomial is exactly ``k`` times continuously\n differentiable at ``x``, unless the ``order`` is provided explicitly.\n In the latter case, the smoothness of the polynomial at\n the breakpoint is controlled by the ``order``.\n\n Deduces the number of derivatives to match at each end\n from ``order`` and the number of derivatives available. If\n possible it uses the same number of derivatives from\n each end; if the number is odd it tries to take the\n extra one from y2. 
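# A hedged sketch of the order capping described here (inputs are illustrative
# assumptions): with orders=1 only the endpoint values are matched and the
# supplied first derivatives are ignored, giving the straight line through
# (0, 1) and (1, 3).
import numpy as np
from scipy.interpolate import BPoly
bp = BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]], orders=1)
bp(np.array([0.0, 0.5, 1.0]))      # -> array([1., 2., 3.])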
In any case if not enough derivatives\n are available at one end or another it draws enough to\n make up the total from the other end.\n\n If the order is too high and not enough derivatives are available,\n an exception is raised.\n\n Examples\n --------\n\n >>> from scipy.interpolate import BPoly\n >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])\n\n Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`\n such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`\n\n >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])\n\n Creates a piecewise polynomial `f(x)`, such that\n `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.\n Based on the number of derivatives provided, the order of the\n local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.\n Notice that no restriction is imposed on the derivatives at\n `x = 1` and `x = 2`.\n\n Indeed, the explicit form of the polynomial is::\n\n f(x) = | x * (1 - x), 0 <= x < 1\n | 2 * (x - 1), 1 <= x <= 2\n\n So that f'(1-0) = -1 and f'(1+0) = 2\n\n \"\"\"\n xi = np.asarray(xi)\n if len(xi) != len(yi):\n raise ValueError(\"xi and yi need to have the same length\")\n if np.any(xi[1:] - xi[:-1] <= 0):\n raise ValueError(\"x coordinates are not in increasing order\")\n\n # number of intervals\n m = len(xi) - 1\n\n # global poly order is k-1, local orders are <=k and can vary\n try:\n k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))\n except TypeError:\n raise ValueError(\"Using a 1D array for y? Please .reshape(-1, 1).\")\n\n if orders is None:\n orders = [None] * m\n else:\n if isinstance(orders, (integer_types, np.integer)):\n orders = [orders] * m\n k = max(k, max(orders))\n\n if any(o <= 0 for o in orders):\n raise ValueError(\"Orders must be positive.\")\n\n c = []\n for i in range(m):\n y1, y2 = yi[i], yi[i+1]\n if orders[i] is None:\n n1, n2 = len(y1), len(y2)\n else:\n n = orders[i]+1\n n1 = min(n//2, len(y1))\n n2 = min(n - n1, len(y2))\n n1 = min(n - n2, len(y1))\n if n1+n2 != n:\n mesg = (\"Point %g has %d derivatives, point %g\"\n \" has %d derivatives, but order %d requested\" % (\n xi[i], len(y1), xi[i+1], len(y2), orders[i]))\n raise ValueError(mesg)\n\n if not (n1 <= len(y1) and n2 <= len(y2)):\n raise ValueError(\"`order` input incompatible with\"\n \" length y1 or y2.\")\n\n b = BPoly._construct_from_derivatives(xi[i], xi[i+1],\n y1[:n1], y2[:n2])\n if len(b) < k:\n b = BPoly._raise_degree(b, k - len(b))\n c.append(b)\n\n c = np.asarray(c)\n return cls(c.swapaxes(0, 1), xi, extrapolate)\n\n @staticmethod\n def _construct_from_derivatives(xa, xb, ya, yb):\n r\"\"\"Compute the coefficients of a polynomial in the Bernstein basis\n given the values and derivatives at the edges.\n\n Return the coefficients of a polynomial in the Bernstein basis\n defined on `[xa, xb]` and having the values and derivatives at the\n endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.\n The polynomial constructed is of the minimal possible degree, i.e.,\n if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree\n of the polynomial is ``na + nb - 1``.\n\n Parameters\n ----------\n xa : float\n Left-hand end point of the interval\n xb : float\n Right-hand end point of the interval\n ya : array_like\n Derivatives at ``xa``. 
``ya[0]`` is the value of the function, and\n ``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.\n yb : array_like\n Derivatives at ``xb``.\n\n Returns\n -------\n array\n coefficient array of a polynomial having specified derivatives\n\n Notes\n -----\n This uses several facts from life of Bernstein basis functions.\n First of all,\n\n .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})\n\n If B(x) is a linear combination of the form\n\n .. math:: B(x) = \\sum_{a=0}^{n} c_a b_{a, n},\n\n then :math: B'(x) = n \\sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.\n Iterating the latter one, one finds for the q-th derivative\n\n .. math:: B^{q}(x) = n!/(n-q)! \\sum_{a=0}^{n-q} Q_a b_{a, n-q},\n\n with\n\n .. math:: Q_a = \\sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}\n\n This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and\n `c_q` are found one by one by iterating `q = 0, ..., na`.\n\n At `x = xb` it's the same with `a = n - q`.\n\n \"\"\"\n ya, yb = np.asarray(ya), np.asarray(yb)\n if ya.shape[1:] != yb.shape[1:]:\n raise ValueError('ya and yb have incompatible dimensions.')\n\n dta, dtb = ya.dtype, yb.dtype\n if (np.issubdtype(dta, np.complexfloating) or\n np.issubdtype(dtb, np.complexfloating)):\n dt = np.complex_\n else:\n dt = np.float_\n\n na, nb = len(ya), len(yb)\n n = na + nb\n\n c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)\n\n # compute coefficients of a polynomial degree na+nb-1\n # walk left-to-right\n for q in range(0, na):\n c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q\n for j in range(0, q):\n c[q] -= (-1)**(j+q) * comb(q, j) * c[j]\n\n # now walk right-to-left\n for q in range(0, nb):\n c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q\n for j in range(0, q):\n c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]\n\n return c\n\n @staticmethod\n def _raise_degree(c, d):\n r\"\"\"Raise a degree of a polynomial in the Bernstein basis.\n\n Given the coefficients of a polynomial degree `k`, return (the\n coefficients of) the equivalent polynomial of degree `k+d`.\n\n Parameters\n ----------\n c : array_like\n coefficient array, 1D\n d : integer\n\n Returns\n -------\n array\n coefficient array, 1D array of length `c.shape[0] + d`\n\n Notes\n -----\n This uses the fact that a Bernstein polynomial `b_{a, k}` can be\n identically represented as a linear combination of polynomials of\n a higher degree `k+d`:\n\n .. math:: b_{a, k} = comb(k, a) \\sum_{j=0}^{d} b_{a+j, k+d} \\\n comb(d, j) / comb(k+d, a+j)\n\n \"\"\"\n if d == 0:\n return c\n\n k = c.shape[0] - 1\n out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)\n\n for a in range(c.shape[0]):\n f = c[a] * comb(k, a)\n for j in range(d+1):\n out[a+j] += f * comb(d, j) / comb(k+d, a+j)\n return out\n\n\nclass NdPPoly(object):\n \"\"\"\n Piecewise tensor product polynomial\n\n The value at point `xp = (x', y', z', ...)` is evaluated by first\n computing the interval indices `i` such that::\n\n x[0][i[0]] <= x' < x[0][i[0]+1]\n x[1][i[1]] <= y' < x[1][i[1]+1]\n ...\n\n and then computing::\n\n S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]\n * (xp[0] - x[0][i[0]])**m0\n * ...\n * (xp[n] - x[n][i[n]])**mn\n for m0 in range(k[0]+1)\n ...\n for mn in range(k[n]+1))\n\n where ``k[j]`` is the degree of the polynomial in dimension j. 
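# A minimal, assumed example of the tensor-product layout described above: a
# single [0, 1] x [0, 1] cell holding the constant polynomial p(x, y) = 5, so
# c has one coefficient per axis (degree zero) and one interval per axis.
import numpy as np
from scipy.interpolate import NdPPoly
c = 5.0 * np.ones((1, 1, 1, 1))
p = NdPPoly(c, (np.array([0.0, 1.0]), np.array([0.0, 1.0])))
p(np.array([[0.25, 0.75]]))        # -> array([5.])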
This\n representation is the piecewise multivariate power basis.\n\n Parameters\n ----------\n c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)\n Polynomial coefficients, with polynomial order `kj` and\n `mj+1` intervals for each dimension `j`.\n x : ndim-tuple of ndarrays, shapes (mj+1,)\n Polynomial breakpoints for each dimension. These must be\n sorted in increasing order.\n extrapolate : bool, optional\n Whether to extrapolate to out-of-bounds points based on first\n and last intervals, or to return NaNs. Default: True.\n\n Attributes\n ----------\n x : tuple of ndarrays\n Breakpoints.\n c : ndarray\n Coefficients of the polynomials.\n\n Methods\n -------\n __call__\n construct_fast\n\n See also\n --------\n PPoly : piecewise polynomials in 1D\n\n Notes\n -----\n High-order polynomials in the power basis can be numerically\n unstable.\n\n \"\"\"\n\n def __init__(self, c, x, extrapolate=None):\n self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)\n self.c = np.asarray(c)\n if extrapolate is None:\n extrapolate = True\n self.extrapolate = bool(extrapolate)\n\n ndim = len(self.x)\n if any(v.ndim != 1 for v in self.x):\n raise ValueError(\"x arrays must all be 1-dimensional\")\n if any(v.size < 2 for v in self.x):\n raise ValueError(\"x arrays must all contain at least 2 points\")\n if c.ndim < 2*ndim:\n raise ValueError(\"c must have at least 2*len(x) dimensions\")\n if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):\n raise ValueError(\"x-coordinates are not in increasing order\")\n if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):\n raise ValueError(\"x and c do not agree on the number of intervals\")\n\n dtype = self._get_dtype(self.c.dtype)\n self.c = np.ascontiguousarray(self.c, dtype=dtype)\n\n @classmethod\n def construct_fast(cls, c, x, extrapolate=None):\n \"\"\"\n Construct the piecewise polynomial without making checks.\n\n Takes the same parameters as the constructor. Input arguments\n `c` and `x` must be arrays of the correct shape and type. The\n `c` array can only be of dtypes float and complex, and `x`\n array must have dtype float.\n\n \"\"\"\n self = object.__new__(cls)\n self.c = c\n self.x = x\n if extrapolate is None:\n extrapolate = True\n self.extrapolate = extrapolate\n return self\n\n def _get_dtype(self, dtype):\n if np.issubdtype(dtype, np.complexfloating) \\\n or np.issubdtype(self.c.dtype, np.complexfloating):\n return np.complex_\n else:\n return np.float_\n\n def _ensure_c_contiguous(self):\n if not self.c.flags.c_contiguous:\n self.c = self.c.copy()\n if not isinstance(self.x, tuple):\n self.x = tuple(self.x)\n\n def __call__(self, x, nu=None, extrapolate=None):\n \"\"\"\n Evaluate the piecewise polynomial or its derivative\n\n Parameters\n ----------\n x : array-like\n Points to evaluate the interpolant at.\n nu : tuple, optional\n Orders of derivatives to evaluate. Each must be non-negative.\n extrapolate : bool, optional\n Whether to extrapolate to out-of-bounds points based on first\n and last intervals, or to return NaNs.\n\n Returns\n -------\n y : array-like\n Interpolated values. Shape is determined by replacing\n the interpolation axis in the original array with the shape of x.\n\n Notes\n -----\n Derivatives are evaluated piecewise for each polynomial\n segment, even if the polynomial is not differentiable at the\n breakpoints. 
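# Sketch of __call__ with per-dimension derivative orders nu, using a 1-D case
# with assumed coefficients for brevity: p(x) = x**2 on the single cell [0, 2].
import numpy as np
from scipy.interpolate import NdPPoly
p = NdPPoly(np.array([[1.0], [0.0], [0.0]]), (np.array([0.0, 2.0]),))
p([1.5])               # -> array([2.25])
p([1.5], nu=(1,))      # -> array([3.]), the first derivative 2*x at x = 1.5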
The polynomial intervals are considered half-open,\n ``[a, b)``, except for the last interval which is closed\n ``[a, b]``.\n\n \"\"\"\n if extrapolate is None:\n extrapolate = self.extrapolate\n else:\n extrapolate = bool(extrapolate)\n\n ndim = len(self.x)\n\n x = _ndim_coords_from_arrays(x)\n x_shape = x.shape\n x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)\n\n if nu is None:\n nu = np.zeros((ndim,), dtype=np.intc)\n else:\n nu = np.asarray(nu, dtype=np.intc)\n if nu.ndim != 1 or nu.shape[0] != ndim:\n raise ValueError(\"invalid number of derivative orders nu\")\n\n dim1 = prod(self.c.shape[:ndim])\n dim2 = prod(self.c.shape[ndim:2*ndim])\n dim3 = prod(self.c.shape[2*ndim:])\n ks = np.array(self.c.shape[:ndim], dtype=np.intc)\n\n out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)\n self._ensure_c_contiguous()\n\n _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),\n self.x,\n ks,\n x,\n nu,\n bool(extrapolate),\n out)\n\n return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])\n\n def _derivative_inplace(self, nu, axis):\n \"\"\"\n Compute 1D derivative along a selected dimension in-place\n May result to non-contiguous c array.\n \"\"\"\n if nu < 0:\n return self._antiderivative_inplace(-nu, axis)\n\n ndim = len(self.x)\n axis = axis % ndim\n\n # reduce order\n if nu == 0:\n # noop\n return\n else:\n sl = [slice(None)]*ndim\n sl[axis] = slice(None, -nu, None)\n c2 = self.c[tuple(sl)]\n\n if c2.shape[axis] == 0:\n # derivative of order 0 is zero\n shp = list(c2.shape)\n shp[axis] = 1\n c2 = np.zeros(shp, dtype=c2.dtype)\n\n # multiply by the correct rising factorials\n factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)\n sl = [None]*c2.ndim\n sl[axis] = slice(None)\n c2 *= factor[tuple(sl)]\n\n self.c = c2\n\n def _antiderivative_inplace(self, nu, axis):\n \"\"\"\n Compute 1D antiderivative along a selected dimension\n May result to non-contiguous c array.\n \"\"\"\n if nu <= 0:\n return self._derivative_inplace(-nu, axis)\n\n ndim = len(self.x)\n axis = axis % ndim\n\n perm = list(range(ndim))\n perm[0], perm[axis] = perm[axis], perm[0]\n perm = perm + list(range(ndim, self.c.ndim))\n\n c = self.c.transpose(perm)\n\n c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],\n dtype=c.dtype)\n c2[:-nu] = c\n\n # divide by the correct rising factorials\n factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)\n c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]\n\n # fix continuity of added degrees of freedom\n perm2 = list(range(c2.ndim))\n perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]\n\n c2 = c2.transpose(perm2)\n c2 = c2.copy()\n _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),\n self.x[axis], nu-1)\n\n c2 = c2.transpose(perm2)\n c2 = c2.transpose(perm)\n\n # Done\n self.c = c2\n\n def derivative(self, nu):\n \"\"\"\n Construct a new piecewise polynomial representing the derivative.\n\n Parameters\n ----------\n nu : ndim-tuple of int\n Order of derivatives to evaluate for each dimension.\n If negative, the antiderivative is returned.\n\n Returns\n -------\n pp : NdPPoly\n Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])\n representing the derivative of this polynomial.\n\n Notes\n -----\n Derivatives are evaluated piecewise for each polynomial\n segment, even if the polynomial is not differentiable at the\n breakpoints. 
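# A matching sketch for derivative()/antiderivative() with an ndim-tuple nu
# (again 1-D, assumed coefficients): p(x) = x on the single cell [0, 1].
import numpy as np
from scipy.interpolate import NdPPoly
p = NdPPoly(np.array([[1.0], [0.0]]), (np.array([0.0, 1.0]),))
p.derivative((1,))([0.5])        # -> array([1.])
p.antiderivative((1,))([1.0])    # -> array([0.5]), i.e. x**2 / 2 at x = 1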
The polynomial intervals in each dimension are\n considered half-open, ``[a, b)``, except for the last interval\n which is closed ``[a, b]``.\n\n \"\"\"\n p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)\n\n for axis, n in enumerate(nu):\n p._derivative_inplace(n, axis)\n\n p._ensure_c_contiguous()\n return p\n\n def antiderivative(self, nu):\n \"\"\"\n Construct a new piecewise polynomial representing the antiderivative.\n\n Antiderivative is also the indefinite integral of the function,\n and derivative is its inverse operation.\n\n Parameters\n ----------\n nu : ndim-tuple of int\n Order of derivatives to evaluate for each dimension.\n If negative, the derivative is returned.\n\n Returns\n -------\n pp : PPoly\n Piecewise polynomial of order k2 = k + n representing\n the antiderivative of this polynomial.\n\n Notes\n -----\n The antiderivative returned by this function is continuous and\n continuously differentiable to order n-1, up to floating point\n rounding error.\n\n \"\"\"\n p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)\n\n for axis, n in enumerate(nu):\n p._antiderivative_inplace(n, axis)\n\n p._ensure_c_contiguous()\n return p\n\n def integrate_1d(self, a, b, axis, extrapolate=None):\n r\"\"\"\n Compute NdPPoly representation for one dimensional definite integral\n\n The result is a piecewise polynomial representing the integral:\n\n .. math::\n\n p(y, z, ...) = \\int_a^b dx\\, p(x, y, z, ...)\n\n where the dimension integrated over is specified with the\n `axis` parameter.\n\n Parameters\n ----------\n a, b : float\n Lower and upper bound for integration.\n axis : int\n Dimension over which to compute the 1D integrals\n extrapolate : bool, optional\n Whether to extrapolate to out-of-bounds points based on first\n and last intervals, or to return NaNs.\n\n Returns\n -------\n ig : NdPPoly or array-like\n Definite integral of the piecewise polynomial over [a, b].\n If the polynomial was 1-dimensional, an array is returned,\n otherwise, an NdPPoly object.\n\n \"\"\"\n if extrapolate is None:\n extrapolate = self.extrapolate\n else:\n extrapolate = bool(extrapolate)\n\n ndim = len(self.x)\n axis = int(axis) % ndim\n\n # reuse 1D integration routines\n c = self.c\n swap = list(range(c.ndim))\n swap.insert(0, swap[axis])\n del swap[axis + 1]\n swap.insert(1, swap[ndim + axis])\n del swap[ndim + axis + 1]\n\n c = c.transpose(swap)\n p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),\n self.x[axis],\n extrapolate=extrapolate)\n out = p.integrate(a, b, extrapolate=extrapolate)\n\n # Construct result\n if ndim == 1:\n return out.reshape(c.shape[2:])\n else:\n c = out.reshape(c.shape[2:])\n x = self.x[:axis] + self.x[axis+1:]\n return self.construct_fast(c, x, extrapolate=extrapolate)\n\n def integrate(self, ranges, extrapolate=None):\n \"\"\"\n Compute a definite integral over a piecewise polynomial.\n\n Parameters\n ----------\n ranges : ndim-tuple of 2-tuples float\n Sequence of lower and upper bounds for each dimension,\n ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``\n extrapolate : bool, optional\n Whether to extrapolate to out-of-bounds points based on first\n and last intervals, or to return NaNs.\n\n Returns\n -------\n ig : array_like\n Definite integral of the piecewise polynomial over\n [a[0], b[0]] x ... 
x [a[ndim-1], b[ndim-1]]\n\n \"\"\"\n\n ndim = len(self.x)\n\n if extrapolate is None:\n extrapolate = self.extrapolate\n else:\n extrapolate = bool(extrapolate)\n\n if not hasattr(ranges, '__len__') or len(ranges) != ndim:\n raise ValueError(\"Range not a sequence of correct length\")\n\n self._ensure_c_contiguous()\n\n # Reuse 1D integration routine\n c = self.c\n for n, (a, b) in enumerate(ranges):\n swap = list(range(c.ndim))\n swap.insert(1, swap[ndim - n])\n del swap[ndim - n + 1]\n\n c = c.transpose(swap)\n\n p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)\n out = p.integrate(a, b, extrapolate=extrapolate)\n c = out.reshape(c.shape[2:])\n\n return c\n\n\nclass RegularGridInterpolator(object):\n \"\"\"\n Interpolation on a regular grid in arbitrary dimensions\n\n The data must be defined on a regular grid; the grid spacing however may be\n uneven. Linear and nearest-neighbour interpolation are supported. After\n setting up the interpolator object, the interpolation method (*linear* or\n *nearest*) may be chosen at each evaluation.\n\n Parameters\n ----------\n points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )\n The points defining the regular grid in n dimensions.\n\n values : array_like, shape (m1, ..., mn, ...)\n The data on the regular grid in n dimensions.\n\n method : str, optional\n The method of interpolation to perform. Supported are \"linear\" and\n \"nearest\". This parameter will become the default for the object's\n ``__call__`` method. Default is \"linear\".\n\n bounds_error : bool, optional\n If True, when interpolated values are requested outside of the\n domain of the input data, a ValueError is raised.\n If False, then `fill_value` is used.\n\n fill_value : number, optional\n If provided, the value to use for points outside of the\n interpolation domain. If None, values outside\n the domain are extrapolated.\n\n Methods\n -------\n __call__\n\n Notes\n -----\n Contrary to LinearNDInterpolator and NearestNDInterpolator, this class\n avoids expensive triangulation of the input data by taking advantage of the\n regular grid structure.\n\n If any of `points` have a dimension of size 1, linear interpolation will\n return an array of `nan` values. Nearest-neighbor interpolation will work\n as usual in this case.\n\n .. versionadded:: 0.14\n\n Examples\n --------\n Evaluate a simple example function on the points of a 3D grid:\n\n >>> from scipy.interpolate import RegularGridInterpolator\n >>> def f(x, y, z):\n ... return 2 * x**3 + 3 * y**2 - z\n >>> x = np.linspace(1, 4, 11)\n >>> y = np.linspace(4, 7, 22)\n >>> z = np.linspace(7, 9, 33)\n >>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))\n\n ``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.\n Next, define an interpolating function from this data:\n\n >>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)\n\n Evaluate the interpolating function at the two points\n ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:\n\n >>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])\n >>> my_interpolating_function(pts)\n array([ 125.80469388, 146.30069388])\n\n which is indeed a close approximation to\n ``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.\n\n See also\n --------\n NearestNDInterpolator : Nearest neighbour interpolation on unstructured\n data in N dimensions\n\n LinearNDInterpolator : Piecewise linear interpolant on unstructured data\n in N dimensions\n\n References\n ----------\n .. 
[1] Python package *regulargrid* by Johannes Buchner, see\n https://pypi.python.org/pypi/regulargrid/\n .. [2] Wikipedia, \"Trilinear interpolation\",\n https://en.wikipedia.org/wiki/Trilinear_interpolation\n .. [3] Weiser, Alan, and Sergio E. Zarantonello. \"A note on piecewise linear\n and multilinear table interpolation in many dimensions.\" MATH.\n COMPUT. 50.181 (1988): 189-196.\n https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf\n\n \"\"\"\n # this class is based on code originally programmed by Johannes Buchner,\n # see https://github.com/JohannesBuchner/regulargrid\n\n def __init__(self, points, values, method=\"linear\", bounds_error=True,\n fill_value=np.nan):\n if method not in [\"linear\", \"nearest\"]:\n raise ValueError(\"Method '%s' is not defined\" % method)\n self.method = method\n self.bounds_error = bounds_error\n\n if not hasattr(values, 'ndim'):\n # allow reasonable duck-typed values\n values = np.asarray(values)\n\n if len(points) > values.ndim:\n raise ValueError(\"There are %d point arrays, but values has %d \"\n \"dimensions\" % (len(points), values.ndim))\n\n if hasattr(values, 'dtype') and hasattr(values, 'astype'):\n if not np.issubdtype(values.dtype, np.inexact):\n values = values.astype(float)\n\n self.fill_value = fill_value\n if fill_value is not None:\n fill_value_dtype = np.asarray(fill_value).dtype\n if (hasattr(values, 'dtype') and not\n np.can_cast(fill_value_dtype, values.dtype,\n casting='same_kind')):\n raise ValueError(\"fill_value must be either 'None' or \"\n \"of a type compatible with values\")\n\n for i, p in enumerate(points):\n if not np.all(np.diff(p) > 0.):\n raise ValueError(\"The points in dimension %d must be strictly \"\n \"ascending\" % i)\n if not np.asarray(p).ndim == 1:\n raise ValueError(\"The points in dimension %d must be \"\n \"1-dimensional\" % i)\n if not values.shape[i] == len(p):\n raise ValueError(\"There are %d points and %d values in \"\n \"dimension %d\" % (len(p), values.shape[i], i))\n self.grid = tuple([np.asarray(p) for p in points])\n self.values = values\n\n def __call__(self, xi, method=None):\n \"\"\"\n Interpolation at coordinates\n\n Parameters\n ----------\n xi : ndarray of shape (..., ndim)\n The coordinates to sample the gridded data at\n\n method : str\n The method of interpolation to perform. 
Supported are \"linear\" and\n \"nearest\".\n\n \"\"\"\n method = self.method if method is None else method\n if method not in [\"linear\", \"nearest\"]:\n raise ValueError(\"Method '%s' is not defined\" % method)\n\n ndim = len(self.grid)\n xi = _ndim_coords_from_arrays(xi, ndim=ndim)\n if xi.shape[-1] != len(self.grid):\n raise ValueError(\"The requested sample points xi have dimension \"\n \"%d, but this RegularGridInterpolator has \"\n \"dimension %d\" % (xi.shape[1], ndim))\n\n xi_shape = xi.shape\n xi = xi.reshape(-1, xi_shape[-1])\n\n if self.bounds_error:\n for i, p in enumerate(xi.T):\n if not np.logical_and(np.all(self.grid[i][0] <= p),\n np.all(p <= self.grid[i][-1])):\n raise ValueError(\"One of the requested xi is out of bounds \"\n \"in dimension %d\" % i)\n\n indices, norm_distances, out_of_bounds = self._find_indices(xi.T)\n if method == \"linear\":\n result = self._evaluate_linear(indices,\n norm_distances,\n out_of_bounds)\n elif method == \"nearest\":\n result = self._evaluate_nearest(indices,\n norm_distances,\n out_of_bounds)\n if not self.bounds_error and self.fill_value is not None:\n result[out_of_bounds] = self.fill_value\n\n return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])\n\n def _evaluate_linear(self, indices, norm_distances, out_of_bounds):\n # slice for broadcasting over trailing dimensions in self.values\n vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))\n\n # find relevant values\n # each i and i+1 represents a edge\n edges = itertools.product(*[[i, i + 1] for i in indices])\n values = 0.\n for edge_indices in edges:\n weight = 1.\n for ei, i, yi in zip(edge_indices, indices, norm_distances):\n weight *= np.where(ei == i, 1 - yi, yi)\n values += np.asarray(self.values[edge_indices]) * weight[vslice]\n return values\n\n def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):\n idx_res = []\n for i, yi in zip(indices, norm_distances):\n idx_res.append(np.where(yi <= .5, i, i + 1))\n return self.values[tuple(idx_res)]\n\n def _find_indices(self, xi):\n # find relevant edges between which xi are situated\n indices = []\n # compute distance to lower edge in unity units\n norm_distances = []\n # check for out of bounds xi\n out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)\n # iterate through dimensions\n for x, grid in zip(xi, self.grid):\n i = np.searchsorted(grid, x) - 1\n i[i < 0] = 0\n i[i > grid.size - 2] = grid.size - 2\n indices.append(i)\n norm_distances.append((x - grid[i]) /\n (grid[i + 1] - grid[i]))\n if not self.bounds_error:\n out_of_bounds += x < grid[0]\n out_of_bounds += x > grid[-1]\n return indices, norm_distances, out_of_bounds\n\n\ndef interpn(points, values, xi, method=\"linear\", bounds_error=True,\n fill_value=np.nan):\n \"\"\"\n Multidimensional interpolation on regular grids.\n\n Parameters\n ----------\n points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )\n The points defining the regular grid in n dimensions.\n\n values : array_like, shape (m1, ..., mn, ...)\n The data on the regular grid in n dimensions.\n\n xi : ndarray of shape (..., ndim)\n The coordinates to sample the gridded data at\n\n method : str, optional\n The method of interpolation to perform. Supported are \"linear\" and\n \"nearest\", and \"splinef2d\". 
\"splinef2d\" is only supported for\n 2-dimensional data.\n\n bounds_error : bool, optional\n If True, when interpolated values are requested outside of the\n domain of the input data, a ValueError is raised.\n If False, then `fill_value` is used.\n\n fill_value : number, optional\n If provided, the value to use for points outside of the\n interpolation domain. If None, values outside\n the domain are extrapolated. Extrapolation is not supported by method\n \"splinef2d\".\n\n Returns\n -------\n values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]\n Interpolated values at input coordinates.\n\n Notes\n -----\n\n .. versionadded:: 0.14\n\n See also\n --------\n NearestNDInterpolator : Nearest neighbour interpolation on unstructured\n data in N dimensions\n\n LinearNDInterpolator : Piecewise linear interpolant on unstructured data\n in N dimensions\n\n RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a\n regular grid in arbitrary dimensions\n\n RectBivariateSpline : Bivariate spline approximation over a rectangular mesh\n\n \"\"\"\n # sanity check 'method' kwarg\n if method not in [\"linear\", \"nearest\", \"splinef2d\"]:\n raise ValueError(\"interpn only understands the methods 'linear', \"\n \"'nearest', and 'splinef2d'. You provided %s.\" %\n method)\n\n if not hasattr(values, 'ndim'):\n values = np.asarray(values)\n\n ndim = values.ndim\n if ndim > 2 and method == \"splinef2d\":\n raise ValueError(\"The method spline2fd can only be used for \"\n \"2-dimensional input data\")\n if not bounds_error and fill_value is None and method == \"splinef2d\":\n raise ValueError(\"The method spline2fd does not support extrapolation.\")\n\n # sanity check consistency of input dimensions\n if len(points) > ndim:\n raise ValueError(\"There are %d point arrays, but values has %d \"\n \"dimensions\" % (len(points), ndim))\n if len(points) != ndim and method == 'splinef2d':\n raise ValueError(\"The method spline2fd can only be used for \"\n \"scalar data with one point per coordinate\")\n\n # sanity check input grid\n for i, p in enumerate(points):\n if not np.all(np.diff(p) > 0.):\n raise ValueError(\"The points in dimension %d must be strictly \"\n \"ascending\" % i)\n if not np.asarray(p).ndim == 1:\n raise ValueError(\"The points in dimension %d must be \"\n \"1-dimensional\" % i)\n if not values.shape[i] == len(p):\n raise ValueError(\"There are %d points and %d values in \"\n \"dimension %d\" % (len(p), values.shape[i], i))\n grid = tuple([np.asarray(p) for p in points])\n\n # sanity check requested xi\n xi = _ndim_coords_from_arrays(xi, ndim=len(grid))\n if xi.shape[-1] != len(grid):\n raise ValueError(\"The requested sample points xi have dimension \"\n \"%d, but this RegularGridInterpolator has \"\n \"dimension %d\" % (xi.shape[1], len(grid)))\n\n for i, p in enumerate(xi.T):\n if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),\n np.all(p <= grid[i][-1])):\n raise ValueError(\"One of the requested xi is out of bounds \"\n \"in dimension %d\" % i)\n\n # perform interpolation\n if method == \"linear\":\n interp = RegularGridInterpolator(points, values, method=\"linear\",\n bounds_error=bounds_error,\n fill_value=fill_value)\n return interp(xi)\n elif method == \"nearest\":\n interp = RegularGridInterpolator(points, values, method=\"nearest\",\n bounds_error=bounds_error,\n fill_value=fill_value)\n return interp(xi)\n elif method == \"splinef2d\":\n xi_shape = xi.shape\n xi = xi.reshape(-1, xi.shape[-1])\n\n # RectBivariateSpline doesn't support fill_value; 
we need to wrap here\n idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],\n grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),\n axis=0)\n result = np.empty_like(xi[:, 0])\n\n # make a copy of values for RectBivariateSpline\n interp = RectBivariateSpline(points[0], points[1], values[:])\n result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])\n result[np.logical_not(idx_valid)] = fill_value\n\n return result.reshape(xi_shape[:-1])\n\n\n# backward compatibility wrapper\nclass _ppform(PPoly):\n \"\"\"\n Deprecated piecewise polynomial class.\n\n New code should use the `PPoly` class instead.\n\n \"\"\"\n\n def __init__(self, coeffs, breaks, fill=0.0, sort=False):\n warnings.warn(\"_ppform is deprecated -- use PPoly instead\",\n category=DeprecationWarning)\n\n if sort:\n breaks = np.sort(breaks)\n else:\n breaks = np.asarray(breaks)\n\n PPoly.__init__(self, coeffs, breaks)\n\n self.coeffs = self.c\n self.breaks = self.x\n self.K = self.coeffs.shape[0]\n self.fill = fill\n self.a = self.breaks[0]\n self.b = self.breaks[-1]\n\n def __call__(self, x):\n return PPoly.__call__(self, x, 0, False)\n\n def _evaluate(self, x, nu, extrapolate, out):\n PPoly._evaluate(self, x, nu, extrapolate, out)\n out[~((x >= self.a) & (x <= self.b))] = self.fill\n return out\n\n @classmethod\n def fromspline(cls, xk, cvals, order, fill=0.0):\n # Note: this spline representation is incompatible with FITPACK\n N = len(xk)-1\n sivals = np.empty((order+1, N), dtype=float)\n for m in xrange(order, -1, -1):\n fact = spec.gamma(m+1)\n res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)\n res /= fact\n sivals[order-m, :] = res\n return cls(sivals, xk, fill=fill)\n\n\n# The 3 private functions below can be called by splmake().\n\n\ndef _dot0(a, b):\n \"\"\"Similar to numpy.dot, but sum over last axis of a and 1st axis of b\"\"\"\n if b.ndim <= 2:\n return dot(a, b)\n else:\n axes = list(range(b.ndim))\n axes.insert(-1, 0)\n axes.pop(0)\n return dot(a, b.transpose(axes))\n\n\ndef _find_smoothest(xk, yk, order, conds=None, B=None):\n # construct Bmatrix, and Jmatrix\n # e = J*c\n # minimize norm(e,2) given B*c=yk\n # if desired B can be given\n # conds is ignored\n N = len(xk)-1\n K = order\n if B is None:\n B = _fitpack._bsplmat(order, xk)\n J = _fitpack._bspldismat(order, xk)\n u, s, vh = scipy.linalg.svd(B)\n ind = K-1\n V2 = vh[-ind:,:].T\n V1 = vh[:-ind,:].T\n A = dot(J.T,J)\n tmp = dot(V2.T,A)\n Q = dot(tmp,V2)\n p = scipy.linalg.solve(Q, tmp)\n tmp = dot(V2,p)\n tmp = np.eye(N+K) - tmp\n tmp = dot(tmp,V1)\n tmp = dot(tmp,np.diag(1.0/s))\n tmp = dot(tmp,u.T)\n return _dot0(tmp, yk)\n\n\n# conds is a tuple of an array and a vector\n# giving the left-hand and the right-hand side\n# of the additional equations to add to B\n\n\ndef _find_user(xk, yk, order, conds, B):\n lh = conds[0]\n rh = conds[1]\n B = np.concatenate((B, lh), axis=0)\n w = np.concatenate((yk, rh), axis=0)\n M, N = B.shape\n if (M > N):\n raise ValueError(\"over-specification of conditions\")\n elif (M < N):\n return _find_smoothest(xk, yk, order, None, B)\n else:\n return scipy.linalg.solve(B, w)\n\n\n# Remove the 3 private functions above as well when removing splmake\[email protected](message=\"splmake is deprecated in scipy 0.19.0, \"\n \"use make_interp_spline instead.\")\ndef splmake(xk, yk, order=3, kind='smoothest', conds=None):\n \"\"\"\n Return a representation of a spline given data-points at internal knots\n\n Parameters\n ----------\n xk : array_like\n The input array of x values of rank 1\n yk : array_like\n 
The input array of y values of rank N. `yk` can be an N-d array to\n represent more than one curve, through the same `xk` points. The first\n dimension is assumed to be the interpolating dimension and is the same\n length of `xk`.\n order : int, optional\n Order of the spline\n kind : str, optional\n Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',\n 'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2\n conds : optional\n Conds\n\n Returns\n -------\n splmake : tuple\n Return a (`xk`, `cvals`, `k`) representation of a spline given\n data-points where the (internal) knots are at the data-points.\n\n \"\"\"\n yk = np.asanyarray(yk)\n\n order = int(order)\n if order < 0:\n raise ValueError(\"order must not be negative\")\n if order == 0:\n return xk, yk[:-1], order\n elif order == 1:\n return xk, yk, order\n\n try:\n func = eval('_find_%s' % kind)\n except Exception:\n raise NotImplementedError\n\n # the constraint matrix\n B = _fitpack._bsplmat(order, xk)\n coefs = func(xk, yk, order, conds, B)\n return xk, coefs, order\n\n\[email protected](message=\"spleval is deprecated in scipy 0.19.0, \"\n \"use BSpline instead.\")\ndef spleval(xck, xnew, deriv=0):\n \"\"\"\n Evaluate a fixed spline represented by the given tuple at the new x-values\n\n The `xj` values are the interior knot points. The approximation\n region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`\n should have length N+k where `k` is the order of the spline.\n\n Parameters\n ----------\n (xj, cvals, k) : tuple\n Parameters that define the fixed spline\n xj : array_like\n Interior knot points\n cvals : array_like\n Curvature\n k : int\n Order of the spline\n xnew : array_like\n Locations to calculate spline\n deriv : int\n Deriv\n\n Returns\n -------\n spleval : ndarray\n If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or\n `xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`\n providing the interpolation of multiple curves.\n\n Notes\n -----\n Internally, an additional `k`-1 knot points are added on either side of\n the spline.\n\n \"\"\"\n (xj, cvals, k) = xck\n oldshape = np.shape(xnew)\n xx = np.ravel(xnew)\n sh = cvals.shape[1:]\n res = np.empty(xx.shape + sh, dtype=cvals.dtype)\n for index in np.ndindex(*sh):\n sl = (slice(None),) + index\n if issubclass(cvals.dtype.type, np.complexfloating):\n res[sl].real = _fitpack._bspleval(xx,xj, cvals.real[sl], k, deriv)\n res[sl].imag = _fitpack._bspleval(xx,xj, cvals.imag[sl], k, deriv)\n else:\n res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv)\n res.shape = oldshape + sh\n return res\n\n\n# When `spltopp` gets removed, also remove the _ppform class.\[email protected](message=\"spltopp is deprecated in scipy 0.19.0, \"\n \"use PPoly.from_spline instead.\")\ndef spltopp(xk, cvals, k):\n \"\"\"Return a piece-wise polynomial object from a fixed-spline tuple.\"\"\"\n return _ppform.fromspline(xk, cvals, k)\n\n\[email protected](message=\"spline is deprecated in scipy 0.19.0, \"\n \"use Bspline class instead.\")\ndef spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):\n \"\"\"\n Interpolate a curve at new points using a spline fit\n\n Parameters\n ----------\n xk, yk : array_like\n The x and y values that define the curve.\n xnew : array_like\n The x values where spline should estimate the y values.\n order : int\n Default is 3.\n kind : string\n One of {'smoothest'}\n conds : Don't know\n Don't know\n\n Returns\n -------\n spline : ndarray\n An array of y values; the spline evaluated 
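# --- Illustrative sketch (not part of the original source) ---------------
# The modern replacements that the deprecation messages in this section
# point to: make_interp_spline/BSpline instead of splmake/spleval, and
# PPoly.from_spline instead of spltopp. Assumes SciPy >= 0.19 is available.
import numpy as np
from scipy.interpolate import make_interp_spline, splrep, PPoly

xk = np.linspace(0.0, 2.0 * np.pi, 10)
yk = np.sin(xk)

spl = make_interp_spline(xk, yk, k=3)      # returns a BSpline object
xnew = np.linspace(0.0, 2.0 * np.pi, 50)
ynew = spl(xnew)                           # evaluation, as spleval() used to do
dspl = spl.derivative()                    # derivatives instead of spleval(..., deriv=1)

tck = splrep(xk, yk, k=3)                  # FITPACK representation
pp = PPoly.from_spline(tck)                # piecewise-polynomial form, as spltopp() did
# -------------------------------------------------------------------------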
at the positions `xnew`.\n\n \"\"\"\n return spleval(splmake(xk, yk, order=order, kind=kind, conds=conds), xnew)\n", "from __future__ import division, print_function\n\nimport os\nimport shutil\nimport pytest\nfrom tempfile import mkstemp, mkdtemp\nfrom subprocess import Popen, PIPE\nfrom distutils.errors import DistutilsError\n\nfrom numpy.distutils import ccompiler, customized_ccompiler\nfrom numpy.testing import assert_, assert_equal\nfrom numpy.distutils.system_info import system_info, ConfigParser\nfrom numpy.distutils.system_info import default_lib_dirs, default_include_dirs\nfrom numpy.distutils import _shell_utils\n\n\ndef get_class(name, notfound_action=1):\n \"\"\"\n notfound_action:\n 0 - do nothing\n 1 - display warning message\n 2 - raise error\n \"\"\"\n cl = {'temp1': Temp1Info,\n 'temp2': Temp2Info\n }.get(name.lower(), _system_info)\n return cl()\n\nsimple_site = \"\"\"\n[ALL]\nlibrary_dirs = {dir1:s}{pathsep:s}{dir2:s}\nlibraries = {lib1:s},{lib2:s}\nextra_compile_args = -I/fake/directory -I\"/path with/spaces\" -Os\nruntime_library_dirs = {dir1:s}\n\n[temp1]\nlibrary_dirs = {dir1:s}\nlibraries = {lib1:s}\nruntime_library_dirs = {dir1:s}\n\n[temp2]\nlibrary_dirs = {dir2:s}\nlibraries = {lib2:s}\nextra_link_args = -Wl,-rpath={lib2_escaped:s}\nrpath = {dir2:s}\n\"\"\"\nsite_cfg = simple_site\n\nfakelib_c_text = \"\"\"\n/* This file is generated from numpy/distutils/testing/test_system_info.py */\n#include<stdio.h>\nvoid foo(void) {\n printf(\"Hello foo\");\n}\nvoid bar(void) {\n printf(\"Hello bar\");\n}\n\"\"\"\n\ndef have_compiler():\n \"\"\" Return True if there appears to be an executable compiler\n \"\"\"\n compiler = customized_ccompiler()\n try:\n cmd = compiler.compiler # Unix compilers\n except AttributeError:\n try:\n if not compiler.initialized:\n compiler.initialize() # MSVC is different\n except (DistutilsError, ValueError):\n return False\n cmd = [compiler.cc]\n try:\n p = Popen(cmd, stdout=PIPE, stderr=PIPE)\n p.stdout.close()\n p.stderr.close()\n p.wait()\n except OSError:\n return False\n return True\n\n\nHAVE_COMPILER = have_compiler()\n\n\nclass _system_info(system_info):\n\n def __init__(self,\n default_lib_dirs=default_lib_dirs,\n default_include_dirs=default_include_dirs,\n verbosity=1,\n ):\n self.__class__.info = {}\n self.local_prefixes = []\n defaults = {'library_dirs': '',\n 'include_dirs': '',\n 'runtime_library_dirs': '',\n 'rpath': '',\n 'src_dirs': '',\n 'search_static_first': \"0\",\n 'extra_compile_args': '',\n 'extra_link_args': ''}\n self.cp = ConfigParser(defaults)\n # We have to parse the config files afterwards\n # to have a consistent temporary filepath\n\n def _check_libs(self, lib_dirs, libs, opt_libs, exts):\n \"\"\"Override _check_libs to return with all dirs \"\"\"\n info = {'libraries': libs, 'library_dirs': lib_dirs}\n return info\n\n\nclass Temp1Info(_system_info):\n \"\"\"For testing purposes\"\"\"\n section = 'temp1'\n\n\nclass Temp2Info(_system_info):\n \"\"\"For testing purposes\"\"\"\n section = 'temp2'\n\n\nclass TestSystemInfoReading(object):\n\n def setup(self):\n \"\"\" Create the libraries \"\"\"\n # Create 2 sources and 2 libraries\n self._dir1 = mkdtemp()\n self._src1 = os.path.join(self._dir1, 'foo.c')\n self._lib1 = os.path.join(self._dir1, 'libfoo.so')\n self._dir2 = mkdtemp()\n self._src2 = os.path.join(self._dir2, 'bar.c')\n self._lib2 = os.path.join(self._dir2, 'libbar.so')\n # Update local site.cfg\n global simple_site, site_cfg\n site_cfg = simple_site.format(**{\n 'dir1': self._dir1,\n 'lib1': self._lib1,\n 
'dir2': self._dir2,\n 'lib2': self._lib2,\n 'pathsep': os.pathsep,\n 'lib2_escaped': _shell_utils.NativeParser.join([self._lib2])\n })\n # Write site.cfg\n fd, self._sitecfg = mkstemp()\n os.close(fd)\n with open(self._sitecfg, 'w') as fd:\n fd.write(site_cfg)\n # Write the sources\n with open(self._src1, 'w') as fd:\n fd.write(fakelib_c_text)\n with open(self._src2, 'w') as fd:\n fd.write(fakelib_c_text)\n # We create all class-instances\n\n def site_and_parse(c, site_cfg):\n c.files = [site_cfg]\n c.parse_config_files()\n return c\n self.c_default = site_and_parse(get_class('default'), self._sitecfg)\n self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)\n self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)\n\n def teardown(self):\n # Do each removal separately\n try:\n shutil.rmtree(self._dir1)\n except Exception:\n pass\n try:\n shutil.rmtree(self._dir2)\n except Exception:\n pass\n try:\n os.remove(self._sitecfg)\n except Exception:\n pass\n\n def test_all(self):\n # Read in all information in the ALL block\n tsi = self.c_default\n assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])\n assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])\n assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])\n extra = tsi.calc_extra_info()\n assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os'])\n\n def test_temp1(self):\n # Read in all information in the temp1 block\n tsi = self.c_temp1\n assert_equal(tsi.get_lib_dirs(), [self._dir1])\n assert_equal(tsi.get_libraries(), [self._lib1])\n assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])\n\n def test_temp2(self):\n # Read in all information in the temp2 block\n tsi = self.c_temp2\n assert_equal(tsi.get_lib_dirs(), [self._dir2])\n assert_equal(tsi.get_libraries(), [self._lib2])\n # Now from rpath and not runtime_library_dirs\n assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2])\n extra = tsi.calc_extra_info()\n assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])\n\n @pytest.mark.skipif(not HAVE_COMPILER, reason=\"Missing compiler\")\n def test_compile1(self):\n # Compile source and link the first source\n c = customized_ccompiler()\n previousDir = os.getcwd()\n try:\n # Change directory to not screw up directories\n os.chdir(self._dir1)\n c.compile([os.path.basename(self._src1)], output_dir=self._dir1)\n # Ensure that the object exists\n assert_(os.path.isfile(self._src1.replace('.c', '.o')) or\n os.path.isfile(self._src1.replace('.c', '.obj')))\n finally:\n os.chdir(previousDir)\n\n @pytest.mark.skipif(not HAVE_COMPILER, reason=\"Missing compiler\")\n @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()),\n reason=\"Fails with MSVC compiler \")\n def test_compile2(self):\n # Compile source and link the second source\n tsi = self.c_temp2\n c = customized_ccompiler()\n extra_link_args = tsi.calc_extra_info()['extra_link_args']\n previousDir = os.getcwd()\n try:\n # Change directory to not screw up directories\n os.chdir(self._dir2)\n c.compile([os.path.basename(self._src2)], output_dir=self._dir2,\n extra_postargs=extra_link_args)\n # Ensure that the object exists\n assert_(os.path.isfile(self._src2.replace('.c', '.o')))\n finally:\n os.chdir(previousDir)\n", "\"\"\"\nRecord Arrays\n=============\nRecord arrays expose the fields of structured arrays as properties.\n\nMost commonly, ndarrays contain elements of a single type, e.g. floats,\nintegers, bools etc. 
However, it is possible for elements to be combinations\nof these using structured types, such as::\n\n >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])\n >>> a\n array([(1, 2.0), (1, 2.0)],\n dtype=[('x', '<i4'), ('y', '<f8')])\n\nHere, each element consists of two fields: x (and int), and y (a float).\nThis is known as a structured array. The different fields are analogous\nto columns in a spread-sheet. The different fields can be accessed as\none would a dictionary::\n\n >>> a['x']\n array([1, 1])\n\n >>> a['y']\n array([ 2., 2.])\n\nRecord arrays allow us to access fields as properties::\n\n >>> ar = np.rec.array(a)\n\n >>> ar.x\n array([1, 1])\n\n >>> ar.y\n array([ 2., 2.])\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport sys\nimport os\nimport warnings\n\nfrom . import numeric as sb\nfrom . import numerictypes as nt\nfrom numpy.compat import isfileobj, bytes, long, unicode, os_fspath\nfrom numpy.core.overrides import set_module\nfrom .arrayprint import get_printoptions\n\n# All of the functions allow formats to be a dtype\n__all__ = ['record', 'recarray', 'format_parser']\n\n\nndarray = sb.ndarray\n\n_byteorderconv = {'b':'>',\n 'l':'<',\n 'n':'=',\n 'B':'>',\n 'L':'<',\n 'N':'=',\n 'S':'s',\n 's':'s',\n '>':'>',\n '<':'<',\n '=':'=',\n '|':'|',\n 'I':'|',\n 'i':'|'}\n\n# formats regular expression\n# allows multidimension spec with a tuple syntax in front\n# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '\n# are equally allowed\n\nnumfmt = nt.typeDict\n\ndef find_duplicate(list):\n \"\"\"Find duplication in a list, return a list of duplicated elements\"\"\"\n dup = []\n for i in range(len(list)):\n if (list[i] in list[i + 1:]):\n if (list[i] not in dup):\n dup.append(list[i])\n return dup\n\n\n@set_module('numpy')\nclass format_parser(object):\n \"\"\"\n Class to convert formats, names, titles description to a dtype.\n\n After constructing the format_parser object, the dtype attribute is\n the converted data-type:\n ``dtype = format_parser(formats, names, titles).dtype``\n\n Attributes\n ----------\n dtype : dtype\n The converted data-type.\n\n Parameters\n ----------\n formats : str or list of str\n The format description, either specified as a string with\n comma-separated format descriptions in the form ``'f8, i4, a5'``, or\n a list of format description strings in the form\n ``['f8', 'i4', 'a5']``.\n names : str or list/tuple of str\n The field names, either specified as a comma-separated string in the\n form ``'col1, col2, col3'``, or as a list or tuple of strings in the\n form ``['col1', 'col2', 'col3']``.\n An empty list can be used, in that case default field names\n ('f0', 'f1', ...) are used.\n titles : sequence\n Sequence of title strings. An empty list can be used to leave titles\n out.\n aligned : bool, optional\n If True, align the fields by padding as the C-compiler would.\n Default is False.\n byteorder : str, optional\n If specified, all the fields will be changed to the\n provided byte-order. Otherwise, the default byte-order is\n used. For all available string specifiers, see `dtype.newbyteorder`.\n\n See Also\n --------\n dtype, typename, sctype2char\n\n Examples\n --------\n >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],\n ... ['T1', 'T2', 'T3']).dtype\n dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'),\n (('T3', 'col3'), '|S5')])\n\n `names` and/or `titles` can be empty lists. If `titles` is an empty list,\n titles will simply not appear. 
If `names` is empty, default field names\n will be used.\n\n >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],\n ... []).dtype\n dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')])\n >>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype\n dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')])\n\n \"\"\"\n\n def __init__(self, formats, names, titles, aligned=False, byteorder=None):\n self._parseFormats(formats, aligned)\n self._setfieldnames(names, titles)\n self._createdescr(byteorder)\n self.dtype = self._descr\n\n def _parseFormats(self, formats, aligned=0):\n \"\"\" Parse the field formats \"\"\"\n\n if formats is None:\n raise ValueError(\"Need formats argument\")\n if isinstance(formats, list):\n if len(formats) < 2:\n formats.append('')\n formats = ','.join(formats)\n dtype = sb.dtype(formats, aligned)\n fields = dtype.fields\n if fields is None:\n dtype = sb.dtype([('f1', dtype)], aligned)\n fields = dtype.fields\n keys = dtype.names\n self._f_formats = [fields[key][0] for key in keys]\n self._offsets = [fields[key][1] for key in keys]\n self._nfields = len(keys)\n\n def _setfieldnames(self, names, titles):\n \"\"\"convert input field names into a list and assign to the _names\n attribute \"\"\"\n\n if (names):\n if (type(names) in [list, tuple]):\n pass\n elif isinstance(names, (str, unicode)):\n names = names.split(',')\n else:\n raise NameError(\"illegal input names %s\" % repr(names))\n\n self._names = [n.strip() for n in names[:self._nfields]]\n else:\n self._names = []\n\n # if the names are not specified, they will be assigned as\n # \"f0, f1, f2,...\"\n # if not enough names are specified, they will be assigned as \"f[n],\n # f[n+1],...\" etc. where n is the number of specified names...\"\n self._names += ['f%d' % i for i in range(len(self._names),\n self._nfields)]\n # check for redundant names\n _dup = find_duplicate(self._names)\n if _dup:\n raise ValueError(\"Duplicate field names: %s\" % _dup)\n\n if (titles):\n self._titles = [n.strip() for n in titles[:self._nfields]]\n else:\n self._titles = []\n titles = []\n\n if (self._nfields > len(titles)):\n self._titles += [None] * (self._nfields - len(titles))\n\n def _createdescr(self, byteorder):\n descr = sb.dtype({'names':self._names,\n 'formats':self._f_formats,\n 'offsets':self._offsets,\n 'titles':self._titles})\n if (byteorder is not None):\n byteorder = _byteorderconv[byteorder[0]]\n descr = descr.newbyteorder(byteorder)\n\n self._descr = descr\n\nclass record(nt.void):\n \"\"\"A data-type scalar that allows field access as attribute lookup.\n \"\"\"\n\n # manually set name and module so that this class's type shows up\n # as numpy.record when printed\n __name__ = 'record'\n __module__ = 'numpy'\n\n def __repr__(self):\n if get_printoptions()['legacy'] == '1.13':\n return self.__str__()\n return super(record, self).__repr__()\n\n def __str__(self):\n if get_printoptions()['legacy'] == '1.13':\n return str(self.item())\n return super(record, self).__str__()\n\n def __getattribute__(self, attr):\n if attr in ['setfield', 'getfield', 'dtype']:\n return nt.void.__getattribute__(self, attr)\n try:\n return nt.void.__getattribute__(self, attr)\n except AttributeError:\n pass\n fielddict = nt.void.__getattribute__(self, 'dtype').fields\n res = fielddict.get(attr, None)\n if res:\n obj = self.getfield(*res[:2])\n # if it has fields return a record,\n # otherwise return the object\n try:\n dt = obj.dtype\n except AttributeError:\n #happens if field is Object type\n return obj\n if dt.names is not 
None:\n return obj.view((self.__class__, obj.dtype))\n return obj\n else:\n raise AttributeError(\"'record' object has no \"\n \"attribute '%s'\" % attr)\n\n def __setattr__(self, attr, val):\n if attr in ['setfield', 'getfield', 'dtype']:\n raise AttributeError(\"Cannot set '%s' attribute\" % attr)\n fielddict = nt.void.__getattribute__(self, 'dtype').fields\n res = fielddict.get(attr, None)\n if res:\n return self.setfield(val, *res[:2])\n else:\n if getattr(self, attr, None):\n return nt.void.__setattr__(self, attr, val)\n else:\n raise AttributeError(\"'record' object has no \"\n \"attribute '%s'\" % attr)\n\n def __getitem__(self, indx):\n obj = nt.void.__getitem__(self, indx)\n\n # copy behavior of record.__getattribute__,\n if isinstance(obj, nt.void) and obj.dtype.names is not None:\n return obj.view((self.__class__, obj.dtype))\n else:\n # return a single element\n return obj\n\n def pprint(self):\n \"\"\"Pretty-print all fields.\"\"\"\n # pretty-print all fields\n names = self.dtype.names\n maxlen = max(len(name) for name in names)\n fmt = '%% %ds: %%s' % maxlen\n rows = [fmt % (name, getattr(self, name)) for name in names]\n return \"\\n\".join(rows)\n\n# The recarray is almost identical to a standard array (which supports\n# named fields already) The biggest difference is that it can use\n# attribute-lookup to find the fields and it is constructed using\n# a record.\n\n# If byteorder is given it forces a particular byteorder on all\n# the fields (and any subfields)\n\nclass recarray(ndarray):\n \"\"\"Construct an ndarray that allows field access using attributes.\n\n Arrays may have a data-types containing fields, analogous\n to columns in a spread sheet. An example is ``[(x, int), (y, float)]``,\n where each entry in the array is a pair of ``(int, float)``. Normally,\n these attributes are accessed using dictionary lookups such as ``arr['x']``\n and ``arr['y']``. Record arrays allow the fields to be accessed as members\n of the array, using ``arr.x`` and ``arr.y``.\n\n Parameters\n ----------\n shape : tuple\n Shape of output array.\n dtype : data-type, optional\n The desired data-type. By default, the data-type is determined\n from `formats`, `names`, `titles`, `aligned` and `byteorder`.\n formats : list of data-types, optional\n A list containing the data-types for the different columns, e.g.\n ``['i4', 'f8', 'i4']``. `formats` does *not* support the new\n convention of using types directly, i.e. ``(int, float, int)``.\n Note that `formats` must be a list, not a tuple.\n Given that `formats` is somewhat limited, we recommend specifying\n `dtype` instead.\n names : tuple of str, optional\n The name of each column, e.g. ``('x', 'y', 'z')``.\n buf : buffer, optional\n By default, a new array is created of the given shape and data-type.\n If `buf` is specified and is an object exposing the buffer interface,\n the array will use the memory from the existing buffer. In this case,\n the `offset` and `strides` keywords are available.\n\n Other Parameters\n ----------------\n titles : tuple of str, optional\n Aliases for column names. 
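# --- Illustrative sketch (not part of the original source) ---------------
# Indexing a recarray yields a numpy.record scalar; its fields can be read
# as attributes and pretty-printed with pprint(), defined above.
import numpy as np

r = np.rec.array([(1, 2.0), (3, 4.0)], dtype=[('x', '<i4'), ('y', '<f8')])
rec = r[0]                 # a numpy.record scalar
rec.x                      # -> 1, same as rec['x']
print(rec.pprint())        # one "name: value" row per field
# -------------------------------------------------------------------------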
For example, if `names` were\n ``('x', 'y', 'z')`` and `titles` is\n ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then\n ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.\n byteorder : {'<', '>', '='}, optional\n Byte-order for all fields.\n aligned : bool, optional\n Align the fields in memory as the C-compiler would.\n strides : tuple of ints, optional\n Buffer (`buf`) is interpreted according to these strides (strides\n define how many bytes each array element, row, column, etc.\n occupy in memory).\n offset : int, optional\n Start reading buffer (`buf`) from this offset onwards.\n order : {'C', 'F'}, optional\n Row-major (C-style) or column-major (Fortran-style) order.\n\n Returns\n -------\n rec : recarray\n Empty array of the given shape and type.\n\n See Also\n --------\n rec.fromrecords : Construct a record array from data.\n record : fundamental data-type for `recarray`.\n format_parser : determine a data-type from formats, names, titles.\n\n Notes\n -----\n This constructor can be compared to ``empty``: it creates a new record\n array but does not fill it with data. To create a record array from data,\n use one of the following methods:\n\n 1. Create a standard ndarray and convert it to a record array,\n using ``arr.view(np.recarray)``\n 2. Use the `buf` keyword.\n 3. Use `np.rec.fromrecords`.\n\n Examples\n --------\n Create an array with two fields, ``x`` and ``y``:\n\n >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])\n >>> x\n array([(1.0, 2), (3.0, 4)],\n dtype=[('x', '<f8'), ('y', '<i4')])\n\n >>> x['x']\n array([ 1., 3.])\n\n View the array as a record array:\n\n >>> x = x.view(np.recarray)\n\n >>> x.x\n array([ 1., 3.])\n\n >>> x.y\n array([2, 4])\n\n Create a new, empty record array:\n\n >>> np.recarray((2,),\n ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP\n rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),\n (3471280, 1.2134086255804012e-316, 0)],\n dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])\n\n \"\"\"\n\n # manually set name and module so that this class's type shows\n # up as \"numpy.recarray\" when printed\n __name__ = 'recarray'\n __module__ = 'numpy'\n\n def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,\n formats=None, names=None, titles=None,\n byteorder=None, aligned=False, order='C'):\n\n if dtype is not None:\n descr = sb.dtype(dtype)\n else:\n descr = format_parser(formats, names, titles, aligned, byteorder)._descr\n\n if buf is None:\n self = ndarray.__new__(subtype, shape, (record, descr), order=order)\n else:\n self = ndarray.__new__(subtype, shape, (record, descr),\n buffer=buf, offset=offset,\n strides=strides, order=order)\n return self\n\n def __array_finalize__(self, obj):\n if self.dtype.type is not record and self.dtype.names is not None:\n # if self.dtype is not np.record, invoke __setattr__ which will\n # convert it to a record if it is a void dtype.\n self.dtype = self.dtype\n\n def __getattribute__(self, attr):\n # See if ndarray has this attr, and return it if so. 
(note that this\n # means a field with the same name as an ndarray attr cannot be\n # accessed by attribute).\n try:\n return object.__getattribute__(self, attr)\n except AttributeError: # attr must be a fieldname\n pass\n\n # look for a field with this name\n fielddict = ndarray.__getattribute__(self, 'dtype').fields\n try:\n res = fielddict[attr][:2]\n except (TypeError, KeyError):\n raise AttributeError(\"recarray has no attribute %s\" % attr)\n obj = self.getfield(*res)\n\n # At this point obj will always be a recarray, since (see\n # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is\n # non-structured, convert it to an ndarray. Then if obj is structured\n # with void type convert it to the same dtype.type (eg to preserve\n # numpy.record type if present), since nested structured fields do not\n # inherit type. Don't do this for non-void structures though.\n if obj.dtype.names is not None:\n if issubclass(obj.dtype.type, nt.void):\n return obj.view(dtype=(self.dtype.type, obj.dtype))\n return obj\n else:\n return obj.view(ndarray)\n\n # Save the dictionary.\n # If the attr is a field name and not in the saved dictionary\n # Undo any \"setting\" of the attribute and do a setfield\n # Thus, you can't create attributes on-the-fly that are field names.\n def __setattr__(self, attr, val):\n\n # Automatically convert (void) structured types to records\n # (but not non-void structures, subarrays, or non-structured voids)\n if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None:\n val = sb.dtype((record, val))\n\n newattr = attr not in self.__dict__\n try:\n ret = object.__setattr__(self, attr, val)\n except Exception:\n fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}\n if attr not in fielddict:\n exctype, value = sys.exc_info()[:2]\n raise exctype(value)\n else:\n fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}\n if attr not in fielddict:\n return ret\n if newattr:\n # We just added this one or this setattr worked on an\n # internal attribute.\n try:\n object.__delattr__(self, attr)\n except Exception:\n return ret\n try:\n res = fielddict[attr][:2]\n except (TypeError, KeyError):\n raise AttributeError(\"record array has no attribute %s\" % attr)\n return self.setfield(val, *res)\n\n def __getitem__(self, indx):\n obj = super(recarray, self).__getitem__(indx)\n\n # copy behavior of getattr, except that here\n # we might also be returning a single element\n if isinstance(obj, ndarray):\n if obj.dtype.names is not None:\n obj = obj.view(type(self))\n if issubclass(obj.dtype.type, nt.void):\n return obj.view(dtype=(self.dtype.type, obj.dtype))\n return obj\n else:\n return obj.view(type=ndarray)\n else:\n # return a single element\n return obj\n\n def __repr__(self):\n\n repr_dtype = self.dtype\n if (self.dtype.type is record\n or (not issubclass(self.dtype.type, nt.void))):\n # If this is a full record array (has numpy.record dtype),\n # or if it has a scalar (non-void) dtype with no records,\n # represent it using the rec.array function. 
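# --- Illustrative sketch (not part of the original source) ---------------
# On a recarray, setting an attribute that names a field goes through
# __setattr__ -> setfield (see above), so attribute assignment and item
# assignment are interchangeable for existing fields.
import numpy as np

r = np.rec.array([(1, 2.0), (3, 4.0)], dtype=[('x', '<i4'), ('y', '<f8')])
r.x = [10, 30]             # same effect as r['x'] = [10, 30]
assert (r['x'] == [10, 30]).all()
# -------------------------------------------------------------------------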
Since rec.array\n # converts dtype to a numpy.record for us, convert back\n # to non-record before printing\n if repr_dtype.type is record:\n repr_dtype = sb.dtype((nt.void, repr_dtype))\n prefix = \"rec.array(\"\n fmt = 'rec.array(%s,%sdtype=%s)'\n else:\n # otherwise represent it using np.array plus a view\n # This should only happen if the user is playing\n # strange games with dtypes.\n prefix = \"array(\"\n fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'\n\n # get data/shape string. logic taken from numeric.array_repr\n if self.size > 0 or self.shape == (0,):\n lst = sb.array2string(\n self, separator=', ', prefix=prefix, suffix=',')\n else:\n # show zero-length shape unless it is (0,)\n lst = \"[], shape=%s\" % (repr(self.shape),)\n\n lf = '\\n'+' '*len(prefix)\n if get_printoptions()['legacy'] == '1.13':\n lf = ' ' + lf # trailing space\n return fmt % (lst, lf, repr_dtype)\n\n def field(self, attr, val=None):\n if isinstance(attr, int):\n names = ndarray.__getattribute__(self, 'dtype').names\n attr = names[attr]\n\n fielddict = ndarray.__getattribute__(self, 'dtype').fields\n\n res = fielddict[attr][:2]\n\n if val is None:\n obj = self.getfield(*res)\n if obj.dtype.names is not None:\n return obj\n return obj.view(ndarray)\n else:\n return self.setfield(val, *res)\n\n\ndef fromarrays(arrayList, dtype=None, shape=None, formats=None,\n names=None, titles=None, aligned=False, byteorder=None):\n \"\"\" create a record array from a (flat) list of arrays\n\n >>> x1=np.array([1,2,3,4])\n >>> x2=np.array(['a','dd','xyz','12'])\n >>> x3=np.array([1.1,2,3,4])\n >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')\n >>> print(r[1])\n (2, 'dd', 2.0)\n >>> x1[1]=34\n >>> r.a\n array([1, 2, 3, 4])\n \"\"\"\n\n arrayList = [sb.asarray(x) for x in arrayList]\n\n if shape is None or shape == 0:\n shape = arrayList[0].shape\n\n if isinstance(shape, int):\n shape = (shape,)\n\n if formats is None and dtype is None:\n # go through each object in the list to see if it is an ndarray\n # and determine the formats.\n formats = []\n for obj in arrayList:\n if not isinstance(obj, ndarray):\n raise ValueError(\"item in the array list must be an ndarray.\")\n formats.append(obj.dtype.str)\n formats = ','.join(formats)\n\n if dtype is not None:\n descr = sb.dtype(dtype)\n _names = descr.names\n else:\n parsed = format_parser(formats, names, titles, aligned, byteorder)\n _names = parsed._names\n descr = parsed._descr\n\n # Determine shape from data-type.\n if len(descr) != len(arrayList):\n raise ValueError(\"mismatch between the number of fields \"\n \"and the number of arrays\")\n\n d0 = descr[0].shape\n nn = len(d0)\n if nn > 0:\n shape = shape[:-nn]\n\n for k, obj in enumerate(arrayList):\n nn = descr[k].ndim\n testshape = obj.shape[:obj.ndim - nn]\n if testshape != shape:\n raise ValueError(\"array-shape mismatch in array %d\" % k)\n\n _array = recarray(shape, descr)\n\n # populate the record array (makes a copy)\n for i in range(len(arrayList)):\n _array[_names[i]] = arrayList[i]\n\n return _array\n\ndef fromrecords(recList, dtype=None, shape=None, formats=None, names=None,\n titles=None, aligned=False, byteorder=None):\n \"\"\" create a recarray from a list of records in text form\n\n The data in the same field can be heterogeneous, they will be promoted\n to the highest data type. This method is intended for creating\n smaller record arrays. 
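# --- Illustrative sketch (not part of the original source) ---------------
# recarray.field(), defined above, accepts either a field name or a field
# index, and doubles as a setter when a value is passed.
import numpy as np

r = np.rec.array([(1, 2.0), (3, 4.0)], dtype=[('x', '<i4'), ('y', '<f8')])
r.field('y')               # -> array([2., 4.])
r.field(0)                 # field by position, same as r.field('x')
r.field('x', [7, 8])       # setter form: same effect as r['x'] = [7, 8]
# -------------------------------------------------------------------------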
If used to create large array without formats\n defined\n\n r=fromrecords([(2,3.,'abc')]*100000)\n\n it can be slow.\n\n If formats is None, then this will auto-detect formats. Use list of\n tuples rather than list of lists for faster processing.\n\n >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],\n ... names='col1,col2,col3')\n >>> print(r[0])\n (456, 'dbe', 1.2)\n >>> r.col1\n array([456, 2])\n >>> r.col2\n array(['dbe', 'de'],\n dtype='|S3')\n >>> import pickle\n >>> print(pickle.loads(pickle.dumps(r)))\n [(456, 'dbe', 1.2) (2, 'de', 1.3)]\n \"\"\"\n\n if formats is None and dtype is None: # slower\n obj = sb.array(recList, dtype=object)\n arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])]\n return fromarrays(arrlist, formats=formats, shape=shape, names=names,\n titles=titles, aligned=aligned, byteorder=byteorder)\n\n if dtype is not None:\n descr = sb.dtype((record, dtype))\n else:\n descr = format_parser(formats, names, titles, aligned, byteorder)._descr\n\n try:\n retval = sb.array(recList, dtype=descr)\n except (TypeError, ValueError):\n if (shape is None or shape == 0):\n shape = len(recList)\n if isinstance(shape, (int, long)):\n shape = (shape,)\n if len(shape) > 1:\n raise ValueError(\"Can only deal with 1-d array.\")\n _array = recarray(shape, descr)\n for k in range(_array.size):\n _array[k] = tuple(recList[k])\n # list of lists instead of list of tuples ?\n # 2018-02-07, 1.14.1\n warnings.warn(\n \"fromrecords expected a list of tuples, may have received a list \"\n \"of lists instead. In the future that will raise an error\",\n FutureWarning, stacklevel=2)\n return _array\n else:\n if shape is not None and retval.shape != shape:\n retval.shape = shape\n\n res = retval.view(recarray)\n\n return res\n\n\ndef fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,\n names=None, titles=None, aligned=False, byteorder=None):\n \"\"\" create a (read-only) record array from binary data contained in\n a string\"\"\"\n\n if dtype is None and formats is None:\n raise TypeError(\"fromstring() needs a 'dtype' or 'formats' argument\")\n\n if dtype is not None:\n descr = sb.dtype(dtype)\n else:\n descr = format_parser(formats, names, titles, aligned, byteorder)._descr\n\n itemsize = descr.itemsize\n if (shape is None or shape == 0 or shape == -1):\n shape = (len(datastring) - offset) // itemsize\n\n _array = recarray(shape, descr, buf=datastring, offset=offset)\n return _array\n\ndef get_remaining_size(fd):\n try:\n fn = fd.fileno()\n except AttributeError:\n return os.path.getsize(fd.name) - fd.tell()\n st = os.fstat(fn)\n size = st.st_size - fd.tell()\n return size\n\ndef fromfile(fd, dtype=None, shape=None, offset=0, formats=None,\n names=None, titles=None, aligned=False, byteorder=None):\n \"\"\"Create an array from binary file data\n\n If file is a string or a path-like object then that file is opened,\n else it is assumed to be a file object. The file object must\n support random access (i.e. it must have tell and seek methods).\n\n >>> from tempfile import TemporaryFile\n >>> a = np.empty(10,dtype='f8,i4,a5')\n >>> a[5] = (0.5,10,'abcde')\n >>>\n >>> fd=TemporaryFile()\n >>> a = a.newbyteorder('<')\n >>> a.tofile(fd)\n >>>\n >>> fd.seek(0)\n >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,\n ... 
byteorder='<')\n >>> print(r[5])\n (0.5, 10, 'abcde')\n >>> r.shape\n (10,)\n \"\"\"\n \n if dtype is None and formats is None:\n raise TypeError(\"fromfile() needs a 'dtype' or 'formats' argument\")\n\n if (shape is None or shape == 0):\n shape = (-1,)\n elif isinstance(shape, (int, long)):\n shape = (shape,)\n\n if isfileobj(fd):\n # file already opened\n name = 0\n else:\n # open file\n fd = open(os_fspath(fd), 'rb')\n name = 1\n\n if (offset > 0):\n fd.seek(offset, 1)\n size = get_remaining_size(fd)\n\n if dtype is not None:\n descr = sb.dtype(dtype)\n else:\n descr = format_parser(formats, names, titles, aligned, byteorder)._descr\n\n itemsize = descr.itemsize\n\n shapeprod = sb.array(shape).prod(dtype=nt.intp)\n shapesize = shapeprod * itemsize\n if shapesize < 0:\n shape = list(shape)\n shape[shape.index(-1)] = size // -shapesize\n shape = tuple(shape)\n shapeprod = sb.array(shape).prod(dtype=nt.intp)\n\n nbytes = shapeprod * itemsize\n\n if nbytes > size:\n raise ValueError(\n \"Not enough bytes left in file for specified shape and type\")\n\n # create the array\n _array = recarray(shape, descr)\n nbytesread = fd.readinto(_array.data)\n if nbytesread != nbytes:\n raise IOError(\"Didn't read as many bytes as expected\")\n if name:\n fd.close()\n\n return _array\n\ndef array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,\n names=None, titles=None, aligned=False, byteorder=None, copy=True):\n \"\"\"Construct a record array from a wide-variety of objects.\n \"\"\"\n\n if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and\n (formats is None) and (dtype is None)):\n raise ValueError(\"Must define formats (or dtype) if object is \"\n \"None, string, or an open file\")\n\n kwds = {}\n if dtype is not None:\n dtype = sb.dtype(dtype)\n elif formats is not None:\n dtype = format_parser(formats, names, titles,\n aligned, byteorder)._descr\n else:\n kwds = {'formats': formats,\n 'names': names,\n 'titles': titles,\n 'aligned': aligned,\n 'byteorder': byteorder\n }\n\n if obj is None:\n if shape is None:\n raise ValueError(\"Must define a shape if obj is None\")\n return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)\n\n elif isinstance(obj, bytes):\n return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)\n\n elif isinstance(obj, (list, tuple)):\n if isinstance(obj[0], (tuple, list)):\n return fromrecords(obj, dtype=dtype, shape=shape, **kwds)\n else:\n return fromarrays(obj, dtype=dtype, shape=shape, **kwds)\n\n elif isinstance(obj, recarray):\n if dtype is not None and (obj.dtype != dtype):\n new = obj.view(dtype)\n else:\n new = obj\n if copy:\n new = new.copy()\n return new\n\n elif isfileobj(obj):\n return fromfile(obj, dtype=dtype, shape=shape, offset=offset)\n\n elif isinstance(obj, ndarray):\n if dtype is not None and (obj.dtype != dtype):\n new = obj.view(dtype)\n else:\n new = obj\n if copy:\n new = new.copy()\n return new.view(recarray)\n\n else:\n interface = getattr(obj, \"__array_interface__\", None)\n if interface is None or not isinstance(interface, dict):\n raise ValueError(\"Unknown input type\")\n obj = sb.array(obj)\n if dtype is not None and (obj.dtype != dtype):\n obj = obj.view(dtype)\n return obj.view(recarray)\n", "from __future__ import division, absolute_import, print_function\n\nimport sys\nimport operator\nimport pytest\nimport ctypes\nimport gc\n\nimport numpy as np\nfrom numpy.core._rational_tests import rational\nfrom numpy.testing import (\n assert_, assert_equal, assert_array_equal, assert_raises, 
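# --- Illustrative sketch (not part of the original source) ---------------
# np.core.records.array(), defined above, dispatches on the input type:
# a list of tuples goes through fromrecords(), a flat list of arrays through
# fromarrays(), and raw bytes through fromstring().
import numpy as np

dt = np.dtype([('x', '<i4'), ('y', '<f8')])

r1 = np.core.records.array([(1, 2.0), (3, 4.0)], dtype=dt)      # fromrecords path
r2 = np.core.records.array([np.array([1, 3]),
                            np.array([2.0, 4.0])], dtype=dt)    # fromarrays path
r3 = np.core.records.array(r1.tobytes(), dtype=dt)              # fromstring path (read-only)
# r1.x, r2.x and r3.x all give array([1, 3])
# -------------------------------------------------------------------------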
HAS_REFCOUNT)\nfrom numpy.core.numeric import pickle\n\ndef assert_dtype_equal(a, b):\n assert_equal(a, b)\n assert_equal(hash(a), hash(b),\n \"two equivalent types do not hash to the same value !\")\n\ndef assert_dtype_not_equal(a, b):\n assert_(a != b)\n assert_(hash(a) != hash(b),\n \"two different types hash to the same value !\")\n\nclass TestBuiltin(object):\n @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,\n np.unicode])\n def test_run(self, t):\n \"\"\"Only test hash runs at all.\"\"\"\n dt = np.dtype(t)\n hash(dt)\n\n @pytest.mark.parametrize('t', [int, float])\n def test_dtype(self, t):\n # Make sure equivalent byte order char hash the same (e.g. < and = on\n # little endian)\n dt = np.dtype(t)\n dt2 = dt.newbyteorder(\"<\")\n dt3 = dt.newbyteorder(\">\")\n if dt == dt2:\n assert_(dt.byteorder != dt2.byteorder, \"bogus test\")\n assert_dtype_equal(dt, dt2)\n else:\n assert_(dt.byteorder != dt3.byteorder, \"bogus test\")\n assert_dtype_equal(dt, dt3)\n\n def test_equivalent_dtype_hashing(self):\n # Make sure equivalent dtypes with different type num hash equal\n uintp = np.dtype(np.uintp)\n if uintp.itemsize == 4:\n left = uintp\n right = np.dtype(np.uint32)\n else:\n left = uintp\n right = np.dtype(np.ulonglong)\n assert_(left == right)\n assert_(hash(left) == hash(right))\n\n def test_invalid_types(self):\n # Make sure invalid type strings raise an error\n\n assert_raises(TypeError, np.dtype, 'O3')\n assert_raises(TypeError, np.dtype, 'O5')\n assert_raises(TypeError, np.dtype, 'O7')\n assert_raises(TypeError, np.dtype, 'b3')\n assert_raises(TypeError, np.dtype, 'h4')\n assert_raises(TypeError, np.dtype, 'I5')\n assert_raises(TypeError, np.dtype, 'e3')\n assert_raises(TypeError, np.dtype, 'f5')\n\n if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:\n assert_raises(TypeError, np.dtype, 'g12')\n elif np.dtype('g').itemsize == 12:\n assert_raises(TypeError, np.dtype, 'g16')\n\n if np.dtype('l').itemsize == 8:\n assert_raises(TypeError, np.dtype, 'l4')\n assert_raises(TypeError, np.dtype, 'L4')\n else:\n assert_raises(TypeError, np.dtype, 'l8')\n assert_raises(TypeError, np.dtype, 'L8')\n\n if np.dtype('q').itemsize == 8:\n assert_raises(TypeError, np.dtype, 'q4')\n assert_raises(TypeError, np.dtype, 'Q4')\n else:\n assert_raises(TypeError, np.dtype, 'q8')\n assert_raises(TypeError, np.dtype, 'Q8')\n\n def test_bad_param(self):\n # Can't give a size that's too small\n assert_raises(ValueError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['i4', 'i1'],\n 'offsets':[0, 4],\n 'itemsize':4})\n # If alignment is enabled, the alignment (4) must divide the itemsize\n assert_raises(ValueError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['i4', 'i1'],\n 'offsets':[0, 4],\n 'itemsize':9}, align=True)\n # If alignment is enabled, the individual fields must be aligned\n assert_raises(ValueError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['i1', 'f4'],\n 'offsets':[0, 2]}, align=True)\n\n def test_field_order_equality(self):\n x = np.dtype({'names': ['A', 'B'], \n 'formats': ['i4', 'f4'], \n 'offsets': [0, 4]})\n y = np.dtype({'names': ['B', 'A'], \n 'formats': ['f4', 'i4'], \n 'offsets': [4, 0]})\n assert_equal(x == y, False)\n\nclass TestRecord(object):\n def test_equivalent_record(self):\n \"\"\"Test whether equivalent record dtypes hash the same.\"\"\"\n a = np.dtype([('yo', int)])\n b = np.dtype([('yo', int)])\n assert_dtype_equal(a, b)\n\n def test_different_names(self):\n # In theory, they may hash the same (collision) ?\n a = 
np.dtype([('yo', int)])\n b = np.dtype([('ye', int)])\n assert_dtype_not_equal(a, b)\n\n def test_different_titles(self):\n # In theory, they may hash the same (collision) ?\n a = np.dtype({'names': ['r', 'b'],\n 'formats': ['u1', 'u1'],\n 'titles': ['Red pixel', 'Blue pixel']})\n b = np.dtype({'names': ['r', 'b'],\n 'formats': ['u1', 'u1'],\n 'titles': ['RRed pixel', 'Blue pixel']})\n assert_dtype_not_equal(a, b)\n\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\n def test_refcount_dictionary_setting(self):\n names = [\"name1\"]\n formats = [\"f8\"]\n titles = [\"t1\"]\n offsets = [0]\n d = dict(names=names, formats=formats, titles=titles, offsets=offsets)\n refcounts = {k: sys.getrefcount(i) for k, i in d.items()}\n np.dtype(d)\n refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}\n assert refcounts == refcounts_new\n\n def test_mutate(self):\n # Mutating a dtype should reset the cached hash value\n a = np.dtype([('yo', int)])\n b = np.dtype([('yo', int)])\n c = np.dtype([('ye', int)])\n assert_dtype_equal(a, b)\n assert_dtype_not_equal(a, c)\n a.names = ['ye']\n assert_dtype_equal(a, c)\n assert_dtype_not_equal(a, b)\n state = b.__reduce__()[2]\n a.__setstate__(state)\n assert_dtype_equal(a, b)\n assert_dtype_not_equal(a, c)\n\n def test_not_lists(self):\n \"\"\"Test if an appropriate exception is raised when passing bad values to\n the dtype constructor.\n \"\"\"\n assert_raises(TypeError, np.dtype,\n dict(names={'A', 'B'}, formats=['f8', 'i4']))\n assert_raises(TypeError, np.dtype,\n dict(names=['A', 'B'], formats={'f8', 'i4'}))\n\n def test_aligned_size(self):\n # Check that structured dtypes get padded to an aligned size\n dt = np.dtype('i4, i1', align=True)\n assert_equal(dt.itemsize, 8)\n dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)\n assert_equal(dt.itemsize, 8)\n dt = np.dtype({'names':['f0', 'f1'],\n 'formats':['i4', 'u1'],\n 'offsets':[0, 4]}, align=True)\n assert_equal(dt.itemsize, 8)\n dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)\n assert_equal(dt.itemsize, 8)\n # Nesting should preserve that alignment\n dt1 = np.dtype([('f0', 'i4'),\n ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),\n ('f2', 'i1')], align=True)\n assert_equal(dt1.itemsize, 20)\n dt2 = np.dtype({'names':['f0', 'f1', 'f2'],\n 'formats':['i4',\n [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],\n 'i1'],\n 'offsets':[0, 4, 16]}, align=True)\n assert_equal(dt2.itemsize, 20)\n dt3 = np.dtype({'f0': ('i4', 0),\n 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),\n 'f2': ('i1', 16)}, align=True)\n assert_equal(dt3.itemsize, 20)\n assert_equal(dt1, dt2)\n assert_equal(dt2, dt3)\n # Nesting should preserve packing\n dt1 = np.dtype([('f0', 'i4'),\n ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),\n ('f2', 'i1')], align=False)\n assert_equal(dt1.itemsize, 11)\n dt2 = np.dtype({'names':['f0', 'f1', 'f2'],\n 'formats':['i4',\n [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],\n 'i1'],\n 'offsets':[0, 4, 10]}, align=False)\n assert_equal(dt2.itemsize, 11)\n dt3 = np.dtype({'f0': ('i4', 0),\n 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),\n 'f2': ('i1', 10)}, align=False)\n assert_equal(dt3.itemsize, 11)\n assert_equal(dt1, dt2)\n assert_equal(dt2, dt3)\n # Array of subtype should preserve alignment\n dt1 = np.dtype([('a', '|i1'),\n ('b', [('f0', '<i2'),\n ('f1', '<f4')], 2)], align=True)\n assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),\n ('b', [('f0', '<i2'), ('', '|V2'),\n ('f1', '<f4')], (2,))])\n\n def test_union_struct(self):\n # Should be able 
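# --- Illustrative sketch (not part of the original source) ---------------
# What the aligned-size tests above check, in isolation: align=True pads a
# structured dtype the way a C compiler would, align=False packs it.
import numpy as np

packed = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=False)
padded = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
packed.itemsize       # -> 5
padded.itemsize       # -> 8 (tail padding keeps the i4 field aligned in arrays)
# -------------------------------------------------------------------------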
to create union dtypes\n dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],\n 'offsets':[0, 0, 2]}, align=True)\n assert_equal(dt.itemsize, 4)\n a = np.array([3], dtype='<u4').view(dt)\n a['f1'] = 10\n a['f2'] = 36\n assert_equal(a['f0'], 10 + 36*256*256)\n # Should be able to specify fields out of order\n dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],\n 'offsets':[4, 0, 2]}, align=True)\n assert_equal(dt.itemsize, 8)\n # field name should not matter: assignment is by position\n dt2 = np.dtype({'names':['f2', 'f0', 'f1'],\n 'formats':['<u4', '<u2', '<u2'],\n 'offsets':[4, 0, 2]}, align=True)\n vals = [(0, 1, 2), (3, -1, 4)]\n vals2 = [(0, 1, 2), (3, -1, 4)]\n a = np.array(vals, dt)\n b = np.array(vals2, dt2)\n assert_equal(a.astype(dt2), b)\n assert_equal(b.astype(dt), a)\n assert_equal(a.view(dt2), b)\n assert_equal(b.view(dt), a)\n # Should not be able to overlap objects with other types\n assert_raises(TypeError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['O', 'i1'],\n 'offsets':[0, 2]})\n assert_raises(TypeError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['i4', 'O'],\n 'offsets':[0, 3]})\n assert_raises(TypeError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':[[('a', 'O')], 'i1'],\n 'offsets':[0, 2]})\n assert_raises(TypeError, np.dtype,\n {'names':['f0', 'f1'],\n 'formats':['i4', [('a', 'O')]],\n 'offsets':[0, 3]})\n # Out of order should still be ok, however\n dt = np.dtype({'names':['f0', 'f1'],\n 'formats':['i1', 'O'],\n 'offsets':[np.dtype('intp').itemsize, 0]})\n\n def test_comma_datetime(self):\n dt = np.dtype('M8[D],datetime64[Y],i8')\n assert_equal(dt, np.dtype([('f0', 'M8[D]'),\n ('f1', 'datetime64[Y]'),\n ('f2', 'i8')]))\n\n def test_from_dictproxy(self):\n # Tests for PR #5920\n dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})\n assert_dtype_equal(dt, np.dtype(dt.fields))\n dt2 = np.dtype((np.void, dt.fields))\n assert_equal(dt2.fields, dt.fields)\n\n def test_from_dict_with_zero_width_field(self):\n # Regression test for #6430 / #2196\n dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])\n dt2 = np.dtype({'names': ['val1', 'val2'],\n 'formats': [(np.float32, (0,)), int]})\n\n assert_dtype_equal(dt, dt2)\n assert_equal(dt.fields['val1'][0].itemsize, 0)\n assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)\n\n def test_bool_commastring(self):\n d = np.dtype('?,?,?') # raises?\n assert_equal(len(d.names), 3)\n for n in d.names:\n assert_equal(d.fields[n][0], np.dtype('?'))\n\n def test_nonint_offsets(self):\n # gh-8059\n def make_dtype(off):\n return np.dtype({'names': ['A'], 'formats': ['i4'],\n 'offsets': [off]})\n\n assert_raises(TypeError, make_dtype, 'ASD')\n assert_raises(OverflowError, make_dtype, 2**70)\n assert_raises(TypeError, make_dtype, 2.3)\n assert_raises(ValueError, make_dtype, -10)\n\n # no errors here:\n dt = make_dtype(np.uint32(0))\n np.zeros(1, dtype=dt)[0].item()\n\n def test_fields_by_index(self):\n dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])\n assert_dtype_equal(dt[0], np.dtype(np.int8))\n assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))\n assert_dtype_equal(dt[-1], dt[1])\n assert_dtype_equal(dt[-2], dt[0])\n assert_raises(IndexError, lambda: dt[-3])\n\n assert_raises(TypeError, operator.getitem, dt, 3.0)\n assert_raises(TypeError, operator.getitem, dt, [])\n\n assert_equal(dt[1], dt[np.int8(1)])\n\n def test_partial_dict(self):\n # 'names' is missing\n assert_raises(ValueError, np.dtype,\n {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 
4)})\n \n\nclass TestSubarray(object):\n def test_single_subarray(self):\n a = np.dtype((int, (2)))\n b = np.dtype((int, (2,)))\n assert_dtype_equal(a, b)\n\n assert_equal(type(a.subdtype[1]), tuple)\n assert_equal(type(b.subdtype[1]), tuple)\n\n def test_equivalent_record(self):\n \"\"\"Test whether equivalent subarray dtypes hash the same.\"\"\"\n a = np.dtype((int, (2, 3)))\n b = np.dtype((int, (2, 3)))\n assert_dtype_equal(a, b)\n\n def test_nonequivalent_record(self):\n \"\"\"Test whether different subarray dtypes hash differently.\"\"\"\n a = np.dtype((int, (2, 3)))\n b = np.dtype((int, (3, 2)))\n assert_dtype_not_equal(a, b)\n\n a = np.dtype((int, (2, 3)))\n b = np.dtype((int, (2, 2)))\n assert_dtype_not_equal(a, b)\n\n a = np.dtype((int, (1, 2, 3)))\n b = np.dtype((int, (1, 2)))\n assert_dtype_not_equal(a, b)\n\n def test_shape_equal(self):\n \"\"\"Test some data types that are equal\"\"\"\n assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))\n assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))\n assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))\n assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))\n d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))\n assert_dtype_equal(np.dtype(d), np.dtype(d))\n\n def test_shape_simple(self):\n \"\"\"Test some simple cases that shouldn't be equal\"\"\"\n assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))\n assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))\n assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))\n\n def test_shape_monster(self):\n \"\"\"Test some more complicated cases that shouldn't be equal\"\"\"\n assert_dtype_not_equal(\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\n np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))\n assert_dtype_not_equal(\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))\n assert_dtype_not_equal(\n np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\n np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))\n assert_dtype_not_equal(\n np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),\n np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))\n\n def test_shape_sequence(self):\n # Any sequence of integers should work as shape, but the result\n # should be a tuple (immutable) of base type integers.\n a = np.array([1, 2, 3], dtype=np.int16)\n l = [1, 2, 3]\n # Array gets converted\n dt = np.dtype([('a', 'f4', a)])\n assert_(isinstance(dt['a'].shape, tuple))\n assert_(isinstance(dt['a'].shape[0], int))\n # List gets converted\n dt = np.dtype([('a', 'f4', l)])\n assert_(isinstance(dt['a'].shape, tuple))\n #\n\n class IntLike(object):\n def __index__(self):\n return 3\n\n def __int__(self):\n # (a PyNumber_Check fails without __int__)\n return 3\n\n dt = np.dtype([('a', 'f4', IntLike())])\n assert_(isinstance(dt['a'].shape, tuple))\n assert_(isinstance(dt['a'].shape[0], int))\n dt = np.dtype([('a', 'f4', (IntLike(),))])\n assert_(isinstance(dt['a'].shape, tuple))\n assert_(isinstance(dt['a'].shape[0], int))\n\n def test_shape_matches_ndim(self):\n dt = np.dtype([('a', 'f4', ())])\n assert_equal(dt['a'].shape, ())\n assert_equal(dt['a'].ndim, 0)\n\n dt = np.dtype([('a', 'f4')])\n assert_equal(dt['a'].shape, ())\n assert_equal(dt['a'].ndim, 0)\n\n dt = np.dtype([('a', 'f4', 4)])\n assert_equal(dt['a'].shape, (4,))\n 
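# --- Illustrative sketch (not part of the original source) ---------------
# The subarray fields exercised in the tests above can also be inspected
# directly: dt['a'] is itself a dtype whose shape/ndim/base/subdtype
# describe the subarray.
import numpy as np

dt = np.dtype([('a', 'f4', (2, 3))])
dt['a'].shape          # -> (2, 3)
dt['a'].ndim           # -> 2
dt['a'].base           # -> dtype('float32')
dt['a'].subdtype       # -> (dtype('float32'), (2, 3))
# -------------------------------------------------------------------------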
assert_equal(dt['a'].ndim, 1)\n\n dt = np.dtype([('a', 'f4', (1, 2, 3))])\n assert_equal(dt['a'].shape, (1, 2, 3))\n assert_equal(dt['a'].ndim, 3)\n\n def test_shape_invalid(self):\n # Check that the shape is valid.\n max_int = np.iinfo(np.intc).max\n max_intp = np.iinfo(np.intp).max\n # Too large values (the datatype is part of this)\n assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])\n assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])\n assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])\n # Takes a different code path (fails earlier:\n assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])\n # Negative values\n assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])\n assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])\n\n def test_alignment(self):\n #Check that subarrays are aligned\n t1 = np.dtype('1i4', align=True)\n t2 = np.dtype('2i4', align=True)\n assert_equal(t1.alignment, t2.alignment)\n\n\ndef iter_struct_object_dtypes():\n \"\"\"\n Iterates over a few complex dtypes and object pattern which\n fill the array with a given object (defaults to a singleton).\n\n Yields\n ------\n dtype : dtype\n pattern : tuple\n Structured tuple for use with `np.array`.\n count : int\n Number of objects stored in the dtype.\n singleton : object\n A singleton object. The returned pattern is constructed so that\n all objects inside the datatype are set to the singleton.\n \"\"\"\n obj = object()\n\n dt = np.dtype([('b', 'O', (2, 3))])\n p = ([[obj] * 3] * 2,)\n yield pytest.param(dt, p, 6, obj, id=\"<subarray>\")\n\n dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])\n p = (0, [[obj] * 3] * 2)\n yield pytest.param(dt, p, 6, obj, id=\"<subarray in field>\")\n\n dt = np.dtype([('a', 'i4'),\n ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])\n p = (0, [[(obj, 0)] * 3] * 2)\n yield pytest.param(dt, p, 6, obj, id=\"<structured subarray 1>\")\n\n dt = np.dtype([('a', 'i4'),\n ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])\n p = (0, [[(obj, obj)] * 3] * 2)\n yield pytest.param(dt, p, 12, obj, id=\"<structured subarray 2>\")\n\n\[email protected](not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\nclass TestStructuredObjectRefcounting:\n \"\"\"These tests cover various uses of complicated structured types which\n include objects and thus require reference counting.\n \"\"\"\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\n iter_struct_object_dtypes())\n @pytest.mark.parametrize([\"creation_func\", \"creation_obj\"], [\n pytest.param(np.empty, None,\n # None is probably used for too many things\n marks=pytest.mark.skip(\"unreliable due to python's behaviour\")),\n (np.ones, 1),\n (np.zeros, 0)])\n def test_structured_object_create_delete(self, dt, pat, count, singleton,\n creation_func, creation_obj):\n \"\"\"Structured object reference counting in creation and deletion\"\"\"\n # The test assumes that 0, 1, and None are singletons.\n gc.collect()\n before = sys.getrefcount(creation_obj)\n arr = creation_func(3, dt)\n\n now = sys.getrefcount(creation_obj)\n assert now - before == count * 3\n del arr\n now = sys.getrefcount(creation_obj)\n assert now == before\n\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\n iter_struct_object_dtypes())\n def test_structured_object_item_setting(self, dt, pat, count, singleton):\n \"\"\"Structured object reference counting for simple item setting\"\"\"\n one = 1\n\n gc.collect()\n before = sys.getrefcount(singleton)\n arr = np.array([pat] * 3, dt)\n assert sys.getrefcount(singleton) - 
before == count * 3\n # Fill with `1` and check that it was replaced correctly:\n before2 = sys.getrefcount(one)\n arr[...] = one\n after2 = sys.getrefcount(one)\n assert after2 - before2 == count * 3\n del arr\n gc.collect()\n assert sys.getrefcount(one) == before2\n assert sys.getrefcount(singleton) == before\n\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\n iter_struct_object_dtypes())\n @pytest.mark.parametrize(\n ['shape', 'index', 'items_changed'],\n [((3,), ([0, 2],), 2),\n ((3, 2), ([0, 2], slice(None)), 4),\n ((3, 2), ([0, 2], [1]), 2),\n ((3,), ([True, False, True]), 2)])\n def test_structured_object_indexing(self, shape, index, items_changed,\n dt, pat, count, singleton):\n \"\"\"Structured object reference counting for advanced indexing.\"\"\"\n zero = 0\n one = 1\n\n arr = np.zeros(shape, dt)\n\n gc.collect()\n before_zero = sys.getrefcount(zero)\n before_one = sys.getrefcount(one)\n # Test item getting:\n part = arr[index]\n after_zero = sys.getrefcount(zero)\n assert after_zero - before_zero == count * items_changed\n del part\n # Test item setting:\n arr[index] = one\n gc.collect()\n after_zero = sys.getrefcount(zero)\n after_one = sys.getrefcount(one)\n assert before_zero - after_zero == count * items_changed\n assert after_one - before_one == count * items_changed\n\n @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],\n iter_struct_object_dtypes())\n def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):\n \"\"\"Structured object reference counting for specialized functions.\n The older functions such as take and repeat use different code paths\n then item setting (when writing this).\n \"\"\"\n indices = [0, 1]\n\n arr = np.array([pat] * 3, dt)\n gc.collect()\n before = sys.getrefcount(singleton)\n res = arr.take(indices)\n after = sys.getrefcount(singleton)\n assert after - before == count * 2\n new = res.repeat(10)\n gc.collect()\n after_repeat = sys.getrefcount(singleton)\n assert after_repeat - after == count * 2 * 10\n\n\nclass TestStructuredDtypeSparseFields(object):\n \"\"\"Tests subarray fields which contain sparse dtypes so that\n not all memory is used by the dtype work. Such dtype's should\n leave the underlying memory unchanged.\n \"\"\"\n dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],\n 'offsets':[0, 4]}, (2, 3))])\n sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],\n 'offsets':[4]}, (2, 3))])\n\n @pytest.mark.xfail(reason=\"inaccessible data is changed see gh-12686.\")\n @pytest.mark.valgrind_error(reason=\"reads from unitialized buffers.\")\n def test_sparse_field_assignment(self):\n arr = np.zeros(3, self.dtype)\n sparse_arr = arr.view(self.sparse_dtype)\n\n sparse_arr[...] 
= np.finfo(np.float32).max\n # dtype is reduced when accessing the field, so shape is (3, 2, 3):\n assert_array_equal(arr[\"a\"][\"aa\"], np.zeros((3, 2, 3)))\n\n def test_sparse_field_assignment_fancy(self):\n # Fancy assignment goes to the copyswap function for comlex types:\n arr = np.zeros(3, self.dtype)\n sparse_arr = arr.view(self.sparse_dtype)\n\n sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max\n # dtype is reduced when accessing the field, so shape is (3, 2, 3):\n assert_array_equal(arr[\"a\"][\"aa\"], np.zeros((3, 2, 3)))\n\n\nclass TestMonsterType(object):\n \"\"\"Test deeply nested subtypes.\"\"\"\n\n def test1(self):\n simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],\n 'titles': ['Red pixel', 'Blue pixel']})\n a = np.dtype([('yo', int), ('ye', simple1),\n ('yi', np.dtype((int, (3, 2))))])\n b = np.dtype([('yo', int), ('ye', simple1),\n ('yi', np.dtype((int, (3, 2))))])\n assert_dtype_equal(a, b)\n\n c = np.dtype([('yo', int), ('ye', simple1),\n ('yi', np.dtype((a, (3, 2))))])\n d = np.dtype([('yo', int), ('ye', simple1),\n ('yi', np.dtype((a, (3, 2))))])\n assert_dtype_equal(c, d)\n\nclass TestMetadata(object):\n def test_no_metadata(self):\n d = np.dtype(int)\n assert_(d.metadata is None)\n\n def test_metadata_takes_dict(self):\n d = np.dtype(int, metadata={'datum': 1})\n assert_(d.metadata == {'datum': 1})\n\n def test_metadata_rejects_nondict(self):\n assert_raises(TypeError, np.dtype, int, metadata='datum')\n assert_raises(TypeError, np.dtype, int, metadata=1)\n assert_raises(TypeError, np.dtype, int, metadata=None)\n\n def test_nested_metadata(self):\n d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])\n assert_(d['a'].metadata == {'datum': 1})\n\n def test_base_metadata_copied(self):\n d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))\n assert_(d.metadata == {'datum': 1})\n\nclass TestString(object):\n def test_complex_dtype_str(self):\n dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),\n ('rtile', '>f4', (64, 36))], (3,)),\n ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),\n ('bright', '>f4', (8, 36))])])\n assert_equal(str(dt),\n \"[('top', [('tiles', ('>f4', (64, 64)), (1,)), \"\n \"('rtile', '>f4', (64, 36))], (3,)), \"\n \"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), \"\n \"('bright', '>f4', (8, 36))])]\")\n\n # If the sticky aligned flag is set to True, it makes the\n # str() function use a dict representation with an 'aligned' flag\n dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),\n ('rtile', '>f4', (64, 36))],\n (3,)),\n ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),\n ('bright', '>f4', (8, 36))])],\n align=True)\n assert_equal(str(dt),\n \"{'names':['top','bottom'], \"\n \"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), \"\n \"('rtile', '>f4', (64, 36))], (3,)),\"\n \"[('bleft', ('>f4', (8, 64)), (1,)), \"\n \"('bright', '>f4', (8, 36))]], \"\n \"'offsets':[0,76800], \"\n \"'itemsize':80000, \"\n \"'aligned':True}\")\n assert_equal(np.dtype(eval(str(dt))), dt)\n\n dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],\n 'offsets': [0, 1, 2],\n 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})\n assert_equal(str(dt),\n \"[(('Red pixel', 'r'), 'u1'), \"\n \"(('Green pixel', 'g'), 'u1'), \"\n \"(('Blue pixel', 'b'), 'u1')]\")\n\n dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],\n 'formats': ['<u4', 'u1', 'u1', 'u1'],\n 'offsets': [0, 0, 1, 2],\n 'titles': ['Color', 'Red pixel',\n 'Green pixel', 'Blue pixel']})\n assert_equal(str(dt),\n \"{'names':['rgba','r','g','b'],\"\n \" 
'formats':['<u4','u1','u1','u1'],\"\n \" 'offsets':[0,0,1,2],\"\n \" 'titles':['Color','Red pixel',\"\n \"'Green pixel','Blue pixel'],\"\n \" 'itemsize':4}\")\n\n dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],\n 'offsets': [0, 2],\n 'titles': ['Red pixel', 'Blue pixel']})\n assert_equal(str(dt),\n \"{'names':['r','b'],\"\n \" 'formats':['u1','u1'],\"\n \" 'offsets':[0,2],\"\n \" 'titles':['Red pixel','Blue pixel'],\"\n \" 'itemsize':3}\")\n\n dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])\n assert_equal(str(dt),\n \"[('a', '<m8[D]'), ('b', '<M8[us]')]\")\n\n def test_repr_structured(self):\n dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),\n ('rtile', '>f4', (64, 36))], (3,)),\n ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),\n ('bright', '>f4', (8, 36))])])\n assert_equal(repr(dt),\n \"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), \"\n \"('rtile', '>f4', (64, 36))], (3,)), \"\n \"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), \"\n \"('bright', '>f4', (8, 36))])])\")\n\n dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],\n 'offsets': [0, 1, 2],\n 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},\n align=True)\n assert_equal(repr(dt),\n \"dtype([(('Red pixel', 'r'), 'u1'), \"\n \"(('Green pixel', 'g'), 'u1'), \"\n \"(('Blue pixel', 'b'), 'u1')], align=True)\")\n\n def test_repr_structured_not_packed(self):\n dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],\n 'formats': ['<u4', 'u1', 'u1', 'u1'],\n 'offsets': [0, 0, 1, 2],\n 'titles': ['Color', 'Red pixel',\n 'Green pixel', 'Blue pixel']}, align=True)\n assert_equal(repr(dt),\n \"dtype({'names':['rgba','r','g','b'],\"\n \" 'formats':['<u4','u1','u1','u1'],\"\n \" 'offsets':[0,0,1,2],\"\n \" 'titles':['Color','Red pixel',\"\n \"'Green pixel','Blue pixel'],\"\n \" 'itemsize':4}, align=True)\")\n\n dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],\n 'offsets': [0, 2],\n 'titles': ['Red pixel', 'Blue pixel'],\n 'itemsize': 4})\n assert_equal(repr(dt),\n \"dtype({'names':['r','b'], \"\n \"'formats':['u1','u1'], \"\n \"'offsets':[0,2], \"\n \"'titles':['Red pixel','Blue pixel'], \"\n \"'itemsize':4})\")\n\n def test_repr_structured_datetime(self):\n dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])\n assert_equal(repr(dt),\n \"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])\")\n\n def test_repr_str_subarray(self):\n dt = np.dtype(('<i2', (1,)))\n assert_equal(repr(dt), \"dtype(('<i2', (1,)))\")\n assert_equal(str(dt), \"('<i2', (1,))\")\n\n @pytest.mark.skipif(sys.version_info[0] >= 3, reason=\"Python 2 only\")\n def test_dtype_str_with_long_in_shape(self):\n # Pull request #376, should not error\n np.dtype('(1L,)i4')\n\n def test_base_dtype_with_object_type(self):\n # Issue gh-2798, should not error.\n np.array(['a'], dtype=\"O\").astype((\"O\", [(\"name\", \"O\")]))\n\n def test_empty_string_to_object(self):\n # Pull request #4722\n np.array([\"\", \"\"]).astype(object)\n\n def test_void_subclass_unsized(self):\n dt = np.dtype(np.record)\n assert_equal(repr(dt), \"dtype('V')\")\n assert_equal(str(dt), '|V0')\n assert_equal(dt.name, 'record')\n\n def test_void_subclass_sized(self):\n dt = np.dtype((np.record, 2))\n assert_equal(repr(dt), \"dtype('V2')\")\n assert_equal(str(dt), '|V2')\n assert_equal(dt.name, 'record16')\n\n def test_void_subclass_fields(self):\n dt = np.dtype((np.record, [('a', '<u2')]))\n assert_equal(repr(dt), \"dtype((numpy.record, [('a', '<u2')]))\")\n assert_equal(str(dt), \"(numpy.record, [('a', '<u2')])\")\n assert_equal(dt.name, 'record16')\n\n\nclass 
TestDtypeAttributeDeletion(object):\n\n def test_dtype_non_writable_attributes_deletion(self):\n dt = np.dtype(np.double)\n attr = [\"subdtype\", \"descr\", \"str\", \"name\", \"base\", \"shape\",\n \"isbuiltin\", \"isnative\", \"isalignedstruct\", \"fields\",\n \"metadata\", \"hasobject\"]\n\n for s in attr:\n assert_raises(AttributeError, delattr, dt, s)\n\n def test_dtype_writable_attributes_deletion(self):\n dt = np.dtype(np.double)\n attr = [\"names\"]\n for s in attr:\n assert_raises(AttributeError, delattr, dt, s)\n\n\nclass TestDtypeAttributes(object):\n def test_descr_has_trailing_void(self):\n # see gh-6359\n dtype = np.dtype({\n 'names': ['A', 'B'],\n 'formats': ['f4', 'f4'],\n 'offsets': [0, 8],\n 'itemsize': 16})\n new_dtype = np.dtype(dtype.descr)\n assert_equal(new_dtype.itemsize, 16)\n\n @pytest.mark.parametrize('t', np.typeDict.values())\n def test_name_builtin(self, t):\n name = t.__name__\n if name.endswith('_'):\n name = name[:-1]\n assert_equal(np.dtype(t).name, name)\n\n def test_name_dtype_subclass(self):\n # Ticket #4357\n class user_def_subcls(np.void):\n pass\n assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')\n\n\nclass TestPickling(object):\n\n def check_pickling(self, dtype):\n for proto in range(pickle.HIGHEST_PROTOCOL + 1):\n pickled = pickle.loads(pickle.dumps(dtype, proto))\n assert_equal(pickled, dtype)\n assert_equal(pickled.descr, dtype.descr)\n if dtype.metadata is not None:\n assert_equal(pickled.metadata, dtype.metadata)\n # Check the reconstructed dtype is functional\n x = np.zeros(3, dtype=dtype)\n y = np.zeros(3, dtype=pickled)\n assert_equal(x, y)\n assert_equal(x[0], y[0])\n\n @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,\n np.unicode, bool])\n def test_builtin(self, t):\n self.check_pickling(np.dtype(t))\n\n def test_structured(self):\n dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))\n self.check_pickling(dt)\n\n def test_structured_aligned(self):\n dt = np.dtype('i4, i1', align=True)\n self.check_pickling(dt)\n\n def test_structured_unaligned(self):\n dt = np.dtype('i4, i1', align=False)\n self.check_pickling(dt)\n\n def test_structured_padded(self):\n dt = np.dtype({\n 'names': ['A', 'B'],\n 'formats': ['f4', 'f4'],\n 'offsets': [0, 8],\n 'itemsize': 16})\n self.check_pickling(dt)\n\n def test_structured_titles(self):\n dt = np.dtype({'names': ['r', 'b'],\n 'formats': ['u1', 'u1'],\n 'titles': ['Red pixel', 'Blue pixel']})\n self.check_pickling(dt)\n\n @pytest.mark.parametrize('base', ['m8', 'M8'])\n @pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',\n 'ms', 'us', 'ns', 'ps', 'fs', 'as'])\n def test_datetime(self, base, unit):\n dt = np.dtype('%s[%s]' % (base, unit) if unit else base)\n self.check_pickling(dt)\n if unit:\n dt = np.dtype('%s[7%s]' % (base, unit))\n self.check_pickling(dt)\n\n def test_metadata(self):\n dt = np.dtype(int, metadata={'datum': 1})\n self.check_pickling(dt)\n\n\ndef test_rational_dtype():\n # test for bug gh-5719\n a = np.array([1111], dtype=rational).astype\n assert_raises(OverflowError, a, 'int8')\n\n # test that dtype detection finds user-defined types\n x = rational(1)\n assert_equal(np.array([x,x]).dtype, np.dtype(rational))\n\n\ndef test_dtypes_are_true():\n # test for gh-6294\n assert bool(np.dtype('f8'))\n assert bool(np.dtype('i8'))\n assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))\n\n\ndef test_invalid_dtype_string():\n # test for gh-10440\n assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')\n assert_raises(TypeError, 
np.dtype, u'Fl\\xfcgel')\n\n\nclass TestFromCTypes(object):\n\n @staticmethod\n def check(ctype, dtype):\n dtype = np.dtype(dtype)\n assert_equal(np.dtype(ctype), dtype)\n assert_equal(np.dtype(ctype()), dtype)\n\n def test_array(self):\n c8 = ctypes.c_uint8\n self.check( 3 * c8, (np.uint8, (3,)))\n self.check( 1 * c8, (np.uint8, (1,)))\n self.check( 0 * c8, (np.uint8, (0,)))\n self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))\n self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))\n\n def test_padded_structure(self):\n class PaddedStruct(ctypes.Structure):\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16)\n ]\n expected = np.dtype([\n ('a', np.uint8),\n ('b', np.uint16)\n ], align=True)\n self.check(PaddedStruct, expected)\n\n def test_bit_fields(self):\n class BitfieldStruct(ctypes.Structure):\n _fields_ = [\n ('a', ctypes.c_uint8, 7),\n ('b', ctypes.c_uint8, 1)\n ]\n assert_raises(TypeError, np.dtype, BitfieldStruct)\n assert_raises(TypeError, np.dtype, BitfieldStruct())\n\n def test_pointer(self):\n p_uint8 = ctypes.POINTER(ctypes.c_uint8)\n assert_raises(TypeError, np.dtype, p_uint8)\n\n def test_void_pointer(self):\n self.check(ctypes.c_void_p, np.uintp)\n\n def test_union(self):\n class Union(ctypes.Union):\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16),\n ]\n expected = np.dtype(dict(\n names=['a', 'b'],\n formats=[np.uint8, np.uint16],\n offsets=[0, 0],\n itemsize=2\n ))\n self.check(Union, expected)\n\n def test_union_with_struct_packed(self):\n class Struct(ctypes.Structure):\n _pack_ = 1\n _fields_ = [\n ('one', ctypes.c_uint8),\n ('two', ctypes.c_uint32)\n ]\n\n class Union(ctypes.Union):\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16),\n ('c', ctypes.c_uint32),\n ('d', Struct),\n ]\n expected = np.dtype(dict(\n names=['a', 'b', 'c', 'd'],\n formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],\n offsets=[0, 0, 0, 0],\n itemsize=ctypes.sizeof(Union)\n ))\n self.check(Union, expected)\n\n def test_union_packed(self):\n class Struct(ctypes.Structure):\n _fields_ = [\n ('one', ctypes.c_uint8),\n ('two', ctypes.c_uint32)\n ]\n _pack_ = 1\n class Union(ctypes.Union):\n _pack_ = 1\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16),\n ('c', ctypes.c_uint32),\n ('d', Struct),\n ]\n expected = np.dtype(dict(\n names=['a', 'b', 'c', 'd'],\n formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],\n offsets=[0, 0, 0, 0],\n itemsize=ctypes.sizeof(Union)\n ))\n self.check(Union, expected)\n\n def test_packed_structure(self):\n class PackedStructure(ctypes.Structure):\n _pack_ = 1\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16)\n ]\n expected = np.dtype([\n ('a', np.uint8),\n ('b', np.uint16)\n ])\n self.check(PackedStructure, expected)\n\n def test_large_packed_structure(self):\n class PackedStructure(ctypes.Structure):\n _pack_ = 2\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16),\n ('c', ctypes.c_uint8),\n ('d', ctypes.c_uint16),\n ('e', ctypes.c_uint32),\n ('f', ctypes.c_uint32),\n ('g', ctypes.c_uint8)\n ]\n expected = np.dtype(dict(\n formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],\n offsets=[0, 2, 4, 6, 8, 12, 16],\n names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],\n itemsize=18))\n self.check(PackedStructure, expected)\n\n def test_big_endian_structure_packed(self):\n class BigEndStruct(ctypes.BigEndianStructure):\n _fields_ = [\n ('one', ctypes.c_uint8),\n ('two', ctypes.c_uint32)\n ]\n _pack_ = 1\n expected = 
np.dtype([('one', 'u1'), ('two', '>u4')])\n self.check(BigEndStruct, expected)\n\n def test_little_endian_structure_packed(self):\n class LittleEndStruct(ctypes.LittleEndianStructure):\n _fields_ = [\n ('one', ctypes.c_uint8),\n ('two', ctypes.c_uint32)\n ]\n _pack_ = 1\n expected = np.dtype([('one', 'u1'), ('two', '<u4')])\n self.check(LittleEndStruct, expected)\n\n def test_little_endian_structure(self):\n class PaddedStruct(ctypes.LittleEndianStructure):\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16)\n ]\n expected = np.dtype([\n ('a', '<B'),\n ('b', '<H')\n ], align=True)\n self.check(PaddedStruct, expected)\n\n def test_big_endian_structure(self):\n class PaddedStruct(ctypes.BigEndianStructure):\n _fields_ = [\n ('a', ctypes.c_uint8),\n ('b', ctypes.c_uint16)\n ]\n expected = np.dtype([\n ('a', '>B'),\n ('b', '>H')\n ], align=True)\n self.check(PaddedStruct, expected)\n\n def test_simple_endian_types(self):\n self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))\n self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))\n self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))\n self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))\n", "\"\"\"\nConversion from ctypes to dtype.\n\nIn an ideal world, we could acheive this through the PEP3118 buffer protocol,\nsomething like::\n\n def dtype_from_ctypes_type(t):\n # needed to ensure that the shape of `t` is within memoryview.format\n class DummyStruct(ctypes.Structure):\n _fields_ = [('a', t)]\n\n # empty to avoid memory allocation\n ctype_0 = (DummyStruct * 0)()\n mv = memoryview(ctype_0)\n\n # convert the struct, and slice back out the field\n return _dtype_from_pep3118(mv.format)['a']\n\nUnfortunately, this fails because:\n\n* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)\n* PEP3118 cannot represent unions, but both numpy and ctypes can\n* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)\n\"\"\"\nimport _ctypes\nimport ctypes\n\nimport numpy as np\n\n\ndef _from_ctypes_array(t):\n return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))\n\n\ndef _from_ctypes_structure(t):\n for item in t._fields_:\n if len(item) > 2:\n raise TypeError(\n \"ctypes bitfields have no dtype equivalent\")\n\n if hasattr(t, \"_pack_\"):\n formats = []\n offsets = []\n names = []\n current_offset = 0\n for fname, ftyp in t._fields_:\n names.append(fname)\n formats.append(dtype_from_ctypes_type(ftyp))\n # Each type has a default offset, this is platform dependent for some types.\n effective_pack = min(t._pack_, ctypes.alignment(ftyp))\n current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack\n offsets.append(current_offset)\n current_offset += ctypes.sizeof(ftyp)\n\n return np.dtype(dict(\n formats=formats,\n offsets=offsets,\n names=names,\n itemsize=ctypes.sizeof(t)))\n else:\n fields = []\n for fname, ftyp in t._fields_:\n fields.append((fname, dtype_from_ctypes_type(ftyp)))\n\n # by default, ctypes structs are aligned\n return np.dtype(fields, align=True)\n\n\ndef _from_ctypes_scalar(t):\n \"\"\"\n Return the dtype type with endianness included if it's the case\n \"\"\"\n if getattr(t, '__ctype_be__', None) is t:\n return np.dtype('>' + t._type_)\n elif getattr(t, '__ctype_le__', None) is t:\n return np.dtype('<' + t._type_)\n else:\n return np.dtype(t._type_)\n\n\ndef _from_ctypes_union(t):\n formats = []\n offsets = []\n names = []\n for fname, ftyp in t._fields_:\n names.append(fname)\n formats.append(dtype_from_ctypes_type(ftyp))\n offsets.append(0) # 
Union fields are offset to 0\n\n return np.dtype(dict(\n formats=formats,\n offsets=offsets,\n names=names,\n itemsize=ctypes.sizeof(t)))\n\n\ndef dtype_from_ctypes_type(t):\n \"\"\"\n Construct a dtype object from a ctypes type\n \"\"\"\n if issubclass(t, _ctypes.Array):\n return _from_ctypes_array(t)\n elif issubclass(t, _ctypes._Pointer):\n raise TypeError(\"ctypes pointers have no dtype equivalent\")\n elif issubclass(t, _ctypes.Structure):\n return _from_ctypes_structure(t)\n elif issubclass(t, _ctypes.Union):\n return _from_ctypes_union(t)\n elif isinstance(getattr(t, '_type_', None), str):\n return _from_ctypes_scalar(t)\n else:\n raise NotImplementedError(\n \"Unknown ctypes type {}\".format(t.__name__))\n", "from __future__ import division, absolute_import, print_function\n\nimport operator\nimport warnings\nimport sys\nimport decimal\nimport pytest\n\nimport numpy as np\nfrom numpy import ma\nfrom numpy.testing import (\n assert_, assert_equal, assert_array_equal, assert_almost_equal,\n assert_array_almost_equal, assert_raises, assert_allclose, IS_PYPY,\n assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT,\n )\nimport numpy.lib.function_base as nfb\nfrom numpy.random import rand\nfrom numpy.lib import (\n add_newdoc_ufunc, angle, average, bartlett, blackman, corrcoef, cov,\n delete, diff, digitize, extract, flipud, gradient, hamming, hanning,\n i0, insert, interp, kaiser, meshgrid, msort, piecewise, place, rot90,\n select, setxor1d, sinc, trapz, trim_zeros, unwrap, unique, vectorize\n )\n\nfrom numpy.compat import long\n\n\ndef get_mat(n):\n data = np.arange(n)\n data = np.add.outer(data, data)\n return data\n\n\nclass TestRot90(object):\n def test_basic(self):\n assert_raises(ValueError, rot90, np.ones(4))\n assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))\n assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2))\n assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1))\n assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(-2,1))\n\n a = [[0, 1, 2],\n [3, 4, 5]]\n b1 = [[2, 5],\n [1, 4],\n [0, 3]]\n b2 = [[5, 4, 3],\n [2, 1, 0]]\n b3 = [[3, 0],\n [4, 1],\n [5, 2]]\n b4 = [[0, 1, 2],\n [3, 4, 5]]\n\n for k in range(-3, 13, 4):\n assert_equal(rot90(a, k=k), b1)\n for k in range(-2, 13, 4):\n assert_equal(rot90(a, k=k), b2)\n for k in range(-1, 13, 4):\n assert_equal(rot90(a, k=k), b3)\n for k in range(0, 13, 4):\n assert_equal(rot90(a, k=k), b4)\n\n assert_equal(rot90(rot90(a, axes=(0,1)), axes=(1,0)), a)\n assert_equal(rot90(a, k=1, axes=(1,0)), rot90(a, k=-1, axes=(0,1)))\n\n def test_axes(self):\n a = np.ones((50, 40, 3))\n assert_equal(rot90(a).shape, (40, 50, 3))\n assert_equal(rot90(a, axes=(0,2)), rot90(a, axes=(0,-1)))\n assert_equal(rot90(a, axes=(1,2)), rot90(a, axes=(-2,-1)))\n\n def test_rotation_axes(self):\n a = np.arange(8).reshape((2,2,2))\n\n a_rot90_01 = [[[2, 3],\n [6, 7]],\n [[0, 1],\n [4, 5]]]\n a_rot90_12 = [[[1, 3],\n [0, 2]],\n [[5, 7],\n [4, 6]]]\n a_rot90_20 = [[[4, 0],\n [6, 2]],\n [[5, 1],\n [7, 3]]]\n a_rot90_10 = [[[4, 5],\n [0, 1]],\n [[6, 7],\n [2, 3]]]\n\n assert_equal(rot90(a, axes=(0, 1)), a_rot90_01)\n assert_equal(rot90(a, axes=(1, 0)), a_rot90_10)\n assert_equal(rot90(a, axes=(1, 2)), a_rot90_12)\n\n for k in range(1,5):\n assert_equal(rot90(a, k=k, axes=(2, 0)),\n rot90(a_rot90_20, k=k-1, axes=(2, 0)))\n\n\nclass TestFlip(object):\n\n def test_axes(self):\n assert_raises(np.AxisError, np.flip, np.ones(4), axis=1)\n assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=2)\n 
assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=-3)\n assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=(0, 3))\n\n def test_basic_lr(self):\n a = get_mat(4)\n b = a[:, ::-1]\n assert_equal(np.flip(a, 1), b)\n a = [[0, 1, 2],\n [3, 4, 5]]\n b = [[2, 1, 0],\n [5, 4, 3]]\n assert_equal(np.flip(a, 1), b)\n\n def test_basic_ud(self):\n a = get_mat(4)\n b = a[::-1, :]\n assert_equal(np.flip(a, 0), b)\n a = [[0, 1, 2],\n [3, 4, 5]]\n b = [[3, 4, 5],\n [0, 1, 2]]\n assert_equal(np.flip(a, 0), b)\n\n def test_3d_swap_axis0(self):\n a = np.array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n\n b = np.array([[[4, 5],\n [6, 7]],\n [[0, 1],\n [2, 3]]])\n\n assert_equal(np.flip(a, 0), b)\n\n def test_3d_swap_axis1(self):\n a = np.array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n\n b = np.array([[[2, 3],\n [0, 1]],\n [[6, 7],\n [4, 5]]])\n\n assert_equal(np.flip(a, 1), b)\n\n def test_3d_swap_axis2(self):\n a = np.array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n\n b = np.array([[[1, 0],\n [3, 2]],\n [[5, 4],\n [7, 6]]])\n\n assert_equal(np.flip(a, 2), b)\n\n def test_4d(self):\n a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)\n for i in range(a.ndim):\n assert_equal(np.flip(a, i),\n np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))\n\n def test_default_axis(self):\n a = np.array([[1, 2, 3],\n [4, 5, 6]])\n b = np.array([[6, 5, 4],\n [3, 2, 1]])\n assert_equal(np.flip(a), b)\n\n def test_multiple_axes(self):\n a = np.array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n\n assert_equal(np.flip(a, axis=()), a)\n\n b = np.array([[[5, 4],\n [7, 6]],\n [[1, 0],\n [3, 2]]])\n\n assert_equal(np.flip(a, axis=(0, 2)), b)\n\n c = np.array([[[3, 2],\n [1, 0]],\n [[7, 6],\n [5, 4]]])\n\n assert_equal(np.flip(a, axis=(1, 2)), c)\n\n\nclass TestAny(object):\n\n def test_basic(self):\n y1 = [0, 0, 1, 0]\n y2 = [0, 0, 0, 0]\n y3 = [1, 0, 1, 0]\n assert_(np.any(y1))\n assert_(np.any(y3))\n assert_(not np.any(y2))\n\n def test_nd(self):\n y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]\n assert_(np.any(y1))\n assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0])\n assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])\n\n\nclass TestAll(object):\n\n def test_basic(self):\n y1 = [0, 1, 1, 0]\n y2 = [0, 0, 0, 0]\n y3 = [1, 1, 1, 1]\n assert_(not np.all(y1))\n assert_(np.all(y3))\n assert_(not np.all(y2))\n assert_(np.all(~np.array(y2)))\n\n def test_nd(self):\n y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]\n assert_(not np.all(y1))\n assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1])\n assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])\n\n\nclass TestCopy(object):\n\n def test_basic(self):\n a = np.array([[1, 2], [3, 4]])\n a_copy = np.copy(a)\n assert_array_equal(a, a_copy)\n a_copy[0, 0] = 10\n assert_equal(a[0, 0], 1)\n assert_equal(a_copy[0, 0], 10)\n\n def test_order(self):\n # It turns out that people rely on np.copy() preserving order by\n # default; changing this broke scikit-learn:\n # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa\n a = np.array([[1, 2], [3, 4]])\n assert_(a.flags.c_contiguous)\n assert_(not a.flags.f_contiguous)\n a_fort = np.array([[1, 2], [3, 4]], order=\"F\")\n assert_(not a_fort.flags.c_contiguous)\n assert_(a_fort.flags.f_contiguous)\n a_copy = np.copy(a)\n assert_(a_copy.flags.c_contiguous)\n assert_(not a_copy.flags.f_contiguous)\n a_fort_copy = np.copy(a_fort)\n assert_(not a_fort_copy.flags.c_contiguous)\n assert_(a_fort_copy.flags.f_contiguous)\n\n\nclass TestAverage(object):\n\n def test_basic(self):\n y1 = np.array([1, 2, 3])\n 
assert_(average(y1, axis=0) == 2.)\n y2 = np.array([1., 2., 3.])\n assert_(average(y2, axis=0) == 2.)\n y3 = [0., 0., 0.]\n assert_(average(y3, axis=0) == 0.)\n\n y4 = np.ones((4, 4))\n y4[0, 1] = 0\n y4[1, 0] = 2\n assert_almost_equal(y4.mean(0), average(y4, 0))\n assert_almost_equal(y4.mean(1), average(y4, 1))\n\n y5 = rand(5, 5)\n assert_almost_equal(y5.mean(0), average(y5, 0))\n assert_almost_equal(y5.mean(1), average(y5, 1))\n\n def test_weights(self):\n y = np.arange(10)\n w = np.arange(10)\n actual = average(y, weights=w)\n desired = (np.arange(10) ** 2).sum() * 1. / np.arange(10).sum()\n assert_almost_equal(actual, desired)\n\n y1 = np.array([[1, 2, 3], [4, 5, 6]])\n w0 = [1, 2]\n actual = average(y1, weights=w0, axis=0)\n desired = np.array([3., 4., 5.])\n assert_almost_equal(actual, desired)\n\n w1 = [0, 0, 1]\n actual = average(y1, weights=w1, axis=1)\n desired = np.array([3., 6.])\n assert_almost_equal(actual, desired)\n\n # This should raise an error. Can we test for that ?\n # assert_equal(average(y1, weights=w1), 9./2.)\n\n # 2D Case\n w2 = [[0, 0, 1], [0, 0, 2]]\n desired = np.array([3., 6.])\n assert_array_equal(average(y1, weights=w2, axis=1), desired)\n assert_equal(average(y1, weights=w2), 5.)\n\n y3 = rand(5).astype(np.float32)\n w3 = rand(5).astype(np.float64)\n\n assert_(np.average(y3, weights=w3).dtype == np.result_type(y3, w3))\n\n def test_returned(self):\n y = np.array([[1, 2, 3], [4, 5, 6]])\n\n # No weights\n avg, scl = average(y, returned=True)\n assert_equal(scl, 6.)\n\n avg, scl = average(y, 0, returned=True)\n assert_array_equal(scl, np.array([2., 2., 2.]))\n\n avg, scl = average(y, 1, returned=True)\n assert_array_equal(scl, np.array([3., 3.]))\n\n # With weights\n w0 = [1, 2]\n avg, scl = average(y, weights=w0, axis=0, returned=True)\n assert_array_equal(scl, np.array([3., 3., 3.]))\n\n w1 = [1, 2, 3]\n avg, scl = average(y, weights=w1, axis=1, returned=True)\n assert_array_equal(scl, np.array([6., 6.]))\n\n w2 = [[0, 0, 1], [1, 2, 3]]\n avg, scl = average(y, weights=w2, axis=1, returned=True)\n assert_array_equal(scl, np.array([1., 6.]))\n\n def test_subclasses(self):\n class subclass(np.ndarray):\n pass\n a = np.array([[1,2],[3,4]]).view(subclass)\n w = np.array([[1,2],[3,4]]).view(subclass)\n\n assert_equal(type(np.average(a)), subclass)\n assert_equal(type(np.average(a, weights=w)), subclass)\n\n def test_upcasting(self):\n types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),\n ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]\n for at, wt, rt in types:\n a = np.array([[1,2],[3,4]], dtype=at)\n w = np.array([[1,2],[3,4]], dtype=wt)\n assert_equal(np.average(a, weights=w).dtype, np.dtype(rt))\n\n def test_object_dtype(self):\n a = np.array([decimal.Decimal(x) for x in range(10)])\n w = np.array([decimal.Decimal(1) for _ in range(10)])\n w /= w.sum()\n assert_almost_equal(a.mean(0), average(a, weights=w))\n\nclass TestSelect(object):\n choices = [np.array([1, 2, 3]),\n np.array([4, 5, 6]),\n np.array([7, 8, 9])]\n conditions = [np.array([False, False, False]),\n np.array([False, True, False]),\n np.array([False, False, True])]\n\n def _select(self, cond, values, default=0):\n output = []\n for m in range(len(cond)):\n output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]\n return output\n\n def test_basic(self):\n choices = self.choices\n conditions = self.conditions\n assert_array_equal(select(conditions, choices, default=15),\n self._select(conditions, choices, default=15))\n\n assert_equal(len(choices), 3)\n 
assert_equal(len(conditions), 3)\n\n def test_broadcasting(self):\n conditions = [np.array(True), np.array([False, True, False])]\n choices = [1, np.arange(12).reshape(4, 3)]\n assert_array_equal(select(conditions, choices), np.ones((4, 3)))\n # default can broadcast too:\n assert_equal(select([True], [0], default=[0]).shape, (1,))\n\n def test_return_dtype(self):\n assert_equal(select(self.conditions, self.choices, 1j).dtype,\n np.complex_)\n # But the conditions need to be stronger then the scalar default\n # if it is scalar.\n choices = [choice.astype(np.int8) for choice in self.choices]\n assert_equal(select(self.conditions, choices).dtype, np.int8)\n\n d = np.array([1, 2, 3, np.nan, 5, 7])\n m = np.isnan(d)\n assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])\n\n def test_deprecated_empty(self):\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\")\n assert_equal(select([], [], 3j), 3j)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\")\n assert_warns(DeprecationWarning, select, [], [])\n warnings.simplefilter(\"error\")\n assert_raises(DeprecationWarning, select, [], [])\n\n def test_non_bool_deprecation(self):\n choices = self.choices\n conditions = self.conditions[:]\n with warnings.catch_warnings():\n warnings.filterwarnings(\"always\")\n conditions[0] = conditions[0].astype(np.int_)\n assert_warns(DeprecationWarning, select, conditions, choices)\n conditions[0] = conditions[0].astype(np.uint8)\n assert_warns(DeprecationWarning, select, conditions, choices)\n warnings.filterwarnings(\"error\")\n assert_raises(DeprecationWarning, select, conditions, choices)\n\n def test_many_arguments(self):\n # This used to be limited by NPY_MAXARGS == 32\n conditions = [np.array([False])] * 100\n choices = [np.array([1])] * 100\n select(conditions, choices)\n\n\nclass TestInsert(object):\n\n def test_basic(self):\n a = [1, 2, 3]\n assert_equal(insert(a, 0, 1), [1, 1, 2, 3])\n assert_equal(insert(a, 3, 1), [1, 2, 3, 1])\n assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])\n assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3])\n assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9])\n assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3])\n assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9])\n b = np.array([0, 1], dtype=np.float64)\n assert_equal(insert(b, 0, b[0]), [0., 0., 1.])\n assert_equal(insert(b, [], []), b)\n # Bools will be treated differently in the future:\n # assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9])\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', FutureWarning)\n assert_equal(\n insert(a, np.array([True] * 4), 9), [1, 9, 9, 9, 9, 2, 3])\n assert_(w[0].category is FutureWarning)\n\n def test_multidim(self):\n a = [[1, 1, 1]]\n r = [[2, 2, 2],\n [1, 1, 1]]\n assert_equal(insert(a, 0, [1]), [1, 1, 1, 1])\n assert_equal(insert(a, 0, [2, 2, 2], axis=0), r)\n assert_equal(insert(a, 0, 2, axis=0), r)\n assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]])\n\n a = np.array([[1, 1], [2, 2], [3, 3]])\n b = np.arange(1, 4).repeat(3).reshape(3, 3)\n c = np.concatenate(\n (a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T,\n a[:, 1:2]), axis=1)\n assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b)\n assert_equal(insert(a, [1], [1, 2, 3], axis=1), c)\n # scalars behave differently, in this case exactly opposite:\n assert_equal(insert(a, 1, [1, 2, 3], axis=1), b)\n assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), 
c)\n\n a = np.arange(4).reshape(2, 2)\n assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a)\n assert_equal(insert(a[:1,:], 1, a[1,:], axis=0), a)\n\n # negative axis value\n a = np.arange(24).reshape((2, 3, 4))\n assert_equal(insert(a, 1, a[:,:, 3], axis=-1),\n insert(a, 1, a[:,:, 3], axis=2))\n assert_equal(insert(a, 1, a[:, 2,:], axis=-2),\n insert(a, 1, a[:, 2,:], axis=1))\n\n # invalid axis value\n assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=3)\n assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=-4)\n\n # negative axis value\n a = np.arange(24).reshape((2, 3, 4))\n assert_equal(insert(a, 1, a[:, :, 3], axis=-1),\n insert(a, 1, a[:, :, 3], axis=2))\n assert_equal(insert(a, 1, a[:, 2, :], axis=-2),\n insert(a, 1, a[:, 2, :], axis=1))\n\n def test_0d(self):\n # This is an error in the future\n a = np.array(1)\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', DeprecationWarning)\n assert_equal(insert(a, [], 2, axis=0), np.array(2))\n assert_(w[0].category is DeprecationWarning)\n\n def test_subclass(self):\n class SubClass(np.ndarray):\n pass\n a = np.arange(10).view(SubClass)\n assert_(isinstance(np.insert(a, 0, [0]), SubClass))\n assert_(isinstance(np.insert(a, [], []), SubClass))\n assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass))\n assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass))\n assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass))\n # This is an error in the future:\n a = np.array(1).view(SubClass)\n assert_(isinstance(np.insert(a, 0, [0]), SubClass))\n\n def test_index_array_copied(self):\n x = np.array([1, 1, 1])\n np.insert([0, 1, 2], x, [3, 4, 5])\n assert_equal(x, np.array([1, 1, 1]))\n\n def test_structured_array(self):\n a = np.array([(1, 'a'), (2, 'b'), (3, 'c')],\n dtype=[('foo', 'i'), ('bar', 'a1')])\n val = (4, 'd')\n b = np.insert(a, 0, val)\n assert_array_equal(b[0], np.array(val, dtype=b.dtype))\n val = [(4, 'd')] * 2\n b = np.insert(a, [0, 2], val)\n assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))\n\n\nclass TestAmax(object):\n\n def test_basic(self):\n a = [3, 4, 5, 10, -3, -5, 6.0]\n assert_equal(np.amax(a), 10.0)\n b = [[3, 6.0, 9.0],\n [4, 10.0, 5.0],\n [8, 3.0, 2.0]]\n assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0])\n assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])\n\n\nclass TestAmin(object):\n\n def test_basic(self):\n a = [3, 4, 5, 10, -3, -5, 6.0]\n assert_equal(np.amin(a), -5.0)\n b = [[3, 6.0, 9.0],\n [4, 10.0, 5.0],\n [8, 3.0, 2.0]]\n assert_equal(np.amin(b, axis=0), [3.0, 3.0, 2.0])\n assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])\n\n\nclass TestPtp(object):\n\n def test_basic(self):\n a = np.array([3, 4, 5, 10, -3, -5, 6.0])\n assert_equal(a.ptp(axis=0), 15.0)\n b = np.array([[3, 6.0, 9.0],\n [4, 10.0, 5.0],\n [8, 3.0, 2.0]])\n assert_equal(b.ptp(axis=0), [5.0, 7.0, 7.0])\n assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0])\n\n assert_equal(b.ptp(axis=0, keepdims=True), [[5.0, 7.0, 7.0]])\n assert_equal(b.ptp(axis=(0,1), keepdims=True), [[8.0]])\n\n\nclass TestCumsum(object):\n\n def test_basic(self):\n ba = [1, 2, 10, 11, 6, 5, 4]\n ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]\n for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,\n np.uint32, np.float32, np.float64, np.complex64,\n np.complex128]:\n a = np.array(ba, ctype)\n a2 = np.array(ba2, ctype)\n\n tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)\n assert_array_equal(np.cumsum(a, axis=0), tgt)\n\n tgt = np.array(\n [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 
18]], ctype)\n assert_array_equal(np.cumsum(a2, axis=0), tgt)\n\n tgt = np.array(\n [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)\n assert_array_equal(np.cumsum(a2, axis=1), tgt)\n\n\nclass TestProd(object):\n\n def test_basic(self):\n ba = [1, 2, 10, 11, 6, 5, 4]\n ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]\n for ctype in [np.int16, np.uint16, np.int32, np.uint32,\n np.float32, np.float64, np.complex64, np.complex128]:\n a = np.array(ba, ctype)\n a2 = np.array(ba2, ctype)\n if ctype in ['1', 'b']:\n assert_raises(ArithmeticError, np.prod, a)\n assert_raises(ArithmeticError, np.prod, a2, 1)\n else:\n assert_equal(a.prod(axis=0), 26400)\n assert_array_equal(a2.prod(axis=0),\n np.array([50, 36, 84, 180], ctype))\n assert_array_equal(a2.prod(axis=-1),\n np.array([24, 1890, 600], ctype))\n\n\nclass TestCumprod(object):\n\n def test_basic(self):\n ba = [1, 2, 10, 11, 6, 5, 4]\n ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]\n for ctype in [np.int16, np.uint16, np.int32, np.uint32,\n np.float32, np.float64, np.complex64, np.complex128]:\n a = np.array(ba, ctype)\n a2 = np.array(ba2, ctype)\n if ctype in ['1', 'b']:\n assert_raises(ArithmeticError, np.cumprod, a)\n assert_raises(ArithmeticError, np.cumprod, a2, 1)\n assert_raises(ArithmeticError, np.cumprod, a)\n else:\n assert_array_equal(np.cumprod(a, axis=-1),\n np.array([1, 2, 20, 220,\n 1320, 6600, 26400], ctype))\n assert_array_equal(np.cumprod(a2, axis=0),\n np.array([[1, 2, 3, 4],\n [5, 12, 21, 36],\n [50, 36, 84, 180]], ctype))\n assert_array_equal(np.cumprod(a2, axis=-1),\n np.array([[1, 2, 6, 24],\n [5, 30, 210, 1890],\n [10, 30, 120, 600]], ctype))\n\n\nclass TestDiff(object):\n\n def test_basic(self):\n x = [1, 4, 6, 7, 12]\n out = np.array([3, 2, 1, 5])\n out2 = np.array([-1, -1, 4])\n out3 = np.array([0, 5])\n assert_array_equal(diff(x), out)\n assert_array_equal(diff(x, n=2), out2)\n assert_array_equal(diff(x, n=3), out3)\n\n x = [1.1, 2.2, 3.0, -0.2, -0.1]\n out = np.array([1.1, 0.8, -3.2, 0.1])\n assert_almost_equal(diff(x), out)\n\n x = [True, True, False, False]\n out = np.array([False, True, False])\n out2 = np.array([True, True])\n assert_array_equal(diff(x), out)\n assert_array_equal(diff(x, n=2), out2)\n\n def test_axis(self):\n x = np.zeros((10, 20, 30))\n x[:, 1::2, :] = 1\n exp = np.ones((10, 19, 30))\n exp[:, 1::2, :] = -1\n assert_array_equal(diff(x), np.zeros((10, 20, 29)))\n assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29)))\n assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30)))\n assert_array_equal(diff(x, axis=1), exp)\n assert_array_equal(diff(x, axis=-2), exp)\n assert_raises(np.AxisError, diff, x, axis=3)\n assert_raises(np.AxisError, diff, x, axis=-4)\n\n def test_nd(self):\n x = 20 * rand(10, 20, 30)\n out1 = x[:, :, 1:] - x[:, :, :-1]\n out2 = out1[:, :, 1:] - out1[:, :, :-1]\n out3 = x[1:, :, :] - x[:-1, :, :]\n out4 = out3[1:, :, :] - out3[:-1, :, :]\n assert_array_equal(diff(x), out1)\n assert_array_equal(diff(x, n=2), out2)\n assert_array_equal(diff(x, axis=0), out3)\n assert_array_equal(diff(x, n=2, axis=0), out4)\n\n def test_n(self):\n x = list(range(3))\n assert_raises(ValueError, diff, x, n=-1)\n output = [diff(x, n=n) for n in range(1, 5)]\n expected = [[1, 1], [0], [], []]\n assert_(diff(x, n=0) is x)\n for n, (expected, out) in enumerate(zip(expected, output), start=1):\n assert_(type(out) is np.ndarray)\n assert_array_equal(out, expected)\n assert_equal(out.dtype, np.int_)\n assert_equal(len(out), max(0, len(x) - n))\n\n def test_times(self):\n x = 
np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)\n expected = [\n np.array([1, 1], dtype='timedelta64[D]'),\n np.array([0], dtype='timedelta64[D]'),\n ]\n expected.extend([np.array([], dtype='timedelta64[D]')] * 3)\n for n, exp in enumerate(expected, start=1):\n out = diff(x, n=n)\n assert_array_equal(out, exp)\n assert_equal(out.dtype, exp.dtype)\n\n def test_subclass(self):\n x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]],\n mask=[[False, False], [True, False],\n [False, True], [True, True], [False, False]])\n out = diff(x)\n assert_array_equal(out.data, [[1], [1], [1], [1], [1]])\n assert_array_equal(out.mask, [[False], [True],\n [True], [True], [False]])\n assert_(type(out) is type(x))\n\n out3 = diff(x, n=3)\n assert_array_equal(out3.data, [[], [], [], [], []])\n assert_array_equal(out3.mask, [[], [], [], [], []])\n assert_(type(out3) is type(x))\n\n def test_prepend(self):\n x = np.arange(5) + 1\n assert_array_equal(diff(x, prepend=0), np.ones(5))\n assert_array_equal(diff(x, prepend=[0]), np.ones(5))\n assert_array_equal(np.cumsum(np.diff(x, prepend=0)), x)\n assert_array_equal(diff(x, prepend=[-1, 0]), np.ones(6))\n\n x = np.arange(4).reshape(2, 2)\n result = np.diff(x, axis=1, prepend=0)\n expected = [[0, 1], [2, 1]]\n assert_array_equal(result, expected)\n result = np.diff(x, axis=1, prepend=[[0], [0]])\n assert_array_equal(result, expected)\n\n result = np.diff(x, axis=0, prepend=0)\n expected = [[0, 1], [2, 2]]\n assert_array_equal(result, expected)\n result = np.diff(x, axis=0, prepend=[[0, 0]])\n assert_array_equal(result, expected)\n\n assert_raises(ValueError, np.diff, x, prepend=np.zeros((3,3)))\n\n assert_raises(np.AxisError, diff, x, prepend=0, axis=3)\n\n def test_append(self):\n x = np.arange(5)\n result = diff(x, append=0)\n expected = [1, 1, 1, 1, -4]\n assert_array_equal(result, expected)\n result = diff(x, append=[0])\n assert_array_equal(result, expected)\n result = diff(x, append=[0, 2])\n expected = expected + [2]\n assert_array_equal(result, expected)\n\n x = np.arange(4).reshape(2, 2)\n result = np.diff(x, axis=1, append=0)\n expected = [[1, -1], [1, -3]]\n assert_array_equal(result, expected)\n result = np.diff(x, axis=1, append=[[0], [0]])\n assert_array_equal(result, expected)\n\n result = np.diff(x, axis=0, append=0)\n expected = [[2, 2], [-2, -3]]\n assert_array_equal(result, expected)\n result = np.diff(x, axis=0, append=[[0, 0]])\n assert_array_equal(result, expected)\n\n assert_raises(ValueError, np.diff, x, append=np.zeros((3,3)))\n\n assert_raises(np.AxisError, diff, x, append=0, axis=3)\n\n\nclass TestDelete(object):\n\n def setup(self):\n self.a = np.arange(5)\n self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2)\n\n def _check_inverse_of_slicing(self, indices):\n a_del = delete(self.a, indices)\n nd_a_del = delete(self.nd_a, indices, axis=1)\n msg = 'Delete failed for obj: %r' % indices\n # NOTE: The cast should be removed after warning phase for bools\n if not isinstance(indices, (slice, int, long, np.integer)):\n indices = np.asarray(indices, dtype=np.intp)\n indices = indices[(indices >= 0) & (indices < 5)]\n assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a,\n err_msg=msg)\n xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0])\n assert_array_equal(xor, self.nd_a[0,:, 0], err_msg=msg)\n\n def test_slices(self):\n lims = [-6, -2, 0, 1, 2, 4, 5]\n steps = [-3, -1, 1, 3]\n for start in lims:\n for stop in lims:\n for step in steps:\n s = slice(start, stop, step)\n self._check_inverse_of_slicing(s)\n\n def 
test_fancy(self):\n # Deprecation/FutureWarning tests should be kept after change.\n self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]]))\n with warnings.catch_warnings():\n warnings.filterwarnings('error', category=DeprecationWarning)\n assert_raises(DeprecationWarning, delete, self.a, [100])\n assert_raises(DeprecationWarning, delete, self.a, [-100])\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', category=FutureWarning)\n self._check_inverse_of_slicing([0, -1, 2, 2])\n obj = np.array([True, False, False], dtype=bool)\n self._check_inverse_of_slicing(obj)\n assert_(w[0].category is FutureWarning)\n assert_(w[1].category is FutureWarning)\n\n def test_single(self):\n self._check_inverse_of_slicing(0)\n self._check_inverse_of_slicing(-4)\n\n def test_0d(self):\n a = np.array(1)\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', DeprecationWarning)\n assert_equal(delete(a, [], axis=0), a)\n assert_(w[0].category is DeprecationWarning)\n\n def test_subclass(self):\n class SubClass(np.ndarray):\n pass\n a = self.a.view(SubClass)\n assert_(isinstance(delete(a, 0), SubClass))\n assert_(isinstance(delete(a, []), SubClass))\n assert_(isinstance(delete(a, [0, 1]), SubClass))\n assert_(isinstance(delete(a, slice(1, 2)), SubClass))\n assert_(isinstance(delete(a, slice(1, -2)), SubClass))\n\n def test_array_order_preserve(self):\n # See gh-7113\n k = np.arange(10).reshape(2, 5, order='F')\n m = delete(k, slice(60, None), axis=1)\n\n # 'k' is Fortran ordered, and 'm' should have the\n # same ordering as 'k' and NOT become C ordered\n assert_equal(m.flags.c_contiguous, k.flags.c_contiguous)\n assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)\n\n\nclass TestGradient(object):\n\n def test_basic(self):\n v = [[1, 1], [3, 4]]\n x = np.array(v)\n dx = [np.array([[2., 3.], [2., 3.]]),\n np.array([[0., 0.], [1., 1.]])]\n assert_array_equal(gradient(x), dx)\n assert_array_equal(gradient(v), dx)\n\n def test_args(self):\n dx = np.cumsum(np.ones(5))\n dx_uneven = [1., 2., 5., 9., 11.]\n f_2d = np.arange(25).reshape(5, 5)\n\n # distances must be scalars or have size equal to gradient[axis]\n gradient(np.arange(5), 3.)\n gradient(np.arange(5), np.array(3.))\n gradient(np.arange(5), dx)\n # dy is set equal to dx because scalar\n gradient(f_2d, 1.5)\n gradient(f_2d, np.array(1.5))\n\n gradient(f_2d, dx_uneven, dx_uneven)\n # mix between even and uneven spaces and\n # mix between scalar and vector\n gradient(f_2d, dx, 2)\n\n # 2D but axis specified\n gradient(f_2d, dx, axis=1)\n\n # 2d coordinate arguments are not yet allowed\n assert_raises_regex(ValueError, '.*scalars or 1d',\n gradient, f_2d, np.stack([dx]*2, axis=-1), 1)\n\n def test_badargs(self):\n f_2d = np.arange(25).reshape(5, 5)\n x = np.cumsum(np.ones(5))\n\n # wrong sizes\n assert_raises(ValueError, gradient, f_2d, x, np.ones(2))\n assert_raises(ValueError, gradient, f_2d, 1, np.ones(2))\n assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2))\n # wrong number of arguments\n assert_raises(TypeError, gradient, f_2d, x)\n assert_raises(TypeError, gradient, f_2d, x, axis=(0,1))\n assert_raises(TypeError, gradient, f_2d, x, x, x)\n assert_raises(TypeError, gradient, f_2d, 1, 1, 1)\n assert_raises(TypeError, gradient, f_2d, x, x, axis=1)\n assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1)\n\n def test_datetime64(self):\n # Make sure gradient() can handle special types like datetime64\n x = np.array(\n ['1910-08-16', '1910-08-11', '1910-08-10', 
'1910-08-12',\n '1910-10-12', '1910-12-12', '1912-12-12'],\n dtype='datetime64[D]')\n dx = np.array(\n [-5, -3, 0, 31, 61, 396, 731],\n dtype='timedelta64[D]')\n assert_array_equal(gradient(x), dx)\n assert_(dx.dtype == np.dtype('timedelta64[D]'))\n\n def test_masked(self):\n # Make sure that gradient supports subclasses like masked arrays\n x = np.ma.array([[1, 1], [3, 4]],\n mask=[[False, False], [False, False]])\n out = gradient(x)[0]\n assert_equal(type(out), type(x))\n # And make sure that the output and input don't have aliased mask\n # arrays\n assert_(x.mask is not out.mask)\n # Also check that edge_order=2 doesn't alter the original mask\n x2 = np.ma.arange(5)\n x2[2] = np.ma.masked\n np.gradient(x2, edge_order=2)\n assert_array_equal(x2.mask, [False, False, True, False, False])\n\n def test_second_order_accurate(self):\n # Testing that the relative numerical error is less that 3% for\n # this example problem. This corresponds to second order\n # accurate finite differences for all interior and boundary\n # points.\n x = np.linspace(0, 1, 10)\n dx = x[1] - x[0]\n y = 2 * x ** 3 + 4 * x ** 2 + 2 * x\n analytical = 6 * x ** 2 + 8 * x + 2\n num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)\n assert_(np.all(num_error < 0.03) == True)\n\n # test with unevenly spaced\n np.random.seed(0)\n x = np.sort(np.random.random(10))\n y = 2 * x ** 3 + 4 * x ** 2 + 2 * x\n analytical = 6 * x ** 2 + 8 * x + 2\n num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1)\n assert_(np.all(num_error < 0.03) == True)\n\n def test_spacing(self):\n f = np.array([0, 2., 3., 4., 5., 5.])\n f = np.tile(f, (6,1)) + f.reshape(-1, 1)\n x_uneven = np.array([0., 0.5, 1., 3., 5., 7.])\n x_even = np.arange(6.)\n\n fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1))\n fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1))\n fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1))\n fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1))\n\n # evenly spaced\n for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]:\n res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order)\n res2 = gradient(f, x_even, x_even,\n axis=(0,1), edge_order=edge_order)\n res3 = gradient(f, x_even, x_even,\n axis=None, edge_order=edge_order)\n assert_array_equal(res1, res2)\n assert_array_equal(res2, res3)\n assert_almost_equal(res1[0], exp_res.T)\n assert_almost_equal(res1[1], exp_res)\n\n res1 = gradient(f, 1., axis=0, edge_order=edge_order)\n res2 = gradient(f, x_even, axis=0, edge_order=edge_order)\n assert_(res1.shape == res2.shape)\n assert_almost_equal(res2, exp_res.T)\n\n res1 = gradient(f, 1., axis=1, edge_order=edge_order)\n res2 = gradient(f, x_even, axis=1, edge_order=edge_order)\n assert_(res1.shape == res2.shape)\n assert_array_equal(res2, exp_res)\n\n # unevenly spaced\n for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]:\n res1 = gradient(f, x_uneven, x_uneven,\n axis=(0,1), edge_order=edge_order)\n res2 = gradient(f, x_uneven, x_uneven,\n axis=None, edge_order=edge_order)\n assert_array_equal(res1, res2)\n assert_almost_equal(res1[0], exp_res.T)\n assert_almost_equal(res1[1], exp_res)\n\n res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order)\n assert_almost_equal(res1, exp_res.T)\n\n res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order)\n assert_almost_equal(res1, exp_res)\n\n # mixed\n res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1)\n res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1)\n 
assert_array_equal(res1[0], res2[1])\n assert_array_equal(res1[1], res2[0])\n assert_almost_equal(res1[0], fdx_even_ord1.T)\n assert_almost_equal(res1[1], fdx_uneven_ord1)\n\n res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2)\n res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2)\n assert_array_equal(res1[0], res2[1])\n assert_array_equal(res1[1], res2[0])\n assert_almost_equal(res1[0], fdx_even_ord2.T)\n assert_almost_equal(res1[1], fdx_uneven_ord2)\n\n def test_specific_axes(self):\n # Testing that gradient can work on a given axis only\n v = [[1, 1], [3, 4]]\n x = np.array(v)\n dx = [np.array([[2., 3.], [2., 3.]]),\n np.array([[0., 0.], [1., 1.]])]\n assert_array_equal(gradient(x, axis=0), dx[0])\n assert_array_equal(gradient(x, axis=1), dx[1])\n assert_array_equal(gradient(x, axis=-1), dx[1])\n assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])\n\n # test axis=None which means all axes\n assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])\n # and is the same as no axis keyword given\n assert_almost_equal(gradient(x, axis=None), gradient(x))\n\n # test vararg order\n assert_array_equal(gradient(x, 2, 3, axis=(1, 0)),\n [dx[1]/2.0, dx[0]/3.0])\n # test maximal number of varargs\n assert_raises(TypeError, gradient, x, 1, 2, axis=1)\n\n assert_raises(np.AxisError, gradient, x, axis=3)\n assert_raises(np.AxisError, gradient, x, axis=-3)\n # assert_raises(TypeError, gradient, x, axis=[1,])\n\n def test_timedelta64(self):\n # Make sure gradient() can handle special types like timedelta64\n x = np.array(\n [-5, -3, 10, 12, 61, 321, 300],\n dtype='timedelta64[D]')\n dx = np.array(\n [2, 7, 7, 25, 154, 119, -21],\n dtype='timedelta64[D]')\n assert_array_equal(gradient(x), dx)\n assert_(dx.dtype == np.dtype('timedelta64[D]'))\n\n def test_inexact_dtypes(self):\n for dt in [np.float16, np.float32, np.float64]:\n # dtypes should not be promoted in a different way to what diff does\n x = np.array([1, 2, 3], dtype=dt)\n assert_equal(gradient(x).dtype, np.diff(x).dtype)\n\n def test_values(self):\n # needs at least 2 points for edge_order ==1\n gradient(np.arange(2), edge_order=1)\n # needs at least 3 points for edge_order ==1\n gradient(np.arange(3), edge_order=2)\n\n assert_raises(ValueError, gradient, np.arange(0), edge_order=1)\n assert_raises(ValueError, gradient, np.arange(0), edge_order=2)\n assert_raises(ValueError, gradient, np.arange(1), edge_order=1)\n assert_raises(ValueError, gradient, np.arange(1), edge_order=2)\n assert_raises(ValueError, gradient, np.arange(2), edge_order=2)\n\n\nclass TestAngle(object):\n\n def test_basic(self):\n x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,\n 1, 1j, -1, -1j, 1 - 3j, -1 + 3j]\n y = angle(x)\n yo = [\n np.arctan(3.0 / 1.0),\n np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0,\n -np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)]\n z = angle(x, deg=1)\n zo = np.array(yo) * 180 / np.pi\n assert_array_almost_equal(y, yo, 11)\n assert_array_almost_equal(z, zo, 11)\n\n def test_subclass(self):\n x = np.ma.array([1 + 3j, 1, np.sqrt(2)/2 * (1 + 1j)])\n x[1] = np.ma.masked\n expected = np.ma.array([np.arctan(3.0 / 1.0), 0, np.arctan(1.0)])\n expected[1] = np.ma.masked\n actual = angle(x)\n assert_equal(type(actual), type(expected))\n assert_equal(actual.mask, expected.mask)\n assert_equal(actual, expected)\n\n\nclass TestTrimZeros(object):\n\n \"\"\"\n Only testing for integer splits.\n\n \"\"\"\n\n def test_basic(self):\n a = np.array([0, 0, 1, 2, 3, 4, 0])\n res = trim_zeros(a)\n assert_array_equal(res, 
np.array([1, 2, 3, 4]))\n\n def test_leading_skip(self):\n a = np.array([0, 0, 1, 0, 2, 3, 4, 0])\n res = trim_zeros(a)\n assert_array_equal(res, np.array([1, 0, 2, 3, 4]))\n\n def test_trailing_skip(self):\n a = np.array([0, 0, 1, 0, 2, 3, 0, 4, 0])\n res = trim_zeros(a)\n assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4]))\n\n\nclass TestExtins(object):\n\n def test_basic(self):\n a = np.array([1, 3, 2, 1, 2, 3, 3])\n b = extract(a > 1, a)\n assert_array_equal(b, [3, 2, 2, 3, 3])\n\n def test_place(self):\n # Make sure that non-np.ndarray objects\n # raise an error instead of doing nothing\n assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1])\n\n a = np.array([1, 4, 3, 2, 5, 8, 7])\n place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6])\n assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7])\n\n place(a, np.zeros(7), [])\n assert_array_equal(a, np.arange(1, 8))\n\n place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9])\n assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9])\n assert_raises_regex(ValueError, \"Cannot insert from an empty array\",\n lambda: place(a, [0, 0, 0, 0, 0, 1, 0], []))\n\n # See Issue #6974\n a = np.array(['12', '34'])\n place(a, [0, 1], '9')\n assert_array_equal(a, ['12', '9'])\n\n def test_both(self):\n a = rand(10)\n mask = a > 0.5\n ac = a.copy()\n c = extract(mask, a)\n place(a, mask, 0)\n place(a, mask, c)\n assert_array_equal(a, ac)\n\n\nclass TestVectorize(object):\n\n def test_simple(self):\n def addsubtract(a, b):\n if a > b:\n return a - b\n else:\n return a + b\n\n f = vectorize(addsubtract)\n r = f([0, 3, 6, 9], [1, 3, 5, 7])\n assert_array_equal(r, [1, 6, 1, 2])\n\n def test_scalar(self):\n def addsubtract(a, b):\n if a > b:\n return a - b\n else:\n return a + b\n\n f = vectorize(addsubtract)\n r = f([0, 3, 6, 9], 5)\n assert_array_equal(r, [5, 8, 1, 4])\n\n def test_large(self):\n x = np.linspace(-3, 2, 10000)\n f = vectorize(lambda x: x)\n y = f(x)\n assert_array_equal(y, x)\n\n def test_ufunc(self):\n import math\n f = vectorize(math.cos)\n args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi])\n r1 = f(args)\n r2 = np.cos(args)\n assert_array_almost_equal(r1, r2)\n\n def test_keywords(self):\n\n def foo(a, b=1):\n return a + b\n\n f = vectorize(foo)\n args = np.array([1, 2, 3])\n r1 = f(args)\n r2 = np.array([2, 3, 4])\n assert_array_equal(r1, r2)\n r1 = f(args, 2)\n r2 = np.array([3, 4, 5])\n assert_array_equal(r1, r2)\n\n def test_keywords_no_func_code(self):\n # This needs to test a function that has keywords but\n # no func_code attribute, since otherwise vectorize will\n # inspect the func_code.\n import random\n try:\n vectorize(random.randrange) # Should succeed\n except Exception:\n raise AssertionError()\n\n def test_keywords2_ticket_2100(self):\n # Test kwarg support: enhancement ticket 2100\n\n def foo(a, b=1):\n return a + b\n\n f = vectorize(foo)\n args = np.array([1, 2, 3])\n r1 = f(a=args)\n r2 = np.array([2, 3, 4])\n assert_array_equal(r1, r2)\n r1 = f(b=1, a=args)\n assert_array_equal(r1, r2)\n r1 = f(args, b=2)\n r2 = np.array([3, 4, 5])\n assert_array_equal(r1, r2)\n\n def test_keywords3_ticket_2100(self):\n # Test excluded with mixed positional and kwargs: ticket 2100\n def mypolyval(x, p):\n _p = list(p)\n res = _p.pop(0)\n while _p:\n res = res * x + _p.pop(0)\n return res\n\n vpolyval = np.vectorize(mypolyval, excluded=['p', 1])\n ans = [3, 6]\n assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3]))\n assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3]))\n assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3]))\n\n def 
test_keywords4_ticket_2100(self):\n # Test vectorizing function with no positional args.\n @vectorize\n def f(**kw):\n res = 1.0\n for _k in kw:\n res *= kw[_k]\n return res\n\n assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8])\n\n def test_keywords5_ticket_2100(self):\n # Test vectorizing function with no kwargs args.\n @vectorize\n def f(*v):\n return np.prod(v)\n\n assert_array_equal(f([1, 2], [3, 4]), [3, 8])\n\n def test_coverage1_ticket_2100(self):\n def foo():\n return 1\n\n f = vectorize(foo)\n assert_array_equal(f(), 1)\n\n def test_assigning_docstring(self):\n def foo(x):\n \"\"\"Original documentation\"\"\"\n return x\n\n f = vectorize(foo)\n assert_equal(f.__doc__, foo.__doc__)\n\n doc = \"Provided documentation\"\n f = vectorize(foo, doc=doc)\n assert_equal(f.__doc__, doc)\n\n def test_UnboundMethod_ticket_1156(self):\n # Regression test for issue 1156\n class Foo:\n b = 2\n\n def bar(self, a):\n return a ** self.b\n\n assert_array_equal(vectorize(Foo().bar)(np.arange(9)),\n np.arange(9) ** 2)\n assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)),\n np.arange(9) ** 2)\n\n def test_execution_order_ticket_1487(self):\n # Regression test for dependence on execution order: issue 1487\n f1 = vectorize(lambda x: x)\n res1a = f1(np.arange(3))\n res1b = f1(np.arange(0.1, 3))\n f2 = vectorize(lambda x: x)\n res2b = f2(np.arange(0.1, 3))\n res2a = f2(np.arange(3))\n assert_equal(res1a, res2a)\n assert_equal(res1b, res2b)\n\n def test_string_ticket_1892(self):\n # Test vectorization over strings: issue 1892.\n f = np.vectorize(lambda x: x)\n s = '0123456789' * 10\n assert_equal(s, f(s))\n\n def test_cache(self):\n # Ensure that vectorized func called exactly once per argument.\n _calls = [0]\n\n @vectorize\n def f(x):\n _calls[0] += 1\n return x ** 2\n\n f.cache = True\n x = np.arange(5)\n assert_array_equal(f(x), x * x)\n assert_equal(_calls[0], len(x))\n\n def test_otypes(self):\n f = np.vectorize(lambda x: x)\n f.otypes = 'i'\n x = np.arange(5)\n assert_array_equal(f(x), x)\n\n def test_parse_gufunc_signature(self):\n assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()]))\n assert_equal(nfb._parse_gufunc_signature('(x,y)->()'),\n ([('x', 'y')], [()]))\n assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'),\n ([('x',), ('y',)], [()]))\n assert_equal(nfb._parse_gufunc_signature('(x)->(y)'),\n ([('x',)], [('y',)]))\n assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'),\n ([('x',)], [('y',), ()]))\n assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'),\n ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))\n with assert_raises(ValueError):\n nfb._parse_gufunc_signature('(x)(y)->()')\n with assert_raises(ValueError):\n nfb._parse_gufunc_signature('(x),(y)->')\n with assert_raises(ValueError):\n nfb._parse_gufunc_signature('((x))->(x)')\n\n def test_signature_simple(self):\n def addsubtract(a, b):\n if a > b:\n return a - b\n else:\n return a + b\n\n f = vectorize(addsubtract, signature='(),()->()')\n r = f([0, 3, 6, 9], [1, 3, 5, 7])\n assert_array_equal(r, [1, 6, 1, 2])\n\n def test_signature_mean_last(self):\n def mean(a):\n return a.mean()\n\n f = vectorize(mean, signature='(n)->()')\n r = f([[1, 3], [2, 4]])\n assert_array_equal(r, [2, 3])\n\n def test_signature_center(self):\n def center(a):\n return a - a.mean()\n\n f = vectorize(center, signature='(n)->(n)')\n r = f([[1, 3], [2, 4]])\n assert_array_equal(r, [[-1, 1], [-1, 1]])\n\n def test_signature_two_outputs(self):\n f = vectorize(lambda x: (x, x), signature='()->(),()')\n r = f([1, 2, 3])\n 
assert_(isinstance(r, tuple) and len(r) == 2)\n assert_array_equal(r[0], [1, 2, 3])\n assert_array_equal(r[1], [1, 2, 3])\n\n def test_signature_outer(self):\n f = vectorize(np.outer, signature='(a),(b)->(a,b)')\n r = f([1, 2], [1, 2, 3])\n assert_array_equal(r, [[1, 2, 3], [2, 4, 6]])\n\n r = f([[[1, 2]]], [1, 2, 3])\n assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]])\n\n r = f([[1, 0], [2, 0]], [1, 2, 3])\n assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]],\n [[2, 4, 6], [0, 0, 0]]])\n\n r = f([1, 2], [[1, 2, 3], [0, 0, 0]])\n assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]],\n [[0, 0, 0], [0, 0, 0]]])\n\n def test_signature_computed_size(self):\n f = vectorize(lambda x: x[:-1], signature='(n)->(m)')\n r = f([1, 2, 3])\n assert_array_equal(r, [1, 2])\n\n r = f([[1, 2, 3], [2, 3, 4]])\n assert_array_equal(r, [[1, 2], [2, 3]])\n\n def test_signature_excluded(self):\n\n def foo(a, b=1):\n return a + b\n\n f = vectorize(foo, signature='()->()', excluded={'b'})\n assert_array_equal(f([1, 2, 3]), [2, 3, 4])\n assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3])\n\n def test_signature_otypes(self):\n f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64'])\n r = f([1, 2, 3])\n assert_equal(r.dtype, np.dtype('float64'))\n assert_array_equal(r, [1, 2, 3])\n\n def test_signature_invalid_inputs(self):\n f = vectorize(operator.add, signature='(n),(n)->(n)')\n with assert_raises_regex(TypeError, 'wrong number of positional'):\n f([1, 2])\n with assert_raises_regex(\n ValueError, 'does not have enough dimensions'):\n f(1, 2)\n with assert_raises_regex(\n ValueError, 'inconsistent size for core dimension'):\n f([1, 2], [1, 2, 3])\n\n f = vectorize(operator.add, signature='()->()')\n with assert_raises_regex(TypeError, 'wrong number of positional'):\n f(1, 2)\n\n def test_signature_invalid_outputs(self):\n\n f = vectorize(lambda x: x[:-1], signature='(n)->(n)')\n with assert_raises_regex(\n ValueError, 'inconsistent size for core dimension'):\n f([1, 2, 3])\n\n f = vectorize(lambda x: x, signature='()->(),()')\n with assert_raises_regex(ValueError, 'wrong number of outputs'):\n f(1)\n\n f = vectorize(lambda x: (x, x), signature='()->()')\n with assert_raises_regex(ValueError, 'wrong number of outputs'):\n f([1, 2])\n\n def test_size_zero_output(self):\n # see issue 5868\n f = np.vectorize(lambda x: x)\n x = np.zeros([0, 5], dtype=int)\n with assert_raises_regex(ValueError, 'otypes'):\n f(x)\n\n f.otypes = 'i'\n assert_array_equal(f(x), x)\n\n f = np.vectorize(lambda x: x, signature='()->()')\n with assert_raises_regex(ValueError, 'otypes'):\n f(x)\n\n f = np.vectorize(lambda x: x, signature='()->()', otypes='i')\n assert_array_equal(f(x), x)\n\n f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i')\n assert_array_equal(f(x), x)\n\n f = np.vectorize(lambda x: x, signature='(n)->(n)')\n assert_array_equal(f(x.T), x.T)\n\n f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i')\n with assert_raises_regex(ValueError, 'new output dimensions'):\n f(x)\n\n\nclass TestDigitize(object):\n\n def test_forward(self):\n x = np.arange(-6, 5)\n bins = np.arange(-5, 5)\n assert_array_equal(digitize(x, bins), np.arange(11))\n\n def test_reverse(self):\n x = np.arange(5, -6, -1)\n bins = np.arange(5, -5, -1)\n assert_array_equal(digitize(x, bins), np.arange(11))\n\n def test_random(self):\n x = rand(10)\n bin = np.linspace(x.min(), x.max(), 10)\n assert_(np.all(digitize(x, bin) != 0))\n\n def test_right_basic(self):\n x = [1, 5, 4, 10, 8, 11, 0]\n bins = [1, 5, 10]\n default_answer = [1, 2, 1, 3, 2, 
3, 0]\n assert_array_equal(digitize(x, bins), default_answer)\n right_answer = [0, 1, 1, 2, 2, 3, 0]\n assert_array_equal(digitize(x, bins, True), right_answer)\n\n def test_right_open(self):\n x = np.arange(-6, 5)\n bins = np.arange(-6, 4)\n assert_array_equal(digitize(x, bins, True), np.arange(11))\n\n def test_right_open_reverse(self):\n x = np.arange(5, -6, -1)\n bins = np.arange(4, -6, -1)\n assert_array_equal(digitize(x, bins, True), np.arange(11))\n\n def test_right_open_random(self):\n x = rand(10)\n bins = np.linspace(x.min(), x.max(), 10)\n assert_(np.all(digitize(x, bins, True) != 10))\n\n def test_monotonic(self):\n x = [-1, 0, 1, 2]\n bins = [0, 0, 1]\n assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3])\n assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3])\n bins = [1, 1, 0]\n assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0])\n assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0])\n bins = [1, 1, 1, 1]\n assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4])\n assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4])\n bins = [0, 0, 1, 0]\n assert_raises(ValueError, digitize, x, bins)\n bins = [1, 1, 0, 1]\n assert_raises(ValueError, digitize, x, bins)\n\n def test_casting_error(self):\n x = [1, 2, 3 + 1.j]\n bins = [1, 2, 3]\n assert_raises(TypeError, digitize, x, bins)\n x, bins = bins, x\n assert_raises(TypeError, digitize, x, bins)\n\n def test_return_type(self):\n # Functions returning indices should always return base ndarrays\n class A(np.ndarray):\n pass\n a = np.arange(5).view(A)\n b = np.arange(1, 3).view(A)\n assert_(not isinstance(digitize(b, a, False), A))\n assert_(not isinstance(digitize(b, a, True), A))\n\n def test_large_integers_increasing(self):\n # gh-11022\n x = 2**54 # loses precision in a float\n assert_equal(np.digitize(x, [x - 1, x + 1]), 1)\n\n @pytest.mark.xfail(\n reason=\"gh-11022: np.core.multiarray._monoticity loses precision\")\n def test_large_integers_decreasing(self):\n # gh-11022\n x = 2**54 # loses precision in a float\n assert_equal(np.digitize(x, [x + 1, x - 1]), 1)\n\n\nclass TestUnwrap(object):\n\n def test_simple(self):\n # check that unwrap removes jumps greater that 2*pi\n assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])\n # check that unwrap maintains continuity\n assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))\n\n\nclass TestFilterwindows(object):\n\n def test_hanning(self):\n # check symmetry\n w = hanning(10)\n assert_array_almost_equal(w, flipud(w), 7)\n # check known value\n assert_almost_equal(np.sum(w, axis=0), 4.500, 4)\n\n def test_hamming(self):\n # check symmetry\n w = hamming(10)\n assert_array_almost_equal(w, flipud(w), 7)\n # check known value\n assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)\n\n def test_bartlett(self):\n # check symmetry\n w = bartlett(10)\n assert_array_almost_equal(w, flipud(w), 7)\n # check known value\n assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)\n\n def test_blackman(self):\n # check symmetry\n w = blackman(10)\n assert_array_almost_equal(w, flipud(w), 7)\n # check known value\n assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)\n\n\nclass TestTrapz(object):\n\n def test_simple(self):\n x = np.arange(-10, 10, .1)\n r = trapz(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1)\n # check integral of normal equals 1\n assert_almost_equal(r, 1, 7)\n\n def test_ndim(self):\n x = np.linspace(0, 1, 3)\n y = np.linspace(0, 2, 8)\n z = np.linspace(0, 3, 13)\n\n wx = np.ones_like(x) * (x[1] - x[0])\n wx[0] /= 2\n wx[-1] /= 2\n wy = np.ones_like(y) * (y[1] - 
y[0])\n wy[0] /= 2\n wy[-1] /= 2\n wz = np.ones_like(z) * (z[1] - z[0])\n wz[0] /= 2\n wz[-1] /= 2\n\n q = x[:, None, None] + y[None,:, None] + z[None, None,:]\n\n qx = (q * wx[:, None, None]).sum(axis=0)\n qy = (q * wy[None, :, None]).sum(axis=1)\n qz = (q * wz[None, None, :]).sum(axis=2)\n\n # n-d `x`\n r = trapz(q, x=x[:, None, None], axis=0)\n assert_almost_equal(r, qx)\n r = trapz(q, x=y[None,:, None], axis=1)\n assert_almost_equal(r, qy)\n r = trapz(q, x=z[None, None,:], axis=2)\n assert_almost_equal(r, qz)\n\n # 1-d `x`\n r = trapz(q, x=x, axis=0)\n assert_almost_equal(r, qx)\n r = trapz(q, x=y, axis=1)\n assert_almost_equal(r, qy)\n r = trapz(q, x=z, axis=2)\n assert_almost_equal(r, qz)\n\n def test_masked(self):\n # Testing that masked arrays behave as if the function is 0 where\n # masked\n x = np.arange(5)\n y = x * x\n mask = x == 2\n ym = np.ma.array(y, mask=mask)\n r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))\n assert_almost_equal(trapz(ym, x), r)\n\n xm = np.ma.array(x, mask=mask)\n assert_almost_equal(trapz(ym, xm), r)\n\n xm = np.ma.array(x, mask=mask)\n assert_almost_equal(trapz(y, xm), r)\n\n\nclass TestSinc(object):\n\n def test_simple(self):\n assert_(sinc(0) == 1)\n w = sinc(np.linspace(-1, 1, 100))\n # check symmetry\n assert_array_almost_equal(w, flipud(w), 7)\n\n def test_array_like(self):\n x = [0, 0.5]\n y1 = sinc(np.array(x))\n y2 = sinc(list(x))\n y3 = sinc(tuple(x))\n assert_array_equal(y1, y2)\n assert_array_equal(y1, y3)\n\n\nclass TestUnique(object):\n\n def test_simple(self):\n x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])\n assert_(np.all(unique(x) == [0, 1, 2, 3, 4]))\n assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))\n x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham']\n assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget']))\n x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])\n assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))\n\n\nclass TestCheckFinite(object):\n\n def test_simple(self):\n a = [1, 2, 3]\n b = [1, 2, np.inf]\n c = [1, 2, np.nan]\n np.lib.asarray_chkfinite(a)\n assert_raises(ValueError, np.lib.asarray_chkfinite, b)\n assert_raises(ValueError, np.lib.asarray_chkfinite, c)\n\n def test_dtype_order(self):\n # Regression test for missing dtype and order arguments\n a = [1, 2, 3]\n a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64)\n assert_(a.dtype == np.float64)\n\n\nclass TestCorrCoef(object):\n A = np.array(\n [[0.15391142, 0.18045767, 0.14197213],\n [0.70461506, 0.96474128, 0.27906989],\n [0.9297531, 0.32296769, 0.19267156]])\n B = np.array(\n [[0.10377691, 0.5417086, 0.49807457],\n [0.82872117, 0.77801674, 0.39226705],\n [0.9314666, 0.66800209, 0.03538394]])\n res1 = np.array(\n [[1., 0.9379533, -0.04931983],\n [0.9379533, 1., 0.30007991],\n [-0.04931983, 0.30007991, 1.]])\n res2 = np.array(\n [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523],\n [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386],\n [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601],\n [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113],\n [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823],\n [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]])\n\n def test_non_array(self):\n assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]),\n [[1., -1.], [-1., 1.]])\n\n def test_simple(self):\n tgt1 = corrcoef(self.A)\n assert_almost_equal(tgt1, self.res1)\n assert_(np.all(np.abs(tgt1) <= 1.0))\n\n tgt2 = corrcoef(self.A, self.B)\n 
assert_almost_equal(tgt2, self.res2)\n assert_(np.all(np.abs(tgt2) <= 1.0))\n\n def test_ddof(self):\n # ddof raises DeprecationWarning\n with suppress_warnings() as sup:\n warnings.simplefilter(\"always\")\n assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1)\n sup.filter(DeprecationWarning)\n # ddof has no or negligible effect on the function\n assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)\n assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)\n assert_almost_equal(corrcoef(self.A, ddof=3), self.res1)\n assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2)\n\n def test_bias(self):\n # bias raises DeprecationWarning\n with suppress_warnings() as sup:\n warnings.simplefilter(\"always\")\n assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0)\n assert_warns(DeprecationWarning, corrcoef, self.A, bias=0)\n sup.filter(DeprecationWarning)\n # bias has no or negligible effect on the function\n assert_almost_equal(corrcoef(self.A, bias=1), self.res1)\n\n def test_complex(self):\n x = np.array([[1, 2, 3], [1j, 2j, 3j]])\n res = corrcoef(x)\n tgt = np.array([[1., -1.j], [1.j, 1.]])\n assert_allclose(res, tgt)\n assert_(np.all(np.abs(res) <= 1.0))\n\n def test_xy(self):\n x = np.array([[1, 2, 3]])\n y = np.array([[1j, 2j, 3j]])\n assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]]))\n\n def test_empty(self):\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('always', RuntimeWarning)\n assert_array_equal(corrcoef(np.array([])), np.nan)\n assert_array_equal(corrcoef(np.array([]).reshape(0, 2)),\n np.array([]).reshape(0, 0))\n assert_array_equal(corrcoef(np.array([]).reshape(2, 0)),\n np.array([[np.nan, np.nan], [np.nan, np.nan]]))\n\n def test_extreme(self):\n x = [[1e-100, 1e100], [1e100, 1e-100]]\n with np.errstate(all='raise'):\n c = corrcoef(x)\n assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]]))\n assert_(np.all(np.abs(c) <= 1.0))\n\n\nclass TestCov(object):\n x1 = np.array([[0, 2], [1, 1], [2, 0]]).T\n res1 = np.array([[1., -1.], [-1., 1.]])\n x2 = np.array([0.0, 1.0, 2.0], ndmin=2)\n frequencies = np.array([1, 4, 1])\n x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T\n res2 = np.array([[0.4, -0.4], [-0.4, 0.4]])\n unit_frequencies = np.ones(3, dtype=np.integer)\n weights = np.array([1.0, 4.0, 1.0])\n res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. 
/ 3.]])\n unit_weights = np.ones(3)\n x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964])\n\n def test_basic(self):\n assert_allclose(cov(self.x1), self.res1)\n\n def test_complex(self):\n x = np.array([[1, 2, 3], [1j, 2j, 3j]])\n res = np.array([[1., -1.j], [1.j, 1.]])\n assert_allclose(cov(x), res)\n assert_allclose(cov(x, aweights=np.ones(3)), res)\n\n def test_xy(self):\n x = np.array([[1, 2, 3]])\n y = np.array([[1j, 2j, 3j]])\n assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]]))\n\n def test_empty(self):\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('always', RuntimeWarning)\n assert_array_equal(cov(np.array([])), np.nan)\n assert_array_equal(cov(np.array([]).reshape(0, 2)),\n np.array([]).reshape(0, 0))\n assert_array_equal(cov(np.array([]).reshape(2, 0)),\n np.array([[np.nan, np.nan], [np.nan, np.nan]]))\n\n def test_wrong_ddof(self):\n with warnings.catch_warnings(record=True):\n warnings.simplefilter('always', RuntimeWarning)\n assert_array_equal(cov(self.x1, ddof=5),\n np.array([[np.inf, -np.inf],\n [-np.inf, np.inf]]))\n\n def test_1D_rowvar(self):\n assert_allclose(cov(self.x3), cov(self.x3, rowvar=0))\n y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501])\n assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=0))\n\n def test_1D_variance(self):\n assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1))\n\n def test_fweights(self):\n assert_allclose(cov(self.x2, fweights=self.frequencies),\n cov(self.x2_repeats))\n assert_allclose(cov(self.x1, fweights=self.frequencies),\n self.res2)\n assert_allclose(cov(self.x1, fweights=self.unit_frequencies),\n self.res1)\n nonint = self.frequencies + 0.5\n assert_raises(TypeError, cov, self.x1, fweights=nonint)\n f = np.ones((2, 3), dtype=np.integer)\n assert_raises(RuntimeError, cov, self.x1, fweights=f)\n f = np.ones(2, dtype=np.integer)\n assert_raises(RuntimeError, cov, self.x1, fweights=f)\n f = -1 * np.ones(3, dtype=np.integer)\n assert_raises(ValueError, cov, self.x1, fweights=f)\n\n def test_aweights(self):\n assert_allclose(cov(self.x1, aweights=self.weights), self.res3)\n assert_allclose(cov(self.x1, aweights=3.0 * self.weights),\n cov(self.x1, aweights=self.weights))\n assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1)\n w = np.ones((2, 3))\n assert_raises(RuntimeError, cov, self.x1, aweights=w)\n w = np.ones(2)\n assert_raises(RuntimeError, cov, self.x1, aweights=w)\n w = -1.0 * np.ones(3)\n assert_raises(ValueError, cov, self.x1, aweights=w)\n\n def test_unit_fweights_and_aweights(self):\n assert_allclose(cov(self.x2, fweights=self.frequencies,\n aweights=self.unit_weights),\n cov(self.x2_repeats))\n assert_allclose(cov(self.x1, fweights=self.frequencies,\n aweights=self.unit_weights),\n self.res2)\n assert_allclose(cov(self.x1, fweights=self.unit_frequencies,\n aweights=self.unit_weights),\n self.res1)\n assert_allclose(cov(self.x1, fweights=self.unit_frequencies,\n aweights=self.weights),\n self.res3)\n assert_allclose(cov(self.x1, fweights=self.unit_frequencies,\n aweights=3.0 * self.weights),\n cov(self.x1, aweights=self.weights))\n assert_allclose(cov(self.x1, fweights=self.unit_frequencies,\n aweights=self.unit_weights),\n self.res1)\n\n\nclass Test_I0(object):\n\n def test_simple(self):\n assert_almost_equal(\n i0(0.5),\n np.array(1.0634833707413234))\n\n A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549])\n assert_almost_equal(\n i0(A),\n np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049]))\n\n B = np.array([[0.827002, 0.99959078],\n 
[0.89694769, 0.39298162],\n [0.37954418, 0.05206293],\n [0.36465447, 0.72446427],\n [0.48164949, 0.50324519]])\n assert_almost_equal(\n i0(B),\n np.array([[1.17843223, 1.26583466],\n [1.21147086, 1.03898290],\n [1.03633899, 1.00067775],\n [1.03352052, 1.13557954],\n [1.05884290, 1.06432317]]))\n\n\nclass TestKaiser(object):\n\n def test_simple(self):\n assert_(np.isfinite(kaiser(1, 1.0)))\n assert_almost_equal(kaiser(0, 1.0),\n np.array([]))\n assert_almost_equal(kaiser(2, 1.0),\n np.array([0.78984831, 0.78984831]))\n assert_almost_equal(kaiser(5, 1.0),\n np.array([0.78984831, 0.94503323, 1.,\n 0.94503323, 0.78984831]))\n assert_almost_equal(kaiser(5, 1.56789),\n np.array([0.58285404, 0.88409679, 1.,\n 0.88409679, 0.58285404]))\n\n def test_int_beta(self):\n kaiser(3, 4)\n\n\nclass TestMsort(object):\n\n def test_simple(self):\n A = np.array([[0.44567325, 0.79115165, 0.54900530],\n [0.36844147, 0.37325583, 0.96098397],\n [0.64864341, 0.52929049, 0.39172155]])\n assert_almost_equal(\n msort(A),\n np.array([[0.36844147, 0.37325583, 0.39172155],\n [0.44567325, 0.52929049, 0.54900530],\n [0.64864341, 0.79115165, 0.96098397]]))\n\n\nclass TestMeshgrid(object):\n\n def test_simple(self):\n [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])\n assert_array_equal(X, np.array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]))\n assert_array_equal(Y, np.array([[4, 4, 4],\n [5, 5, 5],\n [6, 6, 6],\n [7, 7, 7]]))\n\n def test_single_input(self):\n [X] = meshgrid([1, 2, 3, 4])\n assert_array_equal(X, np.array([1, 2, 3, 4]))\n\n def test_no_input(self):\n args = []\n assert_array_equal([], meshgrid(*args))\n assert_array_equal([], meshgrid(*args, copy=False))\n\n def test_indexing(self):\n x = [1, 2, 3]\n y = [4, 5, 6, 7]\n [X, Y] = meshgrid(x, y, indexing='ij')\n assert_array_equal(X, np.array([[1, 1, 1, 1],\n [2, 2, 2, 2],\n [3, 3, 3, 3]]))\n assert_array_equal(Y, np.array([[4, 5, 6, 7],\n [4, 5, 6, 7],\n [4, 5, 6, 7]]))\n\n # Test expected shapes:\n z = [8, 9]\n assert_(meshgrid(x, y)[0].shape == (4, 3))\n assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4))\n assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2))\n assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2))\n\n assert_raises(ValueError, meshgrid, x, y, indexing='notvalid')\n\n def test_sparse(self):\n [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True)\n assert_array_equal(X, np.array([[1, 2, 3]]))\n assert_array_equal(Y, np.array([[4], [5], [6], [7]]))\n\n def test_invalid_arguments(self):\n # Test that meshgrid complains about invalid arguments\n # Regression test for issue #4755:\n # https://github.com/numpy/numpy/issues/4755\n assert_raises(TypeError, meshgrid,\n [1, 2, 3], [4, 5, 6, 7], indices='ij')\n\n def test_return_type(self):\n # Test for appropriate dtype in returned arrays.\n # Regression test for issue #5297\n # https://github.com/numpy/numpy/issues/5297\n x = np.arange(0, 10, dtype=np.float32)\n y = np.arange(10, 20, dtype=np.float64)\n\n X, Y = np.meshgrid(x,y)\n\n assert_(X.dtype == x.dtype)\n assert_(Y.dtype == y.dtype)\n\n # copy\n X, Y = np.meshgrid(x,y, copy=True)\n\n assert_(X.dtype == x.dtype)\n assert_(Y.dtype == y.dtype)\n\n # sparse\n X, Y = np.meshgrid(x,y, sparse=True)\n\n assert_(X.dtype == x.dtype)\n assert_(Y.dtype == y.dtype)\n\n def test_writeback(self):\n # Issue 8561\n X = np.array([1.1, 2.2])\n Y = np.array([3.3, 4.4])\n x, y = np.meshgrid(X, Y, sparse=False, copy=True)\n\n x[0, :] = 0\n assert_equal(x[0, :], 0)\n assert_equal(x[1, :], X)\n\n\nclass TestPiecewise(object):\n\n def 
test_simple(self):\n # Condition is single bool list\n x = piecewise([0, 0], [True, False], [1])\n assert_array_equal(x, [1, 0])\n\n # List of conditions: single bool list\n x = piecewise([0, 0], [[True, False]], [1])\n assert_array_equal(x, [1, 0])\n\n # Conditions is single bool array\n x = piecewise([0, 0], np.array([True, False]), [1])\n assert_array_equal(x, [1, 0])\n\n # Condition is single int array\n x = piecewise([0, 0], np.array([1, 0]), [1])\n assert_array_equal(x, [1, 0])\n\n # List of conditions: int array\n x = piecewise([0, 0], [np.array([1, 0])], [1])\n assert_array_equal(x, [1, 0])\n\n x = piecewise([0, 0], [[False, True]], [lambda x:-1])\n assert_array_equal(x, [0, -1])\n\n assert_raises_regex(ValueError, '1 or 2 functions are expected',\n piecewise, [0, 0], [[False, True]], [])\n assert_raises_regex(ValueError, '1 or 2 functions are expected',\n piecewise, [0, 0], [[False, True]], [1, 2, 3])\n\n def test_two_conditions(self):\n x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])\n assert_array_equal(x, [3, 4])\n\n def test_scalar_domains_three_conditions(self):\n x = piecewise(3, [True, False, False], [4, 2, 0])\n assert_equal(x, 4)\n\n def test_default(self):\n # No value specified for x[1], should be 0\n x = piecewise([1, 2], [True, False], [2])\n assert_array_equal(x, [2, 0])\n\n # Should set x[1] to 3\n x = piecewise([1, 2], [True, False], [2, 3])\n assert_array_equal(x, [2, 3])\n\n def test_0d(self):\n x = np.array(3)\n y = piecewise(x, x > 3, [4, 0])\n assert_(y.ndim == 0)\n assert_(y == 0)\n\n x = 5\n y = piecewise(x, [True, False], [1, 0])\n assert_(y.ndim == 0)\n assert_(y == 1)\n\n # With 3 ranges (It was failing, before)\n y = piecewise(x, [False, False, True], [1, 2, 3])\n assert_array_equal(y, 3)\n\n def test_0d_comparison(self):\n x = 3\n y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed.\n assert_equal(y, 4)\n\n # With 3 ranges (It was failing, before)\n x = 4\n y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3])\n assert_array_equal(y, 2)\n\n assert_raises_regex(ValueError, '2 or 3 functions are expected',\n piecewise, x, [x <= 3, x > 3], [1])\n assert_raises_regex(ValueError, '2 or 3 functions are expected',\n piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1])\n\n def test_0d_0d_condition(self):\n x = np.array(3)\n c = np.array(x > 3)\n y = piecewise(x, [c], [1, 2])\n assert_equal(y, 2)\n\n def test_multidimensional_extrafunc(self):\n x = np.array([[-2.5, -1.5, -0.5],\n [0.5, 1.5, 2.5]])\n y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3])\n assert_array_equal(y, np.array([[-1., -1., -1.],\n [3., 3., 1.]]))\n\n\nclass TestBincount(object):\n\n def test_simple(self):\n y = np.bincount(np.arange(4))\n assert_array_equal(y, np.ones(4))\n\n def test_simple2(self):\n y = np.bincount(np.array([1, 5, 2, 4, 1]))\n assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1]))\n\n def test_simple_weight(self):\n x = np.arange(4)\n w = np.array([0.2, 0.3, 0.5, 0.1])\n y = np.bincount(x, w)\n assert_array_equal(y, w)\n\n def test_simple_weight2(self):\n x = np.array([1, 2, 4, 5, 2])\n w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])\n y = np.bincount(x, w)\n assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))\n\n def test_with_minlength(self):\n x = np.array([0, 1, 0, 1, 1])\n y = np.bincount(x, minlength=3)\n assert_array_equal(y, np.array([2, 3, 0]))\n x = []\n y = np.bincount(x, minlength=0)\n assert_array_equal(y, np.array([]))\n\n def test_with_minlength_smaller_than_maxvalue(self):\n x = np.array([0, 1, 1, 2, 2, 3, 3])\n y = np.bincount(x, 
minlength=2)\n assert_array_equal(y, np.array([1, 2, 2, 2]))\n y = np.bincount(x, minlength=0)\n assert_array_equal(y, np.array([1, 2, 2, 2]))\n\n def test_with_minlength_and_weights(self):\n x = np.array([1, 2, 4, 5, 2])\n w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])\n y = np.bincount(x, w, 8)\n assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0]))\n\n def test_empty(self):\n x = np.array([], dtype=int)\n y = np.bincount(x)\n assert_array_equal(x, y)\n\n def test_empty_with_minlength(self):\n x = np.array([], dtype=int)\n y = np.bincount(x, minlength=5)\n assert_array_equal(y, np.zeros(5, dtype=int))\n\n def test_with_incorrect_minlength(self):\n x = np.array([], dtype=int)\n assert_raises_regex(TypeError,\n \"'str' object cannot be interpreted\",\n lambda: np.bincount(x, minlength=\"foobar\"))\n assert_raises_regex(ValueError,\n \"must not be negative\",\n lambda: np.bincount(x, minlength=-1))\n\n x = np.arange(5)\n assert_raises_regex(TypeError,\n \"'str' object cannot be interpreted\",\n lambda: np.bincount(x, minlength=\"foobar\"))\n assert_raises_regex(ValueError,\n \"must not be negative\",\n lambda: np.bincount(x, minlength=-1))\n\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\n def test_dtype_reference_leaks(self):\n # gh-6805\n intp_refcount = sys.getrefcount(np.dtype(np.intp))\n double_refcount = sys.getrefcount(np.dtype(np.double))\n\n for j in range(10):\n np.bincount([1, 2, 3])\n assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)\n assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)\n\n for j in range(10):\n np.bincount([1, 2, 3], [4, 5, 6])\n assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)\n assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)\n\n\nclass TestInterp(object):\n\n def test_exceptions(self):\n assert_raises(ValueError, interp, 0, [], [])\n assert_raises(ValueError, interp, 0, [0], [1, 2])\n assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)\n assert_raises(ValueError, interp, 0, [], [], period=360)\n assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)\n\n def test_basic(self):\n x = np.linspace(0, 1, 5)\n y = np.linspace(0, 1, 5)\n x0 = np.linspace(0, 1, 50)\n assert_almost_equal(np.interp(x0, x, y), x0)\n\n def test_right_left_behavior(self):\n # Needs range of sizes to test different code paths.\n # size ==1 is special cased, 1 < size < 5 is linear search, and\n # size >= 5 goes through local search and possibly binary search.\n for size in range(1, 10):\n xp = np.arange(size, dtype=np.double)\n yp = np.ones(size, dtype=np.double)\n incpts = np.array([-1, 0, size - 1, size], dtype=np.double)\n decpts = incpts[::-1]\n\n incres = interp(incpts, xp, yp)\n decres = interp(decpts, xp, yp)\n inctgt = np.array([1, 1, 1, 1], dtype=float)\n dectgt = inctgt[::-1]\n assert_equal(incres, inctgt)\n assert_equal(decres, dectgt)\n\n incres = interp(incpts, xp, yp, left=0)\n decres = interp(decpts, xp, yp, left=0)\n inctgt = np.array([0, 1, 1, 1], dtype=float)\n dectgt = inctgt[::-1]\n assert_equal(incres, inctgt)\n assert_equal(decres, dectgt)\n\n incres = interp(incpts, xp, yp, right=2)\n decres = interp(decpts, xp, yp, right=2)\n inctgt = np.array([1, 1, 1, 2], dtype=float)\n dectgt = inctgt[::-1]\n assert_equal(incres, inctgt)\n assert_equal(decres, dectgt)\n\n incres = interp(incpts, xp, yp, left=0, right=2)\n decres = interp(decpts, xp, yp, left=0, right=2)\n inctgt = np.array([0, 1, 1, 2], dtype=float)\n dectgt = inctgt[::-1]\n assert_equal(incres, 
inctgt)\n assert_equal(decres, dectgt)\n\n def test_scalar_interpolation_point(self):\n x = np.linspace(0, 1, 5)\n y = np.linspace(0, 1, 5)\n x0 = 0\n assert_almost_equal(np.interp(x0, x, y), x0)\n x0 = .3\n assert_almost_equal(np.interp(x0, x, y), x0)\n x0 = np.float32(.3)\n assert_almost_equal(np.interp(x0, x, y), x0)\n x0 = np.float64(.3)\n assert_almost_equal(np.interp(x0, x, y), x0)\n x0 = np.nan\n assert_almost_equal(np.interp(x0, x, y), x0)\n\n def test_non_finite_behavior(self):\n x = [1, 2, 2.5, 3, 4]\n xp = [1, 2, 3, 4]\n fp = [1, 2, np.inf, 4]\n assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4])\n fp = [1, 2, np.nan, 4]\n assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])\n\n def test_complex_interp(self):\n # test complex interpolation\n x = np.linspace(0, 1, 5)\n y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j\n x0 = 0.3\n y0 = x0 + (1+x0)*1.0j\n assert_almost_equal(np.interp(x0, x, y), y0)\n # test complex left and right\n x0 = -1\n left = 2 + 3.0j\n assert_almost_equal(np.interp(x0, x, y, left=left), left)\n x0 = 2.0\n right = 2 + 3.0j\n assert_almost_equal(np.interp(x0, x, y, right=right), right)\n # test complex non finite\n x = [1, 2, 2.5, 3, 4]\n xp = [1, 2, 3, 4]\n fp = [1, 2+1j, np.inf, 4]\n y = [1, 2+1j, np.inf+0.5j, np.inf, 4]\n assert_almost_equal(np.interp(x, xp, fp), y)\n # test complex periodic\n x = [-180, -170, -185, 185, -10, -5, 0, 365]\n xp = [190, -190, 350, -350]\n fp = [5+1.0j, 10+2j, 3+3j, 4+4j]\n y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j,\n 3.5+3.5j, 3.75+3.75j]\n assert_almost_equal(np.interp(x, xp, fp, period=360), y)\n\n def test_zero_dimensional_interpolation_point(self):\n x = np.linspace(0, 1, 5)\n y = np.linspace(0, 1, 5)\n x0 = np.array(.3)\n assert_almost_equal(np.interp(x0, x, y), x0)\n\n xp = np.array([0, 2, 4])\n fp = np.array([1, -1, 1])\n\n actual = np.interp(np.array(1), xp, fp)\n assert_equal(actual, 0)\n assert_(isinstance(actual, np.float64))\n\n actual = np.interp(np.array(4.5), xp, fp, period=4)\n assert_equal(actual, 0.5)\n assert_(isinstance(actual, np.float64))\n\n def test_if_len_x_is_small(self):\n xp = np.arange(0, 10, 0.0001)\n fp = np.sin(xp)\n assert_almost_equal(np.interp(np.pi, xp, fp), 0.0)\n\n def test_period(self):\n x = [-180, -170, -185, 185, -10, -5, 0, 365]\n xp = [190, -190, 350, -350]\n fp = [5, 10, 3, 4]\n y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]\n assert_almost_equal(np.interp(x, xp, fp, period=360), y)\n x = np.array(x, order='F').reshape(2, -1)\n y = np.array(y, order='C').reshape(2, -1)\n assert_almost_equal(np.interp(x, xp, fp, period=360), y)\n\n\ndef compare_results(res, desired):\n for i in range(len(desired)):\n assert_array_equal(res[i], desired[i])\n\n\nclass TestPercentile(object):\n\n def test_basic(self):\n x = np.arange(8) * 0.5\n assert_equal(np.percentile(x, 0), 0.)\n assert_equal(np.percentile(x, 100), 3.5)\n assert_equal(np.percentile(x, 50), 1.75)\n x[1] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.percentile(x, 0), np.nan)\n assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan)\n assert_(w[0].category is RuntimeWarning)\n\n def test_api(self):\n d = np.ones(5)\n np.percentile(d, 5, None, None, False)\n np.percentile(d, 5, None, None, False, 'linear')\n o = np.ones((1,))\n np.percentile(d, 5, None, o, False, 'linear')\n\n def test_2D(self):\n x = np.array([[1, 1, 1],\n [1, 1, 1],\n [4, 4, 3],\n [1, 1, 1],\n [1, 1, 
1]])\n assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])\n\n def test_linear(self):\n\n # Test defaults\n assert_equal(np.percentile(range(10), 50), 4.5)\n\n # explicitly specify interpolation_method 'linear' (the default)\n assert_equal(np.percentile(range(10), 50,\n interpolation='linear'), 4.5)\n\n def test_lower_higher(self):\n\n # interpolation_method 'lower'/'higher'\n assert_equal(np.percentile(range(10), 50,\n interpolation='lower'), 4)\n assert_equal(np.percentile(range(10), 50,\n interpolation='higher'), 5)\n\n def test_midpoint(self):\n assert_equal(np.percentile(range(10), 51,\n interpolation='midpoint'), 4.5)\n assert_equal(np.percentile(range(11), 51,\n interpolation='midpoint'), 5.5)\n assert_equal(np.percentile(range(11), 50,\n interpolation='midpoint'), 5)\n\n def test_nearest(self):\n assert_equal(np.percentile(range(10), 51,\n interpolation='nearest'), 5)\n assert_equal(np.percentile(range(10), 49,\n interpolation='nearest'), 4)\n\n def test_sequence(self):\n x = np.arange(8) * 0.5\n assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75])\n\n def test_axis(self):\n x = np.arange(12).reshape(3, 4)\n\n assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0])\n\n r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]\n assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0)\n\n r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]]\n assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T)\n\n # ensure qth axis is always first as with np.array(old_percentile(..))\n x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)\n assert_equal(np.percentile(x, (25, 50)).shape, (2,))\n assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,))\n assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6))\n assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6))\n assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6))\n assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5))\n assert_equal(\n np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6))\n assert_equal(np.percentile(x, (25, 50),\n interpolation=\"higher\").shape, (2,))\n assert_equal(np.percentile(x, (25, 50, 75),\n interpolation=\"higher\").shape, (3,))\n assert_equal(np.percentile(x, (25, 50), axis=0,\n interpolation=\"higher\").shape, (2, 4, 5, 6))\n assert_equal(np.percentile(x, (25, 50), axis=1,\n interpolation=\"higher\").shape, (2, 3, 5, 6))\n assert_equal(np.percentile(x, (25, 50), axis=2,\n interpolation=\"higher\").shape, (2, 3, 4, 6))\n assert_equal(np.percentile(x, (25, 50), axis=3,\n interpolation=\"higher\").shape, (2, 3, 4, 5))\n assert_equal(np.percentile(x, (25, 50, 75), axis=1,\n interpolation=\"higher\").shape, (3, 3, 5, 6))\n\n def test_scalar_q(self):\n # test for no empty dimensions for compatibility with old percentile\n x = np.arange(12).reshape(3, 4)\n assert_equal(np.percentile(x, 50), 5.5)\n assert_(np.isscalar(np.percentile(x, 50)))\n r0 = np.array([4., 5., 6., 7.])\n assert_equal(np.percentile(x, 50, axis=0), r0)\n assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)\n r1 = np.array([1.5, 5.5, 9.5])\n assert_almost_equal(np.percentile(x, 50, axis=1), r1)\n assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape)\n\n out = np.empty(1)\n assert_equal(np.percentile(x, 50, out=out), 5.5)\n assert_equal(out, 5.5)\n out = np.empty(4)\n assert_equal(np.percentile(x, 50, axis=0, out=out), r0)\n assert_equal(out, r0)\n out = np.empty(3)\n assert_equal(np.percentile(x, 50, axis=1, out=out), r1)\n assert_equal(out, r1)\n\n 
# test for no empty dimensions for compatibility with old percentile\n x = np.arange(12).reshape(3, 4)\n assert_equal(np.percentile(x, 50, interpolation='lower'), 5.)\n assert_(np.isscalar(np.percentile(x, 50)))\n r0 = np.array([4., 5., 6., 7.])\n c0 = np.percentile(x, 50, interpolation='lower', axis=0)\n assert_equal(c0, r0)\n assert_equal(c0.shape, r0.shape)\n r1 = np.array([1., 5., 9.])\n c1 = np.percentile(x, 50, interpolation='lower', axis=1)\n assert_almost_equal(c1, r1)\n assert_equal(c1.shape, r1.shape)\n\n out = np.empty((), dtype=x.dtype)\n c = np.percentile(x, 50, interpolation='lower', out=out)\n assert_equal(c, 5)\n assert_equal(out, 5)\n out = np.empty(4, dtype=x.dtype)\n c = np.percentile(x, 50, interpolation='lower', axis=0, out=out)\n assert_equal(c, r0)\n assert_equal(out, r0)\n out = np.empty(3, dtype=x.dtype)\n c = np.percentile(x, 50, interpolation='lower', axis=1, out=out)\n assert_equal(c, r1)\n assert_equal(out, r1)\n\n def test_exception(self):\n assert_raises(ValueError, np.percentile, [1, 2], 56,\n interpolation='foobar')\n assert_raises(ValueError, np.percentile, [1], 101)\n assert_raises(ValueError, np.percentile, [1], -1)\n assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101])\n assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1])\n\n def test_percentile_list(self):\n assert_equal(np.percentile([1, 2, 3], 0), 1)\n\n def test_percentile_out(self):\n x = np.array([1, 2, 3])\n y = np.zeros((3,))\n p = (1, 2, 3)\n np.percentile(x, p, out=y)\n assert_equal(y, np.percentile(x, p))\n\n x = np.array([[1, 2, 3],\n [4, 5, 6]])\n\n y = np.zeros((3, 3))\n np.percentile(x, p, axis=0, out=y)\n assert_equal(y, np.percentile(x, p, axis=0))\n\n y = np.zeros((3, 2))\n np.percentile(x, p, axis=1, out=y)\n assert_equal(y, np.percentile(x, p, axis=1))\n\n x = np.arange(12).reshape(3, 4)\n # q.dim > 1, float\n r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]])\n out = np.empty((2, 4))\n assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0)\n assert_equal(out, r0)\n r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]])\n out = np.empty((2, 3))\n assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1)\n assert_equal(out, r1)\n\n # q.dim > 1, int\n r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])\n out = np.empty((2, 4), dtype=x.dtype)\n c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out)\n assert_equal(c, r0)\n assert_equal(out, r0)\n r1 = np.array([[0, 4, 8], [1, 5, 9]])\n out = np.empty((2, 3), dtype=x.dtype)\n c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out)\n assert_equal(c, r1)\n assert_equal(out, r1)\n\n def test_percentile_empty_dim(self):\n # empty dims are preserved\n d = np.arange(11 * 2).reshape(11, 1, 2, 1)\n assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1))\n assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1))\n assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1))\n assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2))\n assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2))\n assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1))\n assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1))\n assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1))\n\n assert_array_equal(np.percentile(d, 50, axis=2,\n interpolation='midpoint').shape,\n (11, 1, 1))\n assert_array_equal(np.percentile(d, 50, axis=-2,\n interpolation='midpoint').shape,\n (11, 1, 1))\n\n 
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape,\n (2, 1, 2, 1))\n assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape,\n (2, 11, 2, 1))\n assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape,\n (2, 11, 1, 1))\n assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape,\n (2, 11, 1, 2))\n\n def test_percentile_no_overwrite(self):\n a = np.array([2, 3, 4, 1])\n np.percentile(a, [50], overwrite_input=False)\n assert_equal(a, np.array([2, 3, 4, 1]))\n\n a = np.array([2, 3, 4, 1])\n np.percentile(a, [50])\n assert_equal(a, np.array([2, 3, 4, 1]))\n\n def test_no_p_overwrite(self):\n p = np.linspace(0., 100., num=5)\n np.percentile(np.arange(100.), p, interpolation=\"midpoint\")\n assert_array_equal(p, np.linspace(0., 100., num=5))\n p = np.linspace(0., 100., num=5).tolist()\n np.percentile(np.arange(100.), p, interpolation=\"midpoint\")\n assert_array_equal(p, np.linspace(0., 100., num=5).tolist())\n\n def test_percentile_overwrite(self):\n a = np.array([2, 3, 4, 1])\n b = np.percentile(a, [50], overwrite_input=True)\n assert_equal(b, np.array([2.5]))\n\n b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True)\n assert_equal(b, np.array([2.5]))\n\n def test_extended_axis(self):\n o = np.random.normal(size=(71, 23))\n x = np.dstack([o] * 10)\n assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))\n x = np.moveaxis(x, -1, 0)\n assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30))\n x = x.swapaxes(0, 1).copy()\n assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30))\n x = x.swapaxes(0, 1).copy()\n\n assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)),\n np.percentile(x, [25, 60], axis=None))\n assert_equal(np.percentile(x, [25, 60], axis=(0,)),\n np.percentile(x, [25, 60], axis=0))\n\n d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))\n np.random.shuffle(d.ravel())\n assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0],\n np.percentile(d[:,:,:, 0].flatten(), 25))\n assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1],\n np.percentile(d[:,:, 1,:].flatten(), [10, 90]))\n assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2],\n np.percentile(d[:,:, 2,:].flatten(), 25))\n assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2],\n np.percentile(d[2,:,:,:].flatten(), 25))\n assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1],\n np.percentile(d[2, 1,:,:].flatten(), 25))\n assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1],\n np.percentile(d[2,:,:, 1].flatten(), 25))\n assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2],\n np.percentile(d[2,:, 2,:].flatten(), 25))\n\n def test_extended_axis_invalid(self):\n d = np.ones((3, 5, 7, 11))\n assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25)\n assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25)\n assert_raises(np.AxisError, np.percentile, d, axis=4, q=25)\n assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25)\n # each of these refers to the same axis twice\n assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25)\n assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25)\n assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25)\n\n def test_keepdims(self):\n d = np.ones((3, 5, 7, 11))\n assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape,\n (1, 1, 1, 1))\n assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape,\n (1, 1, 7, 11))\n assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape,\n (1, 5, 7, 1))\n 
assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape,\n (3, 1, 7, 11))\n assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape,\n (1, 1, 1, 1))\n assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape,\n (1, 1, 7, 1))\n\n assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3),\n keepdims=True).shape, (2, 1, 1, 7, 1))\n assert_equal(np.percentile(d, [1, 7], axis=(0, 3),\n keepdims=True).shape, (2, 1, 5, 7, 1))\n\n def test_out(self):\n o = np.zeros((4,))\n d = np.ones((3, 4))\n assert_equal(np.percentile(d, 0, 0, out=o), o)\n assert_equal(np.percentile(d, 0, 0, interpolation='nearest', out=o), o)\n o = np.zeros((3,))\n assert_equal(np.percentile(d, 1, 1, out=o), o)\n assert_equal(np.percentile(d, 1, 1, interpolation='nearest', out=o), o)\n\n o = np.zeros(())\n assert_equal(np.percentile(d, 2, out=o), o)\n assert_equal(np.percentile(d, 2, interpolation='nearest', out=o), o)\n\n def test_out_nan(self):\n with warnings.catch_warnings(record=True):\n warnings.filterwarnings('always', '', RuntimeWarning)\n o = np.zeros((4,))\n d = np.ones((3, 4))\n d[2, 1] = np.nan\n assert_equal(np.percentile(d, 0, 0, out=o), o)\n assert_equal(\n np.percentile(d, 0, 0, interpolation='nearest', out=o), o)\n o = np.zeros((3,))\n assert_equal(np.percentile(d, 1, 1, out=o), o)\n assert_equal(\n np.percentile(d, 1, 1, interpolation='nearest', out=o), o)\n o = np.zeros(())\n assert_equal(np.percentile(d, 1, out=o), o)\n assert_equal(\n np.percentile(d, 1, interpolation='nearest', out=o), o)\n\n def test_nan_behavior(self):\n a = np.arange(24, dtype=float)\n a[2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.percentile(a, 0.3), np.nan)\n assert_equal(np.percentile(a, 0.3, axis=0), np.nan)\n assert_equal(np.percentile(a, [0.3, 0.6], axis=0),\n np.array([np.nan] * 2))\n assert_(w[0].category is RuntimeWarning)\n assert_(w[1].category is RuntimeWarning)\n assert_(w[2].category is RuntimeWarning)\n\n a = np.arange(24, dtype=float).reshape(2, 3, 4)\n a[1, 2, 3] = np.nan\n a[1, 1, 2] = np.nan\n\n # no axis\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.percentile(a, 0.3), np.nan)\n assert_equal(np.percentile(a, 0.3).ndim, 0)\n assert_(w[0].category is RuntimeWarning)\n\n # axis0 zerod\n b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0)\n b[2, 3] = np.nan\n b[1, 2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.percentile(a, 0.3, 0), b)\n\n # axis0 not zerod\n b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),\n [0.3, 0.6], 0)\n b[:, 2, 3] = np.nan\n b[:, 1, 2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.percentile(a, [0.3, 0.6], 0), b)\n\n # axis1 zerod\n b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1)\n b[1, 3] = np.nan\n b[1, 2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.percentile(a, 0.3, 1), b)\n # axis1 not zerod\n b = np.percentile(\n np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1)\n b[:, 1, 3] = np.nan\n b[:, 1, 2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.percentile(a, [0.3, 0.6], 1), b)\n\n # axis02 
zerod\n b = np.percentile(\n np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2))\n b[1] = np.nan\n b[2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.percentile(a, 0.3, (0, 2)), b)\n # axis02 not zerod\n b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),\n [0.3, 0.6], (0, 2))\n b[:, 1] = np.nan\n b[:, 2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b)\n # axis02 not zerod with nearest interpolation\n b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),\n [0.3, 0.6], (0, 2), interpolation='nearest')\n b[:, 1] = np.nan\n b[:, 2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.percentile(\n a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)\n\n\nclass TestQuantile(object):\n # most of this is already tested by TestPercentile\n\n def test_basic(self):\n x = np.arange(8) * 0.5\n assert_equal(np.quantile(x, 0), 0.)\n assert_equal(np.quantile(x, 1), 3.5)\n assert_equal(np.quantile(x, 0.5), 1.75)\n\n def test_no_p_overwrite(self):\n # this is worth retesting, because quantile does not make a copy\n p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])\n p = p0.copy()\n np.quantile(np.arange(100.), p, interpolation=\"midpoint\")\n assert_array_equal(p, p0)\n\n p0 = p0.tolist()\n p = p.tolist()\n np.quantile(np.arange(100.), p, interpolation=\"midpoint\")\n assert_array_equal(p, p0)\n\n\nclass TestMedian(object):\n\n def test_basic(self):\n a0 = np.array(1)\n a1 = np.arange(2)\n a2 = np.arange(6).reshape(2, 3)\n assert_equal(np.median(a0), 1)\n assert_allclose(np.median(a1), 0.5)\n assert_allclose(np.median(a2), 2.5)\n assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5])\n assert_equal(np.median(a2, axis=1), [1, 4])\n assert_allclose(np.median(a2, axis=None), 2.5)\n\n a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775])\n assert_almost_equal((a[1] + a[3]) / 2., np.median(a))\n a = np.array([0.0463301, 0.0444502, 0.141249])\n assert_equal(a[0], np.median(a))\n a = np.array([0.0444502, 0.141249, 0.0463301])\n assert_equal(a[-1], np.median(a))\n # check array scalar result\n assert_equal(np.median(a).ndim, 0)\n a[1] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.median(a).ndim, 0)\n assert_(w[0].category is RuntimeWarning)\n\n def test_axis_keyword(self):\n a3 = np.array([[2, 3],\n [0, 1],\n [6, 7],\n [4, 5]])\n for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:\n orig = a.copy()\n np.median(a, axis=None)\n for ax in range(a.ndim):\n np.median(a, axis=ax)\n assert_array_equal(a, orig)\n\n assert_allclose(np.median(a3, axis=0), [3, 4])\n assert_allclose(np.median(a3.T, axis=1), [3, 4])\n assert_allclose(np.median(a3), 3.5)\n assert_allclose(np.median(a3, axis=None), 3.5)\n assert_allclose(np.median(a3.T), 3.5)\n\n def test_overwrite_keyword(self):\n a3 = np.array([[2, 3],\n [0, 1],\n [6, 7],\n [4, 5]])\n a0 = np.array(1)\n a1 = np.arange(2)\n a2 = np.arange(6).reshape(2, 3)\n assert_allclose(np.median(a0.copy(), overwrite_input=True), 1)\n assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5)\n assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5)\n assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0),\n [1.5, 2.5, 3.5])\n assert_allclose(\n np.median(a2.copy(), 
overwrite_input=True, axis=1), [1, 4])\n assert_allclose(\n np.median(a2.copy(), overwrite_input=True, axis=None), 2.5)\n assert_allclose(\n np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4])\n assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1),\n [3, 4])\n\n a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5))\n np.random.shuffle(a4.ravel())\n assert_allclose(np.median(a4, axis=None),\n np.median(a4.copy(), axis=None, overwrite_input=True))\n assert_allclose(np.median(a4, axis=0),\n np.median(a4.copy(), axis=0, overwrite_input=True))\n assert_allclose(np.median(a4, axis=1),\n np.median(a4.copy(), axis=1, overwrite_input=True))\n assert_allclose(np.median(a4, axis=2),\n np.median(a4.copy(), axis=2, overwrite_input=True))\n\n def test_array_like(self):\n x = [1, 2, 3]\n assert_almost_equal(np.median(x), 2)\n x2 = [x]\n assert_almost_equal(np.median(x2), 2)\n assert_allclose(np.median(x2, axis=0), x)\n\n def test_subclass(self):\n # gh-3846\n class MySubClass(np.ndarray):\n\n def __new__(cls, input_array, info=None):\n obj = np.asarray(input_array).view(cls)\n obj.info = info\n return obj\n\n def mean(self, axis=None, dtype=None, out=None):\n return -7\n\n a = MySubClass([1, 2, 3])\n assert_equal(np.median(a), -7)\n\n def test_out(self):\n o = np.zeros((4,))\n d = np.ones((3, 4))\n assert_equal(np.median(d, 0, out=o), o)\n o = np.zeros((3,))\n assert_equal(np.median(d, 1, out=o), o)\n o = np.zeros(())\n assert_equal(np.median(d, out=o), o)\n\n def test_out_nan(self):\n with warnings.catch_warnings(record=True):\n warnings.filterwarnings('always', '', RuntimeWarning)\n o = np.zeros((4,))\n d = np.ones((3, 4))\n d[2, 1] = np.nan\n assert_equal(np.median(d, 0, out=o), o)\n o = np.zeros((3,))\n assert_equal(np.median(d, 1, out=o), o)\n o = np.zeros(())\n assert_equal(np.median(d, out=o), o)\n\n def test_nan_behavior(self):\n a = np.arange(24, dtype=float)\n a[2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.median(a), np.nan)\n assert_equal(np.median(a, axis=0), np.nan)\n assert_(w[0].category is RuntimeWarning)\n assert_(w[1].category is RuntimeWarning)\n\n a = np.arange(24, dtype=float).reshape(2, 3, 4)\n a[1, 2, 3] = np.nan\n a[1, 1, 2] = np.nan\n\n # no axis\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.median(a), np.nan)\n assert_equal(np.median(a).ndim, 0)\n assert_(w[0].category is RuntimeWarning)\n\n # axis0\n b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0)\n b[2, 3] = np.nan\n b[1, 2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.median(a, 0), b)\n assert_equal(len(w), 1)\n\n # axis1\n b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1)\n b[1, 3] = np.nan\n b[1, 2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.median(a, 1), b)\n assert_equal(len(w), 1)\n\n # axis02\n b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2))\n b[1] = np.nan\n b[2] = np.nan\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.median(a, (0, 2)), b)\n assert_equal(len(w), 1)\n\n def test_empty(self):\n # empty arrays\n a = np.array([], dtype=float)\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', 
RuntimeWarning)\n assert_equal(np.median(a), np.nan)\n assert_(w[0].category is RuntimeWarning)\n\n # multiple dimensions\n a = np.array([], dtype=float, ndmin=3)\n # no axis\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.median(a), np.nan)\n assert_(w[0].category is RuntimeWarning)\n\n # axis 0 and 1\n b = np.array([], dtype=float, ndmin=2)\n assert_equal(np.median(a, axis=0), b)\n assert_equal(np.median(a, axis=1), b)\n\n # axis 2\n b = np.array(np.nan, dtype=float, ndmin=2)\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', RuntimeWarning)\n assert_equal(np.median(a, axis=2), b)\n assert_(w[0].category is RuntimeWarning)\n\n def test_object(self):\n o = np.arange(7.)\n assert_(type(np.median(o.astype(object))), float)\n o[2] = np.nan\n assert_(type(np.median(o.astype(object))), float)\n\n def test_extended_axis(self):\n o = np.random.normal(size=(71, 23))\n x = np.dstack([o] * 10)\n assert_equal(np.median(x, axis=(0, 1)), np.median(o))\n x = np.moveaxis(x, -1, 0)\n assert_equal(np.median(x, axis=(-2, -1)), np.median(o))\n x = x.swapaxes(0, 1).copy()\n assert_equal(np.median(x, axis=(0, -1)), np.median(o))\n\n assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None))\n assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0))\n assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1))\n\n d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))\n np.random.shuffle(d.ravel())\n assert_equal(np.median(d, axis=(0, 1, 2))[0],\n np.median(d[:,:,:, 0].flatten()))\n assert_equal(np.median(d, axis=(0, 1, 3))[1],\n np.median(d[:,:, 1,:].flatten()))\n assert_equal(np.median(d, axis=(3, 1, -4))[2],\n np.median(d[:,:, 2,:].flatten()))\n assert_equal(np.median(d, axis=(3, 1, 2))[2],\n np.median(d[2,:,:,:].flatten()))\n assert_equal(np.median(d, axis=(3, 2))[2, 1],\n np.median(d[2, 1,:,:].flatten()))\n assert_equal(np.median(d, axis=(1, -2))[2, 1],\n np.median(d[2,:,:, 1].flatten()))\n assert_equal(np.median(d, axis=(1, 3))[2, 2],\n np.median(d[2,:, 2,:].flatten()))\n\n def test_extended_axis_invalid(self):\n d = np.ones((3, 5, 7, 11))\n assert_raises(np.AxisError, np.median, d, axis=-5)\n assert_raises(np.AxisError, np.median, d, axis=(0, -5))\n assert_raises(np.AxisError, np.median, d, axis=4)\n assert_raises(np.AxisError, np.median, d, axis=(0, 4))\n assert_raises(ValueError, np.median, d, axis=(1, 1))\n\n def test_keepdims(self):\n d = np.ones((3, 5, 7, 11))\n assert_equal(np.median(d, axis=None, keepdims=True).shape,\n (1, 1, 1, 1))\n assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape,\n (1, 1, 7, 11))\n assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape,\n (1, 5, 7, 1))\n assert_equal(np.median(d, axis=(1,), keepdims=True).shape,\n (3, 1, 7, 11))\n assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape,\n (1, 1, 1, 1))\n assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,\n (1, 1, 7, 1))\n\n\nclass TestAdd_newdoc_ufunc(object):\n\n def test_ufunc_arg(self):\n assert_raises(TypeError, add_newdoc_ufunc, 2, \"blah\")\n assert_raises(ValueError, add_newdoc_ufunc, np.add, \"blah\")\n\n def test_string_arg(self):\n assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)\n\n\nclass TestAdd_newdoc(object):\n\n @pytest.mark.skipif(sys.flags.optimize == 2, reason=\"Python running -OO\")\n @pytest.mark.xfail(IS_PYPY, reason=\"PyPy does not modify tp_doc\")\n def test_add_doc(self):\n # test np.add_newdoc\n tgt = \"Current flat index into 
the array.\"\n assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)\n assert_(len(np.core.ufunc.identity.__doc__) > 300)\n assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)\n\nclass TestSortComplex(object):\n\n @pytest.mark.parametrize(\"type_in, type_out\", [\n ('l', 'D'),\n ('h', 'F'),\n ('H', 'F'),\n ('b', 'F'),\n ('B', 'F'),\n ('g', 'G'),\n ])\n def test_sort_real(self, type_in, type_out):\n # sort_complex() type casting for real input types\n a = np.array([5, 3, 6, 2, 1], dtype=type_in)\n actual = np.sort_complex(a)\n expected = np.sort(a).astype(type_out)\n assert_equal(actual, expected)\n assert_equal(actual.dtype, expected.dtype)\n\n def test_sort_complex(self):\n # sort_complex() handling of complex input\n a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D')\n expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D')\n actual = np.sort_complex(a)\n assert_equal(actual, expected)\n assert_equal(actual.dtype, expected.dtype)\n" ]
[ [ "scipy.fftpack.irfft", "numpy.take", "numpy.asarray", "scipy.fftpack._fftpack.zfft", "numpy.issubdtype", "numpy.dtype", "scipy.fftpack.rfft", "scipy.fftpack.fft", "numpy.random.randn", "numpy.exp", "numpy.testing.assert_equal", "numpy.swapaxes", "numpy.arange", "scipy.fftpack._fftpack.drfft", "scipy.fftpack.fft2", "numpy.zeros", "numpy.testing.assert_array_almost_equal", "scipy.fftpack.basic._is_safe_size", "scipy.fftpack.ifft", "scipy.fftpack._fftpack.zrfft", "numpy.testing.assert_array_almost_equal_nulp", "scipy.fftpack.fftn", "numpy.fft.ifft", "numpy.random.rand", "numpy.testing.assert_", "numpy.array", "scipy.fftpack.ifftn", "numpy.random.seed", "numpy.fft.fft", "numpy.add.outer", "numpy.linalg.norm" ], [ "numpy.product", "numpy.dtype", "numpy.broadcast", "numpy.zeros_like", "numpy.any", "numpy.ravel_multi_index", "numpy.bool_", "numpy.testing.assert_equal", "numpy.may_share_memory", "numpy.testing.suppress_warnings", "numpy.arange", "numpy.full", "numpy.zeros", "numpy.nonzero", "numpy.int_", "numpy.rec.array", "numpy.testing.assert_raises", "numpy.testing.assert_", "numpy.array", "numpy.testing.assert_warns", "numpy.intp", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.broadcast_to", "numpy.float64", "numpy.float_", "numpy.prod", "numpy.empty" ], [ "numpy.deprecate", "numpy.dot", "numpy.diag", "numpy.poly1d", "numpy.amax", "numpy.take", "numpy.rollaxis", "numpy.can_cast", "numpy.asarray", "numpy.issubdtype", "numpy.cumsum", "numpy.concatenate", "numpy.all", "numpy.zeros_like", "numpy.any", "numpy.searchsorted", "numpy.where", "scipy.special.poch", "numpy.nextafter", "numpy.ones_like", "numpy.arange", "numpy.eye", "numpy.empty_like", "numpy.atleast_1d", "numpy.asanyarray", "numpy.diff", "numpy.interp", "numpy.ravel", "numpy.zeros", "numpy.logical_not", "scipy.special.gamma", "numpy.ascontiguousarray", "numpy.amin", "numpy.isnan", "numpy.atleast_2d", "scipy._lib.six.xrange", "numpy.transpose", "numpy.argsort", "numpy.array", "numpy.sort", "numpy.ones", "scipy.special.comb", "numpy.shape", "numpy.ndindex", "numpy.empty" ], [ "numpy.testing.assert_equal", "numpy.distutils.system_info.ConfigParser", "numpy.distutils.ccompiler.new_compiler", "numpy.distutils.customized_ccompiler", "numpy.distutils._shell_utils.NativeParser.join" ], [ "numpy.compat.isfileobj", "numpy.core.overrides.set_module", "numpy.compat.os_fspath" ], [ "numpy.testing.assert_equal", "numpy.uint32", "numpy.core._rational_tests.rational", "numpy.int8", "numpy.dtype", "numpy.finfo", "numpy.typeDict.values", "numpy.core.numeric.pickle.dumps", "numpy.testing.assert_raises", "numpy.iinfo", "numpy.testing.assert_", "numpy.array", "numpy.zeros" ], [ "numpy.dtype" ], [ "numpy.lib.unwrap", "numpy.sqrt", "numpy.cumsum", "numpy.all", "numpy.lib.blackman", "numpy.digitize", "numpy.exp", "numpy.lib.setxor1d", "numpy.sin", "numpy.diff", "numpy.insert", "numpy.zeros", "numpy.testing.assert_raises_regex", "numpy.median", "numpy.cumprod", "numpy.testing.assert_raises", "numpy.array", "numpy.sometrue", "numpy.sum", "numpy.gradient", "numpy.lib.sinc", "numpy.lib.hanning", "numpy.testing.assert_array_equal", "numpy.lib.function_base._parse_gufunc_signature", "numpy.lib.flipud", "numpy.lib.interp", "numpy.arctan", "numpy.asarray", "numpy.lib.trapz", "numpy.var", "numpy.lib.trim_zeros", "numpy.lib.unique", "numpy.lib.rot90", "numpy.testing.suppress_warnings", "numpy.ma.arange", "numpy.copy", "numpy.float32", "numpy.lib.gradient", "numpy.lib.i0", "numpy.lib.select", "numpy.amin", "numpy.lib.extract", "numpy.random.rand", 
"numpy.testing.assert_", "numpy.corrcoef", "numpy.lib.piecewise", "numpy.errstate", "numpy.testing.assert_warns", "numpy.lib.average", "numpy.ones", "numpy.vectorize", "numpy.lib.meshgrid", "numpy.empty", "numpy.linspace", "numpy.alltrue", "numpy.moveaxis", "numpy.random.randint", "numpy.testing.assert_equal", "numpy.interp", "numpy.sort_complex", "numpy.lib.vectorize", "numpy.lib.angle", "numpy.lib.hamming", "numpy.testing.assert_array_almost_equal", "numpy.isnan", "numpy.quantile", "numpy.testing.assert_allclose", "numpy.tile", "numpy.cos", "numpy.percentile", "numpy.dstack", "numpy.bincount", "numpy.amax", "numpy.lib.delete", "numpy.lib.bartlett", "numpy.dtype", "numpy.any", "numpy.ma.array", "numpy.lib.cov", "numpy.ones_like", "numpy.arange", "numpy.stack", "numpy.testing.assert_almost_equal", "numpy.lib.msort", "numpy.lib.corrcoef", "numpy.lib.asarray_chkfinite", "numpy.lib.insert", "numpy.meshgrid", "numpy.flip", "numpy.lib.diff", "numpy.random.random", "numpy.random.seed", "numpy.abs", "numpy.add.outer", "numpy.lib.kaiser", "numpy.sort", "numpy.result_type", "numpy.random.normal", "numpy.float64", "numpy.prod", "numpy.lib.place", "numpy.average", "numpy.lib.digitize" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.19", "1.16", "1.18", "1.17" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.19", "1.16", "1.18", "1.20", "1.17" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
snowcoding/justice40-tool
[ "b6a6813bb5d617abf400cafc97da891618541558", "b6a6813bb5d617abf400cafc97da891618541558" ]
[ "data/data-pipeline/data_pipeline/etl/sources/michigan_ejscreen/etl.py", "data/data-pipeline/data_pipeline/etl/sources/census_acs/etl_utils.py" ]
[ "import pandas as pd\n\nfrom data_pipeline.etl.base import ExtractTransformLoad\nfrom data_pipeline.utils import get_module_logger\nfrom data_pipeline.score import field_names\nfrom data_pipeline.config import settings\n\nlogger = get_module_logger(__name__)\n\n\nclass MichiganEnviroScreenETL(ExtractTransformLoad):\n \"\"\"Michigan EJ Screen class that ingests dataset represented\n here: https://www.arcgis.com/apps/webappviewer/index.html?id=dc4f0647dda34959963488d3f519fd24\n This class ingests the data presented in \"Assessing the State of Environmental\n Justice in Michigan.\" Please see the README in this module for further details.\n \"\"\"\n\n def __init__(self):\n self.MICHIGAN_EJSCREEN_S3_URL = (\n settings.AWS_JUSTICE40_DATASOURCES_URL\n + \"/michigan_ejscore_12212021.csv\"\n )\n\n self.CSV_PATH = self.DATA_PATH / \"dataset\" / \"michigan_ejscreen\"\n self.MICHIGAN_EJSCREEN_PRIORITY_COMMUNITY_THRESHOLD: float = 0.75\n\n self.COLUMNS_TO_KEEP = [\n self.GEOID_TRACT_FIELD_NAME,\n field_names.MICHIGAN_EJSCREEN_SCORE_FIELD,\n field_names.MICHIGAN_EJSCREEN_PERCENTILE_FIELD,\n field_names.MICHIGAN_EJSCREEN_PRIORITY_COMMUNITY_FIELD,\n ]\n\n self.df: pd.DataFrame\n\n def extract(self) -> None:\n logger.info(\"Downloading Michigan EJSCREEN Data\")\n self.df = pd.read_csv(\n filepath_or_buffer=self.MICHIGAN_EJSCREEN_S3_URL,\n dtype={\"GEO_ID\": \"string\"},\n low_memory=False,\n )\n\n def transform(self) -> None:\n logger.info(\"Transforming Michigan EJSCREEN Data\")\n\n self.df.rename(\n columns={\n \"GEO_ID\": self.GEOID_TRACT_FIELD_NAME,\n \"EJ_Score_Cal_Min\": field_names.MICHIGAN_EJSCREEN_SCORE_FIELD,\n \"Pct_CalMin\": field_names.MICHIGAN_EJSCREEN_PERCENTILE_FIELD,\n },\n inplace=True,\n )\n # Calculate the top quartile of prioritized communities\n # Please see pg. 104 - 109 from source:\n # pg. 
https://deepblue.lib.umich.edu/bitstream/handle/2027.42/149105/AssessingtheStateofEnvironmentalJusticeinMichigan_344.pdf\n self.df[field_names.MICHIGAN_EJSCREEN_PRIORITY_COMMUNITY_FIELD] = (\n self.df[field_names.MICHIGAN_EJSCREEN_PERCENTILE_FIELD]\n >= self.MICHIGAN_EJSCREEN_PRIORITY_COMMUNITY_THRESHOLD\n )\n\n def load(self) -> None:\n logger.info(\"Saving Michigan Environmental Screening Tool to CSV\")\n # write nationwide csv\n self.CSV_PATH.mkdir(parents=True, exist_ok=True)\n self.df[self.COLUMNS_TO_KEEP].to_csv(\n self.CSV_PATH / \"michigan_ejscreen.csv\", index=False\n )\n", "from pathlib import Path\nfrom typing import List\nimport censusdata\nimport pandas as pd\n\nfrom data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes\nfrom data_pipeline.utils import get_module_logger\n\nlogger = get_module_logger(__name__)\n\nCENSUS_ACS_FIPS_CODES_TO_SKIP = [\"60\", \"66\", \"69\", \"78\"]\n\n\ndef _fips_from_censusdata_censusgeo(censusgeo: censusdata.censusgeo) -> str:\n \"\"\"Create a FIPS code from the proprietary censusgeo index.\"\"\"\n fips = \"\".join([value for (key, value) in censusgeo.params()])\n return fips\n\n\n# pylint: disable=too-many-arguments\ndef retrieve_census_acs_data(\n acs_year: int,\n variables: List[str],\n tract_output_field_name: str,\n data_path_for_fips_codes: Path,\n acs_type=\"acs5\",\n) -> pd.DataFrame:\n \"\"\"Retrieves and combines census ACS data for a given year.\"\"\"\n dfs = []\n for fips in get_state_fips_codes(data_path_for_fips_codes):\n if fips in CENSUS_ACS_FIPS_CODES_TO_SKIP:\n logger.info(\n f\"Skipping download for state/territory with FIPS code {fips}\"\n )\n else:\n logger.info(\n f\"Downloading data for state/territory with FIPS code {fips}\"\n )\n\n try:\n response = censusdata.download(\n src=acs_type,\n year=acs_year,\n geo=censusdata.censusgeo(\n [(\"state\", fips), (\"county\", \"*\"), (\"tract\", \"*\")]\n ),\n var=variables,\n )\n dfs.append(response)\n\n except ValueError as e:\n logger.error(\n f\"Could not download data for state/territory with FIPS code {fips}\"\n )\n raise e\n\n df = pd.concat(dfs)\n\n df[tract_output_field_name] = df.index.to_series().apply(\n func=_fips_from_censusdata_censusgeo\n )\n\n return df\n" ]
[ [ "pandas.read_csv" ], [ "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jiwoncpark/lens-classification
[ "c1faf4dbbd4a16f2df74a34fd593ec7128750252" ]
[ "magnificat/drw_dataset.py" ]
[ "import os\nimport os.path as osp\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nfrom magnificat import drw_utils\nfrom magnificat.cadence import LSSTCadence\n\n\nclass DRWDataset(Dataset):\n\n bp_to_int = dict(zip(list('ugrizy'), range(6)))\n int_to_bp = dict(zip(range(6), list('ugrizy')))\n\n def __init__(self,\n params_sampler,\n out_dir,\n num_samples,\n is_training,\n transform_x_func=lambda x: x,\n transform_y_func=lambda x: x,\n prestored_bandpasses=list('ugrizy'),\n seed=123,\n obs_kwargs={}):\n \"\"\"Dataset of DRW light curves\n\n Parameters\n ----------\n params_sampler : flexible\n Any sampler that has a `sample()` method returning a dict\n of `self.param_names` (see below) and has an attribute\n `bandpasses` which is a list of strings indicating which\n LSST bands, and `idx` which is list of indices if sampler\n is associated with a catalog\n out_dir : str\n Output directory for this dataset\n num_samples : int\n Number of AGNs in this dataset\n is_training : bool\n whether this is the training set\n transform_x_func : callable, optional\n Transform function for the times x, useful if the ML model is\n sensitive to the absolute scale of time. Default: identity function\n prestored_bandpasses : TYPE, optional\n Description\n seed : int, optional\n Random seed relevant for generating DRW light curves\n obs_kwargs: dict\n Parameters defining pointings. Includes as keys 'n_pointings_init'\n (number of pointings to request), 'obs_dir' (directory\n containing observation conditions), 'seed' (random seed for\n sampling observation conditions for each light curve, defaults to\n `seed`), 'bandpasses' (list of bandpasses to include in trimming)\n \"\"\"\n self.params_sampler = params_sampler\n # Figure out which bandpasses are sampled\n bandpasses = self.params_sampler.bandpasses\n self.bandpasses_int = [self.bp_to_int[bp] for bp in bandpasses]\n self.bandpasses_int.sort()\n self.bandpasses = [self.int_to_bp[bp_i] for bp_i in self.bandpasses_int]\n # Compile list of parameters, both bp-dependent and otherwise\n # Determined at data generation time\n param_names = ['BH_mass', 'M_i']\n param_names += [f'log_sf_inf_{bp}' for bp in prestored_bandpasses]\n param_names += [f'{bp}' for bp in prestored_bandpasses]\n param_names += ['redshift']\n param_names += [f'log_rf_tau_{bp}' for bp in prestored_bandpasses]\n self.param_names = param_names\n # Create output directory for this dataset\n self.out_dir = out_dir\n os.makedirs(self.out_dir, exist_ok=True)\n self.num_samples = num_samples\n self.obs_kwargs = obs_kwargs\n self.is_training = is_training\n self.seed = seed\n self.transform_x_func = transform_x_func\n self.transform_y_func = transform_y_func\n self.delta_x = 1.0 # 1-day interval\n self.max_x = 3650.0 # LSST 10-year\n # Preview of untrimmed times\n self.x_grid = np.arange(0, self.max_x, self.delta_x)\n self.x_grid = self.transform_x_func(self.x_grid)\n self.n_points = len(self.x_grid)\n # For standardizing params\n self.mean_params = None\n self.std_params = None\n self.log_params = None\n self.slice_params = None\n # Load observation strategy\n self.load_obs_strat()\n # Generate and prestore light curves\n self._generate_x_y_params()\n np.savetxt(os.path.join(out_dir, 'cat_idx.txt'),\n self.params_sampler.idx, fmt='%i')\n self._fully_obs = False # init property\n self._add_noise = True # init property\n\n def get_sliced_params(self):\n return 
np.array(self.param_names)[np.array(self.slice_params)]\n\n def load_obs_strat(self):\n \"\"\"Load observation strategies\n\n \"\"\"\n self.cadence_obj = LSSTCadence(self.obs_kwargs['obs_dir'])\n ra, dec = self.cadence_obj.get_pointings(self.obs_kwargs['n_pointings_init'])\n self.cadence_obj.get_obs_info(ra, dec, skip_ddf=True,\n min_visits=50)\n self.cadence_obj.bin_by_day(bandpasses=self.obs_kwargs['bandpasses'])\n obs_mask = self.cadence_obj.get_observed_mask() # [3650,]\n self.trimmed_T = sum(obs_mask)\n self.obs_mask = torch.from_numpy(obs_mask).to(torch.bool)\n self.rng = np.random.default_rng(self.obs_kwargs.get('seed', self.seed)) # for sampling pointings\n\n def get_t_obs(self):\n \"\"\"Get full 10-year times in observed frame\n\n \"\"\"\n return torch.arange(0, self.max_x, self.delta_x)\n\n def _generate_x_y_params(self):\n \"\"\"Generate and store fully observed DRW light curves and params\n\n \"\"\"\n # Save times first, since it's the same for all AGNs in dataset\n x = self.get_t_obs() # [3651]\n torch.save(self.obs_mask, osp.join(self.out_dir, 'obs_mask.pt'))\n torch.save(x, osp.join(self.out_dir, 'x.pt'))\n for index in tqdm(range(self.num_samples), desc=\"y, params\"):\n if osp.exists(osp.join(self.out_dir, f'drw_{index}.pt')):\n continue\n # Sample params\n params_dict = self.params_sampler.sample()\n z = params_dict['redshift']\n y_concat = torch.ones([self.n_points, 6])*(-99) # [3650, 6]\n # Render LC for each filter\n for bp in self.bandpasses:\n bp_int = self.bp_to_int[bp]\n log_rf_tau = params_dict[f'log_rf_tau_{bp}']\n log_sf_inf = params_dict[f'log_sf_inf_{bp}']\n mean_mag = params_dict[f'{bp}']\n y = self._generate_light_curve(index, log_rf_tau, log_sf_inf,\n mean_mag, z) # [3650,]\n y_concat[:, bp_int] = y\n # Sort params in predetermined ordering\n params = torch.tensor([params_dict[n] for n in self.param_names]) # [n_params]\n # Concat along filter dimension in predetermined filter ordering\n # y_concat = y_concat[self.obs_mask, :] # [trimmed_T, N_filters]\n # Save y_concat without obs_mask\n # y_concat ~ [3651, N_filters]\n torch.save((y_concat, params),\n osp.join(self.out_dir, f'drw_{index}.pt'))\n\n def _generate_light_curve(self, index, log_rf_tau, log_sf_inf, mean, z):\n \"\"\"Generate a single light curve in a given filter.\n Rendering is done in the rest frame, with the input params\n assumed to be in the rest frame.\n\n Parameters\n ----------\n index : int\n index within the dataset\n log_rf_tau : float\n log10 of rest-frame timescale in days\n log_sf_inf : float\n log10 of rest-frame asymptotic amplitude in mag\n mean : float\n mean static magnitude\n z : float\n redshift\n\n Returns\n -------\n tuple\n single-filter light curve of shape [n_points, 1]\n \"\"\"\n torch.manual_seed(int(str(self.seed) + str(index)))\n # Shifted rest-frame times\n t_rest = self.get_t_obs()/(1.0 + z)\n # DRW flux\n tau = 10**log_rf_tau\n sf_inf = 10**log_sf_inf\n y = drw_utils.get_drw_torch(t_rest, tau, z, sf_inf,\n xmean=mean) # [T,]\n return y\n\n @property\n def fully_obs(self):\n return self._fully_obs\n\n @fully_obs.setter\n def fully_obs(self, val):\n self._fully_obs = val\n\n @property\n def add_noise(self):\n return self._add_noise\n\n @add_noise.setter\n def add_noise(self, val):\n self._add_noise = val\n\n def __getitem__(self, index):\n # Load fully observed light curve at fully obs times\n y, params = torch.load(osp.join(self.out_dir,\n f'drw_{index}.pt')) # [T=3650, 6]\n if self.fully_obs:\n obs_mask = slice(None)\n else:\n obs_mask = self.obs_mask\n # Trim 
the times\n x = torch.load(osp.join(self.out_dir, 'x.pt'))[obs_mask] # [trimmed_T,]\n y = y[obs_mask, :]\n # Slice relevant bandpasses\n y = y[:, self.bandpasses_int]\n # Rescale x for numerical stability of ML model\n x = self.transform_x_func(x)\n # Add noise and rescale flux to [-1, 1]\n y = self.transform_y_func(y)\n # y = (y - torch.min(y))/(torch.max(y) - torch.min(y))*2.0 - 1.0\n if self.slice_params is not None:\n params = params[self.slice_params]\n if self.log_params is not None:\n params[self.log_params] = torch.log10(params[self.log_params])\n if self.mean_params is not None:\n params -= self.mean_params\n params /= self.std_params\n # Sample observation mask\n if self.is_training:\n # Randomly drawn pointing index\n p = self.rng.integers(low=0, high=self.cadence_obj.n_pointings)\n else:\n # Do not shuffle pointing for validation set\n p = 0\n trimmed_mask = self.cadence_obj.get_trimmed_mask(p,\n as_tensor=True)\n # trimmed_mask = trimmed_mask[:, self.bandpasses_int]\n\n data = dict(x=x,\n y=y,\n params=params,\n trimmed_mask=trimmed_mask\n )\n return data\n\n def get_normalizing_metadata(self, set_metadata=True):\n loader = DataLoader(self,\n batch_size=100,\n shuffle=False,\n drop_last=False)\n mean_params = 0.0\n var_params = 0.0\n print(\"Computing normalizing metadata...\")\n # Compute mean, std\n for i, data in enumerate(loader):\n params = data['params']\n new_mean = params.mean(dim=0)\n new_var = params.var(dim=0, unbiased=False)\n var_params += (new_var - var_params)/(i+1)\n var_params += (i/(i+1)**2.0)*(mean_params - new_mean)**2.0\n mean_params += (new_mean - mean_params)/(i+1)\n std_params = var_params**0.5\n if set_metadata:\n self.mean_params = mean_params\n self.std_params = std_params\n return mean_params, std_params\n\n def __len__(self):\n return self.num_samples\n\n\nif __name__ == '__main__':\n import random\n\n class Sampler:\n def __init__(self, seed, bandpasses):\n random.seed(seed)\n np.random.seed(seed)\n self.bandpasses = bandpasses\n\n def sample(self):\n sample_dict = dict()\n for bp in self.bandpasses:\n log_sf_inf = np.maximum(np.random.randn()*0.05 + 0.2, 0.2)\n # log_sf_inf = 10**(np.random.randn(N)*(0.25) + -0.8)\n # log_sf_inf = np.ones(N)*0.15\n # tau = 10.0**np.maximum(np.random.randn(N)*0.5 + 2.0, 0.1)\n tau = np.maximum(np.random.randn()*50.0 + 200.0, 10.0)\n # mag = np.maximum(np.random.randn(N) + 19.0, 17.5)\n mag = 0.0\n # z = np.maximum(np.random.randn(N) + 2.0, 0.5)\n sample_dict[f'log_rf_tau_{bp}'] = tau\n sample_dict[f'log_sf_inf_{bp}'] = log_sf_inf\n sample_dict[f'{bp}'] = mag\n sample_dict['redshift'] = 2.0\n sample_dict['M_i'] = -16.0\n sample_dict['BH_mass'] = 10.0\n return sample_dict\n\n train_seed = 123\n sampler = Sampler(train_seed, bandpasses=['i'])\n\n train_dataset = DRWDataset(sampler, 'train_drw_s82',\n num_samples=3,\n seed=train_seed,\n shift_x=-3650*0.5,\n rescale_x=1.0/(3650*0.5)*4.0,\n delta_x=1.0,\n max_x=3650.0,\n err_y=0.01)\n train_dataset.slice_params = [train_dataset.param_names.index(n) for n in ['log_rf_taui', 'log_sf_inf_i', 'M_i']]\n train_dataset.log_params = [True, True, False]\n train_dataset.get_normalizing_metadata()\n print(train_dataset.mean_params, train_dataset.std_params)\n x, y, params = train_dataset[0]\n print(x.shape, y.shape, params.shape)\n\n\n\n" ]
[ [ "torch.ones", "numpy.random.seed", "numpy.arange", "torch.utils.data.DataLoader", "torch.from_numpy", "torch.tensor", "numpy.random.randn", "torch.arange", "torch.log10", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jld23/sasoptpy
[ "f96911f04d6c0c01fce902f1f995935583df69a8", "f96911f04d6c0c01fce902f1f995935583df69a8" ]
[ "examples/client_side/decentralization.py", "tests/core/test_model.py" ]
[ "import sasoptpy as so\nimport pandas as pd\n\n\ndef test(cas_conn):\n\n m = so.Model(name='decentralization', session=cas_conn)\n\n DEPTS = ['A', 'B', 'C', 'D', 'E']\n CITIES = ['Bristol', 'Brighton', 'London']\n\n benefit_data = pd.DataFrame([\n ['Bristol', 10, 15, 10, 20, 5],\n ['Brighton', 10, 20, 15, 15, 15]],\n columns=['city'] + DEPTS).set_index('city')\n\n comm_data = pd.DataFrame([\n ['A', 'B', 0.0],\n ['A', 'C', 1.0],\n ['A', 'D', 1.5],\n ['A', 'E', 0.0],\n ['B', 'C', 1.4],\n ['B', 'D', 1.2],\n ['B', 'E', 0.0],\n ['C', 'D', 0.0],\n ['C', 'E', 2.0],\n ['D', 'E', 0.7]], columns=['i', 'j', 'comm']).set_index(['i', 'j'])\n\n cost_data = pd.DataFrame([\n ['Bristol', 'Bristol', 5],\n ['Bristol', 'Brighton', 14],\n ['Bristol', 'London', 13],\n ['Brighton', 'Brighton', 5],\n ['Brighton', 'London', 9],\n ['London', 'London', 10]], columns=['i', 'j', 'cost']).set_index(\n ['i', 'j'])\n\n max_num_depts = 3\n\n benefit = {}\n for city in CITIES:\n for dept in DEPTS:\n try:\n benefit[dept, city] = benefit_data.loc[city, dept]\n except:\n benefit[dept, city] = 0\n\n comm = {}\n for row in comm_data.iterrows():\n (i, j) = row[0]\n comm[i, j] = row[1]['comm']\n comm[j, i] = comm[i, j]\n\n cost = {}\n for row in cost_data.iterrows():\n (i, j) = row[0]\n cost[i, j] = row[1]['cost']\n cost[j, i] = cost[i, j]\n\n assign = m.add_variables(DEPTS, CITIES, vartype=so.BIN, name='assign')\n IJKL = [(i, j, k, l)\n for i in DEPTS for j in CITIES for k in DEPTS for l in CITIES\n if i < k]\n product = m.add_variables(IJKL, vartype=so.BIN, name='product')\n\n totalBenefit = so.expr_sum(benefit[i, j] * assign[i, j]\n for i in DEPTS for j in CITIES)\n\n totalCost = so.expr_sum(comm[i, k] * cost[j, l] * product[i, j, k, l]\n for (i, j, k, l) in IJKL)\n\n m.set_objective(totalBenefit-totalCost, name='netBenefit', sense=so.MAX)\n\n m.add_constraints((so.expr_sum(assign[dept, city] for city in CITIES)\n == 1 for dept in DEPTS), name='assign_dept')\n\n m.add_constraints((so.expr_sum(assign[dept, city] for dept in DEPTS)\n <= max_num_depts for city in CITIES), name='cardinality')\n\n product_def1 = m.add_constraints((assign[i, j] + assign[k, l] - 1\n <= product[i, j, k, l]\n for (i, j, k, l) in IJKL),\n name='pd1')\n\n product_def2 = m.add_constraints((product[i, j, k, l] <= assign[i, j]\n for (i, j, k, l) in IJKL),\n name='pd2')\n\n product_def3 = m.add_constraints((product[i, j, k, l] <= assign[k, l]\n for (i, j, k, l) in IJKL),\n name='pd3')\n\n m.solve()\n print(m.get_problem_summary())\n\n m.drop_constraints(product_def1)\n m.drop_constraints(product_def2)\n m.drop_constraints(product_def3)\n\n m.add_constraints((\n so.expr_sum(product[i, j, k, l]\n for j in CITIES if (i, j, k, l) in IJKL) == assign[k, l]\n for i in DEPTS for k in DEPTS for l in CITIES if i < k),\n name='pd4')\n\n m.add_constraints((\n so.expr_sum(product[i, j, k, l]\n for l in CITIES if (i, j, k, l) in IJKL) == assign[i, j]\n for k in DEPTS for i in DEPTS for j in CITIES if i < k),\n name='pd5')\n\n m.solve()\n print(m.get_problem_summary())\n totalBenefit.set_name('totalBenefit')\n totalCost.set_name('totalCost')\n print(so.get_solution_table(totalBenefit, totalCost))\n print(so.get_solution_table(assign).unstack(level=-1))\n\n return m.get_objective_value()\n", "#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright SAS Institute\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for core classes.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom difflib import SequenceMatcher\nimport inspect\nimport os\nimport unittest\nimport warnings\nfrom inspect import cleandoc\n\nimport sasoptpy as so\nfrom tests.swat_config import create_cas_connection\n\n\nclass MockSASconfig:\n\n def __init__(self, name):\n self.name = name\n\nclass SASsession:\n\n def __init__(self, cfgname):\n import saspy\n self.sascfg = MockSASconfig(name=cfgname)\n\nclass TestModel(unittest.TestCase):\n \"\"\"\n Unit tests for :class:`sasoptpy.Model` objects\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.conn = None\n from swat import CAS, SWATError\n try:\n cls.conn = create_cas_connection()\n except SWATError:\n warnings.warn('CAS connection is not available',\n RuntimeWarning)\n except TypeError:\n warnings.warn('CAS variables are not available',\n RuntimeWarning)\n\n @classmethod\n def tearDownClass(cls):\n if cls.conn is not None:\n cls.conn.close()\n\n def setUp(self):\n pass\n\n @classmethod\n def get_standard_model(cls, name):\n m = so.Model(name=name)\n x = m.add_variable(name='x')\n y = m.add_variables(2, name='y')\n c1 = m.add_constraint(x <= 5, name='c1')\n c2 = m.add_constraints((y[i] <= 3 for i in range(2)), name='c2')\n return m\n\n def test_initialize(self):\n m = so.Model(name='test_initialize', session=None)\n self.assertEqual(type(m), so.Model)\n\n def test_comparison(self):\n model1 = so.Model(name='test_equal_1', session=None)\n model2 = so.Model(name='test_equal_2', session=None)\n self.assertFalse(model1 == model2)\n\n model3 = model1\n self.assertTrue(model1 == model3)\n\n def invalid_comparison():\n _ = model1 == list()\n self.assertWarns(RuntimeWarning, invalid_comparison)\n\n def test_get_name(self):\n m = so.Model(name='m')\n self.assertEqual(m.get_name(), 'm')\n\n def test_adding_variable(self):\n m = so.Model(name='test_add_variable')\n\n x = m.add_variable(name='x')\n y = m.add_variable(name='y', vartype=so.INT)\n z = m.add_variable(name='z', lb=1, ub=10)\n w = m.add_variable(name='w', init=5)\n u = so.Variable(name='u')\n m.include(u)\n self.assertEqual(m.get_variables(), [x, y, z, w, u])\n self.assertEqual(m.get_variable_dict(), {'x': x, 'y': y, 'z': z,\n 'w': w, 'u': u})\n self.assertIs(m.get_variable('x'), x)\n self.assertIs(m.get_variable('t'), None)\n\n def test_duplicate_variables(self):\n\n m = so.Model(name='test_duplicate_variables')\n\n def add_multi_var():\n x = m.add_variable(name='x', lb=2)\n x2 = m.add_variable(name='x', lb=1)\n\n self.assertWarns(UserWarning, add_multi_var)\n self.assertEqual(m.to_optmodel(), cleandoc(\"\"\"\n proc optmodel;\n min test_duplicate_variables_obj = 0;\n var x >= 1;\n solve;\n quit;\"\"\"))\n\n def test_dropping_variable(self):\n m = so.Model(name='test_drop_variable')\n x = m.add_variable(name='x')\n self.assertIs(m.get_variables()[0], x)\n self.assertIs(m.get_variable_dict()['x'], x)\n m.drop_variable(x)\n self.assertEqual(m.get_variables(), [])\n self.assertEqual(m.get_variable_dict(), {})\n m.include(x)\n self.assertIs(m.get_variable_dict()['x'], x)\n m.drop(x)\n self.assertEqual(m.get_variable_dict(), {})\n\n def 
test_drop_restore_var(self):\n m = so.Model(name='test_drop_restore')\n x = m.add_variable(name='x')\n y = m.add_variables(5, name='y')\n m.set_objective(y[3], sense=so.minimize, name='obj')\n self.assertEqual(m.to_optmodel(), cleandoc('''\n proc optmodel;\n var x;\n var y {{0,1,2,3,4}};\n min obj = y[3];\n solve;\n quit;'''))\n m.drop_variable(x)\n m.drop_variable(y[1])\n m.drop_variable(y[2])\n self.assertEqual(m.to_optmodel(), cleandoc('''\n proc optmodel;\n var y {{0,1,2,3,4}};\n min obj = y[3];\n drop y[1] y[2];\n solve;\n quit;'''))\n m.restore_variable(x)\n m.restore_variable(y[2])\n self.assertEqual(m.to_optmodel(), cleandoc('''\n proc optmodel;\n var x;\n var y {{0,1,2,3,4}};\n min obj = y[3];\n drop y[1];\n solve;\n quit;'''))\n\n def test_adding_vargroup(self):\n m = so.Model(name='test_add_vg')\n\n x = m.add_variables(2, name='x')\n y = m.add_variables(['a', 'b'], name='y', vartype=so.BIN)\n I = so.abstract.Set(name='I')\n z = m.add_variables(I, name='z', lb=1, ub=10, init=5)\n w = so.VariableGroup(5, name='w')\n m.include(w)\n vars = [('x', x), ('y', y), ('z', z), ('w', w)]\n self.assertEqual(m.get_grouped_variables(), OrderedDict(vars))\n self.assertIs(m.get_variable('x')[0], x[0])\n\n def test_dropping_vargroup(self):\n m = so.Model(name='test_drop_vg')\n x = m.add_variables(2, name='x')\n self.assertEqual(m.get_grouped_variables(), OrderedDict([('x', x)]))\n m.drop_variables(x)\n self.assertEqual(m.get_grouped_variables(), OrderedDict())\n m.include(x)\n self.assertEqual(m.get_grouped_variables(), OrderedDict([('x', x)]))\n m.drop(x)\n self.assertEqual(m.get_grouped_variables(), OrderedDict())\n\n def test_adding_constraint(self):\n m = so.Model(name='test_add_constraint')\n x = m.add_variable(name='x')\n\n c1 = m.add_constraint(x <= 5, name='c1')\n c2 = m.add_constraint(2 * x + x ** 5 >= 1, name='c2')\n self.assertEqual([c1, c2], m.get_constraints())\n self.assertEqual({'c1': c1, 'c2': c2}, m.get_constraints_dict())\n\n def invalid_constraint():\n from math import inf\n c3 = m.add_constraint(x <= inf, name='c3')\n self.assertRaises(ValueError, invalid_constraint)\n\n cx = m.get_constraint('c1')\n self.assertEqual(cx, c1)\n cy = m.get_constraint('c3')\n self.assertEqual(cy, None)\n\n def test_duplicate_constraints(self):\n\n m = so.Model(name='test_duplicate_constraints')\n\n def add_multi_con():\n x = m.add_variable(name='x')\n\n c1 = m.add_constraint(x <= 5, name='c')\n c2 = m.add_constraint(x <= 5, name='c')\n\n self.assertWarns(UserWarning, add_multi_con)\n self.assertEqual(m.to_optmodel(), cleandoc(\"\"\"\n proc optmodel;\n min test_duplicate_constraints_obj = 0;\n var x;\n con c : x <= 5;\n solve;\n quit;\"\"\"))\n\n def test_drop_restore_cons(self):\n m = so.Model(name='test_drop_restore_constraints')\n x = m.add_variable(name='x')\n y = m.add_variables(5, name='y')\n m.set_objective(y[3], sense=so.minimize, name='obj')\n\n c1 = m.add_constraint(x <= 5, name='c1')\n c2 = m.add_constraints((y[i] <= i for i in range(5)), name='c2')\n self.assertEqual(m.to_optmodel(), cleandoc(\"\"\"\n proc optmodel;\n var x;\n var y {{0,1,2,3,4}};\n min obj = y[3];\n con c1 : x <= 5;\n con c2_0 : y[0] <= 0;\n con c2_1 : y[1] <= 1;\n con c2_2 : y[2] <= 2;\n con c2_3 : y[3] <= 3;\n con c2_4 : y[4] <= 4;\n solve;\n quit;\"\"\"))\n m.drop_constraint(c1)\n m.drop_constraint(c2[1])\n m.drop_constraint(c2[2])\n self.assertEqual(m.to_optmodel(), cleandoc(\"\"\"\n proc optmodel;\n var x;\n var y {{0,1,2,3,4}};\n min obj = y[3];\n con c2_0 : y[0] <= 0;\n con c2_1 : y[1] <= 1;\n con c2_2 : y[2] 
<= 2;\n con c2_3 : y[3] <= 3;\n con c2_4 : y[4] <= 4;\n drop c2_1 c2_2;\n solve;\n quit;\"\"\"))\n m.restore_constraint(c1)\n m.restore_constraint(c2[2])\n self.assertEqual(m.to_optmodel(), cleandoc(\"\"\"\n proc optmodel;\n var x;\n var y {{0,1,2,3,4}};\n min obj = y[3];\n con c1 : x <= 5;\n con c2_0 : y[0] <= 0;\n con c2_1 : y[1] <= 1;\n con c2_2 : y[2] <= 2;\n con c2_3 : y[3] <= 3;\n con c2_4 : y[4] <= 4;\n drop c2_1;\n solve;\n quit;\"\"\"))\n\n def test_dropping_constraint(self):\n m = so.Model(name='test_drop_constraint')\n x = m.add_variable(name='x')\n c1 = m.add_constraint(x <= 5, name='c1')\n self.assertEqual({'c1': c1}, m.get_constraints_dict())\n m.drop_constraint(c1)\n self.assertEqual({}, m.get_constraints_dict())\n m.include(c1)\n self.assertEqual({'c1': c1}, m.get_constraints_dict())\n m.drop(c1)\n self.assertEqual({}, m.get_constraints_dict())\n\n def test_adding_constraints(self):\n m = so.Model(name='test_add_cg')\n x = m.add_variables(5, name='x')\n\n c1 = m.add_constraints((x[i] >= i for i in range(5)), name='c1')\n self.assertEqual(OrderedDict([('c1', c1)]), m.get_grouped_constraints())\n self.assertEqual(c1, m.get_constraint('c1'))\n\n c2 = so.ConstraintGroup((i * x[i] <= 10 for i in range(5)), name='c2')\n m.include(c2)\n grouped_con_dict = OrderedDict([('c1', c1), ('c2', c2)])\n self.assertEqual(grouped_con_dict, m.get_grouped_constraints())\n\n def warn_user_single_constraint():\n c3 = m.add_constraints(x[0] >= 1, name='c3')\n self.assertWarns(UserWarning, warn_user_single_constraint)\n\n def test_dropping_constraints(self):\n m = so.Model(name='test_drop_cg')\n x = m.add_variables(2, name='x')\n c1 = m.add_constraints((x[i] <= i for i in range(2)), name='c1')\n self.assertEqual(m.get_grouped_constraints(), OrderedDict([('c1', c1)]))\n m.drop_constraints(c1)\n self.assertEqual(m.get_grouped_constraints(), OrderedDict())\n m.include(c1)\n self.assertEqual(m.get_grouped_constraints(), OrderedDict([('c1', c1)]))\n m.drop(c1)\n self.assertEqual(m.get_grouped_constraints(), OrderedDict())\n\n def test_add_set(self):\n m = so.Model(name='test_add_set')\n I = m.add_set(name='I', init=2)\n self.assertEqual(m.get_sets(), [I])\n self.assertEqual(so.to_definition(m.get_sets()[0]), \"set I init 2;\")\n\n def test_add_parameter(self):\n m = so.Model(name='test_add_parameter')\n p = m.add_parameter(name='p', init=10)\n I = m.add_set(name='I')\n r = m.add_parameter(I, name='r', init=5)\n self.assertEqual([p, r], m.get_parameters())\n m.drop(r)\n self.assertEqual([p], m.get_parameters())\n\n def test_add_implicit_var(self):\n m = so.Model(name='test_add_impvar')\n x = m.add_variables(5, name='x')\n y = m.add_implicit_variable((i * x[i] + x[i] ** 2 for i in range(5)),\n name='y')\n self.assertEqual([y], m.get_implicit_variables())\n\n def test_add_literal_statement(self):\n m = so.Model(name='test_add_literal_statement')\n m.set_objective(0, name='empty_obj')\n m.add_statement('var x {0,1};')\n m.add_statement('solve;')\n self.assertEqual(\n m.to_optmodel(solve=False),\n inspect.cleandoc('''\n proc optmodel;\n min empty_obj = 0;\n var x {0,1};\n solve;\n quit;'''))\n s = so.abstract.LiteralStatement('print x;')\n m.include(s)\n self.assertEqual(\n m.to_optmodel(solve=False),\n inspect.cleandoc('''\n proc optmodel;\n min empty_obj = 0;\n var x {0,1};\n solve;\n print x;\n quit;'''))\n m.drop(s)\n self.assertEqual(\n m.to_optmodel(solve=False),\n inspect.cleandoc('''\n proc optmodel;\n min empty_obj = 0;\n var x {0,1};\n solve;\n quit;'''))\n\n\n def 
test_add_abstract_statement(self):\n m = so.Model(name='m')\n x = m.add_variable(name='x')\n m.set_objective(x ** 2, sense=so.MIN, name='obj')\n s = so.abstract.LiteralStatement('expand;')\n m.add_statement(s)\n self.assertEqual(so.to_optmodel(m), inspect.cleandoc(\"\"\"\n proc optmodel;\n var x;\n min obj = (x) ^ (2);\n expand;\n solve;\n quit;\n \"\"\"))\n\n def test_postsolve_statement(self):\n m = so.Model(name='test_postsolve_statement')\n x = m.add_variable(name='x')\n c1 = m.add_constraint(x <= 10, name='c1')\n self.assertEqual(m.to_optmodel(), inspect.cleandoc(\"\"\"\n proc optmodel;\n min test_postsolve_statement_obj = 0;\n var x;\n con c1 : x <= 10;\n solve;\n quit;\"\"\"))\n\n m.add_postsolve_statement('print x;')\n self.assertEqual(m.to_optmodel(), inspect.cleandoc(\"\"\"\n proc optmodel;\n min test_postsolve_statement_obj = 0;\n var x;\n con c1 : x <= 10;\n solve;\n print x;\n quit;\"\"\"))\n\n m.add_postsolve_statement(so.abstract.LiteralStatement('expand;'))\n self.assertEqual(m.to_optmodel(), inspect.cleandoc(\"\"\"\n proc optmodel;\n min test_postsolve_statement_obj = 0;\n var x;\n con c1 : x <= 10;\n solve;\n print x;\n expand;\n quit;\"\"\"))\n\n def test_include_model(self):\n m1 = so.Model(name='test_copy_model_1')\n x = m1.add_variable(name='x')\n y = m1.add_variables(2, name='y')\n c1 = m1.add_constraint(x + y[0] >= 2, name='c1')\n c2 = m1.add_constraints((x - y[i] <= 10 for i in range(2)), name='c2')\n m1.set_objective(2 * x + y[0] + 3 * y[1], name='model_obj')\n\n m2 = so.Model(name='test_copy_model_2')\n m2.include(m1)\n vars = OrderedDict([('x', x), ('y', y)])\n self.assertEqual(m2.get_grouped_variables(), vars)\n cons = OrderedDict([('c1', c1), ('c2', c2)])\n self.assertEqual(m2.get_grouped_constraints(), cons)\n self.assertEqual(m2.to_optmodel(),inspect.cleandoc(\"\"\"\n proc optmodel;\n var x;\n var y {{0,1}};\n con c1 : x + y[0] >= 2;\n con c2_0 : x - y[0] <= 10;\n con c2_1 : x - y[1] <= 10;\n min model_obj = 2 * x + y[0] + 3 * y[1];\n solve;\n quit;\"\"\"))\n\n def test_set_get_objective(self):\n m = so.Model(name='test_set_get_objective')\n x = m.add_variable(name='x')\n\n # Regular objective\n obj1 = m.set_objective(2 * x, sense=so.MIN, name='obj1')\n self.assertIs(obj1, m.get_objective())\n\n # Multi objective\n obj2 = m.set_objective(5 * x, sense=so.MIN, name='obj2')\n self.assertIs(obj2, m.get_objective())\n obj3 = m.append_objective(10 * x, sense=so.MIN, name='obj3')\n self.assertEqual([obj2, obj3], m.get_all_objectives())\n self.assertEqual(\n m.to_optmodel(),\n inspect.cleandoc(\"\"\"\n proc optmodel;\n var x;\n min obj2 = 5 * x;\n min obj3 = 10 * x;\n solve;\n quit;\"\"\"))\n\n def test_get_objective_value(self):\n m = so.Model(name='test_objective_value')\n x = m.add_variable(name='x')\n m.set_objective(x ** 2 - 4 * x + 5, sense=so.MIN, name='nonlinear')\n x.set_value(3)\n self.assertEqual(m.get_objective_value(), 2)\n\n if TestModel.conn:\n m.set_session(TestModel.conn)\n m.solve()\n self.assertEqual(m.get_objective_value(), 1)\n self.assertEqual(x.get_value(), 2)\n else:\n self.skipTest('No CAS connection available, skipping ' +\n 'objective value test')\n\n def zero_div_error():\n m.set_objective(x / x, sense=so.MIN, name='nonlinear2')\n x.set_value(0)\n m.clear_solution()\n m.get_objective_value()\n self.assertRaises(ZeroDivisionError, zero_div_error)\n\n def test_variable_coef(self):\n m = so.Model(name='test_get_variable_coef')\n x = m.add_variable(name='x')\n m.set_objective(5 * x, sense=so.MIN, name='obj1')\n\n 
self.assertEqual(m.get_variable_coef(x), 5)\n self.assertEqual(m.get_variable_coef('x'), 5)\n\n y = so.Variable(name='y')\n def variable_not_in_model():\n return m.get_variable_coef(y)\n self.assertRaises(RuntimeError, variable_not_in_model)\n\n m.set_objective(2 * x + y ** 2, sense=so.MIN, name='obj1')\n self.assertEqual(m.get_variable_coef('x'), 2)\n def nonlinear_objective():\n return m.get_variable_coef('y')\n self.assertWarns(RuntimeWarning, nonlinear_objective)\n\n def test_get_variable_value(self):\n\n if TestModel.conn is None:\n self.skipTest('Session is not available')\n\n m = so.Model(name='test_get_var_value')\n x = m.add_variable(name='x', lb=1.5, ub=10, vartype=so.INT)\n m.set_objective(x, sense=so.MIN, name='obj1')\n m.set_session(TestModel.conn)\n m.solve(verbose=True)\n self.assertEqual(m.get_variable_value(x), 2)\n\n I = m.add_set(name='I', value=range(2))\n y = m.add_variables(I, name='y', lb=0.5)\n m.set_objective(x + y[0] + y[1], sense=so.MIN, name='obj1')\n m.solve()\n self.assertEqual(m.get_variable_value(y[0]), 0.5)\n def get_variable_warning():\n self.assertEqual(m.get_variable_value('z'), None)\n self.assertWarns(UserWarning, get_variable_warning)\n\n m2 = so.Model(name='test_get_var_value_copy')\n m2.include(m)\n z = so.Variable(name='z')\n def raise_solution_error():\n return m2.get_variable_value(z)\n self.assertRaises(RuntimeError, raise_solution_error)\n\n m.add_variable(name='var with invalid name')\n def raise_syntax_error():\n return m.solve()\n self.assertRaises(SyntaxError, raise_syntax_error)\n\n def test_get_variable_value_abstract(self):\n if TestModel.conn is None:\n self.skipTest('Session is not available')\n\n import pandas as pd\n so.reset()\n\n m = so.Model(name='abstract_model')\n df = pd.DataFrame([\n ['a', 1],\n ['b', 2]\n ], columns=['tag', 'val'])\n idx = so.Set(name='idx', settype=so.STR)\n varlb = so.ParameterGroup(idx, name='varlb')\n m.include(idx, varlb)\n\n table = TestModel.conn.upload_frame(df, casout='server_data')\n from sasoptpy.actions import read_data\n r = read_data(\n table=table,\n index={'target': idx, 'key': 'tag'},\n columns=[\n {'target': varlb, 'column': 'val'}\n ]\n )\n m.include(r)\n y = so.VariableGroup(idx, name='y')\n c = so.ConstraintGroup((y[i] >= varlb[i] for i in idx), name='c')\n m.include(y, c)\n self.assertEqual(m.to_optmodel(), inspect.cleandoc(\"\"\"\n proc optmodel;\n min abstract_model_obj = 0;\n set <str> idx;\n num varlb {idx};\n read data SERVER_DATA into idx=[tag] varlb=val;\n var y {{idx}};\n con c {o8 in idx} : y[o8] - varlb[o8] >= 0;\n solve;\n quit;\n \"\"\"))\n m.set_session(TestModel.conn)\n m.solve()\n self.assertEqual(m.get_variable_value(y['a']), 1)\n self.assertEqual(m.get_statements(), [r])\n\n\n def test_get_summaries(self):\n if not TestModel.conn:\n self.skipTest('Session is not available')\n m = so.Model(name='test_get_summaries', session=TestModel.conn)\n x = m.add_variable(name='x', lb=1)\n y = m.add_variables(2, name='y', lb=1)\n m.set_objective(x + y[0], sense=so.MIN, name='obj1')\n m.add_constraint(x + 2 *y[0] + 3*y[1] >= 10, name='con1')\n m.solve()\n self.assertEqual(m.get_problem_summary().to_string(),\n inspect.cleandoc(\"\"\"\n Value\n Label \n Objective Sense Minimization\n Objective Function obj1\n Objective Type Linear\n \n Number of Variables 3\n Bounded Above 0\n Bounded Below 3\n Bounded Below and Above 0\n Free 0\n Fixed 0\n \n Number of Constraints 1\n Linear LE (<=) 0\n Linear EQ (=) 0\n Linear GE (>=) 1\n Linear Range 0\n \n Constraint Coefficients 3\"\"\"))\n\n seq 
= SequenceMatcher(None, m.get_solution_summary().to_string(),\n inspect.cleandoc(\n \"\"\"\n Value\n Label \n Solver LP\n Algorithm Dual Simplex\n Objective Function obj1\n Solution Status Optimal\n Objective Value 2\n \n Primal Infeasibility 0\n Dual Infeasibility 0\n Bound Infeasibility 0\n \n Iterations 0\n Presolve Time 0.00\n Solution Time 0.00\"\"\"\n ))\n # There is a chance that the solution time is slightly different\n self.assertTrue(seq.ratio() > 0.99)\n\n def test_get_solution(self):\n if not TestModel.conn:\n self.skipTest('No session is defined, skipping get solution test')\n import pandas as pd\n m = so.Model(name='test_get_soln', session=TestModel.conn)\n data = [\n ['pen', 1, 3, 11],\n ['mug', 15, 10, 5],\n ['watch', 50, 2, 2],\n ['pc', 1500, 200, 1]\n ]\n data = pd.DataFrame(data, columns=['item', 'value', 'weight', 'ub'])\n data = data.set_index(['item'])\n items = data.index\n get = m.add_variables(items, name='get', vartype=so.INT, lb=0)\n value = data['value']\n weight = data['weight']\n ub = data['ub']\n m.set_objective(so.expr_sum(get[i] * value[i] for i in items),\n sense=so.MAX, name='obj1')\n m.add_constraint(so.expr_sum(get[i] * weight[i] for i in items)\n <= 210, name='value_total')\n m.add_constraints((get[i] <= ub[i] for i in items), name='upper_bound')\n\n # Regular solve and regular get\n m.solve(verbose=True)\n self.assertEqual(m.get_solution().to_string(), inspect.cleandoc(\n \"\"\"\n i var value lb ub rc\n 0 1.0 get[pen] 2.0 -0.0 1.797693e+308 NaN\n 1 2.0 get[mug] -0.0 -0.0 1.797693e+308 NaN\n 2 3.0 get[watch] 2.0 -0.0 1.797693e+308 NaN\n 3 4.0 get[pc] 1.0 -0.0 1.797693e+308 NaN\n \"\"\"\n ))\n\n self.assertEqual(m.get_solution(vtype='dual').to_string(),\n inspect.cleandoc(\n \"\"\"\n j con value dual\n 0 1.0 value_total 210.0 NaN\n 1 2.0 upper_bound_pen 2.0 NaN\n 2 3.0 upper_bound_mug -0.0 NaN\n 3 4.0 upper_bound_watch 2.0 NaN\n 4 5.0 upper_bound_pc 1.0 NaN\n \"\"\"\n ))\n\n m.solve(mps=True, options={'maxpoolsols': 3}, verbose=True)\n self.assertEqual(m.get_solution().to_string(), inspect.cleandoc(\n \"\"\"\n var lb ub value solution\n 0 get[pen] 0.0 1.797693e+308 2.0 1.0\n 1 get[mug] 0.0 1.797693e+308 0.0 1.0\n 2 get[watch] 0.0 1.797693e+308 2.0 1.0\n 3 get[pc] 0.0 1.797693e+308 1.0 1.0\n 4 get[pen] 0.0 1.797693e+308 1.0 2.0\n 5 get[mug] 0.0 1.797693e+308 0.0 2.0\n 6 get[watch] 0.0 1.797693e+308 1.0 2.0\n 7 get[pc] 0.0 1.797693e+308 1.0 2.0\n 8 get[pen] 0.0 1.797693e+308 0.0 3.0\n 9 get[mug] 0.0 1.797693e+308 0.0 3.0\n 10 get[watch] 0.0 1.797693e+308 0.0 3.0\n 11 get[pc] 0.0 1.797693e+308 0.0 3.0\n \"\"\"\n ))\n self.assertEqual(m.get_solution('dual').to_string(), inspect.cleandoc(\n \"\"\"\n con value solution\n 0 value_total 210.0 1.0\n 1 upper_bound['pen'] 2.0 1.0\n 2 upper_bound['mug'] 0.0 1.0\n 3 upper_bound['watch'] 2.0 1.0\n 4 upper_bound['pc'] 1.0 1.0\n 5 value_total 205.0 2.0\n 6 upper_bound['pen'] 1.0 2.0\n 7 upper_bound['mug'] 0.0 2.0\n 8 upper_bound['watch'] 1.0 2.0\n 9 upper_bound['pc'] 1.0 2.0\n 10 value_total 0.0 3.0\n 11 upper_bound['pen'] 0.0 3.0\n 12 upper_bound['mug'] 0.0 3.0\n 13 upper_bound['watch'] 0.0 3.0\n 14 upper_bound['pc'] 0.0 3.0\n \"\"\"\n ))\n self.assertEqual(m.get_solution(pivot=True).to_string(),\n inspect.cleandoc(\n \"\"\"\n solution 1.0 2.0 3.0\n var \n get[mug] 0.0 0.0 0.0\n get[pc] 1.0 1.0 0.0\n get[pen] 2.0 1.0 0.0\n get[watch] 2.0 1.0 0.0\n \"\"\"\n ))\n self.assertEqual(m.get_solution('dual', pivot=True).to_string(),\n inspect.cleandoc(\n \"\"\"\n solution 1.0 2.0 3.0\n con \n upper_bound['mug'] 0.0 0.0 0.0\n 
upper_bound['pc'] 1.0 1.0 0.0\n upper_bound['pen'] 2.0 1.0 0.0\n upper_bound['watch'] 2.0 1.0 0.0\n value_total 210.0 205.0 0.0\n \"\"\"\n ))\n self.assertEqual(m.get_solution('primal', solution=2).to_string(),\n inspect.cleandoc(\n \"\"\"\n var lb ub value solution\n 4 get[pen] 0.0 1.797693e+308 1.0 2.0\n 5 get[mug] 0.0 1.797693e+308 0.0 2.0\n 6 get[watch] 0.0 1.797693e+308 1.0 2.0\n 7 get[pc] 0.0 1.797693e+308 1.0 2.0\n \"\"\"\n ))\n self.assertEqual(m.get_solution('dual', solution=3).to_string(),\n inspect.cleandoc(\n \"\"\"\n con value solution\n 10 value_total 0.0 3.0\n 11 upper_bound['pen'] 0.0 3.0\n 12 upper_bound['mug'] 0.0 3.0\n 13 upper_bound['watch'] 0.0 3.0\n 14 upper_bound['pc'] 0.0 3.0\n \"\"\"\n ))\n m.print_solution()\n\n def third_type():\n m.get_solution('x')\n self.assertRaises(ValueError, third_type)\n\n def test_set_coef(self):\n m = so.Model(name='test_set_coef')\n x = m.add_variable(name='x')\n y = m.add_variables(2, name='y')\n z = m.add_variable(name='z')\n obj = m.set_objective(2*x + 3*y[0] + 2*y[1], name='obj', sense=so.MIN)\n c1 = m.add_constraint(2* x + 5 * y[0] + 7 * y[1] <= 15, name='c1')\n self.assertEqual(m.get_variable_coef(x), 2)\n m.set_variable_coef(x, 3)\n self.assertEqual(m.get_variable_coef(x), 3)\n self.assertEqual(m.get_variable_coef(z), 0)\n m.set_variable_coef(z, 1)\n self.assertEqual(m.get_variable_coef(z), 1)\n\n def test_to_mps(self):\n m = so.Model(name='test_to_mps')\n x = m.add_variable(name='x', lb=0, ub=5, vartype=so.INT)\n y = m.add_variables(2, name='y', lb=1)\n m.set_objective(x + y[0], sense=so.MIN, name='xyobj')\n self.assertEqual(m.to_mps().to_string(), inspect.cleandoc(\n \"\"\"\n Field1 Field2 Field3 Field4 Field5 Field6 _id_\n 0 NAME test_to_mps 0.0 0.0 1\n 1 ROWS NaN NaN 2\n 2 MIN xyobj NaN NaN 3\n 3 COLUMNS NaN NaN 4\n 4 MARK0000 'MARKER' NaN 'INTORG' NaN 5\n 5 x xyobj 1.0 NaN 6\n 6 MARK0001 'MARKER' NaN 'INTEND' NaN 7\n 7 y[0] xyobj 1.0 NaN 8\n 8 y[1] xyobj 0.0 NaN 9\n 9 RHS NaN NaN 10\n 10 RANGES NaN NaN 11\n 11 BOUNDS NaN NaN 12\n 12 LO BND x 0.0 NaN 13\n 13 UP BND x 5.0 NaN 14\n 14 LO BND y[0] 1.0 NaN 15\n 15 LO BND y[1] 1.0 NaN 16\n 16 ENDATA 0.0 0.0 17\n \"\"\"\n ))\n m.set_objective(x + 10, name='o', sense=so.MAX)\n self.assertEqual(m.to_mps(constant=True).to_string(),\n inspect.cleandoc(\n \"\"\"\n Field1 Field2 Field3 Field4 Field5 Field6 _id_\n 0 NAME test_to_mps 0.0 0.0 1\n 1 ROWS NaN NaN 2\n 2 MAX o_constant NaN NaN 3\n 3 COLUMNS NaN NaN 4\n 4 MARK0000 'MARKER' NaN 'INTORG' NaN 5\n 5 x o_constant 1.0 NaN 6\n 6 MARK0001 'MARKER' NaN 'INTEND' NaN 7\n 7 y[0] o_constant 0.0 NaN 8\n 8 y[1] o_constant 0.0 NaN 9\n 9 obj_constant o_constant 1.0 NaN 10\n 10 RHS NaN NaN 11\n 11 RANGES NaN NaN 12\n 12 BOUNDS NaN NaN 13\n 13 LO BND x 0.0 NaN 14\n 14 UP BND x 5.0 NaN 15\n 15 LO BND y[0] 1.0 NaN 16\n 16 LO BND y[1] 1.0 NaN 17\n 17 FX BND obj_constant 10.0 NaN 18\n 18 ENDATA 0.0 0.0 19\n \"\"\"\n ))\n\n # Add invalid constraints for the frame\n c1 = m.add_constraint(y[0] + x >= 0, name='zero_lb')\n c2 = m.add_constraint(y[0] <= 100, name='inf_ub')\n from math import inf\n c2.set_rhs(inf)\n self.assertEqual(m.to_mps().to_string(), inspect.cleandoc(\n \"\"\"\n Field1 Field2 Field3 Field4 Field5 Field6 _id_\n 0 NAME test_to_mps 0.0 0.0 1\n 1 ROWS NaN NaN 2\n 2 MAX o_constant NaN NaN 3\n 3 G zero_lb NaN NaN 4\n 4 L inf_ub NaN NaN 5\n 5 COLUMNS NaN NaN 6\n 6 MARK0000 'MARKER' NaN 'INTORG' NaN 7\n 7 x o_constant 1.0 zero_lb 1.0 8\n 8 MARK0001 'MARKER' NaN 'INTEND' NaN 9\n 9 y[0] zero_lb 1.0 inf_ub 1.0 10\n 10 y[1] o_constant 0.0 NaN 
11\n 11 obj_constant o_constant 1.0 NaN 12\n 12 RHS NaN NaN 13\n 13 RANGES NaN NaN 14\n 14 BOUNDS NaN NaN 15\n 15 LO BND x 0.0 NaN 16\n 16 UP BND x 5.0 NaN 17\n 17 LO BND y[0] 1.0 NaN 18\n 18 LO BND y[1] 1.0 NaN 19\n 19 FX BND obj_constant 10.0 NaN 20\n 20 ENDATA 0.0 0.0 21\n \"\"\"\n ))\n\n u = m.add_variable(name='u')\n t = m.add_variable(name='t', vartype=so.BIN)\n m.drop_constraints(c1, c2)\n m.add_constraint(x + 2*y[0] == [3, 8], name='range_con')\n self.assertEqual(m.to_mps().to_string(), inspect.cleandoc(\n \"\"\"\n Field1 Field2 Field3 Field4 Field5 Field6 _id_\n 0 NAME test_to_mps 0.0 0.0 1\n 1 ROWS NaN NaN 2\n 2 MAX o_constant NaN NaN 3\n 3 E range_con NaN NaN 4\n 4 COLUMNS NaN NaN 5\n 5 MARK0000 'MARKER' NaN 'INTORG' NaN 6\n 6 x o_constant 1.0 range_con 1.0 7\n 7 MARK0001 'MARKER' NaN 'INTEND' NaN 8\n 8 y[0] range_con 2.0 NaN 9\n 9 y[1] o_constant 0.0 NaN 10\n 10 obj_constant o_constant 1.0 NaN 11\n 11 u o_constant 0.0 NaN 12\n 12 t o_constant 0.0 NaN 13\n 13 RHS NaN NaN 14\n 14 RHS range_con 3.0 NaN 15\n 15 RANGES NaN NaN 16\n 16 rng range_con 5.0 NaN 17\n 17 BOUNDS NaN NaN 18\n 18 LO BND x 0.0 NaN 19\n 19 UP BND x 5.0 NaN 20\n 20 LO BND y[0] 1.0 NaN 21\n 21 LO BND y[1] 1.0 NaN 22\n 22 FX BND obj_constant 10.0 NaN 23\n 23 FR BND u NaN NaN 24\n 24 BV BND t 1.0 NaN 25\n 25 ENDATA 0.0 0.0 26\n \"\"\"\n ))\n\n def get_frame_warning():\n r = m.to_frame()\n self.assertWarns(DeprecationWarning, get_frame_warning)\n\n def test_to_optmodel(self):\n m = so.Model(name='test_to_optmodel')\n self.assertEqual(m.to_optmodel(), inspect.cleandoc(\n \"\"\"\n proc optmodel;\n min test_to_optmodel_obj = 0;\n solve;\n quit;\n \"\"\"\n ))\n x = m.add_variable(name='x', init=5)\n e1 = m.set_objective(x, sense=so.MIN, name='e1')\n e2 = m.append_objective(x**2, sense=so.MAX, name='e2')\n response = m.to_optmodel(options={\n 'with': 'blackbox',\n 'relaxint': True,\n 'obj': (e1, e2),\n 'primalin': True,\n }, ods=True, primalin=True, parse=False)\n self.assertEqual(response, inspect.cleandoc(\n \"\"\"\n proc optmodel;\n var x init 5;\n min e1 = x;\n max e2 = (x) ^ (2);\n solve with blackbox relaxint obj (e1 e2) / primalin;\n ods output PrintTable=primal_out;\n ods output PrintTable=dual_out;\n create data allsols from [s]=(1.._NVAR_) name=_VAR_[s].name {j in 1.._NSOL_} <col('sol_'||j)=_VAR_[s].sol[j]>;\n quit;\n \"\"\"\n ))\n\n response = m.to_optmodel(options={\n 'with': 'nlp',\n 'multistart': {'loglevel': 3, 'maxstarts': 30}\n })\n self.assertEqual(response, inspect.cleandoc(\n \"\"\"\n proc optmodel;\n var x init 5;\n min e1 = x;\n max e2 = (x) ^ (2);\n solve with nlp / multistart=(loglevel=3,maxstarts=30);\n quit;\n \"\"\"\n ))\n\n def test_str(self):\n m = TestModel.get_standard_model(name='test_model_str')\n\n response = str(m)\n self.assertEqual(response, inspect.cleandoc(\n \"\"\"\n Model: [\n Name: test_model_str\n Objective: MIN [0]\n Variables (3): [\n x\n y[0]\n y[1]\n ]\n Constraints (3): [\n x <= 5\n y[0] <= 3\n y[1] <= 3\n ]\n ]\n \"\"\"\n ))\n if TestModel.conn:\n m.set_session(TestModel.conn)\n response = str(m)\n self.assertEqual(response, inspect.cleandoc(\n \"\"\"\n Model: [\n Name: test_model_str\n Session: {}:{}\n Objective: MIN [0]\n Variables (3): [\n x\n y[0]\n y[1]\n ]\n Constraints (3): [\n x <= 5\n y[0] <= 3\n y[1] <= 3\n ]\n ]\n \"\"\".format(os.environ.get('CASHOST'), os.environ.get('CASPORT'))\n ))\n\n def test_model_repr(self):\n m = so.Model(name='test_model_repr')\n self.assertEqual(repr(m), \"sasoptpy.Model(name='test_model_repr')\")\n s = 
SASsession(cfgname='winlocal')\n m.set_session(s)\n self.assertEqual(\n repr(m),\n \"sasoptpy.Model(name='test_model_repr', \"\n \"session=saspy.SASsession(cfgname='winlocal'))\")\n\n if TestModel.conn:\n m.set_session(TestModel.conn)\n cas_repr = repr(m.get_session())\n self.assertEqual(\n repr(m), \"sasoptpy.Model(name='test_model_repr', session=\" +\n cas_repr + ')')\n\n def invalid_session_type():\n w = 5\n m.set_session(w)\n rp = repr(m)\n self.assertRaises(TypeError, invalid_session_type)\n\n def test_defn(self):\n m = TestModel.get_standard_model('test_model_defn')\n self.assertEqual(so.to_definition(m), \"problem test_model_defn \"\n \"include x y c1 c2;\")\n\n def test_expr(self):\n m = TestModel.get_standard_model('test_model_expr')\n self.assertEqual(m.to_optmodel(), so.to_expression(m))\n\n def test_is_linear(self):\n m = TestModel.get_standard_model('test_model_linearity')\n self.assertEqual(so.is_linear(m), True)\n x = m.get_variable('x')\n qbound = m.add_constraint(x ** 2 + x <= 10, name='qbound')\n self.assertEqual(so.is_linear(m), False)\n m.drop_constraint(qbound)\n self.assertEqual(so.is_linear(m), True)\n m.set_objective(x ** 2, sense=so.MIN, name='x_squared')\n self.assertEqual(so.is_linear(m), False)\n\n def test_session_type(self):\n m = TestModel.get_standard_model('test_model_session_type')\n self.assertEqual(m.get_session_type(), None)\n if TestModel.conn:\n m.set_session(TestModel.conn)\n self.assertEqual(m.get_session_type(), 'CAS')\n\n def test_ub_set(self):\n m = so.Model(name='test_model_var_ub')\n x = m.add_variable(name='x')\n self.assertEqual(so.to_optmodel(m), cleandoc('''\n proc optmodel;\n min test_model_var_ub_obj = 0;\n var x;\n solve;\n quit;'''))\n x.set_bounds(ub=5)\n self.assertEqual(so.to_optmodel(m), cleandoc('''\n proc optmodel;\n min test_model_var_ub_obj = 0;\n var x <= 5;\n solve;\n quit;'''))\n\n def test_model_add(self):\n m = so.Model(name='test_add')\n x = so.Variable(name='x')\n self.assertEqual(m.get_variables(), [])\n m.add(x)\n self.assertEqual(m.get_variables(), [x])\n\n def test_model_session(self):\n m = so.Model(name='m')\n s = m.get_session()\n self.assertEqual(s, None)\n if TestModel.conn:\n m.set_session(TestModel.conn)\n self.assertEqual(m.get_session(), TestModel.conn)\n self.assertEqual(m.get_session_type(), 'CAS')\n\n def test_names(self):\n if TestModel.conn is None:\n self.skipTest('Session is not available')\n\n m = so.Model(name='test_var_names', session=TestModel.conn)\n a = ['apple', 'apple juice']\n x = m.add_variables(a, name='amount', lb=1)\n m.set_objective(so.expr_sum(x[i] for i in a), name='obj', sense=so.minimize)\n\n m.solve()\n for i in a:\n self.assertEqual(x[i].get_value(), 1.0)\n\n def test_export(self):\n m = TestModel.get_standard_model('test_model_export')\n x = m.get_variable('x')\n mps_text = m.export_mps(fetch=True)\n print(mps_text)\n self.assertEqual(mps_text.replace(' ', ''), inspect.cleandoc(\n \"\"\"\n NAME test_model_export\n ROWS\n MIN test_model_export_obj\n L c1\n L c2[0]\n L c2[1]\n COLUMNS\n x c1 1.0\n y[0] c2[0] 1.0\n y[1] c2[1] 1.0\n RHS\n RHS c1 5.0 c2[0] 3.0\n RHS c2[1] 3.0\n RANGES\n BOUNDS\n FR BND x\n FR BND y[0]\n FR BND y[1]\n ENDATA\"\"\"\n ).replace(' ', ''))\n \n m.add_constraint(x ** 2 + x <= 10, name='qb')\n def generate_error():\n m.export_mps()\n self.assertRaises(ValueError, generate_error)\n\n\n def tearDown(self):\n so.reset()\n\n\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
kingsj0405/Explorable-Super-Resolution
[ "6582477ec1e2b0c6f4bd781552ac880fabdb4496", "6582477ec1e2b0c6f4bd781552ac880fabdb4496" ]
[ "codes/models/modules/architecture.py", "codes/scripts/extract_subimgs_single.py" ]
[ "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torchvision\nfrom . import block as B\nfrom . import spectral_norm as SN\nimport functools\nimport numpy as np\nimport os\nimport models.modules.archs_util as arch_util\nimport torch.nn.functional as F\nimport re\n\n####################\n# Generator\n####################\nclass MSRResNet(nn.Module):\n ''' modified SRResNet'''\n\n def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n super(MSRResNet, self).__init__()\n self.upscale = upscale\n\n self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n basic_block = functools.partial(arch_util.ResidualBlock_noBN, nf=nf)\n self.recon_trunk = arch_util.make_layer(basic_block, nb)\n\n # upsampling\n if self.upscale == 2:\n self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n self.pixel_shuffle = nn.PixelShuffle(2)\n elif self.upscale == 3:\n self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n self.pixel_shuffle = nn.PixelShuffle(3)\n elif self.upscale == 4:\n self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n self.pixel_shuffle = nn.PixelShuffle(2)\n\n self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n # initialization\n arch_util.initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last],\n 0.1)\n if self.upscale == 4:\n arch_util.initialize_weights(self.upconv2, 0.1)\n\n def forward(self, x):\n fea = self.lrelu(self.conv_first(x))\n out = self.recon_trunk(fea)\n\n if self.upscale == 4:\n out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n elif self.upscale == 3 or self.upscale == 2:\n out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n out = self.conv_last(self.lrelu(self.HRconv(out)))\n base = F.interpolate(x, scale_factor=self.upscale, mode='bilinear', align_corners=False)\n out += base\n return out\n\n\nclass SRResNet(nn.Module):\n def __init__(self, in_nc, out_nc, nf, nb, upscale=4, norm_type='batch', act_type='relu', \\\n mode='NAC', res_scale=1, upsample_mode='upconv',range_correction=False):\n super(SRResNet, self).__init__()\n n_upscale = int(math.log(upscale, 2))\n if upscale == 3:\n n_upscale = 1\n\n fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)\n resnet_blocks = [B.ResNetBlock(nf, nf, nf, norm_type=norm_type, act_type=act_type,\\\n mode=mode, res_scale=res_scale) for _ in range(nb)]\n LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)\n\n if upsample_mode == 'upconv':\n upsample_block = B.upconv_blcok\n elif upsample_mode == 'pixelshuffle':\n upsample_block = B.pixelshuffle_block\n else:\n raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))\n if upscale == 3:\n upsampler = upsample_block(nf, nf, 3, act_type=act_type)\n else:\n upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]\n HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)\n HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)\n\n self.model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*resnet_blocks, LR_conv)),\\\n *upsampler, HR_conv0, HR_conv1)\n self.range_correction = bool(range_correction)\n\n def forward(self, x):\n x = self.model(x)\n if 
self.range_correction:\n x = x/8e-6*0.3+0.45\n return x\n\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\nclass DnCNN(nn.Module):\n def __init__(self, n_channels, depth, kernel_size = 3, in_nc=64, out_nc=64, norm_type='batch', act_type='leakyrelu',\n latent_input=None,num_latent_channels=None,discriminator=False,expected_input_size=None,chroma_generator=False,spectral_norm=False,pooling_no_FC=False):\n super(DnCNN, self).__init__()\n # assert in_nc in [64,128] and out_nc==64,'Currently only supporting 64 DCT channels'\n assert act_type=='leakyrelu'\n assert norm_type in ['batch','instance','layer',None]\n # self.average_err_collection_counter = 0\n # self.average_abs_err_estimates = np.zeros([8,8])\n self.discriminator_net = discriminator\n if discriminator:\n # Ideally I should not use padding for the discriminator model. I do use padding in the first layers if the input size is too small,\n # so that the dimension of the fully connected layer's input would be at least MIN_DCT_DIMS_4_D x MIN_DCT_DIMS_4_D\n MIN_DCT_DIMS_4_D = 5\n num_padded_layers = max(0,depth-int(np.floor((expected_input_size-MIN_DCT_DIMS_4_D)/(kernel_size-1))))\n layer_num = 0\n self.pooling_no_FC = pooling_no_FC\n else:\n spectral_norm = False\n self.chroma_generator = chroma_generator\n if chroma_generator:\n self.block_size = np.sqrt(out_nc/2)\n assert self.block_size==np.round(self.block_size)\n self.block_size = int(self.block_size)\n padding = kernel_size//2\n self.latent_input = latent_input\n self.num_latent_channels = num_latent_channels\n # if latent_input is None or 'all_layers' not in latent_input or num_latent_channels is None:\n if latent_input not in ['all_layers','first_layer'] or num_latent_channels is None:\n self.num_latent_channels = 0\n\n layers = []\n if self.discriminator_net and layer_num>=num_padded_layers:\n expected_input_size -= (kernel_size - 1)\n padding = 0\n layers.append(nn.Conv2d(in_channels=in_nc+self.num_latent_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding,bias=True))\n if spectral_norm:\n layers[-1] = SN.spectral_norm(layers[-1])\n layers.append(nn.ReLU(inplace=True))\n for layer_num in range(1,depth - 2+1):\n if self.discriminator_net and layer_num>=num_padded_layers:\n expected_input_size -= (kernel_size-1)\n padding = 0\n layers.append(nn.Conv2d(in_channels=n_channels+self.num_latent_channels*(self.latent_input=='all_layers'), out_channels=n_channels, kernel_size=kernel_size, padding=padding,bias=False))\n if spectral_norm:\n layers[-1] = SN.spectral_norm(layers[-1])\n if norm_type=='batch':\n layers.append(nn.BatchNorm2d(n_channels, eps=0.0001, momentum=0.95))\n elif norm_type=='layer':\n layers.append(nn.LayerNorm(normalized_shape=[n_channels,expected_input_size,expected_input_size],elementwise_affine=False))\n elif norm_type=='instance':\n layers.append(nn.InstanceNorm2d(n_channels))\n layers.append(nn.LeakyReLU(inplace=True))\n layer_num += 1\n if self.discriminator_net and layer_num >= num_padded_layers:\n expected_input_size -= (kernel_size - 1)\n padding = 0\n layers.append(nn.Conv2d(in_channels=n_channels+self.num_latent_channels*(self.latent_input=='all_layers'),\n out_channels=1 if (self.discriminator_net and self.pooling_no_FC) else out_nc, kernel_size=kernel_size, padding=padding,\n bias=self.discriminator_net and self.pooling_no_FC)) #When using a fully convolutional D (when pooling_no_FC), allowing bias in the final layer.\n if spectral_norm:\n layers[-1] = SN.spectral_norm(layers[-1])\n 
if self.discriminator_net:\n layers.append(Flatten())\n if not self.pooling_no_FC:\n layers.append(nn.Linear(in_features=out_nc*(expected_input_size**2),out_features=1))\n if spectral_norm:\n layers[-1] = SN.spectral_norm(layers[-1])\n # layers.append(nn.Linear(in_features=64, out_features=1))\n else:\n layers.append(nn.Sigmoid())\n if False and self.discriminator_net:\n self.dncnn = nn.Sequential(*layers)\n else:\n self.dncnn = nn.ModuleList(layers)\n\n def forward(self, x):\n if False and self.discriminator_net:\n return self.dncnn(x)\n else:\n latent_input, quantized_coeffs = torch.split(x, split_size_or_sections=[self.num_latent_channels,x.size(1)-self.num_latent_channels], dim=1)\n x = 1*quantized_coeffs\n for i, module in enumerate(self.dncnn):\n if self.num_latent_channels>0 and (self.latent_input=='all_layers' or (self.latent_input=='first_layer' and i==0)) and isinstance(module,nn.Conv2d):\n # if self.num_latent_channels>0 and self.latent_input is not None and 'all_layers' in self.latent_input and isinstance(module,nn.Conv2d):\n if self.discriminator_net and latent_input.size(2)!=x.size(2):\n x = torch.cat([torch.nn.functional.interpolate(input=latent_input,size=x.size()[2:],mode='bilinear',align_corners=False),x],dim=1)\n else:\n x = torch.cat([latent_input,x],dim=1)\n x = module(x)\n if self.discriminator_net:\n return torch.mean(x,dim=1,keepdim=True) # Averaging for the case of pooling instead of having a final FC layer. Otherwise it doesn't matter because x.shape[1]=1 anyway.\n quantization_err_estimation = x-0.5\n # quantization_err_estimation = self.dncnn(x)-0.5\n # if not next(self.modules()).training:\n # self.average_err_collection_counter += 1\n # self.average_abs_err_estimates = ((self.average_err_collection_counter-1)*self.average_abs_err_estimates+\n # quantization_err_estimation.abs().mean(-1).mean(-1).mean(0).view(8,8).data.cpu().numpy())/self.average_err_collection_counter\n if self.chroma_generator:\n quantization_err_estimation = quantization_err_estimation.view(quantization_err_estimation.size(0),2,self.block_size//8,8,self.block_size//8,8,\n quantization_err_estimation.size(2),quantization_err_estimation.size(3))\n quantized_coeffs = quantized_coeffs[:,self.block_size**2:,:,:].view(quantized_coeffs.size(0),2,8,8,quantized_coeffs.size(2),quantized_coeffs.size(3))\n quantization_err_estimation[:,:,0,:,0,...] 
= quantization_err_estimation[:,:,0,:,0,...]+quantized_coeffs\n return quantization_err_estimation.view(quantization_err_estimation.size(0),-1,quantization_err_estimation.size(6),quantization_err_estimation.size(7))\n else:\n return quantized_coeffs+quantization_err_estimation\n\n def return_collected_err_avg(self):\n self.average_err_collection_counter = 0\n natrix_2_return = 1*self.average_abs_err_estimates\n self.average_abs_err_estimates = np.zeros([8,8])\n return natrix_2_return\n\n def save_estimated_errors_fig(self,quantization_err_batch):\n import matplotlib.pyplot as plt\n plt.clf()\n plt.imshow(quantization_err_batch.abs().mean(-1).mean(-1).mean(0).view(8,8).data.cpu().numpy())\n plt.colorbar()\n plt.savefig('Est_quantization_errors_0iters_95Kiters.png')\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.orthogonal_(m.weight)\n print('init weight')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n\n\nclass RRDBNet(nn.Module):\n def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None, \\\n act_type='leakyrelu', mode='CNA', upsample_mode='upconv',latent_input=None,num_latent_channels=None):\n super(RRDBNet, self).__init__()\n self.latent_input = latent_input\n if num_latent_channels is not None and num_latent_channels>0:\n num_latent_channels_HR = 1 * num_latent_channels\n if 'HR_rearranged' in latent_input:\n num_latent_channels *= upscale**2\n self.num_latent_channels = 1*num_latent_channels\n self.upscale = upscale\n n_upscale = int(math.log(upscale, 2))\n if upscale == 3:\n n_upscale = 1\n if latent_input is not None:\n in_nc += num_latent_channels\n if latent_input is None or 'all_layers' not in latent_input:\n num_latent_channels,num_latent_channels_HR = 0,0\n\n USE_MODULE_LISTS = True\n fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None,return_module_list=USE_MODULE_LISTS)\n rb_blocks = [B.RRDB(nf, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', \\\n norm_type=norm_type, act_type=act_type, mode='CNA',latent_input_channels=num_latent_channels) for _ in range(nb)]\n LR_conv = B.conv_block(nf+num_latent_channels, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode,return_module_list=USE_MODULE_LISTS)\n\n if upsample_mode == 'upconv':\n upsample_block = B.upconv_blcok\n elif upsample_mode == 'pixelshuffle':\n upsample_block = B.pixelshuffle_block\n else:\n raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))\n if upscale == 3:\n upsampler = upsample_block(nf, nf, 3, act_type=act_type)\n else:\n upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]\n if latent_input is not None and 'all_layers' in latent_input:\n if 'LR' in latent_input:\n self.latent_upsampler = nn.Upsample(scale_factor=upscale if upscale==3 else 2)\n HR_conv0 = B.conv_block(nf+num_latent_channels_HR, nf, kernel_size=3, norm_type=None, act_type=act_type,return_module_list=USE_MODULE_LISTS)\n HR_conv1 = B.conv_block(nf+num_latent_channels_HR, out_nc, kernel_size=3, norm_type=None, act_type=None,return_module_list=USE_MODULE_LISTS)\n\n if USE_MODULE_LISTS:\n self.model = nn.ModuleList(fea_conv+\\\n [B.ShortcutBlock(B.sequential(*(rb_blocks+LR_conv),return_module_list=USE_MODULE_LISTS),latent_input_channels=num_latent_channels,\n use_module_list=True)]+upsampler+HR_conv0+HR_conv1)\n else:\n self.model = B.sequential(fea_conv, 
B.ShortcutBlock(B.sequential(*rb_blocks, LR_conv)),\\\n *upsampler, HR_conv0, HR_conv1)\n\n def forward(self, x):\n if self.latent_input is not None:\n if 'HR_downscaled' in self.latent_input:\n # latent_input_HR = 1*self.Z\n latent_input_HR,x = torch.split(x,split_size_or_sections=[x.size(1)-3,3],dim=1)\n latent_input_HR = latent_input_HR.view([latent_input_HR.size(0)]+[-1]+[self.upscale*val for val in list(latent_input_HR.size()[2:])])\n latent_input = torch.nn.functional.interpolate(input=latent_input_HR,scale_factor=1/self.upscale,mode='bilinear',align_corners=False)\n else:\n latent_input = 1*self.Z\n x = torch.cat([latent_input, x], dim=1)\n for i,module in enumerate(self.model):\n module_children = [str(type(m)) for m in module.children()]\n if i>0 and self.latent_input is not None and 'all_layers' in self.latent_input:\n if len(module_children)>0 and 'Upsample' in module_children[0]:\n if 'LR' in self.latent_input:\n latent_input = self.latent_upsampler(latent_input)\n elif 'HR_rearranged' in self.latent_input:\n raise Exception('Unsupported yet')\n latent_input = latent_input.view()\n elif 'HR_downscaled' in self.latent_input:\n latent_input = 1*latent_input_HR\n elif 'ReLU' not in str(type(module)):\n x = torch.cat([latent_input,x],1)\n x = module(x)\n return x\n\n\n####################\n# Discriminator\n####################\n\nclass PatchGAN_Discriminator(nn.Module):\n DEFAULT_N_LAYERS = 3\n\n def __init__(self, input_nc, opt_net,ndf=64, n_layers=DEFAULT_N_LAYERS, norm_layer=nn.BatchNorm2d):\n \"\"\"Construct a PatchGAN discriminator\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n n_layers (int) -- the number of conv layers in the discriminator\n norm_layer -- normalization layer\n \"\"\"\n super(PatchGAN_Discriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n self.decomposed_input = bool(opt_net['decomposed_input'])\n self.pre_clipping = bool(opt_net['pre_clipping'])\n projected_component_sequences = []\n in_ch_addition = input_nc if self.decomposed_input else 0\n kw = 4\n padw = 1\n max_out_channels = 512\n sequences = [nn.Sequential(*[nn.Conv2d(input_nc+in_ch_addition, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)])]\n # if self.decomposed_input:\n # projected_component_sequences = [nn.Conv2d(input_nc, input_nc, kernel_size=kw, stride=2, padding=padw)]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n # nf_mult_prev = nf_mult\n # nf_mult = min(2 ** max(0,n-n_layers+self.DEFAULT_N_LAYERS), 8)\n nf_mult_prev = min(max_out_channels, ndf * nf_mult) // ndf\n nf_mult = min(2 ** n, 8)\n sequences.append(nn.Sequential(*[\n nn.Conv2d(ndf * nf_mult_prev+in_ch_addition, min(max_out_channels, ndf * nf_mult), kernel_size=kw,\n stride=2 if n > n_layers - self.DEFAULT_N_LAYERS else 1,\n padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True)]))\n # if self.decomposed_input:\n # projected_component_sequences.append(\n # nn.Conv2d(input_nc,input_nc, kernel_size=kw,\n # stride=2 if n > n_layers - self.DEFAULT_N_LAYERS else 1,\n # padding=padw, bias=use_bias))\n\n # nf_mult_prev = nf_mult\n nf_mult_prev = min(max_out_channels, ndf * nf_mult) // ndf\n nf_mult = min(2 ** n_layers, 8)\n sequences.append(nn.Sequential(*[\n 
nn.Conv2d(ndf * nf_mult_prev+in_ch_addition, min(max_out_channels, ndf * nf_mult), kernel_size=kw, stride=1,\n padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)]))\n # if self.decomposed_input:\n # projected_component_sequences.append(\n # nn.Conv2d(input_nc,input_nc, kernel_size=kw, stride=1,\n # padding=padw, bias=use_bias))\n sequences.append(nn.Sequential(*[\n nn.Conv2d(min(max_out_channels, ndf * nf_mult)+in_ch_addition, 1, kernel_size=kw, stride=1,\n padding=padw)])) # output 1 channel prediction map\n self.num_modules = len(sequences)\n if self.decomposed_input:\n for seq in sequences:\n conv_stride = [child.stride[0] for child in seq.children() if 'Conv2d' in str(child.__class__)]\n assert len(conv_stride)<=1,'More than one conv layer in seq?'\n if len(conv_stride)>0:\n projected_component_sequences.append(nn.Conv2d(input_nc,input_nc, kernel_size=kw, stride=conv_stride[0],\n padding=padw, bias=use_bias))\n self.model = nn.ModuleList(sequences+projected_component_sequences)\n\n def forward(self, input):\n # pre-clipping:\n # 1.Making D oblivious to pixel values range, by clipping values to be within valid range\n # 2.Making D oblivious to quantization issues, by quantizing its inputs to 256 possible values\n if self.decomposed_input:\n projected_component = input[0]\n input = input[1]\n if self.pre_clipping:\n input = torch.max(input=torch.min(input=input,other=1-projected_component),other=-projected_component)\n # input = (255*(input+projected_component)).round()/255-projected_component\n elif self.pre_clipping:\n input = torch.clamp(input=input,min=0,max=1)\n # input = (255*input).round()/255\n for i,seq in enumerate(self.model[:self.num_modules]):\n if self.decomposed_input:\n if i > 0:\n projected_component = self.model[self.num_modules + i - 1](projected_component)\n input = seq(torch.cat([projected_component,input],dim=1))\n else:\n input = seq(input)\n return input\n\nclass Discriminator_VGG_128_nonModified(nn.Module):\n def __init__(self, in_nc, nf):\n super(Discriminator_VGG_128_nonModified, self).__init__()\n # [64, 128, 128]\n self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n # [64, 64, 64]\n self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n # [128, 32, 32]\n self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n # [256, 16, 16]\n self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n # [512, 8, 8]\n self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n self.linear1 = nn.Linear(512 * 4 * 4, 100)\n self.linear2 = nn.Linear(100, 1)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n def forward(self, x):\n fea = self.lrelu(self.conv0_0(x))\n fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n fea = 
self.lrelu(self.bn1_0(self.conv1_0(fea)))\n fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n fea = fea.view(fea.size(0), -1)\n fea = self.lrelu(self.linear1(fea))\n out = self.linear2(fea)\n return out\n\n# VGG style Discriminator with input size 128*128\nclass Discriminator_VGG_128(nn.Module):\n def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA',input_patch_size=128,num_2_strides=5,nb=10):\n super(Discriminator_VGG_128, self).__init__()\n assert num_2_strides<=5,'Can be modified by adding more stridable layers, if needed.'\n self.num_2_strides = 1*num_2_strides\n # features\n # hxw, c\n # 128, 64\n FC_end_patch_size = 1*input_patch_size\n conv0 = B.conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type,mode=mode)\n conv1 = B.conv_block(base_nf, base_nf, kernel_size=4, stride=2 if num_2_strides>0 else 1, norm_type=norm_type,act_type=act_type, mode=mode)\n FC_end_patch_size = np.ceil((FC_end_patch_size-1)/(2 if num_2_strides>0 else 1))\n num_2_strides -= 1\n # 64, 64\n conv2 = B.conv_block(base_nf, base_nf*2, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv3 = B.conv_block(base_nf*2, base_nf*2, kernel_size=4, stride=2 if num_2_strides>0 else 1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n FC_end_patch_size = np.ceil((FC_end_patch_size-1)/(2 if num_2_strides>0 else 1))\n num_2_strides -= 1\n # 32, 128\n conv4 = B.conv_block(base_nf*2, base_nf*4, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv5 = B.conv_block(base_nf*4, base_nf*4, kernel_size=4, stride=2 if num_2_strides>0 else 1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n FC_end_patch_size = np.ceil((FC_end_patch_size-1)/(2 if num_2_strides>0 else 1))\n num_2_strides -= 1\n # 16, 256\n conv6 = B.conv_block(base_nf*4, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv7 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2 if num_2_strides>0 else 1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n FC_end_patch_size = np.ceil((FC_end_patch_size-1)/(2 if num_2_strides>0 else 1))\n num_2_strides -= 1\n # 8, 512\n conv8 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv9 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2 if num_2_strides>0 else 1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n FC_end_patch_size = np.ceil((FC_end_patch_size-1)/(2 if num_2_strides>0 else 1))\n num_2_strides -= 1\n # 4, 512\n self.features = B.sequential(*([conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,conv9][:nb]))\n\n self.last_FC_layers = self.num_2_strides==5 #Replacing the FC layers with convolutions, which means using a patch discriminator:\n self.last_FC_layers = False\n # classifier\n # FC_end_patch_size = input_patch_size//(2**self.num_2_strides)\n if self.last_FC_layers:\n self.classifier = nn.Sequential(nn.Linear(base_nf*8 * int(FC_end_patch_size)**2, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))\n else:\n # num_feature_channels = base_nf*8\n num_feature_channels = [l for l in 
self.features.children()][-2].num_features\n pseudo_FC_conv0 = B.conv_block(num_feature_channels,min(100,num_feature_channels),kernel_size=8,stride=1,norm_type=norm_type,act_type=act_type, mode=mode,pad_type=None)\n pseudo_FC_conv1 = B.conv_block(min(100,num_feature_channels),1,kernel_size=1,stride=1,norm_type=norm_type,act_type=act_type, mode=mode)\n self.classifier = nn.Sequential(pseudo_FC_conv0, nn.LeakyReLU(0.2, False),pseudo_FC_conv1) # Changed the LeakyRelu inplace arg to False here, because it caused a bug for some reason.\n\n def forward(self, x):\n x = self.features(x)\n if self.last_FC_layers:\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n\n# VGG style Discriminator with input size 128*128, Spectral Normalization\nclass Discriminator_VGG_128_SN(nn.Module):\n def __init__(self):\n super(Discriminator_VGG_128_SN, self).__init__()\n # features\n # hxw, c\n # 128, 64\n self.lrelu = nn.LeakyReLU(0.2, True)\n\n self.conv0 = SN.spectral_norm(nn.Conv2d(3, 64, 3, 1, 1))\n self.conv1 = SN.spectral_norm(nn.Conv2d(64, 64, 4, 2, 1))\n # 64, 64\n self.conv2 = SN.spectral_norm(nn.Conv2d(64, 128, 3, 1, 1))\n self.conv3 = SN.spectral_norm(nn.Conv2d(128, 128, 4, 2, 1))\n # 32, 128\n self.conv4 = SN.spectral_norm(nn.Conv2d(128, 256, 3, 1, 1))\n self.conv5 = SN.spectral_norm(nn.Conv2d(256, 256, 4, 2, 1))\n # 16, 256\n self.conv6 = SN.spectral_norm(nn.Conv2d(256, 512, 3, 1, 1))\n self.conv7 = SN.spectral_norm(nn.Conv2d(512, 512, 4, 2, 1))\n # 8, 512\n self.conv8 = SN.spectral_norm(nn.Conv2d(512, 512, 3, 1, 1))\n self.conv9 = SN.spectral_norm(nn.Conv2d(512, 512, 4, 2, 1))\n # 4, 512\n\n # classifier\n self.linear0 = SN.spectral_norm(nn.Linear(512 * 4 * 4, 100))\n self.linear1 = SN.spectral_norm(nn.Linear(100, 1))\n\n def forward(self, x):\n x = self.lrelu(self.conv0(x))\n x = self.lrelu(self.conv1(x))\n x = self.lrelu(self.conv2(x))\n x = self.lrelu(self.conv3(x))\n x = self.lrelu(self.conv4(x))\n x = self.lrelu(self.conv5(x))\n x = self.lrelu(self.conv6(x))\n x = self.lrelu(self.conv7(x))\n x = self.lrelu(self.conv8(x))\n x = self.lrelu(self.conv9(x))\n x = x.view(x.size(0), -1)\n x = self.lrelu(self.linear0(x))\n x = self.linear1(x)\n return x\n\n\nclass Discriminator_VGG_96(nn.Module):\n def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA'):\n super(Discriminator_VGG_96, self).__init__()\n # features\n # hxw, c\n # 96, 64\n conv0 = B.conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, \\\n mode=mode)\n conv1 = B.conv_block(base_nf, base_nf, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 48, 64\n conv2 = B.conv_block(base_nf, base_nf*2, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv3 = B.conv_block(base_nf*2, base_nf*2, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 24, 128\n conv4 = B.conv_block(base_nf*2, base_nf*4, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv5 = B.conv_block(base_nf*4, base_nf*4, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 12, 256\n conv6 = B.conv_block(base_nf*4, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv7 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 6, 512\n conv8 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, 
mode=mode)\n conv9 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 3, 512\n self.features = B.sequential(conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\\\n conv9)\n\n # classifier\n self.classifier = nn.Sequential(\n nn.Linear(512 * 3 * 3, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n\nclass Discriminator_VGG_192(nn.Module):\n def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA'):\n super(Discriminator_VGG_192, self).__init__()\n # features\n # hxw, c\n # 192, 64\n conv0 = B.conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, \\\n mode=mode)\n conv1 = B.conv_block(base_nf, base_nf, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 96, 64\n conv2 = B.conv_block(base_nf, base_nf*2, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv3 = B.conv_block(base_nf*2, base_nf*2, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 48, 128\n conv4 = B.conv_block(base_nf*2, base_nf*4, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv5 = B.conv_block(base_nf*4, base_nf*4, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 24, 256\n conv6 = B.conv_block(base_nf*4, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv7 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 12, 512\n conv8 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv9 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 6, 512\n conv10 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv11 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 3, 512\n self.features = B.sequential(conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\\\n conv9, conv10, conv11)\n\n # classifier\n self.classifier = nn.Sequential(\n nn.Linear(512 * 3 * 3, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n\n####################\n# Perceptual Network\n####################\nRETRAINING_OBLIGING_MODIFICATIONS = ['num_channel_factor_\\d(\\.\\d)?$','patches_init_first']\n\n# Assume input range is [0, 1]\nclass VGGFeatureExtractor(nn.Module):\n def __init__(self,feature_layer=34,use_bn=False,use_input_norm=True,\n device=torch.device('cpu'),state_dict=None,arch='vgg19',arch_config='',**kwargs):\n super(VGGFeatureExtractor, self).__init__()\n if arch_config!='':\n assert all([re.search(pattern,arch_config) is None for pattern in RETRAINING_OBLIGING_MODIFICATIONS]) or 'untrained_' in arch_config\n # assert (re.search('patches_init_(first|all)',arch_config) is None) or 'untrained' not in arch_config,'Relying on trained weights statistics when setting model weights'\n if arch=='SegNetAE':\n from models.modules import SegNet\n model = nn.DataParallel(SegNet.SegNet(3,encode_only=True,batch_norm_DS=False,num_layers=4))\n 
loaded_state_dict = torch.load('/home/tiras/ybahat/Autoencoder/models/BEST_checkpoint.tar')['model']\n modified_state_dict = {}\n for key in model.state_dict().keys():\n modified_state_dict[key] = loaded_state_dict[key.replace('.features.0','.down1').replace('.features.1','.down2').replace('.features.2','.down3').replace('.features.3','.down4').replace('.features.4','.down5')]\n model.load_state_dict(modified_state_dict)\n model = model.module\n use_input_norm = False # SegNet model expects non-normalized images\n elif use_bn:\n model = torchvision.models.__dict__[arch+'_bn'](pretrained='untrained' not in arch_config)\n else:\n model = torchvision.models.__dict__[arch](pretrained='untrained' not in arch_config)\n # I now remove all unnecessary layers before changing the model configuration, because this change may make alter the number of layers, thus necessitating changing the feature_layer parameter.\n if state_dict is not None:\n state_dict = dict(zip([key.replace('module.','') for key in state_dict.keys()],[value for value in state_dict.values()]))\n model.load_state_dict(state_dict,strict=False)\n model.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])\n arch_config = arch_config.replace('untrained_','').replace('untrained','')\n if arch_config!='':\n import sys\n sys.path.append(os.path.abspath('../../RandomPooling'))\n from model_modification import Modify_Model\n saved_config_params = kwargs['saved_config_params'] if 'saved_config_params' in kwargs.keys() else None\n saving_path = kwargs['saving_path'] if 'saving_path' in kwargs.keys() else None\n model = Modify_Model(model,arch_config,classification_mode=False,saved_config_params=saved_config_params,saving_path=saving_path)\n self.use_input_norm = use_input_norm\n if self.use_input_norm:\n mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)\n # [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]\n std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)\n # [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]\n self.register_buffer('mean', mean)\n self.register_buffer('std', std)\n # Moved the next line to appear earlier, before altering the number of layers in the model\n # self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])\n self.features = model.features\n # No need to BP to variable\n for k, v in self.features.named_parameters():\n v.requires_grad = False\n\n def _initialize_weights(self):#This function was copied from the torchvision.models.vgg code:\n for m in self.features.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n if self.use_input_norm:\n x = (x - self.mean) / self.std\n output = self.features(x)\n return output\n\n\n# Assume input range is [0, 1]\nclass ResNet101FeatureExtractor(nn.Module):\n def __init__(self, use_input_norm=True, device=torch.device('cpu')):\n super(ResNet101FeatureExtractor, self).__init__()\n model = torchvision.models.resnet101(pretrained=True)\n self.use_input_norm = use_input_norm\n if self.use_input_norm:\n mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)\n # [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]\n std = 
torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)\n # [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]\n self.register_buffer('mean', mean)\n self.register_buffer('std', std)\n self.features = nn.Sequential(*list(model.children())[:8])\n # No need to BP to variable\n for k, v in self.features.named_parameters():\n v.requires_grad = False\n\n def forward(self, x):\n if self.use_input_norm:\n x = (x - self.mean) / self.std\n output = self.features(x)\n return output\n\n\nclass MINCNet(nn.Module):\n def __init__(self):\n super(MINCNet, self).__init__()\n self.ReLU = nn.ReLU(True)\n self.conv11 = nn.Conv2d(3, 64, 3, 1, 1)\n self.conv12 = nn.Conv2d(64, 64, 3, 1, 1)\n self.maxpool1 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)\n self.conv21 = nn.Conv2d(64, 128, 3, 1, 1)\n self.conv22 = nn.Conv2d(128, 128, 3, 1, 1)\n self.maxpool2 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)\n self.conv31 = nn.Conv2d(128, 256, 3, 1, 1)\n self.conv32 = nn.Conv2d(256, 256, 3, 1, 1)\n self.conv33 = nn.Conv2d(256, 256, 3, 1, 1)\n self.maxpool3 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)\n self.conv41 = nn.Conv2d(256, 512, 3, 1, 1)\n self.conv42 = nn.Conv2d(512, 512, 3, 1, 1)\n self.conv43 = nn.Conv2d(512, 512, 3, 1, 1)\n self.maxpool4 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)\n self.conv51 = nn.Conv2d(512, 512, 3, 1, 1)\n self.conv52 = nn.Conv2d(512, 512, 3, 1, 1)\n self.conv53 = nn.Conv2d(512, 512, 3, 1, 1)\n\n def forward(self, x):\n out = self.ReLU(self.conv11(x))\n out = self.ReLU(self.conv12(out))\n out = self.maxpool1(out)\n out = self.ReLU(self.conv21(out))\n out = self.ReLU(self.conv22(out))\n out = self.maxpool2(out)\n out = self.ReLU(self.conv31(out))\n out = self.ReLU(self.conv32(out))\n out = self.ReLU(self.conv33(out))\n out = self.maxpool3(out)\n out = self.ReLU(self.conv41(out))\n out = self.ReLU(self.conv42(out))\n out = self.ReLU(self.conv43(out))\n out = self.maxpool4(out)\n out = self.ReLU(self.conv51(out))\n out = self.ReLU(self.conv52(out))\n out = self.conv53(out)\n return out\n\n# Encoder:\ndef conv3x3(in_planes, out_planes):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,padding=1, bias=True)\n\ndef meanpoolConv(inplanes, outplanes):\n sequence = []\n sequence += [nn.AvgPool2d(kernel_size=2, stride=2)]\n sequence += [nn.Conv2d(inplanes, outplanes,\n kernel_size=1, stride=1, padding=0, bias=True)]\n return nn.Sequential(*sequence)\n\n\ndef convMeanpool(inplanes, outplanes):\n sequence = []\n sequence += [conv3x3(inplanes, outplanes)]\n sequence += [nn.AvgPool2d(kernel_size=2, stride=2)]\n return nn.Sequential(*sequence)\n\nclass BasicBlock(nn.Module):\n def __init__(self, inplanes, outplanes, norm_layer=None, nl_layer=None):\n super(BasicBlock, self).__init__()\n layers = []\n if norm_layer is not None:\n layers += [norm_layer(inplanes)]\n layers += [nl_layer()]\n layers += [conv3x3(inplanes, inplanes)]\n if norm_layer is not None:\n layers += [norm_layer(inplanes)]\n layers += [nl_layer()]\n layers += [convMeanpool(inplanes, outplanes)]\n self.conv = nn.Sequential(*layers)\n self.shortcut = meanpoolConv(inplanes, outplanes)\n\n def forward(self, x):\n out = self.conv(x) + self.shortcut(x)\n return out\n\nclass E_ResNet(nn.Module):\n def __init__(self, input_nc=3, output_nc=1, ndf=64, n_blocks=4,\n norm_layer=None, nl_layer=None, vaeLike=False):\n super(E_ResNet, self).__init__()\n self.vaeLike = vaeLike\n max_ndf = 4\n conv_layers = [\n nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1, bias=True)]\n for 
n in range(1, n_blocks):\n input_ndf = ndf * min(max_ndf, n)\n output_ndf = ndf * min(max_ndf, n + 1)\n conv_layers += [BasicBlock(input_ndf,\n output_ndf, norm_layer, nl_layer)]\n conv_layers += [nl_layer(), nn.AvgPool2d(8)]\n if vaeLike:\n self.fc = nn.Sequential(*[nn.Linear(output_ndf, output_nc)])\n self.fcVar = nn.Sequential(*[nn.Linear(output_ndf, output_nc)])\n else:\n self.fc = nn.Sequential(*[nn.Linear(output_ndf, output_nc)])\n self.conv = nn.Sequential(*conv_layers)\n\n def forward(self, x):\n x_conv = self.conv(x)\n conv_flat = x_conv.view(x.size(0), -1)\n output = self.fc(conv_flat)\n if self.vaeLike:\n outputVar = self.fcVar(conv_flat)\n return output, outputVar\n else:\n return output\n return output\n\n# Assume input range is [0, 1]\nclass MINCFeatureExtractor(nn.Module):\n def __init__(self, feature_layer=34, use_bn=False, use_input_norm=True, \\\n device=torch.device('cpu')):\n super(MINCFeatureExtractor, self).__init__()\n\n self.features = MINCNet()\n self.features.load_state_dict(\n torch.load('../experiments/pretrained_models/VGG16minc_53.pth'), strict=True)\n self.features.eval()\n # No need to BP to variable\n for k, v in self.features.named_parameters():\n v.requires_grad = False\n\n def forward(self, x):\n output = self.features(x)\n return output\n", "import os\nimport os.path\nimport sys\nfrom multiprocessing import Pool\nimport numpy as np\nimport cv2\nfrom socket import gethostname\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom utils.progress_bar import ProgressBar\n\n\ndef main():\n \"\"\"A multi-thread tool to crop sub imags.\"\"\"\n dataset_root_path = '/home/ybahat/Datasets' if gethostname()=='ybahat-System-Product-Name' else '/home/tiras/datasets' if 'tiras' in os.getcwd() else '/media/ybahat/data/Datasets'\n input_folder = os.path.join(dataset_root_path,'DIV2K_train/DIV2K_train_HR')\n save_folder = os.path.join(dataset_root_path,'DIV2K_train/DIV2K_train_sub_HR')\n n_thread = 20\n crop_sz = 256#480\n step = 30#240\n thres_sz = 48\n compression_level = 3 # 3 is the default value in cv2\n multi_scale = False\n # CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer\n # compression time. If read raw images during training, use 0 for faster IO speed.\n\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n print('mkdir [{:s}] ...'.format(save_folder))\n else:\n print('Folder [{:s}] already exists. 
Exit...'.format(save_folder))\n sys.exit(1)\n\n img_list = []\n for root, _, file_list in sorted(os.walk(input_folder)):\n path = [os.path.join(root, x) for x in file_list] # assume only images in the input_folder\n img_list.extend(path)\n\n def update(arg):\n pbar.update(arg)\n\n pbar = ProgressBar(len(img_list))\n\n pool = Pool(n_thread)\n for path in img_list:\n pool.apply_async(worker,\n args=(path, save_folder, crop_sz, step, thres_sz, compression_level,multi_scale),\n callback=update)\n pool.close()\n pool.join()\n print('All subprocesses done.')\n\n\ndef worker(path, save_folder, crop_sz, step, thres_sz, compression_level,multi_scale=False):\n img_name = os.path.basename(path)\n img = cv2.imread(path, cv2.IMREAD_UNCHANGED)\n\n n_channels = len(img.shape)\n if n_channels == 2:\n h, w = img.shape\n elif n_channels == 3:\n h, w, c = img.shape\n else:\n raise ValueError('Wrong image shape - {}'.format(n_channels))\n\n num_scales = int(np.floor(np.log2(min(h,w)/crop_sz))+1) if multi_scale else 1\n desired_steps_per_scale = (min(h, w) // (2 ** (num_scales - 1)) - crop_sz) // step\n step *= 2**(num_scales-1)\n\n for scale_num in range(num_scales):\n cur_scale_img = img if scale_num==0 else cv2.resize(img,(w//2,h//2))\n h,w = cur_scale_img.shape[:2]\n crop_sz = min(h,w)-(desired_steps_per_scale-1)*step\n h_space = np.arange(0, h - crop_sz + 1, step)\n if h - (h_space[-1] + crop_sz) > thres_sz:\n h_space = np.append(h_space, h - crop_sz)\n w_space = np.arange(0, w - crop_sz + 1, step)\n if w - (w_space[-1] + crop_sz) > thres_sz:\n w_space = np.append(w_space, w - crop_sz)\n\n index = 0\n for x in h_space:\n for y in w_space:\n index += 1\n if n_channels == 2:\n crop_img = cur_scale_img[x:x + crop_sz, y:y + crop_sz]\n else:\n crop_img = cur_scale_img[x:x + crop_sz, y:y + crop_sz, :]\n crop_img = np.ascontiguousarray(crop_img)\n # var = np.var(crop_img / 255)\n # if var > 0.008:\n # print(img_name, index_str, var)\n cv2.imwrite(\n os.path.join(save_folder, img_name.replace('.png', '_scale{:1d}_s{:03d}.png'.format(scale_num,index))),\n crop_img, [cv2.IMWRITE_PNG_COMPRESSION, compression_level])\n step = step//2\n return 'Processing {:s} ...'.format(img_name)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.mean", "numpy.sqrt", "torch.cat", "torch.load", "numpy.round", "torch.nn.functional.interpolate", "torch.device", "torch.clamp", "torch.nn.Sigmoid", "numpy.ceil", "numpy.zeros", "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.PixelShuffle", "torch.min", "matplotlib.pyplot.savefig", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.init.normal_", "torch.nn.LeakyReLU", "numpy.floor", "torch.nn.BatchNorm2d", "torch.nn.InstanceNorm2d", "torch.Tensor", "torch.nn.LayerNorm", "matplotlib.pyplot.colorbar", "torch.nn.MaxPool2d", "matplotlib.pyplot.clf", "torch.nn.Upsample", "torch.nn.init.orthogonal_", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ], [ "numpy.ascontiguousarray", "numpy.arange", "numpy.append" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
L-Net-1992/TensorRT
[ "34b664d404001bd724cb56b52a6e0e05e1fd97f2", "34b664d404001bd724cb56b52a6e0e05e1fd97f2" ]
[ "samples/python/network_api_pytorch_mnist/model.py", "samples/python/efficientdet/create_onnx.py" ]
[ "#\n# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# This file contains functions for training a PyTorch MNIST Model\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\nimport numpy as np\nimport os\n\nfrom random import randint\n\n# Network\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, kernel_size=5)\n self.conv2 = nn.Conv2d(20, 50, kernel_size=5)\n self.fc1 = nn.Linear(800, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)\n x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)\n x = x.view(-1, 800)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\nclass MnistModel(object):\n def __init__(self):\n self.batch_size = 64\n self.test_batch_size = 100\n self.learning_rate = 0.0025\n self.sgd_momentum = 0.9\n self.log_interval = 100\n # Fetch MNIST data set.\n self.train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('/tmp/mnist/data', train=True, download=True, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=1,\n timeout=600)\n self.test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('/tmp/mnist/data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=self.test_batch_size,\n shuffle=True,\n num_workers=1,\n timeout=600)\n self.network = Net()\n\n # Train the network for one or more epochs, validating after each epoch.\n def learn(self, num_epochs=2):\n # Train the network for a single epoch\n def train(epoch):\n self.network.train()\n optimizer = optim.SGD(self.network.parameters(), lr=self.learning_rate, momentum=self.sgd_momentum)\n for batch, (data, target) in enumerate(self.train_loader):\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = self.network(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch % self.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(epoch, batch * len(data), len(self.train_loader.dataset), 100. 
* batch / len(self.train_loader), loss.data.item()))\n\n # Test the network\n def test(epoch):\n self.network.eval()\n test_loss = 0\n correct = 0\n for data, target in self.test_loader:\n with torch.no_grad():\n data, target = Variable(data), Variable(target)\n output = self.network(data)\n test_loss += F.nll_loss(output, target).data.item()\n pred = output.data.max(1)[1]\n correct += pred.eq(target.data).cpu().sum()\n test_loss /= len(self.test_loader)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(test_loss, correct, len(self.test_loader.dataset), 100. * correct / len(self.test_loader.dataset)))\n\n for e in range(num_epochs):\n train(e + 1)\n test(e + 1)\n\n def get_weights(self):\n return self.network.state_dict()\n\n def get_random_testcase(self):\n data, target = next(iter(self.test_loader))\n case_num = randint(0, len(data) - 1)\n test_case = data.numpy()[case_num].ravel().astype(np.float32)\n test_name = target.numpy()[case_num]\n return test_case, test_name\n", "#\n# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport sys\nimport argparse\nimport logging\n\nimport tensorflow as tf\nimport onnx_graphsurgeon as gs\nimport numpy as np\nimport onnx\nfrom onnx import shape_inference\nfrom tf2onnx import tfonnx, optimizer, tf_loader\n\nimport onnx_utils\n\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger(\"EfficientDetGraphSurgeon\").setLevel(logging.INFO)\nlog = logging.getLogger(\"EfficientDetGraphSurgeon\")\n\n\nclass EfficientDetGraphSurgeon:\n def __init__(self, saved_model_path):\n \"\"\"\n Constructor of the EfficientDet Graph Surgeon object, to do the conversion of an EfficientDet TF saved model\n to an ONNX-TensorRT parsable model.\n :param saved_model_path: The path pointing to the TensorFlow saved model to load.\n \"\"\"\n saved_model_path = os.path.realpath(saved_model_path)\n assert os.path.exists(saved_model_path)\n\n # Use tf2onnx to convert saved model to an initial ONNX graph.\n graph_def, inputs, outputs = tf_loader.from_saved_model(saved_model_path, None, None, \"serve\",\n [\"serving_default\"])\n log.info(\"Loaded saved model from {}\".format(saved_model_path))\n with tf.Graph().as_default() as tf_graph:\n tf.import_graph_def(graph_def, name=\"\")\n with tf_loader.tf_session(graph=tf_graph):\n onnx_graph = tfonnx.process_tf_graph(tf_graph, input_names=inputs, output_names=outputs, opset=11)\n onnx_model = optimizer.optimize_graph(onnx_graph).make_model(\"Converted from {}\".format(saved_model_path))\n self.graph = gs.import_onnx(onnx_model)\n assert self.graph\n log.info(\"TF2ONNX graph created successfully\")\n\n # Fold constants via ONNX-GS that TF2ONNX may have missed\n self.graph.fold_constants()\n\n # Try to auto-detect by finding if nodes match a specific name pattern expected for either of the APIs.\n self.api = None\n if len([node for node in 
self.graph.nodes if \"class_net/\" in node.name]) > 0:\n self.api = \"AutoML\"\n elif len([node for node in self.graph.nodes if \"/WeightSharedConvolutionalClassHead/\" in node.name]) > 0:\n self.api = \"TFOD\"\n assert self.api\n log.info(\"Graph was detected as {}\".format(self.api))\n\n def sanitize(self):\n \"\"\"\n Sanitize the graph by cleaning any unconnected nodes, do a topological resort, and fold constant inputs values.\n When possible, run shape inference on the ONNX graph to determine tensor shapes.\n \"\"\"\n for i in range(3):\n count_before = len(self.graph.nodes)\n\n self.graph.cleanup().toposort()\n try:\n for node in self.graph.nodes:\n for o in node.outputs:\n o.shape = None\n model = gs.export_onnx(self.graph)\n model = shape_inference.infer_shapes(model)\n self.graph = gs.import_onnx(model)\n except Exception as e:\n log.info(\"Shape inference could not be performed at this time:\\n{}\".format(e))\n try:\n self.graph.fold_constants(fold_shapes=True)\n except TypeError as e:\n log.error(\"This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your \"\n \"onnx_graphsurgeon module. Error:\\n{}\".format(e))\n raise\n\n count_after = len(self.graph.nodes)\n if count_before == count_after:\n # No new folding occurred in this iteration, so we can stop for now.\n break\n\n def save(self, output_path):\n \"\"\"\n Save the ONNX model to the given location.\n :param output_path: Path pointing to the location where to write out the updated ONNX model.\n \"\"\"\n self.graph.cleanup().toposort()\n model = gs.export_onnx(self.graph)\n output_path = os.path.realpath(output_path)\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n onnx.save(model, output_path)\n log.info(\"Saved ONNX model to {}\".format(output_path))\n\n def update_preprocessor(self, input_format, input_size, preprocessor=\"imagenet\"):\n \"\"\"\n Remove all the pre-processing nodes in the ONNX graph and leave only the image normalization essentials.\n :param input_format: The input data format, either \"NCHW\" or \"NHWC\".\n :param input_size: The input size as a comma-separated string in H,W format, e.g. 
\"512,512\".\n :param preprocessor: The preprocessor to use, either \"imagenet\" for imagenet mean and stdev normalization,\n or \"scale_range\" for uniform [-1,+1] range normalization.\n \"\"\"\n # Update the input and output tensors shape\n input_size = input_size.split(\",\")\n assert len(input_size) == 2\n for i in range(len(input_size)):\n input_size[i] = int(input_size[i])\n assert input_size[i] >= 1\n assert input_format in [\"NCHW\", \"NHWC\"]\n if input_format == \"NCHW\":\n self.graph.inputs[0].shape = ['N', 3, input_size[0], input_size[1]]\n if input_format == \"NHWC\":\n self.graph.inputs[0].shape = ['N', input_size[0], input_size[1], 3]\n self.graph.inputs[0].dtype = np.float32\n self.graph.inputs[0].name = \"input\"\n log.info(\"ONNX graph input shape: {} [{} format]\".format(self.graph.inputs[0].shape, input_format))\n self.sanitize()\n\n # Find the initial nodes of the graph, whatever the input is first connected to, and disconnect them\n for node in [node for node in self.graph.nodes if self.graph.inputs[0] in node.inputs]:\n node.inputs.clear()\n\n # Convert to NCHW format if needed\n input_tensor = self.graph.inputs[0]\n if input_format == \"NHWC\":\n input_tensor = self.graph.transpose(\"preprocessor/transpose\", input_tensor, [0, 3, 1, 2])\n\n assert preprocessor in [\"imagenet\", \"scale_range\"]\n preprocessed_tensor = None\n if preprocessor == \"imagenet\":\n # RGB Normalizers. The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting\n scale_val = 1 / np.asarray([255], dtype=np.float32)\n mean_val = -1 * np.expand_dims(np.asarray([0.485, 0.456, 0.406], dtype=np.float32), axis=(0, 2, 3))\n stddev_val = 1 / np.expand_dims(np.asarray([0.229, 0.224, 0.225], dtype=np.float32), axis=(0, 2, 3))\n # y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev\n scale_out = self.graph.elt_const(\"Mul\", \"preprocessor/scale\", input_tensor, scale_val * stddev_val)\n mean_out = self.graph.elt_const(\"Add\", \"preprocessor/mean\", scale_out, mean_val * stddev_val)\n preprocessed_tensor = mean_out[0]\n if preprocessor == \"scale_range\":\n # RGB Normalizers. 
The per-channel values are given with shape [1, 3, 1, 1] for proper NCHW shape broadcasting\n scale_val = 2 / np.asarray([255], dtype=np.float32)\n offset_val = np.expand_dims(np.asarray([-1, -1, -1], dtype=np.float32), axis=(0, 2, 3))\n # y = (x * scale + mean) * stddev --> y = x * scale * stddev + mean * stddev\n scale_out = self.graph.elt_const(\"Mul\", \"preprocessor/scale\", input_tensor, scale_val)\n range_out = self.graph.elt_const(\"Add\", \"preprocessor/range\", scale_out, offset_val)\n preprocessed_tensor = range_out[0]\n\n # Find the first stem conv node of the graph, and connect the normalizer directly to it\n stem_name = None\n if self.api == \"AutoML\":\n stem_name = \"/stem/\"\n if self.api == \"TFOD\":\n stem_name = \"/stem_conv2d/\"\n stem = [node for node in self.graph.nodes if node.op == \"Conv\" and stem_name in node.name][0]\n log.info(\"Found {} node '{}' as stem entry\".format(stem.op, stem.name))\n stem.inputs[0] = preprocessed_tensor\n\n self.sanitize()\n\n def update_shapes(self):\n # Reshape nodes have the batch dimension as a fixed value of 1, they should use the batch size instead\n # Output-Head reshapes use [1, -1, C], corrected reshape value should be [-1, V, C]\n for node in [node for node in self.graph.nodes if node.op == \"Reshape\"]:\n shape_in = node.inputs[0].shape\n if shape_in is None or len(shape_in) not in [4,5]: # TFOD graphs have 5-dim inputs on this Reshape\n continue\n if type(node.inputs[1]) != gs.Constant:\n continue\n shape_out = node.inputs[1].values\n if len(shape_out) != 3 or shape_out[0] != 1 or shape_out[1] != -1:\n continue\n volume = shape_in[1] * shape_in[2] * shape_in[3] / shape_out[2]\n if len(shape_in) == 5:\n volume *= shape_in[4]\n shape_corrected = np.asarray([-1, volume, shape_out[2]], dtype=np.int64)\n node.inputs[1] = gs.Constant(\"{}_shape\".format(node.name), values=shape_corrected)\n log.info(\"Updating Output-Head Reshape node {} to {}\".format(node.name, node.inputs[1].values))\n\n # Other Reshapes only need to change the first dim to -1, as long as there are no -1's already\n for node in [node for node in self.graph.nodes if node.op == \"Reshape\"]:\n if type(node.inputs[1]) != gs.Constant or node.inputs[1].values[0] != 1 or -1 in node.inputs[1].values:\n continue\n node.inputs[1].values[0] = -1\n log.info(\"Updating Reshape node {} to {}\".format(node.name, node.inputs[1].values))\n\n # Resize nodes try to calculate the output shape dynamically, it's more optimal to pre-compute the shape\n if self.api == \"AutoML\":\n # Resize on a BiFPN will always be 2x, but grab it from the graph just in case\n for node in [node for node in self.graph.nodes if node.op == \"Resize\"]:\n if len(node.inputs) < 4 or node.inputs[0].shape is None:\n continue\n scale_h, scale_w = None, None\n if type(node.inputs[3]) == gs.Constant:\n # The sizes input is already folded\n if len(node.inputs[3].values) != 4:\n continue\n scale_h = node.inputs[3].values[2] / node.inputs[0].shape[2]\n scale_w = node.inputs[3].values[3] / node.inputs[0].shape[3]\n if type(node.inputs[3]) == gs.Variable:\n # The sizes input comes from Shape+Slice+Concat\n concat = node.i(3)\n if concat.op != \"Concat\":\n continue\n if type(concat.inputs[1]) != gs.Constant or len(concat.inputs[1].values) != 2:\n continue\n scale_h = concat.inputs[1].values[0] / node.inputs[0].shape[2]\n scale_w = concat.inputs[1].values[1] / node.inputs[0].shape[3]\n scales = np.asarray([1, 1, scale_h, scale_w], dtype=np.float32)\n del node.inputs[3]\n node.inputs[2] = 
gs.Constant(name=\"{}_scales\".format(node.name), values=scales)\n log.info(\"Updating Resize node {} to {}\".format(node.name, scales))\n\n self.sanitize()\n\n def update_network(self):\n \"\"\"\n Updates the graph to replace certain nodes in the main EfficientDet network:\n - the global average pooling nodes are optimized when running for TFOD models.\n \"\"\"\n\n if self.api == \"TFOD\":\n for reduce in [node for node in self.graph.nodes if node.op == \"ReduceMean\"]:\n # TFOD models have their ReduceMean nodes applied with some redundant transposes that can be\n # optimized away for better performance\n # Make sure the correct subgraph is being replaced, basically search for this:\n # X > Transpose (0,2,3,1) > ReduceMean (1,2) > Reshape (?,1,1,?) > Reshape (?,?,1,1) > Conv > Y\n # And change to this:\n # X > ReduceMean (2,3) > Conv > Y\n transpose = reduce.i()\n if transpose.op != \"Transpose\" or transpose.attrs['perm'] != [0, 2, 3, 1]:\n continue\n if len(reduce.attrs['axes']) != 2 or reduce.attrs['axes'] != [1, 2]:\n continue\n reshape1 = reduce.o()\n if reshape1.op != \"Reshape\" or len(reshape1.inputs[1].values) != 4:\n continue\n if reshape1.inputs[1].values[1] != 1 or reshape1.inputs[1].values[2] != 1:\n continue\n reshape2 = reshape1.o()\n if reshape2.op != \"Reshape\" or len(reshape2.inputs[1].values) != 4:\n continue\n if reshape2.inputs[1].values[2] != 1 or reshape2.inputs[1].values[3] != 1:\n continue\n conv = reshape2.o()\n if conv.op != \"Conv\":\n continue\n # If all the checks above pass, then this node sequence can be optimized by just the ReduceMean itself\n # operating on a different set of axes\n input_tensor = transpose.inputs[0] # Input tensor to the Transpose\n reduce.inputs[0] = input_tensor # Forward the Transpose input to the ReduceMean node\n output_tensor = reduce.outputs[0] # Output tensor of the ReduceMean\n conv.inputs[0] = output_tensor # Forward the ReduceMean output to the Conv node\n reduce.attrs['axes'] = [2, 3] # Update the axes that ReduceMean operates on\n reduce.attrs['keepdims'] = 1 # Keep the reduced dimensions\n log.info(\"Optimized subgraph around ReduceMean node '{}'\".format(reduce.name))\n\n def update_nms(self, threshold=None, detections=None):\n \"\"\"\n Updates the graph to replace the NMS op by BatchedNMS_TRT TensorRT plugin node.\n :param threshold: Override the score threshold attribute. If set to None, use the value in the graph.\n :param detections: Override the max detections attribute. If set to None, use the value in the graph.\n \"\"\"\n\n def find_head_concat(name_scope):\n # This will find the concatenation node at the end of either Class Net or Box Net. 
These concatenation nodes\n # bring together prediction data for each of 5 scales.\n # The concatenated Class Net node will have shape [batch_size, num_anchors, num_classes],\n # and the concatenated Box Net node has the shape [batch_size, num_anchors, 4].\n # These concatenation nodes can be be found by searching for all Concat's and checking if the node two\n # steps above in the graph has a name that begins with either \"box_net/...\" or \"class_net/...\".\n for node in [node for node in self.graph.nodes if node.op == \"Transpose\" and name_scope in node.name]:\n concat = self.graph.find_descendant_by_op(node, \"Concat\")\n assert concat and len(concat.inputs) == 5\n log.info(\"Found {} node '{}' as the tip of {}\".format(concat.op, concat.name, name_scope))\n return concat\n\n def extract_anchors_tensor(split):\n # This will find the anchors that have been hardcoded somewhere within the ONNX graph.\n # The function will return a gs.Constant that can be directly used as an input to the NMS plugin.\n # The anchor tensor shape will be [1, num_anchors, 4]. Note that '1' is kept as first dim, regardless of\n # batch size, as it's not necessary to replicate the anchors for all images in the batch.\n\n # The anchors are available (one per coordinate) hardcoded as constants within certain box decoder nodes.\n # Each of these four constants have shape [1, num_anchors], so some numpy operations are used to expand the\n # dims and concatenate them as needed.\n\n # These constants can be found by starting from the Box Net's split operation , and for each coordinate,\n # walking down in the graph until either an Add or Mul node is found. The second input on this nodes will\n # be the anchor data required.\n def get_anchor_np(output_idx, op):\n node = self.graph.find_descendant_by_op(split.o(0, output_idx), op)\n assert node\n val = np.squeeze(node.inputs[1].values)\n return np.expand_dims(val.flatten(), axis=(0, 2))\n\n anchors_y = get_anchor_np(0, \"Add\")\n anchors_x = get_anchor_np(1, \"Add\")\n anchors_h = get_anchor_np(2, \"Mul\")\n anchors_w = get_anchor_np(3, \"Mul\")\n anchors = np.concatenate([anchors_y, anchors_x, anchors_h, anchors_w], axis=2)\n return gs.Constant(name=\"nms/anchors:0\", values=anchors)\n\n self.sanitize()\n\n head_names = []\n if self.api == \"AutoML\":\n head_names = [\"class_net/\", \"box_net/\"]\n if self.api == \"TFOD\":\n head_names = [\"/WeightSharedConvolutionalClassHead/\", \"/WeightSharedConvolutionalBoxHead/\"]\n\n # There are five nodes at the bottom of the graph that provide important connection points:\n\n # 1. Find the concat node at the end of the class net (multi-scale class predictor)\n class_net = find_head_concat(head_names[0])\n class_net_tensor = class_net.outputs[0]\n\n # 2. Find the concat node at the end of the box net (multi-scale localization predictor)\n box_net = find_head_concat(head_names[1])\n box_net_tensor = box_net.outputs[0]\n\n # 3. Find the split node that separates the box net coordinates and feeds them into the box decoder.\n box_net_split = self.graph.find_descendant_by_op(box_net, \"Split\")\n assert box_net_split and len(box_net_split.outputs) == 4\n\n # 4. Find the concat node at the end of the box decoder.\n box_decoder = self.graph.find_descendant_by_op(box_net_split, \"Concat\")\n assert box_decoder and len(box_decoder.inputs) == 4\n box_decoder_tensor = box_decoder.outputs[0]\n\n # 5. 
Find the NMS node.\n nms_node = self.graph.find_node_by_op(\"NonMaxSuppression\")\n\n # Extract NMS Configuration\n num_detections = int(nms_node.inputs[2].values) if detections is None else detections\n iou_threshold = float(nms_node.inputs[3].values)\n score_threshold = float(nms_node.inputs[4].values) if threshold is None else threshold\n num_classes = class_net.i().inputs[1].values[-1]\n normalized = True if self.api == \"TFOD\" else False\n\n # NMS Inputs and Attributes\n # NMS expects these shapes for its input tensors:\n # box_net: [batch_size, number_boxes, 4]\n # class_net: [batch_size, number_boxes, number_classes]\n # anchors: [1, number_boxes, 4] (if used)\n nms_op = None\n nms_attrs = None\n nms_inputs = None\n\n # EfficientNMS TensorRT Plugin\n # Fusing the decoder will always be faster, so this is the default NMS method supported. In this case,\n # three inputs are given to the NMS TensorRT node:\n # - The box predictions (from the Box Net node found above)\n # - The class predictions (from the Class Net node found above)\n # - The default anchor coordinates (from the extracted anchor constants)\n # As the original tensors from EfficientDet will be used, the NMS code type is set to 1 (Center+Size),\n # because this is the internal box coding format used by the network.\n anchors_tensor = extract_anchors_tensor(box_net_split)\n nms_inputs = [box_net_tensor, class_net_tensor, anchors_tensor]\n nms_op = \"EfficientNMS_TRT\"\n nms_attrs = {\n 'plugin_version': \"1\",\n 'background_class': -1,\n 'max_output_boxes': num_detections,\n 'score_threshold': max(0.01, score_threshold), # Keep threshold to at least 0.01 for better efficiency\n 'iou_threshold': iou_threshold,\n 'score_activation': True,\n 'box_coding': 1,\n }\n nms_output_classes_dtype = np.int32\n\n # NMS Outputs\n nms_output_num_detections = gs.Variable(name=\"num_detections\", dtype=np.int32, shape=['N', 1])\n nms_output_boxes = gs.Variable(name=\"detection_boxes\", dtype=np.float32,\n shape=['N', num_detections, 4])\n nms_output_scores = gs.Variable(name=\"detection_scores\", dtype=np.float32,\n shape=['N', num_detections])\n nms_output_classes = gs.Variable(name=\"detection_classes\", dtype=nms_output_classes_dtype,\n shape=['N', num_detections])\n\n nms_outputs = [nms_output_num_detections, nms_output_boxes, nms_output_scores, nms_output_classes]\n\n # Create the NMS Plugin node with the selected inputs. 
The outputs of the node will also become the final\n # outputs of the graph.\n self.graph.plugin(\n op=nms_op,\n name=\"nms/non_maximum_suppression\",\n inputs=nms_inputs,\n outputs=nms_outputs,\n attrs=nms_attrs)\n log.info(\"Created NMS plugin '{}' with attributes: {}\".format(nms_op, nms_attrs))\n\n self.graph.outputs = nms_outputs\n\n self.sanitize()\n\n\ndef main(args):\n effdet_gs = EfficientDetGraphSurgeon(args.saved_model)\n if args.tf2onnx:\n effdet_gs.save(args.tf2onnx)\n effdet_gs.update_preprocessor(args.input_format, args.input_size, args.preprocessor)\n effdet_gs.update_shapes()\n effdet_gs.update_network()\n effdet_gs.update_nms(args.nms_threshold, args.nms_detections)\n effdet_gs.save(args.onnx)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--saved_model\", required=True,\n help=\"The TensorFlow saved model directory to load\")\n parser.add_argument(\"-o\", \"--onnx\", required=True,\n help=\"The output ONNX model file to write\")\n parser.add_argument(\"-f\", \"--input_format\", default=\"NHWC\", choices=[\"NHWC\", \"NCHW\"],\n help=\"Set the input data format of the graph, either NCHW or NHWC, default: NHWC\")\n parser.add_argument(\"-i\", \"--input_size\", default=\"512,512\",\n help=\"Set the input shape of the graph, as a comma-separated dimensions in H,W format, \"\n \"default: 512,512\")\n parser.add_argument(\"-p\", \"--preprocessor\", default=\"imagenet\", choices=[\"imagenet\", \"scale_range\"],\n help=\"Set the preprocessor to apply on the graph, either 'imagenet' for standard mean \"\n \"subtraction and stdev normalization, or 'scale_range' for uniform [-1,+1] \"\n \"normalization as is used in the AdvProp models, default: imagenet\")\n parser.add_argument(\"-t\", \"--nms_threshold\", type=float,\n help=\"Override the NMS score threshold, default: use the original value in the model\")\n parser.add_argument(\"-d\", \"--nms_detections\", type=int,\n help=\"Override the NMS max detections, default: use the original value in the model\")\n parser.add_argument(\"--tf2onnx\",\n help=\"The path where to save the intermediate ONNX graph generated by tf2onnx, useful\"\n \"for graph debugging purposes, default: not saved\")\n args = parser.parse_args()\n main(args)\n" ]
[ [ "torch.nn.functional.nll_loss", "torch.nn.functional.log_softmax", "torch.nn.Conv2d", "torch.nn.Linear", "torch.no_grad", "torch.autograd.Variable" ], [ "tensorflow.Graph", "tensorflow.import_graph_def", "numpy.asarray", "numpy.squeeze", "numpy.concatenate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] } ]
mlweilert/bpnet
[ "dcc9e8d805f9de774ae9dcc62c20504915be614f", "dcc9e8d805f9de774ae9dcc62c20504915be614f", "dcc9e8d805f9de774ae9dcc62c20504915be614f" ]
[ "bpnet/samplers.py", "bpnet/data.py", "bpnet/metrics.py" ]
[ "\"\"\"\nModule implementing different samplers for the chipnexus data\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom kipoi_utils.external.torch.sampler import Sampler\nfrom kipoi_utils.data_utils import iterable_cycle\nimport warnings\nimport gin\n\n\ndef get_batch_sizes(p_vec, batch_size, verbose=True):\n \"\"\"Compute the individual batch sizes for different probabilities\n\n Args:\n p_vec: list of probabilities for each class\n batch_size: batch size\n\n Returns:\n rounded list p_vec[i]*batch_size\n \"\"\"\n p_vec = np.array(p_vec) / sum(p_vec)\n\n batch_sizes = np.round(p_vec * batch_size).astype(int)\n difference = batch_size - batch_sizes.sum()\n # Increase the largest one\n batch_sizes[batch_sizes.argmax()] += difference\n if verbose:\n print(\"Using batch sizes:\")\n print(batch_sizes)\n assert batch_sizes.sum() == batch_size\n return batch_sizes\n\n\[email protected]\nclass StratifiedRandomBatchSampler(Sampler):\n\n def __init__(self, class_vector, p_vec, batch_size, verbose=False):\n \"\"\"Stratified Sampling\n\n Args:\n class_vector (np.array): a vector of class labels\n p_vec (list[float]): list of probabilities for each class\n batch_size (int): batch_size\n verbose\n \"\"\"\n self.n_splits = int(class_vector.shape[0] / batch_size)\n self.class_vector = class_vector\n self.p_vec = p_vec\n self.batch_size = batch_size\n\n self.batch_sizes = get_batch_sizes(self.p_vec, self.batch_size, verbose=verbose)\n\n # check that the individual batch size will always be > 0\n for i, batch_size in enumerate(self.batch_sizes):\n if batch_size == 0:\n warnings.warn(\"Batch size for class {} is 0.\".format(i))\n\n self.classes = np.arange(len(p_vec))\n assert np.all(np.sort(pd.Series(self.class_vector).unique()) == self.classes)\n\n idx_all = np.arange(len(self.class_vector))\n self.class_idx_iterators = [iterable_cycle(np.random.permutation(idx_all[self.class_vector == cls]))\n for cls in self.classes]\n\n def __iter__(self):\n for i in range(len(self)):\n yield [next(self.class_idx_iterators[i])\n for i, batch_size in enumerate(self.batch_sizes)\n for j in range(batch_size)]\n\n def __len__(self):\n return len(self.class_vector) // self.batch_size\n\n\n# # OLD\n# # convenience samplers for ChIP-nexus data\n\n\n# def random(arr, n=10):\n# \"\"\"\n# Randomly sample the values\n# arr: numpy array\n# n = number of samples to draw\n# \"\"\"\n\n# return list(pd.Series(np.arange(len(arr))).sample(n).index)\n\n\n# def top_max_count(arr, end=10, start=0, keep=None):\n# \"\"\"\n# Return indices where arr has the highest max(pos) + max(neg)\n\n# Args:\n# arr: can be an array or a list of arrays\n# start: Where to start returning the values\n# end: where to stop\n# \"\"\"\n# if keep is None:\n# keep = np.arange(len(arr))\n# assert end > start\n# # Top maxcount indicies\n# return pd.Series(arr.max(1).sum(1))[keep].sort_values(ascending=False).index[start:end]\n\n\n# def top_sum_count(arr, end=10, start=0, keep=None):\n# \"\"\"\n# Return indices where arr has the highest number of counts\n\n# Args:\n# arr: can be an array or a list of arrays\n# start: Where to start returning the values\n# end: where to stop\n# \"\"\"\n# if keep is None:\n# keep = np.arange(len(arr))\n# assert end > start\n# return pd.Series(arr.sum(1).sum(1))[keep].sort_values(ascending=False).index[start:end]\n\n\n# def random_larger(arr, n=10, percentile=50):\n# \"\"\"Randomly sample the values larger than a certain quantile\n\n# arr: numpy array\n# n = number of samples to draw\n# \"\"\"\n# values = arr.sum(1).sum(1)\n# 
return list(pd.Series(np.arange(len(arr))[values > np.percentile(values, percentile)]).sample(n).index)\n", "import numpy as np\nfrom kipoi_utils.external.torch.sampler import BatchSampler\nimport collections\nfrom kipoi_utils.data_utils import (numpy_collate, numpy_collate_concat, get_dataset_item,\n DataloaderIterable, batch_gen, get_dataset_lens, iterable_cycle)\nfrom copy import deepcopy\nfrom bpnet.utils import flatten, unflatten\nfrom collections import OrderedDict\nimport pandas as pd\nfrom tqdm import tqdm\nfrom kipoi.writers import HDF5BatchWriter\nfrom kipoi.readers import HDF5Reader\n\nfrom kipoi.data import BaseDataLoader\ntry:\n import torch\n from torch.utils.data import DataLoader\n USE_TORCH = True\nexcept Exception:\n # use the Kipoi dataloader as a fall-back strategy\n from kipoi.data import DataLoader\n USE_TORCH = False\nimport abc\n\n\ndef to_numpy(data):\n if not USE_TORCH:\n return data\n if isinstance(data, torch.Tensor):\n return data.numpy()\n elif isinstance(data, collections.Mapping):\n return {key: to_numpy(data[key]) for key in data}\n elif isinstance(data, collections.Sequence):\n if isinstance(data[0], str):\n return data\n else:\n return [to_numpy(sample) for sample in data]\n else:\n raise ValueError(\"Leafs of the nested structure need to be numpy arrays\")\n\n\nclass Dataset(BaseDataLoader):\n \"\"\"An abstract class representing a Dataset.\n\n All other datasets should subclass it. All subclasses should override\n ``__len__``, that provides the size of the dataset, and ``__getitem__``,\n supporting integer indexing in range from 0 to len(self) exclusive.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def __getitem__(self, index):\n \"\"\"Return one sample\n\n index: {0, ..., len(self)-1}\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def __len__(self):\n \"\"\"Return the number of all samples\n \"\"\"\n raise NotImplementedError\n\n def _batch_iterable(self, batch_size=32, shuffle=False, num_workers=0, drop_last=False, **kwargs):\n \"\"\"Return a batch-iteratrable\n\n See batch_iter docs\n\n Returns:\n Iterable\n \"\"\"\n dl = DataLoader(self,\n batch_size=batch_size,\n # collate_fn=numpy_collate,\n shuffle=shuffle,\n num_workers=num_workers,\n drop_last=drop_last,\n **kwargs)\n return dl\n\n def batch_iter(self, batch_size=32, shuffle=False, num_workers=0, drop_last=False, **kwargs):\n \"\"\"Return a batch-iterator\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process\n (default: 0)\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If False and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. 
(default: False)\n\n Returns:\n iterator\n \"\"\"\n dl = self._batch_iterable(batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n drop_last=drop_last,\n **kwargs)\n return (to_numpy(batch) for batch in dl)\n\n def batch_train_iter(self, cycle=True, **kwargs):\n \"\"\"Returns samples directly useful for training the model:\n (x[\"inputs\"],x[\"targets\"])\n Args:\n cycle: when True, the returned iterator will run indefinitely go through the dataset\n Use True with `fit_generator` in Keras.\n **kwargs: Arguments passed to self.batch_iter(**kwargs)\n \"\"\"\n if cycle:\n return ((to_numpy(x[\"inputs\"]), to_numpy(x[\"targets\"]))\n for x in iterable_cycle(self._batch_iterable(**kwargs)))\n else:\n return ((x[\"inputs\"], x[\"targets\"]) for x in self.batch_iter(**kwargs))\n\n def batch_predict_iter(self, **kwargs):\n \"\"\"Returns samples directly useful for prediction x[\"inputs\"]\n Args:\n **kwargs: Arguments passed to self.batch_iter(**kwargs)\n \"\"\"\n return (x[\"inputs\"] for x in self.batch_iter(**kwargs))\n\n def load_all(self, batch_size=32, **kwargs):\n \"\"\"Load the whole dataset into memory\n Arguments:\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n \"\"\"\n from copy import deepcopy\n return numpy_collate_concat([deepcopy(x)\n for x in tqdm(self.batch_iter(batch_size,\n **kwargs),\n total=len(self) // batch_size)])\n\n\ndef nested_numpy_minibatch(data, batch_size=1):\n lens = get_dataset_lens(data)\n if isinstance(lens, collections.Mapping):\n ln = [v for v in lens.values()][0]\n elif isinstance(lens, collections.Sequence):\n ln = lens[0]\n else:\n ln = lens\n\n for idx in BatchSampler(range(ln),\n batch_size=batch_size,\n drop_last=False):\n yield get_dataset_item(data, idx)\n\n\nclass NumpyDataset(Dataset):\n \"\"\"Data-structure of arbitrarily nested arrays\n with the same first axis\n \"\"\"\n\n def __init__(self, data, attrs=None):\n \"\"\"\n\n Args:\n data: any arbitrarily nested dict/list of np.arrays\n with the same first axis size\n attrs: optional dictionary of attributes\n \"\"\"\n self.data = data\n if attrs is None:\n self.attrs = OrderedDict()\n else:\n self.attrs = attrs\n\n self._validate()\n\n def _validate(self):\n # Make sure the first axis is the same\n # for k,v in flatten(data).items():\n assert len(set(self.get_lens())) == 1\n\n def get_lens(self):\n return list(flatten(self.dapply(len)).values())\n\n def __len__(self):\n return self.get_lens()[0]\n\n def __getitem__(self, idx):\n def get_item(arr, idx):\n return arr[idx]\n return self.dapply(get_item, idx=idx)\n\n def loc(self, idx):\n return super().__init__(self[idx], attrs=deepcopy(self.attrs))\n\n def flatten(self):\n return super().__init__(flatten(self.data), attrs=deepcopy(self.attrs))\n\n def unflatten(self):\n return super().__init__(unflatten(self.data), attrs=deepcopy(self.attrs))\n\n def shapes(self):\n from pprint import pprint\n\n def get_shape(arr):\n return str(arr.shape)\n\n out = self.dapply(get_shape)\n pprint(out)\n\n def dapply(self, fn, *args, **kwargs):\n \"\"\"Apply a function to each element in the list\n\n Returns a nested dictionary\n \"\"\"\n def _dapply(data, fn, *args, **kwargs):\n if type(data).__module__ == 'numpy':\n return fn(data, *args, **kwargs)\n elif isinstance(data, collections.Mapping):\n return {key: _dapply(data[key], fn, *args, **kwargs) for key in data}\n elif isinstance(data, collections.Sequence):\n return [_dapply(sample, fn, *args, **kwargs) for sample in data]\n else:\n raise ValueError(\"Leafs of 
the nested structure need to be numpy arrays\")\n\n return _dapply(self.data, fn, *args, **kwargs)\n\n def sapply(self, fn, *args, **kwargs):\n \"\"\"Same as dapply but returns NumpyDataset\n \"\"\"\n return super().__init__(self.dapply(fn, *args, **kwargs), deepcopy(self.attrs))\n\n def aggregate(self, fn=np.mean, axis=0):\n \"\"\"Aggregate across all tracks\n\n Args:\n idx: subset index\n \"\"\"\n return self.dapply(fn, axis=axis)\n\n def shuffle(self):\n \"\"\"Permute the order of seqlets\n \"\"\"\n idx = pd.Series(np.arange(len(self))).sample(frac=1).values\n return self.loc(idx)\n\n def split(self, i):\n \"\"\"Split the Dataset at a certain index\n \"\"\"\n return self.loc(np.arange(i)), self.loc(np.arange(i, len(self)))\n\n def append(self, datax):\n \"\"\"Append two datasets\n \"\"\"\n return super().__init__(data=numpy_collate_concat([self.data, datax.data]),\n attrs=deepcopy(self.attrs))\n\n def save(self, file_path, **kwargs):\n \"\"\"Save the dataset to an hdf5 file\n \"\"\"\n obj = HDF5BatchWriter(file_path=file_path, **kwargs)\n obj.batch_write(self.data)\n # Store the attrs\n for k, v in self.attrs.items():\n obj.f.attrs[k] = v\n obj.close()\n\n @classmethod\n def load(cls, file_path):\n \"\"\"Load the dataset from an hdf5 dataset\n \"\"\"\n with HDF5Reader(file_path) as obj:\n data = obj.load_all()\n attrs = OrderedDict(obj.f.attrs)\n return cls(data, attrs)\n\n @classmethod\n def concat(cls, objects):\n return cls(data=numpy_collate_concat(objects), attrs=None)\n", "import sklearn.metrics as skm\nimport logging\nimport matplotlib.pyplot as plt\nfrom bpnet.utils import read_pkl\nfrom keras.models import load_model\nfrom bpnet.utils import _listify, create_tf_session\nfrom bpnet.stats import permute_array\nfrom bpnet.functions import softmax, mean\nimport os\nimport json\nfrom tqdm import tqdm\nimport matplotlib\nimport pandas as pd\nimport numpy as np\nfrom collections import OrderedDict\nimport gin\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\n# Metric helpers\ndef average_profile(pe):\n tasks = list(pe)\n binsizes = list(pe[tasks[0]])\n return {binsize: {\"auprc\": mean([pe[task][binsize]['auprc'] for task in tasks])}\n for binsize in binsizes}\n\n\ndef average_counts(pe):\n tasks = list(pe)\n metrics = list(pe[tasks[0]])\n return {metric: mean([pe[task][metric] for task in tasks])\n for metric in metrics}\n\n\ndef bin_counts_max(x, binsize=2):\n \"\"\"Bin the counts\n \"\"\"\n if binsize == 1:\n return x\n assert len(x.shape) == 3\n outlen = x.shape[1] // binsize\n xout = np.zeros((x.shape[0], outlen, x.shape[2]))\n for i in range(outlen):\n xout[:, i, :] = x[:, (binsize * i):(binsize * (i + 1)), :].max(1)\n return xout\n\n\ndef bin_counts_amb(x, binsize=2):\n \"\"\"Bin the counts\n \"\"\"\n if binsize == 1:\n return x\n assert len(x.shape) == 3\n outlen = x.shape[1] // binsize\n xout = np.zeros((x.shape[0], outlen, x.shape[2])).astype(float)\n for i in range(outlen):\n iterval = x[:, (binsize * i):(binsize * (i + 1)), :]\n has_amb = np.any(iterval == -1, axis=1)\n has_peak = np.any(iterval == 1, axis=1)\n # if no peak and has_amb -> -1\n # if no peak and no has_amb -> 0\n # if peak -> 1\n xout[:, i, :] = (has_peak - (1 - has_peak) * has_amb).astype(float)\n return xout\n\n\ndef bin_counts_summary(x, binsize=2, fn=np.max):\n \"\"\"Bin the counts\n \"\"\"\n if binsize == 1:\n return x\n assert len(x.shape) == 3\n outlen = x.shape[1] // binsize\n xout = np.zeros((x.shape[0], outlen, x.shape[2]))\n for i in range(outlen):\n xout[:, i, 
:] = np.apply_along_axis(fn, 1, x[:, (binsize * i):(binsize * (i + 1)), :])\n return xout\n\n\ndef eval_profile(yt, yp,\n pos_min_threshold=0.05,\n neg_max_threshold=0.01,\n required_min_pos_counts=2.5,\n binsizes=[1, 2, 4, 10]):\n \"\"\"\n Evaluate the profile in terms of auPR\n\n Args:\n yt: true profile (counts)\n yp: predicted profile (fractions)\n pos_min_threshold: fraction threshold above which the position is\n considered to be a positive\n neg_max_threshold: fraction threshold bellow which the position is\n considered to be a negative\n required_min_pos_counts: smallest number of reads the peak should be\n supported by. All regions where 0.05 of the total reads would be\n less than required_min_pos_counts are excluded\n \"\"\"\n # The filtering\n # criterion assures that each position in the positive class is\n # supported by at least required_min_pos_counts of reads\n do_eval = yt.sum(axis=1).mean(axis=1) > required_min_pos_counts / pos_min_threshold\n\n # make sure everything sums to one\n yp = yp / yp.sum(axis=1, keepdims=True)\n fracs = yt / yt.sum(axis=1, keepdims=True)\n\n yp_random = permute_array(permute_array(yp[do_eval], axis=1), axis=0)\n out = []\n for binsize in binsizes:\n is_peak = (fracs >= pos_min_threshold).astype(float)\n ambigous = (fracs < pos_min_threshold) & (fracs >= neg_max_threshold)\n is_peak[ambigous] = -1\n y_true = np.ravel(bin_counts_amb(is_peak[do_eval], binsize))\n\n imbalance = np.sum(y_true == 1) / np.sum(y_true >= 0)\n n_positives = np.sum(y_true == 1)\n n_ambigous = np.sum(y_true == -1)\n frac_ambigous = n_ambigous / y_true.size\n\n # TODO - I used to have bin_counts_max over here instead of bin_counts_sum\n try:\n res = auprc(y_true,\n np.ravel(bin_counts_max(yp[do_eval], binsize)))\n res_random = auprc(y_true,\n np.ravel(bin_counts_max(yp_random, binsize)))\n except Exception:\n res = np.nan\n res_random = np.nan\n\n out.append({\"binsize\": binsize,\n \"auprc\": res,\n \"random_auprc\": res_random,\n \"n_positives\": n_positives,\n \"frac_ambigous\": frac_ambigous,\n \"imbalance\": imbalance\n })\n\n return pd.DataFrame.from_dict(out)\n\n# --------------------------------------------\n\n\[email protected]\nclass BPNetSeparatePostproc:\n\n def __init__(self, tasks):\n self.tasks = tasks\n\n def __call__(self, y_true, preds):\n profile_preds = {task: softmax(preds[task_i])\n for task_i, task in enumerate(self.tasks)}\n count_preds = {task: preds[len(self.tasks) + task_i].sum(axis=-1)\n for task_i, task in enumerate(self.tasks)}\n profile_true = {task: y_true[f'profile/{task}']\n for task in self.tasks}\n counts_true = {task: y_true[f'counts/{task}'].sum(axis=-1)\n for task in self.tasks}\n return ({\"profile\": profile_true, \"counts\": counts_true},\n {\"profile\": profile_preds, \"counts\": count_preds})\n\n\[email protected]\nclass BPNetSinglePostproc:\n \"\"\"Example where we predict a single track\n \"\"\"\n\n def __init__(self, tasks):\n self.tasks = tasks\n\n def __call__(self, y_true, preds):\n profile_preds = {task: preds[task_i] / preds[task_i].sum(axis=-2, keepdims=True)\n for task_i, task in enumerate(self.tasks)}\n count_preds = {task: np.log(1 + preds[task_i].sum(axis=(-2, -1)))\n for task_i, task in enumerate(self.tasks)}\n\n profile_true = {task: y_true[f'profile/{task}']\n for task in self.tasks}\n counts_true = {task: np.log(1 + y_true[f'profile/{task}'].sum(axis=(-2, -1)))\n for task in self.tasks}\n return ({\"profile\": profile_true, \"counts\": counts_true},\n {\"profile\": profile_preds, \"counts\": 
count_preds})\n\n\[email protected]\nclass BPNetMetric:\n \"\"\"BPNet metrics when the net is predicting counts and profile separately\n \"\"\"\n\n def __init__(self, tasks, count_metric,\n profile_metric=None,\n postproc_fn=None):\n \"\"\"\n\n Args:\n tasks: tasks\n count_metric: count evaluation metric\n profile_metric: profile evaluation metric\n \"\"\"\n self.tasks = tasks\n self.count_metric = count_metric\n self.profile_metric = profile_metric\n\n if postproc_fn is None:\n self.postproc_fn = BPNetSeparatePostproc(tasks=self.tasks)\n else:\n self.postproc_fn = postproc_fn\n\n def __call__(self, y_true, preds):\n # extract the profile and count predictions\n\n y_true, preds = self.postproc_fn(y_true, preds)\n\n out = {}\n out[\"counts\"] = {task: self.count_metric(y_true['counts'][task],\n preds['counts'][task])\n for task in self.tasks}\n out[\"counts\"]['avg'] = average_counts(out[\"counts\"])\n\n out[\"avg\"] = {\"counts\": out[\"counts\"]['avg']} # new system compatibility\n if self.profile_metric is not None:\n out[\"profile\"] = {task: self.profile_metric(y_true['profile'][task],\n preds['profile'][task])\n for task in self.tasks}\n out[\"profile\"]['avg'] = average_profile(out[\"profile\"])\n out[\"avg\"]['profile'] = out[\"profile\"]['avg']\n return out\n\n\[email protected]\nclass BPNetMetricSingleProfile:\n \"\"\"BPNet metrics when the net is predicting the total counts + profile at the same time\n \"\"\"\n\n def __init__(self, count_metric,\n profile_metric=None):\n \"\"\"\n\n Args:\n tasks: tasks\n count_metric: count evaluation metric\n profile_metric: profile evaluation metric\n \"\"\"\n # self.tasks = tasks\n self.count_metric = count_metric\n self.profile_metric = profile_metric\n\n def __call__(self, y_true, preds):\n # extract the profile and count predictions\n out = {}\n\n # sum across positions + strands\n out[\"counts\"] = self.count_metric(np.log(1 + y_true.sum(axis=(-2, -1))),\n np.log(1 + preds.sum(axis=(-2, -1))))\n\n if self.profile_metric is not None:\n out[\"profile\"] = self.profile_metric(y_true, preds)\n return out\n\n\[email protected]\nclass PeakPredictionProfileMetric:\n\n def __init__(self, pos_min_threshold=0.05,\n neg_max_threshold=0.01,\n required_min_pos_counts=2.5,\n binsizes=[1, 10]):\n\n self.pos_min_threshold = pos_min_threshold\n self.neg_max_threshold = neg_max_threshold\n self.required_min_pos_counts = required_min_pos_counts\n self.binsizes = binsizes\n\n def __call__(self, y_true, y_pred):\n out = eval_profile(y_true, y_pred,\n pos_min_threshold=self.pos_min_threshold,\n neg_max_threshold=self.neg_max_threshold,\n required_min_pos_counts=self.required_min_pos_counts,\n binsizes=self.binsizes)\n\n return {f\"binsize={k}\": v for k, v in out.set_index(\"binsize\").to_dict(\"index\").items()}\n\n\ndefault_peak_pred_metric = PeakPredictionProfileMetric(pos_min_threshold=0.015,\n neg_max_threshold=0.005,\n required_min_pos_counts=2.5,\n binsizes=[1, 10])\n\n\n# --------------------------------------------\n# Combined metrics\n\n\[email protected]\nclass BootstrapMetric:\n def __init__(self, metric, n):\n \"\"\"\n Args:\n metric: a function accepting (y_true and y_pred) and\n returning the evaluation result\n n: number of bootstrap samples to draw\n \"\"\"\n self.metric = metric\n self.n = n\n\n def __call__(self, y_true, y_pred):\n outl = []\n for i in range(self.n):\n bsamples = (\n pd.Series(np.arange(len(y_true))).sample(frac=1, replace=True).values\n )\n outl.append(self.metric(y_true[bsamples], y_pred[bsamples]))\n return outl\n\n\[email 
protected]\nclass MetricsList:\n \"\"\"Wraps a list of metrics into a single metric returning a list\"\"\"\n\n def __init__(self, metrics):\n self.metrics = metrics\n\n def __call__(self, y_true, y_pred):\n return [metric(y_true, y_pred) for metric in self.metrics]\n\n\[email protected]\nclass MetricsDict:\n \"\"\"Wraps a dictionary of metrics into a single metric returning a dictionary\"\"\"\n\n def __init__(self, metrics):\n self.metrics = metrics\n\n def __call__(self, y_true, y_pred):\n return {k: metric(y_true, y_pred) for k, metric in self.metrics.items()}\n\n\[email protected]\nclass MetricsTupleList:\n \"\"\"Wraps a dictionary of metrics into a single metric returning a dictionary\"\"\"\n\n def __init__(self, metrics):\n self.metrics = metrics\n\n def __call__(self, y_true, y_pred):\n return [(k, metric(y_true, y_pred)) for k, metric in self.metrics]\n\n\[email protected]\nclass MetricsOrderedDict:\n \"\"\"Wraps a OrderedDict/tuple list of metrics into a single metric\n returning an OrderedDict\n \"\"\"\n\n def __init__(self, metrics):\n self.metrics = metrics\n\n def __call__(self, y_true, y_pred):\n return OrderedDict([(k, metric(y_true, y_pred)) for k, metric in self.metrics])\n\n\[email protected]\nclass MetricsMultiTask:\n \"\"\"Run the same metric across multiple tasks\n \"\"\"\n\n def __init__(self, metrics, task_names=None):\n self.metrics = metrics\n self.task_names = task_names\n\n def __call__(self, y_true, y_pred):\n n_tasks = y_true.shape[1]\n if self.task_names is None:\n self.task_names = [i for i in range(n_tasks)]\n else:\n assert len(self.task_names) == n_tasks\n return OrderedDict([(task, self.metrics(y_true[:, i], y_pred[:, i]))\n for i, task in enumerate(self.task_names)])\n\n\[email protected]\nclass MetricsAggregated:\n\n def __init__(self,\n metrics,\n agg_fn={\"mean\": np.mean, \"std\": np.std},\n prefix=\"\"):\n self.metrics\n self.agg_fn = agg_fn\n self.prefix = prefix\n\n def __call__(self, y_true, y_pred):\n out = self.metrics(y_true, y_pred)\n # TODO - generalize using numpy_collate?\n m = np.array(list(out.values()))\n return {self.prefix + k: fn(m) for k, fn in self.agg_fn}\n\n\[email protected]\nclass MetricsConcise:\n\n def __init__(self, metrics):\n import concise\n self.metrics_dict = OrderedDict([(m, concise.eval_metrics.get(m))\n for m in metrics])\n\n def __call__(self, y_true, y_pred):\n return OrderedDict([(m, fn(y_true, y_pred))\n for m, fn in self.metrics_dict.items()])\n\n\n# -----------------------------\n# Binary classification\n# Metric helpers\nMASK_VALUE = -1\n# Binary classification\n\n\ndef _mask_nan(y_true, y_pred):\n mask_array = ~np.isnan(y_true)\n if np.any(np.isnan(y_pred)):\n print(\"WARNING: y_pred contains {0}/{1} np.nan values. 
removing them...\".\n format(np.sum(np.isnan(y_pred)), y_pred.size))\n mask_array = np.logical_and(mask_array, ~np.isnan(y_pred))\n return y_true[mask_array], y_pred[mask_array]\n\n\ndef _mask_value(y_true, y_pred, mask=MASK_VALUE):\n mask_array = y_true != mask\n return y_true[mask_array], y_pred[mask_array]\n\n\ndef _mask_value_nan(y_true, y_pred, mask=MASK_VALUE):\n y_true, y_pred = _mask_nan(y_true, y_pred)\n return _mask_value(y_true, y_pred, mask)\n\n\[email protected]\ndef n_positive(y_true, y_pred):\n return y_true.sum()\n\n\[email protected]\ndef n_negative(y_true, y_pred):\n return (1 - y_true).sum()\n\n\[email protected]\ndef frac_positive(y_true, y_pred):\n return y_true.mean()\n\n\[email protected]\ndef accuracy(y_true, y_pred, round=True):\n \"\"\"Classification accuracy\n \"\"\"\n y_true, y_pred = _mask_value_nan(y_true, y_pred)\n if round:\n y_true = np.round(y_true)\n y_pred = np.round(y_pred)\n return skm.accuracy_score(y_true, y_pred)\n\n\[email protected]\ndef auc(y_true, y_pred, round=True):\n \"\"\"Area under the ROC curve\n \"\"\"\n y_true, y_pred = _mask_value_nan(y_true, y_pred)\n\n if round:\n y_true = y_true.round()\n if len(y_true) == 0 or len(np.unique(y_true)) < 2:\n return np.nan\n return skm.roc_auc_score(y_true, y_pred)\n\n\[email protected]\ndef auprc(y_true, y_pred):\n \"\"\"Area under the precision-recall curve\n \"\"\"\n y_true, y_pred = _mask_value_nan(y_true, y_pred)\n return skm.average_precision_score(y_true, y_pred)\n\n\[email protected]\ndef mcc(y_true, y_pred, round=True):\n \"\"\"Matthews correlation coefficient\n \"\"\"\n y_true, y_pred = _mask_value_nan(y_true, y_pred)\n if round:\n y_true = np.round(y_true)\n y_pred = np.round(y_pred)\n return skm.matthews_corrcoef(y_true, y_pred)\n\n\[email protected]\ndef f1(y_true, y_pred, round=True):\n \"\"\"F1 score: `2 * (p * r) / (p + r)`, where p=precision and r=recall.\n \"\"\"\n y_true, y_pred = _mask_value_nan(y_true, y_pred)\n if round:\n y_true = np.round(y_true)\n y_pred = np.round(y_pred)\n return skm.f1_score(y_true, y_pred)\n\n\[email protected]\ndef cat_acc(y_true, y_pred):\n \"\"\"Categorical accuracy\n \"\"\"\n return np.mean(y_true.argmax(axis=1) == y_pred.argmax(axis=1))\n\n\nclassification_metrics = [\n (\"auPR\", auprc),\n (\"auROC\", auc),\n (\"accuracy\", accuracy),\n (\"n_positive\", n_positive),\n (\"n_negative\", n_negative),\n (\"frac_positive\", frac_positive),\n]\n\n\[email protected]\nclass ClassificationMetrics:\n \"\"\"All classification metrics\n \"\"\"\n cls_metrics = classification_metrics\n\n def __init__(self):\n self.classification_metric = MetricsOrderedDict(self.cls_metrics)\n\n def __call__(self, y_true, y_pred):\n return self.classification_metric(y_true, y_pred)\n# TODO - add gin macro for a standard set of classification and regession metrics\n\n\n# --------------------------------------------\n# Regression\n\[email protected]\ndef cor(y_true, y_pred):\n \"\"\"Compute Pearson correlation coefficient.\n \"\"\"\n y_true, y_pred = _mask_nan(y_true, y_pred)\n return np.corrcoef(y_true, y_pred)[0, 1]\n\n\[email protected]\ndef kendall(y_true, y_pred, nb_sample=100000):\n \"\"\"Kendall's tau coefficient, Kendall rank correlation coefficient\n \"\"\"\n from scipy.stats import kendalltau\n y_true, y_pred = _mask_nan(y_true, y_pred)\n if len(y_true) > nb_sample:\n idx = np.arange(len(y_true))\n np.random.shuffle(idx)\n idx = idx[:nb_sample]\n y_true = y_true[idx]\n y_pred = y_pred[idx]\n return kendalltau(y_true, y_pred)[0]\n\n\[email protected]\ndef mad(y_true, 
y_pred):\n \"\"\"Median absolute deviation\n \"\"\"\n y_true, y_pred = _mask_nan(y_true, y_pred)\n return np.mean(np.abs(y_true - y_pred))\n\n\[email protected]\ndef rmse(y_true, y_pred):\n \"\"\"Root mean-squared error\n \"\"\"\n return np.sqrt(mse(y_true, y_pred))\n\n\[email protected]\ndef rrmse(y_true, y_pred):\n \"\"\"1 - rmse\n \"\"\"\n return 1 - rmse(y_true, y_pred)\n\n\[email protected]\ndef mse(y_true, y_pred):\n \"\"\"Mean squared error\n \"\"\"\n y_true, y_pred = _mask_nan(y_true, y_pred)\n return ((y_true - y_pred) ** 2).mean(axis=None)\n\n\[email protected]\ndef ermse(y_true, y_pred):\n \"\"\"Exponentiated root-mean-squared error\n \"\"\"\n return 10**np.sqrt(mse(y_true, y_pred))\n\n\[email protected]\ndef var_explained(y_true, y_pred):\n \"\"\"Fraction of variance explained.\n \"\"\"\n y_true, y_pred = _mask_nan(y_true, y_pred)\n var_resid = np.var(y_true - y_pred)\n var_y_true = np.var(y_true)\n return 1 - var_resid / var_y_true\n\n\[email protected]\ndef pearsonr(y_true, y_pred):\n from scipy.stats import pearsonr\n y_true, y_pred = _mask_nan(y_true, y_pred)\n return pearsonr(y_true, y_pred)[0]\n\n\[email protected]\ndef spearmanr(y_true, y_pred):\n from scipy.stats import spearmanr\n y_true, y_pred = _mask_nan(y_true, y_pred)\n return spearmanr(y_true, y_pred)[0]\n\n\[email protected]\ndef pearson_spearman(yt, yp):\n return {\"pearsonr\": pearsonr(yt, yp),\n \"spearmanr\": spearmanr(yt, yp)}\n\n\nregression_metrics = [\n (\"mse\", mse),\n (\"var_explained\", var_explained),\n (\"pearsonr\", pearsonr), # pearson and spearman correlation\n (\"spearmanr\", spearmanr),\n (\"mad\", mad), # median absolute deviation\n]\n\n\[email protected]\nclass RegressionMetrics:\n \"\"\"All classification metrics\n \"\"\"\n cls_metrics = regression_metrics\n\n def __init__(self):\n self.regression_metric = MetricsOrderedDict(self.cls_metrics)\n\n def __call__(self, y_true, y_pred):\n # squeeze the last dimension\n if y_true.ndim == 2 and y_true.shape[1] == 1:\n y_true = np.ravel(y_true)\n if y_pred.ndim == 2 and y_pred.shape[1] == 1:\n y_pred = np.ravel(y_pred)\n\n return self.regression_metric(y_true, y_pred)\n\n\n# available eval metrics --------------------------------------------\n\n\nBINARY_CLASS = [\"auc\", \"auprc\", \"accuracy\", \"tpr\", \"tnr\", \"f1\", \"mcc\"]\nCATEGORY_CLASS = [\"cat_acc\"]\nREGRESSION = [\"mse\", \"mad\", \"cor\", \"ermse\", \"var_explained\"]\n\nAVAILABLE = BINARY_CLASS + CATEGORY_CLASS + REGRESSION\n" ]
[ [ "numpy.round", "numpy.array", "numpy.random.permutation", "pandas.Series" ], [ "numpy.arange" ], [ "sklearn.metrics.roc_auc_score", "sklearn.metrics.matthews_corrcoef", "numpy.round", "numpy.any", "numpy.var", "scipy.stats.spearmanr", "sklearn.metrics.f1_score", "numpy.unique", "numpy.apply_along_axis", "numpy.ravel", "numpy.zeros", "numpy.isnan", "scipy.stats.pearsonr", "pandas.DataFrame.from_dict", "numpy.corrcoef", "numpy.sum", "numpy.abs", "numpy.random.shuffle", "sklearn.metrics.average_precision_score", "scipy.stats.kendalltau", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
ankitdipto/sumo-rl
[ "70d75d463fa09d0ecfc10589b66955c22c8df41b" ]
[ "sumo_rl/environment/env.py" ]
[ "import os\nimport sys\nfrom pathlib import Path\nfrom typing import Optional, Union, Tuple\nimport sumo_rl\nif 'SUMO_HOME' in os.environ:\n tools = os.path.join(os.environ['SUMO_HOME'], 'tools')\n sys.path.append(tools)\nelse:\n sys.exit(\"Please declare the environment variable 'SUMO_HOME'\")\nimport traci\nimport sumolib\nimport gym\nfrom gym.envs.registration import EnvSpec\nimport numpy as np\nimport pandas as pd\n\nfrom .traffic_signal import TrafficSignal\n\nfrom gym.utils import EzPickle, seeding\nfrom pettingzoo import AECEnv\nfrom pettingzoo.utils.agent_selector import agent_selector\nfrom pettingzoo import AECEnv\nfrom pettingzoo.utils import agent_selector, wrappers\nfrom pettingzoo.utils.conversions import parallel_wrapper_fn\n\nLIBSUMO = 'LIBSUMO_AS_TRACI' in os.environ\n\n\ndef env(**kwargs):\n env = SumoEnvironmentPZ(**kwargs)\n env = wrappers.AssertOutOfBoundsWrapper(env)\n env = wrappers.OrderEnforcingWrapper(env)\n return env\n\nparallel_env = parallel_wrapper_fn(env)\n\n\nclass SumoEnvironment(gym.Env):\n \"\"\"\n SUMO Environment for Traffic Signal Control\n\n :param net_file: (str) SUMO .net.xml file\n :param route_file: (str) SUMO .rou.xml file\n :param out_csv_name: (Optional[str]) name of the .csv output with simulation results. If None no output is generated\n :param use_gui: (bool) Wheter to run SUMO simulation with GUI visualisation\n :param virtual_display: (Optional[Tuple[int,int]]) Resolution of a virtual display for rendering\n :param begin_time: (int) The time step (in seconds) the simulation starts\n :param num_seconds: (int) Number of simulated seconds on SUMO. The time in seconds the simulation must end.\n :param max_depart_delay: (int) Vehicles are discarded if they could not be inserted after max_depart_delay seconds\n :param delta_time: (int) Simulation seconds between actions\n :param min_green: (int) Minimum green time in a phase\n :param max_green: (int) Max green time in a phase\n :single_agent: (bool) If true, it behaves like a regular gym.Env. Else, it behaves like a MultiagentEnv (https://github.com/ray-project/ray/blob/master/python/ray/rllib/env/multi_agent_env.py)\n :sumo_seed: (int/string) Random seed for sumo. 
If 'random' it uses a randomly chosen seed.\n :fixed_ts: (bool) If true, it will follow the phase configuration in the route_file and ignore the actions.\n :sumo_warnings: (bool) If False, remove SUMO warnings in the terminal\n \"\"\"\n CONNECTION_LABEL = 0 # For traci multi-client support\n\n def __init__(\n self, \n net_file: str, \n route_file: str, \n out_csv_name: Optional[str] = None, \n use_gui: bool = False, \n virtual_display: Optional[Tuple[int,int]] = None,\n begin_time: int = 0, \n num_seconds: int = 20000, \n max_depart_delay: int = 100000,\n time_to_teleport: int = -1, \n delta_time: int = 5, \n yellow_time: int = 2, \n min_green: int = 5, \n max_green: int = 50, \n single_agent: bool = False, \n sumo_seed: Union[str,int] = 'random', \n fixed_ts: bool = False,\n sumo_warnings: bool = True,\n ):\n self._net = net_file\n self._route = route_file\n self.use_gui = use_gui\n if self.use_gui:\n self._sumo_binary = sumolib.checkBinary('sumo-gui')\n else:\n self._sumo_binary = sumolib.checkBinary('sumo')\n\n self.virtual_display = virtual_display\n\n assert delta_time > yellow_time, \"Time between actions must be at least greater than yellow time.\"\n\n self.begin_time = begin_time\n self.sim_max_time = num_seconds\n self.delta_time = delta_time # seconds on sumo at each step\n self.max_depart_delay = max_depart_delay # Max wait time to insert a vehicle\n self.time_to_teleport = time_to_teleport\n self.min_green = min_green\n self.max_green = max_green\n self.yellow_time = yellow_time\n self.single_agent = single_agent\n self.sumo_seed = sumo_seed\n self.fixed_ts = fixed_ts\n self.sumo_warnings = sumo_warnings\n self.label = str(SumoEnvironment.CONNECTION_LABEL)\n SumoEnvironment.CONNECTION_LABEL += 1\n self.sumo = None\n\n if LIBSUMO:\n traci.start([sumolib.checkBinary('sumo'), '-n', self._net]) # Start only to retrieve traffic light information\n conn = traci\n else:\n traci.start([sumolib.checkBinary('sumo'), '-n', self._net], label='init_connection'+self.label)\n conn = traci.getConnection('init_connection'+self.label)\n self.ts_ids = list(conn.trafficlight.getIDList())\n self.traffic_signals = {ts: TrafficSignal(self, \n ts, \n self.delta_time, \n self.yellow_time, \n self.min_green, \n self.max_green, \n self.begin_time,\n conn) for ts in self.ts_ids}\n conn.close()\n\n self.vehicles = dict()\n self.reward_range = (-float('inf'), float('inf'))\n self.metadata = {}\n self.spec = EnvSpec('SUMORL-v0')\n self.run = 0\n self.metrics = []\n self.out_csv_name = out_csv_name\n self.observations = {ts: None for ts in self.ts_ids}\n self.rewards = {ts: None for ts in self.ts_ids}\n \n def _start_simulation(self):\n sumo_cmd = [self._sumo_binary,\n '-n', self._net,\n '-r', self._route,\n '--max-depart-delay', str(self.max_depart_delay), \n '--waiting-time-memory', '10000',\n '--time-to-teleport', str(self.time_to_teleport)]\n if self.begin_time > 0:\n sumo_cmd.append('-b {}'.format(self.begin_time))\n if self.sumo_seed == 'random':\n sumo_cmd.append('--random')\n else:\n sumo_cmd.extend(['--seed', str(self.sumo_seed)])\n if not self.sumo_warnings:\n sumo_cmd.append('--no-warnings')\n if self.use_gui:\n sumo_cmd.extend(['--start', '--quit-on-end'])\n if self.virtual_display is not None:\n sumo_cmd.extend(['--window-size', f'{self.virtual_display[0]},{self.virtual_display[1]}'])\n from pyvirtualdisplay.smartdisplay import SmartDisplay\n print(\"Creating a virtual display.\")\n self.disp = SmartDisplay(size=self.virtual_display)\n self.disp.start()\n print(\"Virtual display started.\")\n\n 
if LIBSUMO:\n traci.start(sumo_cmd)\n self.sumo = traci\n else:\n traci.start(sumo_cmd, label=self.label)\n self.sumo = traci.getConnection(self.label)\n \n if self.use_gui:\n self.sumo.gui.setSchema(traci.gui.DEFAULT_VIEW, \"real world\") \n\n def reset(self):\n if self.run != 0:\n self.close()\n self.save_csv(self.out_csv_name, self.run)\n self.run += 1\n self.metrics = []\n\n self._start_simulation()\n\n self.traffic_signals = {ts: TrafficSignal(self, \n ts, \n self.delta_time, \n self.yellow_time, \n self.min_green, \n self.max_green, \n self.begin_time,\n self.sumo) for ts in self.ts_ids}\n self.vehicles = dict()\n\n if self.single_agent:\n return self._compute_observations()[self.ts_ids[0]]\n else:\n return self._compute_observations()\n\n @property\n def sim_step(self):\n \"\"\"\n Return current simulation second on SUMO\n \"\"\"\n return self.sumo.simulation.getTime()\n\n def step(self, action):\n # No action, follow fixed TL defined in self.phases\n if action is None or action == {}:\n for _ in range(self.delta_time):\n self._sumo_step()\n else:\n self._apply_actions(action)\n self._run_steps()\n\n observations = self._compute_observations()\n rewards = self._compute_rewards()\n dones = self._compute_dones()\n self._compute_info()\n\n if self.single_agent:\n return observations[self.ts_ids[0]], rewards[self.ts_ids[0]], dones['__all__'], {}\n else:\n return observations, rewards, dones, {}\n\n def _run_steps(self):\n time_to_act = False\n while not time_to_act:\n self._sumo_step()\n for ts in self.ts_ids:\n self.traffic_signals[ts].update()\n if self.traffic_signals[ts].time_to_act:\n time_to_act = True\n\n def _apply_actions(self, actions):\n \"\"\"\n Set the next green phase for the traffic signals\n :param actions: If single-agent, actions is an int between 0 and self.num_green_phases (next green phase)\n If multiagent, actions is a dict {ts_id : greenPhase}\n \"\"\" \n if self.single_agent:\n if self.traffic_signals[self.ts_ids[0]].time_to_act:\n self.traffic_signals[self.ts_ids[0]].set_next_phase(actions)\n else:\n for ts, action in actions.items():\n if self.traffic_signals[ts].time_to_act:\n self.traffic_signals[ts].set_next_phase(action)\n\n def _compute_dones(self):\n dones = {ts_id: False for ts_id in self.ts_ids}\n dones['__all__'] = self.sim_step > self.sim_max_time\n return dones\n \n def _compute_info(self):\n info = self._compute_step_info()\n self.metrics.append(info)\n\n def _compute_observations(self):\n self.observations.update({ts: self.traffic_signals[ts].compute_observation() for ts in self.ts_ids if self.traffic_signals[ts].time_to_act})\n return {ts: self.observations[ts].copy() for ts in self.observations.keys() if self.traffic_signals[ts].time_to_act}\n\n def _compute_rewards(self):\n self.rewards.update({ts: self.traffic_signals[ts].compute_reward() for ts in self.ts_ids if self.traffic_signals[ts].time_to_act})\n return {ts: self.rewards[ts] for ts in self.rewards.keys() if self.traffic_signals[ts].time_to_act}\n\n @property\n def observation_space(self):\n return self.traffic_signals[self.ts_ids[0]].observation_space\n \n @property\n def action_space(self):\n return self.traffic_signals[self.ts_ids[0]].action_space\n \n def observation_spaces(self, ts_id):\n return self.traffic_signals[ts_id].observation_space\n \n def action_spaces(self, ts_id):\n return self.traffic_signals[ts_id].action_space\n\n def _sumo_step(self):\n self.sumo.simulationStep()\n\n def _compute_step_info(self):\n return {\n 'step_time': self.sim_step,\n 'reward': 
self.traffic_signals[self.ts_ids[0]].last_reward,\n 'total_stopped': sum(self.traffic_signals[ts].get_total_queued() for ts in self.ts_ids),\n 'total_wait_time': sum(sum(self.traffic_signals[ts].get_waiting_time_per_lane()) for ts in self.ts_ids)\n }\n\n def close(self):\n if self.sumo is None:\n return\n if not LIBSUMO:\n traci.switch(self.label)\n traci.close()\n try:\n self.disp.stop()\n except AttributeError:\n pass\n self.sumo = None\n \n def __del__(self):\n self.close()\n \n def render(self, mode='human'):\n if self.virtual_display:\n #img = self.sumo.gui.screenshot(traci.gui.DEFAULT_VIEW,\n # f\"temp/img{self.sim_step}.jpg\", \n # width=self.virtual_display[0],\n # height=self.virtual_display[1])\n img = self.disp.grab()\n if mode == 'rgb_array':\n return np.array(img)\n return img \n \n def save_csv(self, out_csv_name, run):\n if out_csv_name is not None:\n df = pd.DataFrame(self.metrics)\n Path(Path(out_csv_name).parent).mkdir(parents=True, exist_ok=True)\n df.to_csv(out_csv_name + '_conn{}_run{}'.format(self.label, run) + '.csv', index=False)\n\n # Below functions are for discrete state space\n\n def encode(self, state, ts_id):\n phase = int(np.where(state[:self.traffic_signals[ts_id].num_green_phases] == 1)[0])\n min_green = state[self.traffic_signals[ts_id].num_green_phases]\n density_queue = [self._discretize_density(d) for d in state[self.traffic_signals[ts_id].num_green_phases + 1:]]\n # tuples are hashable and can be used as key in python dictionary\n return tuple([phase, min_green] + density_queue)\n\n def _discretize_density(self, density):\n return min(int(density*10), 9)\n\n\nclass SumoEnvironmentPZ(AECEnv, EzPickle):\n metadata = {'render.modes': ['human', 'rgb_array'], 'name': \"sumo_rl_v0\"}\n\n def __init__(self, **kwargs):\n EzPickle.__init__(self, **kwargs)\n self._kwargs = kwargs\n\n self.seed()\n self.env = SumoEnvironment(**self._kwargs)\n\n self.agents = self.env.ts_ids\n self.possible_agents = self.env.ts_ids\n self._agent_selector = agent_selector(self.agents)\n self.agent_selection = self._agent_selector.reset()\n # spaces\n self.action_spaces = {a: self.env.action_spaces(a) for a in self.agents}\n self.observation_spaces = {a: self.env.observation_spaces(a) for a in self.agents}\n\n # dicts\n self.rewards = {a: 0 for a in self.agents}\n self.dones = {a: False for a in self.agents}\n self.infos = {a: {} for a in self.agents}\n\n def seed(self, seed=None):\n self.randomizer, seed = seeding.np_random(seed)\n\n def reset(self):\n self.env.reset()\n self.agents = self.possible_agents[:]\n self.agent_selection = self._agent_selector.reset()\n self.rewards = {agent: 0 for agent in self.agents}\n self._cumulative_rewards = {agent: 0 for agent in self.agents}\n self.dones = {agent: False for agent in self.agents}\n self.infos = {agent: {} for agent in self.agents}\n \n def observation_space(self, agent):\n return self.observation_spaces[agent]\n\n def action_space(self, agent):\n return self.action_spaces[agent]\n\n def observe(self, agent):\n obs = self.env.observations[agent].copy()\n return obs\n\n def state(self):\n raise NotImplementedError('Method state() currently not implemented.')\n\n def close(self):\n self.env.close()\n\n def render(self, mode='human'):\n return self.env.render(mode)\n \n def save_csv(self, out_csv_name, run):\n self.env.save_csv(out_csv_name, run)\n\n def step(self, action):\n if self.dones[self.agent_selection]:\n return self._was_done_step(action)\n agent = self.agent_selection\n if not self.action_spaces[agent].contains(action):\n 
raise Exception('Action for agent {} must be in Discrete({}).'\n 'It is currently {}'.format(agent, self.action_spaces[agent].n, action))\n\n self.env._apply_actions({agent: action})\n\n if self._agent_selector.is_last():\n self.env._run_steps()\n self.env._compute_observations()\n self.rewards = self.env._compute_rewards()\n self.env._compute_info()\n else:\n self._clear_rewards()\n \n done = self.env._compute_dones()['__all__']\n self.dones = {a : done for a in self.agents}\n\n self.agent_selection = self._agent_selector.next()\n self._cumulative_rewards[agent] = 0\n self._accumulate_rewards()\n" ]
[ [ "numpy.array", "numpy.where", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
nick-parker/trimesh
[ "a7bc1e0489ec98e3a3516088a7e64c8beca8b41a" ]
[ "trimesh/remesh.py" ]
[ "\"\"\"\nremesh.py\n-------------\n\nDeal with re- triangulation of existing meshes.\n\"\"\"\n\nimport numpy as np\n\nimport collections\n\nfrom . import util\nfrom . import grouping\n\n\ndef subdivide(vertices, faces, face_index=None):\n \"\"\"\n Subdivide a mesh into smaller triangles.\n\n Parameters\n ----------\n vertices: (n,3) float, verticies\n faces: (n,3) int, indexes of vertices which make up triangular faces\n face_index: faces to subdivide.\n if None: all faces of mesh will be subdivided\n if (n,) int array of indices: only specified faces will be\n subdivided. Note that in this case the mesh will generally\n no longer be manifold, as the additional vertex on the midpoint\n will not be used by the adjacent faces to the faces specified,\n and an additional postprocessing step will be required to\n make resulting mesh watertight\n\n Returns\n ----------\n new_vertices: (n,3) float, vertices\n new_faces: (n,3) int, remeshed faces\n \"\"\"\n if face_index is None:\n face_index = np.arange(len(faces))\n else:\n face_index = np.asanyarray(face_index)\n\n # the (c,3) int set of vertex indices\n faces = faces[face_index]\n # the (c, 3, 3) float set of points in the triangles\n triangles = vertices[faces]\n # the 3 midpoints of each triangle edge vstacked to a (3*c, 3) float\n mid = np.vstack([triangles[:, g, :].mean(axis=1) for g in [[0, 1],\n [1, 2],\n [2, 0]]])\n mid_idx = (np.arange(len(face_index) * 3)).reshape((3, -1)).T\n # for adjacent faces we are going to be generating the same midpoint\n # twice, so we handle it here by finding the unique vertices\n unique, inverse = grouping.unique_rows(mid)\n\n mid = mid[unique]\n mid_idx = inverse[mid_idx] + len(vertices)\n # the new faces, with correct winding\n f = np.column_stack([faces[:, 0], mid_idx[:, 0], mid_idx[:, 2],\n mid_idx[:, 0], faces[:, 1], mid_idx[:, 1],\n mid_idx[:, 2], mid_idx[:, 1], faces[:, 2],\n mid_idx[:, 0], mid_idx[:, 1], mid_idx[:, 2], ]).reshape((-1, 3))\n # add the 3 new faces per old face\n new_faces = np.vstack((faces, f[len(face_index):]))\n # replace the old face with a smaller face\n new_faces[face_index] = f[:len(face_index)]\n\n new_vertices = np.vstack((vertices, mid))\n\n return new_vertices, new_faces\n\n\ndef subdivide_to_size(vertices, faces, max_edge, max_iter=10):\n \"\"\"\n Subdivide a mesh until every edge is shorter than a specified length.\n\n Will return a triangle soup, not a nicely structured mesh.\n\n Parameters\n ------------\n vertices: (n,3) float, vertices in space\n faces: (m,3) int, indices of vertices which make up triangles\n max_edge: float, maximum length of any edge in the result\n max_iter: int, the maximum number of times to run subdivisions\n\n Returns\n ------------\n vertices: (j,3) float, vertices in space\n faces: (q,3) int, indices of vertices\n \"\"\"\n done_face = collections.deque()\n done_vert = collections.deque()\n\n current_faces = faces\n current_vertices = vertices\n\n for i in range(max_iter + 1):\n triangles = current_vertices[current_faces]\n\n # compute the length of every triangle edge\n edge_lengths = (\n np.diff(triangles[:, [0, 1, 2, 0]], axis=1)**2).sum(axis=2) ** .5\n\n too_long = (edge_lengths > max_edge).any(axis=1)\n\n # clean up the faces a little bit so we don't carry a ton of unused\n # vertices\n unique, inverse = np.unique(current_faces[np.logical_not(too_long)],\n return_inverse=True)\n\n done_vert.append(current_vertices[unique])\n done_face.append(inverse.reshape((-1, 3)))\n\n if not too_long.any():\n break\n\n (current_vertices,\n 
current_faces) = subdivide(current_vertices,\n current_faces[too_long])\n\n vertices, faces = util.append_faces(done_vert, done_face)\n return vertices, faces\n" ]
[ [ "numpy.logical_not", "numpy.asanyarray", "numpy.diff", "numpy.column_stack", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Murat-Karadag/nlu
[ "6a2b5995ea543e63c40baaca1bf9ad8a9db36757" ]
[ "nlu/pipe/viz/streamlit_viz/viz_building_blocks/word_similarity.py" ]
[ "import nlu\nfrom nlu.discovery import Discoverer\nfrom nlu.pipe.utils.storage_ref_utils import StorageRefUtils\nfrom typing import List, Tuple, Optional, Dict, Union\nimport streamlit as st\nfrom nlu.utils.modelhub.modelhub_utils import ModelHubUtils\n\nimport numpy as np\nimport pandas as pd\nfrom nlu.pipe.viz.streamlit_viz.streamlit_utils_OS import StreamlitUtilsOS\nfrom nlu.pipe.viz.streamlit_viz.gen_streamlit_code import get_code_for_viz\nfrom nlu.pipe.viz.streamlit_viz.styles import _set_block_container_style\nimport random\nfrom nlu.pipe.viz.streamlit_viz.streamlit_viz_tracker import StreamlitVizTracker\n\n\nclass WordSimilarityStreamlitBlock():\n @staticmethod\n def display_word_similarity(\n pipe, # nlu pipe\n default_texts: Tuple[str, str] = (\"Donald Trump likes to party!\", \"Angela Merkel likes to party!\"),\n threshold: float = 0.5,\n title: Optional[str] = \"Embeddings Similarity Matrix & Visualizations \",\n sub_tile: Optional[\n str] = \"Visualize `word-wise similarity matrix` and calculate `similarity scores` for `2 texts` and every `word embedding` loaded\",\n write_raw_pandas: bool = False,\n display_embed_information: bool = True,\n similarity_matrix=True,\n show_algo_select: bool = True,\n dist_metrics: List[str] = ('cosine'),\n set_wide_layout_CSS: bool = True,\n generate_code_sample: bool = False,\n key: str = \"NLU_streamlit\",\n num_cols: int = 2,\n display_scalar_similarities: bool = False,\n display_similarity_summary: bool = False,\n model_select_position: str = 'side', # main or side\n show_infos: bool = True,\n show_logo: bool = True,\n ):\n\n \"\"\"We visualize the following cases :\n 1. Simmilarity between 2 words - > sim (word_emb1, word_emb2)\n 2. Simmilarity between 2 sentences -> let weTW stand word word_emb of token T and sentence S\n 2.1. Raw token level with merged embeddings -> sim([we11,we21,weT1], [we12,we22,weT2])\n 2.2 Autogenerate sentemb, basically does 2.1 in the Spark NLP backend\n 2.3 Already using sentence_embedder model -> sim(se1,se2)\n 3. Simmilarity between token and sentence -> sim([we11,w21,wT1], se2)\n 4. Mirrored 3\n \"\"\"\n # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics.pairwise\n StreamlitVizTracker.footer_displayed = False\n try:\n import plotly.express as px\n from sklearn.metrics.pairwise import distance_metrics\n except:\n st.error(\n \"You need the sklearn and plotly package in your Python environment installed for similarity visualizations. 
Run <pip install sklearn plotly>\")\n if set_wide_layout_CSS: _set_block_container_style()\n if title: st.header(title)\n if show_logo: StreamlitVizTracker.show_logo()\n if sub_tile: st.subheader(sub_tile)\n\n StreamlitVizTracker.loaded_word_embeding_pipes = []\n dist_metric_algos = distance_metrics()\n dist_algos = list(dist_metric_algos.keys())\n if 'haversine' in dist_algos: dist_algos.remove('haversine') # not applicable in >2D\n if 'precomputed' in dist_algos: dist_algos.remove('precomputed') # Not a dist\n cols = st.beta_columns(2)\n text1 = cols[0].text_input(\"Text or word1\", default_texts[0], key=key+'field_1')\n text2 = cols[1].text_input(\"Text or word2\", default_texts[1], key=key+'field_2') if len(default_texts) > 1 else cols[\n 1].text_input(\"Text or word2\", 'Please enter second string', key=key)\n # exp = st.sidebar.beta_expander(\"Select additional Embedding Models and distance metric to compare \")\n e_coms = StreamlitUtilsOS.find_all_embed_components(pipe)\n embed_algos_to_load = []\n embed_pipes = [pipe]\n dist_algo_selection = dist_metrics\n if show_algo_select:\n # emb_components_usable = Discoverer.get_components('embed')\n emb_components_usable = [e for e in Discoverer.get_components('embed', True, include_aliases=True) if\n 'chunk' not in e and 'sentence' not in e]\n loaded_embed_nlu_refs = []\n loaded_storage_refs = []\n loaded_embed_nlu_refs = list(set(loaded_embed_nlu_refs))\n\n for c in e_coms:\n if not hasattr(c.info, 'nlu_ref'): continue\n r = c.info.nlu_ref\n if 'en.' not in r and 'embed.' not in r and 'ner' not in r:\n loaded_embed_nlu_refs.append('en.embed.' + r)\n elif 'en.' in r and 'embed.' not in r and 'ner' not in r:\n r = r.split('en.')[0]\n loaded_embed_nlu_refs.append('en.embed.' + r)\n else:\n loaded_embed_nlu_refs.append(StorageRefUtils.extract_storage_ref(c))\n loaded_storage_refs.append(StorageRefUtils.extract_storage_ref(c))\n for p in StreamlitVizTracker.loaded_word_embeding_pipes:\n if p != pipe: loaded_embed_nlu_refs.append(p.nlu_ref)\n for l in loaded_embed_nlu_refs:\n if l not in emb_components_usable: emb_components_usable.append(l)\n # embed_algo_selection = exp.multiselect(\"Click to pick additional Embedding Algorithm\",options=emb_components_usable,default=loaded_embed_nlu_refs,key = key)\n # dist_algo_selection = exp.multiselect(\"Click to pick additional Distance Metric\", options=dist_algos, default=dist_metrics, key = key)\n emb_components_usable.sort()\n loaded_embed_nlu_refs.sort()\n dist_algos.sort()\n if model_select_position == 'side':\n embed_algo_selection = st.sidebar.multiselect(\n \"Pick additional Word Embeddings for the Similarity Matrix\", options=emb_components_usable,\n default=loaded_embed_nlu_refs, key=key)\n dist_algo_selection = st.sidebar.multiselect(\"Pick additional Similarity Metrics \", options=dist_algos,\n default=dist_metrics, key=key)\n else:\n exp = st.beta_expander(\"Pick additional Word Embeddings and Similarity Metrics\")\n embed_algo_selection = exp.multiselect(\"Pick additional Word Embeddings for the Similarity Matrix\",\n options=emb_components_usable, default=loaded_embed_nlu_refs,\n key=key)\n dist_algo_selection = exp.multiselect(\"Pick additional Similarity Metrics \", options=dist_algos,\n default=dist_metrics, key=key)\n embed_algos_to_load = list(set(embed_algo_selection) - set(loaded_embed_nlu_refs))\n\n for embedder in embed_algos_to_load: embed_pipes.append(nlu.load(embedder))\n\n if generate_code_sample: st.code(\n get_code_for_viz('SIMILARITY', [StreamlitUtilsOS.extract_name(p) 
for p in embed_pipes], default_texts))\n\n StreamlitVizTracker.loaded_word_embeding_pipes += embed_pipes\n similarity_metrics = {}\n embed_vector_info = {}\n cols_full = True\n col_index = 0\n # for p in embed_pipes :\n for p in StreamlitVizTracker.loaded_word_embeding_pipes:\n data1 = p.predict(text1, output_level='token', get_embeddings=True).dropna()\n data2 = p.predict(text2, output_level='token', get_embeddings=True).dropna()\n e_coms = StreamlitUtilsOS.find_all_embed_components(p)\n modelhub_links = [ModelHubUtils.get_url_by_nlu_refrence(c.info.nlu_ref) if hasattr(c.info,\n 'nlu_ref') else ModelHubUtils.get_url_by_nlu_refrence(\n '') for c in e_coms]\n e_cols = StreamlitUtilsOS.get_embed_cols(p)\n for num_emb, e_col in enumerate(e_cols):\n if col_index == num_cols - 1: cols_full = True\n if cols_full:\n cols = st.beta_columns(num_cols)\n col_index = 0\n cols_full = False\n else:\n col_index += 1\n tok1 = data1['token']\n tok2 = data2['token']\n emb1 = data1[e_col]\n emb2 = data2[e_col]\n\n def normalize_matrix(m):\n return np.nan_to_num(m / np.linalg.norm(m, axis=1, keepdims=True))\n\n embed_mat1 = normalize_matrix(np.array([x for x in emb1]))\n embed_mat2 = normalize_matrix(np.array([x for x in emb2]))\n # e_name = e_col.split('word_embedding_')[-1]\n e_name = e_coms[num_emb].info.nlu_ref if hasattr(e_coms[num_emb].info, 'nlu_ref') else \\\n e_col.split('word_embedding_')[-1] if 'en.' in e_col else e_col\n e_name = e_name.split('embed.')[-1] if 'en.' in e_name else e_name\n if 'ner' in e_name: e_name = loaded_storage_refs[num_emb]\n\n embed_vector_info[e_name] = {\"Vector Dimension \": embed_mat1.shape[1],\n \"Num Vectors\": embed_mat1.shape[0] + embed_mat1.shape[0],\n \"NLU_reference\": e_coms[num_emb].info.nlu_ref if hasattr(\n e_coms[num_emb].info, 'nlu_ref') else ' ',\n \"Spark_NLP_reference\": ModelHubUtils.NLU_ref_to_NLP_ref(\n e_coms[num_emb].info.nlu_ref if hasattr(e_coms[num_emb].info,\n 'nlu_ref') else ' '),\n \"Storage Reference\": loaded_storage_refs[num_emb],\n 'Modelhub info': modelhub_links[num_emb]}\n for dist_algo in dist_algo_selection:\n # scalar_similarities[e_col][dist_algo]={}\n sim_score = ((dist_metric_algos[dist_algo](embed_mat1, embed_mat2) - 1) * -1)\n\n sim_score = pd.DataFrame(sim_score)\n sim_score.index = tok1.values\n sim_score.columns = tok2.values\n sim_score.columns = StreamlitVizTracker.pad_duplicate_tokens(list(sim_score.columns))\n sim_score.index = StreamlitVizTracker.pad_duplicate_tokens(list(sim_score.index))\n if write_raw_pandas: st.write(sim_score, key=key)\n if sim_score.shape == (1, 1):\n sim_score = sim_score.iloc[0][0]\n sim_score = round(sim_score, 2)\n if sim_score > threshold:\n st.success(sim_score)\n st.success(f'Scalar Similarity={sim_score} for distance metric={dist_algo}')\n st.error(\n 'No similarity matrix for only 2 tokens. 
Try entering at least 1 sentences in a field')\n else:\n st.error(f'Scalar Similarity={sim_score} for distance metric={dist_algo}')\n else:\n ploty_avaiable = True\n # for tok emb, sum rows and norm by rows, then sum cols and norm by cols to generate a scalar from matrix\n scalar_sim_score = np.sum((np.sum(sim_score, axis=0) / sim_score.shape[0])) / sim_score.shape[1]\n scalar_sim_score = round(scalar_sim_score, 2)\n\n if display_scalar_similarities:\n if scalar_sim_score > threshold:\n st.success(f'Scalar Similarity :{scalar_sim_score} for distance metric={dist_algo}')\n else:\n st.error(\n f'Scalar Similarity :{scalar_sim_score} for embedder={e_col} distance metric={dist_algo}')\n if similarity_matrix:\n if ploty_avaiable:\n fig = px.imshow(sim_score, labels=dict(\n color=\"similarity\")) # , title=f'Simmilarity Matrix for embedding_model={e_name} distance metric={dist_algo}')\n # st.write(fig,key =key)\n similarity_metrics[f'{e_name}_{dist_algo}_similarity'] = {\n 'scalar_similarity': scalar_sim_score,\n 'dist_metric': dist_algo,\n 'embedding_model': e_name,\n 'modelhub_info': modelhub_links[num_emb],\n }\n subh = f\"\"\"Embedding-Model=`{e_name}`, Similarity-Score=`{scalar_sim_score}`, distance metric=`{dist_algo}`\"\"\"\n cols[col_index].markdown(subh)\n cols[col_index].write(fig, key=key)\n else:\n pass # todo fallback plots\n\n if display_similarity_summary:\n exp = st.beta_expander(\"Similarity summary\")\n exp.write(similarity_metrics)\n if display_embed_information:\n exp = st.beta_expander(\"Embedding vector information\")\n exp.write(embed_vector_info)\n if show_infos:\n # VizUtilsStreamlitOS.display_infos()\n StreamlitVizTracker.display_model_info(pipe.nlu_ref, pipes=[pipe])\n StreamlitVizTracker.display_footer()\n" ]
[ [ "numpy.linalg.norm", "pandas.DataFrame", "sklearn.metrics.pairwise.distance_metrics", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
joannetruong/habitat-api
[ "aad2fd7b8545dce44daefd4b7b3941672eb96ee3", "aad2fd7b8545dce44daefd4b7b3941672eb96ee3", "aad2fd7b8545dce44daefd4b7b3941672eb96ee3", "aad2fd7b8545dce44daefd4b7b3941672eb96ee3" ]
[ "evaluation/evaluate_simulation_coda_gan.py", "test/test_pyrobot.py", "habitat_baselines/slambased/monodepth.py", "habitat_baselines/rl/ddppo/algo/ddppo.py" ]
[ "import matplotlib.pyplot as plt\nimport argparse\nimport os\nfrom collections import defaultdict\n\nimport habitat\nimport numpy as np\nimport quaternion\nimport torch\nfrom evaluate_reality import load_model\nfrom gym.spaces.dict_space import Dict as SpaceDict\nfrom habitat.tasks.utils import cartesian_to_polar\nfrom habitat.utils.geometry_utils import quaternion_rotate_vector\nfrom habitat.utils.visualizations.utils import (images_to_video,\n observations_to_image)\nfrom habitat_baselines.common.baseline_registry import baseline_registry\nfrom habitat_baselines.common.env_utils import construct_envs\nfrom habitat_baselines.common.environments import get_env_class\nfrom habitat_baselines.common.utils import batch_obs, generate_video\nfrom habitat_baselines.config.default import get_config\nfrom habitat_sim import geo\nfrom habitat_sim.utils.common import quat_from_two_vectors, quat_rotate_vector\nfrom PIL import Image\nfrom predictor import Predictor\n\n\ndef quat_to_rad(rotation):\n heading_vector = quaternion_rotate_vector(\n rotation.inverse(), np.array([0, 0, -1])\n )\n\n phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]\n return phi\n\ndef create_state(position, rotation):\n rotation_mp3d_habitat = quat_from_two_vectors(geo.GRAVITY, np.array([0, 0, -1]))\n pt_mp3d = quat_rotate_vector(rotation_mp3d_habitat, position) # That point in the mp3d scene mesh coordinate frame.\n state_xyt = [pt_mp3d[0], pt_mp3d[1]]\n theta = quat_to_rad(rotation)\n state_xyt.append(theta)\n return state_xyt\n\ndef create_traj_labels(input_arr):\n r, c = input_arr.shape\n # labels: d_x, d_y, cos_d_t, sin_d_t\n diff = np.diff(input_arr, axis=0)\n labels_arr = np.zeros((r-1, 4))\n labels_arr[:, :2] = diff[:, :2]\n labels_arr[:, 2] = np.cos(diff[:, 2])\n labels_arr[:, 3] = np.sin(diff[:, 2])\n return labels_arr\n\ndef convert_embedding(input_arr_embed):\n # SIMULATOR_REALITY_ACTIONS = {\"stop\": 0, \"forward\": 1 , \"left\": 2 , \"right\": 3}\n ONE_HOT_ACTIONS = {\"0\": [0, 0, 0], \"1\": [0, 0, 1] , \"2\": [0, 1, 0] , \"3\": [1, 0, 0]}\n r, c = input_arr_embed.shape\n input_arr_oneHot = np.zeros((r, c+2))\n input_arr_oneHot[:, :4] = input_arr_embed[:, :4]\n for row in range(r):\n input_arr_oneHot[row, 4:] = ONE_HOT_ACTIONS[str(int(input_arr_embed[row, 4]))]\n ## if logging collisions\n # input_arr_oneHot[row, 4:7] = ONE_HOT_ACTIONS[str(int(input_arr_embed[row, 4]))]\n # input_arr_embed[:, -1] = input_arr_embed[:, 5]\n\n return input_arr_oneHot\n\ndef save_trajectory(data, datasplit, traj_dir, traj_ctr, datatype, embed_type=\"\"):\n pathend = datasplit + '_' + '%03d'%traj_ctr\n if embed_type != \"\":\n embed_type += \"_\"\n filename = os.path.join(traj_dir, datatype + '_LRF_' + embed_type + pathend)\n print('saving: ', filename)\n np.save(filename, data[:, :]) \n np.savetxt(filename + '.csv', data[:, :], delimiter=\",\")\n\ndef create_labels_trajectory(labels_arr):\n r, c = labels_arr.shape\n # input embed: x, y, cost, sint, a\n final_labels_arr = np.zeros((r, c+1))\n ## if logging collisions\n # input_arr_embed = np.zeros((r, c+2))\n final_labels_arr[:, :2] = labels_arr[:, :2]\n final_labels_arr[:, 2] = np.cos(labels_arr[:, 2])\n final_labels_arr[:, 3] = np.sin(labels_arr[:, 2])\n return final_labels_arr\n\ndef create_input_trajectory(final_input_arr):\n r, c = final_input_arr.shape\n # input embed: x, y, cost, sint, a\n input_arr_embed = np.zeros((r, c+1))\n ## if logging collisions\n # input_arr_embed = np.zeros((r, c+2))\n input_arr_embed[:, :2] = final_input_arr[:, :2]\n 
input_arr_embed[:, 2] = np.cos(final_input_arr[:, 2])\n input_arr_embed[:, 3] = np.sin(final_input_arr[:, 2])\n input_arr_embed[:, 4] = final_input_arr[:, 3]\n ## if logging collisions\n # input_arr_embed[:, 5] = final_input_arr[:, 4]\n\n # input oneHot: x, y, cost, sint, a1, a2, a3\n input_arr_oneHot = convert_embedding(input_arr_embed)\n \n return input_arr_embed, input_arr_oneHot\n\ndef create_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return\n\ndef get_last_idx(dir_path):\n f = sorted(os.listdir(dir_path))\n if not f:\n ctr = 0\n else:\n ctr = int(f[-1].split('.')[0].split('_')[-1]) +1\n return ctr\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model-path\", type=str, required=True)\n# parser.add_argument(\"--noisy\", action=\"store_true\")\n parser.add_argument(\"--noise\", type=str, required=True)\n parser.add_argument(\"--save-imgs\", action=\"store_true\")\n parser.add_argument(\"--save-traj\", action=\"store_true\")\n parser.add_argument(\"--data-split\", type=str, required=True)\n parser.add_argument(\"--sensors\", type=str, required=True)\n parser.add_argument(\"--hidden-size\", type=int, required=True)\n parser.add_argument(\n \"--normalize-visual-inputs\", type=int, required=True, choices=[0, 1]\n )\n parser.add_argument(\"--depth-only\", action=\"store_true\")\n parser.add_argument(\"--use-gan\", action=\"store_true\")\n parser.add_argument(\"--gan-weights\", type=str, required=False)\n parser.add_argument(\"--noise-type\", type=str, required=True)\n parser.add_argument(\n \"--backbone\",\n type=str,\n required=True,\n choices=[\"resnet50\", \"se_resneXt50\"],\n )\n parser.add_argument(\"--num-recurrent-layers\", type=int, required=True)\n parser.add_argument(\n \"opts\",\n default=None,\n nargs=argparse.REMAINDER,\n help=\"Modify config options from command line\",\n )\n args = parser.parse_args()\n\n # Check torch version\n# vtorch = \"1.2.0\"\n#x assert torch.__version__ == vtorch, \"Please use torch {}\".format(vtorch)\n if args.noise_type == 'poisson_ilqr':\n if args.noise == 'all':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml\"\n elif args.noise == 'actuation':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_ilqr.yaml\"\n elif args.noise == 'sensors':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_poisson.yaml\"\n elif args.noise == 'no_noise':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml\"\n else:\n print('no noise specified. using all noise')\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml\"\n elif args.noise_type == 'speckle_mb':\n if args.noise == 'all':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_speckle_mb.yaml\"\n elif args.noise == 'actuation':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_mb.yaml\"\n elif args.noise == 'sensors':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_speckle.yaml\"\n elif args.noise == 'no_noise':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml\"\n else:\n print('no noise specified. 
using all noise')\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml\"\n elif args.noise_type == 'gaussian_proportional':\n if args.noise == 'all':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_gaussian_proportional.yaml\"\n elif args.noise == 'actuation':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_proportional.yaml\"\n elif args.noise == 'sensors':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_gaussian.yaml\"\n elif args.noise == 'no_noise':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml\"\n else:\n print('no noise specified. using all noise')\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_gaussian_proportional.yaml\"\n config = get_config(\n cfg_file, args.opts\n )\n if args.save_traj:\n datasplit = args.data_split.split('_')[1]\n split = 'train'\n if datasplit == 'med':\n split = 'test'\n if args.save_imgs:\n if args.noise!=\"no_noise\":\n depth_save_path = 'depth_' + config.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.NOISE_MODEL + '_' + split\n rgb_save_path = 'rgb_' + config.TASK_CONFIG.SIMULATOR.RGB_SENSOR.NOISE_MODEL + '_' + str(config.TASK_CONFIG.SIMULATOR.RGB_SENSOR.NOISE_MODEL_KWARGS.intensity_constant) + '_' + split\n else:\n depth_save_path = 'depth_no_noise_' + split\n rgb_save_path = 'rgb_no_noise_' + split\n if args.save_traj:\n if args.noise!=\"no_noise\":\n traj_save_path = 'traj_' + config.TASK_CONFIG.SIMULATOR.NOISE_MODEL.CONTROLLER + '_' + str(config.TASK_CONFIG.SIMULATOR.NOISE_MODEL.NOISE_MULTIPLIER) + '_' + split\n else:\n traj_save_path = 'traj_no_noise_' + split\n\n config.defrost()\n config.TASK_CONFIG.TASK.BASE_STATE = habitat.Config()\n config.TASK_CONFIG.TASK.BASE_STATE.TYPE = \"BaseState\"\n # Add the measure to the list of measures in use\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"BASE_STATE\")\n\n if args.sensors == \"\":\n config.SENSORS = []\n else:\n config.SENSORS = args.sensors.split(\",\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"COLLISIONS\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"SOFT_SPL\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"TOP_DOWN_MAP\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"EPISODE_DISTANCE\")\n config.freeze()\n\n envs = construct_envs(config, get_env_class(config.ENV_NAME))\n sensors_obs = envs.observation_spaces[0]\n\n if args.depth_only:\n config.defrost()\n config.SENSORS=[\"DEPTH_SENSOR\"]\n config.freeze()\n envs2 = construct_envs(config, get_env_class(config.ENV_NAME))\n sensors_obs = envs2.observation_spaces[0]\n\n device = (\n torch.device(\"cuda:{}\".format(config.TORCH_GPU_ID))\n if torch.cuda.is_available()\n else torch.device(\"cpu\")\n )\n model = load_model(\n path=args.model_path,\n observation_space=sensors_obs,\n # observation_space=envs.observation_spaces[0],\n action_space=envs.action_spaces[0],\n hidden_size=args.hidden_size,\n normalize_visual_inputs=bool(args.normalize_visual_inputs),\n backbone=args.backbone,\n num_recurrent_layers=args.num_recurrent_layers,\n device=device,\n )\n model.eval()\n\n if args.use_gan:\n predictor = Predictor(args.gan_weights)\n print('METRICS: ', config.TASK_CONFIG.TASK.MEASUREMENTS)\n\n metric_name = \"SPL\"\n metric_cfg = getattr(config.TASK_CONFIG.TASK, metric_name)\n measure_type = baseline_registry.get_measure(metric_cfg.TYPE)\n assert measure_type is not None, \"invalid measurement type {}\".format(\n metric_cfg.TYPE\n )\n metric_uuid = 
measure_type(None, None)._get_uuid()\n\n print('METRIC UUID: ', metric_uuid)\n observations = envs.reset()\n print('IMAGE TYPE: ' , observations[0][\"rgb\"].dtype, observations[0][\"depth\"].dtype)\n# print(observations[0][\"rgb\"], observations[0][\"depth\"])\n rgbd_img = np.dstack((observations[0][\"rgb\"], (observations[0][\"depth\"]*255).astype(np.uint8)))\n gan_observations = predictor(rgbd_img)\n observations[0][\"depth\"] = np.expand_dims((gan_observations[:,:,-1]/255).astype(np.float32), axis=2)\n# print('IMAGE TYPE: ' , observations[0][\"rgb\"].dtype, observations[0][\"depth\"].dtype)\n# print(observations[0][\"rgb\"], observations[0][\"depth\"])\n #observations[0][\"rgb\"] = gan_observations[:,:,:3][...,::-1]\n if args.depth_only:\n del observations[0][\"rgb\"]\n else:\n# print('GAN TYPE: ', gan_observations[:,:,:3][...,::-1].dtype)\n observations[0][\"rgb\"] = gan_observations[:,:,:3][...,::-1]\n batch = batch_obs(observations, device)\n\n current_episode_reward = torch.zeros(envs.num_envs, 1, device=device)\n\n test_recurrent_hidden_states = torch.zeros(\n model.net.num_recurrent_layers,\n config.NUM_PROCESSES,\n args.hidden_size,\n device=device,\n )\n prev_actions = torch.zeros(\n config.NUM_PROCESSES, 1, device=device, dtype=torch.long\n )\n not_done_masks = torch.zeros(config.NUM_PROCESSES, 1, device=device)\n\n stats_episodes = dict() # dict of dicts that stores stats per episode\n\n stats_actions = defaultdict(int)\n\n rgb_frames = [\n [] for _ in range(config.NUM_PROCESSES)\n ] # type: List[List[np.ndarray]]\n if len(config.VIDEO_OPTION) > 0:\n os.makedirs(config.VIDEO_DIR, exist_ok=True)\n\n sensor_path = 'sim_sensor_imgs'\n traj_path = 'sim_traj'\n if args.save_imgs:\n depth_dir = os.path.join(sensor_path, depth_save_path)\n rgb_dir = os.path.join(sensor_path, rgb_save_path)\n create_dir(depth_dir)\n create_dir(rgb_dir)\n img_ctr = get_last_idx(depth_dir)\n if args.save_traj:\n traj_dir = os.path.join(traj_path, traj_save_path)\n create_dir(traj_dir)\n traj_ctr = get_last_idx(traj_dir)\n\n ## not logging collisions\n final_input_arr = np.array([0, 0, 0, 0])\n ## if logging collisions\n # input_arr = np.array([0, 0, 0, 0, 0])\n # final_input_arr = np.array([0, 0, 0, 0, 0])\n tmp_labels_arr = np.array([0, 0, 0])\n prev_base_state = [0, 0, 0]\n num_actions = 0\n# datasplit = args.data_split.split('_')[1]\n print_once = True\n called_stop = False\n\n while (\n len(stats_episodes) < config.TEST_EPISODE_COUNT and envs.num_envs > 0\n ):\n current_episodes = envs.current_episodes()\n if print_once:\n print(\"Ep_id: \", current_episodes[0].episode_id, \"Start_pos: \", current_episodes[0].start_position, current_episodes[0].start_rotation, \"Goal_pos: \", current_episodes[0].goals[0].position)\n print_once = False\n\n with torch.no_grad():\n _, actions, _, test_recurrent_hidden_states = model.act(\n batch,\n test_recurrent_hidden_states,\n prev_actions,\n not_done_masks,\n deterministic=False,\n )\n\n prev_actions.copy_(actions)\n\n outputs = envs.step([a[0].item() for a in actions])\n num_actions +=1\n for a in actions:\n stats_actions[a[0].item()] += 1\n\n observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]\n if args.save_imgs:\n depth_obs = observations[0][\"depth\"] \n depth_obs = np.squeeze(depth_obs)\n depth_img = Image.fromarray((depth_obs * 255).astype(np.uint8), mode=\"L\")\n depth_img.save(os.path.join(depth_dir, \"real_depth_\" + \"%05d\"%img_ctr + \".jpg\"), \"JPEG\")\n\n rgb_obs = observations[0][\"rgb\"]\n rgb_img = Image.fromarray(rgb_obs, 
mode=\"RGB\")\n rgb_img.save(os.path.join(rgb_dir, \"real_rgb_\" + \"%05d\"%img_ctr + \".jpg\"), \"JPEG\")\n rgbd_img = np.dstack((observations[0][\"rgb\"], (observations[0][\"depth\"]*255).astype(np.uint8)))\n gan_observations = predictor(rgbd_img)\n observations[0][\"rgb\"] = gan_observations[:,:,:3][...,::-1]\n observations[0][\"depth\"] = np.expand_dims((gan_observations[:,:,-1]/255).astype(np.float32), axis=2)\n if args.save_imgs:\n depth_obs = observations[0][\"depth\"] \n depth_obs = np.squeeze(depth_obs)\n depth_img = Image.fromarray((depth_obs * 255).astype(np.uint8), mode=\"L\")\n depth_img.save(os.path.join(depth_dir, \"sim_depth_\" + \"%05d\"%img_ctr + \".jpg\"), \"JPEG\")\n\n rgb_obs = observations[0][\"rgb\"]\n rgb_img = Image.fromarray(rgb_obs, mode=\"RGB\")\n rgb_img.save(os.path.join(rgb_dir, \"sim_rgb_\" + \"%05d\"%img_ctr + \".jpg\"), \"JPEG\")\n img_ctr +=1\n if args.depth_only:\n del observations[0][\"rgb\"]\n batch = batch_obs(observations, device)\n not_done_masks = torch.tensor(\n [[0.0] if done else [1.0] for done in dones],\n dtype=torch.float,\n device=device,\n )\n\n rewards = torch.tensor(\n rewards, dtype=torch.float, device=device\n ).unsqueeze(1)\n current_episode_reward += rewards\n next_episodes = envs.current_episodes()\n envs_to_pause = []\n n_envs = envs.num_envs\n for i in range(n_envs):\n if (\n next_episodes[i].scene_id,\n next_episodes[i].episode_id,\n ) in stats_episodes:\n envs_to_pause.append(i)\n # x, y, t, a\n input_row = prev_base_state + [actions[i][0].cpu().detach().tolist()]\n #input_row = prev_base_state + [actions[i][0].cpu().detach().tolist()] + [int(infos[i][\"collisions\"][\"is_collision\"])]\n curr_state = create_state(infos[i][\"base_state\"]['position'], infos[i][\"base_state\"]['rotation'])\n delta_row = np.subtract(curr_state, prev_base_state)\n prev_base_state = curr_state\n\n print(input_row + [int(infos[i][\"collisions\"][\"is_collision\"])])\n if int(infos[i][\"collisions\"][\"is_collision\"]) == 0:\n final_input_arr = np.vstack((final_input_arr, input_row))\n tmp_labels_arr = np.vstack((tmp_labels_arr, delta_row))\n\n# plt.ioff()\n# _ = plt.hist(observations[i][\"depth\"].flatten(), bins='auto')\n# plt.savefig('hist.jpg')\n # TODO: save only good trajectories\n\n # episode ended\n if not_done_masks[i].item() == 0:\n episode_stats = dict()\n episode_stats[metric_uuid] = infos[i][metric_uuid]\n episode_stats[\"success\"] = int(infos[i][metric_uuid] > 0)\n episode_stats[\"reward\"] = current_episode_reward[i].item()\n if actions[i][0].cpu().detach().tolist() == 0:\n called_stop = True\n\n # if infos[i][\"collisions\"] == 0:\n # final_input_arr = np.vstack((final_input_arr, input_arr[2:-1, :]))\n # final_labels_arr = np.vstack((final_labels_arr, labels_arr[2:-1,:]))\n # final_input_arr = np.vstack((final_input_arr, input_arr[2:-1, :]))\n # final_labels_arr = np.vstack((final_labels_arr, create_traj_labels(input_arr[2:, :])))\n\n print(final_input_arr.ndim)\n if final_input_arr.ndim > 1:\n print(\"Final Shape: {}\".format(final_input_arr[2:-1, :].shape))\n input_arr_embed, input_arr_oneHot = create_input_trajectory(final_input_arr[2:-1, :])\n final_labels_arr = create_labels_trajectory(tmp_labels_arr[2:-1, :])\n if args.save_traj:\n save_trajectory(input_arr_embed, datasplit, traj_dir, traj_ctr, 'input', embed_type=\"embed\")\n save_trajectory(input_arr_oneHot, datasplit, traj_dir, traj_ctr, 'input', embed_type=\"oneHot\")\n save_trajectory(final_labels_arr, datasplit, traj_dir, traj_ctr, 'labels', embed_type=\"\")\n traj_ctr 
+=1\n\n print(\"# Actions: {}\".format(num_actions))\n print(\"# Collisions: {}\".format(infos[i][\"collisions\"][\"count\"]))\n print(\"Success: {}\".format(episode_stats[\"success\"]))\n print(\"Agent Episode Distance: {}\".format(infos[i]['episode_distance']['agent_episode_distance'])) #TODO\n print(\"Final Distance to Goal: {}\".format(infos[i]['episode_distance']['goal_distance'])) #TODO\n print(\"SPL: {}\".format(episode_stats[metric_uuid]))\n print(\"Soft SPL: {}\".format(infos[i][\"softspl\"]))\n print(\"Called Stop: {}\".format(called_stop))\n\n current_episode_reward[i] = 0\n ## not logging collisions\n final_input_arr = np.array([0, 0, 0, 0])\n ## if logging collisions\n # input_arr = np.array([0, 0, 0, 0, 0])\n # final_input_arr = np.array([0, 0, 0, 0, 0])\n tmp_labels_arr = np.array([0, 0, 0])\n prev_base_state = [0, 0, 0]\n num_actions = 0\n print_once = True\n called_stop = False\n\n # use scene_id + episode_id as unique id for storing stats\n stats_episodes[\n (\n current_episodes[i].scene_id,\n current_episodes[i].episode_id,\n )\n ] = episode_stats\n\n if len(config.VIDEO_OPTION) > 0:\n metric_value = episode_stats[metric_uuid]\n video_name = (\n f\"episode_{current_episodes[i].episode_id}\"\n f\"_{metric_name}_{metric_value:.2f}\"\n )\n images_to_video(\n rgb_frames[i], config.VIDEO_DIR, video_name\n )\n\n rgb_frames[i] = []\n\n print(\"Episodes finished: {}\".format(len(stats_episodes)))\n\n # episode continues\n elif len(config.VIDEO_OPTION) > 0:\n frame = observations_to_image(observations[i], infos[i])\n rgb_frames[i].append(frame)\n\n # pausing self.envs with no new episode\n if len(envs_to_pause) > 0:\n state_index = list(range(envs.num_envs))\n for idx in reversed(envs_to_pause):\n state_index.pop(idx)\n envs.pause_at(idx)\n\n # indexing along the batch dimensions\n test_recurrent_hidden_states = test_recurrent_hidden_states[\n :, state_index\n ]\n not_done_masks = not_done_masks[state_index]\n current_episode_reward = current_episode_reward[state_index]\n prev_actions = prev_actions[state_index]\n\n for k, v in batch.items():\n batch[k] = v[state_index]\n\n if len(config.VIDEO_OPTION) > 0:\n rgb_frames = [rgb_frames[i] for i in state_index]\n\n aggregated_stats = dict()\n for stat_key in next(iter(stats_episodes.values())).keys():\n aggregated_stats[stat_key] = sum(\n [v[stat_key] for v in stats_episodes.values()]\n )\n num_episodes = len(stats_episodes)\n\n episode_reward_mean = aggregated_stats[\"reward\"] / num_episodes\n episode_metric_mean = aggregated_stats[metric_uuid] / num_episodes\n episode_success_mean = aggregated_stats[\"success\"] / num_episodes\n\n print(f\"Number of episodes: {num_episodes}\")\n print(f\"Average episode reward: {episode_reward_mean:.6f}\")\n print(f\"Average episode success: {episode_success_mean:.6f}\")\n print(f\"Average episode {metric_uuid}: {episode_metric_mean:.6f}\")\n\n print(\"Stats actions:\", stats_actions)\n\n envs.close()\n\n\nif __name__ == \"__main__\":\n main()\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport sys\n\nimport mock\nimport numpy as np\n\nfrom habitat.config.default import get_config\nfrom habitat.sims import make_sim\n\n\nclass CameraMock:\n def get_rgb(self):\n return np.zeros((256, 256, 3))\n\n def get_depth(self):\n return np.zeros((256, 256, 1))\n\n def reset(self):\n pass\n\n def step(self, *args, **kwargs):\n pass\n\n\nclass RobotMock:\n def __init__(self, *args, **kwargs):\n self.camera = CameraMock()\n self.base = BaseMock()\n\n\nclass BaseMock:\n def __init__(self, *args, **kwargs):\n self.base_state = mock.MagicMock()\n self.base_state.bumper = False\n\n def go_to_relative(self, *args, **kwargs):\n pass\n\n\ndef test_pyrobot(mocker):\n if \"pyrobot\" not in sys.modules:\n # Mock pyrobot package if it is not installed\n mock_pyrobot = mocker.MagicMock()\n mock_pyrobot.Robot = RobotMock\n sys.modules[\"pyrobot\"] = mock_pyrobot\n\n # Re-register pyrobot with mock\n from habitat.sims.registration import _try_register_pyrobot\n\n _try_register_pyrobot()\n\n config = get_config()\n with make_sim(\"PyRobot-v0\", config=config.PYROBOT) as reality:\n\n _ = reality.reset()\n _ = reality.step(\n \"go_to_relative\",\n {\n \"xyt_position\": [0, 0, (10 / 180) * np.pi],\n \"use_map\": False,\n \"close_loop\": True,\n \"smooth\": False,\n },\n )\n", "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nr\"\"\"The code below is taken from\nhttps://github.com/JunjH/Revisiting_Single_Depth_Estimation\nRevisiting Single Image Depth Estimation: Toward Higher Resolution Maps With Accurate Object Boundaries\nJunjie Hu and Mete Ozay and Yan Zhang and Takayuki Okatani\nWACV 2019\n\nResNet code gently borrowed from\nhttps://github.com/pytorch/vision/blob/master/torchvision/models/py\n\"\"\"\n\n\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn.parallel\nfrom PIL import Image\nfrom torch import nn as nn\nfrom torch.nn import functional as F\nfrom torch.utils import model_zoo as model_zoo\nfrom torchvision import transforms\n\naccimage = None\n\n\n__all__ = [\n \"ResNet\",\n \"resnet18\",\n \"resnet34\",\n \"resnet50\",\n \"resnet101\",\n \"resnet152\",\n]\n\n\nmodel_urls = {\n \"resnet18\": \"https://download.pytorch.org/models/resnet18-5c106cde.pth\",\n \"resnet34\": \"https://download.pytorch.org/models/resnet34-333f7ec4.pth\",\n \"resnet50\": \"https://download.pytorch.org/models/resnet50-19c8e357.pth\",\n \"resnet101\": \"https://download.pytorch.org/models/resnet101-5d3b4d8f.pth\",\n \"resnet152\": \"https://download.pytorch.org/models/resnet152-b121ed2d.pth\",\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False,\n )\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = 
self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n planes, planes, kernel_size=3, stride=stride, padding=1, bias=False\n )\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(\n 3, 64, kernel_size=7, stride=2, padding=3, bias=False\n )\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False,\n ),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resnet18(pretrained=False, **kwargs):\n r\"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls[\"resnet18\"]))\n return model\n\n\ndef resnet34(pretrained=False, **kwargs):\n r\"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n 
model.load_state_dict(model_zoo.load_url(model_urls[\"resnet34\"]))\n return model\n\n\ndef resnet50(pretrained=False, **kwargs):\n r\"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(\n model_zoo.load_url(\n model_urls[\"resnet50\"], \"pretrained_model/encoder\"\n )\n )\n return model\n\n\ndef resnet101(pretrained=False, **kwargs):\n r\"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls[\"resnet101\"]))\n return model\n\n\ndef resnet152(pretrained=False, **kwargs):\n r\"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls[\"resnet152\"]))\n return model\n\n\nclass model(nn.Module):\n def __init__(self, Encoder, num_features, block_channel):\n\n super(model, self).__init__()\n\n self.E = Encoder\n self.D = D(num_features)\n self.MFF = MFF(block_channel)\n self.R = R(block_channel)\n\n def forward(self, x):\n x_block1, x_block2, x_block3, x_block4 = self.E(x)\n x_decoder = self.D(x_block1, x_block2, x_block3, x_block4)\n x_mff = self.MFF(\n x_block1,\n x_block2,\n x_block3,\n x_block4,\n [x_decoder.size(2), x_decoder.size(3)],\n )\n out = self.R(torch.cat((x_decoder, x_mff), 1))\n\n return out\n\n\nclass _UpProjection(nn.Sequential):\n def __init__(self, num_input_features, num_output_features):\n super(_UpProjection, self).__init__()\n\n self.conv1 = nn.Conv2d(\n num_input_features,\n num_output_features,\n kernel_size=5,\n stride=1,\n padding=2,\n bias=False,\n )\n self.bn1 = nn.BatchNorm2d(num_output_features)\n self.relu = nn.ReLU(inplace=True)\n self.conv1_2 = nn.Conv2d(\n num_output_features,\n num_output_features,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False,\n )\n self.bn1_2 = nn.BatchNorm2d(num_output_features)\n\n self.conv2 = nn.Conv2d(\n num_input_features,\n num_output_features,\n kernel_size=5,\n stride=1,\n padding=2,\n bias=False,\n )\n self.bn2 = nn.BatchNorm2d(num_output_features)\n\n def forward(self, x, size):\n x = F.upsample(x, size=size, mode=\"bilinear\")\n x_conv1 = self.relu(self.bn1(self.conv1(x)))\n bran1 = self.bn1_2(self.conv1_2(x_conv1))\n bran2 = self.bn2(self.conv2(x))\n\n out = self.relu(bran1 + bran2)\n\n return out\n\n\nclass E_resnet(nn.Module):\n def __init__(self, original_model, num_features=2048):\n super(E_resnet, self).__init__()\n self.conv1 = original_model.conv1\n self.bn1 = original_model.bn1\n self.relu = original_model.relu\n self.maxpool = original_model.maxpool\n\n self.layer1 = original_model.layer1\n self.layer2 = original_model.layer2\n self.layer3 = original_model.layer3\n self.layer4 = original_model.layer4\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x_block1 = self.layer1(x)\n x_block2 = self.layer2(x_block1)\n x_block3 = self.layer3(x_block2)\n x_block4 = self.layer4(x_block3)\n\n return x_block1, x_block2, x_block3, x_block4\n\n\nclass D(nn.Module):\n def __init__(self, num_features=2048):\n super(D, self).__init__()\n self.conv = nn.Conv2d(\n num_features,\n num_features // 2,\n kernel_size=1,\n stride=1,\n 
bias=False,\n )\n num_features = num_features // 2\n self.bn = nn.BatchNorm2d(num_features)\n\n self.up1 = _UpProjection(\n num_input_features=num_features,\n num_output_features=num_features // 2,\n )\n num_features = num_features // 2\n\n self.up2 = _UpProjection(\n num_input_features=num_features,\n num_output_features=num_features // 2,\n )\n num_features = num_features // 2\n\n self.up3 = _UpProjection(\n num_input_features=num_features,\n num_output_features=num_features // 2,\n )\n num_features = num_features // 2\n\n self.up4 = _UpProjection(\n num_input_features=num_features,\n num_output_features=num_features // 2,\n )\n num_features = num_features // 2\n\n def forward(self, x_block1, x_block2, x_block3, x_block4):\n x_d0 = F.relu(self.bn(self.conv(x_block4)))\n x_d1 = self.up1(x_d0, [x_block3.size(2), x_block3.size(3)])\n x_d2 = self.up2(x_d1, [x_block2.size(2), x_block2.size(3)])\n x_d3 = self.up3(x_d2, [x_block1.size(2), x_block1.size(3)])\n x_d4 = self.up4(x_d3, [x_block1.size(2) * 2, x_block1.size(3) * 2])\n\n return x_d4\n\n\nclass MFF(nn.Module):\n def __init__(self, block_channel, num_features=64):\n\n super(MFF, self).__init__()\n\n self.up1 = _UpProjection(\n num_input_features=block_channel[0], num_output_features=16\n )\n\n self.up2 = _UpProjection(\n num_input_features=block_channel[1], num_output_features=16\n )\n\n self.up3 = _UpProjection(\n num_input_features=block_channel[2], num_output_features=16\n )\n\n self.up4 = _UpProjection(\n num_input_features=block_channel[3], num_output_features=16\n )\n\n self.conv = nn.Conv2d(\n num_features,\n num_features,\n kernel_size=5,\n stride=1,\n padding=2,\n bias=False,\n )\n self.bn = nn.BatchNorm2d(num_features)\n\n def forward(self, x_block1, x_block2, x_block3, x_block4, size):\n x_m1 = self.up1(x_block1, size)\n x_m2 = self.up2(x_block2, size)\n x_m3 = self.up3(x_block3, size)\n x_m4 = self.up4(x_block4, size)\n\n x = self.bn(self.conv(torch.cat((x_m1, x_m2, x_m3, x_m4), 1)))\n x = F.relu(x)\n\n return x\n\n\nclass R(nn.Module):\n def __init__(self, block_channel):\n\n super(R, self).__init__()\n\n num_features = 64 + block_channel[3] // 32\n self.conv0 = nn.Conv2d(\n num_features,\n num_features,\n kernel_size=5,\n stride=1,\n padding=2,\n bias=False,\n )\n self.bn0 = nn.BatchNorm2d(num_features)\n\n self.conv1 = nn.Conv2d(\n num_features,\n num_features,\n kernel_size=5,\n stride=1,\n padding=2,\n bias=False,\n )\n self.bn1 = nn.BatchNorm2d(num_features)\n\n self.conv2 = nn.Conv2d(\n num_features, 1, kernel_size=5, stride=1, padding=2, bias=True\n )\n\n def forward(self, x):\n x0 = self.conv0(x)\n x0 = self.bn0(x0)\n x0 = F.relu(x0)\n\n x1 = self.conv1(x0)\n x1 = self.bn1(x1)\n x1 = F.relu(x1)\n\n x2 = self.conv2(x1)\n\n return x2\n\n\ndef _is_pil_image(img):\n return isinstance(img, Image.Image)\n\n\ndef _is_numpy_image(img):\n return isinstance(img, np.ndarray) and (img.ndim in {2, 3})\n\n\nclass Scale(object):\n def __init__(self, size):\n self.size = size\n\n def __call__(self, image):\n image = self.changeScale(image, self.size)\n\n return image\n\n def changeScale(self, img, size, interpolation=Image.BILINEAR):\n ow, oh = size\n\n return img.resize((ow, oh), interpolation)\n\n\nclass CenterCrop(object):\n def __init__(self, size):\n self.size = size\n\n def __call__(self, image):\n image = self.centerCrop(image, self.size)\n\n return image\n\n def centerCrop(self, image, size):\n w1, h1 = image.size\n tw, th = size\n\n if w1 == tw and h1 == th:\n return image\n\n x1 = int(round((w1 - tw) / 2.0))\n y1 = 
int(round((h1 - th) / 2.0))\n\n image = image.crop((x1, y1, tw + x1, th + y1))\n\n return image\n\n\nclass ToTensor(object):\n r\"\"\"Convert a ``PIL.Image`` or ``numpy.ndarray`` to tensor.\n Converts a PIL.Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].\n \"\"\"\n\n def __call__(self, image):\n image = self.to_tensor(image)\n\n return image\n\n def to_tensor(self, pic):\n if not (_is_pil_image(pic) or _is_numpy_image(pic)):\n raise TypeError(\n \"pic should be PIL Image or ndarray. Got {}\".format(type(pic))\n )\n\n if isinstance(pic, np.ndarray):\n\n img = torch.from_numpy(pic.transpose((2, 0, 1)))\n return img.float().div(255)\n\n if accimage is not None and isinstance(pic, accimage.Image):\n nppic = np.zeros(\n [pic.channels, pic.height, pic.width], dtype=np.float32\n )\n pic.copyto(nppic)\n return torch.from_numpy(nppic)\n\n # handle PIL Image\n if pic.mode == \"I\":\n img = torch.from_numpy(np.array(pic, np.int32, copy=False))\n elif pic.mode == \"I;16\":\n img = torch.from_numpy(np.array(pic, np.int16, copy=False))\n else:\n img = torch.ByteTensor(\n torch.ByteStorage.from_buffer(pic.tobytes())\n )\n # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK\n if pic.mode == \"YCbCr\":\n nchannel = 3\n elif pic.mode == \"I;16\":\n nchannel = 1\n else:\n nchannel = len(pic.mode)\n img = img.view(pic.size[1], pic.size[0], nchannel)\n # put it from HWC to CHW format\n # yikes, this transpose takes 80% of the loading time/CPU\n img = img.transpose(0, 1).transpose(0, 2).contiguous()\n if isinstance(img, torch.ByteTensor):\n return img.float().div(255)\n else:\n return img\n\n\nclass Normalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, image):\n image = self.normalize(image, self.mean, self.std)\n\n return image\n\n def normalize(self, tensor, mean, std):\n for t, m, s in zip(tensor, mean, std):\n t.sub_(m).div_(s)\n\n return tensor\n\n\ndef define_model(is_resnet, is_densenet, is_senet):\n if is_resnet:\n original_model = resnet50(pretrained=False)\n Encoder = E_resnet(original_model)\n model1 = model(\n Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048]\n )\n if is_densenet:\n # original_model = dendensenet161(pretrained=False)\n # Encoder = E_densenet(original_model)\n # model1 = model(\n # Encoder, num_features=2208, block_channel=[192, 384, 1056, 2208]\n # )\n raise NotImplementedError()\n if is_senet:\n # original_model = senet154(pretrained=False)\n # Encoder = E_senet(original_model)\n # model1 = model(\n # Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048]\n # )\n raise NotImplementedError()\n return model1\n\n\nclass MonoDepthEstimator:\n def __init__(self, checkpoint=\"./pretrained_model/model_resnet\"):\n self.model = define_model(\n is_resnet=True, is_densenet=False, is_senet=False\n )\n self.model = torch.nn.DataParallel(self.model).cuda()\n cpt = torch.load(checkpoint)\n if \"state_dict\" in cpt.keys():\n cpt = cpt[\"state_dict\"]\n self.model.load_state_dict(cpt)\n self.model.eval()\n self.init_preprocessor()\n\n def init_preprocessor(self):\n __imagenet_stats = {\n \"mean\": [0.485, 0.456, 0.406],\n \"std\": [0.229, 0.224, 0.225],\n }\n\n self.transform = transforms.Compose(\n [\n Scale([320, 240]),\n # CenterCrop([304, 228]),\n ToTensor(),\n Normalize(__imagenet_stats[\"mean\"], __imagenet_stats[\"std\"]),\n ]\n )\n\n def preprocess(self, image):\n image_torch = self.transform(image).unsqueeze(0)\n return 
image_torch.cuda()\n\n def compute_depth(self, image):\n # Input: image is a PIL image\n # Output: depth is a numpy array\n image_torch = self.preprocess(image)\n # print(image_torch.size())\n depth_torch = self.model(image_torch)\n depth = (\n depth_torch.view(depth_torch.size(2), depth_torch.size(3))\n .data.cpu()\n .numpy()\n )\n return depth\n", "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\nfrom torch import distributed as distrib\n\nfrom habitat_baselines.common.rollout_storage import RolloutStorage\nfrom habitat_baselines.rl.ppo import PPO\n\nEPS_PPO = 1e-5\n\n\ndef distributed_mean_and_var(\n values: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n r\"\"\"Computes the mean and variances of a tensor over multiple workers.\n\n This method is equivalent to first collecting all versions of values and\n then computing the mean and variance locally over that\n\n :param values: (*,) shaped tensors to compute mean and variance over. Assumed\n to be solely the workers local copy of this tensor,\n the resultant mean and variance will be computed\n over _all_ workers version of this tensor.\n \"\"\"\n assert distrib.is_initialized(), \"Distributed must be initialized\"\n\n world_size = distrib.get_world_size()\n mean = values.mean()\n distrib.all_reduce(mean)\n mean = mean / world_size\n\n sq_diff = (values - mean).pow(2).mean()\n distrib.all_reduce(sq_diff)\n var = sq_diff / world_size\n\n return mean, var\n\n\nclass DecentralizedDistributedMixin:\n def _get_advantages_distributed(\n self, rollouts: RolloutStorage\n ) -> torch.Tensor:\n advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]\n if not self.use_normalized_advantage: # type: ignore\n return advantages\n\n mean, var = distributed_mean_and_var(advantages)\n\n return (advantages - mean) / (var.sqrt() + EPS_PPO)\n\n def init_distributed(self, find_unused_params: bool = True) -> None:\n r\"\"\"Initializes distributed training for the model\n\n 1. Broadcasts the model weights from world_rank 0 to all other workers\n 2. Adds gradient hooks to the model\n\n :param find_unused_params: Whether or not to filter out unused parameters\n before gradient reduction. This *must* be True if\n there are any parameters in the model that where unused in the\n forward pass, otherwise the gradient reduction\n will not work correctly.\n \"\"\"\n # NB: Used to hide the hooks from the nn.Module,\n # so they don't show up in the state_dict\n class Guard:\n def __init__(self, model, device):\n if torch.cuda.is_available():\n self.ddp = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[device], output_device=device\n )\n else:\n self.ddp = torch.nn.parallel.DistributedDataParallel(model)\n\n self._ddp_hooks = Guard(self.actor_critic, self.device) # type: ignore\n self.get_advantages = self._get_advantages_distributed\n\n self.reducer = self._ddp_hooks.ddp.reducer\n self.find_unused_params = find_unused_params\n\n def before_backward(self, loss: Tensor) -> None:\n super().before_backward(loss) # type: ignore\n\n if self.find_unused_params:\n self.reducer.prepare_for_backward([loss]) # type: ignore\n else:\n self.reducer.prepare_for_backward([]) # type: ignore\n\n\nclass DDPPO(DecentralizedDistributedMixin, PPO):\n pass\n" ]
[ [ "torch.zeros", "numpy.squeeze", "numpy.subtract", "numpy.cos", "numpy.save", "numpy.sin", "torch.tensor", "numpy.diff", "torch.no_grad", "torch.cuda.is_available", "numpy.savetxt", "torch.device", "numpy.array", "numpy.zeros", "numpy.vstack" ], [ "numpy.zeros" ], [ "torch.nn.functional.upsample", "torch.nn.Sequential", "torch.load", "torch.cat", "torch.nn.Conv2d", "torch.from_numpy", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.DataParallel", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "numpy.array", "torch.utils.model_zoo.load_url", "numpy.zeros" ], [ "torch.distributed.is_initialized", "torch.cuda.is_available", "torch.distributed.get_world_size", "torch.distributed.all_reduce", "torch.nn.parallel.DistributedDataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
casperg92/MaSIF_colab
[ "f030061276cc21b812bb3be652124b75dcdf7e5b" ]
[ "data.py" ]
[ "import torch\nfrom torch_geometric.data import InMemoryDataset, Data, DataLoader\nfrom torch_geometric.transforms import Compose\nimport numpy as np\nfrom scipy.spatial.transform import Rotation\nimport math\nimport urllib.request\nimport tarfile\nfrom pathlib import Path\nimport requests\nfrom data_preprocessing.convert_pdb2npy import convert_pdbs\nfrom data_preprocessing.convert_ply2npy import convert_plys\n\ntensor = torch.FloatTensor\ninttensor = torch.LongTensor\n\n\ndef numpy(x):\n return x.detach().cpu().numpy()\n\n\ndef iface_valid_filter(protein_pair):\n labels1 = protein_pair.y_p1.reshape(-1)\n labels2 = protein_pair.y_p2.reshape(-1)\n valid1 = (\n (torch.sum(labels1) < 0.75 * len(labels1))\n and (torch.sum(labels1) > 30)\n and (torch.sum(labels1) > 0.01 * labels2.shape[0])\n )\n valid2 = (\n (torch.sum(labels2) < 0.75 * len(labels2))\n and (torch.sum(labels2) > 30)\n and (torch.sum(labels2) > 0.01 * labels1.shape[0])\n )\n\n return valid1 and valid2\n\n\nclass RandomRotationPairAtoms(object):\n r\"\"\"Randomly rotate a protein\"\"\"\n\n def __call__(self, data):\n R1 = tensor(Rotation.random().as_matrix())\n R2 = tensor(Rotation.random().as_matrix())\n\n data.atom_coords_p1 = torch.matmul(R1, data.atom_coords_p1.T).T\n data.xyz_p1 = torch.matmul(R1, data.xyz_p1.T).T\n data.normals_p1 = torch.matmul(R1, data.normals_p1.T).T\n\n data.atom_coords_p2 = torch.matmul(R2, data.atom_coords_p2.T).T\n data.xyz_p2 = torch.matmul(R2, data.xyz_p2.T).T\n data.normals_p2 = torch.matmul(R2, data.normals_p2.T).T\n\n data.rand_rot1 = R1\n data.rand_rot2 = R2\n return data\n\n def __repr__(self):\n return \"{}()\".format(self.__class__.__name__)\n\n\nclass CenterPairAtoms(object):\n r\"\"\"Centers a protein\"\"\"\n\n def __call__(self, data):\n atom_center1 = data.atom_coords_p1.mean(dim=-2, keepdim=True)\n atom_center2 = data.atom_coords_p2.mean(dim=-2, keepdim=True)\n\n data.atom_coords_p1 = data.atom_coords_p1 - atom_center1\n data.atom_coords_p2 = data.atom_coords_p2 - atom_center2\n\n data.xyz_p1 = data.xyz_p1 - atom_center1\n data.xyz_p2 = data.xyz_p2 - atom_center2\n\n data.atom_center1 = atom_center1\n data.atom_center2 = atom_center2\n return data\n\n def __repr__(self):\n return \"{}()\".format(self.__class__.__name__)\n\n\nclass NormalizeChemFeatures(object):\n r\"\"\"Centers a protein\"\"\"\n\n def __call__(self, data):\n pb_upper = 3.0\n pb_lower = -3.0\n\n chem_p1 = data.chemical_features_p1\n chem_p2 = data.chemical_features_p2\n\n pb_p1 = chem_p1[:, 0]\n pb_p2 = chem_p2[:, 0]\n hb_p1 = chem_p1[:, 1]\n hb_p2 = chem_p2[:, 1]\n hp_p1 = chem_p1[:, 2]\n hp_p2 = chem_p2[:, 2]\n\n # Normalize PB\n pb_p1 = torch.clamp(pb_p1, pb_lower, pb_upper)\n pb_p1 = (pb_p1 - pb_lower) / (pb_upper - pb_lower)\n pb_p1 = 2 * pb_p1 - 1\n\n pb_p2 = torch.clamp(pb_p2, pb_lower, pb_upper)\n pb_p2 = (pb_p2 - pb_lower) / (pb_upper - pb_lower)\n pb_p2 = 2 * pb_p2 - 1\n\n # Normalize HP\n hp_p1 = hp_p1 / 4.5\n hp_p2 = hp_p2 / 4.5\n\n data.chemical_features_p1 = torch.stack([pb_p1, hb_p1, hp_p1]).T\n data.chemical_features_p2 = torch.stack([pb_p2, hb_p2, hp_p2]).T\n\n return data\n\n def __repr__(self):\n return \"{}()\".format(self.__class__.__name__)\n\n\ndef load_protein_npy(pdb_id, data_dir, center=False, single_pdb=False):\n \"\"\"Loads a protein surface mesh and its features\"\"\"\n\n # Load the data, and read the connectivity information:\n triangles = (\n None\n if single_pdb\n else inttensor(np.load(data_dir / (pdb_id + \"_triangles.npy\"))).T\n )\n # Normalize the point cloud, as specified by the 
user:\n points = None if single_pdb else tensor(np.load(data_dir / (pdb_id + \"_xyz.npy\")))\n center_location = None if single_pdb else torch.mean(points, axis=0, keepdims=True)\n\n atom_coords = tensor(np.load(data_dir / (pdb_id + \"_atomxyz.npy\")))\n atom_types = tensor(np.load(data_dir / (pdb_id + \"_atomtypes.npy\")))\n\n if center:\n points = points - center_location\n atom_coords = atom_coords - center_location\n\n # Interface labels\n iface_labels = (\n None\n if single_pdb\n else tensor(np.load(data_dir / (pdb_id + \"_iface_labels.npy\")).reshape((-1, 1)))\n )\n\n # Features\n chemical_features = (\n None if single_pdb else tensor(np.load(data_dir / (pdb_id + \"_features.npy\")))\n )\n\n # Normals\n normals = (\n None if single_pdb else tensor(np.load(data_dir / (pdb_id + \"_normals.npy\")))\n )\n\n protein_data = Data(\n xyz=points,\n face=triangles,\n chemical_features=chemical_features,\n y=iface_labels,\n normals=normals,\n center_location=center_location,\n num_nodes=None if single_pdb else points.shape[0],\n atom_coords=atom_coords,\n atom_types=atom_types,\n )\n return protein_data\n\n\nclass PairData(Data):\n def __init__(\n self,\n xyz_p1=None,\n xyz_p2=None,\n face_p1=None,\n face_p2=None,\n chemical_features_p1=None,\n chemical_features_p2=None,\n y_p1=None,\n y_p2=None,\n normals_p1=None,\n normals_p2=None,\n center_location_p1=None,\n center_location_p2=None,\n atom_coords_p1=None,\n atom_coords_p2=None,\n atom_types_p1=None,\n atom_types_p2=None,\n atom_center1=None,\n atom_center2=None,\n rand_rot1=None,\n rand_rot2=None,\n ):\n super().__init__()\n self.xyz_p1 = xyz_p1\n self.xyz_p2 = xyz_p2\n self.face_p1 = face_p1\n self.face_p2 = face_p2\n\n self.chemical_features_p1 = chemical_features_p1\n self.chemical_features_p2 = chemical_features_p2\n self.y_p1 = y_p1\n self.y_p2 = y_p2\n self.normals_p1 = normals_p1\n self.normals_p2 = normals_p2\n self.center_location_p1 = center_location_p1\n self.center_location_p2 = center_location_p2\n self.atom_coords_p1 = atom_coords_p1\n self.atom_coords_p2 = atom_coords_p2\n self.atom_types_p1 = atom_types_p1\n self.atom_types_p2 = atom_types_p2\n self.atom_center1 = atom_center1\n self.atom_center2 = atom_center2\n self.rand_rot1 = rand_rot1\n self.rand_rot2 = rand_rot2\n\n def __inc__(self, key, value):\n if key == \"face_p1\":\n return self.xyz_p1.size(0)\n if key == \"face_p2\":\n return self.xyz_p2.size(0)\n else:\n return super(PairData, self).__inc__(key, value)\n\n def __cat_dim__(self, key, value):\n if (\"index\" in key) or (\"face\" in key):\n return 1\n else:\n return 0\n\n\ndef load_protein_pair(pdb_id, data_dir,single_pdb=False):\n \"\"\"Loads a protein surface mesh and its features\"\"\"\n pspl = pdb_id.split(\"_\")\n p1_id = pspl[0] + \"_\" + pspl[1]\n p2_id = pspl[0] + \"_\" + pspl[2]\n\n p1 = load_protein_npy(p1_id, data_dir, center=False,single_pdb=single_pdb)\n p2 = load_protein_npy(p2_id, data_dir, center=False,single_pdb=single_pdb)\n # pdist = ((p1['xyz'][:,None,:]-p2['xyz'][None,:,:])**2).sum(-1).sqrt()\n # pdist = pdist<2.0\n # y_p1 = (pdist.sum(1)>0).to(torch.float).reshape(-1,1)\n # y_p2 = (pdist.sum(0)>0).to(torch.float).reshape(-1,1)\n y_p1 = p1[\"y\"]\n y_p2 = p2[\"y\"]\n\n protein_pair_data = PairData(\n xyz_p1=p1[\"xyz\"],\n xyz_p2=p2[\"xyz\"],\n face_p1=p1[\"face\"],\n face_p2=p2[\"face\"],\n chemical_features_p1=p1[\"chemical_features\"],\n chemical_features_p2=p2[\"chemical_features\"],\n y_p1=y_p1,\n y_p2=y_p2,\n normals_p1=p1[\"normals\"],\n normals_p2=p2[\"normals\"],\n 
center_location_p1=p1[\"center_location\"],\n center_location_p2=p2[\"center_location\"],\n atom_coords_p1=p1[\"atom_coords\"],\n atom_coords_p2=p2[\"atom_coords\"],\n atom_types_p1=p1[\"atom_types\"],\n atom_types_p2=p2[\"atom_types\"],\n )\n return protein_pair_data\n\n\nclass ProteinPairsSurfaces(InMemoryDataset):\n url = \"\"\n\n def __init__(self, root, ppi=False, train=True, transform=None, pre_transform=None):\n self.ppi = ppi\n super(ProteinPairsSurfaces, self).__init__(root, transform, pre_transform)\n path = self.processed_paths[0] if train else self.processed_paths[1]\n self.data, self.slices = torch.load(path)\n\n @property\n def raw_file_names(self):\n return \"masif_site_masif_search_pdbs_and_ply_files.tar.gz\"\n\n @property\n def processed_file_names(self):\n if not self.ppi:\n file_names = [\n \"training_pairs_data.pt\",\n \"testing_pairs_data.pt\",\n \"training_pairs_data_ids.npy\",\n \"testing_pairs_data_ids.npy\",\n ]\n else:\n file_names = [\n \"training_pairs_data_ppi.pt\",\n \"testing_pairs_data_ppi.pt\",\n \"training_pairs_data_ids_ppi.npy\",\n \"testing_pairs_data_ids_ppi.npy\",\n ]\n return file_names\n\n def download(self):\n url = 'https://zenodo.org/record/2625420/files/masif_site_masif_search_pdbs_and_ply_files.tar.gz'\n target_path = self.raw_paths[0]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(target_path, 'wb') as f:\n f.write(response.raw.read())\n \n #raise RuntimeError(\n # \"Dataset not found. Please download {} from {} and move it to {}\".format(\n # self.raw_file_names, self.url, self.raw_dir\n # )\n #)\n\n def process(self):\n pdb_dir = Path(self.root) / \"raw\" / \"01-benchmark_pdbs\"\n surf_dir = Path(self.root) / \"raw\" / \"01-benchmark_surfaces\"\n protein_dir = Path(self.root) / \"raw\" / \"01-benchmark_surfaces_npy\"\n lists_dir = Path('./lists')\n\n # Untar surface files\n if not (pdb_dir.exists() and surf_dir.exists()):\n tar = tarfile.open(self.raw_paths[0])\n tar.extractall(self.raw_dir)\n tar.close()\n\n if not protein_dir.exists():\n protein_dir.mkdir(parents=False, exist_ok=False)\n convert_plys(surf_dir,protein_dir)\n convert_pdbs(pdb_dir,protein_dir)\n\n with open(lists_dir / \"training.txt\") as f_tr, open(\n lists_dir / \"testing.txt\"\n ) as f_ts:\n training_list = sorted(f_tr.read().splitlines())\n testing_list = sorted(f_ts.read().splitlines())\n\n with open(lists_dir / \"training_ppi.txt\") as f_tr, open(\n lists_dir / \"testing_ppi.txt\"\n ) as f_ts:\n training_pairs_list = sorted(f_tr.read().splitlines())\n testing_pairs_list = sorted(f_ts.read().splitlines())\n pairs_list = sorted(training_pairs_list + testing_pairs_list)\n\n if not self.ppi:\n training_pairs_list = []\n for p in pairs_list:\n pspl = p.split(\"_\")\n p1 = pspl[0] + \"_\" + pspl[1]\n p2 = pspl[0] + \"_\" + pspl[2]\n\n if p1 in training_list:\n training_pairs_list.append(p)\n if p2 in training_list:\n training_pairs_list.append(pspl[0] + \"_\" + pspl[2] + \"_\" + pspl[1])\n\n testing_pairs_list = []\n for p in pairs_list:\n pspl = p.split(\"_\")\n p1 = pspl[0] + \"_\" + pspl[1]\n p2 = pspl[0] + \"_\" + pspl[2]\n if p1 in testing_list:\n testing_pairs_list.append(p)\n if p2 in testing_list:\n testing_pairs_list.append(pspl[0] + \"_\" + pspl[2] + \"_\" + pspl[1])\n\n # # Read data into huge `Data` list.\n training_pairs_data = []\n training_pairs_data_ids = []\n for p in training_pairs_list:\n try:\n protein_pair = load_protein_pair(p, protein_dir)\n except FileNotFoundError:\n continue\n 
training_pairs_data.append(protein_pair)\n training_pairs_data_ids.append(p)\n\n testing_pairs_data = []\n testing_pairs_data_ids = []\n for p in testing_pairs_list:\n try:\n protein_pair = load_protein_pair(p, protein_dir)\n except FileNotFoundError:\n continue\n testing_pairs_data.append(protein_pair)\n testing_pairs_data_ids.append(p)\n\n if self.pre_filter is not None:\n training_pairs_data = [\n data for data in training_pairs_data if self.pre_filter(data)\n ]\n testing_pairs_data = [\n data for data in testing_pairs_data if self.pre_filter(data)\n ]\n\n if self.pre_transform is not None:\n training_pairs_data = [\n self.pre_transform(data) for data in training_pairs_data\n ]\n testing_pairs_data = [\n self.pre_transform(data) for data in testing_pairs_data\n ]\n\n training_pairs_data, training_pairs_slices = self.collate(training_pairs_data)\n torch.save(\n (training_pairs_data, training_pairs_slices), self.processed_paths[0]\n )\n np.save(self.processed_paths[2], training_pairs_data_ids)\n testing_pairs_data, testing_pairs_slices = self.collate(testing_pairs_data)\n torch.save((testing_pairs_data, testing_pairs_slices), self.processed_paths[1])\n np.save(self.processed_paths[3], testing_pairs_data_ids)\n" ]
[ [ "torch.mean", "scipy.spatial.transform.Rotation.random", "torch.load", "torch.sum", "numpy.save", "torch.matmul", "torch.stack", "numpy.load", "torch.clamp", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.5", "1.2", "1.3", "1.4" ], "tensorflow": [] } ]
DaoiestFire/self-supervised-learning-of-object-movement
[ "4db59bf352efd946661feffc7afc4630c6731852", "4db59bf352efd946661feffc7afc4630c6731852" ]
[ "data/datasets.py", "modules/u_net.py" ]
[ "import os\r\nimport glob\r\nimport random\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom imageio import mimread\r\nfrom skimage.color import gray2rgb\r\nfrom skimage import io, img_as_float32\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom torch.utils.data import Dataset\r\nfrom data.augmentation import AllAugmentationTransform\r\n\r\n\r\ndef read_video(name, frame_shape):\r\n \"\"\"\r\n Read video which can be:\r\n - an image of concatenated frames\r\n - '.mp4' and'.gif'\r\n - folder with videos\r\n \"\"\"\r\n\r\n if os.path.isdir(name):\r\n frames = sorted(os.listdir(name))\r\n num_frames = len(frames)\r\n video_array = np.array(\r\n [img_as_float32(io.imread(os.path.join(name, frames[idx]))) for idx in range(num_frames)])\r\n elif name.lower().endswith('.png') or name.lower().endswith('.jpg'):\r\n image = io.imread(name)\r\n\r\n if len(image.shape) == 2 or image.shape[2] == 1:\r\n image = gray2rgb(image)\r\n\r\n if image.shape[2] == 4:\r\n image = image[..., :3]\r\n\r\n image = img_as_float32(image)\r\n\r\n video_array = np.moveaxis(image, 1, 0)\r\n\r\n video_array = video_array.reshape((-1,) + frame_shape)\r\n video_array = np.moveaxis(video_array, 1, 2)\r\n elif name.lower().endswith('.gif') or name.lower().endswith('.mp4') or name.lower().endswith('.mov'):\r\n video = np.array(mimread(name))\r\n if len(video.shape) == 3:\r\n video = np.array([gray2rgb(frame) for frame in video])\r\n if video.shape[-1] == 4:\r\n video = video[..., :3]\r\n video_array = img_as_float32(video)\r\n else:\r\n raise Exception(\"Unknown file extensions %s\" % name)\r\n\r\n return video_array\r\n\r\n\r\nclass FramesDataset(Dataset):\r\n \"\"\"\r\n Dataset of videos, each video can be represented as:\r\n - an image of concatenated frames\r\n - '.mp4' or '.gif'\r\n - folder with all frames\r\n \"\"\"\r\n\r\n def __init__(self, root_dir, frame_shape=(256, 256, 3), id_sampling=False, is_train=True,\r\n random_seed=0, pairs_list=None, augmentation_params=None):\r\n self.root_dir = root_dir\r\n self.videos = os.listdir(root_dir)\r\n self.frame_shape = tuple(frame_shape)\r\n self.pairs_list = pairs_list\r\n self.id_sampling = id_sampling\r\n if os.path.exists(os.path.join(root_dir, 'train')):\r\n assert os.path.exists(os.path.join(root_dir, 'test'))\r\n print(\"Use predefined train-test split.\")\r\n if id_sampling:\r\n train_videos = {os.path.basename(video).split('#')[0] for video in\r\n os.listdir(os.path.join(root_dir, 'train'))}\r\n train_videos = list(train_videos)\r\n else:\r\n train_videos = os.listdir(os.path.join(root_dir, 'train'))\r\n test_videos = os.listdir(os.path.join(root_dir, 'test'))\r\n self.root_dir = os.path.join(self.root_dir, 'train' if is_train else 'test')\r\n else:\r\n print(\"Use random train-test split.\")\r\n train_videos, test_videos = train_test_split(self.videos, random_state=random_seed, test_size=0.2)\r\n\r\n if is_train:\r\n self.videos = train_videos\r\n else:\r\n self.videos = test_videos\r\n\r\n self.is_train = is_train\r\n\r\n if self.is_train:\r\n self.transform = AllAugmentationTransform(**augmentation_params)\r\n else:\r\n self.transform = None\r\n\r\n def __len__(self):\r\n return len(self.videos)\r\n\r\n def __getitem__(self, idx):\r\n if self.is_train and self.id_sampling:\r\n name = self.videos[idx]\r\n path = np.random.choice(glob.glob(os.path.join(self.root_dir, name + '*.mp4')))\r\n else:\r\n name = self.videos[idx]\r\n path = os.path.join(self.root_dir, name)\r\n\r\n video_name = os.path.basename(path)\r\n\r\n if self.is_train and 
os.path.isdir(path):\r\n frames = os.listdir(path)\r\n num_frames = len(frames)\r\n frame_idx = np.sort(np.random.choice(num_frames, replace=True, size=2))\r\n video_array = [img_as_float32(io.imread(os.path.join(path, frames[idx]))) for idx in frame_idx]\r\n else:\r\n video_array = read_video(path, frame_shape=self.frame_shape)\r\n num_frames = len(video_array)\r\n frame_idx = np.sort(np.random.choice(num_frames, replace=True, size=2)) if self.is_train else range(\r\n num_frames)\r\n video_array = list(video_array[frame_idx])\r\n\r\n if self.transform is not None:\r\n video_array = self.transform(video_array)\r\n\r\n out = dict()\r\n if self.is_train:\r\n source = np.array(video_array[0], dtype='float32')\r\n driving = np.array(video_array[1], dtype='float32')\r\n\r\n out['source'] = source.transpose((2, 0, 1))\r\n out['driving'] = driving.transpose((2, 0, 1))\r\n else:\r\n video = np.array(video_array, dtype='float32')\r\n out['video'] = video.transpose((3, 0, 1, 2))\r\n\r\n out['name'] = video_name\r\n\r\n return out\r\n\r\n\r\nclass PairedDataset(Dataset):\r\n \"\"\"Dataset of pairs for animation.\"\"\"\r\n\r\n def __init__(self, initial_dataset, number_of_pairs, seed=0):\r\n self.initial_dataset = initial_dataset\r\n pairs_list = self.initial_dataset.pairs_list\r\n\r\n np.random.seed(seed)\r\n\r\n if pairs_list is None:\r\n max_idx = min(number_of_pairs, len(initial_dataset))\r\n nx, ny = max_idx, max_idx\r\n xy = np.mgrid[:nx, :ny].reshape(2, -1).T\r\n number_of_pairs = min(xy.shape[0], number_of_pairs)\r\n self.pairs = xy.take(np.random.choice(xy.shape[0], number_of_pairs, replace=False), axis=0)\r\n else:\r\n videos = self.initial_dataset.videos\r\n name_to_index = {name: index for index, name in enumerate(videos)}\r\n pairs = pd.read_csv(pairs_list)\r\n pairs = pairs[np.logical_and(pairs['source'].isin(videos), pairs['driving'].isin(videos))]\r\n\r\n number_of_pairs = min(pairs.shape[0], number_of_pairs)\r\n self.pairs = []\r\n self.start_frames = []\r\n for ind in range(number_of_pairs):\r\n self.pairs.append(\r\n (name_to_index[pairs['driving'].iloc[ind]], name_to_index[pairs['source'].iloc[ind]]))\r\n\r\n def __len__(self):\r\n return len(self.pairs)\r\n\r\n def __getitem__(self, idx):\r\n pair = self.pairs[idx]\r\n first = self.initial_dataset[pair[0]]\r\n second = self.initial_dataset[pair[1]]\r\n first = {'driving_' + key: value for key, value in first.items()}\r\n second = {'source_' + key: value for key, value in second.items()}\r\n\r\n return {**first, **second}\r\n", "\"\"\"implementation of u_net architecture\"\"\"\r\nimport torch\r\nimport torch.nn as nn\r\nfrom .utils import DownBlock2d, UpBlock2d\r\n\r\n\r\nclass Encoder(nn.Module):\r\n \"\"\"U-net Encoder\"\"\"\r\n\r\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\r\n super(Encoder, self).__init__()\r\n\r\n down_blocks = []\r\n for i in range(num_blocks):\r\n down_blocks.append(DownBlock2d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)),\r\n min(max_features, block_expansion * (2 ** (i + 1))),\r\n kernel_size=3, padding=1))\r\n self.down_blocks = nn.ModuleList(down_blocks)\r\n\r\n def forward(self, x):\r\n outs = [x]\r\n for down_block in self.down_blocks:\r\n outs.append(down_block(outs[-1]))\r\n return outs\r\n\r\n\r\nclass Decoder(nn.Module):\r\n \"\"\"U-net Decoder\"\"\"\r\n\r\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\r\n super(Decoder, self).__init__()\r\n\r\n up_blocks = []\r\n\r\n for i in 
range(num_blocks)[::-1]:\r\n in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))\r\n out_filters = min(max_features, block_expansion * (2 ** i))\r\n up_blocks.append(UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1))\r\n\r\n self.up_blocks = nn.ModuleList(up_blocks)\r\n self.out_filters = block_expansion + in_features\r\n\r\n def forward(self, x):\r\n out = x.pop()\r\n for up_block in self.up_blocks:\r\n out = up_block(out)\r\n skip = x.pop()\r\n out = torch.cat([out, skip], dim=1)\r\n return out\r\n\r\n\r\nclass Unet(nn.Module):\r\n \"\"\"U-net architecture\"\"\"\r\n\r\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\r\n super(Unet, self).__init__()\r\n self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)\r\n self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)\r\n self.out_filters = self.decoder.out_filters\r\n\r\n def forward(self, x):\r\n return self.decoder(self.encoder(x))\r\n" ]
[ [ "pandas.read_csv", "numpy.random.seed", "numpy.random.choice", "sklearn.model_selection.train_test_split", "numpy.moveaxis", "numpy.array" ], [ "torch.nn.ModuleList", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
liuhanyao98/nums_gpu_draft
[ "48df59afe605f02ea2bd609c5f9e0006fbc27a5d" ]
[ "nums/core/array/application.py" ]
[ "# coding=utf-8\n# Copyright (C) 2020 NumS Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import List\n\nimport numpy as np\n\nfrom nums.core.array.blockarray import BlockArray, Block\nfrom nums.core.array import utils as array_utils\nfrom nums.core.storage.storage import ArrayGrid, StoredArray, StoredArrayS3\n# TODO(hme): Remove dependence on specific system and scheduler implementations.\nfrom nums.core.systems.systems import System, RaySystem, SerialSystem\nfrom nums.core.systems.gpu_systems import CupyParallelSystem\nfrom nums.core.systems.schedulers import BlockCyclicScheduler\nfrom nums.core.systems import utils as systems_utils\nfrom nums.core.systems.filesystem import FileSystem\nfrom nums.core.array.random import NumsRandomState\n\n# pylint: disable = too-many-lines\n\n\nclass ArrayApplication(object):\n\n def __init__(self, system: System, filesystem: FileSystem):\n self.system: System = system\n self._filesystem: FileSystem = filesystem\n self._array_grids: (str, ArrayGrid) = {}\n self.random = self.random_state()\n\n self.one_half = self.scalar(.5)\n self.two = self.scalar(2.0)\n self.one = self.scalar(1.0)\n self.zero = self.scalar(0.0)\n self._block_shape_map = {}\n\n def num_cores_total(self):\n if isinstance(self.system, RaySystem):\n system: RaySystem = self.system\n nodes = system.nodes()\n num_cores = sum(map(lambda n: n[\"Resources\"][\"CPU\"], nodes))\n elif isinstance(self.system, CupyParallelSystem):\n system: CupyParallelSystem = self.system\n num_cores = system.num_gpus\n else:\n assert isinstance(self.system, SerialSystem)\n num_cores = systems_utils.get_num_cores()\n return int(num_cores)\n\n def compute_block_shape(self,\n shape: tuple,\n dtype: np.dtype,\n cluster_shape=None,\n num_cores=None):\n # TODO (hme): Add support for downstream optimizer to decide block shape.\n if dtype in (np.float32, np.float64, float):\n dtype = np.finfo(dtype).dtype\n elif dtype in (np.int32, np.int64, int):\n dtype = np.iinfo(dtype).dtype\n elif dtype in (bool, np.bool_):\n dtype = np.dtype(np.bool_)\n else:\n raise ValueError(\"dtype %s not supported\" % str(dtype))\n\n nbytes = dtype.alignment\n size = np.product(shape) * nbytes\n # If the object is less than 100 megabytes, there's not much value in constructing\n # a block tensor.\n if size < 10 ** 8:\n block_shape = shape\n return block_shape\n\n if num_cores is not None:\n pass\n else:\n num_cores = self.num_cores_total()\n\n if cluster_shape is not None:\n pass\n elif isinstance(self.system, RaySystem) \\\n and isinstance(self.system.scheduler, BlockCyclicScheduler):\n # This configuration is the default.\n cluster_shape = self.system.scheduler.cluster_shape\n elif isinstance(self.system, CupyParallelSystem):\n cluster_shape = self.system.cluster_shape\n else:\n assert isinstance(self.system, SerialSystem)\n cluster_shape = (1, 1)\n\n if len(shape) < len(cluster_shape):\n cluster_shape = cluster_shape[:len(shape)]\n elif len(shape) > len(cluster_shape):\n cluster_shape = 
list(cluster_shape)\n for axis in range(len(shape)):\n if axis >= len(cluster_shape):\n cluster_shape.append(1)\n cluster_shape = tuple(cluster_shape)\n\n shape_np = np.array(shape, dtype=np.int)\n # Softmax on cluster shape gives strong preference to larger dimensions.\n cluster_weights = np.exp(np.array(cluster_shape)) / np.sum(np.exp(cluster_shape))\n shape_fracs = np.array(shape) / np.sum(shape)\n # cluster_weights weight the proportion of cores available along each axis,\n # and shape_fracs is the proportion of data along each axis.\n weighted_shape_fracs = cluster_weights * shape_fracs\n weighted_shape_fracs = weighted_shape_fracs / np.sum(weighted_shape_fracs)\n\n # Compute dimensions of grid shape\n # so that the number of blocks are close to the number of cores.\n grid_shape_frac = num_cores ** weighted_shape_fracs\n grid_shape = np.floor(grid_shape_frac)\n # Put remainder on largest axis.\n remaining = np.sum(grid_shape_frac - grid_shape)\n grid_shape[np.argmax(shape)] += remaining\n grid_shape = np.ceil(grid_shape).astype(np.int)\n\n # We use ceiling of floating block shape\n # so that resulting grid shape is <= to what we compute above.\n block_shape = tuple((shape_np + grid_shape - 1) // grid_shape)\n return block_shape\n\n def get_block_shape(self, shape, dtype: np.dtype):\n # Simple way to ensure shape compatibility for basic linear algebra operations.\n block_shape = self.compute_block_shape(shape, dtype)\n final_block_shape = []\n for axis in range(len(shape)):\n shape_dim = shape[axis]\n block_shape_dim = block_shape[axis]\n if shape_dim not in self._block_shape_map:\n self._block_shape_map[shape_dim] = block_shape_dim\n final_block_shape.append(self._block_shape_map[shape_dim])\n return tuple(final_block_shape)\n\n def _get_array_grid(self, filename: str, stored_array_cls) -> ArrayGrid:\n if filename not in self._array_grids:\n store_inst: StoredArray = stored_array_cls(filename)\n self._array_grids[filename] = store_inst.get_grid()\n return self._array_grids[filename]\n\n ######################################\n # Filesystem API\n ######################################\n\n def write_fs(self, ba: BlockArray, filename: str):\n res = self._write(ba, filename, self._filesystem.write_block_fs)\n self._filesystem.write_meta_fs(ba, filename)\n return res\n\n def read_fs(self, filename: str):\n meta = self._filesystem.read_meta_fs(filename)\n addresses = meta[\"addresses\"]\n grid_meta = meta[\"grid_meta\"]\n grid = ArrayGrid.from_meta(grid_meta)\n ba: BlockArray = BlockArray(grid, self.system)\n for grid_entry in addresses:\n node_address = addresses[grid_entry]\n options = {\"resources\": {node_address: 1.0 / 10 ** 4}}\n ba.blocks[grid_entry].oid = self._filesystem.read_block_fs(filename,\n grid_entry,\n grid_meta,\n options=options)\n return ba\n\n def delete_fs(self, filename: str):\n meta = self._filesystem.read_meta_fs(filename)\n addresses = meta[\"addresses\"]\n grid_meta = meta[\"grid_meta\"]\n grid = ArrayGrid.from_meta(grid_meta)\n result_grid = ArrayGrid(grid.grid_shape,\n tuple(np.ones_like(grid.shape, dtype=np.int)),\n dtype=dict.__name__)\n rarr = BlockArray(result_grid, self.system)\n for grid_entry in addresses:\n node_address = addresses[grid_entry]\n options = {\"resources\": {node_address: 1.0 / 10 ** 4}}\n rarr.blocks[grid_entry].oid = self._filesystem.delete_block_fs(filename,\n grid_entry,\n grid_meta,\n options=options)\n self._filesystem.delete_meta_fs(filename)\n return rarr\n\n def write_s3(self, ba: BlockArray, filename: str):\n grid_entry = 
tuple(np.zeros_like(ba.shape, dtype=np.int))\n result = self._filesystem.write_meta_s3(filename,\n grid_meta=ba.grid.to_meta(),\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": ba.grid.grid_shape\n })\n assert \"ETag\" in self.system.get(result).item(), \"Metadata write failed.\"\n return self._write(ba, filename, self._filesystem.write_block_s3)\n\n def _write(self, ba: BlockArray, filename, remote_func):\n grid = ba.grid\n result_grid = ArrayGrid(grid.grid_shape,\n tuple(np.ones_like(grid.shape, dtype=np.int)),\n dtype=dict.__name__)\n rarr = BlockArray(result_grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n rarr.blocks[grid_entry].oid = remote_func(ba.blocks[grid_entry].oid,\n filename,\n grid_entry,\n grid.to_meta(),\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def read_s3(self, filename: str):\n store_cls, remote_func = StoredArrayS3, self._filesystem.read_block_s3\n grid = self._get_array_grid(filename, store_cls)\n grid_meta = grid.to_meta()\n grid_entry_iterator = grid.get_entry_iterator()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid_entry_iterator:\n rarr.blocks[grid_entry].oid = remote_func(filename, grid_entry, grid_meta,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def delete_s3(self, filename: str):\n grid = self._get_array_grid(filename, StoredArrayS3)\n grid_entry = tuple(np.zeros_like(grid.shape, dtype=np.int))\n result = self._filesystem.delete_meta_s3(filename,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n deleted_key = self.system.get(result).item()[\"Deleted\"][0][\"Key\"]\n assert deleted_key == StoredArrayS3(filename, grid).get_meta_key()\n results: BlockArray = self._delete(filename,\n StoredArrayS3,\n self._filesystem.delete_block_s3)\n return results\n\n def _delete(self, filename, store_cls, remote_func):\n grid = self._get_array_grid(filename, store_cls)\n result_grid = ArrayGrid(grid.grid_shape,\n tuple(np.ones_like(grid.shape, dtype=np.int)),\n dtype=dict.__name__)\n rarr = BlockArray(result_grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n rarr.blocks[grid_entry].oid = remote_func(filename, grid_entry, grid.to_meta(),\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def read_csv(self, filename, dtype=np.float, delimiter=',', has_header=False, num_workers=None):\n if num_workers is None:\n num_workers = self.num_cores_total()\n arrays: list = self._filesystem.read_csv(filename, dtype, delimiter, has_header,\n num_workers)\n shape = np.zeros(len(arrays[0].shape), dtype=int)\n for array in arrays:\n shape += np.array(array.shape, dtype=int)\n shape = tuple(shape)\n block_shape = self.get_block_shape(shape, dtype)\n result = self.concatenate(arrays, axis=0, axis_block_size=block_shape[0])\n # Release references immediately, in case we need to do another reshape.\n del arrays\n if result.block_shape[1] != block_shape[1]:\n result = result.reshape(block_shape=block_shape)\n return result\n\n def loadtxt(self, fname, dtype=float, comments='# ', delimiter=' ',\n converters=None, skiprows=0, usecols=None, unpack=False,\n ndmin=0, encoding='bytes', max_rows=None, num_workers=None) -> BlockArray:\n if num_workers is None:\n num_workers = self.num_cores_total()\n return self._filesystem.loadtxt(\n fname, dtype=dtype, comments=comments, delimiter=delimiter,\n converters=converters, skiprows=skiprows,\n 
usecols=usecols, unpack=unpack, ndmin=ndmin,\n encoding=encoding, max_rows=max_rows, num_workers=num_workers)\n\n ######################################\n # Array Operations API\n ######################################\n\n def scalar(self, value):\n return BlockArray.from_scalar(value, self.system)\n\n def array(self, array: np.ndarray, block_shape: tuple = None):\n assert len(array.shape) == len(block_shape)\n return BlockArray.from_np(array,\n block_shape=block_shape,\n copy=False,\n system=self.system)\n\n def zeros(self, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n return self._new_array(\"zeros\", shape, block_shape, dtype)\n\n def ones(self, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n return self._new_array(\"ones\", shape, block_shape, dtype)\n\n def empty(self, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n return self._new_array(\"empty\", shape, block_shape, dtype)\n\n def _new_array(self, op_name: str, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n assert len(shape) == len(block_shape)\n if dtype is None:\n dtype = np.float64\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n grid_meta = grid.to_meta()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n rarr.blocks[grid_entry].oid = self.system.new_block(op_name,\n grid_entry,\n grid_meta,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def concatenate(self, arrays: List, axis: int, axis_block_size: int = None):\n num_arrs = len(arrays)\n assert num_arrs > 1\n first_arr: BlockArray = arrays[0]\n num_axes = len(first_arr.shape)\n # Check assumptions and define result shapes and block shapes.\n for i in range(num_arrs):\n curr_ba: BlockArray = arrays[i]\n assert num_axes == len(curr_ba.shape), \"Unequal num axes.\"\n assert curr_ba.dtype == first_arr.dtype, \"Incompatible dtypes \" \\\n \"%s, %s\" % (curr_ba.dtype, first_arr.dtype)\n for curr_axis in range(num_axes):\n first_block_size = first_arr.block_shape[curr_axis]\n block_size = curr_ba.block_shape[curr_axis]\n if first_block_size == block_size:\n continue\n elif axis == curr_axis:\n assert axis_block_size is not None, \"block axis size is required \" \\\n \"when block shapes are neq.\"\n else:\n raise ValueError(\"Other axis shapes and block shapes must be equal.\")\n\n # Compute result shapes.\n result_shape = []\n result_block_shape = []\n for curr_axis in range(num_axes):\n if curr_axis == axis:\n if axis_block_size is None:\n # They are all equal.\n axis_block_size = first_arr.block_shape[curr_axis]\n result_block_size = axis_block_size\n result_size = 0\n for i in range(num_arrs):\n curr_ba: BlockArray = arrays[i]\n size = curr_ba.shape[curr_axis]\n result_size += size\n else:\n result_size = first_arr.shape[curr_axis]\n result_block_size = first_arr.block_shape[curr_axis]\n result_shape.append(result_size)\n result_block_shape.append(result_block_size)\n result_shape, result_block_shape = tuple(result_shape), tuple(result_block_shape)\n result_ba = self.empty(result_shape, result_block_shape, first_arr.dtype)\n\n # Write result blocks.\n # TODO (hme): This can be optimized by updating blocks directly.\n pos = 0\n for arr in arrays:\n delta = arr.shape[axis]\n axis_slice = slice(pos, pos+delta)\n result_selector = tuple([slice(None, None) for _ in range(axis)] + [axis_slice, ...])\n result_ba[result_selector] = arr\n pos += delta\n return result_ba\n\n def eye(self, shape: tuple, block_shape: tuple, dtype: np.dtype 
= None):\n assert len(shape) == len(block_shape) == 2\n if dtype is None:\n dtype = np.float64\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n grid_meta = grid.to_meta()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n syskwargs = {\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n if np.all(np.diff(grid_entry) == 0):\n # This is a diagonal block.\n rarr.blocks[grid_entry].oid = self.system.new_block(\"eye\",\n grid_entry,\n grid_meta,\n syskwargs=syskwargs)\n else:\n rarr.blocks[grid_entry].oid = self.system.new_block(\"zeros\",\n grid_entry,\n grid_meta,\n syskwargs=syskwargs)\n return rarr\n\n def diag(self, X: BlockArray) -> BlockArray:\n if len(X.shape) == 1:\n shape = X.shape[0], X.shape[0]\n block_shape = X.block_shape[0], X.block_shape[0]\n grid = ArrayGrid(shape, block_shape, X.dtype.__name__)\n grid_meta = grid.to_meta()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n syskwargs = {\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n if np.all(np.diff(grid_entry) == 0):\n # This is a diagonal block.\n rarr.blocks[grid_entry].oid = self.system.diag(X.blocks[grid_entry[0]].oid,\n syskwargs=syskwargs)\n else:\n rarr.blocks[grid_entry].oid = self.system.new_block(\"zeros\",\n grid_entry,\n grid_meta,\n syskwargs=syskwargs)\n elif len(X.shape) == 2:\n assert X.shape[0] == X.shape[1]\n assert X.block_shape[0] == X.block_shape[1]\n shape = X.shape[0],\n block_shape = X.block_shape[0],\n grid = ArrayGrid(shape, block_shape, X.dtype.__name__)\n rarr = BlockArray(grid, self.system)\n for grid_entry in X.grid.get_entry_iterator():\n out_grid_entry = grid_entry[:1]\n out_grid_shape = grid.grid_shape[:1]\n syskwargs = {\"grid_entry\": out_grid_entry, \"grid_shape\": out_grid_shape}\n if np.all(np.diff(grid_entry) == 0):\n # This is a diagonal block.\n rarr.blocks[out_grid_entry].oid = self.system.diag(X.blocks[grid_entry].oid,\n syskwargs=syskwargs)\n else:\n raise ValueError(\"X must have 1 or 2 axes.\")\n return rarr\n\n def arange(self, shape, block_shape, step=1, dtype=np.int64) -> BlockArray:\n assert step == 1\n # Generate ranges per block.\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n rarr = BlockArray(grid, self.system)\n for _, grid_entry in enumerate(grid.get_entry_iterator()):\n syskwargs = {\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n start = block_shape[0] * grid_entry[0]\n entry_shape = grid.get_block_shape(grid_entry)\n stop = start + entry_shape[0]\n rarr.blocks[grid_entry].oid = self.system.arange(start,\n stop,\n step,\n dtype,\n syskwargs=syskwargs)\n return rarr\n\n def linspace(self, start, stop, shape, block_shape, endpoint, retstep, dtype, axis):\n assert axis == 0\n assert endpoint is True\n assert retstep is False\n step_size = (stop - start) / (shape[0]-1)\n result = self.arange(shape, block_shape)\n result = start + result * step_size\n if dtype is not None and dtype != result.dtype:\n result = result.astype(dtype)\n return result\n\n def log(self, X: BlockArray):\n return X.ufunc(\"log\")\n\n def exp(self, X: BlockArray):\n return X.ufunc(\"exp\")\n\n def abs(self, X: BlockArray):\n return X.ufunc(\"abs\")\n\n def min(self, X: BlockArray, axis=None, keepdims=False):\n return self.reduce(\"min\", X, axis, keepdims)\n\n def max(self, X: BlockArray, axis=None, keepdims=False):\n return self.reduce(\"max\", X, axis, keepdims)\n\n def argmin(self, X: BlockArray, axis=None):\n pass\n\n def sum(self, X: BlockArray, axis=None, 
keepdims=False, dtype=None):\n return self.reduce(\"sum\", X, axis, keepdims, dtype)\n\n def reduce(self, op_name: str, X: BlockArray, axis=None, keepdims=False, dtype=None):\n res = X.reduce_axis(op_name, axis, keepdims=keepdims)\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def mean(self, X: BlockArray, axis=None, keepdims=False, dtype=None):\n if X.dtype not in (float, np.float32, np.float64):\n X = X.astype(np.float64)\n num_summed = np.product(X.shape) if axis is None else X.shape[axis]\n res = self.sum(X, axis=axis, keepdims=keepdims) / num_summed\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def var(self, X: BlockArray, axis=None, ddof=0, keepdims=False, dtype=None):\n mean = self.mean(X, axis=axis, keepdims=True)\n ss = self.sum((X - mean)**self.two, axis=axis, keepdims=keepdims)\n num_summed = (np.product(X.shape) if axis is None else X.shape[axis]) - ddof\n res = ss / num_summed\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def std(self, X: BlockArray, axis=None, ddof=0, keepdims=False, dtype=None):\n res = self.sqrt(self.var(X, axis, ddof, keepdims))\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def argop(self, op_name: str, arr: BlockArray, axis=None):\n if len(arr.shape) > 1:\n raise NotImplementedError(\"%s currently supports one-dimensional arrays.\" % op_name)\n if axis is None:\n axis = 0\n assert axis == 0\n grid = ArrayGrid(shape=(), block_shape=(), dtype=np.int64.__name__)\n result = BlockArray(grid, self.system)\n reduction_result = None, None\n for grid_entry in arr.grid.get_entry_iterator():\n block_slice: slice = arr.grid.get_slice(grid_entry)[0]\n block: Block = arr.blocks[grid_entry]\n syskwargs = {\n \"grid_entry\": grid_entry,\n \"grid_shape\": arr.grid.grid_shape,\n \"options\": {\"num_returns\": 2},\n }\n reduction_result = self.system.arg_op(op_name,\n block.oid,\n block_slice,\n *reduction_result,\n syskwargs=syskwargs)\n argoptima, _ = reduction_result\n result.blocks[()].oid = argoptima\n return result\n\n def sqrt(self, X):\n if X.dtype not in (float, np.float32, np.float64):\n X = X.astype(np.float64)\n return X.ufunc(\"sqrt\")\n\n def norm(self, X):\n return self.sqrt(X.T @ X)\n\n def xlogy(self, x: BlockArray, y: BlockArray) -> BlockArray:\n if x.dtype not in (float, np.float32, np.float64):\n x = x.astype(np.float64)\n if x.dtype not in (float, np.float32, np.float64):\n y = y.astype(np.float64)\n return self.map_bop(\"xlogy\", x, y)\n\n def where(self, condition: BlockArray, x=None, y=None):\n result_oids = []\n shape_oids = []\n num_axes = max(1, len(condition.shape))\n # Stronger constraint than necessary, but no reason for anything stronger.\n if x is not None or y is not None:\n assert x is not None and y is not None\n assert condition.shape == x.shape == y.shape\n assert condition.block_shape == x.block_shape == y.block_shape\n for grid_entry in condition.grid.get_entry_iterator():\n block: Block = condition.blocks[grid_entry]\n block_slice_tuples = condition.grid.get_slice_tuples(grid_entry)\n roids = self.system.where(block.oid, x, y,\n block_slice_tuples,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": condition.grid.grid_shape,\n \"options\": {\"num_returns\": num_axes+1}\n })\n block_oids, shape_oid = roids[:-1], roids[-1]\n shape_oids.append(shape_oid)\n result_oids.append(block_oids)\n shapes = self.system.get(shape_oids)\n result_shape = (np.sum(shapes),)\n if result_shape == (0,):\n return (self.array(np.array([], dtype=np.int64), 
block_shape=(0,)),)\n # Remove empty shapes.\n result_shape_pair = []\n for i, shape in enumerate(shapes):\n if np.sum(shape) > 0:\n result_shape_pair.append((result_oids[i], shape))\n result_block_shape = self.compute_block_shape(result_shape, np.int64)\n result_arrays = []\n for axis in range(num_axes):\n block_arrays = []\n for i in range(len(result_oids)):\n if shapes[i] == (0,):\n continue\n block_arrays.append(BlockArray.from_oid(result_oids[i][axis],\n shapes[i],\n np.int64,\n self.system))\n if len(block_arrays) == 1:\n axis_result = block_arrays[0]\n else:\n axis_result = self.concatenate(block_arrays, 0, result_block_shape[0])\n result_arrays.append(axis_result)\n return tuple(result_arrays)\n\n def map_uop(self,\n op_name: str,\n arr: BlockArray,\n out: BlockArray = None,\n where=True,\n args=None,\n kwargs=None) -> BlockArray:\n \"\"\"\n A map, for unary operators, that applies to every entry of an array.\n :param op_name: An element-wise unary operator.\n :param arr: A BlockArray.\n :param out: A BlockArray to which the result is written.\n :param where: An indicator specifying the indices to which op is applied.\n :param args: Args provided to op.\n :param kwargs: Keyword args provided to op.\n :return: A BlockArray.\n \"\"\"\n if where is not True:\n raise NotImplementedError(\"'where' argument is not yet supported.\")\n args = () if args is None else args\n kwargs = {} if kwargs is None else kwargs\n shape = arr.shape\n block_shape = arr.block_shape\n dtype = array_utils.get_uop_output_type(op_name, arr.dtype)\n assert len(shape) == len(block_shape)\n if out is None:\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n rarr = BlockArray(grid, self.system)\n else:\n rarr = out\n grid = rarr.grid\n assert rarr.shape == arr.shape and rarr.block_shape == arr.block_shape\n for grid_entry in grid.get_entry_iterator():\n # TODO(hme): Faster to create ndarray first,\n # and instantiate block array on return\n # to avoid instantiating blocks on BlockArray initialization.\n rarr.blocks[grid_entry] = arr.blocks[grid_entry].uop_map(op_name,\n args=args,\n kwargs=kwargs)\n return rarr\n\n def matmul(self,\n arr_1: BlockArray,\n arr_2: BlockArray) -> BlockArray:\n return arr_1 @ arr_2\n\n def tensordot(self,\n arr_1: BlockArray,\n arr_2: BlockArray,\n axes: int = 2) -> BlockArray:\n return arr_1.tensordot(arr_2, axes)\n\n def map_bop(self,\n op_name: str,\n arr_1: BlockArray,\n arr_2: BlockArray,\n out: BlockArray = None,\n where=True,\n args=None,\n kwargs=None) -> BlockArray:\n # TODO (hme): Move this into BlockArray, and invoke on operator implementations.\n \"\"\"\n A map, for binary operators, that applies element-wise to every entry of the input arrays.\n :param op_name: An element-wise binary operator.\n :param arr_1: A BlockArray.\n :param arr_2: A BlockArray.\n :param out: A BlockArray to which the result is written.\n :param where: An indicator specifying the indices to which op is applied.\n :param args: Args provided to op.\n :param kwargs: Keyword args provided to op.\n :return: A BlockArray.\n \"\"\"\n if where is not True:\n raise NotImplementedError(\"'where' argument is not yet supported.\")\n if args is not None:\n raise NotImplementedError(\"'args' is not yet supported.\")\n if not (kwargs is None or len(kwargs) == 0):\n raise NotImplementedError(\"'kwargs' is not yet supported.\")\n\n try:\n ufunc = np.__getattribute__(op_name)\n if (op_name.endswith(\"max\") or op_name == \"maximum\"\n or op_name.endswith(\"min\") or op_name == \"minimum\"\n or 
op_name.startswith(\"logical\")):\n rarr = self._broadcast_bop(op_name, arr_1, arr_2)\n else:\n result_blocks: np.ndarray = ufunc(arr_1.blocks, arr_2.blocks)\n rarr = BlockArray.from_blocks(result_blocks,\n result_shape=None,\n system=self.system)\n except Exception as _:\n rarr = self._broadcast_bop(op_name, arr_1, arr_2)\n if out is not None:\n assert out.grid.grid_shape == rarr.grid.grid_shape\n assert out.shape == rarr.shape\n assert out.block_shape == rarr.block_shape\n out.blocks[:] = rarr.blocks[:]\n rarr = out\n return rarr\n\n def _broadcast_bop(self, op_name, arr_1, arr_2) -> BlockArray:\n \"\"\"\n We want to avoid invoking this op whenever possible; NumPy's imp is faster.\n :param op_name: Name of binary operation.\n :param arr_1: A BlockArray.\n :param arr_2: A BlockArray.\n :return: A BlockArray.\n \"\"\"\n if arr_1.shape != arr_2.shape:\n output_grid_shape = array_utils.broadcast_shape(arr_1.grid.grid_shape,\n arr_2.grid.grid_shape)\n arr_1 = arr_1.broadcast_to(output_grid_shape)\n arr_2 = arr_2.broadcast_to(output_grid_shape)\n dtype = array_utils.get_bop_output_type(op_name,\n arr_1.dtype,\n arr_2.dtype)\n grid = ArrayGrid(arr_1.shape, arr_1.block_shape, dtype.__name__)\n rarr = BlockArray(grid, self.system)\n for grid_entry in rarr.grid.get_entry_iterator():\n block_1: Block = arr_1.blocks[grid_entry]\n block_2: Block = arr_2.blocks[grid_entry]\n rarr.blocks[grid_entry] = block_1.bop(op_name, block_2, {})\n return rarr\n\n def get(self, *arrs):\n if len(arrs) == 1:\n if isinstance(arrs[0], BlockArray):\n return arrs[0].get()\n else:\n return arrs[0]\n else:\n r = []\n for item in arrs:\n if isinstance(item, BlockArray):\n r.append(item.get())\n else:\n r.append(item)\n return r\n\n def allclose(self, a: BlockArray, b: BlockArray, rtol=1.e-5, atol=1.e-8):\n assert a.shape == b.shape and a.block_shape == b.block_shape\n bool_list = []\n grid_shape = a.grid.grid_shape\n for grid_entry in a.grid.get_entry_iterator():\n a_block, b_block = a.blocks[grid_entry].oid, b.blocks[grid_entry].oid\n bool_list.append(self.system.allclose(a_block, b_block, rtol, atol,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid_shape\n }))\n oid = self.system.logical_and(*bool_list,\n syskwargs={\"grid_entry\": (0, 0), \"grid_shape\": (1, 1)})\n return BlockArray.from_oid(oid, (), np.bool, self.system)\n\n def qr(self, X: BlockArray):\n return self.indirect_tsqr(X)\n\n def indirect_tsr(self, X: BlockArray, reshape_output=True):\n assert len(X.shape) == 2\n # TODO (hme): This assertion is temporary and ensures returned\n # shape of qr of block is correct.\n assert X.block_shape[0] >= X.shape[1]\n # Compute R for each block.\n grid = X.grid\n grid_shape = grid.grid_shape\n shape = X.shape\n block_shape = X.block_shape\n R_oids = []\n # Assume no blocking along second dim.\n for i in range(grid_shape[0]):\n # Select a row according to block_shape.\n row = []\n for j in range(grid_shape[1]):\n row.append(X.blocks[i, j].oid)\n R_oids.append(self.system.qr(*row,\n mode=\"r\",\n axis=1,\n syskwargs={\n \"grid_entry\": (i, 0),\n \"grid_shape\": (grid_shape[0], 1),\n \"options\": {\"num_returns\": 1}\n })\n )\n\n # Construct R by summing over R blocks.\n # TODO (hme): Communication may be inefficient due to redundancy of data.\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n tsR = BlockArray(ArrayGrid(shape=R_shape,\n block_shape=R_shape,\n dtype=X.dtype.__name__),\n self.system)\n tsR.blocks[0, 0].oid = self.system.qr(*R_oids,\n mode=\"r\",\n axis=0,\n 
syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1),\n \"options\": {\"num_returns\": 1}\n })\n # If blocking is \"tall-skinny,\" then we're done.\n if R_shape != R_block_shape:\n if reshape_output:\n R = tsR.reshape(shape=R_shape, block_shape=R_block_shape)\n else:\n R = tsR\n else:\n R = tsR\n return R\n\n def indirect_tsqr(self, X: BlockArray, reshape_output=True):\n shape = X.shape\n block_shape = X.block_shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n tsR = self.indirect_tsr(X, reshape_output=False)\n\n # Compute inverse of R.\n tsR_inverse = self.inv(tsR)\n # If blocking is \"tall-skinny,\" then we're done.\n if R_shape != R_block_shape:\n R_inverse = tsR_inverse.reshape(shape=R_shape, block_shape=R_block_shape)\n if reshape_output:\n R = tsR.reshape(shape=R_shape, block_shape=R_block_shape)\n else:\n R = tsR\n else:\n R_inverse = tsR_inverse\n R = tsR\n\n Q = X @ R_inverse\n return Q, R\n\n def direct_tsqr(self, X, reshape_output=True):\n assert len(X.shape) == 2\n\n # Compute R for each block.\n shape = X.shape\n grid = X.grid\n grid_shape = grid.grid_shape\n block_shape = X.block_shape\n Q_oids = []\n R_oids = []\n QR_dims = []\n Q2_shape = [0, shape[1]]\n for i in range(grid_shape[0]):\n # Select a row according to block_shape.\n row = []\n for j in range(grid_shape[1]):\n row.append(X.blocks[i, j].oid)\n # We invoke \"reduced\", so q, r is returned with dimensions (M, K), (K, N), K = min(M, N)\n M = grid.get_block_shape((i, 0))[0]\n N = shape[1]\n K = min(M, N)\n QR_dims.append(((M, K), (K, N)))\n Q2_shape[0] += K\n # Run each row on separate nodes along first axis.\n # This maintains some data locality.\n Q_oid, R_oid = self.system.qr(*row,\n mode=\"reduced\",\n axis=1,\n syskwargs={\n \"grid_entry\": (i, 0),\n \"grid_shape\": (grid_shape[0], 1),\n \"options\": {\"num_returns\": 2}\n })\n R_oids.append(R_oid)\n Q_oids.append(Q_oid)\n\n # TODO (hme): This pulls several order N^2 R matrices on a single node.\n # A solution is the recursive extension to direct TSQR.\n Q2_oid, R2_oid = self.system.qr(*R_oids,\n mode=\"reduced\",\n axis=0,\n syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1),\n \"options\": {\"num_returns\": 2}\n })\n\n Q2_shape = tuple(Q2_shape)\n Q2_block_shape = (QR_dims[0][1][0], shape[1])\n Q2 = self._vec_from_oids([Q2_oid],\n shape=Q2_shape,\n block_shape=Q2_block_shape,\n dtype=X.dtype)\n # The resulting Q's from this operation are N^2 (same size as above R's).\n Q2_oids = list(map(lambda block: block.oid, Q2.blocks.flatten()))\n\n # Construct Q.\n Q = self.zeros(shape=shape,\n block_shape=(block_shape[0], shape[1]),\n dtype=X.dtype)\n for i, grid_entry in enumerate(Q.grid.get_entry_iterator()):\n Q_dims, R_dims = QR_dims[i]\n Q1_block_shape = Q_dims\n Q2_block_shape = R_dims\n Q.blocks[grid_entry].oid = self.system.bop(\"tensordot\", Q_oids[i], Q2_oids[i],\n a1_shape=Q1_block_shape,\n a2_shape=Q2_block_shape,\n a1_T=False, a2_T=False, axes=1,\n syskwargs={\"grid_entry\": grid_entry,\n \"grid_shape\": Q.grid.grid_shape})\n\n # Construct R.\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n tsR = self._vec_from_oids([R2_oid], shape=R_shape, block_shape=R_shape, dtype=X.dtype)\n # If blocking is \"tall-skinny,\" then we're done.\n if R_shape == R_block_shape or not reshape_output:\n R = tsR\n else:\n R = tsR.reshape(shape=R_shape, block_shape=R_block_shape)\n\n if Q.shape != block_shape or not reshape_output:\n Q = Q.reshape(shape=shape, 
block_shape=block_shape)\n\n return Q, R\n\n def svd(self, X):\n # TODO(hme): Optimize by merging with direct qr to compute U directly,\n # to avoid wasting space storing intermediate Q.\n # This may not really help until we have operator fusion.\n assert len(X.shape) == 2\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n Q, R = self.direct_tsqr(X, reshape_output=False)\n assert R.shape == R.block_shape\n R_U, S, VT = self.system.svd(R.blocks[(0, 0)].oid,\n syskwargs={\"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1)})\n R_U: BlockArray = self._vec_from_oids([R_U], R_shape, R_block_shape, X.dtype)\n S: BlockArray = self._vec_from_oids([S], R_shape[:1], R_block_shape[:1], X.dtype)\n VT = self._vec_from_oids([VT], R_shape, R_block_shape, X.dtype)\n U = Q @ R_U\n\n return U, S, VT\n\n def inv(self, X: BlockArray):\n return self._inv(self.system.inv, {}, X)\n\n def _inv(self, remote_func, kwargs, X: BlockArray):\n # TODO (hme): Implement scalable version.\n block_shape = X.block_shape\n assert len(X.shape) == 2\n assert X.shape[0] == X.shape[1]\n single_block = X.shape[0] == X.block_shape[0] and X.shape[1] == X.block_shape[1]\n if single_block:\n result = X.copy()\n else:\n result = X.reshape(block_shape=X.shape)\n result.blocks[0, 0].oid = remote_func(result.blocks[0, 0].oid,\n **kwargs,\n syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1)\n })\n if not single_block:\n result = result.reshape(block_shape=block_shape)\n return result\n\n def cholesky(self, X: BlockArray):\n # TODO (hme): Implement scalable version.\n # Note:\n # A = Q, R\n # A.T @ A = R.T @ R\n # A.T @ A = L @ L.T\n # => R == L.T\n block_shape = X.block_shape\n assert len(X.shape) == 2\n assert X.shape[0] == X.shape[1]\n single_block = X.shape[0] == X.block_shape[0] and X.shape[1] == X.block_shape[1]\n if single_block:\n result = X.copy()\n else:\n result = X.reshape(block_shape=X.shape)\n result.blocks[0, 0].oid = self.system.cholesky(result.blocks[0, 0].oid,\n syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1)\n })\n if not single_block:\n result = result.reshape(block_shape=block_shape)\n return result\n\n def fast_linear_regression(self, X: BlockArray, y: BlockArray):\n assert len(X.shape) == 2\n assert len(y.shape) == 1\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n Q, R = self.indirect_tsqr(X, reshape_output=False)\n R_inv = self.inv(R)\n if R_shape != R_block_shape:\n R_inv = R_inv.reshape(shape=R_shape, block_shape=R_block_shape)\n theta = R_inv @ (Q.T @ y)\n return theta\n\n def linear_regression(self, X: BlockArray, y: BlockArray):\n assert len(X.shape) == 2\n assert len(y.shape) == 1\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n Q, R = self.direct_tsqr(X, reshape_output=False)\n # Invert R.\n R_inv = self.inv(R)\n if R_shape != R_block_shape:\n R_inv = R_inv.reshape(shape=R_shape, block_shape=R_block_shape)\n theta = R_inv @ (Q.T @ y)\n return theta\n\n def ridge_regression(self, X: BlockArray, y: BlockArray, lamb: float):\n assert len(X.shape) == 2\n assert len(y.shape) == 1\n assert lamb >= 0\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n R = self.indirect_tsr(X)\n lamb_vec = self.array(lamb*np.eye(R_shape[0]), block_shape=R_block_shape)\n # TODO 
(hme): A better solution exists, which inverts R by augmenting X and y.\n # See Murphy 7.5.2.\n theta = self.inv(lamb_vec + R.T @ R) @ (X.T @ y)\n return theta\n\n def _vec_from_oids(self, oids, shape, block_shape, dtype):\n arr = BlockArray(ArrayGrid(shape=shape,\n block_shape=shape,\n dtype=dtype.__name__),\n self.system)\n # Make sure resulting grid shape is a vector (1 dimensional).\n assert np.sum(arr.grid.grid_shape) == (max(arr.grid.grid_shape)\n + len(arr.grid.grid_shape) - 1)\n for i, grid_entry in enumerate(arr.grid.get_entry_iterator()):\n arr.blocks[grid_entry].oid = oids[i]\n if block_shape != shape:\n return arr.reshape(block_shape=block_shape)\n return arr\n\n def random_state(self, seed=None):\n return NumsRandomState(self.system, seed)\n" ]
[ [ "numpy.__getattribute__", "numpy.product", "numpy.ones_like", "numpy.eye", "numpy.dtype", "numpy.finfo", "numpy.ceil", "numpy.argmax", "numpy.zeros_like", "numpy.iinfo", "numpy.floor", "numpy.diff", "numpy.exp", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
caglasozen/wilds
[ "db2ff095304891244962509459ee48e2fc5fd5e6", "db2ff095304891244962509459ee48e2fc5fd5e6", "db2ff095304891244962509459ee48e2fc5fd5e6" ]
[ "examples/pretraining/swav/src/logger.py", "wilds/common/data_loaders.py", "wilds/datasets/waterbirds_dataset.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport os\nimport logging\nimport time\nfrom datetime import timedelta\nimport pandas as pd\n\n\nclass LogFormatter:\n def __init__(self):\n self.start_time = time.time()\n\n def format(self, record):\n elapsed_seconds = round(record.created - self.start_time)\n\n prefix = \"%s - %s - %s\" % (\n record.levelname,\n time.strftime(\"%x %X\"),\n timedelta(seconds=elapsed_seconds),\n )\n message = record.getMessage()\n message = message.replace(\"\\n\", \"\\n\" + \" \" * (len(prefix) + 3))\n return \"%s - %s\" % (prefix, message) if message else \"\"\n\n\ndef create_logger(filepath, rank):\n \"\"\"\n Create a logger.\n Use a different log file for each process.\n \"\"\"\n # create log formatter\n log_formatter = LogFormatter()\n\n # create file handler and set level to debug\n if filepath is not None:\n if rank > 0:\n filepath = \"%s-%i\" % (filepath, rank)\n file_handler = logging.FileHandler(filepath, \"a\")\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(log_formatter)\n\n # create console handler and set level to info\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(log_formatter)\n\n # create logger and set level to debug\n logger = logging.getLogger()\n logger.handlers = []\n logger.setLevel(logging.DEBUG)\n logger.propagate = False\n if filepath is not None:\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n # reset logger elapsed time\n def reset_time():\n log_formatter.start_time = time.time()\n\n logger.reset_time = reset_time\n\n return logger\n\n\nclass PD_Stats(object):\n \"\"\"\n Log stuff with pandas library\n \"\"\"\n\n def __init__(self, path, columns):\n self.path = path\n\n # reload path stats\n if os.path.isfile(self.path):\n self.stats = pd.read_pickle(self.path)\n\n # check that columns are the same\n assert list(self.stats.columns) == list(columns)\n\n else:\n self.stats = pd.DataFrame(columns=columns)\n\n def update(self, row, save=True):\n self.stats.loc[len(self.stats.index)] = row\n\n # save the statistics\n if save:\n self.stats.to_pickle(self.path)\n", "import numpy as np\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.utils.data.sampler import WeightedRandomSampler, SubsetRandomSampler\r\nfrom wilds.common.utils import get_counts, split_into_groups\r\n\r\ndef get_train_loader(loader, dataset, batch_size,\r\n uniform_over_groups=None, grouper=None, distinct_groups=True, n_groups_per_batch=None, **loader_kwargs):\r\n \"\"\"\r\n Constructs and returns the data loader for training.\r\n Args:\r\n - loader (str): Loader type. 'standard' for standard loaders and 'group' for group loaders,\r\n which first samples groups and then samples a fixed number of examples belonging\r\n to each group.\r\n - dataset (WILDSDataset or WILDSSubset): Data\r\n - batch_size (int): Batch size\r\n - uniform_over_groups (None or bool): Whether to sample the groups uniformly or according\r\n to the natural data distribution.\r\n Setting to None applies the defaults for each type of loaders.\r\n For standard loaders, the default is False. 
For group loaders,\r\n the default is True.\r\n - grouper (Grouper): Grouper used for group loaders or for uniform_over_groups=True\r\n - distinct_groups (bool): Whether to sample distinct_groups within each minibatch for group loaders.\r\n - n_groups_per_batch (int): Number of groups to sample in each minibatch for group loaders.\r\n - loader_kwargs: kwargs passed into torch DataLoader initialization.\r\n Output:\r\n - data loader (DataLoader): Data loader.\r\n \"\"\"\r\n if loader == 'standard':\r\n if uniform_over_groups is None or not uniform_over_groups:\r\n return DataLoader(\r\n dataset,\r\n shuffle=True, # Shuffle training dataset\r\n sampler=None,\r\n collate_fn=dataset.collate,\r\n batch_size=batch_size,\r\n **loader_kwargs)\r\n else:\r\n assert grouper is not None\r\n groups, group_counts = grouper.metadata_to_group(\r\n dataset.metadata_array,\r\n return_counts=True)\r\n group_weights = 1 / group_counts\r\n weights = group_weights[groups]\r\n\r\n # Replacement needs to be set to True, otherwise we'll run out of minority samples\r\n sampler = WeightedRandomSampler(weights, len(dataset), replacement=True)\r\n return DataLoader(\r\n dataset,\r\n shuffle=False, # The WeightedRandomSampler already shuffles\r\n sampler=sampler,\r\n collate_fn=dataset.collate,\r\n batch_size=batch_size,\r\n **loader_kwargs)\r\n\r\n elif loader == 'group':\r\n if uniform_over_groups is None:\r\n uniform_over_groups = True\r\n assert grouper is not None\r\n assert n_groups_per_batch is not None\r\n if n_groups_per_batch > grouper.n_groups:\r\n raise ValueError(f'n_groups_per_batch was set to {n_groups_per_batch} but there are only {grouper.n_groups} groups specified.')\r\n\r\n group_ids = grouper.metadata_to_group(dataset.metadata_array)\r\n batch_sampler = GroupSampler(\r\n group_ids=group_ids,\r\n batch_size=batch_size,\r\n n_groups_per_batch=n_groups_per_batch,\r\n uniform_over_groups=uniform_over_groups,\r\n distinct_groups=distinct_groups)\r\n\r\n return DataLoader(dataset,\r\n shuffle=None,\r\n sampler=None,\r\n collate_fn=dataset.collate,\r\n batch_sampler=batch_sampler,\r\n drop_last=False,\r\n **loader_kwargs)\r\n\r\ndef get_eval_loader(loader, dataset, batch_size, grouper=None, **loader_kwargs):\r\n \"\"\"\r\n Constructs and returns the data loader for evaluation.\r\n Args:\r\n - loader (str): Loader type. 'standard' for standard loaders.\r\n - dataset (WILDSDataset or WILDSSubset): Data\r\n - batch_size (int): Batch size\r\n - loader_kwargs: kwargs passed into torch DataLoader initialization.\r\n Output:\r\n - data loader (DataLoader): Data loader.\r\n \"\"\"\r\n if loader == 'standard':\r\n return DataLoader(\r\n dataset,\r\n shuffle=False, # Do not shuffle eval datasets\r\n sampler=None,\r\n collate_fn=dataset.collate,\r\n batch_size=batch_size,\r\n **loader_kwargs)\r\n\r\nclass GroupSampler:\r\n \"\"\"\r\n Constructs batches by first sampling groups,\r\n then sampling data from those groups.\r\n It drops the last batch if it's incomplete.\r\n \"\"\"\r\n def __init__(self, group_ids, batch_size, n_groups_per_batch,\r\n uniform_over_groups, distinct_groups):\r\n\r\n if batch_size % n_groups_per_batch != 0:\r\n raise ValueError(f'batch_size ({batch_size}) must be evenly divisible by n_groups_per_batch ({n_groups_per_batch}).')\r\n if len(group_ids) < batch_size:\r\n raise ValueError(f'The dataset has only {len(group_ids)} examples but the batch size is {batch_size}. 
There must be enough examples to form at least one complete batch.')\r\n\r\n self.group_ids = group_ids\r\n self.unique_groups, self.group_indices, unique_counts = split_into_groups(group_ids)\r\n\r\n self.distinct_groups = distinct_groups\r\n self.n_groups_per_batch = n_groups_per_batch\r\n self.n_points_per_group = batch_size // n_groups_per_batch\r\n\r\n self.dataset_size = len(group_ids)\r\n self.num_batches = self.dataset_size // batch_size\r\n\r\n if uniform_over_groups: # Sample uniformly over groups\r\n self.group_prob = None\r\n else: # Sample a group proportionately to its size\r\n self.group_prob = unique_counts.numpy() / unique_counts.numpy().sum()\r\n\r\n def __iter__(self):\r\n for batch_id in range(self.num_batches):\r\n # Note that we are selecting group indices rather than groups\r\n groups_for_batch = np.random.choice(\r\n len(self.unique_groups),\r\n size=self.n_groups_per_batch,\r\n replace=(not self.distinct_groups),\r\n p=self.group_prob)\r\n sampled_ids = [\r\n np.random.choice(\r\n self.group_indices[group],\r\n size=self.n_points_per_group,\r\n replace=len(self.group_indices[group]) <= self.n_points_per_group, # False if the group is larger than the sample size\r\n p=None)\r\n for group in groups_for_batch]\r\n\r\n # Flatten\r\n sampled_ids = np.concatenate(sampled_ids)\r\n yield sampled_ids\r\n\r\n def __len__(self):\r\n return self.num_batches\r\n", "import os\r\nimport torch\r\nimport pandas as pd\r\nfrom PIL import Image\r\nimport numpy as np\r\nfrom wilds.datasets.wilds_dataset import WILDSDataset\r\nfrom wilds.common.grouper import CombinatorialGrouper\r\nfrom wilds.common.metrics.all_metrics import Accuracy\r\n\r\nclass WaterbirdsDataset(WILDSDataset):\r\n \"\"\"\r\n The Waterbirds dataset.\r\n This dataset is not part of the official WILDS benchmark.\r\n We provide it for convenience and to facilitate comparisons to previous work.\r\n\r\n Supported `split_scheme`:\r\n 'official'\r\n\r\n Input (x):\r\n Images of birds against various backgrounds that have already been cropped and centered.\r\n\r\n Label (y):\r\n y is binary. It is 1 if the bird is a waterbird (e.g., duck), and 0 if it is a landbird.\r\n\r\n Metadata:\r\n Each image is annotated with whether the background is a land or water background.\r\n\r\n Original publication:\r\n @inproceedings{sagawa2019distributionally,\r\n title = {Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization},\r\n author = {Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy},\r\n booktitle = {International Conference on Learning Representations},\r\n year = {2019}\r\n }\r\n\r\n The dataset was constructed from the CUB-200-2011 dataset and the Places dataset:\r\n @techreport{WahCUB_200_2011,\r\n \tTitle = {{The Caltech-UCSD Birds-200-2011 Dataset}},\r\n \tAuthor = {Wah, C. and Branson, S. and Welinder, P. and Perona, P. 
and Belongie, S.},\r\n \tYear = {2011}\r\n \tInstitution = {California Institute of Technology},\r\n \tNumber = {CNS-TR-2011-001}\r\n }\r\n @article{zhou2017places,\r\n title = {Places: A 10 million Image Database for Scene Recognition},\r\n author = {Zhou, Bolei and Lapedriza, Agata and Khosla, Aditya and Oliva, Aude and Torralba, Antonio},\r\n journal ={IEEE Transactions on Pattern Analysis and Machine Intelligence},\r\n year = {2017},\r\n publisher = {IEEE}\r\n }\r\n\r\n License:\r\n The use of this dataset is restricted to non-commercial research and educational purposes.\r\n \"\"\"\r\n\r\n _dataset_name = 'waterbirds'\r\n _versions_dict = {\r\n '1.0': {\r\n 'download_url': 'https://worksheets.codalab.org/rest/bundles/0x505056d5cdea4e4eaa0e242cbfe2daa4/contents/blob/',\r\n 'compressed_size': None}}\r\n\r\n def __init__(self, version=None, root_dir='data', download=False, split_scheme='official'):\r\n self._version = version\r\n self._data_dir = self.initialize_data_dir(root_dir, download)\r\n\r\n if not os.path.exists(self.data_dir):\r\n raise ValueError(\r\n f'{self.data_dir} does not exist yet. Please generate the dataset first.')\r\n\r\n # Read in metadata\r\n # Note: metadata_df is one-indexed.\r\n metadata_df = pd.read_csv(\r\n os.path.join(self.data_dir, 'metadata.csv'))\r\n\r\n # Get the y values\r\n self._y_array = torch.LongTensor(metadata_df['y'].values)\r\n self._y_size = 1\r\n self._n_classes = 2\r\n\r\n self._metadata_array = torch.stack(\r\n (torch.LongTensor(metadata_df['place'].values), self._y_array),\r\n dim=1\r\n )\r\n self._metadata_fields = ['background', 'y']\r\n self._metadata_map = {\r\n 'background': [' land', 'water'], # Padding for str formatting\r\n 'y': [' landbird', 'waterbird']\r\n }\r\n\r\n # Extract filenames\r\n self._input_array = metadata_df['img_filename'].values\r\n self._original_resolution = (224, 224)\r\n\r\n # Extract splits\r\n self._split_scheme = split_scheme\r\n if self._split_scheme != 'official':\r\n raise ValueError(f'Split scheme {self._split_scheme} not recognized')\r\n self._split_array = metadata_df['split'].values\r\n\r\n self._eval_grouper = CombinatorialGrouper(\r\n dataset=self,\r\n groupby_fields=(['background', 'y']))\r\n\r\n super().__init__(root_dir, download, split_scheme)\r\n\r\n def get_input(self, idx):\r\n \"\"\"\r\n Returns x for a given idx.\r\n \"\"\"\r\n img_filename = os.path.join(\r\n self.data_dir,\r\n self._input_array[idx])\r\n x = Image.open(img_filename).convert('RGB')\r\n return x\r\n\r\n def eval(self, y_pred, y_true, metadata, prediction_fn=None):\r\n \"\"\"\r\n Computes all evaluation metrics.\r\n Args:\r\n - y_pred (Tensor): Predictions from a model. 
By default, they are predicted labels (LongTensor).\r\n But they can also be other model outputs such that prediction_fn(y_pred)\r\n are predicted labels.\r\n - y_true (LongTensor): Ground-truth labels\r\n - metadata (Tensor): Metadata\r\n - prediction_fn (function): A function that turns y_pred into predicted labels\r\n Output:\r\n - results (dictionary): Dictionary of evaluation metrics\r\n - results_str (str): String summarizing the evaluation metrics\r\n \"\"\"\r\n metric = Accuracy(prediction_fn=prediction_fn)\r\n\r\n results, results_str = self.standard_group_eval(\r\n metric,\r\n self._eval_grouper,\r\n y_pred, y_true, metadata)\r\n\r\n # For Waterbirds, the validation and test sets are constructed to be more balanced\r\n # compared to the training set.\r\n # To compute the actual average accuracy over the empirical (training) distribution,\r\n # we therefore weight each groups according to their frequency in the training set.\r\n\r\n results['adj_acc_avg'] = (\r\n (results['acc_y:landbird_background:land'] * 3498\r\n + results['acc_y:landbird_background:water'] * 184\r\n + results['acc_y:waterbird_background:land'] * 56\r\n + results['acc_y:waterbird_background:water'] * 1057) /\r\n (3498 + 184 + 56 + 1057))\r\n\r\n del results['acc_avg']\r\n results_str = f\"Adjusted average acc: {results['adj_acc_avg']:.3f}\\n\" + '\\n'.join(results_str.split('\\n')[1:])\r\n\r\n return results, results_str\r\n" ]
[ [ "pandas.read_pickle", "pandas.DataFrame" ], [ "numpy.concatenate", "torch.utils.data.DataLoader" ], [ "torch.LongTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yugangzhang/pyFAI
[ "e0453b279dac1f165f637e2a2ed1d4ddf57d31ba", "e0453b279dac1f165f637e2a2ed1d4ddf57d31ba", "e0453b279dac1f165f637e2a2ed1d4ddf57d31ba", "e0453b279dac1f165f637e2a2ed1d4ddf57d31ba", "e0453b279dac1f165f637e2a2ed1d4ddf57d31ba", "e0453b279dac1f165f637e2a2ed1d4ddf57d31ba", "e0453b279dac1f165f637e2a2ed1d4ddf57d31ba", "e0453b279dac1f165f637e2a2ed1d4ddf57d31ba", "e0453b279dac1f165f637e2a2ed1d4ddf57d31ba" ]
[ "pyFAI/opencl/OCLFullSplit.py", "pyFAI/units.py", "pyFAI/test/test_pickle.py", "pyFAI/test/test_convolution.py", "pyFAI/gui/widgets/CalibrantPreview.py", "pyFAI/average.py", "sandbox/profile_ocl_lut.py", "pyFAI/engines/preproc.py", "sandbox/profile_ocl_hist_pixelsplit.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n#\n# Copyright (C) 2014-2018 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jérôme Kieffer ([email protected])\n# Giannis Ashiotis\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\n__authors__ = [\"Jérôme Kieffer\", \"Giannis Ashiotis\"]\n__license__ = \"MIT\"\n__date__ = \"18/02/2020\"\n__copyright__ = \"2014, ESRF, Grenoble\"\n__contact__ = \"[email protected]\"\n\nimport os\nimport logging\nimport threading\nimport numpy\nfrom . import ocl, pyopencl\nfrom ..ext.splitBBoxLUT import HistoBBox1d\n\nif pyopencl:\n mf = pyopencl.mem_flags\nelse:\n raise ImportError(\"pyopencl is not installed\")\nfrom ..utils import crc32, get_cl_file\nlogger = logging.getLogger(__name__)\n\n\nclass OCLFullSplit1d(object):\n def __init__(self,\n pos,\n bins=100,\n pos0Range=None,\n pos1Range=None,\n mask=None,\n mask_checksum=None,\n allow_pos0_neg=False,\n unit=\"undefined\",\n workgroup_size=256,\n devicetype=\"all\",\n platformid=None,\n deviceid=None,\n profile=False):\n\n self.bins = bins\n self.lut_size = 0\n self.allow_pos0_neg = allow_pos0_neg\n\n if len(pos.shape) == 3:\n assert pos.shape[1] == 4\n assert pos.shape[2] == 2\n elif len(pos.shape) == 4:\n assert pos.shape[2] == 4\n assert pos.shape[3] == 2\n else:\n raise ValueError(\"Pos array dimentions are wrong\")\n self.pos_size = pos.size\n self.size = self.pos_size / 8\n self.pos = numpy.ascontiguousarray(pos.ravel(), dtype=numpy.float32)\n self.pos0Range = numpy.empty(2, dtype=numpy.float32)\n self.pos1Range = numpy.empty(2, dtype=numpy.float32)\n\n if (pos0Range is not None) and (len(pos0Range) == 2):\n self.pos0Range[0] = min(pos0Range) # do it on GPU?\n self.pos0Range[1] = max(pos0Range)\n if (not self.allow_pos0_neg) and (self.pos0Range[0] < 0):\n self.pos0Range[0] = 0.0\n if self.pos0Range[1] < 0:\n print(\"Warning: Invalid 0-dim range! 
Using the data derived range instead\")\n self.pos0Range[1] = 0.0\n # self.pos0Range[0] = pos0Range[0]\n # self.pos0Range[1] = pos0Range[1]\n else:\n self.pos0Range[0] = 0.0\n self.pos0Range[1] = 0.0\n if (pos1Range is not None) and (len(pos1Range) == 2):\n self.pos1Range[0] = min(pos1Range) # do it on GPU?\n self.pos1Range[1] = max(pos1Range)\n # self.pos1Range[0] = pos1Range[0]\n # self.pos1Range[1] = pos1Range[1]\n else:\n self.pos1Range[0] = 0.0\n self.pos1Range[1] = 0.0\n\n if mask is not None:\n assert mask.size == self.size\n self.check_mask = True\n self.cmask = numpy.ascontiguousarray(mask.ravel(), dtype=numpy.int8)\n if mask_checksum:\n self.mask_checksum = mask_checksum\n else:\n self.mask_checksum = crc32(mask)\n else:\n self.check_mask = False\n self.mask_checksum = None\n\n self._sem = threading.Semaphore()\n self.profile = profile\n self._cl_kernel_args = {}\n self._cl_mem = {}\n self.events = []\n self.workgroup_size = workgroup_size\n if self.size < self.workgroup_size:\n raise RuntimeError(\"Fatal error in workgroup size selection. Size (%d) must be >= workgroup size (%d)\\n\", self.size, self.workgroup_size)\n if (platformid is None) and (deviceid is None):\n platformid, deviceid = ocl.select_device(devicetype)\n elif platformid is None:\n platformid = 0\n elif deviceid is None:\n deviceid = 0\n self.platform = ocl.platforms[platformid]\n self.device = self.platform.devices[deviceid]\n self.device_type = self.device.type\n\n if (self.device_type == \"CPU\") and (self.platform.vendor == \"Apple\"):\n logger.warning(\"This is a workaround for Apple's OpenCL on CPU: enforce BLOCK_SIZE=1\")\n self.workgroup_size = 1\n try:\n self._ctx = pyopencl.Context(devices=[pyopencl.get_platforms()[platformid].get_devices()[deviceid]])\n if self.profile:\n self._queue = pyopencl.CommandQueue(self._ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE)\n else:\n self._queue = pyopencl.CommandQueue(self._ctx)\n self._compile_kernels()\n self._calc_boundaries()\n self._calc_LUT()\n except pyopencl.MemoryError as error:\n raise MemoryError(error)\n\n def _compile_kernels(self, kernel_file=None):\n \"\"\"\n Call the OpenCL compiler\n :param kernel_file: path tothe\n \"\"\"\n kernel_name = \"ocl_lut.cl\"\n if kernel_file is None:\n if os.path.isfile(kernel_name):\n kernel_file = os.path.abspath(kernel_name)\n else:\n kernel_file = get_cl_file(\"pyfai:openCL/\" + kernel_name)\n else:\n kernel_file = str(kernel_file)\n kernel_src = open(kernel_file).read()\n compile_options = \"-D BINS=%i -D POS_SIZE=%i -D SIZE=%i -D WORKGROUP_SIZE=%i -D EPS=%e\" % \\\n (self.bins, self.pos_size, self.size, self.workgroup_size, numpy.finfo(numpy.float32).eps)\n logger.info(\"Compiling file %s with options %s\", kernel_file, compile_options)\n try:\n self._program = pyopencl.Program(self._ctx, kernel_src).build(options=compile_options)\n except pyopencl.MemoryError as error:\n raise MemoryError(error)\n\n def _calc_boundaries(self):\n \"\"\"\n comments\n \"\"\"\n # # # # # # # # Check for memory# # # # # # # #\n size_of_float = numpy.dtype(numpy.float32).itemsize\n\n ualloc = (self.pos_size * size_of_float)\n ualloc += (self.workgroup_size * 4 * size_of_float)\n ualloc += (4 * size_of_float)\n memory = self.device.memory\n if ualloc >= memory:\n raise MemoryError(\"Fatal error in _allocate_buffers. 
Not enough device memory for buffers (%lu requested, %lu available)\" % (ualloc, memory))\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # allocate memory # # # # # # # #\n try:\n # No returned event for profiling\n # self._cl_mem[\"pos\"] = pyopencl.array.to_device(self._queue, self.pos)\n # self._cl_mem[\"preresult\"] = pyopencl.array.empty(self._queue, (4*self.workgroup_size,), dtype=numpy.float32)\n # self._cl_mem[\"minmax\"] = pyopencl.array.empty(self._queue, (4,), dtype=numpy.float32)\n self._cl_mem[\"pos\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size_of_float * self.pos_size)\n self._cl_mem[\"preresult\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * 4 * self.workgroup_size)\n self._cl_mem[\"minmax\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * 4)\n except pyopencl.MemoryError as error:\n self._free_device_memory()\n raise MemoryError(error)\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # # move data # # # # # # # # # #\n with self._sem:\n copy_pos = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"pos\"], self.pos)\n self.events += [(\"copy pos\", copy_pos)]\n # # # # # # # # set arguments # # # # # # # # #\n self._cl_kernel_args[\"reduce_minmax_1\"] = [self._cl_mem[\"pos\"], self._cl_mem[\"preresult\"]]\n self._cl_kernel_args[\"reduce_minmax_2\"] = [self._cl_mem[\"preresult\"], self._cl_mem[\"minmax\"]]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # do the minmax reduction # # # # # #\n with self._sem:\n reduce_minmax_1 = self._program.reduce_minmax_1(self._queue, (self.workgroup_size * self.workgroup_size,), (self.workgroup_size,), *self._cl_kernel_args[\"reduce_minmax_1\"])\n self.events += [(\"reduce_minmax_1\", reduce_minmax_1)]\n reduce_minmax_2 = self._program.reduce_minmax_2(self._queue, (self.workgroup_size,), (self.workgroup_size,), *self._cl_kernel_args[\"reduce_minmax_2\"])\n self.events += [(\"reduce_minmax_2\", reduce_minmax_2)]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # release the redundant data # # # # #\n self._cl_mem[\"preresult\"].release()\n self._cl_mem.pop(\"preresult\")\n # # # # # # # # # # # # # # # # # # # # # # # #\n\n # check memory of d_pos + d_preresult + d_minmax\n # load d_pos\n # allocate d_preresult\n # allocate d_minmax\n # run reduce1\n # run reduce2\n # save reference to d_minMax\n # free d_preresult\n\n def _calc_LUT(self):\n \"\"\"\n first need to call lut_1 and lut_2 to find the size of the LUT and the lut_3 to create it\n \"\"\"\n # # # # # # # # Check for memory# # # # # # # #\n size_of_float = numpy.dtype(numpy.float32).itemsize\n size_of_int = numpy.dtype(numpy.int32).itemsize\n\n ualloc = (self.pos_size * size_of_float) # pos\n ualloc += (4 * size_of_float) # minmax\n ualloc += (2 * size_of_float) * 2 # pos0Range, pos1Range\n ualloc += (self.bins * size_of_int) # outMax\n ualloc += (1 * size_of_int) # lutsize\n ualloc += ((self.bins + 1) * size_of_int) # idx_ptr\n memory = self.device.memory\n if ualloc >= memory:\n raise MemoryError(\"Fatal error in _allocate_buffers. 
Not enough device memory for buffers (%lu requested, %lu available)\" % (ualloc, memory))\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # allocate memory # # # # # # # #\n try:\n # self._cl_mem[\"pos0Range\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size_of_float * 2)\n # self._cl_mem[\"pos1Range\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size_of_float * 2)\n self._cl_mem[\"outMax\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * self.bins)\n self._cl_mem[\"lutsize\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * 1)\n self._cl_mem[\"idx_ptr\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * (self.bins + 1))\n except pyopencl.MemoryError as error:\n self._free_device_memory()\n raise MemoryError(error)\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # # move data # # # # # # # # # #\n # with self._sem:\n # copy_pos0Range = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"pos0Range\"], self.pos0Range)\n # self.events += [(\"copy pos0Range\", copy_pos0Range)]\n # copy_pos1Range = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"pos1Range\"], self.pos1Range)\n # self.events += [(\"copy pos1Range\", copy_pos1Range)]\n # # # # # # # # set arguments # # # # # # # # #\n self._cl_kernel_args[\"memset_outMax\"] = [self._cl_mem[\"outMax\"]]\n self._cl_kernel_args[\"lut_1\"] = [self._cl_mem[\"pos\"], self._cl_mem[\"minmax\"], self.pos0Range.data, self.pos1Range.data, self._cl_mem[\"outMax\"]]\n self._cl_kernel_args[\"lut_2\"] = [self._cl_mem[\"outMax\"], self._cl_mem[\"idx_ptr\"], self._cl_mem[\"lutsize\"]]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # start the LUT creation # # # # # #\n memset_size = (self.bins + self.workgroup_size - 1) & ~(self.workgroup_size - 1),\n global_size = (self.size + self.workgroup_size - 1) & ~(self.workgroup_size - 1),\n with self._sem:\n memset_outMax = self._program.memset_outMax(self._queue, memset_size, (self.workgroup_size,), *self._cl_kernel_args[\"memset_outMax\"])\n self.events += [(\"memset_outMax\", memset_outMax)]\n lut_1 = self._program.lut_1(self._queue, global_size, (self.workgroup_size,), *self._cl_kernel_args[\"lut_1\"])\n self.events += [(\"lut_1\", lut_1)]\n lut_2 = self._program.lut_2(self._queue, (1,), (1,), *self._cl_kernel_args[\"lut_2\"])\n self.events += [(\"lut_2\", lut_2)]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # get the lutsize # # # # # # # #\n self.lutsize = numpy.ndarray(1, dtype=numpy.int32)\n get_lutsize = pyopencl.enqueue_copy(self._queue, self.lutsize, self._cl_mem[\"lutsize\"])\n self.events += [(\"get_lutsize\", get_lutsize)]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # check memory # # # # # # # #\n ualloc += (self.lutsize * size_of_int) # indices\n ualloc += (self.lutsize * size_of_float) # data\n if ualloc >= memory:\n raise MemoryError(\"Fatal error in _allocate_buffers. 
Not enough device memory for buffers (%lu requested, %lu available)\" % (ualloc, memory))\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # allocate memory # # # # # # # #\n try:\n self._cl_mem[\"indices\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_int * self.lutsize[0])\n self._cl_mem[\"data\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * self.lutsize[0])\n except pyopencl.MemoryError as error:\n self._free_device_memory()\n raise MemoryError(error)\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # set arguments # # # # # # # # #\n self._cl_kernel_args[\"lut_3\"] = [self._cl_mem[\"pos\"], self._cl_mem[\"minmax\"], self.pos0Range.data, self.pos1Range.data, self._cl_mem[\"outMax\"], self._cl_mem[\"idx_ptr\"], self._cl_mem[\"indices\"], self._cl_mem[\"data\"]]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # finish the LUT creation # # # # #\n with self._sem:\n memset_outMax = self._program.memset_outMax(self._queue, memset_size, (self.workgroup_size,), *self._cl_kernel_args[\"memset_outMax\"])\n self.events += [(\"memset_outMax\", memset_outMax)]\n lut_3 = self._program.lut_3(self._queue, global_size, (self.workgroup_size,), *self._cl_kernel_args[\"lut_3\"])\n self.events += [(\"lut_3\", lut_3)]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # release the redundant data # # # # #\n self._cl_mem[\"pos\"].release()\n self._cl_mem.pop(\"pos\")\n self._cl_mem[\"minmax\"].release()\n self._cl_mem.pop(\"minmax\")\n # self._cl_mem[\"pos0Range\"].release()\n # self._cl_mem.pop(\"pos0Range\")\n # self._cl_mem[\"pos1Range\"].release()\n # self._cl_mem.pop(\"pos1Range\")\n self._cl_mem[\"outMax\"].release()\n self._cl_mem.pop(\"outMax\")\n self._cl_mem[\"lutsize\"].release()\n self._cl_mem.pop(\"lutsize\")\n # # # # # # # # # # # # # # # # # # # # # # # #\n\n # check memory of d_pos + d_minmax + d_outMax + d_lutsize\n # allocate d_outMax\n # allocate d_lutsize\n # memset d_outMax\n # run lut1\n # run lut2\n # save d_lutsize\n # memset d_outMax\n # allocate d_data\n # allocate d_indices\n # run lut3\n # free d_pos\n # free d_minMax\n # free d_lutsize\n # run lut4\n # free d_outMax\n\n def _free_device_memory(self):\n \"\"\"\n free all memory allocated on the device\n \"\"\"\n for buffer_name in list(self._cl_mem.keys())[:]:\n buf = self._cl_mem.pop[buffer_name]\n if buf is not None:\n try:\n buf.release()\n except pyopencl.LogicError:\n logger.error(\"Error while freeing buffer %s\", buffer_name)\n\n def get_platform(self):\n pass\n\n def get_queue(self):\n pass\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n# Copyright (C) 2012-2018 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Picca Frédéric-Emmanuel <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, 
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"Manages the different units\n\nNota for developers: this module is used a singleton to store all units in a\nunique manner. This explains the number of top-level variables on the one\nhand and their CAPITALIZATION on the other.\n\"\"\"\n\n\nfrom __future__ import division, print_function\n\n\n__authors__ = [\"Picca Frédéric-Emmanuel\", \"Jérôme Kieffer\"]\n__contact__ = \"[email protected]\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"18/02/2020\"\n__status__ = \"production\"\n__docformat__ = 'restructuredtext'\n\nimport logging\nlogger = logging.getLogger(__name__)\nimport numpy\nfrom numpy import pi\nimport scipy.constants\n\nfrom .third_party import six\n\n################################################################################\n# A few physical constants\n################################################################################\n\nhc = CONST_hc = scipy.constants.c * scipy.constants.h / scipy.constants.e * 1e7\n\"\"\"Product of h the Planck constant, and c the speed of light in vacuum\nin Angstrom.KeV. It is approximativly equal to:\n \npyFAI reference 12.398419292004204\nscipy v1.3.1: 12.398419739640717\nscipy-1.4.0rc1: 12.398419843320026\n\"\"\"\n\nCONST_q = scipy.constants.e\n\"\"\"One electron-volt is equal to 1.602176565⋅10-19 joules\"\"\"\n\n\nclass Unit(object):\n \"\"\"Represents a unit.\n\n It has at least a name and a scale (in SI-unit)\n \"\"\"\n def __init__(self, name, scale=1, label=None, equation=None,\n center=None, corner=None, delta=None, short_name=None, unit_symbol=None):\n \"\"\"Constructor of a unit.\n\n :param str name: name of the unit\n :param float scale: scale of th unit to go to SI\n :param string label: label for nice representation in matplotlib,\n can use latex representation\n :param func equation: equation to calculate the value from coordinates\n (x,y,z) in detector space.\n Parameters of the function are x, y, z, lambda\n :param str center: name of the fast-path function\n :param str unit_symbol: Symbol used to display values of this unit\n \"\"\"\n self.name = name\n self.scale = scale\n self.label = label if label is not None else name\n self.corner = corner\n self.center = center\n self.delta = delta\n self.equation = equation\n self.short_name = short_name\n self.unit_symbol = unit_symbol\n\n def get(self, key):\n \"\"\"Mimic the dictionary interface\n\n :param (str) key: key wanted\n :return: self.key\n \"\"\"\n res = None\n if key in dir(self):\n res = self.__getattribute__(key)\n return res\n\n def __repr__(self):\n return self.name\n\n # ensures hashability\n def __hash__(self):\n return self.name.__hash__()\n\n\nRADIAL_UNITS = {}\n\n\ndef register_radial_unit(name, scale=1, label=None, equation=None,\n center=None, corner=None, delta=None, short_name=None, unit_symbol=None):\n RADIAL_UNITS[name] = Unit(name, scale, label, equation, center, corner, delta, short_name, unit_symbol)\n\n\ndef eq_r(x, y, z=None, wavelength=None):\n \"\"\"Calculates the radius\n\n :param x: horizontal position, towards the center of the ring, from sample position\n :param y: Vertical 
position, to the roof, from sample position\n :param z: distance from sample along the beam\n :param wavelength: in meter\n \"\"\"\n return numpy.sqrt(x * x + y * y)\n\n\ndef eq_2th(x, y, z, wavelength=None):\n \"\"\"Calculates the 2theta aperture of the cone\n\n :param x: horizontal position, towards the center of the ring, from sample position\n :param y: Vertical position, to the roof, from sample position\n :param z: distance from sample along the beam\n :param wavelength: in meter\n \"\"\"\n return numpy.arctan2(eq_r(x, y), z)\n\n\ndef eq_q(x, y, z, wavelength):\n \"\"\"Calculates the modulus of the scattering vector\n\n :param x: horizontal position, towards the center of the ring, from sample position\n :param y: Vertical position, to the roof, from sample position\n :param z: distance from sample along the beam\n :param wavelength: in meter\n \"\"\"\n return 4.0e-9 * numpy.pi * numpy.sin(eq_2th(x, y, z) / 2.0) / wavelength\n\n\nregister_radial_unit(\"r_mm\",\n center=\"rArray\",\n delta=\"deltaR\",\n scale=1000.0,\n label=r\"Radius $r$ ($mm$)\",\n equation=eq_r,\n short_name=\"r\",\n unit_symbol=\"mm\")\n\nregister_radial_unit(\"r_m\",\n center=\"rArray\",\n delta=\"deltaR\",\n scale=1.0,\n label=r\"Radius $r$ ($m$)\",\n equation=eq_r,\n short_name=\"r\",\n unit_symbol=\"m\")\n\nregister_radial_unit(\"2th_deg\", scale=180.0 / numpy.pi,\n center=\"twoThetaArray\",\n delta=\"delta2Theta\",\n label=r\"Scattering angle $2\\theta$ ($^{o}$)\",\n equation=eq_2th,\n short_name=r\"2\\theta\",\n unit_symbol=\"deg\")\n\nregister_radial_unit(\"2th_rad\",\n center=\"twoThetaArray\",\n delta=\"delta2Theta\",\n scale=1.0,\n label=r\"Scattering angle $2\\theta$ ($rad$)\",\n equation=eq_2th,\n short_name=r\"2\\theta\",\n unit_symbol=\"rad\")\n\nregister_radial_unit(\"q_nm^-1\",\n center=\"qArray\",\n delta=\"deltaQ\",\n scale=1.0,\n label=r\"Scattering vector $q$ ($nm^{-1}$)\",\n equation=eq_q,\n short_name=\"q\",\n unit_symbol=\"nm^{-1}\")\n\nregister_radial_unit(\"q_A^-1\",\n center=\"qArray\",\n delta=\"deltaQ\",\n scale=0.1,\n label=r\"Scattering vector $q$ ($\\AA^{-1}$)\",\n equation=eq_q,\n short_name=\"q\",\n unit_symbol=r\"\\AA^{-1}\")\n\nregister_radial_unit(\"d*2_A^-2\",\n center=\"rd2Array\",\n delta=\"deltaRd2\",\n scale=0.01,\n label=r\"Reciprocal spacing squared $d^{*2}$ ($\\AA^{-2}$)\",\n equation=lambda x, y, z, wavelength: (eq_q(x, y, z, wavelength) / (2.0 * numpy.pi)) ** 2,\n short_name=\"d^{*2}\",\n unit_symbol=r\"\\AA^{-2}\")\n\nregister_radial_unit(\"d*2_nm^-2\",\n center=\"rd2Array\",\n delta=\"deltaRd2\",\n scale=1.0,\n label=r\"Reciprocal spacing squared $d^{*2}$ ($nm^{-2}$)\",\n equation=lambda x, y, z, wavelength: (eq_q(x, y, z, wavelength) / (2.0 * numpy.pi)) ** 2,\n short_name=\"d^{*2}\",\n unit_symbol=\"nm^{-2}\")\n\nregister_radial_unit(\"log10(q.m)_None\",\n scale=1.0,\n label=r\"log10($q$.m)\",\n equation=lambda x, y, z, wavelength: numpy.log10(1e9 * eq_q(x, y, z, wavelength)),\n short_name=\"log10(q.m)\",\n unit_symbol=\"?\")\n\nregister_radial_unit(\"log(q.nm)_None\",\n scale=1.0,\n label=r\"log($q$.nm)\",\n equation=lambda x, y, z, wavelength: numpy.log(eq_q(x, y, z, wavelength)),\n short_name=\"log(q.nm)\",\n unit_symbol=\"?\")\n\nregister_radial_unit(\"log(1+q.nm)_None\",\n scale=1.0,\n label=r\"log(1+$q$.nm)\",\n equation=lambda x, y, z, wavelength: numpy.log1p(eq_q(x, y, z, wavelength)),\n short_name=\"log(1+q.nm)\",\n unit_symbol=\"?\")\n\nregister_radial_unit(\"log(1+q.A)_None\",\n scale=1.0,\n label=r\"log(1+$q$.\\AA)\",\n equation=lambda x, y, z, 
wavelength: numpy.log1p(0.1 * eq_q(x, y, z, wavelength)),\n short_name=r\"log(1+q.\\AA)\",\n unit_symbol=\"?\")\n\nregister_radial_unit(\"arcsinh(q.nm)_None\",\n scale=1.0,\n label=r\"arcsinh($q$.nm)\",\n equation=lambda x, y, z, wavelength: numpy.arcsinh(eq_q(x, y, z, wavelength)),\n short_name=\"arcsinh(q.nm)\",\n unit_symbol=\"?\")\n\nregister_radial_unit(\"arcsinh(q.A)_None\",\n scale=1.0,\n label=r\"arcsinh($q$.\\AA)\",\n equation=lambda x, y, z, wavelength: numpy.arcsinh(0.1 * eq_q(x, y, z, wavelength)),\n short_name=r\"arcsinh(q.\\AA)\",\n unit_symbol=\"?\")\n\n\nLENGTH_UNITS = {\"m\": Unit(\"m\", scale=1., label=r\"length $l$ ($m$)\"),\n \"mm\": Unit(\"mm\", scale=1e3, label=r\"length $l$ ($mm$)\"),\n \"cm\": Unit(\"cm\", scale=1e2, label=r\"length $l$ ($cm$)\"),\n \"micron\": Unit(\"micron\", scale=1e6, label=r\"length $l$ ($\\mu m$)\"),\n \"nm\": Unit(\"nm\", scale=1e9, label=r\"length $l$ ($nm$)\"),\n \"A\": Unit(\"A\", scale=1e10, label=r\"length $l$ ($\\AA$)\"),\n }\n\n\nANGLE_UNITS = {\"deg\": Unit(\"deg\", scale=180.0 / pi, label=r\"angle $\\alpha$ ($^{o}$)\"),\n \"rad\": Unit(\"rad\", scale=1.0, label=r\"angle $\\alpha$ ($rad$)\"),\n }\n\nAZIMUTHAL_UNITS = {\"chi_rad\": Unit(\"chi_rad\", scale=1.0, label=r\"Azimuthal angle $\\chi$ ($rad$)\"),\n \"chi_deg\": Unit(\"chi_deg\", scale=180 / pi, label=r\"Azimuthal angle $\\chi$ ($^{o}$)\")}\n\n\ndef to_unit(obj, type_=None):\n if type_ is None:\n type_ = RADIAL_UNITS\n rad_unit = None\n if isinstance(obj, six.string_types):\n rad_unit = type_.get(obj)\n elif isinstance(obj, Unit):\n rad_unit = obj\n if rad_unit is None:\n logger.error(\"Unable to recognize this type unit '%s' of type %s. \"\n \"Valid units are %s\" % (obj, type(obj), \", \".join([i for i in type_])))\n return rad_unit\n\n\n# To ensure the compatibility with former code:\nQ = Q_NM = RADIAL_UNITS[\"q_nm^-1\"]\nQ_A = RADIAL_UNITS[\"q_A^-1\"]\nTTH_RAD = RADIAL_UNITS[\"2th_rad\"]\nTTH_DEG = TTH = RADIAL_UNITS[\"2th_deg\"]\nR = R_MM = RADIAL_UNITS[\"r_mm\"]\nR_M = RADIAL_UNITS[\"r_m\"]\nRecD2_NM = RADIAL_UNITS[\"d*2_nm^-2\"]\nl_m = LENGTH_UNITS[\"m\"]\nA_rad = ANGLE_UNITS[\"rad\"]\nCHI_DEG = AZIMUTHAL_UNITS[\"chi_deg\"]\nCHI_RAD = AZIMUTHAL_UNITS[\"chi_rad\"]\n", "#!/usr/bin/env python\n# coding: utf-8\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n# Copyright (C) 2015-2018 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jérôme Kieffer ([email protected])\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import absolute_import, division, print_function\n\n\"\"\"Test suite for pickled objects\"\"\"\n\n__author__ = \"Jérôme Kieffer\"\n__contact__ = \"[email protected]\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"18/10/2018\"\n\n\nimport numpy\nfrom pyFAI.azimuthalIntegrator import AzimuthalIntegrator\nfrom pyFAI.detectors import detector_factory\nfrom pickle import dumps, loads\nimport unittest\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass TestPickle(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestPickle, cls).setUpClass()\n cls.ai = AzimuthalIntegrator(1.0, detector=\"Pilatus100k\")\n cls.ai.wavelength = 1e-10\n cls.npt = 100\n cls.data = numpy.random.random(cls.ai.detector.shape)\n\n @classmethod\n def tearDownClass(cls):\n super(TestPickle, cls).tearDownClass()\n cls.data = cls.ai = cls.npt = None\n\n def test_Detector_pickle(self):\n det = self.ai.detector # type: Detector\n dets = dumps(det)\n self.assert_(dets, \"pickle works\")\n rest = loads(dets)\n self.assert_(rest, \"unpickle works\")\n self.assertEqual(rest.shape, self.ai.detector.MAX_SHAPE)\n\n # test the binning\n mar = detector_factory(\"RayonixMx225\")\n mar.guess_binning((2048, 2048))\n self.assertEqual(mar.binning, (3, 3), \"binning OK\")\n mars = dumps(mar)\n marr = loads(mars)\n self.assertEqual(mar.binning, marr.binning, \"restored binning OK\")\n\n def test_AzimuthalIntegrator_pickle(self):\n spectra = self.ai.integrate1d(self.data, self.npt) # force lut generation\n ais = dumps(self.ai)\n newai = loads(ais) # type: AzimuthalIntegrator\n self.assertEqual(newai._cached_array.keys(), self.ai._cached_array.keys())\n for key in self.ai._cached_array.keys():\n if isinstance(self.ai._cached_array[key], numpy.ndarray):\n self.assertEqual(abs(newai._cached_array[key] - self.ai._cached_array[key]).max(), 0,\n \"key %s is the same\" % key)\n else:\n self.assertEqual(newai._cached_array[key], self.ai._cached_array[key],\n \"key %s is the same: %s %s\" %\n (key, newai._cached_array[key], self.ai._cached_array[key]))\n for first, second in zip(newai.integrate1d(self.data, self.npt), spectra):\n self.assertEqual(abs(first - second).max(), 0, \"Spectra are the same\")\n\n def test_Calibrant(self):\n from pyFAI import calibrant\n calibrant = calibrant.CalibrantFactory()('AgBh')\n assert dumps(calibrant)\n assert loads(dumps(calibrant))\n\n\ndef suite():\n loader = unittest.defaultTestLoader.loadTestsFromTestCase\n testsuite = unittest.TestSuite()\n testsuite.addTest(loader(TestPickle))\n return testsuite\n\n\nif __name__ == '__main__':\n runner = unittest.TextTestRunner()\n runner.run(suite())\n", "#!/usr/bin/env python\n# coding: utf-8\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n# Copyright (C) 2015-2018 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jérôme Kieffer ([email protected])\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import absolute_import, division, print_function\n\n\"\"\"Test suite for convolution cython code\"\"\"\n\n__author__ = \"Jérôme Kieffer\"\n__contact__ = \"Jérô[email protected]\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"10/01/2018\"\n\nimport unittest\nimport numpy\nimport logging\nlogger = logging.getLogger(__name__)\nfrom ..ext import _convolution\nimport scipy.ndimage\nimport scipy.misc\nimport scipy.signal\n\n\nclass TestConvolution(unittest.TestCase):\n def setUp(self):\n self.sigma = 1\n self.width = 8 * self.sigma + 1\n if self.width % 2 == 0:\n self.width += 1\n self.gauss = scipy.signal.gaussian(self.width, self.sigma)\n self.gauss /= self.gauss.sum()\n if \"ascent\" in dir(scipy.misc):\n self.lena = scipy.misc.ascent().astype(\"float32\")\n else:\n self.lena = scipy.misc.lena().astype(\"float32\")\n\n def tearDown(self):\n unittest.TestCase.tearDown(self)\n self.lena = self.gauss = self.sigma = self.width = None\n\n def test_gaussian(self):\n gauss = _convolution.gaussian(self.sigma)\n self.assertTrue(numpy.allclose(gauss, self.gauss), \"gaussian curves are the same\")\n\n def test_horizontal_convolution(self):\n gauss = self.gauss.astype(numpy.float32)\n ref = scipy.ndimage.filters.convolve1d(self.lena, self.gauss, axis=-1)\n obt = _convolution.horizontal_convolution(self.lena, gauss)\n self.assertTrue(numpy.allclose(ref, obt), \"horizontal filtered images are the same\")\n\n def test_vertical_convolution(self):\n gauss = self.gauss.astype(numpy.float32)\n ref = scipy.ndimage.filters.convolve1d(self.lena, self.gauss, axis=0)\n obt = _convolution.vertical_convolution(self.lena, gauss)\n self.assertTrue(numpy.allclose(ref, obt), \"vertical filtered images are the same\")\n\n def test_gaussian_filter(self):\n ref = scipy.ndimage.filters.gaussian_filter(self.lena, self.sigma)\n obt = _convolution.gaussian_filter(self.lena, self.sigma)\n self.assertTrue(numpy.allclose(ref, obt), \"gaussian filtered images are the same\")\n\n\ndef suite():\n loader = unittest.defaultTestLoader.loadTestsFromTestCase\n testsuite = unittest.TestSuite()\n testsuite.addTest(loader(TestConvolution))\n return testsuite\n\n\nif __name__ == '__main__':\n runner = unittest.TextTestRunner()\n runner.run(suite())\n", "# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (C) 2016-2018 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\nfrom __future__ import absolute_import\n\n__authors__ = [\"V. Valls\"]\n__license__ = \"MIT\"\n__date__ = \"16/05/2019\"\n\nimport logging\nimport numpy\nimport os.path\n\nfrom silx.gui import qt\nfrom silx.gui.colors import Colormap\n\nfrom ..utils import imageutils\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass CalibrantPreview(qt.QFrame):\n \"\"\"\n CalibrantPreview show the rays of a calibrat at a wayvelength between 0\n and Pi.\n \"\"\"\n\n _PIXMAP_OFFSET = 2\n\n def __init__(self, parent=None):\n super(CalibrantPreview, self).__init__(parent)\n self.__calibrant = None\n self.__waveLength = None\n self.__pixmap = None\n self.__cachedSize = None\n self.setMinimumSize(qt.QSize(50, 20))\n\n def setCalibrant(self, calibrant):\n if self.__calibrant is calibrant:\n return\n self.__calibrant = calibrant\n self.__pixmap = None\n self.__updateToolTip()\n self.repaint()\n\n def setWaveLength(self, waveLength):\n if self.__waveLength == waveLength:\n return\n self.__waveLength = waveLength\n self.__pixmap = None\n self.__updateToolTip()\n self.repaint()\n\n def getCalibrant(self):\n return self.__pixmap\n\n def __getConfiguredCalibrant(self):\n calibrant = self.__calibrant\n if calibrant is None:\n return None\n waveLenght = self.__waveLength\n if waveLenght is None:\n return None\n\n calibrant.setWavelength_change2th(waveLenght)\n return calibrant\n\n def __updateToolTip(self):\n calibrant = self.__getConfiguredCalibrant()\n if calibrant is None:\n return\n\n name = calibrant.filename\n if name is not None:\n name = os.path.basename(name)\n if name.endswith(\".D\"):\n name = name[0:-2]\n\n fileds = []\n if name is not None:\n fileds.append((u\"Name\", name, None))\n fileds.append((u\"Nb registered rays\", calibrant.count_registered_dSpacing(), None))\n dSpacing = calibrant.get_dSpacing()\n fileds.append((u\"Nb visible rays\", len(dSpacing), u\"between 0 and 180°\"))\n if len(dSpacing) > 0:\n ray = calibrant.get_dSpacing()[0]\n angle = calibrant.get_2th()[0]\n fileds.append((u\"First visible ray\", u\"%f Å (%f°)\" % (ray, numpy.rad2deg(angle)), None))\n ray = calibrant.get_dSpacing()[-1]\n angle = calibrant.get_2th()[-1]\n fileds.append((u\"Last visible ray\", u\"%f Å (%f°)\" % (ray, numpy.rad2deg(angle)), None))\n\n toolTip = []\n for f in fileds:\n field_name, field_value, suffix = f\n field = u\"<li><b>%s</b>: %s</li>\" % (field_name, field_value)\n if suffix is not None:\n field = u\"%s (%s)\" % (field, suffix)\n toolTip.append(field)\n\n toolTip = u\"\\n\".join(toolTip)\n toolTip = u\"<html><ul>%s</ul></html>\" % toolTip\n self.setToolTip(toolTip)\n\n def __getPixmap(self, size=360):\n if 
self.__pixmap is not None and self.__cachedSize == size:\n return self.__pixmap\n calibrant = self.__getConfiguredCalibrant()\n if calibrant is None:\n return None\n tths = numpy.array(calibrant.get_2th())\n\n tth_min, tth_max = 0, numpy.pi\n histo = numpy.histogram(tths, bins=size, range=(tth_min, tth_max))\n agregation = histo[0].reshape(1, -1)\n colormap = Colormap(name=\"reversed gray\", vmin=agregation.min(), vmax=agregation.max())\n rgbImage = colormap.applyToData(agregation)[:, :, :3]\n qimage = imageutils.convertArrayToQImage(rgbImage)\n qpixmap = qt.QPixmap.fromImage(qimage)\n self.__pixmap = qpixmap\n self.__cachedSize = size\n return self.__pixmap\n\n def paintEvent(self, event):\n super(CalibrantPreview, self).paintEvent(event)\n painter = qt.QPainter(self)\n\n # border\n option = qt.QStyleOptionProgressBar()\n option.initFrom(self)\n option.rect = self.rect()\n option.state = qt.QStyle.State_Enabled if self.isEnabled() else qt.QStyle.State_None\n style = qt.QApplication.style()\n style.drawControl(qt.QStyle.CE_ProgressBarGroove,\n option,\n painter,\n self)\n\n # content\n pixmapRect = self.rect().adjusted(self._PIXMAP_OFFSET, self._PIXMAP_OFFSET,\n -self._PIXMAP_OFFSET, -self._PIXMAP_OFFSET)\n pixmap = self.__getPixmap(size=pixmapRect.width())\n if pixmap is not None:\n painter.drawPixmap(pixmapRect,\n pixmap,\n pixmap.rect())\n\n def sizeHint(self):\n return qt.QSize(200, self.minimumHeight())\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n# Copyright (C) 2003-2018 European Synchrotron Radiation Facility, Grenoble,\n# France\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\n\nUtilities, mainly for image treatment\n\n\"\"\"\n\n__authors__ = [\"Jérôme Kieffer\", \"Valentin Valls\"]\n__contact__ = \"[email protected]\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"23/01/2020\"\n__status__ = \"production\"\n\nimport logging\nimport numpy\nimport fabio\nimport weakref\nfrom scipy import ndimage\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize.optimize import fmin\nfrom scipy.optimize.optimize import fminbound\n\nfrom .third_party import six\nfrom .utils import stringutil\nfrom .utils import header_utils\nfrom .io.image import read_data\n\nfrom ._version import calc_hexversion\nif (\"hexversion\" not in dir(fabio)) or (fabio.hexversion < calc_hexversion(0, 4, 0, \"dev\", 5)):\n # Short cut fabio.factory do not exists on older versions\n fabio.factory = fabio.fabioimage.FabioImage.factory\n\nlogger = logging.getLogger(__name__)\n\n\nclass ImageReductionFilter(object):\n \"\"\"\n Generic filter applied in a set of images.\n \"\"\"\n\n def init(self, max_images=None):\n \"\"\"\n Initialize the filter before using it.\n\n :param int max_images: Max images supported by the filter\n \"\"\"\n pass\n\n def add_image(self, image):\n \"\"\"\n Add an image to the filter.\n\n :param numpy.ndarray image: image to add\n \"\"\"\n raise NotImplementedError()\n\n def get_parameters(self):\n \"\"\"Return a dictionary containing filter parameters\n\n :rtype: dict\n \"\"\"\n return {\"cutoff\": None, \"quantiles\": None}\n\n def get_result(self):\n \"\"\"\n Get the result of the filter.\n\n :return: result filter\n \"\"\"\n raise NotImplementedError()\n\n\nclass ImageAccumulatorFilter(ImageReductionFilter):\n \"\"\"\n Filter applied in a set of images in which it is possible\n to reduce data step by step into a single merged image.\n \"\"\"\n\n def init(self, max_images=None):\n self._count = 0\n self._accumulated_image = None\n\n def add_image(self, image):\n \"\"\"\n Add an image to the filter.\n\n :param numpy.ndarray image: image to add\n \"\"\"\n self._accumulated_image = self._accumulate(self._accumulated_image, image)\n self._count += 1\n\n def _accumulate(self, accumulated_image, added_image):\n \"\"\"\n Add an image to the filter.\n\n :param numpy.ndarray accumulated_image: image use to accumulate\n information\n :param numpy.ndarray added_image: image to add\n \"\"\"\n raise NotImplementedError()\n\n def get_result(self):\n \"\"\"\n Get the result of the filter.\n\n :return: result filter\n :rtype: numpy.ndarray\n \"\"\"\n result = self._accumulated_image\n # release the allocated memory\n self._accumulated_image = None\n return result\n\n\nclass MaxAveraging(ImageAccumulatorFilter):\n name = \"max\"\n\n def _accumulate(self, accumulated_image, added_image):\n if accumulated_image is None:\n return added_image\n return numpy.maximum(accumulated_image, added_image)\n\n\nclass MinAveraging(ImageAccumulatorFilter):\n name = \"min\"\n\n def _accumulate(self, accumulated_image, added_image):\n if accumulated_image is None:\n return added_image\n return numpy.minimum(accumulated_image, added_image)\n\n\nclass SumAveraging(ImageAccumulatorFilter):\n name = \"sum\"\n\n def _accumulate(self, accumulated_image, added_image):\n 
if accumulated_image is None:\n return added_image\n return accumulated_image + added_image\n\n\nclass MeanAveraging(SumAveraging):\n name = \"mean\"\n\n def get_result(self):\n result = super(MeanAveraging, self).get_result()\n return result / numpy.float32(self._count)\n\n\nclass ImageStackFilter(ImageReductionFilter):\n \"\"\"\n Filter creating a stack from all images and computing everything at the\n end.\n \"\"\"\n def init(self, max_images=None):\n self._stack = None\n self._max_stack_size = max_images\n self._count = 0\n\n def add_image(self, image):\n \"\"\"\n Add an image to the filter.\n\n :param numpy.ndarray image: image to add\n \"\"\"\n if self._stack is None:\n shape = self._max_stack_size, image.shape[0], image.shape[1]\n self._stack = numpy.zeros(shape, dtype=numpy.float32)\n self._stack[self._count] = image\n self._count += 1\n\n def _compute_stack_reduction(self, stack):\n \"\"\"Called after initialization of the stack and return the reduction\n result.\"\"\"\n raise NotImplementedError()\n\n def get_result(self):\n if self._stack is None:\n raise Exception(\"No data to reduce\")\n\n shape = self._count, self._stack.shape[1], self._stack.shape[2]\n self._stack.resize(shape)\n result = self._compute_stack_reduction(self._stack)\n # release the allocated memory\n self._stack = None\n return result\n\n\nclass AverageDarkFilter(ImageStackFilter):\n \"\"\"\n Filter based on the algorithm of average_dark\n\n TODO: Must be split according to each filter_name, and removed\n \"\"\"\n def __init__(self, filter_name, cut_off, quantiles):\n super(AverageDarkFilter, self).__init__()\n self._filter_name = filter_name\n self._cut_off = cut_off\n self._quantiles = quantiles\n\n @property\n def name(self):\n return self._filter_name\n\n def get_parameters(self):\n \"\"\"Return a dictionary containing filter parameters\"\"\"\n return {\"cutoff\": self._cut_off, \"quantiles\": self._quantiles}\n\n def _compute_stack_reduction(self, stack):\n \"\"\"\n Compute the stack reduction.\n\n :param numpy.ndarray stack: stack to reduce\n :return: result filter\n :rtype: numpy.ndarray\n \"\"\"\n return average_dark(stack,\n self._filter_name,\n self._cut_off,\n self._quantiles)\n\n\n_FILTERS = [\n MaxAveraging,\n MinAveraging,\n MeanAveraging,\n SumAveraging,\n]\n\n_FILTER_NAME_MAPPING = {}\nfor _f in _FILTERS:\n _FILTER_NAME_MAPPING[_f.name] = _f\n\n_AVERAGE_DARK_FILTERS = set([\"min\", \"max\", \"sum\", \"mean\", \"std\", \"quantiles\", \"median\"])\n\n\ndef is_algorithm_name_exists(filter_name):\n \"\"\"Return true if the name is a name of a filter algorithm\"\"\"\n if filter_name in _FILTER_NAME_MAPPING:\n return True\n elif filter_name in _AVERAGE_DARK_FILTERS:\n return True\n return False\n\n\nclass AlgorithmCreationError(RuntimeError):\n \"\"\"Exception returned if creation of an ImageReductionFilter is not\n possible\"\"\"\n pass\n\n\ndef create_algorithm(filter_name, cut_off=None, quantiles=None):\n \"\"\"Factory to create algorithm according to parameters\n\n :param cutoff: keep all data where (I-center)/std < cutoff\n :type cutoff: float or None\n :param quantiles: 2-tuple of floats average out data between the two\n quantiles\n :type quantiles: tuple(float, float) or None\n :return: An algorithm\n :rtype: ImageReductionFilter\n :raise AlgorithmCreationError: If it is not possible to create the\n algorithm\n \"\"\"\n if filter_name in _FILTER_NAME_MAPPING and cut_off is None:\n # use less memory\n filter_class = _FILTER_NAME_MAPPING[filter_name]\n algorithm = filter_class()\n elif 
filter_name in _AVERAGE_DARK_FILTERS:\n # must create a big array with all the data\n if filter_name == \"quantiles\" and quantiles is None:\n raise AlgorithmCreationError(\"Quantiles algorithm expect quantiles parameters\")\n algorithm = AverageDarkFilter(filter_name, cut_off, quantiles)\n else:\n raise AlgorithmCreationError(\"No algorithm available for the expected parameters\")\n\n return algorithm\n\n\ndef bounding_box(img):\n \"\"\"\n Tries to guess the bounding box around a valid massif\n\n :param img: 2D array like\n :return: 4-typle (d0_min, d1_min, d0_max, d1_max)\n \"\"\"\n img = img.astype(numpy.int)\n img0 = (img.sum(axis=1) > 0).astype(numpy.int)\n img1 = (img.sum(axis=0) > 0).astype(numpy.int)\n dimg0 = img0[1:] - img0[:-1]\n min0 = dimg0.argmax()\n max0 = dimg0.argmin() + 1\n dimg1 = img1[1:] - img1[:-1]\n min1 = dimg1.argmax()\n max1 = dimg1.argmin() + 1\n if max0 == 1:\n max0 = img0.size\n if max1 == 1:\n max1 = img1.size\n return (min0, min1, max0, max1)\n\n\ndef remove_saturated_pixel(ds, threshold=0.1, minimum=None, maximum=None):\n \"\"\"\n Remove saturated fixes from an array inplace.\n\n :param ds: a dataset as ndarray\n :param float threshold: what is the upper limit?\n all pixel > max*(1-threshold) are discareded.\n :param float minimum: minumum valid value (or True for auto-guess)\n :param float maximum: maximum valid value\n :return: the input dataset\n \"\"\"\n shape = ds.shape\n if ds.dtype == numpy.uint16:\n maxt = (1.0 - threshold) * 65535.0\n elif ds.dtype == numpy.int16:\n maxt = (1.0 - threshold) * 32767.0\n elif ds.dtype == numpy.uint8:\n maxt = (1.0 - threshold) * 255.0\n elif ds.dtype == numpy.int8:\n maxt = (1.0 - threshold) * 127.0\n else:\n if maximum is None:\n maxt = (1.0 - threshold) * ds.max()\n else:\n maxt = maximum\n if maximum is not None:\n maxt = min(maxt, maximum)\n invalid = (ds > maxt)\n if minimum:\n if minimum is True:\n # automatic guess of the best minimum TODO: use the HWHM to guess the minumum...\n data_min = ds.min()\n x, y = numpy.histogram(numpy.log(ds - data_min + 1.0), bins=100)\n f = interp1d((y[1:] + y[:-1]) / 2.0, -x, bounds_error=False, fill_value=-x.min())\n max_low = fmin(f, y[1], disp=0)\n max_hi = fmin(f, y[-1], disp=0)\n if max_hi > max_low:\n f = interp1d((y[1:] + y[:-1]) / 2.0, x, bounds_error=False)\n min_center = fminbound(f, max_low, max_hi)\n else:\n min_center = max_hi\n minimum = float(numpy.exp(y[((min_center / y) > 1).sum() - 1])) - 1.0 + data_min\n logger.debug(\"removeSaturatedPixel: best minimum guessed is %s\", minimum)\n ds[ds < minimum] = minimum\n ds -= minimum # - 1.0\n\n if invalid.sum(dtype=int) == 0:\n logger.debug(\"No saturated area where found\")\n return ds\n gi = ndimage.morphology.binary_dilation(invalid)\n lgi, nc = ndimage.label(gi)\n if nc > 100:\n logger.warning(\"More than 100 saturated zones were found on this image !!!!\")\n for zone in range(nc + 1):\n dzone = (lgi == zone)\n if dzone.sum(dtype=int) > ds.size // 2:\n continue\n min0, min1, max0, max1 = bounding_box(dzone)\n ksize = min(max0 - min0, max1 - min1)\n subset = ds[max(0, min0 - 4 * ksize):min(shape[0], max0 + 4 * ksize), max(0, min1 - 4 * ksize):min(shape[1], max1 + 4 * ksize)]\n while subset.max() > maxt:\n subset = ndimage.median_filter(subset, ksize)\n ds[max(0, min0 - 4 * ksize):min(shape[0], max0 + 4 * ksize), max(0, min1 - 4 * ksize):min(shape[1], max1 + 4 * ksize)] = subset\n return ds\n\n\ndef average_dark(lstimg, center_method=\"mean\", cutoff=None, quantiles=(0.5, 0.5)):\n \"\"\"\n Averages a serie of dark (or 
flat) images.\n Centers the result on the mean or the median ...\n but averages all frames within cutoff*std\n\n :param lstimg: list of 2D images or a 3D stack\n :param str center_method: is the center calculated by a \"mean\", \"median\",\n \"quantile\", \"std\"\n :param cutoff: keep all data where (I-center)/std < cutoff\n :type cutoff: float or None\n :param quantiles: 2-tuple of floats average out data between the two\n quantiles\n :type quantiles: tuple(float, float) or None\n :return: 2D image averaged\n \"\"\"\n if \"ndim\" in dir(lstimg) and lstimg.ndim == 3:\n stack = lstimg.astype(numpy.float32)\n shape = stack.shape[1:]\n length = stack.shape[0]\n else:\n shape = lstimg[0].shape\n length = len(lstimg)\n if length == 1:\n return lstimg[0].astype(numpy.float32)\n stack = numpy.zeros((length, shape[0], shape[1]), dtype=numpy.float32)\n for i, img in enumerate(lstimg):\n stack[i] = img\n if center_method in dir(stack):\n center = stack.__getattribute__(center_method)(axis=0)\n elif center_method == \"median\":\n logger.info(\"Filtering data (median)\")\n center = numpy.median(stack, axis=0)\n elif center_method.startswith(\"quantil\"):\n logger.info(\"Filtering data (quantiles: %s)\", quantiles)\n sorted_ = numpy.sort(stack, axis=0)\n lower = max(0, int(numpy.floor(min(quantiles) * length)))\n upper = min(length, int(numpy.ceil(max(quantiles) * length)))\n if (upper == lower):\n if upper < length:\n upper += 1\n elif lower > 0:\n lower -= 1\n else:\n logger.warning(\"Empty selection for quantil %s, would keep points from %s to %s\", quantiles, lower, upper)\n center = sorted_[lower:upper].mean(axis=0)\n else:\n raise RuntimeError(\"Cannot understand method: %s in average_dark\" % center_method)\n if cutoff is None or cutoff <= 0:\n output = center\n else:\n std = stack.std(axis=0)\n strides = 0, std.strides[0], std.strides[1]\n std.shape = 1, shape[0], shape[1]\n std.strides = strides\n center.shape = 1, shape[0], shape[1]\n center.strides = strides\n mask = ((abs(stack - center) / std) > cutoff)\n stack[numpy.where(mask)] = 0.0\n summed = stack.sum(axis=0)\n output = summed / numpy.float32(numpy.maximum(1, (length - mask.sum(axis=0))))\n return output\n\n\ndef _normalize_image_stack(image_stack):\n \"\"\"\n Convert input data to a list of 2D numpy arrays or a stack\n of numpy array (3D array).\n\n :param image_stack: slice of images\n :type image_stack: list or numpy.ndarray\n :return: A stack of image (list of 2D array or a single 3D array)\n :rtype: list or numpy.ndarray\n \"\"\"\n if image_stack is None:\n return None\n\n if isinstance(image_stack, numpy.ndarray) and image_stack.ndim == 3:\n # numpy image stack (single 3D image)\n return image_stack\n\n if isinstance(image_stack, list):\n # list of numpy images (multi 2D images)\n result = []\n for image in image_stack:\n if isinstance(image, six.string_types):\n data = read_data(image)\n elif isinstance(image, numpy.ndarray) and image.ndim == 2:\n data = image\n else:\n raise Exception(\"Unsupported image type '%s' in image_stack\" % type(image))\n result.append(data)\n return result\n\n raise Exception(\"Unsupported type '%s' for image_stack\" % type(image_stack))\n\n\nclass AverageWriter():\n \"\"\"Interface for using writer in `Average` process.\"\"\"\n\n def write_header(self, merged_files, nb_frames, monitor_name):\n \"\"\"Write the header of the average\n\n :param list merged_files: List of files used to generate this output\n :param int nb_frames: Number of frames used\n :param str monitor_name: Name of the monitor used. 
Can be None.\n \"\"\"\n raise NotImplementedError()\n\n def write_reduction(self, algorithm, data):\n \"\"\"Write one reduction\n\n :param ImageReductionFilter algorithm: Algorithm used\n :param object data: Data of this reduction\n \"\"\"\n raise NotImplementedError()\n\n def close(self):\n \"\"\"Close the writer. Must not be used anymore.\"\"\"\n raise NotImplementedError()\n\n\nclass MultiFilesAverageWriter(AverageWriter):\n \"\"\"Write reductions into multi files. File headers are duplicated.\"\"\"\n\n def __init__(self, file_name_pattern, file_format, dry_run=False):\n \"\"\"\n :param str file_name_pattern: File name pattern for the output files.\n If it contains \"{method_name}\", it is updated for each\n reduction writing with the name of the reduction.\n :param str file_format: File format used. It is the default\n extension file.\n :param bool dry_run: If dry_run, the file is created on memory but not\n saved on the file system at the end\n \"\"\"\n self._file_name_pattern = file_name_pattern\n self._global_header = {}\n self._fabio_images = weakref.WeakKeyDictionary()\n self._dry_run = dry_run\n\n # in case \"edf.gz\"\n if \".\" in file_format:\n file_format = file_format.split(\".\")[0]\n\n self._fabio_class = fabio.factory(file_format + \"image\")\n\n def write_header(self, merged_files, nb_frames, monitor_name):\n self._global_header[\"nfiles\"] = len(merged_files)\n self._global_header[\"nframes\"] = nb_frames\n if monitor_name is not None:\n self._global_header[\"monitor_name\"] = monitor_name\n\n pattern = \"merged_file_%%0%ii\" % len(str(len(merged_files)))\n for i, f in enumerate(merged_files):\n name = pattern % i\n self._global_header[name] = f.filename\n\n def _get_file_name(self, reduction_name):\n keys = {\"method_name\": reduction_name}\n return stringutil.safe_format(self._file_name_pattern, keys)\n\n def write_reduction(self, algorithm, data):\n file_name = self._get_file_name(algorithm.name)\n # overwrite the method\n header = fabio.fabioimage.OrderedDict()\n header[\"method\"] = algorithm.name\n for name, value in self._global_header.items():\n header[name] = str(value)\n filter_parameters = algorithm.get_parameters()\n for name, value in filter_parameters.items():\n header[name] = str(value)\n image = self._fabio_class.__class__(data=data, header=header)\n if not self._dry_run:\n image.write(file_name)\n logger.info(\"Wrote %s\", file_name)\n self._fabio_images[algorithm] = image\n\n def get_fabio_image(self, algorithm):\n \"\"\"Get the constructed fabio image\n\n :rtype: fabio.fabioimage.FabioImage\n \"\"\"\n return self._fabio_images[algorithm]\n\n def close(self):\n \"\"\"Close the writer. 
Must not be used anymore.\"\"\"\n self._header = None\n\n\ndef common_prefix(string_list):\n \"\"\"Return the common prefix of a list of strings\n\n TODO: move it into utils package\n\n :param list(str) string_list: List of strings\n :rtype: str\n \"\"\"\n prefix = \"\"\n for ch in zip(string_list):\n c = ch[0]\n good = True\n for i in ch:\n if i != c:\n good = False\n break\n if good:\n prefix += c\n else:\n break\n return prefix\n\n\nclass AverageObserver(object):\n\n def image_loaded(self, fabio_image, image_index, images_count):\n \"\"\"Called when an input image is loaded\"\"\"\n pass\n\n def process_started(self):\n \"\"\"Called when the full processing is started\"\"\"\n pass\n\n def algorithm_started(self, algorithm):\n \"\"\"Called when an algorithm is started\"\"\"\n pass\n\n def frame_processed(self, algorithm, frame_index, frames_count):\n \"\"\"Called after providing a frame to an algorithm\"\"\"\n pass\n\n def result_processing(self, algorithm):\n \"\"\"Called before the result of an algorithm is computed\"\"\"\n pass\n\n def algorithm_finished(self, algorithm):\n \"\"\"Called when an algorithm is finished\"\"\"\n pass\n\n def process_finished(self):\n \"\"\"Called when the full process is finished\"\"\"\n pass\n\n\nclass Average(object):\n \"\"\"Process images to generate an average using different algorithms.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self._dark = None\n self._raw_flat = None\n self._flat = None\n self._monitor_key = None\n self._threshold = None\n self._minimum = None\n self._maximum = None\n self._fabio_images = []\n self._writer = None\n self._algorithms = []\n self._nb_frames = 0\n self._correct_flat_from_dark = False\n self._results = weakref.WeakKeyDictionary()\n self._observer = None\n\n def set_observer(self, observer):\n \"\"\"Set an observer to the average process.\n\n :param AverageObserver observer: An observer\n \"\"\"\n self._observer = observer\n\n def set_dark(self, dark_list):\n \"\"\"Defines images used as dark.\n\n :param list dark_list: List of dark used\n \"\"\"\n if dark_list is None:\n self._dark = None\n return\n darks = _normalize_image_stack(dark_list)\n self._dark = average_dark(darks, center_method=\"mean\", cutoff=4)\n\n def set_flat(self, flat_list):\n \"\"\"Defines images used as flat.\n\n :param list flat_list: List of dark used\n \"\"\"\n if flat_list is None:\n self._raw_flat = None\n return\n flats = _normalize_image_stack(flat_list)\n self._raw_flat = average_dark(flats, center_method=\"mean\", cutoff=4)\n\n def set_correct_flat_from_dark(self, correct_flat_from_dark):\n \"\"\"Defines if the dark must be applied on the flat.\n\n :param bool correct_flat_from_dark: If true, the dark is applied.\n \"\"\"\n self._correct_flat_from_dark = correct_flat_from_dark\n\n def get_counter_frames(self):\n \"\"\"Returns the number of frames used for the process.\n\n :rtype: int\n \"\"\"\n return self._nb_frames\n\n def get_fabio_images(self):\n \"\"\"Returns source images as fabio images.\n\n :rtype: list(fabio.fabioimage.FabioImage)\"\"\"\n return self._fabio_images\n\n def set_images(self, image_list):\n \"\"\"Defines the set set of source images to used to process an average.\n\n :param list image_list: List of filename, numpy arrays, fabio images\n used as source for the computation.\n \"\"\"\n self._fabio_images = []\n self._nb_frames = 0\n if len(image_list) > 100:\n # if too many files are opened, it may crash. 
The har limit is 1024\n copy_data = True\n else:\n copy_data = False\n for image_index, image in enumerate(image_list):\n if isinstance(image, six.string_types):\n logger.info(\"Reading %s\", image)\n try:\n fabio_image = fabio.open(image)\n except:\n # Handles the different URL like data\n data = read_data(image)\n fabio_image = fabio.numpyimage.NumpyImage(data)\n else:\n if copy_data and fabio_image.nframes == 1:\n # copy the data so that we can close the file right now.\n fimg = fabio_image.convert(fabio_image.__class__)\n fimg.filename = image\n fabio_image.close()\n fabio_image = fimg\n elif isinstance(image, fabio.fabioimage.fabioimage):\n fabio_image = image\n else:\n if fabio.hexversion < 262148:\n logger.error(\"Old version of fabio detected, upgrade to 0.4 or newer\")\n\n # Assume this is a numpy array like\n if not isinstance(image, numpy.ndarray):\n raise RuntimeError(\"Not good type for input, got %s, expected numpy array\" % type(image))\n fabio_image = fabio.numpyimage.NumpyImage(data=image)\n\n if self._observer:\n self._observer.image_loaded(fabio_image, image_index, len(image_list))\n self._fabio_images.append(fabio_image)\n self._nb_frames += fabio_image.nframes\n\n def set_monitor_name(self, monitor_name):\n \"\"\"Defines the monitor name used to correct images before processing\n the average. This monitor must be part of the file header, else the\n image is skipped.\n\n :param str monitor_name: Name of the monitor available on the header\n file\n \"\"\"\n\n self._monitor_key = monitor_name\n\n def set_pixel_filter(self, threshold, minimum, maximum):\n \"\"\"Defines the filter applied on each pixels of the images before\n processing the average.\n\n :param threshold: what is the upper limit?\n all pixel > max*(1-threshold) are discareded.\n :param minimum: minimum valid value or True\n :param maximum: maximum valid value\n \"\"\"\n self._threshold = threshold\n self._minimum = minimum\n self._maximum = maximum\n\n def set_writer(self, writer):\n \"\"\"Defines the object write which will be used to store the result.\n\n :param AverageWriter writer: The writer to use.\"\"\"\n self._writer = writer\n\n def add_algorithm(self, algorithm):\n \"\"\"Defines another algorithm which will be computed on the source.\n\n :param ImageReductionFilter algorithm: An averaging algorithm.\n \"\"\"\n self._algorithms.append(algorithm)\n\n def _get_corrected_image(self, fabio_image, image):\n \"\"\"Returns an image corrected by pixel filter, saturation, flat, dark,\n and monitor correction. The internal computation is done in float\n 64bits. The result is provided as float 32 bits.\n\n :param fabio.fabioimage.FabioImage fabio_image: Object containing the\n header of the data to process\n :param numpy.ndarray image: Data to process\n :rtype: numpy.ndarray\n \"\"\"\n corrected_image = numpy.ascontiguousarray(image, numpy.float64)\n if self._threshold or self._minimum or self._maximum:\n corrected_image = remove_saturated_pixel(corrected_image, self._threshold, self._minimum, self._maximum)\n if self._dark is not None:\n corrected_image -= self._dark\n if self._flat is not None:\n corrected_image /= self._flat\n if self._monitor_key is not None:\n try:\n monitor = header_utils.get_monitor_value(fabio_image, self._monitor_key)\n corrected_image /= monitor\n except header_utils.MonitorNotFound as e:\n logger.warning(\"Monitor not found in filename '%s', data skipped. 
Cause: %s\", fabio_image.filename, str(e))\n return None\n return numpy.ascontiguousarray(corrected_image, numpy.float32)\n\n def _get_image_reduction(self, algorithm):\n \"\"\"Returns the result of an averaging algorithm using all over\n parameters defined in this object.\n\n :param ImageReductionFilter algorithm: Averaging algorithm\n :rtype: numpy.ndarray\n \"\"\"\n algorithm.init(max_images=self._nb_frames)\n frame_index = 0\n for fabio_image in self._fabio_images:\n for frame in range(fabio_image.nframes):\n if fabio_image.nframes == 1:\n data = fabio_image.data\n else:\n data = fabio_image.getframe(frame).data\n logger.debug(\"Intensity range for %s#%i is %s --> %s\", fabio_image.filename, frame, data.min(), data.max())\n\n corrected_image = self._get_corrected_image(fabio_image, data)\n if corrected_image is not None:\n algorithm.add_image(corrected_image)\n if self._observer:\n self._observer.frame_processed(algorithm, frame_index, self._nb_frames)\n frame_index += 1\n if self._observer:\n self._observer.result_processing(algorithm)\n return algorithm.get_result()\n\n def _update_flat(self):\n \"\"\"\n Update the flat according to the last process parameters\n\n :rtype: numpy.ndarray\n \"\"\"\n if self._raw_flat is not None:\n flat = numpy.array(self._raw_flat)\n if self._correct_flat_from_dark:\n if self._dark is not None:\n flat -= self._dark\n else:\n logger.debug(\"No dark. Flat correction using dark skipped\")\n flat[numpy.where(flat <= 0)] = 1.0\n else:\n flat = None\n self._flat = flat\n\n def process(self):\n \"\"\"Process source images to all defined averaging algorithms defined\n using defined parameters. To access to the results you have to define\n a writer (`AverageWriter`). To follow the process forward you have to\n define an observer (`AverageObserver`).\n \"\"\"\n self._update_flat()\n writer = self._writer\n\n if self._observer:\n self._observer.process_started()\n\n if writer is not None:\n writer.write_header(self._fabio_images, self._nb_frames, self._monitor_key)\n\n for algorithm in self._algorithms:\n if self._observer:\n self._observer.algorithm_started(algorithm)\n image_reduction = self._get_image_reduction(algorithm)\n logger.debug(\"Intensity range in merged dataset : %s --> %s\", image_reduction.min(), image_reduction.max())\n if writer is not None:\n writer.write_reduction(algorithm, image_reduction)\n self._results[algorithm] = image_reduction\n if self._observer:\n self._observer.algorithm_finished(algorithm)\n\n if self._observer:\n self._observer.process_finished()\n\n if writer is not None:\n writer.close()\n\n def get_image_reduction(self, algorithm):\n \"\"\"Returns the result of an algorithm. The `process` must be already\n done.\n\n :param ImageReductionFilter algorithm: An averaging algorithm\n :rtype: numpy.ndarray\n \"\"\"\n return self._results[algorithm]\n\n\ndef average_images(listImages, output=None, threshold=0.1, minimum=None,\n maximum=None, darks=None, flats=None, filter_=\"mean\",\n correct_flat_from_dark=False, cutoff=None, quantiles=None,\n fformat=\"edf\", monitor_key=None):\n \"\"\"\n Takes a list of filenames and create an average frame discarding all\n saturated pixels.\n\n :param listImages: list of string representing the filenames\n :param output: name of the optional output file\n :param threshold: what is the upper limit? 
all pixel > max*(1-threshold)\n are discareded.\n :param minimum: minimum valid value or True\n :param maximum: maximum valid value\n :param darks: list of dark current images for subtraction\n :param flats: list of flat field images for division\n :param filter_: can be \"min\", \"max\", \"median\", \"mean\", \"sum\", \"quantiles\"\n (default='mean')\n :param correct_flat_from_dark: shall the flat be re-corrected ?\n :param cutoff: keep all data where (I-center)/std < cutoff\n :param quantiles: 2-tuple containing the lower and upper quantile (0<q<1)\n to average out.\n :param fformat: file format of the output image, default: edf\n :param monitor_key str: Key containing the monitor. Can be none.\n :return: filename with the data or the data ndarray in case format=None\n \"\"\"\n\n # input sanitization\n if not is_algorithm_name_exists(filter_):\n logger.warning(\"Filter %s not understood. switch to mean filter\", filter_)\n filter_ = \"mean\"\n\n if quantiles is not None and filter_ != \"quantiles\":\n logger.warning(\"Set method to quantiles as quantiles parameters is defined.\")\n filter_ = \"quantiles\"\n\n average = Average()\n average.set_images(listImages)\n average.set_dark(darks)\n average.set_flat(flats)\n average.set_correct_flat_from_dark(correct_flat_from_dark)\n average.set_monitor_name(monitor_key)\n average.set_pixel_filter(threshold, minimum, maximum)\n\n algorithm = create_algorithm(filter_, cutoff, quantiles)\n average.add_algorithm(algorithm)\n\n # define writer\n if fformat is not None:\n if fformat.startswith(\".\"):\n fformat = fformat.lstrip(\".\")\n if output is None:\n prefix = common_prefix([i.filename for i in average.get_fabio_images()])\n output = \"filt%02i-%s.%s\" % (average.get_counter_frames(), prefix, fformat)\n output = \"{method_name}\" + output\n\n if output is not None:\n writer = MultiFilesAverageWriter(output, fformat)\n average.set_writer(writer)\n else:\n writer = None\n\n average.process()\n\n if writer is not None:\n fabio_image = writer.get_fabio_image(algorithm)\n return fabio_image.filename\n else:\n return average.get_image_reduction(algorithm)\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n# Copyright (C) 2015-2018 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jérôme Kieffer ([email protected])\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport sys, numpy, time\nfrom pyFAI.utils import mathutil\nfrom . import utilstest\nimport fabio, pyopencl\nfrom pylab import *\nprint(\"#\"*50)\npyFAI = sys.modules[\"pyFAI\"]\nfrom pyFAI import splitBBox\n#splitBBox = sys.modules[\"pyFAI.splitBBox\"]\nai = pyFAI.load(\"testimages/Pilatus1M.poni\")\ndata = fabio.open(\"testimages/Pilatus1M.edf\").data\nref = ai.xrpd_LUT(data, 1000)\nobt = ai.xrpd_LUT_OCL(data, 1000)\nprint(abs(obt[1] - ref[1]).max())\nlut = ai._lut_integrator.lut\ngpu = pyFAI.ocl_azim_lut.OCL_LUT_Integrator(lut, data.size, \"GPU\")\nprint(gpu.device)\nimg = numpy.zeros(data.shape, dtype=\"float32\")\nprint(\"ref\", (data == -2).sum(), (data == -1).sum())\npyopencl.enqueue_copy(gpu._queue, img, gpu._cl_mem[\"image\"])#.wait()\nprint(\"obt\", (img == -2).sum(), (img == -1).sum())\n\nout_cyt = ai._lut_integrator.integrate(data)\nout_ocl = gpu.integrate(data)[0]\nprint(\"NoCorr R=\", mathutil.rwp((out_cyt[0], out_ocl), out_cyt[:2], \"no corrections\"))\nnodummy = out_cyt[1]\nplot(nodummy + 1, label=\"no_corr\")\nout_cyt = ai._lut_integrator.integrate(data, dummy= -2, delta_dummy=1.5)\nout_ocl = gpu.integrate(data, dummy= -2, delta_dummy=1.5)[0]\nprint(\"Dummy R=\", mathutil.rwp((out_cyt[0], out_ocl), out_cyt[:2], \"Dummy\"))\n#print(\"nodummy/Dummy\", mathutil.rwp((out_cyt[0], out_cyt[1]), (out_cyt[0], nodummy), \"nodummy/Dummy\")\n\ndark = numpy.random.random(data.shape)\nout_cyt = ai._lut_integrator.integrate(data, dark=dark)\nout_ocl = gpu.integrate(data, dark=dark)[0]\nprint(\"Dark R=\", mathutil.rwp((out_cyt[0], out_ocl), out_cyt[:2], \"dark\"))\n\nflat = 2 * numpy.ones_like(data)\nout_cyt = ai._lut_integrator.integrate(data, flat=flat)\nout_ocl = gpu.integrate(data, flat=flat)[0]\nprint(\"Flat R=\", mathutil.rwp((out_cyt[0], out_ocl), out_cyt[:2], \"flat\"))\n\nsolidAngle = ai.solidAngleArray(data.shape)\nout_cyt = ai._lut_integrator.integrate(data, solidAngle=solidAngle)\nout_ocl = gpu.integrate(data, solidAngle=solidAngle)[0]\nprint(\"SolidAngle R=\", mathutil.rwp((out_cyt[0], out_ocl), out_cyt[:2], \"SolidAngle\"))\n\npolarization = ai.polarization(data.shape, 0.95)\nout_cyt = ai._lut_integrator.integrate(data, polarization=polarization)\nout_ocl = gpu.integrate(data, polarization=polarization)[0]\nprint(\"PolarizationR=\", mathutil.rwp((out_cyt[0], out_ocl), out_cyt[:2], \"Polarization\"))\n\n#pyopencl.enqueue_copy(gpu._queue, img, gpu._cl_mem[\"image\"]).wait()\n#xx = splitBBox.histoBBox1d(weights=data,\n# pos0=ai._ttha,\n# delta_pos0=ai._dttha,\n# bins=1000,\n# polarization=polarization)[1]\n#plot(xx + 2, label=\"xrpd\")\nplot(out_cyt[1], label=\"ref\")\nplot(out_ocl, label=\"obt\")\n\n#plot(out, label=\"out\")\n#outData = numpy.zeros(1000, \"float32\")\n#outCount = numpy.zeros(1000, \"float32\")\n#outMerge = numpy.zeros(1000, \"float32\")\n#pyopencl.enqueue_copy(gpu._queue, outData, gpu._cl_mem[\"outData\"])#.wait()\n#pyopencl.enqueue_copy(gpu._queue, outCount, gpu._cl_mem[\"outCount\"])#.wait()\n#pyopencl.enqueue_copy(gpu._queue, outMerge, gpu._cl_mem[\"outMerge\"])#.wait()\n#plot(outData, label=\"outData\")\n#plot(outCount, label=\"outCount\")\n#plot(outMerge, label=\"outMerge\")\nlegend()\nt0 = time.time()\nout 
= gpu.integrate(data, dummy= -2, delta_dummy=1.5)\nprint(\"Timings With dummy\", 1000 * (time.time() - t0))\nt0 = time.time()\nout = gpu.integrate(data)\nprint(\"Timings Without dummy\", 1000 * (time.time() - t0))\nyscale(\"log\")\nshow()\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n# Copyright (C) 2017-2018 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# .\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# .\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"Module providing common pixel-wise pre-processing of data.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function, with_statement\n\n__author__ = \"Jerome Kieffer\"\n__contact__ = \"[email protected]\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"05/12/2019\"\n__status__ = \"development\"\n\nimport warnings\nimport numpy\n\n\ndef preproc(raw,\n dark=None,\n flat=None,\n solidangle=None,\n polarization=None,\n absorption=None,\n mask=None,\n dummy=None,\n delta_dummy=None,\n normalization_factor=1.0,\n empty=None,\n split_result=False,\n variance=None,\n dark_variance=None,\n poissonian=False,\n dtype=numpy.float32\n ):\n \"\"\"Common preprocessing step for all integration engines\n\n :param data: raw value, as a numpy array, 1D or 2D\n :param mask: array non null where data should be ignored\n :param dummy: value of invalid data\n :param delta_dummy: precision for invalid data\n :param dark: array containing the value of the dark noise, to be subtracted\n :param flat: Array containing the flatfield image. It is also checked for dummies if relevant.\n :param solidangle: the value of the solid_angle. This processing may be performed during the rebinning instead. 
left for compatibility\n :param polarization: Correction for polarization of the incident beam\n :param absorption: Correction for absorption in the sensor volume\n :param normalization_factor: final value is divided by this\n :param empty: value to be given for empty bins\n :param split_result: set to true to separate signal from normalization and\n return an array of float2, float3 (with variance) ot float4 (including counts)\n :param variance: provide an estimation of the variance, enforce\n split_result=True and return an float3 array with variance in second position.\n :param dark_variance: provide an estimation of the variance of the dark_current,\n enforce split_result=True and return an float3 array with variance in second position.\n :param poissonian: set to \"True\" for assuming the detector is poissonian and variance = max(1, raw + dark)\n :param dtype: dtype for all processing\n\n All calculation are performed in single precision floating point (32 bits).\n\n NaN are always considered as invalid values\n\n if neither empty nor dummy is provided, empty pixels are 0.\n Empty pixels are always zero in \"split_result\" mode.\n\n When set to False, i.e the default, the pixel-wise operation is:\n\n .. math::\n\n I = \\\\frac{raw - dark}{flat \\\\cdot solidangle \\\\cdot polarization \\\\cdot absorption}\n\n Invalid pixels are set to the dummy or empty value.\n\n When split_result is set to True, each result is a float2\n or a float3 (with an additional value for the variance) as such:\n\n I = [:math:`raw - dark`, :math:`variance`, :math:`flat \\\\cdot solidangle \\\\cdot polarization \\\\cdot absorption`]\n\n If split_result is 4, then the count of pixel is appended to the list, i.e. 1 or 0 for masked pixels\n Empty pixels will have all their 2 or 3 or 4 values to 0 (and not to dummy or empty value)\n\n If poissonian is set to True, the variance is evaluated as raw + dark, with a minimum of 1.\n \"\"\"\n if isinstance(dtype, str):\n dtype = numpy.dtype(dtype).type\n shape = raw.shape\n out_shape = list(shape)\n if split_result or (variance is not None) or poissonian:\n if split_result == 4:\n out_shape += [4]\n elif (variance is not None) or poissonian:\n out_shape += [3]\n else:\n out_shape += [2]\n split_result = True\n size = raw.size\n if (mask is None) or (mask is False):\n mask = numpy.zeros(size, dtype=bool)\n else:\n assert mask.size == size, \"Mask array size is correct\"\n mask = numpy.ascontiguousarray(mask.ravel(), dtype=bool)\n\n if (dummy is not None) and (delta_dummy is not None):\n check_dummy = True\n cdummy = dtype(dummy)\n ddummy = dtype(delta_dummy)\n elif (dummy is not None):\n check_dummy = True\n cdummy = dtype(dummy)\n ddummy = 0.0\n else:\n check_dummy = False\n cdummy = dtype(empty or 0.0)\n ddummy = 0.0\n\n signal = numpy.ascontiguousarray(raw.ravel(), dtype=dtype)\n normalization = numpy.zeros_like(signal) + normalization_factor\n if variance is not None:\n variance = numpy.ascontiguousarray(variance.ravel(), dtype=dtype)\n elif poissonian:\n variance = numpy.maximum(1.0, signal) # this makes a copy\n\n # runtime warning here\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n\n if check_dummy:\n # runtime warning here\n if ddummy == 0:\n mask |= (signal == cdummy)\n else:\n mask |= (abs(signal - cdummy) <= ddummy)\n\n if dark is not None:\n assert dark.size == size, \"Dark array size is correct\"\n dark = numpy.ascontiguousarray(dark.ravel(), dtype=dtype)\n if check_dummy:\n # runtime warning here\n if ddummy 
== 0:\n mask |= (dark == cdummy)\n else:\n mask |= abs(dark - cdummy) < ddummy\n signal -= dark\n if poissonian:\n variance += dark\n elif dark_variance is not None:\n variance += dark_variance\n\n if flat is not None:\n assert flat.size == size, \"Flat array size is correct\"\n flat = numpy.ascontiguousarray(flat.ravel(), dtype=dtype)\n if check_dummy:\n # runtime warning here\n if ddummy == 0:\n mask |= (flat == cdummy)\n else:\n mask |= abs(flat - cdummy) <= ddummy\n normalization *= flat\n\n if polarization is not None:\n assert polarization.size == size, \"Polarization array size is correct\"\n normalization *= numpy.ascontiguousarray(polarization.ravel(), dtype=dtype)\n\n if solidangle is not None:\n assert solidangle.size == size, \"Solid angle array size is correct\"\n normalization *= numpy.ascontiguousarray(solidangle.ravel(), dtype=dtype)\n\n if absorption is not None:\n assert absorption.size == size, \"Absorption array size is correct\"\n normalization *= numpy.ascontiguousarray(absorption.ravel(), dtype=dtype)\n\n mask |= numpy.logical_not(numpy.isfinite(signal))\n mask |= numpy.logical_not(numpy.isfinite(normalization))\n mask |= (normalization == 0)\n if variance is not None:\n mask |= numpy.logical_not(numpy.isfinite(variance))\n if split_result:\n result = numpy.zeros(out_shape, dtype=dtype)\n signal[mask] = 0.0\n normalization[mask] = 0.0\n result[..., 0] = signal.reshape(shape)\n if out_shape[-1] == 4:\n if variance is not None:\n variance[mask] = 0.0\n result[..., 1] = variance.reshape(shape)\n result[..., 2] = normalization.reshape(shape)\n result[..., 3] = 1.0 - mask.reshape(shape)\n elif variance is None:\n result[:, :, 1] = normalization.reshape(shape)\n else:\n variance[mask] = 0.0\n result[..., 1] = variance.reshape(shape)\n result[..., 2] = normalization.reshape(shape)\n else:\n result = signal / normalization\n result[mask] = cdummy\n result.shape = shape\n return result\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 07 09:52:51 2014\n\n@author: ashiotis\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport sys, numpy, time, os\n\nimport fabio\nimport pyopencl as cl\nfrom pylab import *\nfrom pyFAI.third_party import six\nprint(\"#\"*50)\nif __name__ == '__main__':\n import pkgutil\n __path__ = pkgutil.extend_path([os.path.dirname(__file__)], \"pyFAI.test\")\nfrom pyFAI.test.utilstest import UtilsTest\n\npyFAI = sys.modules[\"pyFAI\"]\nfrom pyFAI import splitPixelFullLUT\nfrom pyFAI import splitPixelFull\nfrom pyFAI import ocl_hist_pixelsplit\n# from pyFAI import splitBBoxLUT\n# from pyFAI import splitBBoxCSR\n\nos.chdir(\"testimages\")\nai = pyFAI.load(\"halfccd.poni\")\ndata = fabio.open(\"halfccd.edf\").data\n\nworkgroup_size = 256\nbins = 1000\n\npos_in = ai.array_from_unit(data.shape, \"corner\", unit=\"2th_deg\", scale=False)\n\npos = pos_in.reshape(pos_in.size / 8, 4, 2)\n\npos_size = pos.size\nsize = data.size\n\n\nctx = cl.create_some_context()\nqueue = cl.CommandQueue(ctx)\nmf = cl.mem_flags\n\nd_pos = cl.array.to_device(queue, pos)\nd_preresult = cl.Buffer(ctx, mf.READ_WRITE, 4 * 4 * workgroup_size)\nd_minmax = cl.Buffer(ctx, mf.READ_WRITE, 4 * 4)\n\nwith open(\"../../openCL/ocl_hist_pixelsplit.cl\", \"r\") as kernelFile:\n kernel_src = kernelFile.read()\n\ncompile_options = \"-D BINS=%i -D NIMAGE=%i -D WORKGROUP_SIZE=%i -D EPS=%f\" % \\\n (bins, size, workgroup_size, numpy.finfo(numpy.float32).eps)\n\nprogram = cl.Program(ctx, kernel_src).build(options=compile_options)\n\nprogram.reduce1(queue, 
(workgroup_size * workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult)\n\nprogram.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult, d_minmax)\n\nresult = numpy.ndarray(4, dtype=numpy.float32)\n\ncl.enqueue_copy(queue, result, d_minmax)\n\n\nmin0 = pos[:, :, 0].min()\nmax0 = pos[:, :, 0].max()\nmin1 = pos[:, :, 1].min()\nmax1 = pos[:, :, 1].max()\nminmax = (min0, max0, min1, max1)\n\nprint(minmax)\nprint(result)\n\n\nd_outData = cl.Buffer(ctx, mf.READ_WRITE, 4 * bins)\nd_outCount = cl.Buffer(ctx, mf.READ_WRITE, 4 * bins)\nd_outMerge = cl.Buffer(ctx, mf.READ_WRITE, 4 * bins)\n\nprogram.memset_out(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)\n\noutData = numpy.ndarray(bins, dtype=numpy.float32)\noutCount = numpy.ndarray(bins, dtype=numpy.float32)\noutMerge = numpy.ndarray(bins, dtype=numpy.float32)\n\ncl.enqueue_copy(queue, outData, d_outData)\ncl.enqueue_copy(queue, outCount, d_outCount)\ncl.enqueue_copy(queue, outMerge, d_outMerge)\n\nglobal_size = (data.size + workgroup_size - 1) & ~(workgroup_size - 1),\n\nd_image = cl.array.to_device(queue, data)\nd_image_float = cl.Buffer(ctx, mf.READ_WRITE, 4 * size)\n\n# program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M\nprogram.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # halfccd\n\nprogram.integrate1(queue, global_size, (workgroup_size,), d_pos.data, d_image_float, d_minmax, numpy.int32(data.size), d_outData, d_outCount)\n\ncl.enqueue_copy(queue, outData, d_outData)\ncl.enqueue_copy(queue, outCount, d_outCount)\ncl.enqueue_copy(queue, outMerge, d_outMerge)\n\nprogram.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)\n\ncl.enqueue_copy(queue, outData, d_outData)\ncl.enqueue_copy(queue, outCount, d_outCount)\ncl.enqueue_copy(queue, outMerge, d_outMerge)\n\n\n\nref = ai.xrpd_LUT(data, bins, correctSolidAngle=False)\ntest = splitPixelFull.fullSplit1D(pos, data, bins)\n\n# assert(numpy.allclose(ref,outMerge))\n\n# plot(outMerge, label=\"ocl_hist\")\nplot(ref[0], test[1], label=\"splitPixelFull\")\nplot(ref[0], ref[1], label=\"ref\")\n# plot(abs(ref-outMerge)/outMerge, label=\"ocl_csr_fullsplit\")\nlegend()\nshow()\nsix.moves.input()\n" ]
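Editorial note: the `ImageAccumulatorFilter` hierarchy in the embedded pyFAI averaging module (MaxAveraging, MinAveraging, SumAveraging, MeanAveraging) reduces a stream of frames into one running image so the whole stack never has to sit in memory. The sketch below illustrates that incremental-reduction pattern only; `RunningMean` and its method names are illustrative stand-ins, not the library's API.

import numpy

class RunningMean:
    """Fold frames one by one into a running sum, then divide by the count."""
    def __init__(self):
        self._acc = None
        self._count = 0

    def add_image(self, image):
        image = numpy.asarray(image, dtype=numpy.float64)
        # The first frame initialises the accumulator; later frames are summed in.
        self._acc = image.copy() if self._acc is None else self._acc + image
        self._count += 1

    def get_result(self):
        return (self._acc / self._count).astype(numpy.float32)

# Usage: feed a stream of 2D frames and read back the mean image.
frames = [numpy.random.poisson(100, size=(64, 64)) for _ in range(5)]
reducer = RunningMean()
for frame in frames:
    reducer.add_image(frame)
mean_image = reducer.get_result()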
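Editorial note: the `average_dark` docstring above describes a two-step reduction: pick a per-pixel center (mean, median, or a quantile band of the sorted stack) and then average only the frames whose deviation from that center stays within cutoff standard deviations. The following is a rough NumPy sketch of that sigma-clipped average for a 3D stack, assuming the mean as center; it is not the pyFAI implementation itself, and the function name is illustrative.

import numpy

def clipped_average(stack, cutoff=4.0):
    """Average a (nframes, ny, nx) stack, ignoring pixels further than
    cutoff * std from the per-pixel mean (similar in spirit to average_dark)."""
    stack = numpy.asarray(stack, dtype=numpy.float32)
    center = stack.mean(axis=0)
    std = stack.std(axis=0)
    # Avoid division by zero where all frames agree exactly.
    std[std == 0] = 1.0
    keep = numpy.abs(stack - center) / std <= cutoff
    summed = numpy.where(keep, stack, 0.0).sum(axis=0)
    counts = numpy.maximum(1, keep.sum(axis=0))
    return summed / counts

stack = numpy.random.normal(100.0, 5.0, size=(10, 32, 32)).astype(numpy.float32)
stack[0, 0, 0] = 1e6          # a cosmic-ray-like outlier to be rejected
dark = clipped_average(stack, cutoff=4.0)

The quantile variant described in the docstring replaces the mean with the average of the sorted stack between the lower and upper quantile indices before the same clipping step.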
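Editorial note: `remove_saturated_pixel` in the same file flags pixels above max * (1 - threshold) (or above an explicit ceiling) as saturated before repairing the surrounding zones. The small sketch below shows only that thresholding step for a float image; the binary dilation and median-filter repair of the real routine are omitted, and `saturation_mask` is an illustrative name.

import numpy

def saturation_mask(image, threshold=0.1, maximum=None):
    """Boolean mask of pixels above (1 - threshold) * image maximum,
    or above the explicit `maximum` ceiling when one is given."""
    image = numpy.asarray(image, dtype=numpy.float64)
    if maximum is None:
        maxt = (1.0 - threshold) * image.max()
    else:
        maxt = maximum
    return image > maxt

img = numpy.random.uniform(0, 1000, size=(32, 32))
img[5, 5] = 65535.0                     # a saturated pixel
n_saturated = int(saturation_mask(img, threshold=0.1).sum())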
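Editorial note: the `preproc` docstring (third file in this entry) gives the default pixel-wise correction as I = (raw - dark) / (flat * solidangle * polarization * absorption), with NaN, dummy-valued and zero-normalization pixels replaced by the empty value. Below is a condensed NumPy sketch of that formula in the non-split mode, under simplified argument handling; the dtype, variance and split_result bookkeeping of the real routine are left out, and `simple_preproc` is an illustrative name.

import numpy

def simple_preproc(raw, dark=None, flat=None, solidangle=None,
                   polarization=None, absorption=None,
                   dummy=None, empty=0.0):
    """I = (raw - dark) / (flat * solidangle * polarization * absorption),
    with invalid pixels (NaN, dummy, zero normalization) set to `empty`."""
    signal = numpy.asarray(raw, dtype=numpy.float32).copy()
    mask = ~numpy.isfinite(signal)
    if dummy is not None:
        mask |= (signal == dummy)
    if dark is not None:
        signal -= numpy.asarray(dark, dtype=numpy.float32)
    norm = numpy.ones_like(signal)
    for corr in (flat, solidangle, polarization, absorption):
        if corr is not None:
            norm *= numpy.asarray(corr, dtype=numpy.float32)
    mask |= ~numpy.isfinite(norm) | (norm == 0)
    # Divide only where the normalization is valid, then stamp in the empty value.
    safe_norm = numpy.where(mask, 1.0, norm)
    return numpy.where(mask, empty, signal / safe_norm)

raw = numpy.random.poisson(200, size=(16, 16)).astype(numpy.float32)
dark = numpy.full_like(raw, 5.0)
flat = numpy.full_like(raw, 1.02)
corrected = simple_preproc(raw, dark=dark, flat=flat)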
[ [ "numpy.dtype", "numpy.ndarray", "numpy.empty", "numpy.finfo" ], [ "numpy.sqrt" ], [ "numpy.random.random" ], [ "numpy.allclose" ], [ "numpy.histogram", "numpy.rad2deg" ], [ "scipy.ndimage.morphology.binary_dilation", "numpy.log", "numpy.maximum", "numpy.minimum", "scipy.optimize.optimize.fmin", "numpy.ascontiguousarray", "numpy.median", "scipy.ndimage.median_filter", "numpy.sort", "scipy.ndimage.label", "scipy.optimize.optimize.fminbound", "scipy.interpolate.interp1d", "numpy.float32", "numpy.array", "numpy.zeros", "numpy.where" ], [ "numpy.ones_like", "numpy.random.random", "numpy.zeros" ], [ "numpy.maximum", "numpy.isfinite", "numpy.dtype", "numpy.zeros_like", "numpy.zeros" ], [ "numpy.int32", "numpy.ndarray", "numpy.uint32", "numpy.finfo" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
seib2/PypeIt
[ "4c68b38cb907345a480d7afee58200a05ecd4556", "4c68b38cb907345a480d7afee58200a05ecd4556", "4c68b38cb907345a480d7afee58200a05ecd4556" ]
[ "pypeit/tests/test_save.py", "pypeit/core/tracewave.py", "pypeit/wavecalib.py" ]
[ "\"\"\"\nModule to run tests on arsave\n\"\"\"\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom astropy import units\nfrom astropy.io import fits\n\nfrom pypeit import specobjs\nfrom pypeit.core import save\n\nfrom pypeit.tests.tstutils import dummy_fitstbl\nfrom pypeit.spectrographs import util\n\ndef data_path(filename):\n data_dir = os.path.join(os.path.dirname(__file__), 'files')\n return os.path.join(data_dir, filename)\n\n\ndef mk_specobj(flux=5, objid=500):\n # specobj\n npix = 100\n specobj = specobjs.SpecObj((100,100), 0, (0.4,0.6), objtype='science',\n spat_pixpos=300)\n specobj.boxcar = dict(wave=np.arange(npix)*units.AA, counts=np.ones(npix)*flux)\n specobj.optimal = dict(wave=np.arange(npix)*units.AA, counts=np.ones(npix)*flux-0.5)\n specobj.objid = objid\n specobj.trace_spat = np.arange(npix) / npix\n specobj.fwhmfit = np.arange(npix) / npix\n # Return\n return specobj\n\n\ndef test_save2d_fits():\n #settings.dummy_settings()\n #fitsdict = arutils.dummy_fitsdict(nfile=1, spectrograph='none', directory=data_path(''))\n fitstbl = dummy_fitstbl(directory=data_path(''))\n # Kludge\n fitstbl.table.remove_column('filename')\n fitstbl['filename'] = 'b1.fits.gz'\n # Settings\n #settings.argflag['run']['directory']['science'] = data_path('')\n spectrograph = 'shane_kast_blue'\n # Fill with dummy images\n dum = np.ones((100,100))\n sci_dict = {}\n sci_dict[0] = {}\n sci_dict[0]['sciframe'] = dum\n sci_dict[0]['finalvar'] = dum * 2\n sci_dict[0]['finalsky'] = dum + 0.1\n\n sci_dict['meta'] = {}\n sci_dict['meta']['vel_corr'] = 0.\n sci_dict['meta']['ir_redux'] = False\n\n basename = 'test'\n scidx = 5\n path = fitstbl['directory'][scidx]\n ifile = fitstbl['filename'][scidx]\n rawfile = os.path.join(path, ifile)\n master_dir = data_path('MF')+'_'+spectrograph\n outfile = data_path('') + 'spec2d_{:s}.fits'.format(basename)\n # Create a dummy master_key_dict\n master_key_dict = dict(frame='', bpm='bpmkey',bias='',arc='',trace='',flat='')\n raw_hdr = fits.open(rawfile)[0].header\n save.save_2d_images(sci_dict, raw_hdr, spectrograph, master_key_dict, master_dir, outfile)\n # Read and test\n head0 = fits.getheader(data_path('spec2d_test.fits'))\n assert head0['PYPMFDIR'] == master_dir\n assert head0['BPMMKEY'] == 'bpm' # See save_2d_images; removes last 3 characters\n assert 'PYPEIT' in head0['PIPELINE']\n\n\ndef test_save1d_fits():\n \"\"\" save1d to FITS and HDF5\n \"\"\"\n # Init\n fitstbl = dummy_fitstbl(spectro_name='shane_kast_blue', directory=data_path(''))\n sobj = mk_specobj()\n specObjs = specobjs.SpecObjs([sobj])\n spectrograph = util.load_spectrograph('shane_kast_blue')\n # Write to FITS\n basename = 'test'\n outfile = data_path('') + 'spec1d_{:s}.fits'.format(basename)\n save.save_1d_spectra_fits(specObjs, fitstbl[5], spectrograph, outfile)\n\n\n# NEEDS REFACTORING\n#def test_save1d_hdf5():\n# \"\"\" save1d to FITS and HDF5\n# \"\"\"\n# # Dummy self\n# fitstbl = arsort.dummy_fitstbl(spectrograph='shane_kast_blue', directory=data_path(''))\n# slf = arsciexp.dummy_self(fitstbl=fitstbl)\n# # specobj\n# slf._specobjs = []\n# slf._specobjs.append([])\n# slf._specobjs[0].append([mk_specobj(objid=455), mk_specobj(flux=3., objid=555)])\n# # Write to HDF5\n# arsave.save_1d_spectra_hdf5(slf, fitstbl)\n\n", "\"\"\" Module for methods related to tracing arc/sky lines across a slit/order\n\"\"\"\nimport inspect\nimport copy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom pypeit import msgs\nfrom pypeit.core import arc\nfrom pypeit import utils\nfrom pypeit.core import 
qa\nfrom pypeit.core import trace_slits\nfrom pypeit.core import extract\nfrom astropy.stats import sigma_clipped_stats\nimport matplotlib as mpl\nfrom matplotlib.lines import Line2D\nimport scipy\n\nfrom IPython import embed\n\ntry:\n from pypeit import ginga\nexcept ImportError:\n pass\n\n\ndef tilts_find_lines(arc_spec, slit_cen, tracethresh=10.0, sig_neigh=5.0, nfwhm_neigh=3.0,\n only_these_lines=None, fwhm=4.0, nonlinear_counts=1e10, fit_frac_fwhm=1.25, cont_frac_fwhm=1.0,\n max_frac_fwhm=2.0, cont_samp=30, niter_cont=3, debug_lines=False, debug_peaks=False):\n \"\"\"\n I can't believe this method has no docs\n\n Args:\n arc_spec:\n slit_cen:\n tracethresh:\n sig_neigh:\n nfwhm_neigh:\n only_these_lines:\n fwhm:\n nonlinear_counts:\n fit_frac_fwhm:\n cont_frac_fwhm:\n max_frac_fwhm:\n cont_samp:\n niter_cont:\n debug_lines:\n debug_peaks:\n\n Returns:\n (np.ndarray, np.ndarray) or (None,None):\n\n \"\"\"\n\n\n nspec = arc_spec.size\n spec_vec = np.arange(nspec)\n # Find peaks with a liberal threshold of sigdetect = 5.0\n tampl_tot, tampl_cont_tot, tcent_tot, twid_tot, _, wgood, arc_cont_sub, nsig_tot = arc.detect_lines(\n arc_spec, sigdetect=np.min([sig_neigh,tracethresh]), fwhm=fwhm, fit_frac_fwhm=fit_frac_fwhm, cont_frac_fwhm=cont_frac_fwhm,\n max_frac_fwhm=max_frac_fwhm, cont_samp=cont_samp, niter_cont=niter_cont, nonlinear_counts=nonlinear_counts,\n debug=debug_peaks)\n # Good lines\n arcdet = tcent_tot[wgood]\n arc_ampl = tampl_cont_tot[wgood]\n nsig = nsig_tot[wgood]\n\n npix_neigh = nfwhm_neigh*fwhm\n # Determine the best lines to use to trace the tilts\n aduse = np.zeros(arcdet.size, dtype=np.bool) # Which lines should be used to trace the tilts\n w = np.where(nsig >= tracethresh)\n aduse[w] = 1\n # Remove lines that are within npix_neigh pixels.\n # #ToDO replce this with a near-neighbor based approach, where\n # we identify groups and take the brightest line in a given group?\n nuse = np.sum(aduse)\n detuse = arcdet[aduse]\n idxuse = np.arange(arcdet.size)[aduse]\n olduse = aduse.copy()\n for s in range(nuse):\n w = np.where((np.abs(arcdet - detuse[s]) <= npix_neigh) & (np.abs(arcdet - detuse[s]) >= 1.0) & (nsig > sig_neigh))[0]\n for u in range(w.size):\n if nsig[w[u]] > nsig[olduse][s]:\n aduse[idxuse[s]] = False\n break\n # Restricted to ID lines? [introduced to avoid LRIS ghosts]\n if only_these_lines is not None:\n ids_pix = np.array(only_these_lines)\n idxuse = np.arange(arcdet.size)[aduse]\n for s in idxuse:\n if np.min(np.abs(arcdet[s] - ids_pix)) > 2.0:\n msgs.info(\"Ignoring line at spectral position={:6.1f} which was not identified\".format(arcdet[s]))\n aduse[s] = False\n\n # Final spectral positions of arc lines we will trace\n lines_spec = arcdet[aduse]\n nlines = len(lines_spec)\n if nlines == 0:\n msgs.warn('No arc lines were deemed usable on this slit. Cannot compute tilts. 
Try lowering tracethresh.')\n msgs.warn('Or, more likely, this was a bad slit (which you might remove)')\n return None, None\n else:\n msgs.info('Modelling arc line tilts with {:d} arc lines'.format(nlines))\n\n\n if debug_lines:\n xrng = np.arange(nspec)\n plt.figure(figsize=(14, 6))\n plt.plot(xrng, arc_cont_sub, color='black', drawstyle='steps-mid', lw=3, label='arc', linewidth=1.0)\n plt.plot(arcdet[~aduse], arc_ampl[~aduse], 'r+', markersize=6.0, label='bad for tilts')\n plt.plot(arcdet[aduse], arc_ampl[aduse], 'g+', markersize=6.0, label='good for tilts')\n if nonlinear_counts < 1e9:\n plt.hlines(nonlinear_counts, xrng.min(), xrng.max(), color='orange', linestyle='--', linewidth=2.0,\n label='nonlinear', zorder=10)\n plt.title('Good Lines = {:d}'.format(np.sum(aduse)) + ', Bad Lines = {:d}'.format(np.sum(~aduse)))\n plt.ylim(arc_cont_sub.min(), 1.5 * arc_cont_sub.max())\n plt.legend()\n plt.show()\n\n # Spatial position of line, i.e. the central trace interpolated onto the spectral pixel of the line\n lines_spat = np.interp(lines_spec, spec_vec, slit_cen)\n\n return lines_spec, lines_spat\n\n\ndef trace_tilts_work(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=None, gauss=False, tilts_guess=None, fwhm=4.0,\n spat_order=3, maxdev_tracefit=0.02,sigrej_trace=3.0, max_badpix_frac=0.30,\n tcrude_maxerr=1.0, tcrude_maxshift=3.0, tcrude_maxshift0=3.0,tcrude_nave=5, show_tracefits=False):\n\n\n \"\"\"\n Use a PCA model to determine the best object (or slit edge) traces for echelle spectrographs.\n\n Parameters\n ----------\n arcimg: ndarray, float (nspec, nspat)\n Image of arc or sky that will be used for tracing tilts.\n lines_spec: ndarray, float (nlines,)\n Array containing arc line centroids along the center of the slit for each arc line that will be traced. This is\n in pixels in image coordinates.\n lines_spat: ndarray, float (nlines,)\n Array contianing the spatial position of the center of the slit along which the arc was extracted. This is is in\n pixels in image coordinates.\n thismask: ndarray, boolean (nspec, nsapt)\n Boolean mask image specifying the pixels which lie on the slit/order to search for objects on.\n The convention is: True = on the slit/order, False = off the slit/order. This must be the same size as the arcimg.\n Optional Parameters\n -------------------\n inmask: float ndarray, default = None\n Input mask image.\n gauss: bool, default = False\n If true the code will trace the arc lines usign Gaussian weighted centroiding (trace_gweight) instead of the default,\n which is flux weighted centroiding (trace_fweight)\n tilts_guess: float ndarray, default = None\n A guess for the tilts used for running this tilt tracing in an iterative manner. If the tilts_guess is not None, it\n should be an array containing the tilts from a previous iteration which will be used as a crutch for the tracing of\n the tilts. The default is None, which is how this code is run on a first iteration. In that case the crutces are\n determined via trace_crude, and then the flux (or Gaussian) weighted tracing is performed.\n fwhm: float\n Expected FWHM of the arc lines.\n spat_order: int, default = None\n Order of the legendre polynomial that will be fit to the tilts.\n maxdev_tracefit: float, default = 0.2\n Maximum absolute deviation for the arc tilt fits during iterative trace fitting expressed in units of the fwhm.\n sigrej_trace: float, default = 3.0\n From each line we compute a median absolute deviation of the trace from the polynomial fit. 
We then\n analyze the distribution of maximxum absolute deviations (MADs) for all the lines, and reject sigrej_trace outliers\n from that distribution.\n max_badpix_frac: float, default = 0.20\n Maximum fraction of total pixels that can be masked by the trace_gweight algorithm\n (because the residuals are too large) to still be usable for tilt fitting.\n tcrude_maxerr: float, default = 1.0\n maxerr parameter for trace crude\n tcrude_maxshift: float, default = 3.0\n maxshift parameter for trace crude\n tcrude_maxshift0: float, default = 3.0\n maxshift0 parameter for trace crude\n tcrude_nave: int, default = 5\n Trace crude is used to determine the initial arc line tilts, which are then iteratively fit. Trace crude\n can optionally boxcar smooth the image (along the spatial direction of the image, i.e. roughly along the arc line tilts)\n to improve the tracing.\n show_tracefits: bool, default = False\n If true the fits will be shown to each arc line trace by iterative_fitting\n \"\"\"\n\n\n nspec, nspat = arcimg.shape\n spec_vec = np.arange(nspec)\n spat_vec = np.arange(nspat)\n slit_widp2 = int(np.ceil((np.sum(thismask,axis=1)).max()) + 2)\n slit_width_even = np.fmin(slit_widp2 if slit_widp2 % 2 == 0 else slit_widp2 + 1, nspat-1)\n trace_int = slit_width_even//2\n\n maxdev = maxdev_tracefit*fwhm # maxdev is fraction of fwhm\n do_crude = True if tilts_guess is None else False\n nlines = len(lines_spec)\n\n nsub = 2 * trace_int + 1\n\n lines_spat_int = np.round(lines_spat).astype(int)\n\n spat_min = np.zeros(nlines, dtype=int)\n spat_max = np.zeros(nlines, dtype=int)\n\n if inmask is None:\n inmask = thismask\n\n # The sub arrays hold the sub-imaged tilts\n #tilts_sub = np.zeros((nsub, nlines)) # Thee trace_fweight (or gweighed) tilts\n\n #tilts_sub_err = np.zeros((nsub, nlines)) # errors on the tilts (only used for masking but not weighted fitting)\n #tilts_sub_mask = np.zeros((nsub, nlines), dtype=bool) # mask indicating where the tilts are actually covered in the sub image, i.e. where thismask != False\n #tilts_sub_spec = np.outer(np.ones(nsub), lines_spec) # spectral coordinate of each tilt, which is the arc line spectral pixel location\n #tilts_sub_spec_fit = np.zeros((nsub, nlines)) # spectral position determined by evaluating the tilt fit at the center of the slit\n\n #tilts_sub_dspat = np.zeros_like(tilts_sub_spat) # delta position of the tilt in pixels, i.e. difference between slitcen and the spatial coordinate above\n\n # PCA fitting uses the sub-imaged fits, so we need them\n tilts_sub_fit = np.zeros((nsub, nlines)) # legendre polynomial fits to the tilt traces\n tilts_sub_spat = np.outer(np.arange(nsub), np.ones(nlines)) # spatial coordinate along each tilt\n\n tilts = np.zeros((nspat, nlines)) # The trace_fweight (or gweighed) tilts\n tilts_fit = np.zeros((nspat, nlines)) # legendre polynomial fits to the tilt traces\n tilts_err = np.zeros((nspat, nlines)) # errors on the tilts (only used for masking but not weighted fitting)\n tilts_mask = np.zeros((nspat, nlines), dtype=bool) # This is true if the pixel was in a region traced\n tilts_spec = np.zeros((nspat, nlines)) # spectral position determined by evaluating the tilt fit at the center of the slit\n tilts_spat = np.outer(np.arange(nspat), np.ones(nlines)) # spatial coordinate along each tilt\n tilts_dspat= np.zeros_like(tilts_spat) # delta position of the tilt in pixels, i.e. 
difference between slitcen and the spatial coordinate above\n\n # Transposed image and masks for traceing\n arcimg_trans = (arcimg * thismask).T\n inmask_trans = (inmask * thismask).T.astype(float)\n thismask_trans = thismask.T\n\n # 1) Trace the tilts from a guess. If no guess is provided from a previous iteration use trace_crude\n for iline in range(nlines):\n # We sub-image each tilt using a symmetric window about the (integer) spatial location of each line,\n # which is the slitcen evaluated at the line spectral position.\n spat_min[iline] = lines_spat_int[iline] - trace_int # spat_min is the minium location of the sub-image\n spat_max[iline] = lines_spat_int[iline] + trace_int + 1 # spat_max is the maximum location of the sub-image\n min_spat = np.fmax(spat_min[iline], 0) # These min_spat and max_spat are to prevent leaving the image\n max_spat = np.fmin(spat_max[iline], nspat - 1)\n sub_img = arcimg_trans[min_spat:max_spat, :]\n sub_inmask = inmask_trans[min_spat:max_spat,:]\n sub_thismask = thismask_trans[min_spat:max_spat,:]\n if do_crude: # First time tracing, do a trace crude\n tilts_guess_now, err_now = trace_slits.trace_crude_init(\n sub_img, np.array([lines_spec[iline]]), (sub_img.shape[0] - 1) // 2, invvar=sub_inmask, radius=fwhm,\n nave=tcrude_nave, maxshift0=tcrude_maxshift0, maxshift=tcrude_maxshift, maxerr=tcrude_maxerr)\n tilts_guess_now=tilts_guess_now.flatten()\n else: # A guess was provided, use that as the crutch, but determine if it is a full trace or a sub-trace\n if tilts_guess.shape[0] == nspat:\n # This is full image size tilt trace, sub-window it\n tilts_guess_now = tilts_guess[min_spat:max_spat, iline]\n else:\n # If it is a sub-trace, deal with falling off the image\n if spat_min[iline] < 0:\n tilts_guess_now = tilts_guess[-spat_min[iline]:,iline]\n elif spat_max[iline] > (nspat-1):\n tilts_guess_now = tilts_guess[:-(spat_max[iline] - nspat + 1),iline]\n else:\n tilts_guess_now = tilts_guess[:, iline]\n # Boxcar extract the thismask to have a mask indicating whether a tilt is defined along the spatial direction\n tilts_sub_mask_box = (extract.extract_boxcar(sub_thismask, tilts_guess_now, fwhm/2.0) > 0.99*fwhm)\n # If more than 80% of the pixels are masked, then don't mask at all. This happens when the traces leave the good\n # part of the slit. If we proceed with everything masked the iter_tracefit fitting will crash.\n if (np.sum(tilts_sub_mask_box) < 0.8*nsub):\n tilts_sub_mask_box = np.ones_like(tilts_sub_mask_box)\n # Do iterative flux weighted tracing and polynomial fitting to refine these traces. This must also be done in a loop\n # since the sub image is different for every aperture, i.e. 
each aperature has its own image\n tilts_sub_fit_out, tilts_sub_out, tilts_sub_err_out, tset_out = extract.iter_tracefit(\n sub_img, tilts_guess_now, spat_order, inmask=sub_inmask, trc_inmask = tilts_sub_mask_box, fwhm=fwhm,\n maxdev=maxdev, niter=6, idx=str(iline),show_fits=show_tracefits, xmin=0.0,xmax=float(nsub-1))\n tilts_sub_mask_box = (extract.extract_boxcar(sub_thismask, tilts_sub_fit_out, fwhm/2.0) > 0.99*fwhm)\n if gauss: # If gauss is set, do a Gaussian refinement to the flux weighted tracing\n if (np.sum(tilts_sub_mask_box) < 0.8 * nsub):\n tilts_sub_mask_box = np.ones_like(tilts_sub_mask_box)\n tilts_sub_fit_gw, tilts_sub_gw, tilts_sub_err_gw, tset_gw = extract.iter_tracefit(\n sub_img, tilts_sub_fit_out, spat_order, inmask=sub_inmask, trc_inmask = tilts_sub_mask_box, fwhm=fwhm,\n maxdev=maxdev, niter=3, idx=str(iline),show_fits=show_tracefits, xmin=0.0, xmax=float(nsub-1))\n tilts_sub_fit_out = tilts_sub_fit_gw\n tilts_sub_out = tilts_sub_gw\n tilts_sub_err_out = tilts_sub_err_gw\n tilts_sub_mask_box = (extract.extract_boxcar(sub_thismask, tilts_sub_fit_out, fwhm/2.0) > 0.99*fwhm)\n\n # Pack the results into arrays, accounting for possibly falling off the image\n # Deal with possibly falling off the chip\n\n # This is the same for all cases since it is the evaluation of a fit\n tilts_sub_fit[:, iline] = tset_out.xy(tilts_sub_spat[:, iline].reshape(1, nsub))[1]\n\n # We use the tset_out.xy to evaluate the trace across the whole sub-image even for pixels off the slit. This\n # guarantees that the fits are always evaluated across the whole sub-image which is required for the PCA step.\n if spat_min[iline] < 0:\n #tilts_sub[ -spat_min[iline]:,iline] = tilts_sub_out.flatten()\n #tilts_sub_err[ -spat_min[iline]:,iline] = tilts_sub_err_out.flatten()\n #tilts_sub_mask[ -spat_min[iline]:,iline] = tilts_sub_mask_box.flatten()\n #tilts_sub_dspat[-spat_min[iline]:,iline] = tilts_dspat[min_spat:max_spat,iline]\n tilts[ min_spat:max_spat,iline] = tilts_sub_out.flatten() # tilts_sub[ -spat_min[iline]:,iline]\n tilts_fit[ min_spat:max_spat,iline] = tilts_sub_fit[ -spat_min[iline]:,iline]\n tilts_err[ min_spat:max_spat,iline] = tilts_sub_err_out.flatten() # tilts_sub_err[ -spat_min[iline]:,iline]\n tilts_mask[min_spat:max_spat,iline] = tilts_sub_mask_box.flatten() #tilts_sub_mask[-spat_min[iline]:,iline]\n elif spat_max[iline] > (nspat - 1):\n #tilts_sub[ :-(spat_max[iline] - nspat + 1),iline] = tilts_sub_out.flatten()\n #tilts_sub_err[ :-(spat_max[iline] - nspat + 1),iline] = tilts_sub_err_out.flatten()\n #tilts_sub_mask[ :-(spat_max[iline] - nspat + 1),iline] = tilts_sub_mask_box.flatten()\n #tilts_sub_dspat[:-(spat_max[iline] - nspat + 1),iline] = tilts_dspat[min_spat:max_spat,iline]\n tilts[ min_spat:max_spat,iline] = tilts_sub_out.flatten() # tilts_sub[ :-(spat_max[iline] - nspat + 1),iline]\n tilts_fit[ min_spat:max_spat,iline] = tilts_sub_fit[ :-(spat_max[iline] - nspat + 1),iline]\n tilts_err[ min_spat:max_spat,iline] = tilts_sub_err_out.flatten() # tilts_sub_err[ :-(spat_max[iline] - nspat + 1),iline]\n tilts_mask[min_spat:max_spat,iline] = tilts_sub_mask_box.flatten() #tilts_sub_mask[:-(spat_max[iline] - nspat + 1),iline]\n else:\n #tilts_sub[ :,iline] = tilts_sub_out.flatten()\n #tilts_sub_err[ :,iline] = tilts_sub_err_out.flatten()\n #tilts_sub_mask[ :,iline] = tilts_sub_mask_box.flatten()\n #tilts_sub_dspat[:,iline] = tilts_dspat[min_spat:max_spat,iline]\n tilts[ min_spat:max_spat,iline] = tilts_sub_out.flatten() #tilts_sub[ :,iline]\n tilts_fit[ min_spat:max_spat,iline] = 
tilts_sub_fit[ :,iline]\n tilts_err[ min_spat:max_spat,iline] = tilts_sub_err_out.flatten() # tilts_sub_err[ :,iline]\n tilts_mask[min_spat:max_spat,iline] = tilts_sub_mask_box.flatten() #tilts_sub_mask[:,iline]\n\n # Now use these fits to the traces to get a more robust value of the tilt spectral position and spatial\n # offset from the trace than what was initially determined from the 1d arc line spectrum. This is technically\n # where the slit_cen cross the tilts_fit, but it is a tricky since they are parameterized by different\n # independent variables (slit_cen uses spec_vec, whereas tilts_fit uses spat_vec). This code uses\n # a trick of interpolating the slit_cen onto the arc pixels. If we find it fails, then replace with something\n # simpler that simply iterates to zero on where the two cross.\n\n # ToDO Fix this later with an iterative thing that also updates spatial reference position of the tilt\n #imask = tilts_mask[:, iline]\n #slit_cen_spat_ontilt = np.interp(tilts_fit[imask,iline],spec_vec, slit_cen)\n #delta_spat = slit_cen_spat_ontilt - tilts_spat[imask,iline]\n # Grab the monotonic indices\n #ediff = np.ediff1d(delta_spat,to_begin=0.0)\n #mono_ind = np.sign(ediff) == np.sign(np.median(ediff))\n #zero_cross_spat = (scipy.interpolate.interp1d(delta_spat[mono_ind],(tilts_spat[imask,iline])[mono_ind],assume_sorted=False))(0.0)\n #spec_fit_now = np.interp(zero_cross_spat,tilts_spat[imask,iline], tilts_fit[imask,iline])\n #spat_fit_now = np.interp(spec_fit_now,spec_vec, slit_cen)\n #tilts_spec[:, iline] = np.full(nspat, spec_fit_now)\n\n tilts_dspat[:, iline] = (spat_vec - lines_spat[iline])\n imask = tilts_mask[:,iline]\n try:\n spec_fit_now = np.interp(0.0, tilts_dspat[imask, iline], tilts_fit[imask, iline])\n except ValueError:\n spec_fit_now = lines_spec[iline]\n tilts_spec[:,iline] = np.full(nspat, spec_fit_now)\n\n\n # Create the mask for the bad lines. Define the error on the bad tilt as being the\n bad_mask = (tilts_err > 900) | (tilts_mask == False)\n on_slit = np.sum(tilts_mask,0)\n on_slit_bad = np.sum((tilts_mask & (tilts_err > 900)),0)\n bad_frac = on_slit_bad/on_slit\n\n dev_mean, dev_median, dev_sig = sigma_clipped_stats(np.abs(tilts - tilts_fit), mask=bad_mask, sigma=4.0,axis=0)\n good_line = np.any(bad_mask == False,axis=0) # Is it masked everywhere?\n # Median absolute deviation for each line quantifies the goodnes of tracing\n dev_mad = 1.4826*dev_median\n # Now reject outliers from this distribution\n dev_mad_dist_median = np.median(dev_mad[good_line])\n dev_mad_dist_mad = 1.4826*np.median(np.abs(dev_mad[good_line] - dev_mad_dist_median)) # i.e. this is like the sigma\n # Reject lines that are sigrej trace outliers\n mad_rej = ((dev_mad - dev_mad_dist_median)/dev_mad_dist_mad) < sigrej_trace\n\n # Do we need this dev_mad < maxdev step?\n use_tilt = (mad_rej) & (bad_frac < max_badpix_frac) & good_line & (dev_mad < maxdev)\n nuse = np.sum(use_tilt)\n\n msgs.info('Number of usable arc lines for tilts: {:d}/{:d}'.format(nuse,nlines))\n\n tilts_mad = np.outer(np.ones(nspat),dev_mad)\n\n if (nuse < 0.05*nlines):\n msgs.warn('This slit/order would have too many rejected lines.' 
+ msgs.newline() +\n 'We should be rejecting nuse ={:d} out of nlines = {:d} total lines'.format(nuse,nlines) + msgs.newline() +\n 'We are will proceed without rejecting anything but something is probably very wrong with this slit/order.')\n use_tilt = np.ones(nlines,dtype=bool)\n nuse = nlines\n\n # Tighten it up with Gaussian weighted centroiding\n trc_tilt_dict = dict(nspec = nspec, nspat = nspat, nsub = nsub, nlines = nlines, nuse = nuse,\n spat_min=spat_min, spat_max=spat_max, do_crude=do_crude, fwhm = fwhm, use_tilt=use_tilt,\n tilts_sub_spat=tilts_sub_spat, tilts_sub_fit=tilts_sub_fit,\n tilts_mad = tilts_mad,tilts_spec=tilts_spec, tilts_spat=tilts_spat, tilts_dspat=tilts_dspat,\n tilts=tilts, tilts_fit=tilts_fit, tilts_err=tilts_err, tilts_mask=tilts_mask)\n\n return trc_tilt_dict\n\n\ndef trace_tilts(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=None, gauss=False, fwhm=4.0,spat_order=5, maxdev_tracefit=0.2,\n sigrej_trace=3.0, max_badpix_frac=0.20, tcrude_nave = 5,\n npca = 1, coeff_npoly_pca = 2, sigrej_pca = 2.0,debug_pca = False, show_tracefits=False):\n\n \"\"\"\n Use a PCA model to determine the best object (or slit edge) traces for echelle spectrographs.\n\n Parameters\n ----------\n arcimg: ndarray, float (nspec, nspat)\n Image of arc or sky that will be used for tracing tilts.\n lines_spec: ndarray, float (nlines,)\n Array containing arc line centroids along the center of the slit for each arc line that will be traced. This is\n in pixels in image coordinates.\n lines_spat: ndarray, float (nlines,)\n Array contianing the spatial position of the center of the slit along which the arc was extracted. This is is in\n pixels in image coordinates.\n thismask: ndarray, boolean (nspec, nsapt)\n Boolean mask image specifying the pixels which lie on the slit/order to search for objects on.\n The convention is: True = on the slit/order, False = off the slit/order. This must be the same size as the arcimg.\n Optional Parameters\n -------------------\n inmask: float ndarray, default = None\n Input mask image.\n gauss: bool, default = False\n If true the code will trace the arc lines usign Gaussian weighted centroiding (trace_gweight) instead of the default,\n which is flux weighted centroiding (trace_fweight)\n fwhm: float\n Expected FWHM of the arc lines.\n spat_order: int, default = None\n Order of the legendre polynomial that will be fit to the tilts.\n maxdev_tracefit: float, default = 1.0\n Maximum absolute deviation for the arc tilt fits during iterative trace fitting expressed in units of the fwhm.\n sigrej_trace: float, default = 3.0\n From each line we compute a median absolute deviation of the trace from the polynomial fit. We then\n analyze the distribution of maximxum absolute deviations (MADs) for all the lines, and reject sigrej_trace outliers\n from that distribution.\n max_badpix_frac: float, default = 0.20\n Maximum fraction of total pixels that can be masked by the trace_gweight algorithm\n (because the residuals are too large) to still be usable for tilt fitting.\n tcrude_nave: int, default = 5\n Trace crude is used to determine the initial arc line tilts, which are then iteratively fit. Trace crude\n can optionally boxcar smooth the image (along the spatial direction of the image, i.e. roughly along the arc line tilts)\n to improve the tracing.\n npca: int, default = 1\n Tilts are initially traced and then a PCA is performed. The PCA is used to determine better crutches for a second\n round of improved tilt tracing. 
This parameter is the order of that PCA and determined how much the tilts behavior\n is being compressed. npca = 0 would be just using the mean tilt. This PCA is only an intermediate step to\n improve the crutches and is an attempt to make the tilt tracing that goes into the final fit more robust.\n coeff_npoly_pca: int, default = 1\n Order of polynomial fits used for PCA coefficients fitting for the PCA described above.\n sigrej_pca: float, default = 2.0\n Significance threhsold for rejection of outliers from fits to PCA coefficients for the PCA described above.\n show_tracefits: bool, default = False\n If true the fits will be shown to each arc line trace by iter_fitting.py\n\n\n Returns:\n --------\n \"\"\"\n\n tcrude_maxerr = fwhm/4.0\n tcrude_maxshift = 3.0*fwhm/4.0\n tcrude_maxshift0 = fwhm\n\n trace_dict0 = trace_tilts_work(\n arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=inmask, gauss=gauss, tilts_guess=None, fwhm=fwhm, spat_order=spat_order,\n maxdev_tracefit=maxdev_tracefit,sigrej_trace=sigrej_trace, max_badpix_frac=max_badpix_frac,\n tcrude_maxerr=tcrude_maxerr, tcrude_maxshift=tcrude_maxshift,tcrude_maxshift0=tcrude_maxshift0,\n tcrude_nave=tcrude_nave, show_tracefits=show_tracefits)\n\n # TODO THE PCA may not be necessary. It appears to improve the results though for some instruments where the\n # tracing is problematic. We could consider making this optional to speed things up.\n debug_pca_fit = False\n if debug_pca_fit:\n # !!!! FOR TESTING ONLY!!!! Evaluate the model fit to the tilts for all of our lines\n msgs.info('TESTING: Performing an initial fit before PCA.')\n # JFH Note spec_order is hard wired here as we don't pass it in\n tilt_fit_dict0 = fit_tilts(trace_dict0, spat_order=spat_order, spec_order=6, debug=True,\n maxdev=0.2, sigrej=3.0,doqa=True, setup='test', slit=0, show_QA=True)\n\n\n # Do a PCA fit, which rejects some outliers\n iuse = trace_dict0['use_tilt']\n nuse = np.sum(iuse)\n msgs.info('PCA modeling {:d} good tilts'.format(nuse))\n pca_fit, poly_fit_dict, pca_mean, pca_vectors = extract.pca_trace(\n trace_dict0['tilts_sub_fit'], predict=np.invert(iuse), npca=npca, coeff_npoly=coeff_npoly_pca,\n lower=sigrej_pca, upper=sigrej_pca, order_vec=lines_spec, xinit_mean=lines_spec,\n minv=0.0, maxv=float(trace_dict0['nsub'] - 1), debug=debug_pca)\n\n # Now trace again with the PCA predictions as the starting crutches\n trace_dict1 = trace_tilts_work(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=inmask, gauss=gauss, tilts_guess=pca_fit,\n fwhm=fwhm, spat_order=spat_order, maxdev_tracefit=maxdev_tracefit,sigrej_trace=sigrej_trace,\n max_badpix_frac=max_badpix_frac,show_tracefits=show_tracefits)\n\n return trace_dict1\n\n\ndef fit_tilts(trc_tilt_dict, thismask, slit_cen, spat_order=3, spec_order=4, maxdev = 0.2,\n maxrej = None,\n maxiter = 100, sigrej = 3.0, pad_spec = 30, pad_spat =5,\n func2d='legendre2d', doqa=True, master_key='test',\n slit = 0, show_QA=False, out_dir=None, debug=False):\n \"\"\"\n\n Parameters\n ----------\n trc_tilt_dict: dict\n Diciontary containing tilt info\n\n Optional Parameters\n -------------------\n slit:\n all_tilts:\n order:\n yorder:\n func2D:\n maskval:\n setup:\n doqa:\n show_QA:\n out_dir:\n\n Returns:\n\n \"\"\"\n\n nspec = trc_tilt_dict['nspec']\n nspat = trc_tilt_dict['nspat']\n fwhm = trc_tilt_dict['fwhm']\n maxdev_pix = maxdev*fwhm\n xnspecmin1 = float(nspec-1)\n xnspatmin1 = float(nspat-1)\n nspat = trc_tilt_dict['nspat']\n use_tilt = trc_tilt_dict['use_tilt'] # mask for good/bad tilts, based on 
aggregate fit, frac good pixels\n nuse = np.sum(use_tilt)\n tilts = trc_tilt_dict['tilts'] # legendre polynomial fit\n #JFH Before we were fitting the fits. Now we fit the actual flux weighted centroided tilts.\n tilts_err = trc_tilt_dict['tilts_err'] # flux weighted centroidding error\n tilts_dspat = trc_tilt_dict['tilts_dspat'] # spatial offset from the central trace\n #tilts_spat = trc_tilt_dict['tilts_dspat'][:,use_tilt] # spatial offset from the central trace\n tilts_spec = trc_tilt_dict['tilts_spec'] # line spectral pixel position from legendre fit evaluated at slit center\n tilts_mask = trc_tilt_dict['tilts_mask'] # Reflects if trace is on the slit\n tilts_mad = trc_tilt_dict['tilts_mad'] # quantitfies aggregate error of this tilt\n\n use_mask = np.outer(np.ones(nspat,dtype=bool),use_tilt)\n tot_mask = tilts_mask & (tilts_err < 900) & use_mask\n fitxy = [spec_order, spat_order]\n\n # Fit the inverted model with a 2D polynomial\n msgs.info(\"Fitting tilts with a low order, 2D {:s}\".format(func2d))\n\n adderr = 0.03\n tilts_sigma = ((tilts_mad < 100.0) & (tilts_mad > 0.0))*np.sqrt(np.abs(tilts_mad)**2 + adderr**2)\n\n fitmask, coeff2 = utils.robust_polyfit_djs(tilts_spec.flatten()/xnspecmin1, (tilts.flatten() - tilts_spec.flatten())/xnspecmin1,\n fitxy, x2=tilts_dspat.flatten()/xnspatmin1, inmask = tot_mask.flatten(),\n sigma=tilts_sigma.flatten()/xnspecmin1,\n function=func2d, maxiter=maxiter, lower=sigrej, upper=sigrej,\n maxdev=maxdev_pix/xnspecmin1,minx=-0.0, maxx=1.0, minx2=-1.0, maxx2=1.0,\n use_mad=False, sticky=False)\n fitmask = fitmask.reshape(tilts_dspat.shape)\n # Compute a rejection mask that we will use later. These are locations that were fit but were rejected\n rej_mask = tot_mask & np.invert(fitmask)\n # Compute and store the 2d tilts fit\n delta_tilt_1 = xnspecmin1*utils.func_val(coeff2, tilts_spec[tilts_mask]/xnspecmin1, func2d, x2=tilts_dspat[tilts_mask]/xnspatmin1,\n minx=0.0, maxx=1.0, minx2=-1.0, maxx2=1.0)\n delta_tilt = np.zeros_like(tilts_dspat)\n tilts_2dfit = np.zeros_like(tilts_dspat)\n delta_tilt[tilts_mask] = delta_tilt_1\n tilts_2dfit[tilts_mask] = tilts_spec[tilts_mask] + delta_tilt[tilts_mask]\n # Add the 2d fit to the tracetilt dictionary\n trc_tilt_dict_out = copy.deepcopy(trc_tilt_dict)\n trc_tilt_dict_out['tilt_2dfit'] = tilts_2dfit\n\n # Report the residuals in pixels\n res_fit = tilts[fitmask] - tilts_2dfit[fitmask]\n rms_fit = np.std(res_fit)\n msgs.info(\"Residuals: 2D Legendre Fit\")\n msgs.info(\"RMS (pixels): {}\".format(rms_fit))\n msgs.info(\"RMS/FWHM: {}\".format(rms_fit/fwhm))\n\n msgs.info('Inverting the fit to generate the tilts image')\n spec_vec = np.arange(nspec)\n spat_vec = np.arange(nspat)\n spat_img, spec_img = np.meshgrid(spat_vec, spec_vec)\n # We do some padding here to guarantee that the tilts arc lines falling off the image get tilted onto the image\n spec_vec_pad = np.arange(-pad_spec, nspec + pad_spec)\n spat_vec_pad = np.arange(-pad_spat, nspat + pad_spat)\n spat_img_pad, spec_img_pad = np.meshgrid(np.arange(-pad_spat, nspat + pad_spat),np.arange(-pad_spec, nspec + pad_spec))\n slit_cen_pad = (scipy.interpolate.interp1d(spec_vec, slit_cen, bounds_error=False, fill_value='extrapolate'))(spec_vec_pad)\n thismask_pad = np.zeros_like(spec_img_pad, dtype=bool)\n ind_spec, ind_spat = np.where(thismask)\n slit_cen_img_pad = np.outer(slit_cen_pad, np.ones(nspat + 2 * pad_spat)) # center of the slit replicated spatially\n # Normalized spatial offset image (from central trace)\n dspat_img_nrm = (spat_img_pad - slit_cen_img_pad) 
/ xnspatmin1\n # normalized spec image\n spec_img_nrm = spec_img_pad / xnspecmin1\n # Embed the old thismask in the new larger padded thismask\n thismask_pad[ind_spec + pad_spec, ind_spat + pad_spat] = thismask[ind_spec, ind_spat]\n # Now grow the thismask_pad\n kernel = np.ones((2*pad_spec, 2*pad_spat)) / float(4 * pad_spec * pad_spat)\n thismask_grow = scipy.ndimage.convolve(thismask_pad.astype(float), kernel, mode='nearest') > 0.0\n # Evaluate the tilts on the padded image grid\n tiltpix = spec_img_pad[thismask_grow] + xnspecmin1 * utils.func_val(coeff2, spec_img_nrm[thismask_grow], func2d,\n x2=dspat_img_nrm[thismask_grow],\n minx=0.0, maxx=1.0, minx2=-1.0, maxx2=1.0)\n # Now do one last fit to invert the function above to obtain the final tilts model in normalized image coordinates\n inmask = np.isfinite(tiltpix)\n sigma = np.full_like(spec_img_pad, 10.0)\n # JFH What I find confusing is that this last fit was actually what Burles was doing on the raw tilts, so why was that failing?\n fitmask_tilts, coeff2_tilts = utils.robust_polyfit_djs(tiltpix/xnspecmin1, spec_img_pad[thismask_grow]/xnspecmin1,\n fitxy, x2=spat_img_pad[thismask_grow]/xnspatmin1,\n sigma=sigma[thismask_grow]/xnspecmin1,\n upper=5.0, lower=5.0, maxdev=10.0/xnspecmin1,\n inmask=inmask, function=func2d, maxiter=20,\n minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0, use_mad=False)\n irej = np.invert(fitmask_tilts) & inmask\n msgs.info('Rejected {:d}/{:d} pixels in final inversion tilts image fit'.format(np.sum(irej),np.sum(inmask)))\n # normalized tilts image\n #tilts_img = utils.func_val(coeff2_tilts, spec_img/xnspecmin1, func2d, x2=spat_img/xnspatmin1,minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0)\n #tilts_img = np.fmax(np.fmin(tilts_img, 1.2),-0.2)\n\n tilt_fit_dict = dict(nspec = nspec, nspat = nspat, ngood_lines=np.sum(use_tilt), npix_fit = np.sum(tot_mask),\n npix_rej = np.sum(fitmask == False), coeff2=coeff2_tilts, spec_order = spec_order, spat_order = spat_order,\n minx = 0.0, maxx = 1.0, minx2 = 0.0, maxx2 = 1.0, func=func2d)\n\n # Now do some QA\n if doqa:\n plot_tilt_2d(tilts_dspat, tilts, tilts_2dfit, tot_mask, rej_mask, spat_order, spec_order, rms_fit, fwhm,\n slit=slit, setup=master_key, show_QA=show_QA, out_dir=out_dir)\n plot_tilt_spat(tilts_dspat, tilts, tilts_2dfit, tilts_spec, tot_mask, rej_mask, spat_order, spec_order, rms_fit, fwhm,\n slit=slit, setup=master_key, show_QA=show_QA, out_dir=out_dir)\n plot_tilt_spec(tilts_spec, tilts, tilts_2dfit, tot_mask, rej_mask, rms_fit, fwhm, slit=slit,\n setup = master_key, show_QA=show_QA, out_dir=out_dir)\n\n return tilt_fit_dict, trc_tilt_dict_out\n\n\n #fitmask, coeff2 = fit_tilts_rej(\n # tilts_dspat, tilts_spec_fit, tilts, tilts_invvar, tot_mask, slit_cen, spat_order, spec_order,\n # maxdev = 1.0, maxrej=maxrej, sigrej = sigrej, maxiter = maxiter)\n\n\n #result = optimize.minimize(fit_tilts_func, coeff2.flatten(), tol=0.01, args=(tilts_dspat, tilts_spec_fit, tilts, tilts_invvar,\n # tot_mask, fitmask, slit_cen, spat_order, spec_order))\n #bounds = [(i,j) for i,j in zip(0.8*coeff2.flatten(),1.2*coeff2.flatten())]\n #result_df = optimize.differential_evolution(\n # fit_tilts_func, bounds, tol=0.01, disp=True, polish=True,\n # args=(tilts_dspat, tilts_spec_fit, tilts, tilts_invvar,tot_mask, fitmask, slit_cen, spat_order, spec_order))\n\n\n\n # This is a second way of computing the 2d fit at the tilts. Do this just for now as a consistency check that our tilts_img is okay\n\n # Testing. 
For the moment do this on exactly the same set of lines\n #tilt_2dfit_piximg_all = eval_2d_at_tilts(trc_tilt_dict['tilts_spec'], trc_tilt_dict['tilts_mask'], trc_tilt_dict['tilts'] (nspec, nspat), thismask, slit_cen, coeff2, func2d)\n #tilts_2dfit_piximg = tilt_2dfit_piximg_all[:, use_tilt]\n #tilts_2dfit_piximg = eval_2d_at_tilts(tilts_spec, tilts_mask, (nspec, nspat), thismask, slit_cen, coeff2, func2d)\n\n # Actual 2D Model Tilt Residuals\n #res_real = tilts[fitmask] - tilts_2dfit_piximg[fitmask]\n #rms_real = np.std(res_real)\n #msgs.info(\"Residuals: Actual 2D Tilt Residuals from piximg\")\n #msgs.info(\"RMS (pixels): {}\".format(rms_real))\n #msgs.info(\"RMS/FWHM: {}\".format(rms_real/fwhm))\n\n\n\n\ndef fit2tilts(shape, coeff2, func2d):\n \"\"\"\n\n Parameters\n ----------\n shape: tuple of ints,\n shape of image\n coeff2: ndarray, float\n result of griddata tilt fit\n func2d: str\n the 2d function used to fit the tilts\n Returns\n -------\n tilts: ndarray, float\n Image indicating how spectral pixel locations move across the image. This output is used in the pipeline.\n \"\"\"\n\n # Compute the tilts image\n nspec, nspat = shape\n xnspecmin1 = float(nspec-1)\n xnspatmin1 = float(nspat-1)\n spec_vec = np.arange(nspec)\n spat_vec = np.arange(nspat)\n spat_img, spec_img = np.meshgrid(spat_vec, spec_vec)\n tilts = utils.func_val(coeff2, spec_img/xnspecmin1, func2d, x2=spat_img/xnspatmin1, minx=0.0, maxx=1.0, minx2=0.0, maxx2=1.0)\n # Added this to ensure that tilts are never crazy values due to extrapolation of fits which can break\n # wavelength solution fitting\n tilts = np.fmax(np.fmin(tilts, 1.2),-0.2)\n return tilts\n\n\n\n\ndef plot_tilt_2d(tilts_dspat, tilts, tilts_model, tot_mask, rej_mask, spat_order, spec_order, rms, fwhm,\n slit=0, setup='A', outfile=None, show_QA=False, out_dir=None):\n\n\n plt.rcdefaults()\n plt.rcParams['font.family']= 'Helvetica'\n\n # Outfile\n method = inspect.stack()[0][3]\n if (outfile is None):\n outfile = qa.set_qa_filename(setup, method, slit=slit, out_dir=out_dir)\n\n\n # Show the fit\n fig, ax = plt.subplots(figsize=(12, 18))\n ax.cla()\n ax.plot(tilts_dspat[tot_mask], tilts[tot_mask], color='black', linestyle=' ', mfc='None', marker='o',\n markersize=9.0, markeredgewidth=1.0, zorder=4, label='Good Tilt')\n ax.plot(tilts_dspat[rej_mask], tilts[rej_mask], color='red', linestyle=' ', mfc='None', marker='o',\n markersize=9.0, markeredgewidth=2.0, zorder=5, label='Rejected')\n ax.plot(tilts_dspat[tot_mask], tilts_model[tot_mask], color='black', linestyle=' ', marker='o',\n markersize=2.0, markeredgewidth=1.0, zorder=1, label='2D Model')\n\n xmin = 1.1 * tilts_dspat[tot_mask].min()\n xmax = 1.1 * tilts_dspat[tot_mask].max()\n ax.set_xlim((xmin, xmax))\n ax.set_xlabel('Spatial Offset from Central Trace (pixels)', fontsize=15)\n ax.set_ylabel('Spectral Pixel', fontsize=15)\n ax.legend()\n ax.set_title('Tilts vs Fit (spat_order, spec_order)=({:d},{:d}) for slit={:d}: RMS = {:5.3f}, '\n 'RMS/FWHM={:5.3f}'.format(spat_order, spec_order, slit, rms, rms / fwhm), fontsize=15)\n\n # Finish\n #plt.tight_layout(pad=1.0, h_pad=1.0, w_pad=1.0)\n\n if outfile is not None:\n plt.savefig(outfile, dpi=400)\n\n if show_QA:\n plt.show()\n\n plt.close()\n plt.rcdefaults()\n\n\n\ndef plot_tilt_spec(tilts_spec_fit, tilts, tilts_model, tot_mask, rej_mask, rms, fwhm,\n slit=0, setup = 'A', outfile=None, show_QA=False, out_dir=None):\n \"\"\" Generate a QA plot of the residuals for the fit to the tilts in the spectral direction one slit at a time\n\n Parameters\n 
----------\n \"\"\"\n\n plt.rcdefaults()\n plt.rcParams['font.family']= 'Helvetica'\n\n # Outfil\n method = inspect.stack()[0][3]\n if (outfile is None):\n outfile = qa.set_qa_filename(setup, method, slit=slit, out_dir=out_dir)\n\n # Setup\n plt.figure(figsize=(14, 6))\n plt.clf()\n ax = plt.gca()\n\n # Scatter plot\n res = (tilts - tilts_model)\n\n nspat, nuse = tilts.shape\n # Show the fit residuals as a function of spatial position\n line_indx = np.outer(np.ones(nspat), np.arange(nuse))\n\n xmin = 0.90*(tilts_spec_fit.min())\n xmax = 1.10*(tilts_spec_fit.max())\n\n ax.hlines(0.0, xmin, xmax,linestyle='--', color='green')\n\n for iline in range(nuse):\n iall = (line_indx == iline) & tot_mask\n igd = (line_indx == iline) & tot_mask & (rej_mask == False)\n irej = (line_indx == iline) & tot_mask & rej_mask\n\n ax.plot(tilts_spec_fit[igd], (res[igd]), 'ko', mfc='k', markersize=4.0)\n ax.plot(tilts_spec_fit[irej],(res[irej]), 'ro', mfc='r', markersize=4.0)\n # Compute the RMS for this line\n all_rms = np.std(res[iall])\n good_rms = np.std(res[igd])\n # ToDo show the mean here as well\n if np.any(igd):\n ax.plot(tilts_spec_fit[igd][0], all_rms, marker='s',linestyle=' ', color='g', mfc='g', markersize=7.0)\n ax.plot(tilts_spec_fit[igd][0], good_rms, marker='^', linestyle=' ', color='orange', mfc='orange', markersize=7.0)\n\n ax.text(0.90, 0.90, 'Slit {:d}: Residual (pixels) = {:0.5f}'.format(slit, rms),\n transform=ax.transAxes, size='large', ha='right', color='black',fontsize=16)\n ax.text(0.90, 0.80, ' Slit {:d}: RMS/FWHM = {:0.5f}'.format(slit, rms/fwhm),\n transform=ax.transAxes, size='large', ha='right', color='black',fontsize=16)\n # Label\n ax.set_xlabel('Spectral Pixel')\n ax.set_ylabel('RMS (pixels)')\n ax.set_title('RMS of Each Arc Line Traced')\n ax.set_xlim((xmin,xmax))\n ax.set_ylim((-5.0*rms,5.0*rms))\n # Legend\n legend_elements = [Line2D([0], [0],linestyle=' ', color='k', marker='o', mfc='k', markersize=4.0, label='good'),\n Line2D([0], [0], linestyle=' ', color='r', marker='o', mfc='r', markersize=4.0, label='rejected'),\n Line2D([0], [0], linestyle=' ', color='g', marker='s', mfc='g', markersize=7.0, label='all RMS'),\n Line2D([0], [0], linestyle=' ', color='orange', marker='^', mfc='orange', markersize=7.0, label='good RMS')]\n ax.legend(handles=legend_elements)\n\n # Finish\n plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)\n\n if outfile is not None:\n plt.savefig(outfile, dpi=400)\n\n if show_QA:\n plt.show()\n\n plt.close()\n plt.rcdefaults()\n\n\n\ndef plot_tilt_spat(tilts_dspat, tilts, tilts_model, tilts_spec_fit, tot_mask, rej_mask,spat_order, spec_order, rms, fwhm,\n setup='A', slit=0, outfile=None, show_QA=False, out_dir=None):\n\n\n import matplotlib as mpl\n from matplotlib.lines import Line2D\n\n plt.rcdefaults()\n plt.rcParams['font.family']= 'Helvetica'\n\n # Outfil\n method = inspect.stack()[0][3]\n if (outfile is None):\n outfile = qa.set_qa_filename(setup, method, slit=slit, out_dir=out_dir)\n\n nspat, nuse = tilts_dspat.shape\n # Show the fit residuals as a function of spatial position\n line_indx = np.outer(np.ones(nspat), np.arange(nuse))\n lines_spec = tilts_spec_fit[0, :]\n cmap = mpl.cm.get_cmap('coolwarm', nuse)\n\n fig, ax = plt.subplots(figsize=(14, 12))\n # dummy mappable shows the spectral pixel\n dummie_cax = ax.scatter(lines_spec, lines_spec, c=lines_spec, cmap=cmap)\n ax.cla()\n\n for iline in range(nuse):\n iall = (line_indx == iline) & tot_mask\n irej = (line_indx == iline) & tot_mask & rej_mask\n this_color = cmap(iline)\n # plot the 
residuals\n ax.plot(tilts_dspat[iall], tilts[iall] - tilts_model[iall], color=this_color,\n linestyle='-', linewidth=3.0, marker='None', alpha=0.5)\n ax.plot(tilts_dspat[irej], tilts[irej] - tilts_model[irej], linestyle=' ',\n marker='o', color='limegreen', mfc='limegreen', markersize=5.0)\n\n xmin = 1.1 * tilts_dspat[tot_mask].min()\n xmax = 1.1 * tilts_dspat[tot_mask].max()\n ax.hlines(0.0, xmin, xmax, linestyle='--', linewidth=2.0, color='k', zorder=10)\n\n ax.set_xlim((xmin, xmax))\n ax.set_xlabel('Spatial Offset from Central Trace (pixels)')\n ax.set_ylabel('Arc Line Tilt Residual (pixels)')\n\n legend_elements = [Line2D([0], [0], color='cornflowerblue', linestyle='-', linewidth=3.0, label='residual'),\n Line2D([0], [0], color='limegreen', linestyle=' ', marker='o', mfc='limegreen', markersize=7.0,\n label='rejected')]\n ax.legend(handles=legend_elements)\n ax.set_title('Tilts vs Fit (spat_order, spec_order)=({:d},{:d}) for slit={:d}: RMS = {:5.3f}, '\n 'RMS/FWHM={:5.3f}'.format(spat_order, spec_order, slit, rms, rms / fwhm), fontsize=15)\n cb = fig.colorbar(dummie_cax, ticks=lines_spec)\n cb.set_label('Spectral Pixel')\n\n # Finish\n plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)\n\n if outfile is not None:\n plt.savefig(outfile, dpi=400)\n\n if show_QA:\n plt.show()\n\n plt.close()\n plt.rcdefaults()\n", "\"\"\"\nModule for guiding 1D Wavelength Calibration\n\"\"\"\nimport os\nimport copy\nimport inspect\n\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\n\nfrom pypeit import msgs\nfrom pypeit import masterframe\nfrom pypeit.core import arc, qa, pixels\nfrom pypeit.core.wavecal import autoid, waveio\nfrom pypeit.core import trace_slits\n\nfrom pypeit import debugger\nfrom IPython import embed\n\nclass WaveCalib(masterframe.MasterFrame):\n \"\"\"\n Class to guide wavelength calibration\n\n Args:\n msarc (np.ndarray or None): Arc image, created by the ArcImage class\n tslits_dict (dict or None): TraceSlits dict\n spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph` or None):\n The `Spectrograph` instance that sets the\n instrument used to take the observations. 
Used to set\n :attr:`spectrograph`.\n par (:class:`pypeit.par.pypeitpar.WaveSolutionPar` or None):\n The parameters used for the wavelength solution\n binspectral (int, optional): Binning of the Arc in the spectral dimension\n det (int, optional): Detector number\n master_key (str, optional)\n master_dir (str, optional): Path to master frames\n reuse_masters (bool, optional): Load from disk if possible\n qa_path (str, optional): For QA\n msbpm (ndarray, optional): Bad pixel mask image\n\n Attributes:\n frametype : str\n Hard-coded to 'wv_calib'\n steps : list\n List of the processing steps performed\n wv_calib : dict\n Primary output\n Keys\n 0, 1, 2, 3 -- Solution for individual slit\n steps\n arccen (ndarray): (nwave, nslit) Extracted arc(s) down the center of the slit(s)\n maskslits : ndarray (nslit); bool\n Slits to ignore because they were not extracted\n WARNING: Outside of this Class, it is best to regenerate\n the mask using make_maskslits()\n \"\"\"\n\n # Frametype is a class attribute\n frametype = 'wv_calib'\n master_type = 'WaveCalib'\n\n def __init__(self, msarc, tslits_dict, spectrograph, par, binspectral=None, det=1,\n master_key=None, master_dir=None, reuse_masters=False, qa_path=None,\n msbpm=None):\n\n # MasterFrame\n masterframe.MasterFrame.__init__(self, self.master_type, master_dir=master_dir,\n master_key=master_key, file_format='json',\n reuse_masters=reuse_masters)\n\n # Required parameters (but can be None)\n self.msarc = msarc\n self.tslits_dict = tslits_dict\n self.spectrograph = spectrograph\n self.par = par\n\n # Optional parameters\n self.bpm = msbpm\n self.binspectral = binspectral\n self.qa_path = qa_path\n self.det = det\n self.master_key = master_key\n\n # Attributes\n self.steps = [] # steps executed\n self.wv_calib = {} # main output\n self.arccen = None # central arc spectrum\n\n # --------------------------------------------------------------\n # TODO: Build another base class that does these things for both\n # WaveTilts and WaveCalib?\n\n # Get the non-linear count level\n self.nonlinear_counts = 1e10 if self.spectrograph is None \\\n else self.spectrograph.nonlinear_counts(det=self.det)\n\n # Set the slitmask and slit boundary related attributes that the\n # code needs for execution. 
This also deals with arcimages that\n # have a different binning then the trace images used to defined\n # the slits\n if self.tslits_dict is not None and self.msarc is not None:\n self.slitmask_science = pixels.tslits2mask(self.tslits_dict)\n inmask = self.bpm == 0 if self.bpm is not None \\\n else np.ones_like(self.slitmask_science, dtype=bool)\n self.shape_science = self.slitmask_science.shape\n self.shape_arc = self.msarc.shape\n self.nslits = self.tslits_dict['slit_left'].shape[1]\n self.slit_left = arc.resize_slits2arc(self.shape_arc, self.shape_science,\n self.tslits_dict['slit_left'])\n self.slit_righ = arc.resize_slits2arc(self.shape_arc, self.shape_science,\n self.tslits_dict['slit_righ'])\n self.slitcen = arc.resize_slits2arc(self.shape_arc, self.shape_science,\n self.tslits_dict['slitcen'])\n self.slitmask = arc.resize_mask2arc(self.shape_arc, self.slitmask_science)\n self.inmask = arc.resize_mask2arc(self.shape_arc, inmask)\n # TODO: Remove the following two lines if deemed ok\n if self.par['method'] != 'full_template':\n self.inmask &= self.msarc < self.nonlinear_counts\n self.slit_spat_pos = trace_slits.slit_spat_pos(self.tslits_dict)\n else:\n self.slitmask_science = None\n self.shape_science = None\n self.shape_arc = None\n self.nslits = 0\n self.slit_left = None\n self.slit_righ = None\n self.slitcen = None\n self.slitmask = None\n self.inmask = None\n # --------------------------------------------------------------\n\n def build_wv_calib(self, arccen, method, skip_QA=False):\n \"\"\"\n Main routine to generate the wavelength solutions in a loop over slits\n Wrapper to arc.simple_calib or arc.calib_with_arclines\n\n self.maskslits is updated for slits that fail\n\n Args:\n method : str\n 'simple' -- arc.simple_calib\n 'arclines' -- arc.calib_with_arclines\n 'holy-grail' -- wavecal.autoid.HolyGrail\n 'reidentify' -- wavecal.auotid.ArchiveReid\n 'full_template' -- wavecal.auotid.full_template\n skip_QA (bool, optional)\n\n Returns:\n dict: self.wv_calib\n \"\"\"\n # Obtain a list of good slits\n ok_mask = np.where(~self.maskslits)[0]\n\n # Obtain calibration for all slits\n if method == 'simple':\n lines = self.par['lamps']\n line_lists = waveio.load_line_lists(lines)\n\n self.wv_calib = arc.simple_calib_driver(self.msarc, line_lists, arccen, ok_mask,\n nfitpix=self.par['nfitpix'],\n IDpixels=self.par['IDpixels'],\n IDwaves=self.par['IDwaves'])\n elif method == 'semi-brute':\n # TODO: THIS IS CURRENTLY BROKEN\n debugger.set_trace()\n final_fit = {}\n for slit in ok_mask:\n # HACKS BY JXP\n self.par['wv_cen'] = 8670.\n self.par['disp'] = 1.524\n # ToDO remove these hacks and use the parset in semi_brute\n best_dict, ifinal_fit \\\n = autoid.semi_brute(arccen[:, slit], self.par['lamps'], self.par['wv_cen'],\n self.par['disp'], match_toler=self.par['match_toler'],\n func=self.par['func'], n_first=self.par['n_first'],\n sigrej_first=self.par['n_first'],\n n_final=self.par['n_final'],\n sigrej_final=self.par['sigrej_final'],\n sigdetect=self.par['sigdetect'],\n nonlinear_counts= self.nonlinear_counts)\n final_fit[str(slit)] = ifinal_fit.copy()\n elif method == 'basic':\n final_fit = {}\n for slit in ok_mask:\n status, ngd_match, match_idx, scores, ifinal_fit = \\\n autoid.basic(arccen[:, slit], self.par['lamps'], self.par['wv_cen'],\n self.par['disp'], nonlinear_counts=self.nonlinear_counts)\n final_fit[str(slit)] = ifinal_fit.copy()\n if status != 1:\n self.maskslits[slit] = True\n elif method == 'holy-grail':\n # Sometimes works, sometimes fails\n arcfitter = 
autoid.HolyGrail(arccen, par=self.par, ok_mask=ok_mask)\n patt_dict, final_fit = arcfitter.get_results()\n elif method == 'reidentify':\n # Now preferred\n # Slit positions\n arcfitter = autoid.ArchiveReid(arccen, self.spectrograph, self.par, ok_mask=ok_mask,\n slit_spat_pos=self.slit_spat_pos)\n patt_dict, final_fit = arcfitter.get_results()\n elif method == 'full_template':\n # Now preferred\n if self.binspectral is None:\n msgs.error(\"You must specify binspectral for the full_template method!\")\n final_fit = autoid.full_template(arccen, self.par, ok_mask, self.det, self.binspectral,\n nsnippet=self.par['nsnippet'])\n else:\n msgs.error('Unrecognized wavelength calibration method: {:}'.format(method))\n\n self.wv_calib = final_fit\n\n # Remake mask (*mainly for the QA that follows*)\n self.maskslits = self.make_maskslits(len(self.maskslits))\n ok_mask = np.where(~self.maskslits)[0]\n\n # QA\n if not skip_QA:\n for slit in ok_mask:\n outfile = qa.set_qa_filename(self.master_key, 'arc_fit_qa', slit=slit,\n out_dir=self.qa_path)\n autoid.arc_fit_qa(self.wv_calib[str(slit)], outfile = outfile)\n\n # Return\n self.steps.append(inspect.stack()[0][3])\n return self.wv_calib\n\n def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False):\n \"\"\"\n Evaluate 2-d wavelength solution for echelle data. Unpacks\n wv_calib for slits to be input into arc.fit2darc\n\n Args:\n wv_calib (dict): Wavelength calibration\n debug (bool, optional): Show debugging info\n skip_QA (bool, optional): Skip QA\n\n Returns:\n dict: dictionary containing information from 2-d fit\n\n \"\"\"\n msgs.info('Fitting 2-d wavelength solution for echelle....')\n all_wave = np.array([], dtype=float)\n all_pixel = np.array([], dtype=float)\n all_order = np.array([],dtype=float)\n\n # Obtain a list of good slits\n ok_mask = np.where(~self.maskslits)[0]\n nspec = self.msarc.shape[0]\n for islit in wv_calib.keys():\n if int(islit) not in ok_mask:\n continue\n iorder = self.spectrograph.slit2order(self.slit_spat_pos[int(islit)])\n mask_now = wv_calib[islit]['mask']\n all_wave = np.append(all_wave, wv_calib[islit]['wave_fit'][mask_now])\n all_pixel = np.append(all_pixel, wv_calib[islit]['pixel_fit'][mask_now])\n all_order = np.append(all_order, np.full_like(wv_calib[islit]['pixel_fit'][mask_now],\n float(iorder)))\n\n # Fit\n fit2d_dict = arc.fit2darc(all_wave, all_pixel, all_order, nspec,\n nspec_coeff=self.par['ech_nspec_coeff'],\n norder_coeff=self.par['ech_norder_coeff'],\n sigrej=self.par['ech_sigrej'], debug=debug)\n\n self.steps.append(inspect.stack()[0][3])\n\n # QA\n if not skip_QA:\n outfile_global = qa.set_qa_filename(self.master_key, 'arc_fit2d_global_qa',\n out_dir=self.qa_path)\n arc.fit2darc_global_qa(fit2d_dict, outfile=outfile_global)\n outfile_orders = qa.set_qa_filename(self.master_key, 'arc_fit2d_orders_qa',\n out_dir=self.qa_path)\n arc.fit2darc_orders_qa(fit2d_dict, outfile=outfile_orders)\n\n return fit2d_dict\n\n # TODO: JFH this method is identical to the code in wavetilts.\n # SHould we make it a separate function?\n def extract_arcs(self, slitcen, slitmask, msarc, inmask):\n \"\"\"\n Extract the arcs down each slit/order\n\n Wrapper to arc.get_censpec()\n\n Returns\n -------\n (self.arccen, self.arc_maskslit_\n self.arccen: ndarray, (nspec, nslit)\n arc spectrum for all slits\n self.arc_maskslit: ndarray, bool (nsit)\n boolean array containing a mask indicating which slits are good\n\n \"\"\"\n # Full template kludge\n if self.par['method'] == 'full_template':\n nonlinear = 1e10\n else:\n nonlinear = 
self.nonlinear_counts\n # Do it\n # TODO: Consider *not* passing in nonlinear_counts; Probably\n # should not mask saturated lines at this stage\n arccen, arc_maskslit = arc.get_censpec(slitcen, slitmask, msarc, inmask=inmask,\n nonlinear_counts=nonlinear)\n # Step\n self.steps.append(inspect.stack()[0][3])\n return arccen, arc_maskslit\n\n def save(self, outfile=None, overwrite=True):\n \"\"\"\n Save the wavelength calibration data to a master frame.\n\n This is largely a wrapper for\n :func:`pypeit.core.wavecal.waveio.save_wavelength_calibration`.\n\n Args:\n outfile (:obj:`str`, optional):\n Name for the output file. Defaults to\n :attr:`file_path`.\n overwrite (:obj:`bool`, optional):\n Overwrite any existing file.\n \"\"\"\n _outfile = self.file_path if outfile is None else outfile\n # Check if it exists\n if os.path.exists(_outfile) and not overwrite:\n msgs.warn('Master file exists: {0}'.format(_outfile) + msgs.newline()\n + 'Set overwrite=True to overwrite it.')\n return\n\n # Report and save\n waveio.save_wavelength_calibration(_outfile, self.wv_calib, overwrite=overwrite)\n msgs.info('Master frame written to {0}'.format(_outfile))\n\n def load(self, ifile=None):\n \"\"\"\n Load a full (all slit) wavelength calibration.\n\n This is largely a wrapper for\n :func:`pypeit.core.wavecal.waveio.load_wavelength_calibration`.\n\n Args:\n ifile (:obj:`str`, optional):\n Name of the master frame file. Defaults to\n :attr:`file_path`.\n\n Returns:\n dict or None: self.wv_calib\n \"\"\"\n if not self.reuse_masters:\n # User does not want to load masters\n msgs.warn('PypeIt will not reuse masters!')\n return None\n\n # Check the input path\n _ifile = self.file_path if ifile is None else ifile\n\n if not os.path.isfile(_ifile):\n # Master file doesn't exist\n msgs.warn('No Master {0} frame found: {1}'.format(self.master_type, self.file_path))\n return None\n\n # Read, save it to self, return\n # TODO: Need to save it to self?\n msgs.info('Loading Master {0} frame: {1}'.format(self.master_type, _ifile))\n self.wv_calib = waveio.load_wavelength_calibration(_ifile)\n return self.wv_calib\n\n @staticmethod\n def load_from_file(filename):\n \"\"\"\n Load a full (all slit) wavelength calibration.\n\n This simply executes\n :func:`pypeit.core.wavecal.waveio.load_wavelength_calibration`.\n\n Args:\n filename (:obj:`str`):\n Name of the master frame file.\n\n Returns:\n dict: The wavelength calibration data.\n \"\"\"\n return waveio.load_wavelength_calibration(filename)\n\n def make_maskslits(self, nslit):\n \"\"\"\n (re)Generate the mask for wv_calib based on its contents\n This is the safest way to go...\n\n Args:\n nslit (int): Number of slits/orders\n\n Returns:\n ndarray: self.maskslits, boolean array -- True = masked, i.e. do not use\n\n \"\"\"\n # Set mask based on wv_calib\n mask = np.array([True]*nslit)\n for key in self.wv_calib.keys():\n if key in ['steps', 'par', 'fit2d']:\n continue\n if (self.wv_calib[key] is not None) and (len(self.wv_calib[key]) > 0):\n mask[int(key)] = False\n self.maskslits = mask\n return self.maskslits\n\n def run(self, skip_QA=False, debug=False):\n \"\"\"\n Main driver for wavelength calibration\n\n Code flow:\n 1. Extract 1D arc spectra down the center of each slit/order\n 2. Load the parameters guiding wavelength calibration\n 3. Generate the 1D wavelength fits\n 4. 
Generate a mask\n\n Args:\n skip_QA : bool, optional\n\n Returns:\n dict, ndarray: wv_calib dict and maskslits bool array\n\n \"\"\"\n ###############\n # Extract an arc down each slit\n self.arccen, self.maskslits = self.extract_arcs(self.slitcen, self.slitmask, self.msarc,\n self.inmask)\n\n # Fill up the calibrations and generate QA\n self.wv_calib = self.build_wv_calib(self.arccen, self.par['method'], skip_QA=skip_QA)\n\n # Return\n if self.par['echelle'] is True:\n fit2d_dict = self.echelle_2dfit(self.wv_calib, skip_QA = skip_QA, debug=debug)\n self.wv_calib['fit2d'] = fit2d_dict\n\n # Build mask\n self.make_maskslits(self.nslits)\n\n # Pack up\n self.wv_calib['steps'] = self.steps\n sv_par = self.par.data.copy()\n self.wv_calib['par'] = sv_par\n\n return self.wv_calib, self.maskslits\n\n def show(self, item, slit=None):\n \"\"\"\n Show one of the class internals\n\n Args:\n item (str):\n 'spec' -- Show the fitted points and solution; requires slit\n 'fit' -- Show fit QA; requires slit\n slit (int, optional):\n\n Returns:\n\n \"\"\"\n if item == 'spec':\n # spec\n spec = self.wv_calib[str(slit)]['spec']\n # tcent\n tcent = self.wv_calib[str(slit)]['tcent']\n yt = np.zeros_like(tcent)\n for jj,t in enumerate(tcent):\n it = int(np.round(t))\n yt[jj] = np.max(spec[it-1:it+1])\n # Plot\n plt.clf()\n ax=plt.gca()\n ax.plot(spec, drawstyle='steps-mid')\n ax.scatter(tcent, yt, color='red', marker='*')\n ax.set_xlabel('Pixel')\n ax.set_ylabel('Counts')\n plt.show()\n elif item == 'fit':\n autoid.arc_fit_qa(self.wv_calib[str(slit)])\n\n def __repr__(self):\n # Generate sets string\n txt = '<{:s}: '.format(self.__class__.__name__)\n if len(self.steps) > 0:\n txt+= ' steps: ['\n for step in self.steps:\n txt += '{:s}, '.format(step)\n txt = txt[:-2]+']' # Trim the trailing comma\n txt += '>'\n return txt\n\n\n" ]
[ [ "numpy.arange", "numpy.ones" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "numpy.round", "numpy.zeros_like", "numpy.any", "numpy.where", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "numpy.ones_like", "numpy.arange", "numpy.full", "numpy.std", "scipy.interpolate.interp1d", "numpy.interp", "matplotlib.pyplot.close", "matplotlib.pyplot.rcdefaults", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.invert", "numpy.min", "numpy.median", "matplotlib.pyplot.savefig", "numpy.full_like", "numpy.fmax", "numpy.meshgrid", "numpy.array", "numpy.sum", "numpy.fmin", "matplotlib.pyplot.show", "numpy.abs", "numpy.isfinite", "matplotlib.lines.Line2D", "matplotlib.pyplot.subplots", "numpy.ones", "matplotlib.pyplot.clf", "matplotlib.cm.get_cmap" ], [ "matplotlib.pyplot.gca", "numpy.ones_like", "numpy.round", "numpy.max", "numpy.append", "matplotlib.pyplot.clf", "numpy.zeros_like", "numpy.array", "numpy.where", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gskdhiman/zomato-recommendation
[ "76d050d654f5ae4db4801eadb065db324baacf5e" ]
[ "backend_code.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 2 00:14:39 2020\n\n@author: Gursewak\n\"\"\"\n\nimport pandas as pd\nimport re\nimport string\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.neighbors import NearestNeighbors\nfrom datetime import datetime\ndata_path = 'data.csv'\ndf = pd.read_csv(data_path)\nN = 3 # maximum recommendations\n\ncost_for_two = 'approx_cost(for two people)'\nlocation = 'listed_in(city)'\nlisting_type = 'listed_in(type)'\nlisting_city = 'listed_in(city)'\nonline_order = 'online_order'\n\n# making cost of two as float\ndf[cost_for_two]=df[cost_for_two].str.replace(\",\",'').astype(float)\n\ndef create_knn():\n STOPWORDS = set(stopwords.words('english'))\n url_pattern = re.compile(r'https?://\\S+|www\\.\\S+')\n def clean_data(text):\n text = text.translate(str.maketrans('', '', string.punctuation))\n text = \" \".join([word for word in str(text).split() if word not in STOPWORDS])\n return url_pattern.sub(r'', text)\n \n df[\"reviews_list\"] = df[\"reviews_list\"].apply(lambda x: clean_data(x))\n \n tfidf = TfidfVectorizer(analyzer='word', ngram_range=(1, 2), min_df=0, stop_words='english')\n \n corpus = df['reviews_list'].tolist()\n tfidf_matrix = tfidf.fit_transform(corpus )\n \n knn_recomm = NearestNeighbors(metric = 'cosine', algorithm = 'brute',n_neighbors=30)\n knn_recomm.fit(tfidf_matrix)\n return knn_recomm,tfidf\n\nknn_recomm,tfidf = create_knn()\n\ndef restaurant_recommend(user_input_text,budget,location,cuisine_type):\n start_time = datetime.now() \n user_inp_mat = tfidf.transform([user_input_text]) \n # user_inp_mat.shape\n score,idx = knn_recomm.kneighbors(user_inp_mat.reshape(1, -1))\n score_idx = dict(zip(idx[0],score[0]))\n df_user = df.iloc[idx[0]]\n \n df_loc = df_user\n if location is not None:\n df_loc = df_user[df_user['location'].str.lower().str.contains(location.lower())]\n \n df_budget = df_loc\n if budget is not None:\n df_budget = df_loc[df_loc[cost_for_two] <= budget]\n \n df_cuisine = df_budget\n if cuisine_type is not None:\n df_cuisine = df_budget[df_budget['cuisines'].str.lower().str.contains(cuisine_type.lower())]\n \n final_recommend = {}\n for idx,row in df_cuisine.iterrows():\n rest_name = row['name']\n score = score_idx[idx]\n score = str(round(score, 2)*100)+\" %\"\n final_recommend[rest_name] = score \n \n final_recommend = sorted(final_recommend.items(), key=lambda x: x[1], reverse=True)\n final_recommend = final_recommend[:N]\n recomendation_time = (datetime.now() -start_time).seconds\n return final_recommend,recomendation_time \n \n\n\n# restaurant_recommend(user_input_text = 'Lassi and paratha',\n# budget = 1000,\n# location = 'Koramangala',\n# cuisine_type= 'north indian')\n\n\n# restaurant_recommend(user_input_text = 'good ambiance restaurants, serving fish',\n# budget = None,\n# location = 'Koramangala',\n# cuisine_type= None)\n\n# restaurant_recommend(user_input_text = 'must visit restaurants',\n# budget = 1000,\n# location = None,\n# cuisine_type= 'north indian')\n\n\n# restaurant_recommend(user_input_text = 'best cakes',\n# budget = 1000,\n# location = 'Koramangala',\n# cuisine_type= None)\n\n\n# restaurant_recommend(user_input_text = 'authentic chicken biryani',\n# budget = 800,\n# location = 'BTM',\n# cuisine_type= None)\n" ]
[ [ "sklearn.neighbors.NearestNeighbors", "pandas.read_csv", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Weilory/python-matplotlib-graphs
[ "4578c184daba587417becc6df1ad4566e881343a" ]
[ "graph/hist_bin.py" ]
[ "import pandas as pd\r\nfrom matplotlib import pyplot as plt\r\n\r\nplt.style.use(\"fivethirtyeight\")\r\n\r\npath = input(\"please input the age.csv file path here: \")\r\ndata = pd.read_csv(path)\r\nids = data[\"Responder_id\"]\r\nages = data[\"Age\"]\r\n\r\nbins = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\r\n\r\nplt.hist(ages, bins=bins, edgecolor=\"black\", log=True)\r\n\r\nmedian_age = 29\r\nred = \"#fc4f30\"\r\n\r\nplt.axvline(median_age, color=red, linewidth=5, label=\"Age Median\")\r\n\r\nplt.legend()\r\nplt.title(\"Ages of Respondents\")\r\nplt.xlabel(\"Ages\")\r\nplt.ylabel(\"Total Respondents\")\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.axvline", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.style.use", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
hellomoto-ai/splatoon2-ml
[ "4bd24eed527d6b56ce4369b70d24f20058962383" ]
[ "spml/trainer/vae_gan.py" ]
[ "\"\"\"Training mechanism for VAE-GAN\"\"\"\nimport os\nimport time\nimport logging\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom spml import (\n image_util,\n loss_utils,\n)\nfrom . import (\n misc_utils,\n saved_model_manager,\n)\n\n_LG = logging.getLogger(__name__)\n\n\ndef _save_images(images, src_path, step, output_dir):\n src_name = os.path.splitext(os.path.basename(src_path))[0]\n save_path = os.path.join(\n output_dir, 'images', src_name, 'step_%d.png' % step)\n misc_utils.ensure_dir(save_path)\n\n images = [img.detach().cpu().numpy() for img in images]\n images = np.concatenate(images, axis=1)\n image_util.save_image(images, save_path)\n\n\ndef _log_header():\n fields = ' '.join(['%10s'] * 9) % (\n 'KLD', 'BETA', 'F_RECON',\n 'G_RECON', 'G_FAKE', 'D_REAL', 'D_RECON', 'D_FAKE', '[PIXEL]',\n )\n _LG.info('%5s %5s: %s', '', 'PHASE', fields)\n\n\n_LOGGED = {'last': 0}\n\n\ndef _log_loss(loss, phase, progress=None):\n if _LOGGED['last'] % 30 == 0:\n _log_header()\n _LOGGED['last'] += 1\n\n header = '' if progress is None else '%3d %%' % progress\n fields = ' '.join(['%10.2e'] * 9) % (\n loss['kld'], loss['beta'], loss['feats_recon'],\n loss['gen_recon'], loss['gen_fake'],\n loss['disc_orig'], loss['disc_recon'], loss['disc_fake'],\n loss['pixel'],\n )\n _LG.info('%5s %5s: %s', header, phase, fields)\n\n\ndef _get_latent_stats(z, z_std):\n z = z.detach().cpu().numpy()\n z_std = z_std.detach().cpu().numpy()\n return {\n 'z_mean': np.mean(z),\n 'z_min': np.min(z),\n 'z_max': np.max(z),\n 'z_var': np.var(z),\n 'z_std_mean': np.mean(z_std),\n 'z_std_min': np.min(z_std),\n 'z_std_max': np.max(z_std),\n 'z_std_var': np.var(z_std),\n }\n\n\nclass Trainer:\n def __init__(\n self, model, optimizers,\n train_loader, test_loader,\n device, output_dir,\n initial_beta=10.0,\n beta_step=0.1,\n target_kld=0.1,\n beta_momentum=0.1,\n samples=None,\n ):\n self.model = model.float().to(device)\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.optimizers = optimizers\n self.device = device\n self.output_dir = output_dir\n\n self.beta = initial_beta\n self.beta_step = beta_step\n self.target_kld = target_kld\n self.beta_momentum = beta_momentum\n\n self.samples = samples\n\n self.saved_model_manager = saved_model_manager.SavedModelManager()\n\n fields = [\n 'PHASE', 'TIME', 'STEP', 'EPOCH', 'KLD', 'BETA', 'F_RECON',\n 'G_RECON', 'G_FAKE', 'D_REAL', 'D_RECON', 'D_FAKE', 'PIXEL',\n 'Z_MEAN', 'Z_MIN', 'Z_MAX', 'Z_VAR',\n 'Z_STD_MEAN', 'Z_STD_MIN', 'Z_STD_MAX', 'Z_STD_VAR',\n ]\n logfile = open(os.path.join(output_dir, 'result.csv'), 'w')\n self.writer = misc_utils.CSVWriter(fields, logfile)\n\n self.step = 0\n self.epoch = 0\n\n self.latent_stats = loss_utils.MovingStats(beta_momentum)\n\n def _write(self, phase, loss, stats):\n self.writer.write(\n PHASE=phase, STEP=self.step, EPOCH=self.epoch, TIME=time.time(),\n KLD=loss['kld'], BETA=loss['beta'],\n F_RECON=loss['feats_recon'],\n G_RECON=loss['gen_recon'], G_FAKE=loss['gen_fake'],\n D_REAL=loss['disc_orig'],\n D_RECON=loss['disc_recon'], D_FAKE=loss['disc_fake'],\n PIXEL=loss['pixel'],\n Z_MEAN=stats['z_mean'], Z_VAR=stats['z_var'],\n Z_MIN=stats['z_min'], Z_MAX=stats['z_max'],\n Z_STD_MEAN=stats['z_std_mean'], Z_STD_VAR=stats['z_std_var'],\n Z_STD_MIN=stats['z_std_min'], Z_STD_MAX=stats['z_std_max'],\n )\n\n def save(self):\n filename = 'epoch_%s_step_%s.pt' % (self.epoch, self.step)\n output = os.path.join(self.output_dir, 'checkpoints', filename)\n\n _LG.info('Saving checkpoint at %s', output)\n 
misc_utils.ensure_dir(output)\n torch.save({\n 'model': self.model.state_dict(),\n 'optimizers': {\n key: opt.state_dict()\n for key, opt in self.optimizers.items()\n },\n 'epoch': self.epoch,\n 'step': self.step,\n }, output)\n return output\n\n def manage_saved(self, path, loss):\n path = self.saved_model_manager.update(path, loss)\n if path:\n os.remove(path)\n\n def load(self, checkpoint):\n _LG.info('Loading checkpoint from %s', checkpoint)\n data = torch.load(checkpoint, map_location=self.device)\n self.model.load_state_dict(data['model'])\n for key, opt in data['optimizers'].items():\n self.optimizers[key].load_state_dict(opt)\n self.epoch = data['epoch']\n self.step = data['step']\n\n def _forward_gan(self, orig, update=False):\n # Update discriminator with original image\n preds_orig, _ = self.model.discriminator(orig)\n disc_loss_orig = loss_utils.bce(preds_orig, 1)\n if update:\n self.model.zero_grad()\n disc_loss_orig.backward()\n self.optimizers['discriminator'].step()\n\n # Update discriminator with reconstructed image\n recon, latent = self.model.vae(orig)\n preds_recon, _ = self.model.discriminator(recon.detach())\n disc_loss_recon = loss_utils.bce(preds_recon, 0)\n if update:\n self.model.zero_grad()\n disc_loss_recon.backward()\n self.optimizers['discriminator'].step()\n\n # Update generator with reconstructed image\n preds_recon, _ = self.model.discriminator(recon)\n gen_loss_recon = loss_utils.bce(preds_recon, 1)\n if update:\n self.model.zero_grad()\n gen_loss_recon.backward()\n self.optimizers['decoder'].step()\n\n # Update discriminator with fake image\n sample = torch.randn_like(latent[0], requires_grad=True)\n fake = self.model.vae.decoder(sample)\n preds_fake, _ = self.model.discriminator(fake.detach())\n disc_loss_fake = loss_utils.bce(preds_fake, 0)\n if update:\n self.model.zero_grad()\n disc_loss_fake.backward()\n self.optimizers['discriminator'].step()\n\n # Update generator with fake image\n preds_fake, _ = self.model.discriminator(fake)\n gen_loss_fake = loss_utils.bce(preds_fake, 1)\n if update:\n self.model.zero_grad()\n gen_loss_fake.backward()\n self.optimizers['decoder'].step()\n\n return {\n 'disc_orig': disc_loss_orig.item(),\n 'disc_recon': disc_loss_recon.item(),\n 'disc_fake': disc_loss_fake.item(),\n 'gen_recon': gen_loss_recon.item(),\n 'gen_fake': gen_loss_fake.item(),\n }\n\n def _forward_vae(self, orig, update=False):\n # Update feature\n recon, _ = self.model.vae(orig)\n _, feats_orig = self.model.discriminator(orig)\n _, feats_recon = self.model.discriminator(recon)\n feats_loss = F.mse_loss(input=feats_recon, target=feats_orig)\n if update:\n self.model.zero_grad()\n feats_loss.backward()\n self.optimizers['encoder'].step()\n self.optimizers['decoder'].step()\n\n # KLD\n sample, latent = self.model.vae.encoder(orig)\n latent_stats = self.latent_stats(sample, update)\n kld = torch.mean(loss_utils.kld_loss(*latent_stats))\n if update:\n beta_latent_loss = self.beta * kld\n self.model.zero_grad()\n beta_latent_loss.backward()\n self.optimizers['encoder'].step()\n\n # Adjust beta\n if update:\n kld_error = kld.item() - self.target_kld\n self.beta += self.beta_step * kld_error\n self.beta = max(1e-3, self.beta)\n\n loss = {\n 'kld': kld.item(),\n 'beta': self.beta,\n 'feats_recon': feats_loss.item(),\n }\n stats = _get_latent_stats(*latent)\n return recon, loss, stats\n\n def _get_pixel_loss(self, orig):\n recon, _ = self.model.vae(orig)\n return F.mse_loss(orig, recon)\n\n def _forward(self, orig, update=False):\n loss_gan = 
self._forward_gan(orig, update=update)\n recon, loss_vae, stats = self._forward_vae(orig, update=update)\n with torch.no_grad():\n pixel_loss = self._get_pixel_loss(orig)\n\n loss = {'pixel': pixel_loss.item()}\n loss.update(loss_vae)\n loss.update(loss_gan)\n return recon, loss, stats\n\n def train_batch(self, batch):\n self.model.train()\n orig = batch['image'].float().to(self.device)\n _, loss, stats = self._forward(orig, update=True)\n self._write('train', loss, stats)\n return loss\n\n def test(self):\n with torch.no_grad():\n return self._test()\n\n def _test(self):\n self.model.eval()\n loss_tracker = misc_utils.StatsTracker()\n stats_tracker = misc_utils.StatsTracker()\n for i, batch in enumerate(self.test_loader):\n orig, path = batch['image'].float().to(self.device), batch['path']\n recon, loss, stats = self._forward(orig, update=False)\n loss_tracker.update(loss)\n stats_tracker.update(stats)\n if i % 10 == 0:\n _save_images(\n (orig[0], recon[0]), path[0],\n self.step, self.output_dir)\n self._write('test', loss_tracker, stats_tracker)\n _log_loss(loss_tracker, phase='Test')\n return loss_tracker\n\n def generate(self, samples=None):\n samples = self.samples if samples is None else samples\n with torch.no_grad():\n self._generate(samples)\n\n def _generate(self, samples):\n self.model.eval()\n recons = self.model.vae.decoder(samples)\n for i, recon in enumerate(recons):\n path = 'sample_%d.png' % i\n _save_images([recon], path, self.step, self.output_dir)\n\n def train_one_epoch(self, report_every=180, test_interval=1000):\n last_report = 0\n for i, batch in enumerate(self.train_loader):\n loss = self.train_batch(batch)\n self.step += 1\n if time.time() - last_report > report_every:\n progress = 100. * i / len(self.train_loader)\n _log_loss(loss, 'Train', progress)\n last_report = time.time()\n if self.step % test_interval == 0:\n self.generate()\n loss = self.test()\n path = self.save()\n self.manage_saved(path, loss['pixel'])\n self.epoch += 1\n\n def __repr__(self):\n opt = '\\n'.join([\n '%s: %s' % (key, val) for key, val in self.optimizers.items()\n ])\n beta = '\\n'.join([\n 'Beta: %s' % self.beta,\n 'Beta Step: %s' % self.beta_step,\n 'Target KLD: %s' % self.target_kld,\n 'Beta Momuntum: %s' % self.beta_momentum,\n ])\n return 'Epoch: %d\\nStep: %d\\nModel: %s\\nOptimizers: %s\\n%s\\n' % (\n self.epoch, self.step, self.model, opt, beta\n )\n" ]
[ [ "torch.randn_like", "numpy.min", "torch.load", "numpy.concatenate", "numpy.max", "torch.nn.functional.mse_loss", "numpy.mean", "torch.no_grad", "numpy.var" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BOSS-Danuphan/coralinedb
[ "23458c82528ac7ceb78c17e23163d542ad96b79a" ]
[ "coralinedb/coralinedb.py" ]
[ "\"\"\"\n Coraline DB Manager - This will take care of reading and saving tables to SQL database\n\"\"\"\n\n# import python packages\nimport pandas as pd\nimport time\n\n\nclass BaseDB:\n \"\"\"\n Base class for all DB\n These functions must be inherited by sub-class\n - create_connection\n - show_databases\n - show_tables\n \"\"\"\n def __init__(self, host, username, passwd):\n \"\"\"\n Initial object by specify host username and password for database connection\n :param host: host name of the database (str)\n :param username: username of the database (str)\n :param passwd: password of the database (str)\n \"\"\"\n self.host = host\n self.username = username\n self.passwd = passwd\n self.engines = {}\n\n def __del__(self):\n \"\"\"\n On delete object\n :return:\n \"\"\"\n for en_key in self.engines:\n engine = self.engines[en_key]\n try:\n engine.dispose()\n except :\n # engine cannot be dispose #TODO fix it!!\n pass\n\n def get_engine(self, db_name):\n \"\"\"\n Get engine for db name\n :return:\n \"\"\"\n pass\n\n def create_connection(self, db_name=None):\n \"\"\"\n Create Connection and engine for database\n :param: db_name : name of connecting database (str)\n :return: engine and connection\n \"\"\"\n connected = False\n max_tries = 10\n\n # if db_name is not defined, let it be empty string\n if db_name is None:\n db_name = \"\"\n\n # Reconnect until max_tries exceeded\n while not connected and max_tries > 0:\n try:\n # create engine from db settings\n engine = self.get_engine(db_name)\n\n # Create connection for query\n connection = engine.connect()\n\n connected = True\n\n return engine, connection\n except Exception as e:\n print(\"Database Connection Error: {}\".format(e))\n print(\"Network is unreachable. Retrying to connect to database in 10 seconds...\")\n time.sleep(10)\n max_tries -= 1\n\n def try_decoration(self, func):\n \"\"\"\n Decoration for looping tries\n :return:\n \"\"\"\n while True:\n try:\n func()\n break\n except:\n print(\"\")\n\n def load_table(self, db_name, table_name):\n \"\"\"\n Load a table from database\n *The whole table will be download, please make sure you have enough memory*\n :param db_name: name of database (str)\n :param table_name: table name to be read (str)\n :return: pandas dataframe if table exists. Otherwise, None\n \"\"\"\n\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Check if table exists and read\n if engine.dialect.has_table(engine, table_name):\n sql = 'SELECT * FROM %s' % table_name\n result = pd.read_sql(sql, connection, coerce_float=True)\n else:\n print(table_name, \"does not exist\")\n result = None\n\n # Close connection\n connection.close()\n\n return result\n\n def load_tables(self, db_name, table_names):\n \"\"\"\n Load all tables from database\n *The whole table will be download, please make sure you have enough memory*\n :param db_name: name of database (str)\n :param table_names: list of table names (list of strings)\n :return: list of pandas dataframes if the corresponding table exists. 
Otherwise, None\n \"\"\"\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n dfs = []\n\n # Load each table\n for tbn in table_names:\n if engine.dialect.has_table(engine, tbn):\n df = pd.read_sql('SELECT * FROM %s' % tbn, connection, coerce_float=True)\n else:\n print(tbn, \"does not exist\")\n df = None\n dfs.append(df)\n\n # Close connection\n connection.close()\n\n return dfs\n\n def save_table(self, df, db_name, table_name, **kwargs):\n \"\"\"\n Save pandas dataframe to database\n :param df: dataframe to be save (pandas dataframe)\n :param db_name: name of database (str)\n :param table_name: name of table (str)\n :param kwargs: pandas to_sql arguments e.g. if_exists, dtype, ...\n :return:\n \"\"\"\n\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Set default if_exists to replace\n if 'if_exists' not in kwargs:\n kwargs['if_exists'] = 'replace'\n\n # Write df to database\n df.to_sql(name=table_name, con=engine, index=False, **kwargs)\n\n # Close connection\n connection.close()\n\n def get_databases(self):\n \"\"\"\n list of all accessable databases on this host\n :return: list of database names\n \"\"\"\n pass\n\n def get_tables(self, db_name):\n \"\"\"\n List all tables in database\n :param db_name: database name (str)\n :return: list of table names\n \"\"\"\n pass\n\n def query(self, sql_statement, db_name=None):\n \"\"\"\n Run SQL query\n :param sql_statement: SQL statement (str)\n :param db_name: database name\n :return:\n \"\"\"\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n result = pd.read_sql(sql_statement, connection, coerce_float=True)\n\n # Close connection\n connection.close()\n\n return result\n\n def get_count(self, db_name, table_name):\n \"\"\"\n Get number of rows of a table\n :param db_name: database name (str)\n :param table_name: table name (str)\n :return:\n \"\"\"\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Check if table exists\n if engine.dialect.has_table(engine, table_name):\n sql = 'select count(*) from %s;' % table_name\n result = pd.read_sql(sql, connection, coerce_float=True).iloc[:, 0].values[0]\n else:\n print(table_name, \"does not exist\")\n result = None\n\n # Close connection\n connection.close()\n\n return result\n\n def execute(self, sql_statement, db_name=None):\n \"\"\"\n Execute SQL Statement to database\n :param sql_statement: sql statement (str)\n :param db_name: database name (str)\n :return:\n \"\"\"\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Execute SQL\n connection.execute(sql_statement)\n\n # Close connection\n connection.close()\n\n\ndef print_help():\n \"\"\"\n print help\n :return:\n \"\"\"\n print(\"Please go to https://pypi.org/project/coralinedb/ to see how to use the package\")\n\n" ]
[ [ "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
yonesuke/prax
[ "6957776b11c297d4463fba6d15cd06671dfbd45f" ]
[ "examples/hodgkinhuxley.py" ]
[ "import jax.numpy as jnp\nfrom prax import Oscillator\nfrom jax.config import config; config.update(\"jax_enable_x64\", True)\n\nimport matplotlib.pyplot as plt\n\nclass HodgkinHuxley(Oscillator):\n def __init__(self, input_current, C=1.0, G_Na=120.0, G_K=36.0, G_L=0.3, E_Na=50.0, E_K=-77.0, E_L=-54.4, dt=0.01, eps=10**-5):\n super().__init__(n_dim=4, dt=dt, eps=eps)\n self.input_current = input_current\n self.C = C\n self.G_Na = G_Na\n self.G_K = G_K\n self.G_L = G_L\n self.E_Na = E_Na\n self.E_K = E_K\n self.E_L = E_L\n\n def alpha_m(self, V):\n return 0.1*(V+40.0)/(1.0 - jnp.exp(-(V+40.0) / 10.0))\n \n def beta_m(self, V):\n return 4.0*jnp.exp(-(V+65.0) / 18.0)\n \n def alpha_h(self, V):\n return 0.07*jnp.exp(-(V+65.0) / 20.0)\n \n def beta_h(self, V):\n return 1.0/(1.0 + jnp.exp(-(V+35.0) / 10.0))\n \n def alpha_n(self, V):\n return 0.01*(V+55.0)/(1.0 - jnp.exp(-(V+55.0) / 10.0))\n \n def beta_n(self, V):\n return 0.125*jnp.exp(-(V+65) / 80.0)\n\n def forward(self, state):\n V, m, h, n = state\n dVdt = self.G_Na * (m ** 3) * h * (self.E_Na - V) + self.G_K * (n ** 4) * (self.E_K - V) + self.G_L * (self.E_L - V) + self.input_current\n dVdt /= self.C\n dmdt = self.alpha_m(V) * (1.0 - m) - self.beta_m(V) * m\n dhdt = self.alpha_h(V) * (1.0 - h) - self.beta_h(V) * h\n dndt = self.alpha_n(V) * (1.0 - n) - self.beta_n(V) * n\n return jnp.array([dVdt, dmdt, dhdt, dndt])\n\nmodel = HodgkinHuxley(input_current=30.0)\ninit_val = jnp.array([-75, 0.6, 0.05, 0.32])\nmodel.find_periodic_orbit(init_val)\nmodel.calc_phase_response()\n\nplt.figure(figsize=[12,8])\n\nplt.subplot(2,2,1)\nplt.title(\"periodic orbit\")\nplt.xlabel(\"t\")\nplt.ylabel(\"V\")\nplt.plot(model.ts, model.periodic_orbit[:, 0])\n\nplt.subplot(2,2,2)\nplt.title(\"phase response curve\")\nplt.plot(model.ts, model.phase_response_curve[:,0])\nplt.legend(labels=[\"$Z_V$\"])\nplt.xlabel(\"t\")\nplt.ylabel(\"$Z_V$\")\n\nplt.subplot(2,2,3)\nplt.xlabel(\"t\")\nplt.ylabel(\"m,h,n\")\nplt.plot(model.ts, model.periodic_orbit[:, 1:])\n\nplt.subplot(2,2,4)\nplt.plot(model.ts, model.phase_response_curve[:,1:])\nplt.legend(labels=[\"$Z_m$\",\"$Z_h$\",\"$Z_n$\"])\nplt.xlabel(\"t\")\nplt.ylabel(\"$Z_m,Z_h,Z_n$\")\n\nplt.savefig(\"hodgkinhuxley.svg\")" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LukiBa/zybo_face
[ "5f229818727b65ffa82efee2f63522234364fbe2" ]
[ "PC/application_pc.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 1 18:09:27 2021\n\n@author: lukas\n\"\"\"\n\nimport cv2\nimport dlib\nimport numpy as np\nimport timeit\nimport utils\nimport queue\nimport multiprocessing\nimport pathlib\nimport argparse\nimport time\n\n\ndef _create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--descriptor_file', type=str,\n default='./saved_descriptors', help='path to descriptor file')\n parser.add_argument('--threshold', type=float, default=0.6,\n help='Threshold of euclidean distance to distinguish persons.')\n parser.add_argument('--max_angle', type=float, default=4.0, help='maximum rotation angle of the face.')\n parser.add_argument('--max_fps', type=float, default=5.0, help='maximum frame rate of the application.')\n parser.add_argument(\n '--cam_url', type=str,\n default=\"http://10.0.0.241/zm/cgi-bin/nph-zms?mode=jpeg&monitor=2&maxfps=5&scale=100&user=admin&pass=admin\",\n help=\"IP camera url including username and password\")\n parser.add_argument('--landmarkPredictor', type=str,\n default=\"../dlib_models/shape_predictor_68_face_landmarks.dat\",\n help=\"Path to dlib 68 face landmark predictor: shape_predictor_68_face_landmarks.dat\")\n parser.add_argument('--faceDescriptor', type=str,\n default=\"../dlib_models/dlib_face_recognition_resnet_model_v1.dat\",\n help=\"Path to dlibs face recognition model: dlib_face_recognition_resnet_model_v1.dat\")\n return parser.parse_args()\n\n\nclass StateMachine():\n def __init__(self, url, predictorPath, facerecPath, descriptorFilePath,\n threshold=0.6, maxFps: float = 5.0, imgSize: int = 384,\n maxAngle: float = 4.0, MaxMovement=50.0, showLandmarks: bool = False) -> None:\n\n predictor = dlib.shape_predictor(predictorPath)\n detector = dlib.get_frontal_face_detector()\n\n self.__decriptorHandler = utils.Descriptor_FileHandler(descriptorFilePath, threshold)\n\n self.__minLatency = 1000.0/maxFps\n self.__imgSize = imgSize\n self.__maxAngle = maxAngle\n self.__MaxMovement = MaxMovement\n self.__showLandmarks = showLandmarks\n\n self.__state = self.___waitForFace\n self.__imgPos = np.zeros((4), dtype=np.int32)\n self.__name = \"processing..\"\n self.__faceDetected = False\n self.__score = 0.0\n self.__maxMissDetection = 2\n self.__missDetections = 0\n self.__ReqHeadRot = 8.0\n\n self.__imgQueue = queue.Queue(maxsize=3)\n self.__detectQueue = queue.Queue(maxsize=3)\n self.__faceRecQueueIn = multiprocessing.Queue(maxsize=3)\n self.__faceRecQueueOut = multiprocessing.Queue(maxsize=3)\n\n self.__ImageWorker = utils.Image_loader(self.__imgQueue, url, imgSize,\n maxFps)\n self.__DetectionWorker = utils.Detector(self.__imgQueue, self.__detectQueue,\n detector, predictor)\n\n self.__FaceRecWorker = utils.FaceDecriptorProcess(self.__faceRecQueueIn, self.__faceRecQueueOut, \n facerecPath)\n\n self.__ImageWorker()\n self.__DetectionWorker()\n self.__FaceRecWorker()\n\n def __del__(self) -> None:\n return self.__FaceRecWorker.kill()\n\n def __call__(self, key) -> np.ndarray:\n # execute current state\n return self.__state(key)\n\n def __discardCurrentDescriptor(self) -> None:\n # If Output Queue is empty -> Face descriptor computations are not done yet --> kill the process and restart it\n self.__name == \"processing..\"\n self.__faceDetected = False\n self.__score = 0.0\n \n if self.__faceRecQueueOut.empty():\n self.__FaceRecWorker.kill()\n self.__FaceRecWorker()\n return\n # If Output Queue is not empty -> Face descriptor computations are done --> discard the face descriptor in the Queue\n _ = 
self.__faceRecQueueOut.get()\n return\n\n def ___waitForFace(self, key) -> np.ndarray:\n rects, shapes, img = self.__detectQueue.get()\n\n # Multiple persons\n if len(rects) > 1:\n outtext = \"Error: Multiple faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n return img\n\n # No Person\n if len(rects) < 1:\n outtext = \"Error: No faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n return img\n\n shape_np = utils.shape_to_np(shapes[0])\n\n # Check face alignment\n rot_angle = utils.get_angle(shape_np[27, :]-shape_np[30, :],\n shape_np[27, :]-shape_np[33, :])\n tilt_angle = utils.get_angle(shape_np[45, :]-shape_np[36, :],\n np.array([1, 0]))\n\n if np.abs(rot_angle) > self.__maxAngle or np.abs(tilt_angle) > self.__maxAngle:\n outtext = \"Look straight into the camera. Current rot angle: \" + \\\n str(rot_angle) + \" tilt angle: \" + str(tilt_angle)\n\n utils.write_text_bottom(img, outtext, (0, 127, 255))\n return img\n\n # start Computation of face descriptor\n self.__faceRecQueueIn.put((shapes, img))\n\n # draw rectangle\n rect_np = utils.rect_to_np(rects[0], dtpye=np.int32) # convert dlib rectangle to numpy\n img = utils.drawBoxAndName(img, rect_np, self.__name, self.__score)\n\n # draw landmarks\n if self.__showLandmarks:\n for (px, py) in shape_np:\n cv2.circle(img, (int(px), int(py)), 1, (255, 0, 0), -1)\n\n # store face position for tracking\n self.__imgPos = rect_np\n\n # next state --> Do life check look left\n self.__state = self.__lifeCheckLookLeft\n return img\n\n def __lifeCheckLookLeft(self, key) -> np.ndarray:\n return self.__lifeCheckLookLeftRight(False, self.__lifeCheckLookRight)\n\n def __lifeCheckLookRight(self, key) -> np.ndarray:\n return self.__lifeCheckLookLeftRight(True, self.__tracking)\n\n def __lifeCheckLookLeftRight(self, nLeftRight, nextState) -> np.ndarray:\n rects, shapes, img = self.__detectQueue.get()\n # Multiple persons\n if len(rects) > 1:\n outtext = \"Error: Multiple faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n self.__missDetections += 1\n if self.__missDetections > self.__maxMissDetection:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n # No Person\n if len(rects) < 1:\n outtext = \"Error: No faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n self.__missDetections += 1\n if self.__missDetections > self.__maxMissDetection:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n # Check for feasible movement -> If face jumps around most properly it is no real person\n rect_np = utils.rect_to_np(rects[0], dtpye=np.int32) # convert dlib rectangle to numpy\n movement = np.linalg.norm(rect_np-self.__imgPos)\n if movement > self.__MaxMovement:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n shape_np = utils.shape_to_np(shapes[0])\n\n # Check face alignment\n rot_angle = utils.get_angle(shape_np[27, :]-shape_np[30, :],\n shape_np[27, :]-shape_np[33, :])\n\n if not nLeftRight and (rot_angle < self.__ReqHeadRot):\n outtext = \"Rotate your head LEFT. Current rot angle: \" + str(rot_angle)\n utils.write_text_bottom(img, outtext, (100, 255, 255))\n cv2.arrowedLine(img, (30, int(self.__imgSize/2)), (5, int(self.__imgSize/2)),\n (100, 255, 255), 3)\n elif nLeftRight and (rot_angle > (-self.__ReqHeadRot)):\n outtext = \"Rotate your head RIGHT. 
Current rot angle: \" + str(rot_angle)\n utils.write_text_bottom(img, outtext, (255, 255, 100))\n cv2.arrowedLine(img, (self.__imgSize-30, int(self.__imgSize/2)-5),\n (self.__imgSize, int(self.__imgSize/2)),\n (255, 255, 100), 3)\n else:\n outtext = \"Good\"\n utils.write_text_bottom(img, outtext, (0, 255, 0))\n # next state --> Do life check look left\n self.__state = nextState\n\n img = utils.drawBoxAndName(img, rect_np, self.__name, self.__score)\n if self.__showLandmarks:\n for (px, py) in shape_np:\n cv2.circle(img, (int(px), int(py)), 1, (255, 0, 0), -1)\n self.__imgPos = rect_np\n return img\n\n def __tracking(self, key):\n rects, shapes, img = self.__detectQueue.get()\n # Multiple persons\n if len(rects) > 1:\n outtext = \"Error: Multiple faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n self.__missDetections += 1\n if self.__missDetections > self.__maxMissDetection:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n # No Person\n if len(rects) < 1:\n outtext = \"Error: No faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n self.__missDetections += 1\n if self.__missDetections > self.__maxMissDetection:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n # Check for feasible movement -> If face jumps around most properly it is no real person\n rect_np = utils.rect_to_np(rects[0], dtpye=np.int32) # convert dlib rectangle to numpy\n movement = np.linalg.norm(rect_np-self.__imgPos)\n if movement > self.__MaxMovement:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n shape_np = utils.shape_to_np(shapes[0])\n\n # Check face alignment\n rot_angle = utils.get_angle(shape_np[27, :]-shape_np[30, :],\n shape_np[27, :]-shape_np[33, :])\n tilt_angle = utils.get_angle(shape_np[45, :]-shape_np[36, :],\n np.array([1, 0]))\n\n if self.__faceDetected:\n outtext = self.__name + \" detected with {}\\% confidence.\".format(self.__score)\n utils.write_text_bottom(img, outtext, (0, 255, 0))\n else:\n if self.__faceRecQueueOut.empty():\n outtext = self.__name\n utils.write_text_bottom(img, outtext, (255, 0, 0))\n else:\n faceDescriptor = self.__faceRecQueueOut.get()\n self.__faceDetected, self.__name = self.__decriptorHandler.exists(faceDescriptor)\n self.__score = 99.38 # dlib face recognition accuracy\n\n img = utils.drawBoxAndName(img, rect_np, self.__name, self.__score)\n if self.__showLandmarks:\n for (px, py) in shape_np:\n cv2.circle(img, (int(px), int(py)), 1, (255, 0, 0), -1)\n self.__imgPos = rect_np\n return img\n\n\ndef main(opt):\n\n stm = StateMachine(opt.cam_url, opt.landmarkPredictor, opt.faceDescriptor,\n opt.descriptor_file, opt.threshold,\n maxFps=opt.max_fps, imgSize=384, showLandmarks=True)\n\n key = 0\n while(True):\n img = stm(key)\n cv2.imshow(\"Face Detector\", img)\n\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n break\n\n # Destroy all the windows\n cv2.destroyAllWindows()\n print(\"Leave Face Detection\")\n\n\nif __name__ == '__main__':\n opt = _create_parser()\n print(opt)\n main(opt)\n" ]
[ [ "numpy.abs", "numpy.array", "numpy.zeros", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
tasx0823/BBN
[ "7992e908842f5934f0d1ee3f430d796621e81975", "7992e908842f5934f0d1ee3f430d796621e81975" ]
[ "lib/utils/utils.py", "main/valid.py" ]
[ "import logging\r\nimport time\r\nimport os\r\n\r\nimport torch\r\nfrom utils.lr_scheduler import WarmupMultiStepLR\r\nfrom net import Network\r\n\r\n\r\ndef create_logger(cfg):\r\n dataset = cfg.DATASET.DATASET\r\n net_type = cfg.BACKBONE.TYPE\r\n module_type = cfg.MODULE.TYPE\r\n log_dir = os.path.join(cfg.OUTPUT_DIR, cfg.NAME, \"logs\")\r\n if not os.path.exists(log_dir):\r\n os.makedirs(log_dir)\r\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\")\r\n log_name = \"{}_{}_{}_{}.log\".format(dataset, net_type, module_type, time_str)\r\n log_file = os.path.join(log_dir, log_name)\r\n # set up logger\r\n print(\"=> creating log {}\".format(log_file))\r\n head = \"%(asctime)-15s %(message)s\"\r\n logging.basicConfig(filename=str(log_file), format=head)\r\n logger = logging.getLogger()\r\n logger.setLevel(logging.INFO)\r\n console = logging.StreamHandler()\r\n logging.getLogger(\"\").addHandler(console)\r\n\r\n logger.info(\"---------------------Cfg is set as follow--------------------\")\r\n logger.info(cfg)\r\n logger.info(\"-------------------------------------------------------------\")\r\n return logger, log_file\r\n\r\n\r\ndef get_optimizer(cfg, model):\r\n base_lr = cfg.TRAIN.OPTIMIZER.BASE_LR\r\n params = []\r\n\r\n for name, p in model.named_parameters():\r\n if p.requires_grad:\r\n params.append({\"params\": p})\r\n\r\n if cfg.TRAIN.OPTIMIZER.TYPE == \"SGD\":\r\n optimizer = torch.optim.SGD(\r\n params,\r\n lr=base_lr,\r\n momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM,\r\n weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,\r\n nesterov=True,\r\n )\r\n elif cfg.TRAIN.OPTIMIZER.TYPE == \"ADAM\":\r\n optimizer = torch.optim.Adam(\r\n params,\r\n lr=base_lr,\r\n betas=(0.9, 0.999),\r\n weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,\r\n )\r\n return optimizer\r\n\r\n\r\ndef get_scheduler(cfg, optimizer):\r\n if cfg.TRAIN.LR_SCHEDULER.TYPE == \"multistep\":\r\n scheduler = torch.optim.lr_scheduler.MultiStepLR(\r\n optimizer,\r\n cfg.TRAIN.LR_SCHEDULER.LR_STEP,\r\n gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,\r\n )\r\n elif cfg.TRAIN.LR_SCHEDULER.TYPE == \"cosine\":\r\n if cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END > 0:\r\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\r\n optimizer, T_max=cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END, eta_min=1e-4\r\n )\r\n else:\r\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\r\n optimizer, T_max=cfg.TRAIN.MAX_EPOCH, eta_min=1e-4\r\n )\r\n elif cfg.TRAIN.LR_SCHEDULER.TYPE == \"warmup\":\r\n scheduler = WarmupMultiStepLR(\r\n optimizer,\r\n cfg.TRAIN.LR_SCHEDULER.LR_STEP,\r\n gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,\r\n warmup_epochs=cfg.TRAIN.LR_SCHEDULER.WARM_EPOCH,\r\n )\r\n else:\r\n raise NotImplementedError(\"Unsupported LR Scheduler: {}\".format(cfg.TRAIN.LR_SCHEDULER.TYPE))\r\n\r\n return scheduler\r\n\r\n\r\ndef get_model(cfg, num_classes, device, logger):\r\n model = Network(cfg, mode=\"train\", num_classes=num_classes)\r\n\r\n if cfg.BACKBONE.FREEZE == True:\r\n model.freeze_backbone()\r\n logger.info(\"Backbone has been freezed\")\r\n\r\n if cfg.CPU_MODE:\r\n model = model.to(device)\r\n else:\r\n model = torch.nn.DataParallel(model).cuda()\r\n\r\n return model\r\n\r\ndef get_category_list(annotations, num_classes, cfg):\r\n num_list = [0] * num_classes\r\n cat_list = []\r\n print(\"Weight List has been produced\")\r\n for anno in annotations:\r\n category_id = anno[\"category_id\"]\r\n num_list[category_id] += 1\r\n cat_list.append(category_id)\r\n return num_list, cat_list", "import _init_paths\nfrom net import Network\nfrom config import cfg, 
update_config\nfrom dataset import *\nimport numpy as np\nimport torch\nimport os\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport argparse\nfrom core.evaluate import FusionMatrix\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"BBN evaluation\")\n\n parser.add_argument(\n \"--cfg\",\n help=\"decide which cfg to use\",\n required=True,\n default=\"configs/cifar10.yaml\",\n type=str,\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n return args\n\ndef valid_model(dataLoader, model, cfg, device, num_classes):\n result_list = []\n pbar = tqdm(total=len(dataLoader))\n model.eval()\n top1_count, top2_count, top3_count, index, fusion_matrix = (\n [],\n [],\n [],\n 0,\n FusionMatrix(num_classes),\n )\n\n func = torch.nn.Softmax(dim=1)\n\n with torch.no_grad():\n for i, (image, image_labels, meta) in enumerate(dataLoader):\n image = image.to(device)\n output = model(image)\n result = func(output)\n _, top_k = result.topk(5, 1, True, True)\n score_result = result.cpu().numpy()\n fusion_matrix.update(score_result.argmax(axis=1), image_labels.numpy())\n topk_result = top_k.cpu().tolist()\n if not \"image_id\" in meta:\n meta[\"image_id\"] = [0] * image.shape[0]\n image_ids = meta[\"image_id\"]\n for i, image_id in enumerate(image_ids):\n result_list.append(\n {\n \"image_id\": image_id,\n \"image_label\": int(image_labels[i]),\n \"top_3\": topk_result[i],\n }\n )\n top1_count += [topk_result[i][0] == image_labels[i]]\n top2_count += [image_labels[i] in topk_result[i][0:2]]\n top3_count += [image_labels[i] in topk_result[i][0:3]]\n index += 1\n now_acc = np.sum(top1_count) / index\n pbar.set_description(\"Now Top1:{:>5.2f}%\".format(now_acc * 100))\n pbar.update(1)\n top1_acc = float(np.sum(top1_count) / len(top1_count))\n top2_acc = float(np.sum(top2_count) / len(top1_count))\n top3_acc = float(np.sum(top3_count) / len(top1_count))\n print(\n \"Top1:{:>5.2f}% Top2:{:>5.2f}% Top3:{:>5.2f}%\".format(\n top1_acc * 100, top2_acc * 100, top3_acc * 100\n )\n )\n pbar.close()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n update_config(cfg, args)\n\n test_set = eval(cfg.DATASET.DATASET)(\"valid\", cfg)\n num_classes = test_set.get_num_classes()\n device = torch.device(\"cpu\" if cfg.CPU_MODE else \"cuda\")\n model = Network(cfg, mode=\"test\", num_classes=num_classes)\n\n model_dir = os.path.join(cfg.OUTPUT_DIR, cfg.NAME, \"models\")\n model_file = cfg.TEST.MODEL_FILE\n if \"/\" in model_file:\n model_path = model_file\n else:\n model_path = os.path.join(model_dir, model_file)\n model.load_model(model_path)\n\n if cfg.CPU_MODE:\n model = model.to(device)\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n testLoader = DataLoader(\n test_set,\n batch_size=cfg.TEST.BATCH_SIZE,\n shuffle=False,\n num_workers=cfg.TEST.NUM_WORKERS,\n pin_memory=cfg.PIN_MEMORY,\n )\n valid_model(testLoader, model, cfg, device, num_classes)\n" ]
[ [ "torch.optim.lr_scheduler.MultiStepLR", "torch.optim.Adam", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.optim.SGD", "torch.nn.DataParallel" ], [ "torch.nn.Softmax", "torch.utils.data.DataLoader", "torch.no_grad", "torch.device", "torch.nn.DataParallel", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dwhu/pandas
[ "283fa07e723fac091685366ba83727624748fddb" ]
[ "pandas/core/internals/blocks.py" ]
[ "from datetime import datetime, timedelta\nimport functools\nimport inspect\nimport re\nfrom typing import Any, List\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import NaT, algos as libalgos, lib, tslib, writers\nfrom pandas._libs.index import convert_scalar\nimport pandas._libs.internals as libinternals\nfrom pandas._libs.tslibs import Timedelta, conversion\nfrom pandas._libs.tslibs.timezones import tz_compare\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.cast import (\n astype_nansafe,\n find_common_type,\n infer_dtype_from,\n infer_dtype_from_scalar,\n maybe_downcast_numeric,\n maybe_downcast_to_dtype,\n maybe_infer_dtype_type,\n maybe_promote,\n maybe_upcast,\n soft_convert_objects,\n)\nfrom pandas.core.dtypes.common import (\n _NS_DTYPE,\n _TD_DTYPE,\n ensure_platform_int,\n is_bool_dtype,\n is_categorical,\n is_categorical_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_interval_dtype,\n is_list_like,\n is_object_dtype,\n is_period_dtype,\n is_re,\n is_re_compilable,\n is_sparse,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.concat import concat_categorical, concat_datetime\nfrom pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCExtensionArray,\n ABCPandasArray,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import (\n _isna_compat,\n array_equivalent,\n is_valid_nat_for_dtype,\n isna,\n)\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import Categorical, DatetimeArray, PandasDtype, TimedeltaArray\nfrom pandas.core.base import PandasObject\nimport pandas.core.common as com\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexers import (\n check_setitem_lengths,\n is_empty_indexer,\n is_scalar_indexer,\n)\nimport pandas.core.missing as missing\nfrom pandas.core.nanops import nanpercentile\n\nfrom pandas.io.formats.printing import pprint_thing\n\n\nclass Block(PandasObject):\n \"\"\"\n Canonical n-dimensional unit of homogeneous dtype contained in a pandas\n data structure\n\n Index-ignorant; let the container take care of that\n \"\"\"\n\n __slots__ = [\"_mgr_locs\", \"values\", \"ndim\"]\n is_numeric = False\n is_float = False\n is_integer = False\n is_complex = False\n is_datetime = False\n is_datetimetz = False\n is_timedelta = False\n is_bool = False\n is_object = False\n is_categorical = False\n is_extension = False\n _can_hold_na = False\n _can_consolidate = True\n _verify_integrity = True\n _validate_ndim = True\n _ftype = \"dense\"\n _concatenator = staticmethod(np.concatenate)\n\n def __init__(self, values, placement, ndim=None):\n self.ndim = self._check_ndim(values, ndim)\n self.mgr_locs = placement\n self.values = values\n\n if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values):\n raise ValueError(\n f\"Wrong number of items passed {len(self.values)}, \"\n f\"placement implies {len(self.mgr_locs)}\"\n )\n\n def _check_ndim(self, values, ndim):\n \"\"\"\n ndim inference and validation.\n\n Infers ndim from 'values' if not provided to __init__.\n Validates that values.ndim and ndim are consistent if and only if\n the class variable '_validate_ndim' is True.\n\n Parameters\n ----------\n values : array-like\n ndim : int or None\n\n Returns\n -------\n ndim : int\n\n Raises\n ------\n ValueError : the number of dimensions do not match\n \"\"\"\n 
if ndim is None:\n ndim = values.ndim\n\n if self._validate_ndim and values.ndim != ndim:\n raise ValueError(\n \"Wrong number of dimensions. \"\n f\"values.ndim != ndim [{values.ndim} != {ndim}]\"\n )\n return ndim\n\n @property\n def _holder(self):\n \"\"\"The array-like that can hold the underlying values.\n\n None for 'Block', overridden by subclasses that don't\n use an ndarray.\n \"\"\"\n return None\n\n @property\n def _consolidate_key(self):\n return (self._can_consolidate, self.dtype.name)\n\n @property\n def _is_single_block(self):\n return self.ndim == 1\n\n @property\n def is_view(self):\n \"\"\" return a boolean if I am possibly a view \"\"\"\n return self.values.base is not None\n\n @property\n def is_datelike(self):\n \"\"\" return True if I am a non-datelike \"\"\"\n return self.is_datetime or self.is_timedelta\n\n def is_categorical_astype(self, dtype):\n \"\"\"\n validate that we have a astypeable to categorical,\n returns a boolean if we are a categorical\n \"\"\"\n if dtype is Categorical or dtype is CategoricalDtype:\n # this is a pd.Categorical, but is not\n # a valid type for astypeing\n raise TypeError(f\"invalid type {dtype} for astype\")\n\n elif is_categorical_dtype(dtype):\n return True\n\n return False\n\n def external_values(self, dtype=None):\n \"\"\" return an outside world format, currently just the ndarray \"\"\"\n return self.values\n\n def internal_values(self, dtype=None):\n \"\"\" return an internal format, currently just the ndarray\n this should be the pure internal API format\n \"\"\"\n return self.values\n\n def get_values(self, dtype=None):\n \"\"\"\n return an internal format, currently just the ndarray\n this is often overridden to handle to_dense like operations\n \"\"\"\n if is_object_dtype(dtype):\n return self.values.astype(object)\n return self.values\n\n def get_block_values(self, dtype=None):\n \"\"\"\n This is used in the JSON C code\n \"\"\"\n return self.get_values(dtype=dtype)\n\n def to_dense(self):\n return self.values.view()\n\n @property\n def fill_value(self):\n return np.nan\n\n @property\n def mgr_locs(self):\n return self._mgr_locs\n\n @mgr_locs.setter\n def mgr_locs(self, new_mgr_locs):\n if not isinstance(new_mgr_locs, libinternals.BlockPlacement):\n new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs)\n\n self._mgr_locs = new_mgr_locs\n\n @property\n def array_dtype(self):\n \"\"\" the dtype to return if I want to construct this block as an\n array\n \"\"\"\n return self.dtype\n\n def make_block(self, values, placement=None) -> \"Block\":\n \"\"\"\n Create a new block, with type inference propagate any values that are\n not specified\n \"\"\"\n if placement is None:\n placement = self.mgr_locs\n\n return make_block(values, placement=placement, ndim=self.ndim)\n\n def make_block_same_class(self, values, placement=None, ndim=None):\n \"\"\" Wrap given values in a block of same type as self. 
\"\"\"\n if placement is None:\n placement = self.mgr_locs\n if ndim is None:\n ndim = self.ndim\n return make_block(values, placement=placement, ndim=ndim, klass=type(self))\n\n def __repr__(self) -> str:\n # don't want to print out all of the items here\n name = type(self).__name__\n if self._is_single_block:\n\n result = f\"{name}: {len(self)} dtype: {self.dtype}\"\n\n else:\n\n shape = \" x \".join(pprint_thing(s) for s in self.shape)\n result = (\n f\"{name}: {pprint_thing(self.mgr_locs.indexer)}, \"\n f\"{shape}, dtype: {self.dtype}\"\n )\n\n return result\n\n def __len__(self) -> int:\n return len(self.values)\n\n def __getstate__(self):\n return self.mgr_locs.indexer, self.values\n\n def __setstate__(self, state):\n self.mgr_locs = libinternals.BlockPlacement(state[0])\n self.values = state[1]\n self.ndim = self.values.ndim\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n return self.values[slicer]\n\n def getitem_block(self, slicer, new_mgr_locs=None):\n \"\"\"\n Perform __getitem__-like, return result as block.\n\n As of now, only supports slices that preserve dimensionality.\n \"\"\"\n if new_mgr_locs is None:\n if isinstance(slicer, tuple):\n axis0_slicer = slicer[0]\n else:\n axis0_slicer = slicer\n new_mgr_locs = self.mgr_locs[axis0_slicer]\n\n new_values = self._slice(slicer)\n\n if self._validate_ndim and new_values.ndim != self.ndim:\n raise ValueError(\"Only same dim slicing is allowed\")\n\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n @property\n def shape(self):\n return self.values.shape\n\n @property\n def dtype(self):\n return self.values.dtype\n\n @property\n def ftype(self):\n if getattr(self.values, \"_pandas_ftype\", False):\n dtype = self.dtype.subtype\n else:\n dtype = self.dtype\n return f\"{dtype}:{self._ftype}\"\n\n def merge(self, other):\n return _merge_blocks([self, other])\n\n def concat_same_type(self, to_concat, placement=None):\n \"\"\"\n Concatenate list of single blocks of the same type.\n \"\"\"\n values = self._concatenator(\n [blk.values for blk in to_concat], axis=self.ndim - 1\n )\n return self.make_block_same_class(\n values, placement=placement or slice(0, len(values), 1)\n )\n\n def iget(self, i):\n return self.values[i]\n\n def set(self, locs, values):\n \"\"\"\n Modify Block in-place with new item value\n\n Returns\n -------\n None\n \"\"\"\n self.values[locs] = values\n\n def delete(self, loc):\n \"\"\"\n Delete given loc(-s) from block in-place.\n \"\"\"\n self.values = np.delete(self.values, loc, 0)\n self.mgr_locs = self.mgr_locs.delete(loc)\n\n def apply(self, func, **kwargs):\n \"\"\" apply the function to my values; return a block if we are not\n one\n \"\"\"\n with np.errstate(all=\"ignore\"):\n result = func(self.values, **kwargs)\n\n if is_extension_array_dtype(result) and result.ndim > 1:\n # if we get a 2D ExtensionArray, we need to split it into 1D pieces\n nbs = []\n for i, loc in enumerate(self.mgr_locs):\n vals = result[i]\n nv = _block_shape(vals, ndim=self.ndim)\n block = self.make_block(values=nv, placement=[loc])\n nbs.append(block)\n return nbs\n\n if not isinstance(result, Block):\n result = self.make_block(values=_block_shape(result, ndim=self.ndim))\n\n return result\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n \"\"\" fillna on the block with the value. 
If we fail, then convert to\n ObjectBlock and try again\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n mask = isna(self.values)\n if limit is not None:\n limit = libalgos._validate_limit(None, limit=limit)\n mask[mask.cumsum(self.ndim - 1) > limit] = False\n\n if not self._can_hold_na:\n if inplace:\n return self\n else:\n return self.copy()\n\n if self._can_hold_element(value):\n # equivalent: _try_coerce_args(value) would not raise\n blocks = self.putmask(mask, value, inplace=inplace)\n return self._maybe_downcast(blocks, downcast)\n\n # we can't process the value, but nothing to do\n if not mask.any():\n return self if inplace else self.copy()\n\n # operate column-by-column\n def f(mask, val, idx):\n block = self.coerce_to_target_dtype(value)\n\n # slice out our block\n if idx is not None:\n # i.e. self.ndim == 2\n block = block.getitem_block(slice(idx, idx + 1))\n return block.fillna(value, limit=limit, inplace=inplace, downcast=None)\n\n return self.split_and_operate(None, f, inplace)\n\n def split_and_operate(self, mask, f, inplace: bool):\n \"\"\"\n split the block per-column, and apply the callable f\n per-column, return a new block for each. Handle\n masking which will not change a block unless needed.\n\n Parameters\n ----------\n mask : 2-d boolean mask\n f : callable accepting (1d-mask, 1d values, indexer)\n inplace : boolean\n\n Returns\n -------\n list of blocks\n \"\"\"\n\n if mask is None:\n mask = np.broadcast_to(True, shape=self.shape)\n\n new_values = self.values\n\n def make_a_block(nv, ref_loc):\n if isinstance(nv, list):\n assert len(nv) == 1, nv\n assert isinstance(nv[0], Block)\n block = nv[0]\n else:\n # Put back the dimension that was taken from it and make\n # a block out of the result.\n nv = _block_shape(nv, ndim=self.ndim)\n block = self.make_block(values=nv, placement=ref_loc)\n return block\n\n # ndim == 1\n if self.ndim == 1:\n if mask.any():\n nv = f(mask, new_values, None)\n else:\n nv = new_values if inplace else new_values.copy()\n block = make_a_block(nv, self.mgr_locs)\n return [block]\n\n # ndim > 1\n new_blocks = []\n for i, ref_loc in enumerate(self.mgr_locs):\n m = mask[i]\n v = new_values[i]\n\n # need a new block\n if m.any():\n nv = f(m, v, i)\n else:\n nv = v if inplace else v.copy()\n\n block = make_a_block(nv, [ref_loc])\n new_blocks.append(block)\n\n return new_blocks\n\n def _maybe_downcast(self, blocks: List[\"Block\"], downcast=None) -> List[\"Block\"]:\n\n # no need to downcast our float\n # unless indicated\n if downcast is None and (\n self.is_float or self.is_timedelta or self.is_datetime\n ):\n return blocks\n\n return _extend_blocks([b.downcast(downcast) for b in blocks])\n\n def downcast(self, dtypes=None):\n \"\"\" try to downcast each item to the dict of dtypes if present \"\"\"\n\n # turn it off completely\n if dtypes is False:\n return self\n\n values = self.values\n\n # single block handling\n if self._is_single_block:\n\n # try to cast all non-floats here\n if dtypes is None:\n dtypes = \"infer\"\n\n nv = maybe_downcast_to_dtype(values, dtypes)\n return self.make_block(nv)\n\n # ndim > 1\n if dtypes is None:\n return self\n\n if not (dtypes == \"infer\" or isinstance(dtypes, dict)):\n raise ValueError(\n \"downcast must have a dictionary or 'infer' as its argument\"\n )\n elif dtypes != \"infer\":\n raise AssertionError(\"dtypes as dict is not supported yet\")\n\n # operate column-by-column\n # this is expensive as it splits the blocks items-by-item\n def f(mask, val, idx):\n val = 
maybe_downcast_to_dtype(val, dtype=\"infer\")\n return val\n\n return self.split_and_operate(None, f, False)\n\n def astype(self, dtype, copy: bool = False, errors: str = \"raise\"):\n \"\"\"\n Coerce to the new dtype.\n\n Parameters\n ----------\n dtype : str, dtype convertible\n copy : bool, default False\n copy if indicated\n errors : str, {'raise', 'ignore'}, default 'ignore'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n Returns\n -------\n Block\n \"\"\"\n errors_legal_values = (\"raise\", \"ignore\")\n\n if errors not in errors_legal_values:\n invalid_arg = (\n \"Expected value of kwarg 'errors' to be one of \"\n f\"{list(errors_legal_values)}. Supplied value is '{errors}'\"\n )\n raise ValueError(invalid_arg)\n\n if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):\n msg = (\n f\"Expected an instance of {dtype.__name__}, \"\n \"but got the class instead. Try instantiating 'dtype'.\"\n )\n raise TypeError(msg)\n\n # may need to convert to categorical\n if self.is_categorical_astype(dtype):\n\n if is_categorical_dtype(self.values):\n # GH 10696/18593: update an existing categorical efficiently\n return self.make_block(self.values.astype(dtype, copy=copy))\n\n return self.make_block(Categorical(self.values, dtype=dtype))\n\n dtype = pandas_dtype(dtype)\n\n # astype processing\n if is_dtype_equal(self.dtype, dtype):\n if copy:\n return self.copy()\n return self\n\n # force the copy here\n if self.is_extension:\n # TODO: Should we try/except this astype?\n values = self.values.astype(dtype)\n else:\n if issubclass(dtype.type, str):\n\n # use native type formatting for datetime/tz/timedelta\n if self.is_datelike:\n values = self.to_native_types()\n\n # astype formatting\n else:\n values = self.get_values()\n\n else:\n values = self.get_values(dtype=dtype)\n\n # _astype_nansafe works fine with 1-d only\n vals1d = values.ravel()\n try:\n values = astype_nansafe(vals1d, dtype, copy=True)\n except (ValueError, TypeError):\n # e.g. 
astype_nansafe can fail on object-dtype of strings\n # trying to convert to float\n if errors == \"raise\":\n raise\n newb = self.copy() if copy else self\n return newb\n\n # TODO(extension)\n # should we make this attribute?\n if isinstance(values, np.ndarray):\n values = values.reshape(self.shape)\n\n newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim)\n\n if newb.is_numeric and self.is_numeric:\n if newb.shape != self.shape:\n raise TypeError(\n f\"cannot set astype for copy = [{copy}] for dtype \"\n f\"({self.dtype.name} [{self.shape}]) to different shape \"\n f\"({newb.dtype.name} [{newb.shape}])\"\n )\n return newb\n\n def convert(\n self,\n copy: bool = True,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n coerce: bool = False,\n ):\n \"\"\" attempt to coerce any object types to better types return a copy\n of the block (if copy = True) by definition we are not an ObjectBlock\n here!\n \"\"\"\n\n return self.copy() if copy else self\n\n def _can_hold_element(self, element: Any) -> bool:\n \"\"\" require the same dtype as ourselves \"\"\"\n dtype = self.values.dtype.type\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, dtype)\n return isinstance(element, dtype)\n\n def to_native_types(self, slicer=None, na_rep=\"nan\", quoting=None, **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n values = self.get_values()\n\n if slicer is not None:\n values = values[:, slicer]\n mask = isna(values)\n itemsize = writers.word_len(na_rep)\n\n if not self.is_object and not quoting and itemsize:\n values = values.astype(f\"<U{itemsize}\")\n else:\n values = np.array(values, dtype=\"object\")\n\n values[mask] = na_rep\n return values\n\n # block actions #\n def copy(self, deep=True):\n \"\"\" copy constructor \"\"\"\n values = self.values\n if deep:\n values = values.copy()\n return self.make_block_same_class(values, ndim=self.ndim)\n\n def replace(\n self, to_replace, value, inplace=False, filter=None, regex=False, convert=True\n ):\n \"\"\"replace the to_replace value with value, possible to create new\n blocks here this is just a call to putmask. regex is not used here.\n It is used in ObjectBlocks. It is here for API compatibility.\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n original_to_replace = to_replace\n\n # If we cannot replace with own dtype, convert to ObjectBlock and\n # retry\n if not self._can_hold_element(to_replace):\n if not isinstance(to_replace, list):\n if inplace:\n return [self]\n return [self.copy()]\n\n to_replace = [x for x in to_replace if self._can_hold_element(x)]\n if not len(to_replace):\n # GH#28084 avoid costly checks since we can infer\n # that there is nothing to replace in this block\n if inplace:\n return [self]\n return [self.copy()]\n\n if len(to_replace) == 1:\n # _can_hold_element checks have reduced this back to the\n # scalar case and we can avoid a costly object cast\n return self.replace(\n to_replace[0],\n value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n\n # GH 22083, TypeError or ValueError occurred within error handling\n # causes infinite loop. 
Cast and retry only if not objectblock.\n if is_object_dtype(self):\n raise AssertionError\n\n # try again with a compatible block\n block = self.astype(object)\n return block.replace(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n\n values = self.values\n if lib.is_scalar(to_replace) and isinstance(values, np.ndarray):\n # The only non-DatetimeLike class that also has a non-trivial\n # try_coerce_args is ObjectBlock, but that overrides replace,\n # so does not get here.\n to_replace = convert_scalar(values, to_replace)\n\n mask = missing.mask_missing(values, to_replace)\n if filter is not None:\n filtered_out = ~self.mgr_locs.isin(filter)\n mask[filtered_out.nonzero()[0]] = False\n\n if not mask.any():\n if inplace:\n return [self]\n return [self.copy()]\n\n try:\n blocks = self.putmask(mask, value, inplace=inplace)\n # Note: it is _not_ the case that self._can_hold_element(value)\n # is always true at this point. In particular, that can fail\n # for:\n # \"2u\" with bool-dtype, float-dtype\n # 0.5 with int64-dtype\n # np.nan with int64-dtype\n except (TypeError, ValueError):\n # GH 22083, TypeError or ValueError occurred within error handling\n # causes infinite loop. Cast and retry only if not objectblock.\n if is_object_dtype(self):\n raise\n\n assert not self._can_hold_element(value), value\n\n # try again with a compatible block\n block = self.astype(object)\n return block.replace(\n to_replace=original_to_replace,\n value=value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n if convert:\n blocks = [b.convert(numeric=False, copy=not inplace) for b in blocks]\n return blocks\n\n def _replace_single(self, *args, **kwargs):\n \"\"\" no-op on a non-ObjectBlock \"\"\"\n return self if kwargs[\"inplace\"] else self.copy()\n\n def setitem(self, indexer, value):\n \"\"\"\n Set the value inplace, returning a a maybe different typed block.\n\n Parameters\n ----------\n indexer : tuple, list-like, array-like, slice\n The subset of self.values to set\n value : object\n The value being set\n\n Returns\n -------\n Block\n\n Notes\n -----\n `indexer` is a direct slice/positional indexer. 
`value` must\n be a compatible shape.\n \"\"\"\n transpose = self.ndim == 2\n\n # coerce None values, if appropriate\n if value is None:\n if self.is_numeric:\n value = np.nan\n\n # coerce if block dtype can store value\n values = self.values\n if self._can_hold_element(value):\n # We only get here for non-Extension Blocks, so _try_coerce_args\n # is only relevant for DatetimeBlock and TimedeltaBlock\n if lib.is_scalar(value):\n value = convert_scalar(values, value)\n\n else:\n # current dtype cannot store value, coerce to common dtype\n find_dtype = False\n\n if hasattr(value, \"dtype\"):\n dtype = value.dtype\n find_dtype = True\n\n elif lib.is_scalar(value) and not isna(value):\n dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)\n find_dtype = True\n\n if find_dtype:\n dtype = find_common_type([values.dtype, dtype])\n if not is_dtype_equal(self.dtype, dtype):\n b = self.astype(dtype)\n return b.setitem(indexer, value)\n\n # value must be storeable at this moment\n if is_extension_array_dtype(getattr(value, \"dtype\", None)):\n # We need to be careful not to allow through strings that\n # can be parsed to EADtypes\n arr_value = value\n else:\n arr_value = np.array(value)\n\n # cast the values to a type that can hold nan (if necessary)\n if not self._can_hold_element(value):\n dtype, _ = maybe_promote(arr_value.dtype)\n values = values.astype(dtype)\n\n if transpose:\n values = values.T\n\n # length checking\n check_setitem_lengths(indexer, value, values)\n\n if is_empty_indexer(indexer, arr_value):\n # GH#8669 empty indexers\n pass\n\n elif is_scalar_indexer(indexer, arr_value):\n # setting a single element for each dim and with a rhs that could\n # be e.g. a list; see GH#6043\n values[indexer] = value\n\n # if we are an exact match (ex-broadcasting),\n # then use the resultant dtype\n elif (\n len(arr_value.shape)\n and arr_value.shape[0] == values.shape[0]\n and arr_value.size == values.size\n ):\n values[indexer] = value\n try:\n values = values.astype(arr_value.dtype)\n except ValueError:\n pass\n\n # set\n else:\n values[indexer] = value\n\n if transpose:\n values = values.T\n block = self.make_block(values)\n return block\n\n def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):\n \"\"\" putmask the data to the block; it is possible that we may create a\n new dtype of block\n\n return the resulting block(s)\n\n Parameters\n ----------\n mask : the condition to respect\n new : a ndarray/object\n align : boolean, perform alignment on other/cond, default is True\n inplace : perform inplace modification, default is False\n axis : int\n transpose : boolean\n Set to True if self is stored with axes reversed\n\n Returns\n -------\n a list of new blocks, the result of the putmask\n \"\"\"\n\n new_values = self.values if inplace else self.values.copy()\n\n new = getattr(new, \"values\", new)\n mask = getattr(mask, \"values\", mask)\n\n # if we are passed a scalar None, convert it here\n if not is_list_like(new) and isna(new) and not self.is_object:\n # FIXME: make sure we have compatible NA\n new = self.fill_value\n\n if self._can_hold_element(new):\n # We only get here for non-Extension Blocks, so _try_coerce_args\n # is only relevant for DatetimeBlock and TimedeltaBlock\n if lib.is_scalar(new):\n new = convert_scalar(new_values, new)\n\n if transpose:\n new_values = new_values.T\n\n # If the default repeat behavior in np.putmask would go in the\n # wrong direction, then explicitly repeat and reshape new instead\n if getattr(new, \"ndim\", 0) >= 1:\n 
if self.ndim - 1 == new.ndim and axis == 1:\n new = np.repeat(new, new_values.shape[-1]).reshape(self.shape)\n new = new.astype(new_values.dtype)\n\n # we require exact matches between the len of the\n # values we are setting (or is compat). np.putmask\n # doesn't check this and will simply truncate / pad\n # the output, but we want sane error messages\n #\n # TODO: this prob needs some better checking\n # for 2D cases\n if (\n is_list_like(new)\n and np.any(mask[mask])\n and getattr(new, \"ndim\", 1) == 1\n ):\n if mask[mask].shape[-1] == len(new):\n # GH 30567\n # If length of ``new`` is less than the length of ``new_values``,\n # `np.putmask` would first repeat the ``new`` array and then\n # assign the masked values hence produces incorrect result.\n # `np.place` on the other hand uses the ``new`` values at it is\n # to place in the masked locations of ``new_values``\n np.place(new_values, mask, new)\n elif mask.shape[-1] == len(new) or len(new) == 1:\n np.putmask(new_values, mask, new)\n else:\n raise ValueError(\"cannot assign mismatch length to masked array\")\n else:\n np.putmask(new_values, mask, new)\n\n # maybe upcast me\n elif mask.any():\n if transpose:\n mask = mask.T\n if isinstance(new, np.ndarray):\n new = new.T\n axis = new_values.ndim - axis - 1\n\n # Pseudo-broadcast\n if getattr(new, \"ndim\", 0) >= 1:\n if self.ndim - 1 == new.ndim:\n new_shape = list(new.shape)\n new_shape.insert(axis, 1)\n new = new.reshape(tuple(new_shape))\n\n # operate column-by-column\n def f(mask, val, idx):\n\n if idx is None:\n # ndim==1 case.\n n = new\n else:\n\n if isinstance(new, np.ndarray):\n n = np.squeeze(new[idx % new.shape[0]])\n else:\n n = np.array(new)\n\n # type of the new block\n dtype, _ = maybe_promote(n.dtype)\n\n # we need to explicitly astype here to make a copy\n n = n.astype(dtype)\n\n nv = _putmask_smart(val, mask, n)\n return nv\n\n new_blocks = self.split_and_operate(mask, f, inplace)\n return new_blocks\n\n if inplace:\n return [self]\n\n if transpose:\n new_values = new_values.T\n\n return [self.make_block(new_values)]\n\n def coerce_to_target_dtype(self, other):\n \"\"\"\n coerce the current block to a dtype compat for other\n we will return a block, possibly object, and not raise\n\n we can also safely try to coerce to the same dtype\n and will receive the same block\n \"\"\"\n\n # if we cannot then coerce to object\n dtype, _ = infer_dtype_from(other, pandas_dtype=True)\n\n if is_dtype_equal(self.dtype, dtype):\n return self\n\n if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):\n # we don't upcast to bool\n return self.astype(object)\n\n elif (self.is_float or self.is_complex) and (\n is_integer_dtype(dtype) or is_float_dtype(dtype)\n ):\n # don't coerce float/complex to int\n return self\n\n elif (\n self.is_datetime\n or is_datetime64_dtype(dtype)\n or is_datetime64tz_dtype(dtype)\n ):\n\n # not a datetime\n if not (\n (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype))\n and self.is_datetime\n ):\n return self.astype(object)\n\n # don't upcast timezone with different timezone or no timezone\n mytz = getattr(self.dtype, \"tz\", None)\n othertz = getattr(dtype, \"tz\", None)\n\n if not tz_compare(mytz, othertz):\n return self.astype(object)\n\n raise AssertionError(\n f\"possible recursion in coerce_to_target_dtype: {self} {other}\"\n )\n\n elif self.is_timedelta or is_timedelta64_dtype(dtype):\n\n # not a timedelta\n if not (is_timedelta64_dtype(dtype) and self.is_timedelta):\n return self.astype(object)\n\n raise AssertionError(\n 
f\"possible recursion in coerce_to_target_dtype: {self} {other}\"\n )\n\n try:\n return self.astype(dtype)\n except (ValueError, TypeError, OverflowError):\n return self.astype(object)\n\n def interpolate(\n self,\n method=\"pad\",\n axis=0,\n index=None,\n values=None,\n inplace=False,\n limit=None,\n limit_direction=\"forward\",\n limit_area=None,\n fill_value=None,\n coerce=False,\n downcast=None,\n **kwargs,\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n def check_int_bool(self, inplace):\n # Only FloatBlocks will contain NaNs.\n # timedelta subclasses IntBlock\n if (self.is_bool or self.is_integer) and not self.is_timedelta:\n if inplace:\n return self\n else:\n return self.copy()\n\n # a fill na type method\n try:\n m = missing.clean_fill_method(method)\n except ValueError:\n m = None\n\n if m is not None:\n r = check_int_bool(self, inplace)\n if r is not None:\n return r\n return self._interpolate_with_fill(\n method=m,\n axis=axis,\n inplace=inplace,\n limit=limit,\n fill_value=fill_value,\n coerce=coerce,\n downcast=downcast,\n )\n # validate the interp method\n m = missing.clean_interp_method(method, **kwargs)\n\n r = check_int_bool(self, inplace)\n if r is not None:\n return r\n return self._interpolate(\n method=m,\n index=index,\n values=values,\n axis=axis,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n fill_value=fill_value,\n inplace=inplace,\n downcast=downcast,\n **kwargs,\n )\n\n def _interpolate_with_fill(\n self,\n method=\"pad\",\n axis=0,\n inplace=False,\n limit=None,\n fill_value=None,\n coerce=False,\n downcast=None,\n ):\n \"\"\" fillna but using the interpolate machinery \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # if we are coercing, then don't force the conversion\n # if the block can't hold the type\n if coerce:\n if not self._can_hold_na:\n if inplace:\n return [self]\n else:\n return [self.copy()]\n\n values = self.values if inplace else self.values.copy()\n\n # We only get here for non-ExtensionBlock\n fill_value = convert_scalar(self.values, fill_value)\n\n values = missing.interpolate_2d(\n values,\n method=method,\n axis=axis,\n limit=limit,\n fill_value=fill_value,\n dtype=self.dtype,\n )\n\n blocks = [self.make_block_same_class(values, ndim=self.ndim)]\n return self._maybe_downcast(blocks, downcast)\n\n def _interpolate(\n self,\n method=None,\n index=None,\n values=None,\n fill_value=None,\n axis=0,\n limit=None,\n limit_direction=\"forward\",\n limit_area=None,\n inplace=False,\n downcast=None,\n **kwargs,\n ):\n \"\"\" interpolate using scipy wrappers \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n data = self.values if inplace else self.values.copy()\n\n # only deal with floats\n if not self.is_float:\n if not self.is_integer:\n return self\n data = data.astype(np.float64)\n\n if fill_value is None:\n fill_value = self.fill_value\n\n if method in (\"krogh\", \"piecewise_polynomial\", \"pchip\"):\n if not index.is_monotonic:\n raise ValueError(\n f\"{method} interpolation requires that the index be monotonic.\"\n )\n # process 1-d slices in the axis direction\n\n def func(x):\n\n # process a 1-d slice, returning it\n # should the axis argument be handled below in apply_along_axis?\n # i.e. 
not an arg to missing.interpolate_1d\n return missing.interpolate_1d(\n index,\n x,\n method=method,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n fill_value=fill_value,\n bounds_error=False,\n **kwargs,\n )\n\n # interp each column independently\n interp_values = np.apply_along_axis(func, axis, data)\n\n blocks = [self.make_block_same_class(interp_values)]\n return self._maybe_downcast(blocks, downcast)\n\n def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):\n \"\"\"\n Take values according to indexer and return them as a block.bb\n\n \"\"\"\n\n # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock\n # so need to preserve types\n # sparse is treated like an ndarray, but needs .get_values() shaping\n\n values = self.values\n\n if fill_tuple is None:\n fill_value = self.fill_value\n allow_fill = False\n else:\n fill_value = fill_tuple[0]\n allow_fill = True\n\n new_values = algos.take_nd(\n values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value\n )\n\n # Called from three places in managers, all of which satisfy\n # this assertion\n assert not (axis == 0 and new_mgr_locs is None)\n if new_mgr_locs is None:\n new_mgr_locs = self.mgr_locs\n\n if not is_dtype_equal(new_values.dtype, self.dtype):\n return self.make_block(new_values, new_mgr_locs)\n else:\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n def diff(self, n: int, axis: int = 1) -> List[\"Block\"]:\n \"\"\" return block for the diff of the values \"\"\"\n new_values = algos.diff(self.values, n, axis=axis)\n return [self.make_block(values=new_values)]\n\n def shift(self, periods, axis=0, fill_value=None):\n \"\"\" shift the block by periods, possibly upcast \"\"\"\n\n # convert integer to float if necessary. need to do a lot more than\n # that, handle boolean etc also\n new_values, fill_value = maybe_upcast(self.values, fill_value)\n\n # make sure array sent to np.roll is c_contiguous\n f_ordered = new_values.flags.f_contiguous\n if f_ordered:\n new_values = new_values.T\n axis = new_values.ndim - axis - 1\n\n if np.prod(new_values.shape):\n new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis)\n\n axis_indexer = [slice(None)] * self.ndim\n if periods > 0:\n axis_indexer[axis] = slice(None, periods)\n else:\n axis_indexer[axis] = slice(periods, None)\n new_values[tuple(axis_indexer)] = fill_value\n\n # restore original order\n if f_ordered:\n new_values = new_values.T\n\n return [self.make_block(new_values)]\n\n def where(\n self,\n other,\n cond,\n align=True,\n errors=\"raise\",\n try_cast: bool = False,\n axis: int = 0,\n ) -> List[\"Block\"]:\n \"\"\"\n evaluate the block; return result block(s) from the result\n\n Parameters\n ----------\n other : a ndarray/object\n cond : the condition to respect\n align : boolean, perform alignment on other/cond\n errors : str, {'raise', 'ignore'}, default 'raise'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. 
On error return original object\n axis : int\n\n Returns\n -------\n a new block(s), the result of the func\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n assert errors in [\"raise\", \"ignore\"]\n transpose = self.ndim == 2\n\n values = self.values\n orig_other = other\n if transpose:\n values = values.T\n\n other = getattr(other, \"_values\", getattr(other, \"values\", other))\n cond = getattr(cond, \"values\", cond)\n\n # If the default broadcasting would go in the wrong direction, then\n # explicitly reshape other instead\n if getattr(other, \"ndim\", 0) >= 1:\n if values.ndim - 1 == other.ndim and axis == 1:\n other = other.reshape(tuple(other.shape + (1,)))\n elif transpose and values.ndim == self.ndim - 1:\n cond = cond.T\n\n if not hasattr(cond, \"shape\"):\n raise ValueError(\"where must have a condition that is ndarray like\")\n\n # our where function\n def func(cond, values, other):\n\n if not (\n (self.is_integer or self.is_bool)\n and lib.is_float(other)\n and np.isnan(other)\n ):\n # np.where will cast integer array to floats in this case\n if not self._can_hold_element(other):\n raise TypeError\n if lib.is_scalar(other) and isinstance(values, np.ndarray):\n other = convert_scalar(values, other)\n\n fastres = expressions.where(cond, values, other)\n return fastres\n\n if cond.ravel().all():\n result = values\n else:\n # see if we can operate on the entire block, or need item-by-item\n # or if we are a single block (ndim == 1)\n try:\n result = func(cond, values, other)\n except TypeError:\n\n # we cannot coerce, return a compat dtype\n # we are explicitly ignoring errors\n block = self.coerce_to_target_dtype(other)\n blocks = block.where(\n orig_other,\n cond,\n align=align,\n errors=errors,\n try_cast=try_cast,\n axis=axis,\n )\n return self._maybe_downcast(blocks, \"infer\")\n\n if self._can_hold_na or self.ndim == 1:\n\n if transpose:\n result = result.T\n\n return [self.make_block(result)]\n\n # might need to separate out blocks\n axis = cond.ndim - 1\n cond = cond.swapaxes(axis, 0)\n mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool)\n\n result_blocks = []\n for m in [mask, ~mask]:\n if m.any():\n taken = result.take(m.nonzero()[0], axis=axis)\n r = maybe_downcast_numeric(taken, self.dtype)\n nb = self.make_block(r.T, placement=self.mgr_locs[m])\n result_blocks.append(nb)\n\n return result_blocks\n\n def equals(self, other) -> bool:\n if self.dtype != other.dtype or self.shape != other.shape:\n return False\n return array_equivalent(self.values, other.values)\n\n def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):\n \"\"\"Return a list of unstacked blocks of self\n\n Parameters\n ----------\n unstacker_func : callable\n Partially applied unstacker.\n new_columns : Index\n All columns of the unstacked BlockManager.\n n_rows : int\n Only used in ExtensionBlock._unstack\n fill_value : int\n Only used in ExtensionBlock._unstack\n\n Returns\n -------\n blocks : list of Block\n New blocks of unstacked values.\n mask : array_like of bool\n The mask of columns of `blocks` we should keep.\n \"\"\"\n unstacker = unstacker_func(self.values.T)\n new_items = unstacker.get_new_columns()\n new_placement = new_columns.get_indexer(new_items)\n new_values, mask = unstacker.get_new_values()\n\n mask = mask.any(0)\n new_values = new_values.T[mask]\n new_placement = new_placement[mask]\n\n blocks = [make_block(new_values, placement=new_placement)]\n return blocks, mask\n\n def quantile(self, qs, interpolation=\"linear\", 
axis=0):\n \"\"\"\n compute the quantiles of the\n\n Parameters\n ----------\n qs: a scalar or list of the quantiles to be computed\n interpolation: type of interpolation, default 'linear'\n axis: axis to compute, default 0\n\n Returns\n -------\n Block\n \"\"\"\n # We should always have ndim == 2 because Series dispatches to DataFrame\n assert self.ndim == 2\n\n values = self.get_values()\n\n is_empty = values.shape[axis] == 0\n orig_scalar = not is_list_like(qs)\n if orig_scalar:\n # make list-like, unpack later\n qs = [qs]\n\n if is_empty:\n # create the array of na_values\n # 2d len(values) * len(qs)\n result = np.repeat(\n np.array([self.fill_value] * len(qs)), len(values)\n ).reshape(len(values), len(qs))\n else:\n # asarray needed for Sparse, see GH#24600\n mask = np.asarray(isna(values))\n result = nanpercentile(\n values,\n np.array(qs) * 100,\n axis=axis,\n na_value=self.fill_value,\n mask=mask,\n ndim=values.ndim,\n interpolation=interpolation,\n )\n\n result = np.array(result, copy=False)\n result = result.T\n\n if orig_scalar and not lib.is_scalar(result):\n # result could be scalar in case with is_empty and self.ndim == 1\n assert result.shape[-1] == 1, result.shape\n result = result[..., 0]\n result = lib.item_from_zerodim(result)\n\n ndim = np.ndim(result)\n return make_block(result, placement=np.arange(len(result)), ndim=ndim)\n\n def _replace_coerce(\n self, to_replace, value, inplace=True, regex=False, convert=False, mask=None\n ):\n \"\"\"\n Replace value corresponding to the given boolean array with another\n value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n regex : bool, default False\n If true, perform regular expression substitution.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n A new block if there is anything to replace or the original block.\n \"\"\"\n\n if mask.any():\n if not regex:\n self = self.coerce_to_target_dtype(value)\n return self.putmask(mask, value, inplace=inplace)\n else:\n return self._replace_single(\n to_replace,\n value,\n inplace=inplace,\n regex=regex,\n convert=convert,\n mask=mask,\n )\n return self\n\n\nclass NonConsolidatableMixIn:\n \"\"\" hold methods for the nonconsolidatable blocks \"\"\"\n\n _can_consolidate = False\n _verify_integrity = False\n _validate_ndim = False\n\n def __init__(self, values, placement, ndim=None):\n \"\"\"Initialize a non-consolidatable block.\n\n 'ndim' may be inferred from 'placement'.\n\n This will call continue to call __init__ for the other base\n classes mixed in with this Mixin.\n \"\"\"\n # Placement must be converted to BlockPlacement so that we can check\n # its length\n if not isinstance(placement, libinternals.BlockPlacement):\n placement = libinternals.BlockPlacement(placement)\n\n # Maybe infer ndim from placement\n if ndim is None:\n if len(placement) != 1:\n ndim = 1\n else:\n ndim = 2\n super().__init__(values, placement, ndim=ndim)\n\n @property\n def shape(self):\n if self.ndim == 1:\n return ((len(self.values)),)\n return (len(self.mgr_locs), len(self.values))\n\n def iget(self, col):\n\n if self.ndim == 2 and isinstance(col, tuple):\n col, loc = col\n if not com.is_null_slice(col) and col != 0:\n raise IndexError(f\"{self} only contains one item\")\n elif 
isinstance(col, slice):\n if col != slice(None):\n raise NotImplementedError(col)\n return self.values[[loc]]\n return self.values[loc]\n else:\n if col != 0:\n raise IndexError(f\"{self} only contains one item\")\n return self.values\n\n def should_store(self, value):\n return isinstance(value, self._holder)\n\n def set(self, locs, values, check=False):\n assert locs.tolist() == [0]\n self.values = values\n\n def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):\n \"\"\"\n putmask the data to the block; we must be a single block and not\n generate other blocks\n\n return the resulting block\n\n Parameters\n ----------\n mask : the condition to respect\n new : a ndarray/object\n align : boolean, perform alignment on other/cond, default is True\n inplace : perform inplace modification, default is False\n\n Returns\n -------\n a new block, the result of the putmask\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # use block's copy logic.\n # .values may be an Index which does shallow copy by default\n new_values = self.values if inplace else self.copy().values\n\n if isinstance(new, np.ndarray) and len(new) == len(mask):\n new = new[mask]\n\n mask = _safe_reshape(mask, new_values.shape)\n\n new_values[mask] = new\n return [self.make_block(values=new_values)]\n\n def _get_unstack_items(self, unstacker, new_columns):\n \"\"\"\n Get the placement, values, and mask for a Block unstack.\n\n This is shared between ObjectBlock and ExtensionBlock. They\n differ in that ObjectBlock passes the values, while ExtensionBlock\n passes the dummy ndarray of positions to be used by a take\n later.\n\n Parameters\n ----------\n unstacker : pandas.core.reshape.reshape._Unstacker\n new_columns : Index\n All columns of the unstacked BlockManager.\n\n Returns\n -------\n new_placement : ndarray[int]\n The placement of the new columns in `new_columns`.\n new_values : Union[ndarray, ExtensionArray]\n The first return value from _Unstacker.get_new_values.\n mask : ndarray[bool]\n The second return value from _Unstacker.get_new_values.\n \"\"\"\n # shared with ExtensionBlock\n new_items = unstacker.get_new_columns()\n new_placement = new_columns.get_indexer(new_items)\n new_values, mask = unstacker.get_new_values()\n\n mask = mask.any(0)\n return new_placement, new_values, mask\n\n\nclass ExtensionBlock(NonConsolidatableMixIn, Block):\n \"\"\"Block for holding extension types.\n\n Notes\n -----\n This holds all 3rd-party extension array types. It's also the immediate\n parent class for our internal extension types' blocks, CategoricalBlock.\n\n ExtensionArrays are limited to 1-D.\n \"\"\"\n\n is_extension = True\n\n def __init__(self, values, placement, ndim=None):\n values = self._maybe_coerce_values(values)\n super().__init__(values, placement, ndim)\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Unbox to an extension array.\n\n This will unbox an ExtensionArray stored in an Index or Series.\n ExtensionArrays pass through. 
No dtype coercion is done.\n\n Parameters\n ----------\n values : Index, Series, ExtensionArray\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n return extract_array(values)\n\n @property\n def _holder(self):\n # For extension blocks, the holder is values-dependent.\n return type(self.values)\n\n @property\n def fill_value(self):\n # Used in reindex_indexer\n return self.values.dtype.na_value\n\n @property\n def _can_hold_na(self):\n # The default ExtensionArray._can_hold_na is True\n return self._holder._can_hold_na\n\n @property\n def is_view(self):\n \"\"\"Extension arrays are never treated as views.\"\"\"\n return False\n\n @property\n def is_numeric(self):\n return self.values.dtype._is_numeric\n\n def setitem(self, indexer, value):\n \"\"\"Set the value inplace, returning a same-typed block.\n\n This differs from Block.setitem by not allowing setitem to change\n the dtype of the Block.\n\n Parameters\n ----------\n indexer : tuple, list-like, array-like, slice\n The subset of self.values to set\n value : object\n The value being set\n\n Returns\n -------\n Block\n\n Notes\n -----\n `indexer` is a direct slice/positional indexer. `value` must\n be a compatible shape.\n \"\"\"\n if isinstance(indexer, tuple):\n # we are always 1-D\n indexer = indexer[0]\n\n check_setitem_lengths(indexer, value, self.values)\n self.values[indexer] = value\n return self\n\n def get_values(self, dtype=None):\n # ExtensionArrays must be iterable, so this works.\n values = np.asarray(self.values)\n if values.ndim == self.ndim - 1:\n values = values.reshape((1,) + values.shape)\n return values\n\n def to_dense(self):\n return np.asarray(self.values)\n\n def to_native_types(self, slicer=None, na_rep=\"nan\", quoting=None, **kwargs):\n \"\"\"override to use ExtensionArray astype for the conversion\"\"\"\n values = self.values\n if slicer is not None:\n values = values[slicer]\n mask = isna(values)\n\n values = np.asarray(values.astype(object))\n values[mask] = na_rep\n\n # we are expected to return a 2-d ndarray\n return values.reshape(1, len(values))\n\n def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):\n \"\"\"\n Take values according to indexer and return them as a block.\n \"\"\"\n if fill_tuple is None:\n fill_value = None\n else:\n fill_value = fill_tuple[0]\n\n # axis doesn't matter; we are really a single-dim object\n # but are passed the axis depending on the calling routing\n # if its REALLY axis 0, then this will be a reindex and not a take\n new_values = self.values.take(indexer, fill_value=fill_value, allow_fill=True)\n\n # Called from three places in managers, all of which satisfy\n # this assertion\n assert not (self.ndim == 1 and new_mgr_locs is None)\n if new_mgr_locs is None:\n new_mgr_locs = self.mgr_locs\n\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n def _can_hold_element(self, element: Any) -> bool:\n # XXX: We may need to think about pushing this onto the array.\n # We're doing the same as CategoricalBlock here.\n return True\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n\n # slice the category\n # return same dims as we currently have\n\n if isinstance(slicer, tuple) and len(slicer) == 2:\n if not com.is_null_slice(slicer[0]):\n raise AssertionError(\"invalid slicing for a 1-ndim categorical\")\n slicer = slicer[1]\n\n return self.values[slicer]\n\n def concat_same_type(self, to_concat, placement=None):\n \"\"\"\n Concatenate list of single blocks of the same type.\n \"\"\"\n values = 
self._holder._concat_same_type([blk.values for blk in to_concat])\n placement = placement or slice(0, len(values), 1)\n return self.make_block_same_class(values, ndim=self.ndim, placement=placement)\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n values = self.values if inplace else self.values.copy()\n values = values.fillna(value=value, limit=limit)\n return [\n self.make_block_same_class(\n values=values, placement=self.mgr_locs, ndim=self.ndim\n )\n ]\n\n def interpolate(\n self, method=\"pad\", axis=0, inplace=False, limit=None, fill_value=None, **kwargs\n ):\n\n values = self.values if inplace else self.values.copy()\n return self.make_block_same_class(\n values=values.fillna(value=fill_value, method=method, limit=limit),\n placement=self.mgr_locs,\n )\n\n def shift(\n self,\n periods: int,\n axis: libinternals.BlockPlacement = 0,\n fill_value: Any = None,\n ) -> List[\"ExtensionBlock\"]:\n \"\"\"\n Shift the block by `periods`.\n\n Dispatches to underlying ExtensionArray and re-boxes in an\n ExtensionBlock.\n \"\"\"\n return [\n self.make_block_same_class(\n self.values.shift(periods=periods, fill_value=fill_value),\n placement=self.mgr_locs,\n ndim=self.ndim,\n )\n ]\n\n def where(\n self,\n other,\n cond,\n align=True,\n errors=\"raise\",\n try_cast: bool = False,\n axis: int = 0,\n ) -> List[\"Block\"]:\n if isinstance(other, ABCDataFrame):\n # ExtensionArrays are 1-D, so if we get here then\n # `other` should be a DataFrame with a single column.\n assert other.shape[1] == 1\n other = other.iloc[:, 0]\n\n other = extract_array(other, extract_numpy=True)\n\n if isinstance(cond, ABCDataFrame):\n assert cond.shape[1] == 1\n cond = cond.iloc[:, 0]\n\n cond = extract_array(cond, extract_numpy=True)\n\n if lib.is_scalar(other) and isna(other):\n # The default `other` for Series / Frame is np.nan\n # we want to replace that with the correct NA value\n # for the type\n other = self.dtype.na_value\n\n if is_sparse(self.values):\n # TODO(SparseArray.__setitem__): remove this if condition\n # We need to re-infer the type of the data after doing the\n # where, for cases where the subtypes don't match\n dtype = None\n else:\n dtype = self.dtype\n\n result = self.values.copy()\n icond = ~cond\n if lib.is_scalar(other):\n set_other = other\n else:\n set_other = other[icond]\n try:\n result[icond] = set_other\n except (NotImplementedError, TypeError):\n # NotImplementedError for class not implementing `__setitem__`\n # TypeError for SparseArray, which implements just to raise\n # a TypeError\n result = self._holder._from_sequence(\n np.where(cond, self.values, other), dtype=dtype\n )\n\n return [self.make_block_same_class(result, placement=self.mgr_locs)]\n\n @property\n def _ftype(self):\n return getattr(self.values, \"_pandas_ftype\", Block._ftype)\n\n def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):\n # ExtensionArray-safe unstack.\n # We override ObjectBlock._unstack, which unstacks directly on the\n # values of the array. 
For EA-backed blocks, this would require\n # converting to a 2-D ndarray of objects.\n # Instead, we unstack an ndarray of integer positions, followed by\n # a `take` on the actual values.\n dummy_arr = np.arange(n_rows)\n dummy_unstacker = functools.partial(unstacker_func, fill_value=-1)\n unstacker = dummy_unstacker(dummy_arr)\n\n new_placement, new_values, mask = self._get_unstack_items(\n unstacker, new_columns\n )\n\n blocks = [\n self.make_block_same_class(\n self.values.take(indices, allow_fill=True, fill_value=fill_value),\n [place],\n )\n for indices, place in zip(new_values.T, new_placement)\n ]\n return blocks, mask\n\n\nclass ObjectValuesExtensionBlock(ExtensionBlock):\n \"\"\"\n Block providing backwards-compatibility for `.values`.\n\n Used by PeriodArray and IntervalArray to ensure that\n Series[T].values is an ndarray of objects.\n \"\"\"\n\n def external_values(self, dtype=None):\n return self.values.astype(object)\n\n\nclass NumericBlock(Block):\n __slots__ = ()\n is_numeric = True\n _can_hold_na = True\n\n\nclass FloatOrComplexBlock(NumericBlock):\n __slots__ = ()\n\n def equals(self, other) -> bool:\n if self.dtype != other.dtype or self.shape != other.shape:\n return False\n left, right = self.values, other.values\n return ((left == right) | (np.isnan(left) & np.isnan(right))).all()\n\n\nclass FloatBlock(FloatOrComplexBlock):\n __slots__ = ()\n is_float = True\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(\n tipo.type, (np.datetime64, np.timedelta64)\n )\n return isinstance(\n element, (float, int, np.floating, np.int_)\n ) and not isinstance(\n element,\n (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64),\n )\n\n def to_native_types(\n self,\n slicer=None,\n na_rep=\"\",\n float_format=None,\n decimal=\".\",\n quoting=None,\n **kwargs,\n ):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n if slicer is not None:\n values = values[:, slicer]\n\n # see gh-13418: no special formatting is desired at the\n # output (important for appropriate 'quoting' behaviour),\n # so do not pass it through the FloatArrayFormatter\n if float_format is None and decimal == \".\":\n mask = isna(values)\n\n if not quoting:\n values = values.astype(str)\n else:\n values = np.array(values, dtype=\"object\")\n\n values[mask] = na_rep\n return values\n\n from pandas.io.formats.format import FloatArrayFormatter\n\n formatter = FloatArrayFormatter(\n values,\n na_rep=na_rep,\n float_format=float_format,\n decimal=decimal,\n quoting=quoting,\n fixed_width=False,\n )\n return formatter.get_result_as_array()\n\n def should_store(self, value):\n # when inserting a column should not coerce integers to floats\n # unnecessarily\n return issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype\n\n\nclass ComplexBlock(FloatOrComplexBlock):\n __slots__ = ()\n is_complex = True\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating))\n return isinstance(\n element, (float, int, complex, np.float_, np.int_)\n ) and not isinstance(element, (bool, np.bool_))\n\n def should_store(self, value):\n return issubclass(value.dtype.type, np.complexfloating)\n\n\nclass IntBlock(NumericBlock):\n __slots__ = ()\n is_integer = True\n _can_hold_na = False\n\n 
def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return (\n issubclass(tipo.type, np.integer)\n and not issubclass(tipo.type, (np.datetime64, np.timedelta64))\n and self.dtype.itemsize >= tipo.itemsize\n )\n return is_integer(element)\n\n def should_store(self, value):\n return is_integer_dtype(value) and value.dtype == self.dtype\n\n\nclass DatetimeLikeBlockMixin:\n \"\"\"Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.\"\"\"\n\n @property\n def _holder(self):\n return DatetimeArray\n\n @property\n def fill_value(self):\n return np.datetime64(\"NaT\", \"ns\")\n\n def get_values(self, dtype=None):\n \"\"\"\n return object dtype as boxed values, such as Timestamps/Timedelta\n \"\"\"\n if is_object_dtype(dtype):\n values = self.values.ravel()\n result = self._holder(values).astype(object)\n return result.reshape(self.values.shape)\n return self.values\n\n\nclass DatetimeBlock(DatetimeLikeBlockMixin, Block):\n __slots__ = ()\n is_datetime = True\n\n def __init__(self, values, placement, ndim=None):\n values = self._maybe_coerce_values(values)\n super().__init__(values, placement=placement, ndim=ndim)\n\n @property\n def _can_hold_na(self):\n return True\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Input validation for values passed to __init__. Ensure that\n we have datetime64ns, coercing if necessary.\n\n Parameters\n ----------\n values : array-like\n Must be convertible to datetime64\n\n Returns\n -------\n values : ndarray[datetime64ns]\n\n Overridden by DatetimeTZBlock.\n \"\"\"\n if values.dtype != _NS_DTYPE:\n values = conversion.ensure_datetime64ns(values)\n\n if isinstance(values, DatetimeArray):\n values = values._data\n\n assert isinstance(values, np.ndarray), type(values)\n return values\n\n def astype(self, dtype, copy: bool = False, errors: str = \"raise\"):\n \"\"\"\n these automatically copy, so copy=True has no effect\n raise on an except if raise == True\n \"\"\"\n dtype = pandas_dtype(dtype)\n\n # if we are passed a datetime64[ns, tz]\n if is_datetime64tz_dtype(dtype):\n values = self.values\n if getattr(values, \"tz\", None) is None:\n values = DatetimeArray(values).tz_localize(\"UTC\")\n values = values.tz_convert(dtype.tz)\n return self.make_block(values)\n\n # delegate\n return super().astype(dtype=dtype, copy=copy, errors=errors)\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n if self.is_datetimetz:\n # require exact match, since non-nano does not exist\n return is_dtype_equal(tipo, self.dtype) or is_valid_nat_for_dtype(\n element, self.dtype\n )\n\n # GH#27419 if we get a non-nano datetime64 object\n return is_datetime64_dtype(tipo)\n elif element is NaT:\n return True\n elif isinstance(element, datetime):\n if self.is_datetimetz:\n return tz_compare(element.tzinfo, self.dtype.tz)\n return element.tzinfo is None\n\n return is_valid_nat_for_dtype(element, self.dtype)\n\n def to_native_types(\n self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs\n ):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n i8values = self.values.view(\"i8\")\n\n if slicer is not None:\n values = values[..., slicer]\n i8values = i8values[..., slicer]\n\n from pandas.io.formats.format import _get_format_datetime64_from_values\n\n fmt = _get_format_datetime64_from_values(values, date_format)\n\n result = tslib.format_array_from_datetime(\n i8values.ravel(),\n 
tz=getattr(self.values, \"tz\", None),\n format=fmt,\n na_rep=na_rep,\n ).reshape(i8values.shape)\n return np.atleast_2d(result)\n\n def should_store(self, value):\n return (\n issubclass(value.dtype.type, np.datetime64)\n and not is_datetime64tz_dtype(value)\n and not is_extension_array_dtype(value)\n )\n\n def set(self, locs, values):\n \"\"\"\n Modify Block in-place with new item value\n\n Returns\n -------\n None\n \"\"\"\n values = conversion.ensure_datetime64ns(values, copy=False)\n\n self.values[locs] = values\n\n def external_values(self):\n return np.asarray(self.values.astype(\"datetime64[ns]\", copy=False))\n\n\nclass DatetimeTZBlock(ExtensionBlock, DatetimeBlock):\n \"\"\" implement a datetime64 block with a tz attribute \"\"\"\n\n __slots__ = ()\n is_datetimetz = True\n is_extension = True\n\n _can_hold_element = DatetimeBlock._can_hold_element\n to_native_types = DatetimeBlock.to_native_types\n fill_value = np.datetime64(\"NaT\", \"ns\")\n\n @property\n def _holder(self):\n return DatetimeArray\n\n def _maybe_coerce_values(self, values):\n \"\"\"Input validation for values passed to __init__. Ensure that\n we have datetime64TZ, coercing if necessary.\n\n Parameters\n ----------\n values : array-like\n Must be convertible to datetime64\n\n Returns\n -------\n values : DatetimeArray\n \"\"\"\n if not isinstance(values, self._holder):\n values = self._holder(values)\n\n if values.tz is None:\n raise ValueError(\"cannot create a DatetimeTZBlock without a tz\")\n\n return values\n\n @property\n def is_view(self):\n \"\"\" return a boolean if I am possibly a view \"\"\"\n # check the ndarray values of the DatetimeIndex values\n return self.values._data.base is not None\n\n def get_values(self, dtype=None):\n \"\"\"\n Returns an ndarray of values.\n\n Parameters\n ----------\n dtype : np.dtype\n Only `object`-like dtypes are respected here (not sure\n why).\n\n Returns\n -------\n values : ndarray\n When ``dtype=object``, then and object-dtype ndarray of\n boxed values is returned. Otherwise, an M8[ns] ndarray\n is returned.\n\n DatetimeArray is always 1-d. ``get_values`` will reshape\n the return value to be the same dimensionality as the\n block.\n \"\"\"\n values = self.values\n if is_object_dtype(dtype):\n values = values.astype(object)\n\n values = np.asarray(values)\n\n if self.ndim == 2:\n # Ensure that our shape is correct for DataFrame.\n # ExtensionArrays are always 1-D, even in a DataFrame when\n # the analogous NumPy-backed column would be a 2-D ndarray.\n values = values.reshape(1, -1)\n return values\n\n def to_dense(self):\n # we request M8[ns] dtype here, even though it discards tzinfo,\n # as lots of code (e.g. 
anything using values_from_object)\n # expects that behavior.\n return np.asarray(self.values, dtype=_NS_DTYPE)\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n if isinstance(slicer, tuple):\n col, loc = slicer\n if not com.is_null_slice(col) and col != 0:\n raise IndexError(f\"{self} only contains one item\")\n return self.values[loc]\n return self.values[slicer]\n\n def diff(self, n: int, axis: int = 0) -> List[\"Block\"]:\n \"\"\"\n 1st discrete difference.\n\n Parameters\n ----------\n n : int\n Number of periods to diff.\n axis : int, default 0\n Axis to diff upon.\n\n Returns\n -------\n A list with a new TimeDeltaBlock.\n\n Notes\n -----\n The arguments here are mimicking shift so they are called correctly\n by apply.\n \"\"\"\n if axis == 0:\n # Cannot currently calculate diff across multiple blocks since this\n # function is invoked via apply\n raise NotImplementedError\n new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8\n\n # Reshape the new_values like how algos.diff does for timedelta data\n new_values = new_values.reshape(1, len(new_values))\n new_values = new_values.astype(\"timedelta64[ns]\")\n return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]\n\n def concat_same_type(self, to_concat, placement=None):\n # need to handle concat([tz1, tz2]) here, since DatetimeArray\n # only handles cases where all the tzs are the same.\n # Instead of placing the condition here, it could also go into the\n # is_uniform_join_units check, but I'm not sure what is better.\n if len({x.dtype for x in to_concat}) > 1:\n values = concat_datetime([x.values for x in to_concat])\n placement = placement or slice(0, len(values), 1)\n\n if self.ndim > 1:\n values = np.atleast_2d(values)\n return ObjectBlock(values, ndim=self.ndim, placement=placement)\n return super().concat_same_type(to_concat, placement)\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n # We support filling a DatetimeTZ with a `value` whose timezone\n # is different by coercing to object.\n if self._can_hold_element(value):\n return super().fillna(value, limit, inplace, downcast)\n\n # different timezones, or a non-tz\n return self.astype(object).fillna(\n value, limit=limit, inplace=inplace, downcast=downcast\n )\n\n def setitem(self, indexer, value):\n # https://github.com/pandas-dev/pandas/issues/24020\n # Need a dedicated setitem until #24020 (type promotion in setitem\n # for extension arrays) is designed and implemented.\n if self._can_hold_element(value) or (\n isinstance(indexer, np.ndarray) and indexer.size == 0\n ):\n return super().setitem(indexer, value)\n\n obj_vals = self.values.astype(object)\n newb = make_block(\n obj_vals, placement=self.mgr_locs, klass=ObjectBlock, ndim=self.ndim\n )\n return newb.setitem(indexer, value)\n\n def equals(self, other) -> bool:\n # override for significant performance improvement\n if self.dtype != other.dtype or self.shape != other.shape:\n return False\n return (self.values.view(\"i8\") == other.values.view(\"i8\")).all()\n\n def quantile(self, qs, interpolation=\"linear\", axis=0):\n naive = self.values.view(\"M8[ns]\")\n\n # kludge for 2D block with 1D values\n naive = naive.reshape(self.shape)\n\n blk = self.make_block(naive)\n res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis)\n\n # ravel is kludge for 2D block with 1D values, assumes column-like\n aware = self._holder(res_blk.values.ravel(), dtype=self.dtype)\n return self.make_block_same_class(aware, ndim=res_blk.ndim)\n\n\nclass 
TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):\n __slots__ = ()\n is_timedelta = True\n _can_hold_na = True\n is_numeric = False\n fill_value = np.timedelta64(\"NaT\", \"ns\")\n\n def __init__(self, values, placement, ndim=None):\n if values.dtype != _TD_DTYPE:\n values = conversion.ensure_timedelta64ns(values)\n if isinstance(values, TimedeltaArray):\n values = values._data\n assert isinstance(values, np.ndarray), type(values)\n super().__init__(values, placement=placement, ndim=ndim)\n\n @property\n def _holder(self):\n return TimedeltaArray\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, np.timedelta64)\n elif element is NaT:\n return True\n elif isinstance(element, (timedelta, np.timedelta64)):\n return True\n return is_valid_nat_for_dtype(element, self.dtype)\n\n def fillna(self, value, **kwargs):\n\n # allow filling with integers to be\n # interpreted as nanoseconds\n if is_integer(value):\n # Deprecation GH#24694, GH#19233\n raise TypeError(\n \"Passing integers to fillna for timedelta64[ns] dtype is no \"\n \"longer supported. To obtain the old behavior, pass \"\n \"`pd.Timedelta(seconds=n)` instead.\"\n )\n return super().fillna(value, **kwargs)\n\n def should_store(self, value):\n return issubclass(\n value.dtype.type, np.timedelta64\n ) and not is_extension_array_dtype(value)\n\n def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n if slicer is not None:\n values = values[:, slicer]\n mask = isna(values)\n\n rvalues = np.empty(values.shape, dtype=object)\n if na_rep is None:\n na_rep = \"NaT\"\n rvalues[mask] = na_rep\n imask = (~mask).ravel()\n\n # FIXME:\n # should use the formats.format.Timedelta64Formatter here\n # to figure what format to pass to the Timedelta\n # e.g. 
to not show the decimals say\n rvalues.flat[imask] = np.array(\n [Timedelta(val)._repr_base(format=\"all\") for val in values.ravel()[imask]],\n dtype=object,\n )\n return rvalues\n\n def external_values(self, dtype=None):\n return np.asarray(self.values.astype(\"timedelta64[ns]\", copy=False))\n\n\nclass BoolBlock(NumericBlock):\n __slots__ = ()\n is_bool = True\n _can_hold_na = False\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, np.bool_)\n return isinstance(element, (bool, np.bool_))\n\n def should_store(self, value):\n return issubclass(value.dtype.type, np.bool_) and not is_extension_array_dtype(\n value\n )\n\n def replace(\n self, to_replace, value, inplace=False, filter=None, regex=False, convert=True\n ):\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n to_replace_values = np.atleast_1d(to_replace)\n if not np.can_cast(to_replace_values, bool):\n return self\n return super().replace(\n to_replace,\n value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n\n\nclass ObjectBlock(Block):\n __slots__ = ()\n is_object = True\n _can_hold_na = True\n\n def __init__(self, values, placement=None, ndim=2):\n if issubclass(values.dtype.type, str):\n values = np.array(values, dtype=object)\n\n super().__init__(values, ndim=ndim, placement=placement)\n\n @property\n def is_bool(self):\n \"\"\" we can be a bool if we have only bool values but are of type\n object\n \"\"\"\n return lib.is_bool_array(self.values.ravel())\n\n def convert(\n self,\n copy: bool = True,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n coerce: bool = False,\n ):\n \"\"\" attempt to coerce any object types to better types return a copy of\n the block (if copy = True) by definition we ARE an ObjectBlock!!!!!\n\n can return multiple blocks!\n \"\"\"\n\n # operate column-by-column\n def f(mask, val, idx):\n shape = val.shape\n values = soft_convert_objects(\n val.ravel(),\n datetime=datetime,\n numeric=numeric,\n timedelta=timedelta,\n coerce=coerce,\n copy=copy,\n )\n if isinstance(values, np.ndarray):\n # TODO: allow EA once reshape is supported\n values = values.reshape(shape)\n\n values = _block_shape(values, ndim=self.ndim)\n return values\n\n if self.ndim == 2:\n blocks = self.split_and_operate(None, f, False)\n else:\n values = f(None, self.values.ravel(), None)\n blocks = [make_block(values, ndim=self.ndim, placement=self.mgr_locs)]\n\n return blocks\n\n def _maybe_downcast(self, blocks: List[\"Block\"], downcast=None) -> List[\"Block\"]:\n\n if downcast is not None:\n return blocks\n\n # split and convert the blocks\n return _extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks])\n\n def _can_hold_element(self, element: Any) -> bool:\n return True\n\n def should_store(self, value):\n return not (\n issubclass(\n value.dtype.type,\n (np.integer, np.floating, np.complexfloating, np.datetime64, np.bool_),\n )\n or is_extension_array_dtype(value)\n )\n\n def replace(\n self, to_replace, value, inplace=False, filter=None, regex=False, convert=True\n ):\n to_rep_is_list = is_list_like(to_replace)\n value_is_list = is_list_like(value)\n both_lists = to_rep_is_list and value_is_list\n either_list = to_rep_is_list or value_is_list\n\n result_blocks = []\n blocks = [self]\n\n if not either_list and is_re(to_replace):\n return self._replace_single(\n to_replace,\n value,\n inplace=inplace,\n filter=filter,\n regex=True,\n convert=convert,\n )\n 
elif not (either_list or regex):\n return super().replace(\n to_replace,\n value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n elif both_lists:\n for to_rep, v in zip(to_replace, value):\n result_blocks = []\n for b in blocks:\n result = b._replace_single(\n to_rep,\n v,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n result_blocks = _extend_blocks(result, result_blocks)\n blocks = result_blocks\n return result_blocks\n\n elif to_rep_is_list and regex:\n for to_rep in to_replace:\n result_blocks = []\n for b in blocks:\n result = b._replace_single(\n to_rep,\n value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n result_blocks = _extend_blocks(result, result_blocks)\n blocks = result_blocks\n return result_blocks\n\n return self._replace_single(\n to_replace,\n value,\n inplace=inplace,\n filter=filter,\n convert=convert,\n regex=regex,\n )\n\n def _replace_single(\n self,\n to_replace,\n value,\n inplace=False,\n filter=None,\n regex=False,\n convert=True,\n mask=None,\n ):\n \"\"\"\n Replace elements by the given value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n filter : list, optional\n regex : bool, default False\n If true, perform regular expression substitution.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n a new block, the result after replacing\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # to_replace is regex compilable\n to_rep_re = regex and is_re_compilable(to_replace)\n\n # regex is regex compilable\n regex_re = is_re_compilable(regex)\n\n # only one will survive\n if to_rep_re and regex_re:\n raise AssertionError(\n \"only one of to_replace and regex can be regex compilable\"\n )\n\n # if regex was passed as something that can be a regex (rather than a\n # boolean)\n if regex_re:\n to_replace = regex\n\n regex = regex_re or to_rep_re\n\n # try to get the pattern attribute (compiled re) or it's a string\n if is_re(to_replace):\n pattern = to_replace.pattern\n else:\n pattern = to_replace\n\n # if the pattern is not empty and to_replace is either a string or a\n # regex\n if regex and pattern:\n rx = re.compile(to_replace)\n else:\n # if the thing to replace is not a string or compiled regex call\n # the superclass method -> to_replace is some kind of object\n return super().replace(\n to_replace, value, inplace=inplace, filter=filter, regex=regex\n )\n\n new_values = self.values if inplace else self.values.copy()\n\n # deal with replacing values with objects (strings) that match but\n # whose replacement is not a string (numeric, nan, object)\n if isna(value) or not isinstance(value, str):\n\n def re_replacer(s):\n if is_re(rx) and isinstance(s, str):\n return value if rx.search(s) is not None else s\n else:\n return s\n\n else:\n # value is guaranteed to be a string here, s can be either a string\n # or null if it's null it gets returned\n def re_replacer(s):\n if is_re(rx) and isinstance(s, str):\n return rx.sub(value, s)\n else:\n return s\n\n f = np.vectorize(re_replacer, otypes=[self.dtype])\n\n if filter is None:\n filt = slice(None)\n else:\n filt = self.mgr_locs.isin(filter).nonzero()[0]\n\n if mask is None:\n new_values[filt] = 
f(new_values[filt])\n else:\n new_values[filt][mask] = f(new_values[filt][mask])\n\n # convert\n block = self.make_block(new_values)\n if convert:\n block = block.convert(numeric=False)\n return block\n\n def _replace_coerce(\n self, to_replace, value, inplace=True, regex=False, convert=False, mask=None\n ):\n \"\"\"\n Replace value corresponding to the given boolean array with another\n value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n regex : bool, default False\n If true, perform regular expression substitution.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n A new block if there is anything to replace or the original block.\n \"\"\"\n if mask.any():\n block = super()._replace_coerce(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n regex=regex,\n convert=convert,\n mask=mask,\n )\n if convert:\n block = [b.convert(numeric=False, copy=True) for b in block]\n return block\n if convert:\n return [self.convert(numeric=False, copy=True)]\n return self\n\n\nclass CategoricalBlock(ExtensionBlock):\n __slots__ = ()\n is_categorical = True\n _verify_integrity = True\n _can_hold_na = True\n _concatenator = staticmethod(concat_categorical)\n\n def __init__(self, values, placement, ndim=None):\n # coerce to categorical if we can\n values = extract_array(values)\n assert isinstance(values, Categorical), type(values)\n super().__init__(values, placement=placement, ndim=ndim)\n\n @property\n def _holder(self):\n return Categorical\n\n @property\n def array_dtype(self):\n \"\"\" the dtype to return if I want to construct this block as an\n array\n \"\"\"\n return np.object_\n\n def to_dense(self):\n # Categorical.get_values returns a DatetimeIndex for datetime\n # categories, so we can't simply use `np.asarray(self.values)` like\n # other types.\n return self.values._internal_get_values()\n\n def to_native_types(self, slicer=None, na_rep=\"\", quoting=None, **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n if slicer is not None:\n # Categorical is always one dimension\n values = values[slicer]\n mask = isna(values)\n values = np.array(values, dtype=\"object\")\n values[mask] = na_rep\n\n # we are expected to return a 2-d ndarray\n return values.reshape(1, len(values))\n\n def concat_same_type(self, to_concat, placement=None):\n \"\"\"\n Concatenate list of single blocks of the same type.\n\n Note that this CategoricalBlock._concat_same_type *may* not\n return a CategoricalBlock. When the categories in `to_concat`\n differ, this will return an object ndarray.\n\n If / when we decide we don't like that behavior:\n\n 1. Change Categorical._concat_same_type to use union_categoricals\n 2. 
Delete this method.\n \"\"\"\n values = self._concatenator(\n [blk.values for blk in to_concat], axis=self.ndim - 1\n )\n # not using self.make_block_same_class as values can be object dtype\n return make_block(\n values, placement=placement or slice(0, len(values), 1), ndim=self.ndim\n )\n\n def replace(\n self,\n to_replace,\n value,\n inplace: bool = False,\n filter=None,\n regex: bool = False,\n convert: bool = True,\n ):\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n result = self if inplace else self.copy()\n if filter is None: # replace was called on a series\n result.values.replace(to_replace, value, inplace=True)\n if convert:\n return result.convert(numeric=False, copy=not inplace)\n else:\n return result\n else: # replace was called on a DataFrame\n if not isna(value):\n result.values.add_categories(value, inplace=True)\n return super(CategoricalBlock, result).replace(\n to_replace, value, inplace, filter, regex, convert\n )\n\n\n# -----------------------------------------------------------------\n# Constructor Helpers\n\n\ndef get_block_type(values, dtype=None):\n \"\"\"\n Find the appropriate Block subclass to use for the given values and dtype.\n\n Parameters\n ----------\n values : ndarray-like\n dtype : numpy or pandas dtype\n\n Returns\n -------\n cls : class, subclass of Block\n \"\"\"\n dtype = dtype or values.dtype\n vtype = dtype.type\n\n if is_sparse(dtype):\n # Need this first(ish) so that Sparse[datetime] is sparse\n cls = ExtensionBlock\n elif is_categorical(values):\n cls = CategoricalBlock\n elif issubclass(vtype, np.datetime64):\n assert not is_datetime64tz_dtype(values)\n cls = DatetimeBlock\n elif is_datetime64tz_dtype(values):\n cls = DatetimeTZBlock\n elif is_interval_dtype(dtype) or is_period_dtype(dtype):\n cls = ObjectValuesExtensionBlock\n elif is_extension_array_dtype(values):\n cls = ExtensionBlock\n elif issubclass(vtype, np.floating):\n cls = FloatBlock\n elif issubclass(vtype, np.timedelta64):\n assert issubclass(vtype, np.integer)\n cls = TimeDeltaBlock\n elif issubclass(vtype, np.complexfloating):\n cls = ComplexBlock\n elif issubclass(vtype, np.integer):\n cls = IntBlock\n elif dtype == np.bool_:\n cls = BoolBlock\n else:\n cls = ObjectBlock\n return cls\n\n\ndef make_block(values, placement, klass=None, ndim=None, dtype=None):\n # Ensure that we don't allow PandasArray / PandasDtype in internals.\n # For now, blocks should be backed by ndarrays when possible.\n if isinstance(values, ABCPandasArray):\n values = values.to_numpy()\n if ndim and ndim > 1:\n values = np.atleast_2d(values)\n\n if isinstance(dtype, PandasDtype):\n dtype = dtype.numpy_dtype\n\n if klass is None:\n dtype = dtype or values.dtype\n klass = get_block_type(values, dtype)\n\n elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values):\n # TODO: This is no longer hit internally; does it need to be retained\n # for e.g. 
pyarrow?\n values = DatetimeArray._simple_new(values, dtype=dtype)\n\n return klass(values, ndim=ndim, placement=placement)\n\n\n# -----------------------------------------------------------------\n\n\ndef _extend_blocks(result, blocks=None):\n \"\"\" return a new extended blocks, given the result \"\"\"\n from pandas.core.internals import BlockManager\n\n if blocks is None:\n blocks = []\n if isinstance(result, list):\n for r in result:\n if isinstance(r, list):\n blocks.extend(r)\n else:\n blocks.append(r)\n elif isinstance(result, BlockManager):\n blocks.extend(result.blocks)\n else:\n blocks.append(result)\n return blocks\n\n\ndef _block_shape(values, ndim=1, shape=None):\n \"\"\" guarantee the shape of the values to be at least 1 d \"\"\"\n if values.ndim < ndim:\n if shape is None:\n shape = values.shape\n if not is_extension_array_dtype(values):\n # TODO: https://github.com/pandas-dev/pandas/issues/23023\n # block.shape is incorrect for \"2D\" ExtensionArrays\n # We can't, and don't need to, reshape.\n values = values.reshape(tuple((1,) + shape))\n return values\n\n\ndef _merge_blocks(blocks, dtype=None, _can_consolidate=True):\n\n if len(blocks) == 1:\n return blocks[0]\n\n if _can_consolidate:\n\n if dtype is None:\n if len({b.dtype for b in blocks}) != 1:\n raise AssertionError(\"_merge_blocks are invalid!\")\n\n # FIXME: optimization potential in case all mgrs contain slices and\n # combination of those slices is a slice, too.\n new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])\n new_values = np.vstack([b.values for b in blocks])\n\n argsort = np.argsort(new_mgr_locs)\n new_values = new_values[argsort]\n new_mgr_locs = new_mgr_locs[argsort]\n\n return make_block(new_values, placement=new_mgr_locs)\n\n # no merge\n return blocks\n\n\ndef _safe_reshape(arr, new_shape):\n \"\"\"\n If possible, reshape `arr` to have shape `new_shape`,\n with a couple of exceptions (see gh-13012):\n\n 1) If `arr` is a ExtensionArray or Index, `arr` will be\n returned as is.\n 2) If `arr` is a Series, the `_values` attribute will\n be reshaped and returned.\n\n Parameters\n ----------\n arr : array-like, object to be reshaped\n new_shape : int or tuple of ints, the new shape\n \"\"\"\n if isinstance(arr, ABCSeries):\n arr = arr._values\n if not isinstance(arr, ABCExtensionArray):\n arr = arr.reshape(new_shape)\n return arr\n\n\ndef _putmask_smart(v, mask, n):\n \"\"\"\n Return a new ndarray, try to preserve dtype if possible.\n\n Parameters\n ----------\n v : `values`, updated in-place (array like)\n mask : np.ndarray\n Applies to both sides (array like).\n n : `new values` either scalar or an array like aligned with `values`\n\n Returns\n -------\n values : ndarray with updated values\n this *may* be a copy of the original\n\n See Also\n --------\n ndarray.putmask\n \"\"\"\n\n # we cannot use np.asarray() here as we cannot have conversions\n # that numpy does when numeric are mixed with strings\n\n # n should be the length of the mask or a scalar here\n if not is_list_like(n):\n n = np.repeat(n, len(mask))\n\n # see if we are only masking values that if putted\n # will work in the current dtype\n try:\n nn = n[mask]\n except TypeError:\n # TypeError: only integer scalar arrays can be converted to a scalar index\n pass\n else:\n # make sure that we have a nullable type\n # if we have nulls\n if not _isna_compat(v, nn[0]):\n pass\n elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):\n # only compare integers/floats\n pass\n elif not (is_float_dtype(v.dtype) or 
is_integer_dtype(v.dtype)):\n # only compare integers/floats\n pass\n else:\n\n # we ignore ComplexWarning here\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", np.ComplexWarning)\n nn_at = nn.astype(v.dtype)\n\n comp = nn == nn_at\n if is_list_like(comp) and comp.all():\n nv = v.copy()\n nv[mask] = nn_at\n return nv\n\n n = np.asarray(n)\n\n def _putmask_preserve(nv, n):\n try:\n nv[mask] = n[mask]\n except (IndexError, ValueError):\n nv[mask] = n\n return nv\n\n # preserves dtype if possible\n if v.dtype.kind == n.dtype.kind:\n return _putmask_preserve(v, n)\n\n # change the dtype if needed\n dtype, _ = maybe_promote(n.dtype)\n\n if is_extension_array_dtype(v.dtype) and is_object_dtype(dtype):\n v = v._internal_get_values(dtype)\n else:\n v = v.astype(dtype)\n\n return _putmask_preserve(v, n)\n" ]
[ [ "pandas.util._validators.validate_bool_kwarg", "pandas.core.arrays.DatetimeArray._simple_new", "pandas.core.missing.clean_interp_method", "pandas.core.dtypes.common.is_datetime64_dtype", "numpy.place", "numpy.where", "pandas.core.dtypes.common.is_interval_dtype", "pandas.core.indexers.is_empty_indexer", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.common.is_categorical_dtype", "pandas.core.dtypes.common.is_re_compilable", "pandas.core.dtypes.common.is_list_like", "pandas.core.arrays.DatetimeArray", "numpy.delete", "pandas._libs.internals.BlockPlacement", "numpy.array", "pandas.core.dtypes.cast.maybe_downcast_to_dtype", "pandas.core.dtypes.common.is_bool_dtype", "pandas.io.formats.format._get_format_datetime64_from_values", "pandas.core.common.is_null_slice", "pandas.core.dtypes.cast.find_common_type", "numpy.datetime64", "pandas.core.dtypes.missing.isna", "pandas.io.formats.printing.pprint_thing", "pandas.core.missing.interpolate_2d", "numpy.vstack", "numpy.asarray", "pandas.core.dtypes.cast.maybe_downcast_numeric", "pandas.core.dtypes.common.is_datetime64tz_dtype", "numpy.concatenate", "pandas._libs.tslibs.conversion.ensure_datetime64ns", "pandas._libs.algos._validate_limit", "numpy.atleast_1d", "pandas.core.algorithms.take_nd", "numpy.putmask", "pandas.core.dtypes.cast.infer_dtype_from_scalar", "numpy.timedelta64", "numpy.ndim", "numpy.atleast_2d", "numpy.errstate", "pandas.core.dtypes.cast.infer_dtype_from", "pandas.core.dtypes.common.is_integer", "numpy.vectorize", "pandas.core.dtypes.cast.maybe_upcast", "pandas._libs.lib.item_from_zerodim", "numpy.empty", "numpy.can_cast", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.core.dtypes.common.is_dtype_equal", "pandas._libs.lib.is_scalar", "pandas.core.indexers.check_setitem_lengths", "pandas._libs.index.convert_scalar", "numpy.repeat", "pandas._libs.tslibs.timezones.tz_compare", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_categorical", "pandas.core.dtypes.common.pandas_dtype", "pandas.core.dtypes.concat.concat_datetime", "numpy.isnan", "pandas.core.dtypes.common.is_timedelta64_dtype", "pandas.core.dtypes.common.is_period_dtype", "pandas._libs.writers.word_len", "pandas.core.missing.mask_missing", "numpy.argsort", "pandas.core.missing.clean_fill_method", "pandas._libs.tslibs.Timedelta", "pandas._libs.lib.is_float", "pandas.core.algorithms.diff", "pandas.core.dtypes.missing.array_equivalent", "numpy.broadcast_to", "numpy.squeeze", "pandas.core.computation.expressions.where", "pandas.core.missing.interpolate_1d", "numpy.any", "numpy.arange", "numpy.apply_along_axis", "pandas._libs.tslibs.conversion.ensure_timedelta64ns", "pandas.io.formats.format.FloatArrayFormatter", "pandas.core.arrays.Categorical", "pandas.core.dtypes.cast.maybe_promote", "pandas.core.dtypes.common.ensure_platform_int", "pandas.core.indexers.is_scalar_indexer", "pandas.core.dtypes.common.is_sparse", "pandas.core.dtypes.missing._isna_compat", "pandas.core.dtypes.missing.is_valid_nat_for_dtype", "pandas.core.dtypes.cast.astype_nansafe", "pandas.core.dtypes.cast.maybe_infer_dtype_type", "pandas.core.dtypes.common.is_re", "pandas.core.dtypes.common.is_object_dtype", "numpy.prod", "pandas.core.construction.extract_array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "1.1", "1.0", "1.2" ], "scipy": [], "tensorflow": [] } ]
zhonglihanzhu/tensorflow-objectDetection
[ "aa3d1b754d5c78b8401ce86d4c20f45741fc2b77", "aa3d1b754d5c78b8401ce86d4c20f45741fc2b77", "aa3d1b754d5c78b8401ce86d4c20f45741fc2b77", "aa3d1b754d5c78b8401ce86d4c20f45741fc2b77" ]
[ "builders/losses_builder_test.py", "utils/shape_utils_test.py", "models/faster_rcnn_nas_feature_extractor.py", "meta_architectures/faster_rcnn_meta_arch_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for losses_builder.\"\"\"\n\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom builders import losses_builder\nfrom core import losses\nfrom protos import losses_pb2\n\n\nclass LocalizationLossBuilderTest(tf.test.TestCase):\n\n def test_build_weighted_l2_localization_loss(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, localization_loss, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(localization_loss,\n losses.WeightedL2LocalizationLoss))\n\n def test_build_weighted_smooth_l1_localization_loss(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_smooth_l1 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, localization_loss, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(localization_loss,\n losses.WeightedSmoothL1LocalizationLoss))\n\n def test_build_weighted_iou_localization_loss(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_iou {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, localization_loss, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(localization_loss,\n losses.WeightedIOULocalizationLoss))\n\n def test_anchorwise_output(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_smooth_l1 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, localization_loss, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(localization_loss,\n losses.WeightedSmoothL1LocalizationLoss))\n predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])\n targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])\n weights = tf.constant([[1.0, 1.0]])\n loss = localization_loss(predictions, targets, weights=weights)\n self.assertEqual(loss.shape, [1, 2])\n\n def test_raise_error_on_empty_localization_config(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n with self.assertRaises(ValueError):\n losses_builder._build_localization_loss(losses_proto)\n\n\nclass ClassificationLossBuilderTest(tf.test.TestCase):\n\n def test_build_weighted_sigmoid_classification_loss(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_sigmoid {\n 
}\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSigmoidClassificationLoss))\n\n def test_build_weighted_sigmoid_focal_classification_loss(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_sigmoid_focal {\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.SigmoidFocalClassificationLoss))\n self.assertAlmostEqual(classification_loss._alpha, None)\n self.assertAlmostEqual(classification_loss._gamma, 2.0)\n\n def test_build_weighted_sigmoid_focal_loss_non_default(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_sigmoid_focal {\n alpha: 0.25\n gamma: 3.0\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.SigmoidFocalClassificationLoss))\n self.assertAlmostEqual(classification_loss._alpha, 0.25)\n self.assertAlmostEqual(classification_loss._gamma, 3.0)\n\n def test_build_weighted_softmax_classification_loss(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_softmax {\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSoftmaxClassificationLoss))\n\n def test_build_weighted_softmax_classification_loss_with_logit_scale(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_softmax {\n logit_scale: 2.0\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSoftmaxClassificationLoss))\n\n def test_build_bootstrapped_sigmoid_classification_loss(self):\n losses_text_proto = \"\"\"\n classification_loss {\n bootstrapped_sigmoid {\n alpha: 0.5\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.BootstrappedSigmoidClassificationLoss))\n\n def test_anchorwise_output(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_sigmoid {\n anchorwise_output: true\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSigmoidClassificationLoss))\n predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]])\n targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])\n weights = tf.constant([[1.0, 1.0]])\n loss = 
classification_loss(predictions, targets, weights=weights)\n self.assertEqual(loss.shape, [1, 2, 3])\n\n def test_raise_error_on_empty_config(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n with self.assertRaises(ValueError):\n losses_builder.build(losses_proto)\n\n\nclass HardExampleMinerBuilderTest(tf.test.TestCase):\n\n def test_do_not_build_hard_example_miner_by_default(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)\n self.assertEqual(hard_example_miner, None)\n\n def test_build_hard_example_miner_for_classification_loss(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n hard_example_miner {\n loss_type: CLASSIFICATION\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))\n self.assertEqual(hard_example_miner._loss_type, 'cls')\n\n def test_build_hard_example_miner_for_localization_loss(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n hard_example_miner {\n loss_type: LOCALIZATION\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))\n self.assertEqual(hard_example_miner._loss_type, 'loc')\n\n def test_build_hard_example_miner_with_non_default_values(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n hard_example_miner {\n num_hard_examples: 32\n iou_threshold: 0.5\n loss_type: LOCALIZATION\n max_negatives_per_positive: 10\n min_negatives_per_image: 3\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))\n self.assertEqual(hard_example_miner._num_hard_examples, 32)\n self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5)\n self.assertEqual(hard_example_miner._max_negatives_per_positive, 10)\n self.assertEqual(hard_example_miner._min_negatives_per_image, 3)\n\n\nclass LossBuilderTest(tf.test.TestCase):\n\n def test_build_all_loss_parameters(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n hard_example_miner {\n }\n classification_weight: 0.8\n localization_weight: 0.2\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n (classification_loss, localization_loss,\n classification_weight, localization_weight,\n hard_example_miner) = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSoftmaxClassificationLoss))\n 
self.assertTrue(isinstance(localization_loss,\n losses.WeightedL2LocalizationLoss))\n self.assertAlmostEqual(classification_weight, 0.8)\n self.assertAlmostEqual(localization_weight, 0.2)\n\n def test_raise_error_when_both_focal_loss_and_hard_example_miner(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_sigmoid_focal {\n }\n }\n hard_example_miner {\n }\n classification_weight: 0.8\n localization_weight: 0.2\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n with self.assertRaises(ValueError):\n losses_builder.build(losses_proto)\n\n\nclass FasterRcnnClassificationLossBuilderTest(tf.test.TestCase):\n\n def test_build_sigmoid_loss(self):\n losses_text_proto = \"\"\"\n weighted_sigmoid {\n }\n \"\"\"\n losses_proto = losses_pb2.ClassificationLoss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss = losses_builder.build_faster_rcnn_classification_loss(\n losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSigmoidClassificationLoss))\n\n def test_build_softmax_loss(self):\n losses_text_proto = \"\"\"\n weighted_softmax {\n }\n \"\"\"\n losses_proto = losses_pb2.ClassificationLoss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss = losses_builder.build_faster_rcnn_classification_loss(\n losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSoftmaxClassificationLoss))\n\n def test_build_softmax_loss_by_default(self):\n losses_text_proto = \"\"\"\n \"\"\"\n losses_proto = losses_pb2.ClassificationLoss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss = losses_builder.build_faster_rcnn_classification_loss(\n losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSoftmaxClassificationLoss))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.utils.shape_utils.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom utils import shape_utils\n\n\nclass UtilTest(tf.test.TestCase):\n\n def test_pad_tensor_using_integer_input(self):\n t1 = tf.constant([1], dtype=tf.int32)\n pad_t1 = shape_utils.pad_tensor(t1, 2)\n t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)\n pad_t2 = shape_utils.pad_tensor(t2, 2)\n\n self.assertEqual(2, pad_t1.get_shape()[0])\n self.assertEqual(2, pad_t2.get_shape()[0])\n\n with self.test_session() as sess:\n pad_t1_result, pad_t2_result = sess.run([pad_t1, pad_t2])\n self.assertAllEqual([1, 0], pad_t1_result)\n self.assertAllClose([[0.1, 0.2], [0, 0]], pad_t2_result)\n\n def test_pad_tensor_using_tensor_input(self):\n t1 = tf.constant([1], dtype=tf.int32)\n pad_t1 = shape_utils.pad_tensor(t1, tf.constant(2))\n t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)\n pad_t2 = shape_utils.pad_tensor(t2, tf.constant(2))\n\n with self.test_session() as sess:\n pad_t1_result, pad_t2_result = sess.run([pad_t1, pad_t2])\n self.assertAllEqual([1, 0], pad_t1_result)\n self.assertAllClose([[0.1, 0.2], [0, 0]], pad_t2_result)\n\n def test_clip_tensor_using_integer_input(self):\n t1 = tf.constant([1, 2, 3], dtype=tf.int32)\n clip_t1 = shape_utils.clip_tensor(t1, 2)\n t2 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)\n clip_t2 = shape_utils.clip_tensor(t2, 2)\n\n self.assertEqual(2, clip_t1.get_shape()[0])\n self.assertEqual(2, clip_t2.get_shape()[0])\n\n with self.test_session() as sess:\n clip_t1_result, clip_t2_result = sess.run([clip_t1, clip_t2])\n self.assertAllEqual([1, 2], clip_t1_result)\n self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clip_t2_result)\n\n def test_clip_tensor_using_tensor_input(self):\n t1 = tf.constant([1, 2, 3], dtype=tf.int32)\n clip_t1 = shape_utils.clip_tensor(t1, tf.constant(2))\n t2 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)\n clip_t2 = shape_utils.clip_tensor(t2, tf.constant(2))\n\n with self.test_session() as sess:\n clip_t1_result, clip_t2_result = sess.run([clip_t1, clip_t2])\n self.assertAllEqual([1, 2], clip_t1_result)\n self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clip_t2_result)\n\n def test_pad_or_clip_tensor_using_integer_input(self):\n t1 = tf.constant([1], dtype=tf.int32)\n tt1 = shape_utils.pad_or_clip_tensor(t1, 2)\n t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)\n tt2 = shape_utils.pad_or_clip_tensor(t2, 2)\n\n t3 = tf.constant([1, 2, 3], dtype=tf.int32)\n tt3 = shape_utils.clip_tensor(t3, 2)\n t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)\n tt4 = shape_utils.clip_tensor(t4, 2)\n\n self.assertEqual(2, tt1.get_shape()[0])\n self.assertEqual(2, tt2.get_shape()[0])\n self.assertEqual(2, tt3.get_shape()[0])\n self.assertEqual(2, tt4.get_shape()[0])\n\n with self.test_session() as sess:\n tt1_result, tt2_result, 
tt3_result, tt4_result = sess.run(\n [tt1, tt2, tt3, tt4])\n self.assertAllEqual([1, 0], tt1_result)\n self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result)\n self.assertAllEqual([1, 2], tt3_result)\n self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result)\n\n def test_pad_or_clip_tensor_using_tensor_input(self):\n t1 = tf.constant([1], dtype=tf.int32)\n tt1 = shape_utils.pad_or_clip_tensor(t1, tf.constant(2))\n t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)\n tt2 = shape_utils.pad_or_clip_tensor(t2, tf.constant(2))\n\n t3 = tf.constant([1, 2, 3], dtype=tf.int32)\n tt3 = shape_utils.clip_tensor(t3, tf.constant(2))\n t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)\n tt4 = shape_utils.clip_tensor(t4, tf.constant(2))\n\n with self.test_session() as sess:\n tt1_result, tt2_result, tt3_result, tt4_result = sess.run(\n [tt1, tt2, tt3, tt4])\n self.assertAllEqual([1, 0], tt1_result)\n self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result)\n self.assertAllEqual([1, 2], tt3_result)\n self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result)\n\n def test_combines_static_dynamic_shape(self):\n tensor = tf.placeholder(tf.float32, shape=(None, 2, 3))\n combined_shape = shape_utils.combined_static_and_dynamic_shape(\n tensor)\n self.assertTrue(tf.contrib.framework.is_tensor(combined_shape[0]))\n self.assertListEqual(combined_shape[1:], [2, 3])\n\n\nclass StaticOrDynamicMapFnTest(tf.test.TestCase):\n\n def test_with_dynamic_shape(self):\n def fn(input_tensor):\n return tf.reduce_sum(input_tensor)\n input_tensor = tf.placeholder(tf.float32, shape=(None, 2))\n map_fn_output = shape_utils.static_or_dynamic_map_fn(fn, input_tensor)\n\n op_names = [op.name for op in tf.get_default_graph().get_operations()]\n self.assertTrue(any(['map' == op_name[:3] for op_name in op_names]))\n\n with self.test_session() as sess:\n result1 = sess.run(\n map_fn_output, feed_dict={\n input_tensor: [[1, 2], [3, 1], [0, 4]]})\n result2 = sess.run(\n map_fn_output, feed_dict={\n input_tensor: [[-1, 1], [0, 9]]})\n self.assertAllEqual(result1, [3, 4, 4])\n self.assertAllEqual(result2, [0, 9])\n\n def test_with_static_shape(self):\n def fn(input_tensor):\n return tf.reduce_sum(input_tensor)\n input_tensor = tf.constant([[1, 2], [3, 1], [0, 4]], dtype=tf.float32)\n map_fn_output = shape_utils.static_or_dynamic_map_fn(fn, input_tensor)\n\n op_names = [op.name for op in tf.get_default_graph().get_operations()]\n self.assertTrue(all(['map' != op_name[:3] for op_name in op_names]))\n\n with self.test_session() as sess:\n result = sess.run(map_fn_output)\n self.assertAllEqual(result, [3, 4, 4])\n\n def test_with_multiple_dynamic_shapes(self):\n def fn(elems):\n input_tensor, scalar_index_tensor = elems\n return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), [])\n\n input_tensor = tf.placeholder(tf.float32, shape=(None, 3))\n scalar_index_tensor = tf.placeholder(tf.int32, shape=(None, 1))\n map_fn_output = shape_utils.static_or_dynamic_map_fn(\n fn, [input_tensor, scalar_index_tensor], dtype=tf.float32)\n\n op_names = [op.name for op in tf.get_default_graph().get_operations()]\n self.assertTrue(any(['map' == op_name[:3] for op_name in op_names]))\n\n with self.test_session() as sess:\n result1 = sess.run(\n map_fn_output, feed_dict={\n input_tensor: [[1, 2, 3], [4, 5, -1], [0, 6, 9]],\n scalar_index_tensor: [[0], [2], [1]],\n })\n result2 = sess.run(\n map_fn_output, feed_dict={\n input_tensor: [[-1, 1, 0], [3, 9, 30]],\n scalar_index_tensor: [[1], [0]]\n })\n self.assertAllEqual(result1, [1, -1, 
6])\n self.assertAllEqual(result2, [1, 3])\n\n def test_with_multiple_static_shapes(self):\n def fn(elems):\n input_tensor, scalar_index_tensor = elems\n return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), [])\n\n input_tensor = tf.constant([[1, 2, 3], [4, 5, -1], [0, 6, 9]],\n dtype=tf.float32)\n scalar_index_tensor = tf.constant([[0], [2], [1]], dtype=tf.int32)\n map_fn_output = shape_utils.static_or_dynamic_map_fn(\n fn, [input_tensor, scalar_index_tensor], dtype=tf.float32)\n\n op_names = [op.name for op in tf.get_default_graph().get_operations()]\n self.assertTrue(all(['map' != op_name[:3] for op_name in op_names]))\n\n with self.test_session() as sess:\n result = sess.run(map_fn_output)\n self.assertAllEqual(result, [1, -1, 6])\n\n def test_fails_with_nested_input(self):\n def fn(input_tensor):\n return input_tensor\n input_tensor1 = tf.constant([1])\n input_tensor2 = tf.constant([2])\n with self.assertRaisesRegexp(\n ValueError, '`elems` must be a Tensor or list of Tensors.'):\n shape_utils.static_or_dynamic_map_fn(\n fn, [input_tensor1, [input_tensor2]], dtype=tf.float32)\n\n\nclass CheckMinImageShapeTest(tf.test.TestCase):\n\n def test_check_min_image_dim_static_shape(self):\n input_tensor = tf.constant(np.zeros([1, 42, 42, 3]))\n _ = shape_utils.check_min_image_dim(33, input_tensor)\n\n with self.assertRaisesRegexp(\n ValueError, 'image size must be >= 64 in both height and width.'):\n _ = shape_utils.check_min_image_dim(64, input_tensor)\n\n def test_check_min_image_dim_dynamic_shape(self):\n input_placeholder = tf.placeholder(tf.float32, shape=[1, None, None, 3])\n image_tensor = shape_utils.check_min_image_dim(33, input_placeholder)\n\n with self.test_session() as sess:\n sess.run(image_tensor,\n feed_dict={input_placeholder: np.zeros([1, 42, 42, 3])})\n with self.assertRaises(tf.errors.InvalidArgumentError):\n sess.run(image_tensor,\n feed_dict={input_placeholder: np.zeros([1, 32, 32, 3])})\n\n\nclass AssertShapeEqualTest(tf.test.TestCase):\n\n def test_unequal_static_shape_raises_exception(self):\n shape_a = tf.constant(np.zeros([4, 2, 2, 1]))\n shape_b = tf.constant(np.zeros([4, 2, 3, 1]))\n with self.assertRaisesRegexp(\n ValueError, 'Unequal shapes'):\n shape_utils.assert_shape_equal(\n shape_utils.combined_static_and_dynamic_shape(shape_a),\n shape_utils.combined_static_and_dynamic_shape(shape_b))\n\n def test_equal_static_shape_succeeds(self):\n shape_a = tf.constant(np.zeros([4, 2, 2, 1]))\n shape_b = tf.constant(np.zeros([4, 2, 2, 1]))\n with self.test_session() as sess:\n op = shape_utils.assert_shape_equal(\n shape_utils.combined_static_and_dynamic_shape(shape_a),\n shape_utils.combined_static_and_dynamic_shape(shape_b))\n sess.run(op)\n\n def test_unequal_dynamic_shape_raises_tf_assert(self):\n tensor_a = tf.placeholder(tf.float32, shape=[1, None, None, 3])\n tensor_b = tf.placeholder(tf.float32, shape=[1, None, None, 3])\n op = shape_utils.assert_shape_equal(\n shape_utils.combined_static_and_dynamic_shape(tensor_a),\n shape_utils.combined_static_and_dynamic_shape(tensor_b))\n with self.test_session() as sess:\n with self.assertRaises(tf.errors.InvalidArgumentError):\n sess.run(op, feed_dict={tensor_a: np.zeros([1, 2, 2, 3]),\n tensor_b: np.zeros([1, 4, 4, 3])})\n\n def test_equal_dynamic_shape_succeeds(self):\n tensor_a = tf.placeholder(tf.float32, shape=[1, None, None, 3])\n tensor_b = tf.placeholder(tf.float32, shape=[1, None, None, 3])\n op = shape_utils.assert_shape_equal(\n shape_utils.combined_static_and_dynamic_shape(tensor_a),\n 
shape_utils.combined_static_and_dynamic_shape(tensor_b))\n with self.test_session() as sess:\n sess.run(op, feed_dict={tensor_a: np.zeros([1, 2, 2, 3]),\n tensor_b: np.zeros([1, 2, 2, 3])})\n\n def test_unequal_static_shape_along_first_dim_raises_exception(self):\n shape_a = tf.constant(np.zeros([4, 2, 2, 1]))\n shape_b = tf.constant(np.zeros([6, 2, 3, 1]))\n with self.assertRaisesRegexp(\n ValueError, 'Unequal first dimension'):\n shape_utils.assert_shape_equal_along_first_dimension(\n shape_utils.combined_static_and_dynamic_shape(shape_a),\n shape_utils.combined_static_and_dynamic_shape(shape_b))\n\n def test_equal_static_shape_along_first_dim_succeeds(self):\n shape_a = tf.constant(np.zeros([4, 2, 2, 1]))\n shape_b = tf.constant(np.zeros([4, 7, 2]))\n with self.test_session() as sess:\n op = shape_utils.assert_shape_equal_along_first_dimension(\n shape_utils.combined_static_and_dynamic_shape(shape_a),\n shape_utils.combined_static_and_dynamic_shape(shape_b))\n sess.run(op)\n\n def test_unequal_dynamic_shape_along_first_dim_raises_tf_assert(self):\n tensor_a = tf.placeholder(tf.float32, shape=[None, None, None, 3])\n tensor_b = tf.placeholder(tf.float32, shape=[None, None, 3])\n op = shape_utils.assert_shape_equal_along_first_dimension(\n shape_utils.combined_static_and_dynamic_shape(tensor_a),\n shape_utils.combined_static_and_dynamic_shape(tensor_b))\n with self.test_session() as sess:\n with self.assertRaises(tf.errors.InvalidArgumentError):\n sess.run(op, feed_dict={tensor_a: np.zeros([1, 2, 2, 3]),\n tensor_b: np.zeros([2, 4, 3])})\n\n def test_equal_dynamic_shape_along_first_dim_succeeds(self):\n tensor_a = tf.placeholder(tf.float32, shape=[None, None, None, 3])\n tensor_b = tf.placeholder(tf.float32, shape=[None])\n op = shape_utils.assert_shape_equal_along_first_dimension(\n shape_utils.combined_static_and_dynamic_shape(tensor_a),\n shape_utils.combined_static_and_dynamic_shape(tensor_b))\n with self.test_session() as sess:\n sess.run(op, feed_dict={tensor_a: np.zeros([5, 2, 2, 3]),\n tensor_b: np.zeros([5])})\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"NASNet Faster R-CNN implementation.\n\nLearning Transferable Architectures for Scalable Image Recognition\nBarret Zoph, Vijay Vasudevan, Jonathon Shlens, Quoc V. 
Le\nhttps://arxiv.org/abs/1707.07012\n\"\"\"\n\nimport tensorflow as tf\n\nfrom meta_architectures import faster_rcnn_meta_arch\nfrom nets.nasnet import nasnet\nfrom nets.nasnet import nasnet_utils\n\narg_scope = tf.contrib.framework.arg_scope\nslim = tf.contrib.slim\n\n\ndef nasnet_large_arg_scope_for_detection(is_batch_norm_training=False):\n \"\"\"Defines the default arg scope for the NASNet-A Large for object detection.\n\n This provides a small edit to switch batch norm training on and off.\n\n Args:\n is_batch_norm_training: Boolean indicating whether to train with batch norm.\n\n Returns:\n An `arg_scope` to use for the NASNet Large Model.\n \"\"\"\n imagenet_scope = nasnet.nasnet_large_arg_scope()\n with arg_scope(imagenet_scope):\n with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc:\n return sc\n\n\n# Note: This is largely a copy of _build_nasnet_base inside nasnet.py but\n# with special edits to remove instantiation of the stem and the special\n# ability to receive as input a pair of hidden states.\ndef _build_nasnet_base(hidden_previous,\n hidden,\n normal_cell,\n reduction_cell,\n hparams,\n true_cell_num,\n start_cell_num):\n \"\"\"Constructs a NASNet image model.\"\"\"\n\n # Find where to place the reduction cells or stride normal cells\n reduction_indices = nasnet_utils.calc_reduction_layers(\n hparams.num_cells, hparams.num_reduction_layers)\n\n # Note: The None is prepended to match the behavior of _imagenet_stem()\n cell_outputs = [None, hidden_previous, hidden]\n net = hidden\n\n # NOTE: In the nasnet.py code, filter_scaling starts at 1.0. We instead\n # start at 2.0 because 1 reduction cell has been created which would\n # update the filter_scaling to 2.0.\n filter_scaling = 2.0\n\n # Run the cells\n for cell_num in range(start_cell_num, hparams.num_cells):\n stride = 1\n if hparams.skip_reduction_layer_input:\n prev_layer = cell_outputs[-2]\n if cell_num in reduction_indices:\n filter_scaling *= hparams.filter_scaling_rate\n net = reduction_cell(\n net,\n scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)),\n filter_scaling=filter_scaling,\n stride=2,\n prev_layer=cell_outputs[-2],\n cell_num=true_cell_num)\n true_cell_num += 1\n cell_outputs.append(net)\n if not hparams.skip_reduction_layer_input:\n prev_layer = cell_outputs[-2]\n net = normal_cell(\n net,\n scope='cell_{}'.format(cell_num),\n filter_scaling=filter_scaling,\n stride=stride,\n prev_layer=prev_layer,\n cell_num=true_cell_num)\n true_cell_num += 1\n cell_outputs.append(net)\n\n # Final nonlinearity.\n # Note that we have dropped the final pooling, dropout and softmax layers\n # from the default nasnet version.\n with tf.variable_scope('final_layer'):\n net = tf.nn.relu(net)\n return net\n\n\n# TODO: Only fixed_shape_resizer is currently supported for NASNet\n# featurization. The reason for this is that nasnet.py only supports\n# inputs with fully known shapes. 
We need to update nasnet.py to handle\n# shapes not known at compile time.\nclass FasterRCNNNASFeatureExtractor(\n faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):\n \"\"\"Faster R-CNN with NASNet-A feature extractor implementation.\"\"\"\n\n def __init__(self,\n is_training,\n first_stage_features_stride,\n batch_norm_trainable=False,\n reuse_weights=None,\n weight_decay=0.0):\n \"\"\"Constructor.\n\n Args:\n is_training: See base class.\n first_stage_features_stride: See base class.\n batch_norm_trainable: See base class.\n reuse_weights: See base class.\n weight_decay: See base class.\n\n Raises:\n ValueError: If `first_stage_features_stride` is not 16.\n \"\"\"\n if first_stage_features_stride != 16:\n raise ValueError('`first_stage_features_stride` must be 16.')\n super(FasterRCNNNASFeatureExtractor, self).__init__(\n is_training, first_stage_features_stride, batch_norm_trainable,\n reuse_weights, weight_decay)\n\n def preprocess(self, resized_inputs):\n \"\"\"Faster R-CNN with NAS preprocessing.\n\n Maps pixel values to the range [-1, 1].\n\n Args:\n resized_inputs: A [batch, height_in, width_in, channels] float32 tensor\n representing a batch of images with values between 0 and 255.0.\n\n Returns:\n preprocessed_inputs: A [batch, height_out, width_out, channels] float32\n tensor representing a batch of images.\n\n \"\"\"\n return (2.0 / 255.0) * resized_inputs - 1.0\n\n def _extract_proposal_features(self, preprocessed_inputs, scope):\n \"\"\"Extracts first stage RPN features.\n\n Extracts features using the first half of the NASNet network.\n We construct the network in `align_feature_maps=True` mode, which means\n that all VALID paddings in the network are changed to SAME padding so that\n the feature maps are aligned.\n\n Args:\n preprocessed_inputs: A [batch, height, width, channels] float32 tensor\n representing a batch of images.\n scope: A scope name.\n\n Returns:\n rpn_feature_map: A tensor with shape [batch, height, width, depth]\n Raises:\n ValueError: If the created network is missing the required activation.\n \"\"\"\n del scope\n\n if len(preprocessed_inputs.get_shape().as_list()) != 4:\n raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '\n 'tensor of shape %s' % preprocessed_inputs.get_shape())\n\n with slim.arg_scope(nasnet_large_arg_scope_for_detection(\n is_batch_norm_training=self._train_batch_norm)):\n _, end_points = nasnet.build_nasnet_large(\n preprocessed_inputs, num_classes=None,\n is_training=self._is_training,\n final_endpoint='Cell_11')\n\n # Note that both 'Cell_10' and 'Cell_11' have equal depth = 2016.\n rpn_feature_map = tf.concat([end_points['Cell_10'],\n end_points['Cell_11']], 3)\n\n # nasnet.py does not maintain the batch size in the first dimension.\n # This work around permits us retaining the batch for below.\n batch = preprocessed_inputs.get_shape().as_list()[0]\n shape_without_batch = rpn_feature_map.get_shape().as_list()[1:]\n rpn_feature_map_shape = [batch] + shape_without_batch\n rpn_feature_map.set_shape(rpn_feature_map_shape)\n\n return rpn_feature_map\n\n def _extract_box_classifier_features(self, proposal_feature_maps, scope):\n \"\"\"Extracts second stage box classifier features.\n\n This function reconstructs the \"second half\" of the NASNet-A\n network after the part defined in `_extract_proposal_features`.\n\n Args:\n proposal_feature_maps: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, crop_height, crop_width, depth]\n representing the feature map cropped to each proposal.\n scope: A 
scope name.\n\n Returns:\n proposal_classifier_features: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, height, width, depth]\n representing box classifier features for each proposal.\n \"\"\"\n del scope\n\n # Note that we always feed into 2 layers of equal depth\n # where the first N channels corresponds to previous hidden layer\n # and the second N channels correspond to the final hidden layer.\n hidden_previous, hidden = tf.split(proposal_feature_maps, 2, axis=3)\n\n # Note that what follows is largely a copy of build_nasnet_large() within\n # nasnet.py. We are copying to minimize code pollution in slim.\n\n # pylint: disable=protected-access\n hparams = nasnet._large_imagenet_config(is_training=self._is_training)\n # pylint: enable=protected-access\n\n # Calculate the total number of cells in the network\n # -- Add 2 for the reduction cells.\n total_num_cells = hparams.num_cells + 2\n # -- And add 2 for the stem cells for ImageNet training.\n total_num_cells += 2\n\n normal_cell = nasnet_utils.NasNetANormalCell(\n hparams.num_conv_filters, hparams.drop_path_keep_prob,\n total_num_cells, hparams.total_training_steps)\n reduction_cell = nasnet_utils.NasNetAReductionCell(\n hparams.num_conv_filters, hparams.drop_path_keep_prob,\n total_num_cells, hparams.total_training_steps)\n with arg_scope([slim.dropout, nasnet_utils.drop_path],\n is_training=self._is_training):\n with arg_scope([slim.batch_norm], is_training=self._train_batch_norm):\n with arg_scope([slim.avg_pool2d,\n slim.max_pool2d,\n slim.conv2d,\n slim.batch_norm,\n slim.separable_conv2d,\n nasnet_utils.factorized_reduction,\n nasnet_utils.global_avg_pool,\n nasnet_utils.get_channel_index,\n nasnet_utils.get_channel_dim],\n data_format=hparams.data_format):\n\n # This corresponds to the cell number just past 'Cell_11' used by\n # by _extract_proposal_features().\n start_cell_num = 12\n # Note that this number equals:\n # start_cell_num + 2 stem cells + 1 reduction cell\n true_cell_num = 15\n\n with slim.arg_scope(nasnet.nasnet_large_arg_scope()):\n net = _build_nasnet_base(hidden_previous,\n hidden,\n normal_cell=normal_cell,\n reduction_cell=reduction_cell,\n hparams=hparams,\n true_cell_num=true_cell_num,\n start_cell_num=start_cell_num)\n\n proposal_classifier_features = net\n return proposal_classifier_features\n\n def restore_from_classification_checkpoint_fn(\n self,\n first_stage_feature_extractor_scope,\n second_stage_feature_extractor_scope):\n \"\"\"Returns a map of variables to load from a foreign checkpoint.\n\n Note that this overrides the default implementation in\n faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for\n NASNet-A checkpoints.\n\n Args:\n first_stage_feature_extractor_scope: A scope name for the first stage\n feature extractor.\n second_stage_feature_extractor_scope: A scope name for the second stage\n feature extractor.\n\n Returns:\n A dict mapping variable names (to load from a checkpoint) to variables in\n the model graph.\n \"\"\"\n # Note that the NAS checkpoint only contains the moving average version of\n # the Variables so we need to generate an appropriate dictionary mapping.\n variables_to_restore = {}\n for variable in tf.global_variables():\n if variable.op.name.startswith(\n first_stage_feature_extractor_scope):\n var_name = variable.op.name.replace(\n first_stage_feature_extractor_scope + '/', '')\n var_name += '/ExponentialMovingAverage'\n variables_to_restore[var_name] = variable\n if variable.op.name.startswith(\n 
second_stage_feature_extractor_scope):\n var_name = variable.op.name.replace(\n second_stage_feature_extractor_scope + '/', '')\n var_name += '/ExponentialMovingAverage'\n variables_to_restore[var_name] = variable\n return variables_to_restore\n\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.meta_architectures.faster_rcnn_meta_arch.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom meta_architectures import faster_rcnn_meta_arch_test_lib\n\n\nclass FasterRCNNMetaArchTest(\n faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase):\n\n def test_postprocess_second_stage_only_inference_mode_with_masks(self):\n model = self._build_model(\n is_training=False, number_of_stages=2, second_stage_batch_size=6)\n\n batch_size = 2\n total_num_padded_proposals = batch_size * model.max_num_proposals\n proposal_boxes = tf.constant(\n [[[1, 1, 2, 3],\n [0, 0, 1, 1],\n [.5, .5, .6, .6],\n 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],\n [[2, 3, 6, 8],\n [1, 2, 5, 3],\n 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32)\n num_proposals = tf.constant([3, 2], dtype=tf.int32)\n refined_box_encodings = tf.zeros(\n [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32)\n class_predictions_with_background = tf.ones(\n [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32)\n image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32)\n\n mask_height = 2\n mask_width = 2\n mask_predictions = 30. 
* tf.ones(\n [total_num_padded_proposals, model.num_classes,\n mask_height, mask_width], dtype=tf.float32)\n exp_detection_masks = np.array([[[[1, 1], [1, 1]],\n [[1, 1], [1, 1]],\n [[1, 1], [1, 1]],\n [[1, 1], [1, 1]],\n [[1, 1], [1, 1]]],\n [[[1, 1], [1, 1]],\n [[1, 1], [1, 1]],\n [[1, 1], [1, 1]],\n [[1, 1], [1, 1]],\n [[0, 0], [0, 0]]]])\n\n _, true_image_shapes = model.preprocess(tf.zeros(image_shape))\n detections = model.postprocess({\n 'refined_box_encodings': refined_box_encodings,\n 'class_predictions_with_background': class_predictions_with_background,\n 'num_proposals': num_proposals,\n 'proposal_boxes': proposal_boxes,\n 'image_shape': image_shape,\n 'mask_predictions': mask_predictions\n }, true_image_shapes)\n with self.test_session() as sess:\n detections_out = sess.run(detections)\n self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])\n self.assertAllClose(detections_out['detection_scores'],\n [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])\n self.assertAllClose(detections_out['detection_classes'],\n [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]])\n self.assertAllClose(detections_out['num_detections'], [5, 4])\n self.assertAllClose(detections_out['detection_masks'],\n exp_detection_masks)\n self.assertTrue(np.amax(detections_out['detection_masks'] <= 1.0))\n self.assertTrue(np.amin(detections_out['detection_masks'] >= 0.0))\n\n def test_predict_correct_shapes_in_inference_mode_three_stages_with_masks(\n self):\n batch_size = 2\n image_size = 10\n max_num_proposals = 8\n initial_crop_size = 3\n maxpool_stride = 1\n\n input_shapes = [(batch_size, image_size, image_size, 3),\n (None, image_size, image_size, 3),\n (batch_size, None, None, 3),\n (None, None, None, 3)]\n expected_num_anchors = image_size * image_size * 3 * 3\n expected_shapes = {\n 'rpn_box_predictor_features':\n (2, image_size, image_size, 512),\n 'rpn_features_to_crop': (2, image_size, image_size, 3),\n 'image_shape': (4,),\n 'rpn_box_encodings': (2, expected_num_anchors, 4),\n 'rpn_objectness_predictions_with_background':\n (2, expected_num_anchors, 2),\n 'anchors': (expected_num_anchors, 4),\n 'refined_box_encodings': (2 * max_num_proposals, 2, 4),\n 'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),\n 'num_proposals': (2,),\n 'proposal_boxes': (2, max_num_proposals, 4),\n 'proposal_boxes_normalized': (2, max_num_proposals, 4),\n 'box_classifier_features':\n self._get_box_classifier_features_shape(image_size,\n batch_size,\n max_num_proposals,\n initial_crop_size,\n maxpool_stride,\n 3)\n }\n\n for input_shape in input_shapes:\n test_graph = tf.Graph()\n with test_graph.as_default():\n model = self._build_model(\n is_training=False,\n number_of_stages=3,\n second_stage_batch_size=2,\n predict_masks=True)\n preprocessed_inputs = tf.placeholder(tf.float32, shape=input_shape)\n _, true_image_shapes = model.preprocess(preprocessed_inputs)\n result_tensor_dict = model.predict(preprocessed_inputs,\n true_image_shapes)\n init_op = tf.global_variables_initializer()\n with self.test_session(graph=test_graph) as sess:\n sess.run(init_op)\n tensor_dict_out = sess.run(result_tensor_dict, feed_dict={\n preprocessed_inputs:\n np.zeros((batch_size, image_size, image_size, 3))})\n self.assertEqual(\n set(tensor_dict_out.keys()),\n set(expected_shapes.keys()).union(\n set([\n 'detection_boxes', 'detection_scores', 'detection_classes',\n 'detection_masks', 'num_detections'\n ])))\n for key in expected_shapes:\n self.assertAllEqual(tensor_dict_out[key].shape, expected_shapes[key])\n 
self.assertAllEqual(tensor_dict_out['detection_boxes'].shape, [2, 5, 4])\n self.assertAllEqual(tensor_dict_out['detection_masks'].shape,\n [2, 5, 14, 14])\n self.assertAllEqual(tensor_dict_out['detection_classes'].shape, [2, 5])\n self.assertAllEqual(tensor_dict_out['detection_scores'].shape, [2, 5])\n self.assertAllEqual(tensor_dict_out['num_detections'].shape, [2])\n\n def test_predict_gives_correct_shapes_in_train_mode_both_stages_with_masks(\n self):\n test_graph = tf.Graph()\n with test_graph.as_default():\n model = self._build_model(\n is_training=True,\n number_of_stages=2,\n second_stage_batch_size=7,\n predict_masks=True)\n\n batch_size = 2\n image_size = 10\n max_num_proposals = 7\n initial_crop_size = 3\n maxpool_stride = 1\n\n image_shape = (batch_size, image_size, image_size, 3)\n preprocessed_inputs = tf.zeros(image_shape, dtype=tf.float32)\n groundtruth_boxes_list = [\n tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32),\n tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)\n ]\n groundtruth_classes_list = [\n tf.constant([[1, 0], [0, 1]], dtype=tf.float32),\n tf.constant([[1, 0], [1, 0]], dtype=tf.float32)\n ]\n _, true_image_shapes = model.preprocess(tf.zeros(image_shape))\n model.provide_groundtruth(groundtruth_boxes_list,\n groundtruth_classes_list)\n\n result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes)\n expected_shapes = {\n 'rpn_box_predictor_features': (2, image_size, image_size, 512),\n 'rpn_features_to_crop': (2, image_size, image_size, 3),\n 'image_shape': (4,),\n 'refined_box_encodings': (2 * max_num_proposals, 2, 4),\n 'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),\n 'num_proposals': (2,),\n 'proposal_boxes': (2, max_num_proposals, 4),\n 'proposal_boxes_normalized': (2, max_num_proposals, 4),\n 'box_classifier_features':\n self._get_box_classifier_features_shape(\n image_size, batch_size, max_num_proposals, initial_crop_size,\n maxpool_stride, 3),\n 'mask_predictions': (2 * max_num_proposals, 2, 14, 14)\n }\n\n init_op = tf.global_variables_initializer()\n with self.test_session(graph=test_graph) as sess:\n sess.run(init_op)\n tensor_dict_out = sess.run(result_tensor_dict)\n self.assertEqual(\n set(tensor_dict_out.keys()),\n set(expected_shapes.keys()).union(\n set([\n 'rpn_box_encodings',\n 'rpn_objectness_predictions_with_background',\n 'anchors',\n ])))\n for key in expected_shapes:\n self.assertAllEqual(tensor_dict_out[key].shape, expected_shapes[key])\n\n anchors_shape_out = tensor_dict_out['anchors'].shape\n self.assertEqual(2, len(anchors_shape_out))\n self.assertEqual(4, anchors_shape_out[1])\n num_anchors_out = anchors_shape_out[0]\n self.assertAllEqual(tensor_dict_out['rpn_box_encodings'].shape,\n (2, num_anchors_out, 4))\n self.assertAllEqual(\n tensor_dict_out['rpn_objectness_predictions_with_background'].shape,\n (2, num_anchors_out, 2))\n\n def test_postprocess_third_stage_only_inference_mode(self):\n num_proposals_shapes = [(2), (None)]\n refined_box_encodings_shapes = [(16, 2, 4), (None, 2, 4)]\n class_predictions_with_background_shapes = [(16, 3), (None, 3)]\n proposal_boxes_shapes = [(2, 8, 4), (None, 8, 4)]\n batch_size = 2\n image_shape = np.array((2, 36, 48, 3), dtype=np.int32)\n for (num_proposals_shape, refined_box_encoding_shape,\n class_predictions_with_background_shape,\n proposal_boxes_shape) in zip(num_proposals_shapes,\n refined_box_encodings_shapes,\n class_predictions_with_background_shapes,\n proposal_boxes_shapes):\n tf_graph = tf.Graph()\n with 
tf_graph.as_default():\n model = self._build_model(\n is_training=False, number_of_stages=3,\n second_stage_batch_size=6, predict_masks=True)\n total_num_padded_proposals = batch_size * model.max_num_proposals\n proposal_boxes = np.array(\n [[[1, 1, 2, 3],\n [0, 0, 1, 1],\n [.5, .5, .6, .6],\n 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],\n [[2, 3, 6, 8],\n [1, 2, 5, 3],\n 4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]])\n num_proposals = np.array([3, 2], dtype=np.int32)\n refined_box_encodings = np.zeros(\n [total_num_padded_proposals, model.num_classes, 4])\n class_predictions_with_background = np.ones(\n [total_num_padded_proposals, model.num_classes+1])\n\n num_proposals_placeholder = tf.placeholder(tf.int32,\n shape=num_proposals_shape)\n refined_box_encodings_placeholder = tf.placeholder(\n tf.float32, shape=refined_box_encoding_shape)\n class_predictions_with_background_placeholder = tf.placeholder(\n tf.float32, shape=class_predictions_with_background_shape)\n proposal_boxes_placeholder = tf.placeholder(\n tf.float32, shape=proposal_boxes_shape)\n image_shape_placeholder = tf.placeholder(tf.int32, shape=(4))\n _, true_image_shapes = model.preprocess(\n tf.zeros(image_shape_placeholder))\n detections = model.postprocess({\n 'refined_box_encodings': refined_box_encodings_placeholder,\n 'class_predictions_with_background':\n class_predictions_with_background_placeholder,\n 'num_proposals': num_proposals_placeholder,\n 'proposal_boxes': proposal_boxes_placeholder,\n 'image_shape': image_shape_placeholder,\n 'detection_boxes': tf.zeros([2, 5, 4]),\n 'detection_masks': tf.zeros([2, 5, 14, 14]),\n 'detection_scores': tf.zeros([2, 5]),\n 'detection_classes': tf.zeros([2, 5]),\n 'num_detections': tf.zeros([2]),\n }, true_image_shapes)\n with self.test_session(graph=tf_graph) as sess:\n detections_out = sess.run(\n detections,\n feed_dict={\n refined_box_encodings_placeholder: refined_box_encodings,\n class_predictions_with_background_placeholder:\n class_predictions_with_background,\n num_proposals_placeholder: num_proposals,\n proposal_boxes_placeholder: proposal_boxes,\n image_shape_placeholder: image_shape\n })\n self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])\n self.assertAllEqual(detections_out['detection_masks'].shape,\n [2, 5, 14, 14])\n self.assertAllClose(detections_out['detection_scores'].shape, [2, 5])\n self.assertAllClose(detections_out['detection_classes'].shape, [2, 5])\n self.assertAllClose(detections_out['num_detections'].shape, [2])\n self.assertTrue(np.amax(detections_out['detection_masks'] <= 1.0))\n self.assertTrue(np.amin(detections_out['detection_masks'] >= 0.0))\n\n def _get_box_classifier_features_shape(self,\n image_size,\n batch_size,\n max_num_proposals,\n initial_crop_size,\n maxpool_stride,\n num_features):\n return (batch_size * max_num_proposals,\n initial_crop_size/maxpool_stride,\n initial_crop_size/maxpool_stride,\n num_features)\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.constant", "tensorflow.test.main" ], [ "tensorflow.constant", "tensorflow.slice", "tensorflow.reduce_sum", "tensorflow.test.main", "tensorflow.placeholder", "tensorflow.contrib.framework.is_tensor", "tensorflow.get_default_graph", "numpy.zeros" ], [ "tensorflow.nn.relu", "tensorflow.concat", "tensorflow.global_variables", "tensorflow.variable_scope", "tensorflow.split" ], [ "tensorflow.Graph", "numpy.amax", "tensorflow.constant", "tensorflow.zeros", "numpy.amin", "tensorflow.test.main", "tensorflow.ones", "tensorflow.placeholder", "numpy.ones", "tensorflow.global_variables_initializer", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "0.12", "1.0" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
nicola144/auxiliary-particle-filters
[ "61d72e9163abb73007c0fbd30f68d4cc6d7ab4e9" ]
[ "src/utils.py" ]
[ "import re\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import rcParams\nfrom scipy.integrate import simps\nfrom scipy.special import logsumexp\nfrom scipy.optimize import minimize\n# from sklearn.cluster import DBSCAN\n# from sklearn.preprocessing import StandardScaler\nimport time\nimport random\nfrom tqdm import tqdm\n\n# random_seed = 5\n\ndef compute_cluster_centers(all_points):\n\tall_points = StandardScaler().fit_transform(all_points)\n\tdb = DBSCAN(eps=0.3, min_samples=10).fit(all_points)\n\tlabels = db.labels_\n\tprint(labels)\n\tsys.exit()\n\tlabels_unique = np.unique(db.labels_)\n\n\tprint(labels_unique.shape)\n\tsys.exit()\n\n\tcentroids = []\n\tfor i in range(len(labels_unique)):\n\t\tcentroids.append(np.mean(all_points[labels_unique==i,:], axis=0))\n\n\tcentroids = np.asarray(centroids)\n\tprint(centroids.shape)\n\tsys.exit()\n\n# Implements the IHS update. \"right\" means the term on the right of the difference\n# in the update rule (same for \"left\")\ndef ihs_step(lamb, A, b, S, old_lambda, K):\n\tright = (A.T.dot(b - A.dot(old_lambda))).T.dot(lamb)\n\tnorm_term = S.dot(A.dot(lamb - old_lambda))\n\tleft = (1. / (2. * K)) * norm_term.T.dot(norm_term)\n\tres = left - right\n\treturn res\n\ndef randomized_nnls(A, b, n_particle):\n\t\"\"\"\n\tAim is to solve NNLS problem, using Iterative Hessian Sketching:\n\tfind NONNEGATIVE lambda = arg min || A . lambda - b ||_{2}^{2}\n\n\tAlso, there should be at least one non zero lambda.\n\t\"\"\"\n\tK = int(n_particle / 2)\n\t# Initialized at 0 for iteration 0 as in Pilanci & Wainwright 2016\n\tlambdas = np.zeros(b.shape)\n\n\t# Result lambdas should NOT be ALL zeros.\n\t# cons = ({'type': 'ineq', 'fun': lambda x: x.dot(x) - 0.01})\n\n\tfor i in range(5): # 5 iterations\n\t\tinit_lambdas = np.random.multivariate_normal(mean=np.zeros(b.shape), cov=np.eye(b.shape[0]))\n\n\t\t# Sketching matrix. Using Gaussian Sketch.\n\t\tS = np.random.normal(0, 1, (K, b.shape[0]))\n\t\tS /= np.sqrt(K)\n\n\t\t# Minimize the IHS objective, subject to a positive result, with the added constraint as above\n\t\tres = minimize(ihs_step, init_lambdas, (A, b, S, lambdas, K), bounds=[(0., None)] * b.shape[0])\n\t\tlambdas = res['x']\n\n\treturn lambdas\n\n\ndef scale_reduced_system(smaller_A, smaller_b):\n\tsmallest_exp_A = np.min(smaller_A)\n\tsmallest_exp_b = np.min(smaller_b)\n\tsmallest = np.min([smallest_exp_A, smallest_exp_b])\n\tsmallest = np.format_float_scientific(smallest)\n\tmin_exp = int(re.findall(r'\\d+', smallest)[-1])\n\tscaled_smaller_A = smaller_A * (10 ** min_exp)\n\tscaled_smaller_b = smaller_b * (10 ** min_exp)\n\n\treturn scaled_smaller_A, scaled_smaller_b\n\n\n\ndef safe_invert(matrix):\n\treturn np.linalg.lstsq(matrix, np.eye(matrix.shape[0]))[0]\n\t# return np.linalg.inv(matrix)\n\ndef reduce_system(n_particle, A, b):\n\n\t# K = int(n_particle / 50)\n\tK = 5\n\tindices_tokeep = b.argsort()[-K:][::-1]\n\t# indices_tokeep = np.round(np.linspace(0, b.shape[0] - 1, K)).astype(int)\n\n\tsmaller_b = b[indices_tokeep]\n\ttemp = A[:, indices_tokeep]\n\tsmaller_A = temp[indices_tokeep, :]\n\n\treturn smaller_A, smaller_b, indices_tokeep\n\n\ndef sanity_checks(unnormalized):\n\tif np.all(unnormalized == 0.):\n\t\tprint('ALL zeros ... 
\\n ')\n\t\tprint(unnormalized)\n\t\tsys.exit()\n\n\tif np.isnan(np.log(unnormalized)).any():\n\t\tprint(unnormalized)\n\t\tprint('some log nan')\n\t\tsys.exit()\n\n\n# def set_plotting():\n# \t# Set plotting\n# \tparams = {\n# \t\t'axes.labelsize': 25,\n# \t\t'font.size': 20,\n# \t\t'legend.fontsize': 30,\n# \t\t'xtick.labelsize': 25,\n# \t\t'ytick.labelsize': 25,\n# \t\t'text.usetex': False,\n# \t\t'figure.figsize': [20, 12],\n# \t\t'axes.labelpad': 10,\n# \t\t'lines.linewidth': 10,\n# \t\t'legend.loc': 'upper right'\n# \t}\n# \trcParams['agg.path.chunksize'] = 10000\n# \trcParams.update(params)\n# \tplt.style.use('bmh')\ndef set_plotting():\n\t# Set plotting\n\tparams = {\n\t\t'axes.labelsize': 28,\n\t\t'font.size': 20,\n\t\t'legend.fontsize': 28,\n\t\t'xtick.labelsize': 28,\n\t\t'ytick.labelsize': 28,\n\t\t'text.usetex': False,\n\t\t'figure.figsize': [20, 12],\n\t\t'axes.labelpad': 10,\n\t\t'lines.linewidth': 10,\n\t\t'legend.loc': 'upper right'\n\t}\n\trcParams['agg.path.chunksize'] = 10000\n\trcParams.update(params)\n\tplt.style.use('bmh')\n\n\n\ndef is_pos_def(x):\n\treturn np.all(np.linalg.eigvals(x) > 0)\n\n\ndef chi_square(target, proposal, x):\n\treturn simps((target - proposal) ** 2 / (proposal), dx=x[1] - x[0])\n\n\n# def mse(x, y):\n# \treturn np.average((x - y) ** 2, axis=0)\n\n# Normalized ?\ndef mse(x, y):\n\treturn np.average((x - y) ** 2, axis=0) / np.average(np.sum(y**2,axis=-1))\n\n\ndef sparsity(x):\n\treturn 100. - ((float(np.count_nonzero(x)) / float(x.size)) * 100)\n\n\ndef normalize(unnormalized):\n\treturn unnormalized / np.sum(unnormalized)\n\n\ndef check_symmetric(a, rtol=1e-05, atol=1e-08):\n\treturn np.allclose(a, a.T, rtol=rtol, atol=atol)\n\n\ndef normalize_log(l):\n\treturn np.exp(l - logsumexp(l)).flatten()\n\ndef log_normalize_log(unnormalized):\n\treturn unnormalized - logsumexp(unnormalized)\n\ndef get_ess(logw_norm):\n\treturn np.exp(-logsumexp(2*logw_norm))\n\n\ndef logmatmulexp(log_A, log_B):\n\t\"\"\"Given matrix log_A of shape ϴ×R and matrix log_B of shape R×I, calculates\n\t(log_A.exp() @ log_B.exp()).log() in a numerically stable way.\n\tHas O(ϴRI) time complexity and space complexity.\"\"\"\n\n\tif len(log_B.shape) == 1:\n\t\tlog_B = log_B.reshape(-1, 1)\n\n\tϴ, R = log_A.shape\n\tI = log_B.shape[1]\n\tassert log_B.shape == (R, I)\n\tlog_A_expanded = np.broadcast_to(np.expand_dims(log_A, 2), (ϴ, R, I))\n\tlog_B_expanded = np.broadcast_to(np.expand_dims(log_B, 0), (ϴ, R, I))\n\tlog_pairwise_products = log_A_expanded + log_B_expanded # shape: (ϴ, R, I)\n\n\tif log_B.shape[1] == 1:\n\t\treturn logsumexp(log_pairwise_products, axis=1).flatten()\n\n\treturn logsumexp(log_pairwise_products, axis=1)\n\n# works , but useless \n# def cost(log_params,logA,logb):\n\n# with precision(300):\n\n# # print(log_params)\n\n# left = np.logaddexp( logmatmulexp(logA, log_params) , - logb).reshape(1,-1) \n\n# # print(left)\n\n# right = np.logaddexp( logmatmulexp(logA, log_params), - logb ) \n\n# # print(right)\n\n# res = logmatmulexp( left, right )\n\n# # print(np.exp(res))\n\n# return res\n" ]
[ [ "numpy.expand_dims", "numpy.linalg.eigvals", "numpy.sqrt", "numpy.asarray", "numpy.all", "numpy.mean", "numpy.allclose", "numpy.unique", "numpy.eye", "numpy.count_nonzero", "numpy.format_float_scientific", "numpy.zeros", "matplotlib.pyplot.style.use", "numpy.log", "numpy.min", "matplotlib.rcParams.update", "scipy.optimize.minimize", "scipy.integrate.simps", "numpy.sum", "scipy.special.logsumexp", "numpy.random.normal", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "0.19", "1.5", "1.7", "1.0", "1.2", "1.8" ], "tensorflow": [] } ]
fredshentu/public_model_based_controller
[ "9301699bc56aa49ba5c699f7d5be299046a8aa0c", "9301699bc56aa49ba5c699f7d5be299046a8aa0c" ]
[ "railrl/predictors/state_action_network.py", "railrl/planner/forward_planner/choose_init_goal_pairs_pushing_active_sampling.py" ]
[ "import abc\nimport tensorflow as tf\n\nfrom railrl.core.neuralnet import NeuralNetwork\nfrom rllab.misc.overrides import overrides\n\n\nclass StateActionNetwork(NeuralNetwork, metaclass=abc.ABCMeta):\n \"\"\"\n A map from (state, action) to a vector\n \"\"\"\n\n def __init__(\n self,\n name_or_scope,\n output_dim,\n env_spec=None,\n action_dim=None,\n observation_dim=None,\n action_input=None,\n observation_input=None,\n **kwargs\n ):\n \"\"\"\n Create a state-action network.\n\n :param name_or_scope: a string or VariableScope\n :param output_dim: int, output dimension of this network\n :param env_spec: env spec for an Environment\n :param action_dim: int, action dimension\n :param observation_input: tf.Tensor, observation input. If None,\n a placeholder of shape [None, observation dim] will be made\n :param action_input: tf.Tensor, observation input. If None,\n a placeholder of shape [None, action dim] will be made\n :param kwargs: kwargs to be passed to super\n \"\"\"\n self.setup_serialization(locals())\n super(StateActionNetwork, self).__init__(name_or_scope, **kwargs)\n self.output_dim = output_dim\n\n assert env_spec or (action_dim and observation_dim)\n if action_dim is None:\n self.action_dim = env_spec.action_space.flat_dim\n else:\n self.action_dim = action_dim\n\n if observation_dim is None:\n self.observation_dim = env_spec.observation_space.flat_dim\n else:\n self.observation_dim = observation_dim\n\n with tf.variable_scope(self.scope_name):\n if action_input is None:\n action_input = tf.placeholder(\n tf.float32,\n [None, self.action_dim],\n \"_actions\")\n if observation_input is None:\n if hasattr(self.observation_dim, '__len__'):\n observation_input = tf.placeholder(\n tf.float32,\n [None] + list(self.observation_dim),\n \"_observation\")\n else:\n observation_input = tf.placeholder(\n tf.float32,\n [None ,self.observation_dim],\n \"_observation\")\n\n self.action_input = action_input\n self.observation_input = observation_input\n self._create_network(observation_input=observation_input,\n action_input=action_input)\n\n @property\n @overrides\n def _input_name_to_values(self):\n return dict(\n observation_input=self.observation_input,\n action_input=self.action_input,\n )\n\n # TODO(vpong): make it so that the inputs get automatically processed\n", "import argparse\nimport joblib\nimport uuid\nimport tensorflow as tf\nimport pickle\nimport time\nimport numpy as np\nimport gym\n\ndef env_get_state(env):\n\tqpos = env.env.model.data.qpos.flat.copy()\n\tqvel = env.env.model.data.qvel.flat.copy()\n\treturn np.concatenate([qpos, qvel])\n\ndef env_set_state(env, ob):\n\tqpos_idx = env.env.init_qpos.shape[0]\n\tenv.env.set_state(ob[:qpos_idx], ob[qpos_idx:])\n\t\ndef env_set_and_render(env, state):\n\tenv_set_state(env, state)\n\tenv.render()\n\t\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('env_name', type=str,\n\t\t\t\t\t\thelp='name of the env want to use')\n\tparser.add_argument('save_path', type=str,\n\t\t\t\t\t\thelp='where to save')\n\tparser.add_argument('policy', type=str, help='add policy, start applying random action after excuting policy')\n\tparser.add_argument('--num_tests', type=int, default=10000)\n\t\n\targs = parser.parse_args()\n\tenv = gym.make(args.env_name)\n\n\tS_init_list = []\n\tS_goal_list = []\n\t\n\n\tgpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)\n\n\twith tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n\t\t#load policy \n\t\tpolicy_data = 
joblib.load(args.policy)\n\t\tpolicy = policy_data['policy']\n\t\t\n\t\tcounter = 0\n\t\twhile counter < args.num_tests:\n\t\t\tprint(counter)\n\t\t\tobs = env.reset()\n\t\t\treal_state = env_get_state(env)\n\t\t\tif np.random.rand() < 0.15:\n\t\t\t\tS_init_list.append(real_state)\n\t\t\t\tfor _ in range(50):\n\t\t\t\t\tenv.step(env.action_space.sample())\n\t\t\t\tS_goal_list.append(env_get_state(env))\n\t\t\t\tcounter += 1\n\t\t\t\t# print(\"S_init\")\n\t\t\t\t# env_set_and_render(env, real_state)\n\t\t\t\t# print(\"S_goal\")\n\t\t\t\t# env_set_and_render(env, S_goal_list[-1])\n\t\t\t\t# import pdb; pdb.set_trace()\n\t\t\telse:\n\t\t\t\tpolicy_traj_state = [real_state]\n\t\t\t\tpolicy_traj_contact = [False]\n\t\t\t\tpolicy_contact_counter = 0\n\t\t\t\tfor i in range(80):\n\t\t\t\t\tobs, r, d, env_info = env.step(policy.get_action(obs)[0])\n\t\t\t\t\tpolicy_traj_state.append(env_get_state(env))\n\t\t\t\t\tif env_info['contact']:\n\t\t\t\t\t\tpolicy_traj_contact.append(True)\n\t\t\t\t\t\tpolicy_contact_counter += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tpolicy_traj_contact.append(False)\n\t\t\t\tif policy_contact_counter > 7:\n\t\t\t\t#check the policy trajectory choose init and goal state from policy trajectory\n\t\t\t\t\tfirst_touch_index = policy_traj_contact.index(True)\n\t\t\t\t\tS_init_list.append(policy_traj_state[first_touch_index - 1])\n\t\t\t\t\tgoal_index = first_touch_index + np.random.randint(4,10)\n\t\t\t\t\tgoal_index = np.clip(goal_index, 0, 80)\n\t\t\t\t\tS_goal_list.append(policy_traj_state[goal_index])\n\t\t\t\t\tcounter += 1\n\t\t\t\t\t# print(first_touch_index)\n\t\t\t\t\t# print(goal_index)\n\t\t\t\t\t# print(\"S_init\")\n\t\t\t\t\t# env_set_and_render(env, S_init_list[-1])\n\t\t\t\t\t# print(\"S_goal\")\n\t\t\t\t\t# import pdb; pdb.set_trace()\n\t\t\t\t\t# env_set_and_render(env, S_goal_list[-1])\n\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t#else: do nothing, don't use this trajectory\n\t\t#save to args.save_path\n\t\twith open(args.save_path, 'wb') as handle:\n\t\t\tsave_dict = {'S_init': S_init_list, 'S_goal':S_goal_list}\n\t\t\tpickle.dump(save_dict, handle, pickle.HIGHEST_PROTOCOL)" ]
[ [ "tensorflow.variable_scope", "tensorflow.placeholder" ], [ "numpy.clip", "numpy.concatenate", "tensorflow.ConfigProto", "tensorflow.GPUOptions", "numpy.random.rand", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
topolphukhanh/xam
[ "3fa958ba8b0c8e8e266cac9997b7a7d0c309f55c", "3fa958ba8b0c8e8e266cac9997b7a7d0c309f55c" ]
[ "xam/preprocessing/binning/mdlp.py", "xam/ensemble/lgbm_cv.py" ]
[ "\"\"\"\nMinimum Description Length Principle (MDLP) binning\n\n- Original paper: http://sci2s.ugr.es/keel/pdf/algorithm/congreso/fayyad1993.pdf\n- Implementation inspiration: https://www.ibm.com/support/knowledgecenter/it/SSLVMB_21.0.0/com.ibm.spss.statistics.help/alg_optimal-binning.htm\n\"\"\"\n\nimport collections\nimport math\n\nimport numpy as np\nfrom scipy import stats\nfrom sklearn.utils import check_X_y\n\nfrom .base import BaseSupervisedBinner\n\n\nclass MDLPBinner(BaseSupervisedBinner):\n\n def fit(self, X, y, **fit_params):\n \"\"\"Determine which are the best cut points for each column in X based on y.\"\"\"\n\n X, y = check_X_y(X, y, y_numeric=True)\n\n self.cut_points_ = [mdlp_cut(x, y, []) for x in X.T]\n return self\n\n @property\n def cut_points(self):\n return self.cut_points_\n\n\ndef calc_class_entropy(y):\n class_counts = np.unique(y, return_counts=True)[1]\n return stats.entropy(class_counts, base=2)\n\n\ndef calc_class_information_entropy(x, y, cut_point):\n partition = x <= cut_point\n\n y_1 = y[partition]\n y_2 = y[~partition]\n\n ent_1 = calc_class_entropy(y_1)\n ent_2 = calc_class_entropy(y_2)\n\n return (y_1.size * ent_1 + y_2.size * ent_2) / (y_1.size + y_2.size)\n\n\ndef mdlp_cut(x, y, cut_points):\n\n # No cut is necessary if there is only one class\n if len(np.unique(y)) == 1:\n return\n\n # Calculate the current entropy\n y_ent = calc_class_entropy(y)\n\n # Sort x and y according to x\n sorted_indexes = x.argsort()\n x = x[sorted_indexes]\n y = y[sorted_indexes]\n\n # Find the potential cut points\n potential_cut_points = []\n for i in range(x.size - 1):\n potential_cut_points.append((x[i] + x[i+1]) / 2)\n\n # Ignore the cut points that appear more than once\n potential_cut_points = list(set(potential_cut_points))\n\n # Find the cut point with gives the lowest class information entropy\n cut_point = min(\n potential_cut_points,\n key=lambda cut_point: calc_class_information_entropy(x, y, cut_point)\n )\n\n # Calculate the information gain obtained with the obtained cut point\n new_ent = calc_class_information_entropy(x, y, cut_point)\n gain = y_ent - new_ent\n\n # Partition the data\n partition = x <= cut_point\n x_1 = x[partition]\n y_1 = y[partition]\n x_2 = x[~partition]\n y_2 = y[~partition]\n\n # Get the number of unique classes in each group\n k = len(np.unique(y))\n k_1 = len(np.unique(y_1))\n k_2 = len(np.unique(y_2))\n\n # Calculate the entropy of each group\n y_1_ent = calc_class_entropy(y_1)\n y_2_ent = calc_class_entropy(y_2)\n\n # Calculate the acceptance criterion\n delta = math.log2(3 ** k) - k * y_ent + k_1 * y_1_ent + k_2 * y_2_ent\n n = y.size\n acceptance_criterion = (math.log2(n - 1) + delta) / n\n\n # Add the cut point if the gain is higher than the acceptance criterion\n if gain > acceptance_criterion:\n cut_points.append(cut_point)\n # Recursively check if further cuts are possible\n mdlp_cut(x_1, y_1, cut_points)\n mdlp_cut(x_2, y_2, cut_points)\n\n return sorted(cut_points)\n", "import lightgbm as lgbm\nimport numpy as np\nimport pandas as pd\nfrom sklearn import model_selection\nfrom sklearn import utils\n\n\nclass LGBMCV():\n\n def __init__(self, cv=model_selection.KFold(n_splits=5, shuffle=True), **kwargs):\n self.cv = cv\n self.lgbm_params = kwargs\n\n def fit(self, X, y=None, **kwargs):\n\n self.models_ = []\n feature_names = X.columns if isinstance(X, pd.DataFrame) else list(range(X.shape[1]))\n self.feature_importances_ = pd.DataFrame(index=feature_names)\n self.evals_results_ = {}\n\n for i, (fit_idx, val_idx) in 
enumerate(self.cv.split(X, y)):\n\n # Split the dataset according to the fold indexes\n if isinstance(X, pd.DataFrame):\n X_fit = X.iloc[fit_idx]\n X_val = X.iloc[val_idx]\n else:\n X_fit = X[fit_idx]\n X_val = X[val_idx]\n\n if isinstance(y, pd.Series):\n y_fit = y.iloc[fit_idx]\n y_val = y.iloc[val_idx]\n else:\n y_fit = y[fit_idx]\n y_val = y[val_idx]\n\n # https://lightgbm.readthedocs.io/en/latest/Python-API.html#lightgbm.Dataset\n fit_set = lgbm.Dataset(X_fit, y_fit)\n val_set = lgbm.Dataset(X_val, y_val)\n\n # https://lightgbm.readthedocs.io/en/latest/Python-API.html#lightgbm.train\n self.evals_results_[i] = {}\n model = lgbm.train(\n params=self.lgbm_params,\n train_set=fit_set,\n valid_sets=(fit_set, val_set),\n valid_names=('fit', 'val'),\n evals_result=self.evals_results_[i],\n **kwargs\n )\n\n # Store the feature importances\n self.feature_importances_['gain_{}'.format(i)] = model.feature_importance('gain')\n self.feature_importances_['split_{}'.format(i)] = model.feature_importance('split')\n\n # Store the model\n self.models_.append(model)\n\n return self\n\n def predict(self, X):\n\n utils.validation.check_is_fitted(self, ['models_'])\n\n y = np.zeros(len(X))\n\n for model in self.models_:\n y += model.predict(X)\n\n return y / len(self.models_)\n" ]
[ [ "scipy.stats.entropy", "sklearn.utils.check_X_y", "numpy.unique" ], [ "sklearn.utils.validation.check_is_fitted", "sklearn.model_selection.KFold", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ezeddin/random_forest
[ "07a23af1764fbf7a54a27e79d5ac68c69a64f0b1" ]
[ "bin/AdaBoost.py" ]
[ "from sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.datasets import make_gaussian_quantiles\nfrom sklearn.ensemble import RandomForestClassifier\n\ndef AdaBoost(X_train, y_train, X_test, DEPTH, N_ESTIMATORS):\n\t# Create and fit an AdaBoosted decision tree\n\tbdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=DEPTH),\n\t\t\t algorithm=\"SAMME\",\n\t\t\t n_estimators=N_ESTIMATORS)\n\n\tbdt.fit(X_train, y_train)\n\n\t# Test classifier on test data\n\ty_out = bdt.predict(X_test)\n\treturn y_out\n\ndef ForestIB(X_train, y_train, X_test, DEPTH, N_ESTIMATORS):\n\n\t# Create the random forest object which will include all the parameters\n\t# for the fit\n\tforest = RandomForestClassifier(n_estimators = N_ESTIMATORS, max_depth=DEPTH, max_features=DEPTH, criterion='gini', n_jobs=-1)\n\n\t# Fit the training data to the Survived labels and create the decision trees\n\tforest = forest.fit(X_train, y_train)\n\n\t# Take the same decision trees and run it on the test data\n\ty_out = forest.predict(X_test)\n\treturn y_out\n" ]
[ [ "sklearn.tree.DecisionTreeClassifier", "sklearn.ensemble.RandomForestClassifier" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BioSystemsUM/biotmpy
[ "f981d58cf7f53a2aa09708e13d6561533c164e1f", "f981d58cf7f53a2aa09708e13d6561533c164e1f" ]
[ "pipelines/cv_biobert_lstm_ft.py", "pipelines/cv_biobert_cls.py" ]
[ "model_name= 'cv_biobert_lstm_ft'\r\n\r\nimport sys \r\nsys.path.append('../')\r\nimport os\r\nimport tensorflow \r\nimport numpy as np\r\nimport random\r\n\r\n\r\nseed_value = 123123\r\n#seed_value = None\r\n\r\nenvironment_name = sys.executable.split('/')[-3]\r\nprint('Environment:', environment_name)\r\nos.environ[environment_name] = str(seed_value)\r\n\r\nnp.random.seed(seed_value)\r\nrandom.seed(seed_value)\r\ntensorflow.random.set_seed(seed_value)\r\n\r\nfrom tensorflow.compat.v1 import ConfigProto\r\nfrom tensorflow.compat.v1 import InteractiveSession\r\nimport tensorflow.compat.v1.keras.backend as K\r\nconfig = ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\nsession = InteractiveSession(config=config)\r\nK.set_session(session)\r\n\r\nmultiple_gpus = [0,1,2,3]\r\n#multiple_gpus = None\r\n\r\nimport os\r\nimport tensorflow as tf\r\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\r\n\r\nif multiple_gpus:\r\n devices = []\r\n for gpu in multiple_gpus:\r\n devices.append('/gpu:' + str(gpu)) \r\n strategy = tensorflow.distribute.MirroredStrategy(devices=devices)\r\n\r\nelse:\r\n # Get the GPU device name.\r\n device_name = tensorflow.test.gpu_device_name()\r\n # The device name should look like the following:\r\n if device_name == '/device:GPU:0':\r\n print('Using GPU: {}'.format(device_name))\r\n else:\r\n raise SystemError('GPU device not found')\r\n\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = device_name\r\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n\r\nfrom wrappers.bioc_wrapper import bioc_to_docs, bioc_to_relevances\r\nfrom wrappers.pandas_wrapper import relevances_to_pandas, docs_to_pandasdocs\r\nfrom mlearning.dl import DL_preprocessing\r\nfrom mlearning.dl_models import Bert_LSTM_opt\r\nfrom mlearning.dl_models import DeepDTA\r\nfrom mlearning.embeddings import compute_embedding_matrix, glove_embeddings_2\r\nimport numpy as np\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import recall_score\r\nfrom sklearn.metrics import f1_score, matthews_corrcoef\r\nfrom sklearn.metrics import cohen_kappa_score\r\nfrom sklearn.metrics import roc_auc_score, auc, roc_curve, precision_recall_curve\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom tensorflow.keras.models import load_model\r\nfrom mlearning.dl import plot_training_history, Bert_preprocessing\r\nfrom mlearning.dl_config import DLConfig\r\nfrom mlearning.dl import average_precision\r\nfrom tensorflow.keras.preprocessing import text\r\nfrom mlearning.dl import plot_roc_n_pr_curves\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom keras.wrappers.scikit_learn import KerasClassifier\r\nfrom sklearn.model_selection import StratifiedKFold\r\nimport nltk\r\nnltk.download('stopwords')\r\nnltk.download('wordnet')\r\nnltk.download('punkt')\r\nfrom nltk.corpus import stopwords\r\nimport seaborn as sns\r\nimport pandas as pd\r\nimport os\r\nfrom keras import backend as K\r\nimport pickle\r\nfrom transformers import BertTokenizer\r\n\r\ntrain_dataset_path = '../datasets/PMtask_Triage_TrainingSet.xml'\r\ntest_dataset_path = '../datasets/PMtask_Triage_TestSet.xml'\r\n\r\n\r\n\r\ndl_config = DLConfig(model_name=model_name, seed_value=seed_value)\r\n#dl_config.stop_words = set(stopwords.words('english')) \r\ndl_config.stop_words = None\r\ndl_config.lower = False \r\ndl_config.remove_punctuation = 
False\r\ndl_config.split_by_hyphen = False\r\ndl_config.lemmatization = False \r\ndl_config.stems = False \r\n\r\n\r\ndocs_train = bioc_to_docs(train_dataset_path, dl_config=dl_config)\r\nrelevances_train = bioc_to_relevances(train_dataset_path, 'protein-protein')\r\n\r\n\r\nx_train_df = docs_to_pandasdocs(docs_train)\r\ny_train_df = relevances_to_pandas(x_train_df, relevances_train)\r\n\r\n#Parameters\r\ndl_config.padding = 'post' #'pre' -> default; 'post' -> alternative\r\ndl_config.truncating = 'post' #'pre' -> default; 'post' -> alternative #####\r\n\r\ndl_config.max_sent_len = 512 #sentences will have a maximum of \"max_sent_len\" words\r\ndl_config.nmr_sentences = 1 #[1 or 2]\r\n\r\n\r\ndl_config.learning_rate = 3e-5\r\ndl_config.epochs = 2\r\n\r\ndl_config.batch_size=16\r\n\r\n\r\n\r\ndl_config.k_fold=10\r\nkfold = StratifiedKFold(n_splits=dl_config.k_fold, shuffle=True, random_state=dl_config.seed_value)\r\n\r\ncv_avp_scores = []\r\ncv_acc_scores=[]\r\ncv_prec_scores = []\r\ncv_rec_scores = []\r\ncv_f1_scores = []\r\n\r\nfor train_index, test_index in kfold.split(x_train_df.to_numpy(), y_train_df.to_numpy()):\r\n print(len(train_index))\r\n print(len(test_index))\r\n K.clear_session()\r\n\r\n\r\n environment_name = sys.executable.split('/')[-3]\r\n print('Environment:', environment_name)\r\n os.environ[environment_name] = str(dl_config.seed_value)\r\n\r\n np.random.seed(dl_config.seed_value)\r\n random.seed(dl_config.seed_value)\r\n tensorflow.random.set_seed(dl_config.seed_value)\r\n\r\n dl_config.tokenizer = BertTokenizer.from_pretrained('biobert_v1.1_pubmed', do_lower_case=False)\r\n\r\n x_train, y_train = Bert_preprocessing(x_train_df.iloc[train_index,], y_train_df.iloc[train_index,],\r\n dl_config=dl_config, \r\n validation_percentage=0,\r\n seed_value=dl_config.seed_value)\r\n\r\n\r\n biobert_path = './biobert_v1.1_pubmed'\r\n\r\n if multiple_gpus:\r\n with strategy.scope():\r\n model = Bert_LSTM_opt(dl_config, learning_rate=dl_config.learning_rate,static_bert=False, bert_name_or_path=biobert_path, bert_config=True)\r\n else:\r\n model = Bert_LSTM_opt(dl_config, learning_rate=dl_config.learning_rate,static_bert=False, bert_name_or_path=biobert_path, bert_config=True)\r\n\r\n history = model.fit(x_train, y_train,\r\n epochs=dl_config.epochs,\r\n batch_size=dl_config.batch_size)\r\n\r\n x_test, y_test = Bert_preprocessing(x_train_df.iloc[test_index,], y_train_df.iloc[test_index,], dl_config=dl_config)\r\n\r\n yhat_probs = model.predict(x_test, verbose=0)\r\n yhat_probs = yhat_probs[:, 0]\r\n\r\n yhat_classes = np.where(yhat_probs > 0.5, 1, yhat_probs)\r\n yhat_classes = np.where(yhat_classes < 0.5, 0, yhat_classes).astype(np.int64)\r\n \r\n test_avp = average_precision(y_train_df.iloc[test_index,], yhat_probs)\r\n test_acc = accuracy_score(y_test, yhat_classes)\r\n test_prec = precision_score(y_test, yhat_classes)\r\n test_rec = recall_score(y_test, yhat_classes)\r\n test_f1 = f1_score(y_test, yhat_classes)\r\n cv_avp_scores.append(test_avp)\r\n cv_acc_scores.append(test_acc)\r\n cv_prec_scores.append(test_prec)\r\n cv_rec_scores.append(test_rec)\r\n cv_f1_scores.append(test_f1)\r\n\r\n K.clear_session()\r\n del model\r\n tf.compat.v1.reset_default_graph()\r\n\r\ndl_config.cv_avp = cv_avp_scores\r\ndl_config.cv_acc = cv_acc_scores\r\ndl_config.cv_prec = cv_prec_scores\r\ndl_config.cv_rec = cv_rec_scores\r\ndl_config.cv_f1 = cv_f1_scores\r\n\r\n\r\n\r\ndl_config.save()\r\n\r\n\r\ndl_config.write_report()\r\n", "model_name= 'cv_biobert_cls'\r\n\r\nimport sys 
\r\nsys.path.append('../')\r\nimport os\r\nimport tensorflow \r\nimport numpy as np\r\nimport random\r\n\r\n\r\nseed_value = 123123\r\n#seed_value = None\r\n\r\nenvironment_name = sys.executable.split('/')[-3]\r\nprint('Environment:', environment_name)\r\nos.environ[environment_name] = str(seed_value)\r\n\r\nnp.random.seed(seed_value)\r\nrandom.seed(seed_value)\r\ntensorflow.random.set_seed(seed_value)\r\n\r\nfrom tensorflow.compat.v1 import ConfigProto\r\nfrom tensorflow.compat.v1 import InteractiveSession\r\nimport tensorflow.compat.v1.keras.backend as K\r\nconfig = ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\nsession = InteractiveSession(config=config)\r\nK.set_session(session)\r\n\r\nmultiple_gpus = [0,1,2,3]\r\n#multiple_gpus = None\r\n\r\nimport os\r\nimport tensorflow as tf\r\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\r\n\r\nif multiple_gpus:\r\n devices = []\r\n for gpu in multiple_gpus:\r\n devices.append('/gpu:' + str(gpu)) \r\n strategy = tensorflow.distribute.MirroredStrategy(devices=devices)\r\n\r\nelse:\r\n # Get the GPU device name.\r\n device_name = tensorflow.test.gpu_device_name()\r\n # The device name should look like the following:\r\n if device_name == '/device:GPU:0':\r\n print('Using GPU: {}'.format(device_name))\r\n else:\r\n raise SystemError('GPU device not found')\r\n\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = device_name\r\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n\r\nfrom wrappers.bioc_wrapper import bioc_to_docs, bioc_to_relevances\r\nfrom wrappers.pandas_wrapper import relevances_to_pandas, docs_to_pandasdocs\r\nfrom mlearning.dl import DL_preprocessing\r\nfrom mlearning.dl_models import Bert_CLS_opt\r\nfrom mlearning.dl_models import DeepDTA\r\nfrom mlearning.embeddings import compute_embedding_matrix, glove_embeddings_2\r\nimport numpy as np\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import recall_score\r\nfrom sklearn.metrics import f1_score, matthews_corrcoef\r\nfrom sklearn.metrics import cohen_kappa_score\r\nfrom sklearn.metrics import roc_auc_score, auc, roc_curve, precision_recall_curve\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom tensorflow.keras.models import load_model\r\nfrom mlearning.dl import plot_training_history, Bert_preprocessing\r\nfrom mlearning.dl_config import DLConfig\r\nfrom mlearning.dl import average_precision\r\nfrom tensorflow.keras.preprocessing import text\r\nfrom mlearning.dl import plot_roc_n_pr_curves\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom keras.wrappers.scikit_learn import KerasClassifier\r\nfrom sklearn.model_selection import StratifiedKFold\r\nimport nltk\r\nnltk.download('stopwords')\r\nnltk.download('wordnet')\r\nnltk.download('punkt')\r\nfrom nltk.corpus import stopwords\r\nimport seaborn as sns\r\nimport pandas as pd\r\nimport os\r\nfrom keras import backend as K\r\nimport pickle\r\nfrom transformers import BertTokenizer\r\n\r\ntrain_dataset_path = '../datasets/PMtask_Triage_TrainingSet.xml'\r\ntest_dataset_path = '../datasets/PMtask_Triage_TestSet.xml'\r\n\r\n\r\n\r\ndl_config = DLConfig(model_name=model_name, seed_value=seed_value)\r\n#dl_config.stop_words = set(stopwords.words('english')) \r\ndl_config.stop_words = None\r\ndl_config.lower = False \r\ndl_config.remove_punctuation = False\r\ndl_config.split_by_hyphen = False\r\ndl_config.lemmatization = False 
\r\ndl_config.stems = False \r\n\r\n\r\ndocs_train = bioc_to_docs(train_dataset_path, dl_config=dl_config)\r\nrelevances_train = bioc_to_relevances(train_dataset_path, 'protein-protein')\r\n\r\n\r\nx_train_df = docs_to_pandasdocs(docs_train)\r\ny_train_df = relevances_to_pandas(x_train_df, relevances_train)\r\n\r\n#Parameters\r\ndl_config.padding = 'post' #'pre' -> default; 'post' -> alternative\r\ndl_config.truncating = 'post' #'pre' -> default; 'post' -> alternative #####\r\n\r\ndl_config.max_sent_len = 512 #sentences will have a maximum of \"max_sent_len\" words\r\ndl_config.nmr_sentences = 1 #[1 or 2]\r\n\r\n\r\ndl_config.learning_rate = 1e-3\r\ndl_config.epochs = 7\r\n\r\ndl_config.batch_size=32\r\n\r\n\r\n\r\ndl_config.k_fold=10\r\nkfold = StratifiedKFold(n_splits=dl_config.k_fold, shuffle=True, random_state=dl_config.seed_value)\r\n\r\ncv_avp_scores = []\r\ncv_acc_scores=[]\r\ncv_prec_scores = []\r\ncv_rec_scores = []\r\ncv_f1_scores = []\r\n\r\nfor train_index, test_index in kfold.split(x_train_df.to_numpy(), y_train_df.to_numpy()):\r\n print(len(train_index))\r\n print(len(test_index))\r\n\r\n K.clear_session()\r\n environment_name = sys.executable.split('/')[-3]\r\n print('Environment:', environment_name)\r\n os.environ[environment_name] = str(dl_config.seed_value)\r\n\r\n np.random.seed(dl_config.seed_value)\r\n random.seed(dl_config.seed_value)\r\n tensorflow.random.set_seed(dl_config.seed_value)\r\n\r\n dl_config.tokenizer = BertTokenizer.from_pretrained('biobert_v1.1_pubmed', do_lower_case=False)\r\n\r\n x_train, y_train = Bert_preprocessing(x_train_df.iloc[train_index,], y_train_df.iloc[train_index,],\r\n dl_config=dl_config, \r\n validation_percentage=0,\r\n seed_value=dl_config.seed_value)\r\n\r\n\r\n biobert_path = './biobert_v1.1_pubmed'\r\n\r\n if multiple_gpus:\r\n with strategy.scope():\r\n model = Bert_CLS_opt(dl_config, learning_rate=dl_config.learning_rate,static_bert=True, bert_name_or_path=biobert_path, bert_config=True)\r\n else:\r\n model = Bert_CLS_opt(dl_config, learning_rate=dl_config.learning_rate,static_bert=True, bert_name_or_path=biobert_path, bert_config=True)\r\n\r\n history = model.fit(x_train, y_train,\r\n epochs=dl_config.epochs,\r\n batch_size=dl_config.batch_size)\r\n\r\n x_test, y_test = Bert_preprocessing(x_train_df.iloc[test_index,], y_train_df.iloc[test_index,], dl_config=dl_config)\r\n\r\n yhat_probs = model.predict(x_test, verbose=0)\r\n yhat_probs = yhat_probs[:, 0]\r\n\r\n yhat_classes = np.where(yhat_probs > 0.5, 1, yhat_probs)\r\n yhat_classes = np.where(yhat_classes < 0.5, 0, yhat_classes).astype(np.int64)\r\n \r\n test_avp = average_precision(y_train_df.iloc[test_index,], yhat_probs)\r\n test_acc = accuracy_score(y_test, yhat_classes)\r\n test_prec = precision_score(y_test, yhat_classes)\r\n test_rec = recall_score(y_test, yhat_classes)\r\n test_f1 = f1_score(y_test, yhat_classes)\r\n cv_avp_scores.append(test_avp)\r\n cv_acc_scores.append(test_acc)\r\n cv_prec_scores.append(test_prec)\r\n cv_rec_scores.append(test_rec)\r\n cv_f1_scores.append(test_f1)\r\n\r\n\r\n K.clear_session()\r\n del model\r\n tf.compat.v1.reset_default_graph()\r\n\r\ndl_config.cv_avp = cv_avp_scores\r\ndl_config.cv_acc = cv_acc_scores\r\ndl_config.cv_prec = cv_prec_scores\r\ndl_config.cv_rec = cv_rec_scores\r\ndl_config.cv_f1 = cv_f1_scores\r\n\r\n\r\n\r\ndl_config.save()\r\n\r\n\r\ndl_config.write_report()\r\n" ]
[ [ "tensorflow.compat.v1.ConfigProto", "sklearn.metrics.recall_score", "numpy.random.seed", "tensorflow.test.gpu_device_name", "tensorflow.config.experimental.list_physical_devices", "sklearn.metrics.accuracy_score", "sklearn.metrics.precision_score", "sklearn.model_selection.StratifiedKFold", "tensorflow.compat.v1.InteractiveSession", "sklearn.metrics.f1_score", "tensorflow.compat.v1.reset_default_graph", "numpy.where", "tensorflow.random.set_seed", "tensorflow.distribute.MirroredStrategy" ], [ "tensorflow.compat.v1.ConfigProto", "sklearn.metrics.recall_score", "numpy.random.seed", "tensorflow.test.gpu_device_name", "tensorflow.config.experimental.list_physical_devices", "sklearn.metrics.accuracy_score", "sklearn.metrics.precision_score", "sklearn.model_selection.StratifiedKFold", "tensorflow.compat.v1.InteractiveSession", "sklearn.metrics.f1_score", "tensorflow.compat.v1.reset_default_graph", "numpy.where", "tensorflow.random.set_seed", "tensorflow.distribute.MirroredStrategy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
johnfmaddox/kilojoule
[ "b4c146ded82e3ef51a0252ff48b1066a076e9aeb" ]
[ "kilojoule/humidair.py" ]
[ "from .units import Quantity, units\nfrom .common import (\n invert_dict,\n CP_symbUpper_to_units,\n preferred_units_from_type,\n preferred_units_from_symbol,\n)\nfrom .realfluid import Properties as rfprop\nfrom .plotting import PropertyPlot, plt\nimport CoolProp\nfrom CoolProp.CoolProp import HAPropsSI,set_reference_state\nimport numpy as np\nimport re\nfrom numpy import floor,ceil,log10\nimport functools\n\n# Default CoolProps units for symbols\nCP_HA_units_to_symb = {\n 'K':['T','B','Twb','T_wb','WetBulb','D','Tdp','DewPoint','T_dp','Tdb','T_db'],\n 'Pa':['P','P_w'],\n 'J/kg_dry_air/K':['C','cp','CV','S','Sda','Entropy'],\n 'J/kg_dry_air/K':['Cw','cpw','CV','S','Sda','Entropy'],\n 'J/kg_humid_air/K':['Cha','cp_ha','CVha','cv_ha','Sha'],\n 'J/kg_dry_air':['H','Hda','Enthalpy'],\n 'J/kg_humid_air':['Hha'],\n 'J/lb_water':['Hw'],\n 'W/m/degK':['K','k','Conductivity'],\n 'Pa*s':['M','Visc','mu'],\n 'mol_water/mol_humid_air':['psi_w','Y'],\n 'm^3/kg_dry_air':['V','Vda'],\n 'm^3/kg_humid_air':['Vha'],\n 'kg_water/kg_dry_air':['W','Omega','HumRat'],\n ' ':['R','RH','RelHum','phi']\n }\nCP_HA_symb_to_units = invert_dict(CP_HA_units_to_symb)\n\nCP_HA_trans_inv = {\n 'Twb':['B','Twb','T_wb','WetBulb'],\n 'Tdb':['Tdb','T_db','DryBulb','T'],\n 'Tdp':['Tdp','D','DewPoint','T_dp'],\n 'C':['C','cp','Cp','C_p','c_p'],\n 'Cha':['Cha','C_ha','cha','c_ha'],\n 'Cv':['Cv','Cv','cv','c_v'],\n 'Cvha':['Cvha','Cv_ha','cvha','c_v_ha'],\n 'H':['H','Hda','Enthalpy','h','hda','h_da'],\n 'Hha':['Hha','h_ha','hha','Enthalpy_Humid_Air'],\n 'K':['K','k','conductivity','Conductivity'],\n 'M':['M','Visc','mu','viscosity'],\n 'Y':['Y','psi_w','mole_fraction','y'],\n 'P':['P','p','pressure'],\n 'P_w':['P_w','p_w','partial_pressure_water'],\n 'R':['R','RelHum','RH','rel_hum','phi'],\n 'S':['S','s','sda','Sda','s_da','Entropy'],\n 'Sha':['Sha','s_ha','sha'],\n 'V':['V','v','v_da','vda'],\n 'Vha':['Vha','v_ha','vha'],\n 'W':['W','w','Omega','HumRat','spec_hum','specific_humidity','omega','humidity','absolute_humidity'],\n 'Z':['Z','compressibility_factor'],\n }\nCP_HA_trans = invert_dict(CP_HA_trans_inv)\n\nCP_HA_symb_to_local = {\n 'Twb':'T_wb',\n 'Tdb':'T_db',\n 'Tdp':'T_dp',\n 'C':'Cp',\n 'Cha':'Cp_ha',\n 'Cv':'Cv',\n 'Cvha':'Cv_ha',\n 'H':'h',\n 'Hha':'h_ha',\n 'K':'conductivity',\n 'M':'viscosity',\n 'Y':'psi_w',\n 'P':'p',\n 'P_w':'p_w',\n 'R':'rel_hum',\n 'S':'s',\n 'Sha':'s_ha',\n 'V':'v',\n 'Vha':'v_ha',\n 'W':'spec_hum',\n 'Z':'Z'\n }\n\nCP_HA_type_to_symb = {\n 'temperature':['B','Twb','T_wb','WetBulb','Tdb','T_db','DryBulb','T','Tdp','D','DewPoint','T_dp'],\n 'pressure':['P','p','pressure','P_w','p_w','partial_pressure_water'],\n 'density':['D','d','rho'],\n 'dry air specific volume':['V','v','v_da','vda'],\n 'humid air specific volume':['Vha','v_ha','vha'],\n 'dry air specific energy':['H','Hda','Enthalpy','h','hda','h_da'],\n 'humid air specific energy':['Hha','h_ha','hha','Enthalpy_Humid_Air'],\n 'dry air specific heat':['C','cp','Cp','C_p','c_p','Cv','Cv','cv','c_v'],\n 'dry air specific entropy':['S','s','sda','Sda','s_da','Entropy'],\n 'humid air specific heat':['Cha','C_ha','cha','c_ha','Cvha','Cv_ha','cvha','c_v_ha'],\n 'humid air specific entropy':['Sha','s_ha','sha'],\n 'conductivity':['K','k','conductivity','Conductivity'],\n 'viscosity':['M','Visc','mu','viscosity'],\n 'water mole fraction':['Y','psi_w','y'],\n 'humidity ratio':['W','Omega','HumRat','spec_hum','specific_humidity','omega','humidity','absolute_humidity'],\n 
'dimensionless':['R','RelHum','RH','rel_hum','phi','Z']\n}\nCP_HA_symb_to_type = invert_dict(CP_HA_type_to_symb)\n\n\n\ndef PropertyLookup(\n desired,\n unit_system=None,\n verbose=False,\n **kwargs,\n):\n \"\"\"\n Each of the follow properties/parameters is expected to be a quantity with units\n\n :param desired: Dependent from two of the following independent properties\n :param T: dry-bulb Temperature (Default value = None)\n :param T_wb: wet-bulb Temperature (Default value = None)\n :param T_dp: dew-point Temperature (Default value = None)\n :param p: pressure (Default value = None)\n :param p_w: partial pressure of water vapor (Default value = None)\n :param w: humidity ratio (Default value = None)\n :param v: mixture volume per unit dry air (Default value = None)\n :param v_ha: mixture volume per unit humid air (Default value = None)\n :param h: mixture enthalpy per unit dry air (Default value = None)\n :param h_ha: mixture enthalpy per unit humid air (Default value = None)\n :param s: mixture entropy per unit dry air (Default value = None)\n :param rel_hum: relative humidity (Default value = None)\n :param y: water mole fraction (Default value = None)\n :param unit_system: unit system for return value - one of 'SI_C', 'SI_K', 'English_F', 'English_R' (Default value = )\n :param verbose: show debug information (Default value = False)\n :param **kwargs:\n\n \"\"\"\n desired = CP_HA_trans[desired]\n PropsSI_args =[desired] # add the desired parameter as the first argument to pass to CoolProp.PropsSI\n\n def process_indep_arg(arg, CPSymb):\n \"\"\"\n Add a property symbol and its value to the CoolProp.PropSI argument string\n\n :param arg: value of independent parameter\n :param CPSymb: CoolProp symbol\n :param exponent: exponent used to invert the value (Default value = 1)\n :param AltSymb: symbol to use for inverted values (Default value = None)\n\n \"\"\"\n if arg is not None:\n # if AltSymb: PropsSI_args.append(AltSymb)\n # else:\n PropsSI_args.append(CPSymb) # Add independent parameter symbol to argument list\n if CP_HA_symb_to_units[CPSymb] is not None:\n value = float(arg.to(CP_HA_symb_to_units[CPSymb]).magnitude) # Add independent parameter value to argument list with appropriate magnitude and units stripped\n elif isinstance(arg,Quantity):\n value = float(arg.magnitude)\n else:\n value = float(arg) # Add independent paramter value directly to argument list if it has no units that need to be adjusted\n PropsSI_args.append(value)\n for k,v in kwargs.items():\n if k in CP_HA_trans.keys():\n process_indep_arg(v,CP_HA_trans[k])\n\n def humidity_search(PropsSI_args):\n desired = PropsSI_args[0]\n for i,v in enumerate(PropsSI_args):\n if v == 'P':\n P = PropsSI_args[i+1]\n elif v == 'R':\n R_target = PropsSI_args[i+1]\n elif v == 'W':\n W = PropsSI_args[i+1]\n T = 273.15 # starting guess\n T_guess = T\n n_steps = 100\n search_steps = [5,-5,1,-1,0.1,-0.1,0.01,-0.01]\n for step in search_steps:\n cont = True\n n_step = 0\n while cont:\n if n_step > 0:\n T_guess += step\n try:\n R = HAPropsSI('R','T',T_guess,'W',W,'P',P)\n error = abs(R_target-R)\n if step>0:\n T = T_guess\n if R<R_target:\n cont=False\n elif step<0 and R<R_target:\n T = T_guess\n else:\n cont=False\n except ValueError:\n if step<0: cont=False\n n_step += 1\n if n_step > n_steps: cont=False\n \n if desired == 'Tdb':\n return T\n else:\n return HAPropsSI(desired,'P',P,'W',W,'Tdb',T)\n \n if verbose:\n print('Calling: CoolProp.CoolProp.HAPropsSI({})'.format(','.join([str(i) for i in PropsSI_args])))\n 
print(PropsSI_args)\n\n if \"R\" in PropsSI_args[1:] and \"W\" in PropsSI_args[1:]:\n result = humidity_search(PropsSI_args)\n else:\n result = HAPropsSI(*PropsSI_args)\n \n # Determine the units of the value as returned from CoolProp\n CP_return_units = CP_HA_symb_to_units[desired]\n CP_return_type = CP_HA_symb_to_type[desired]\n # Determine the preferred units for the value\n if unit_system is None:\n result_units = preferred_units_from_type(CP_return_type, units.preferred_units)\n else:\n result_units = preferred_units_from_type(CP_return_type, unit_system)\n # Convert the returned value to the preferred units\n if result_units is not None:\n result = Quantity(result,CP_return_units).to(result_units)\n return result\n\n\nclass Properties:\n \"\"\"\n A class to return thermodynamic properties for a real fluid\n\n :param p: pressure (Default value = 1 atm)\n :param unit_system: units for return values - one of 'SI_C','SI_K','English_F','English_R' (Default = 'SI_C')\n :returns: an object with methods to evaluate real fluid properties\n \"\"\"\n\n def __init__(self, p=None, unit_system=\"kSI_C\"):\n self.fluid='humidair'\n if p is None:\n self.__p = Quantity(1.0,'atm')\n else:\n self.__p = p\n self.unit_system = unit_system\n # legacy definitions/aliases\n self.relhum = self.phi = self.rel_hum\n self.omega = self.hum_rat = self.humrat = self.w\n self.Cp = self.cp\n self.Cv = self.cv\n self.mu = self.viscosity\n self.nu = self.kinematic_viscosity\n self.water = rfprop('Water',unit_system=unit_system)\n\n def _lookup(self, desired, **kwargs):\n \"\"\"\n Call PropertyLookup to evaluate the desired property for the indepent properties specified\n as keyword arguments\n\n :param desired: desired property\n :param **kwargs: any three dimensional quantities of T,T_wb,T_dp,p,p_w,w,v,v_ha,h,h_ha,s,s_ha,rel_hum,mole_fraction,\n \"\"\"\n unit_system = kwargs.pop('unit_system',self.unit_system)\n return PropertyLookup(\n desired, unit_system=self.unit_system, **kwargs\n )\n\n \n def _update_kwargs(self, args, kwargs, water=False):\n \"\"\"use argument unit to identify appropriate keyword\"\"\"\n for arg in args:\n if isinstance(arg, Quantity):\n try:\n arg_symb = arg.property_symbol\n arg_dict = {arg_symb:arg}\n kwargs = dict(**arg_dict, **kwargs)\n except:\n try:\n arg.to('K') # Temperature\n kwargs = dict(T=arg, **kwargs)\n except:\n try:\n arg.to('kPa') # pressure\n kwargs = dict(p=arg, **kwargs)\n except:\n try:\n arg.to('m^3/kg') # specific volume\n kwargs = dict(v=arg, **kwargs)\n except:\n try:\n arg.to('kJ/kg/K') # entropy\n kwargs = dict(s=arg, **kwargs)\n except:\n try:\n arg.to('J/kg_dry_air') # enthalpy\n kwargs = dict(h=arg, **kwargs)\n except:\n try:\n arg.to('J/kg_humid_air') # enthalpy humid air\n kwargs = dict(h_ha=arg, **kwargs)\n except:\n try:\n arg.to('kg_water/kg_dry_air') # molar density\n kwargs = dict(w=arg, **kwargs)\n except:\n try:\n if arg.dimensionless and (0<= arg <= 1): # relative humidity\n kwargs = dict(rel_hum=arg, **kwargs)\n except:\n print(f'Unable to determine property type for {f} based on units')\n elif 0<= arg <= 1: # quality\n kwargs = dict(rel_hum=arg, **kwargs)\n if not water and \"p\" not in kwargs.keys():\n kwargs = dict(p=self.__p, **kwargs)\n return kwargs\n\n @property\n def p(self):\n \"\"\"\n set or retrieve pressure for humid air\n\n example:\n >> humair.p = Quantity(1,'atm')\n >> humair.p\n '1 atm'\n\n :param pressure: pressure as a dimensional quantity\n :returns: pressure as a dimensional quantity\n \"\"\"\n return self.__p\n\n @p.setter\n def 
p(self, pressure):\n self.__p = pressure\n\n def T(self, *args, **kwargs):\n \"\"\"\n Dry-bulb Temperature from two independent intensive properties\n\n example:\n >> humair.T(rel_hum=rel_hum_2, h=h_1)\n\n :param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: Dry-bulb Temperature as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"T\", **kwargs)\n \n def T_wb(self, *args, **kwargs):\n \"\"\"\n Wet-bulb Temperature from two independent intensive properties\n\n example:\n >> humair.T_wb(rel_hum=rel_hum_2, h=h_1)\n\n :param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: Wet-bulb Temperature as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"T_wb\", **kwargs)\n\n def T_dp(self, *args, **kwargs):\n \"\"\"\n Dew-point Temperature from two independent intensive properties\n\n example:\n >> humair.T_dp(rel_hum=rel_hum_2, h=h_1)\n\n :param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: Dew-point Temperature as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"T_dp\", **kwargs)\n \n def w(self, *args, **kwargs):\n \"\"\"\n humidity ratio from two independent intensive properties\n\n example:\n >> fluid.v(T=T_1, h=h_2)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: humidity ratio as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"w\", **kwargs)\n\n def v(self, *args, **kwargs):\n \"\"\"\n mixture volume per unit of dry air from two independent intensive properties\n\n example:\n >> fluid.v(T=T_1, h=p_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: specific volume per unit dry air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"v\", **kwargs)\n\n def v_ha(self, *args, **kwargs):\n \"\"\"\n mixture volume per unit of humid air from two independent intensive properties\n\n example:\n >> fluid.v_ha(T=T_1, h=p_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: specific volume per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"v_ha\", **kwargs)\n\n def v_w(self, *args, **kwargs):\n \"\"\"\n specific volume of water per unit of humid water from two independent intensive properties\n\n example:\n >> fluid.v_w(T=T_1, x=x_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: specific volume per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs,water=True)\n return Quantity(self.water.v(**kwargs).to('m^3/kg').magnitude, 'm^3/kg_water')\n \n def h(self, *args, **kwargs):\n \"\"\"\n enthalpy per unit dry air from two independent intensive properties\n\n example:\n >> fluid.h(T=T_1, rel_hum=re1_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: specific enthalpy per unit dry air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"h\", **kwargs)\n\n def h_ha(self, *args, **kwargs):\n \"\"\"\n enthalpy per unit humid air 
from two independent intensive properties\n\n example:\n >> fluid.h_ha(T=T_1, rel_hum=re1_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: specific enthalpy per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"h_ha\", **kwargs)\n\n def h_w(self, *args, **kwargs):\n \"\"\"\n specific enthalpy of water per unit of humid water from two independent intensive properties\n\n example:\n >> fluid.h_w(T=T_1, x=x_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: specific volume per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs,water=True)\n return Quantity(self.water.h(**kwargs).to('kJ/kg').magnitude, 'kJ/kg_water') \n \n def s(self, *args, **kwargs):\n \"\"\"\n entropy per unit dry air from two independent intensive properties\n\n example:\n >> fluid.s(T=T_1, h=h_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: specific entropy per unit dry air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"s\", **kwargs)\n\n def s_ha(self, *args, **kwargs):\n \"\"\"\n entropy per unit humid air from two independent intensive properties\n\n example:\n >> fluid.s_ha(T=T_1, h=h_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: specific entropy per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"s_ha\", **kwargs)\n\n def s_w(self, *args, **kwargs):\n \"\"\"\n specific entropy of water per unit of humid water from two independent intensive properties\n\n example:\n >> fluid.s_w(T=T_1, x=x_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: specific volume per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs,water=True)\n return Quantity(self.water.s(**kwargs).to('kJ/kg/K').magnitude, 'kJ/kg_water/K') \n \n def rel_hum(self, *args, **kwargs):\n \"\"\"\n relative humidity from two independent intensive properties\n\n example:\n >> fluid.rel_hum(T=T_1, h=h_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: relative humidity as a dimensionless quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"rel_hum\", **kwargs)\n\n def y(self, *args, **kwargs):\n \"\"\"\n water mole fraction from two independent intensive properties\n\n example:\n >> fluid.y(T=T_1, h=h_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: water mole fraction as a dimensionless quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"Y\", **kwargs)\n\n def cp(self, *args, **kwargs):\n \"\"\"\n specific heat per unit dry air from two independent intensive properties\n\n example:\n >> fluid.cp(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: specific heat per unit dry air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"cp\", **kwargs)\n\n def cp_ha(self, *args, **kwargs):\n \"\"\"\n specific heat per unit humid air from two 
independent intensive properties\n\n example:\n >> fluid.cp_ha(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: specific heat per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"cp_ha\", **kwargs)\n \n def cv(self, *args, **kwargs):\n \"\"\"\n constant volume specific heat per unit dry air from two independent intensive properties\n\n example:\n >> fluid.cv(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: constant volume specific heat per unit dry air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"cv\", **kwargs)\n\n def cv_ha(self, *args, **kwargs):\n \"\"\"\n constant volume specific heat per unit humid air from two independent intensive properties\n\n example:\n >> fluid.cv_ha(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: constant volume specific heat per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"cv_ha\", **kwargs)\n\n def conductivity(self, *args, **kwargs):\n \"\"\"\n thermal conductivity from two independent intensive properties\n\n example:\n >> fluid.conductivity(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: thermal conductivity as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"k\", **kwargs)\n\n def viscosity(self, *args, **kwargs):\n \"\"\"\n dynamic viscosity from two independent intensive properties\n\n example:\n >> fluid.viscosity(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: dynamic viscosity as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"mu\", **kwargs)\n\n def kinematic_viscosity(self, *args, **kwargs):\n \"\"\"\n dynamic viscosity from two independent intensive properties\n\n example:\n >> fluid.kinematic_viscosity(T=T1, p=p1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: kinematic viscosity as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"viscosity\", **kwargs)/self._lookup(\"v\", **kwargs)\n \n def Z(self, *args, **kwargs):\n \"\"\"\n Compressibility factor\n\n example:\n >> fluid.Pr(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar,d_molar\n :returns: Compressibility factor as a dimensionless quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"Z\", **kwargs)\n\n def property_diagram(\n self,\n x=None,\n y=None,\n x_units=None,\n y_units=None,\n saturation=False,\n unit_system=None,\n **kwargs,\n ):\n unit_system = unit_system or self.unit_system\n return PropertyPlot(\n x=x,\n y=y,\n x_units=x_units,\n y_units=y_units,\n property_table=self,\n saturation=saturation,\n unit_system=unit_system,\n **kwargs,\n )\n\n def format_units(self,units,displaystyle=True):\n units = re.sub('_water','_w',units)\n units = re.sub('_dry_air','_a',units)\n units = 
re.sub('deg',r'^\\\\circ{}\\!',units)\n match = re.match('(.*)/(.*)',units)\n if match and displaystyle:\n units = f'\\\\frac{{{match.group(1)}}}{{{match.group(2)}}}'\n return units\n\n def rounded_array(self,val1,val2,n=20,spacing=None):\n if spacing is not None:\n spacing_mag = floor(log10(spacing))\n start = spacing*10**spacing_mag*round(val1/(spacing*10**spacing_mag))\n ret_array = np.arange(start, val2+spacing, spacing)\n else:\n dir = 1 if val2>val1 else -1\n delta = abs(val2-val1)\n mag_delta = floor(log10(delta))\n spacing = round(delta/n,-int(floor(log10(delta/n))))\n spacing_mag = floor(log10(spacing))\n spacings={}\n lists={}\n lengths={}\n for i in [1,2,2.5,5,10]:\n spacings[i] = dir*i*10**spacing_mag*round(spacing/(i*10**spacing_mag))\n spacings[i] = dir*i*10**spacing_mag\n start = i*10**spacing_mag*round(val1/(i*10**spacing_mag))\n if spacings[i] == 0: spacings[i] = i*10**spacing_mag\n lists[i] = np.arange(start,val2+spacings[i],spacings[i])\n if lists[i][0] == -0: lists[i][0]=0\n lengths[i] = len(lists[i])\n kys= list(lengths.keys()) \n lst = list(lengths.values())\n L = lst[min(range(len(lst)), key = lambda i: abs(lst[i]-n))]\n K = kys[lst.index(L)]\n ret_array = lists[K]\n if ret_array[0] == -0: ret_array[0]=0\n if ret_array[-1]>val2 or ret_array[-1]<val1: ret_array = ret_array[:-1]\n if ret_array[0]<val1 or ret_array[-1]>val2: ret_array = ret_array[1:]\n return ret_array\n\n def psychrometric_chart(\n self,\n Tmin=None,\n Tmax=None,\n wmin=None,\n wmax=None,\n main_labels_color=None,\n major_grid_style=None,\n minor_grid_style=None,\n n_h = 15,\n n_v = 20,\n h_isoline_style=None,\n v_isoline_style=None,\n rel_hum_isoline_style=None,\n Twb_isoline_style=None,\n unit_system=None,\n redraw=False,\n cache=True,\n **kwargs\n ):\n if self.cached_psychrometric_chart.cache_info().currsize>0:\n show_psych = True\n else:\n show_psych = False\n\n if redraw or not cache:\n self.cached_psychrometric_chart.cache_clear()\n \n psych = self.cached_psychrometric_chart(\n Tmin,\n Tmax,\n wmin,\n wmax,\n main_labels_color,\n major_grid_style,\n minor_grid_style,\n n_h,\n n_v,\n h_isoline_style,\n v_isoline_style,\n rel_hum_isoline_style,\n Twb_isoline_style,\n unit_system,\n **kwargs\n )\n\n if show_psych: psych.show()\n return psych\n\n @functools.lru_cache()\n def cached_psychrometric_chart(\n self,\n Tmin=None,\n Tmax=None,\n wmin=None,\n wmax=None,\n main_labels_color=None,\n major_grid_style=None,\n minor_grid_style=None,\n n_h = 15,\n n_v = 20,\n h_isoline_style=None,\n v_isoline_style=None,\n rel_hum_isoline_style=None,\n Twb_isoline_style=None,\n unit_system=None,\n **kwargs\n ):\n unit_system = unit_system or self.unit_system\n psych = self.property_diagram(x=\"T\", y=\"omega\", saturation=False, unit_system=unit_system, p=self.__p, **kwargs)\n\n # Line Styles\n main_labels_color = main_labels_color or 'black'\n major_grid_style = major_grid_style or dict(\n linestyle='-',\n linewidth=0.5,\n color=[0.4,0.4,0.4,0.4]\n )\n minor_grid_style = minor_grid_style or dict(\n linestyle='-',\n linewidth=0.25,\n color=[0.4,0.4,0.4,0.4]\n )\n h_isoline_style = h_isoline_style or dict(\n linestyle='-',\n linewidth=0.5,\n color=[0.4,0.4,0.4,0.4],\n pos=0,\n labelprops=dict(\n ha='right',\n va='center',\n pos=0.0\n )\n )\n v_isoline_style = v_isoline_style or dict(\n linestyle='-',\n linewidth=0.5,\n color=[0.4,0.4,0.4,0.4],\n labelprops=dict(color='grey',offset=2))\n rel_hum_isoline_style = rel_hum_isoline_style or dict(\n linestyle='-',\n linewidth=0.5,\n color=[0.4,0.4,0.4,0.4],\n 
labelprops=dict(\n ha='right',\n color='grey',\n offset=2\n )\n )\n Twb_isoline_style = Twb_isoline_style or dict(\n linestyle=(0,(5,10)),\n linewidth=0.5,\n color=[0.4,0.4,0.4,0.4],\n pos=0.2,\n labelprops=dict(\n ha='left',\n color='grey',\n offset=2\n )\n )\n \n # Set Axis limits\n if Tmin is None: Tmin = Quantity(30.0,'degF')\n Tmin = Tmin.to(psych.x_units)\n if Tmax is None: Tmax = Quantity(50.0,'degC')\n Tmax = Tmax.to(psych.x_units)\n if wmin is None: wmin = Quantity(0.0,'kg_water/kg_dry_air')\n wmin = wmin.to(psych.y_units)\n if wmax is None: wmax = Quantity(0.03,'kg_water/kg_dry_air')\n wmax = wmax.to(psych.y_units)\n psych.Tmin,psych.Tmax,psych.wmin,psych.wmax = Tmin,Tmax,wmin,wmax\n psych.ax.set_xlim(left=Tmin.magnitude,right=Tmax.magnitude)\n psych.ax.set_ylim(bottom=wmin.magnitude,top=wmax.magnitude)\n \n # Set axis labels\n x_units_str = f\"{self.format_units(f'{psych.x_units}')}\"\n y_units_str = f\"{self.format_units(f'{psych.y_units}')}\"\n psych.ax.set_xlabel(f\"Dry-Bulb Temperature, $T_{{\\\\mathrm{{db}}}}\\\\ [\\\\mathrm{{{x_units_str}}}]$\")\n psych.ax.set_ylabel(f\"Humidity Ratio, $\\\\omega\\\\ \\\\left[\\mathrm{{{y_units_str}}}\\\\right]$\")\n \n # Set axis style\n psych.ax.yaxis.tick_right()\n psych.ax.yaxis.set_label_position(\"right\")\n psych.ax.spines[\"right\"].set_visible(True)\n psych.ax.spines[\"left\"].set_visible(False)\n \n # Add Plot Title\n try:\n pressure_str = f'{psych.props.p}'\n except:\n pressure_str = f'{psych.props.p:~L}'\n title = f'Psychrometric Chart\\nPressure: $\\mathrm{{{pressure_str}}}$'\n psych.text((0.05*(Tmax-Tmin)+Tmin).magnitude, (0.9*(wmax-wmin)+wmin).magnitude, title, fontsize=12)\n \n # Draw grid\n # Dry-bulb grid\n tickscale=1\n x_major_ticks = self.rounded_array(Tmin.magnitude,Tmax.magnitude,spacing=5)\n x_minor_ticks = self.rounded_array(Tmin.magnitude,Tmax.magnitude,spacing=1)\n plt.xticks(x_major_ticks)\n ymin = wmin\n for i in x_major_ticks:\n ymax = min(psych.props.w(T_db=Quantity(i,psych.x_units),rel_hum=1),wmax)\n psych.ax.plot([i,i],[ymin.magnitude,ymax.magnitude],**major_grid_style)\n for i in x_minor_ticks:\n ymax = min(psych.props.w(T_db=Quantity(i,psych.x_units),rel_hum=1),wmax)\n psych.ax.plot([i,i],[ymin.magnitude,ymax.magnitude],**minor_grid_style) \n \n # Humidity ratio grid\n y_minor_ticks = self.rounded_array(wmin.magnitude,wmax.magnitude,spacing=0.001)\n y_major_ticks = self.rounded_array(wmin.magnitude,wmax.magnitude,spacing=0.005)\n plt.yticks(y_major_ticks)\n xmax = Tmax\n for i in y_major_ticks:\n xmin=Tmin\n try:\n phi_left_lim = psych.props.rel_hum(T_db=Tmin,w=Quantity(i,psych.y_units))\n except:\n xmin = psych.props.T(w=Quantity(i,psych.y_units),rel_hum=1).to(psych.x_units)\n psych.ax.plot([xmin.magnitude,xmax.magnitude],[i,i],**major_grid_style)\n for i in y_minor_ticks:\n xmin=Tmin\n try:\n phi_left_lim = psych.props.rel_hum(T_db=Tmin,w=Quantity(i,psych.y_units))\n except:\n xmin = psych.props.T(w=Quantity(i,psych.y_units),rel_hum=1).to(psych.x_units)\n psych.ax.plot([xmin.magnitude,xmax.magnitude],[i,i],**minor_grid_style) \n\n # Saturated line\n psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=1,label=False,linestyle='-',color='black')\n # Relative humidity lines\n for i in [0.1]:\n lstyle = dict(**rel_hum_isoline_style)\n lstyle['labelprops'] = dict(**rel_hum_isoline_style['labelprops'])\n lstyle['labelprops']['color'] = main_labels_color\n psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=i,label=f'$\\phi=10\\%$',xcoor=(Tmin+0.95*(Tmax-Tmin)).magnitude,**lstyle)\n for i in 
[0.02,0.04,0.06,0.08,0.15,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.8,0.9]:\n rel_hum = i\n xmin,xmax = Tmin,Tmax\n if psych.props.w(rel_hum=rel_hum,T=Tmax) > wmax: \n xmax = psych.props.T(w=wmax,rel_hum=rel_hum)\n psych.plot_iso_line(iso_symb='rel_hum',iso_value=rel_hum,x_range=[xmin,xmax],label=f'{int(i*100)}%',ycoor=(wmin+0.95*(wmax-wmin)).magnitude,**rel_hum_isoline_style)\n else:\n psych.plot_iso_line(iso_symb='rel_hum',iso_value=rel_hum,x_range=[xmin,xmax],label=f'{int(i*100)}%',xcoor=(Tmin+0.95*(Tmax-Tmin)).magnitude,**rel_hum_isoline_style)\n \n # Enthalpy lines\n hmin = psych.props.h(T=Tmin,w=wmin)\n hmax = psych.props.h(T=Tmax,w=wmax)\n h_units = hmin.units\n h_units_str = f\"{self.format_units(f'{h_units}')}\"\n for i in self.rounded_array(hmin.magnitude,hmax.magnitude,15):\n h = Quantity(i,h_units)\n xmin = max(psych.props.T(h=h,rel_hum=1),Tmin,psych.props.T(h=h,w=wmax))\n xmax = min(psych.props.T(h=h,w=wmin),Tmax,psych.props.T(h=h,w=wmin))\n try:\n psych.plot_iso_line(iso_symb='h',iso_value=h,x_range=[xmin,xmax],label=f'{int(i) if i.is_integer() else i}',**h_isoline_style)\n except:\n pass\n # Enthalpy axis label\n psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=1,label=f'Enthalpy, $h$ $\\\\left[\\\\mathrm{{{h_units_str}}}\\\\right]$',linewidth=0,pos=0.5,labelprops=dict(offset=25)) \n \n # Specific volume lines\n vmin = psych.props.v(T=Tmin,omega=wmin)\n vmax = psych.props.v(T=Tmax,omega=wmax)\n v_units = vmin.units\n v_units_str = f\"{self.format_units(f'{v_units}',displaystyle=False)}\"\n v_list = self.rounded_array(vmin.magnitude,vmax.magnitude,20)\n v_main_label_index = int(len(v_list)*0.6)\n for i,val in enumerate(v_list):\n v = Quantity(val,v_units)\n ymax = min(psych.props.w(v=v,rel_hum=1),wmax)\n try:\n ymin = max(psych.props.w(T=Tmax,v=v),wmin)\n except ValueError:\n ymin = wmin\n v_string = int(val) if val.is_integer() else f'{val:.5}'.rstrip() \n if i == v_main_label_index:\n lstyle = dict(**v_isoline_style)\n lstyle['labelprops'] = dict(**v_isoline_style['labelprops'])\n lstyle['labelprops']['color'] = main_labels_color\n psych.plot_iso_line(iso_symb='v',iso_value=v,y_range=[ymax,ymin],n_points=10,label=f'$v={v_string}\\ \\mathrm{{{v_units_str}}}$',pos=0.7,**lstyle)\n else:\n try:\n psych.plot_iso_line(iso_symb='v',iso_value=v,y_range=[ymax,ymin],label=v_string,n_points=10,pos=0.7,**v_isoline_style)\n except:\n pass\n \n # Wet-bulb Temperature lines\n T_units = Tmin.units\n T_units_str = f\"{self.format_units(f'{T_units}',displaystyle=False)}\"\n Twb_main_label_index = int(len(x_major_ticks)*0.5)\n for i,T in enumerate(x_major_ticks[:-1]):\n Twb = Quantity(T,psych.x_units)\n ymax = min(psych.props.w(T=Twb,rel_hum=1),wmax)\n try:\n ymin = max(psych.props.w(T=Tmax,T_wb=Twb),wmin)\n except ValueError:\n ymin = wmin\n if ymin<wmax:\n if i == Twb_main_label_index:\n lstyle = dict(**Twb_isoline_style)\n lstyle['labelprops'] = dict(**Twb_isoline_style['labelprops'])\n lstyle['labelprops']['color'] = main_labels_color\n psych.plot_iso_line(iso_symb='T_wb',iso_value=Twb,y_range=[ymax,ymin],n_points=10,label=f'$T_\\mathrm{{wb}}={int(T)}\\mathrm{{{T_units_str}}}$',**lstyle)\n else:\n psych.plot_iso_line(iso_symb='T_wb',iso_value=Twb,y_range=[ymax,ymin],n_points=10,label=f'${int(T)}\\mathrm{{{T_units_str}}}$',**Twb_isoline_style)\n\n return psych\n\n def Ts_diagram(self, unit_system=None, saturation=False, **kwargs):\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"s\", y=\"T\", unit_system=unit_system, saturation=saturation, **kwargs\n )\n\n def 
pv_diagram(self, unit_system=None, saturation=None, log_x=None, log_y=None, **kwargs):\n if self.fluid == 'Air':\n saturation = saturation or False\n log_x = log_x or False\n log_y = log_y or False\n else:\n saturation = True\n log_x = log_x or True\n log_y = log_y or True\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"v\", y=\"p\", unit_system=unit_system, saturation=saturation, log_x=log_x, log_y=log_y, **kwargs\n )\n\n def Tv_diagram(self, unit_system=None, saturation=None, **kwargs):\n if self.fluid == 'Air': saturation = saturation or False\n else: saturation = saturation or True\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"v\", y=\"T\", unit_system=unit_system, saturation=saturation, **kwargs\n )\n\n def hs_diagram(self, unit_system=None, saturation=None, **kwargs):\n if self.fluid == 'Air': saturation = saturation or False\n else: saturation = saturation or True\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"s\", y=\"h\", unit_system=unit_system, saturation=saturation, **kwargs\n )\n\n def ph_diagram(self, unit_system=None, saturation=None, **kwargs):\n if self.fluid == 'Air': saturation = saturation or False\n else: saturation = saturation or True\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"h\", y=\"p\", unit_system=unit_system, saturation=saturation, **kwargs\n )\n\n def pT_diagram(self, unit_system=None, saturation=None, **kwargs):\n if self.fluid == 'Air': saturation = saturation or False\n else: saturation = saturation or True\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"T\", y=\"p\", unit_system=unit_system, saturation=saturation, **kwargs\n )\n\n\ndef LegacyPropertyPlot(\n x=None,\n y=None,\n x_units=None,\n y_units=None,\n plot_type=None,\n fluid=None,\n saturation=False,\n unit_system=\"SI_C\",\n **kwargs,\n):\n props = Properties(fluid=fluid, unit_system=unit_system, **kwargs)\n return PropertyPlot(\n x=x,\n y=y,\n x_units=x_units,\n y_units=y_units,\n property_table=props,\n saturation=saturation,\n unit_system=unit_system,\n **kwargs,\n )\n" ]
[ [ "numpy.arange", "numpy.log10" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Dongzhou-1996/tf_learning
[ "fe764e78cc1a934707ae01d0847f901cb6fbb8b9", "fe764e78cc1a934707ae01d0847f901cb6fbb8b9" ]
[ "tf_mnist.py", "tf_initializer.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\nimport tensorflow as tf\nimport os\nimport numpy as np\nimport argparse\nimport shutil\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nparser = argparse.ArgumentParser('MNIST Softmax')\nparser.add_argument('--data_dir', type=str, default='/tmp/mnist-data', \n help='the directory of MNIST dataset')\nparser.add_argument('--lr', type=float, default=0.01, help='learning rate')\nparser.add_argument('--batch_size', type=int, default=32, help='batch size')\nparser.add_argument('--max_train_step', type=int, default=50000, help='the maximum training step')\nparser.add_argument('--model_path', type=str, default='', help='the path of checkpoint file')\nargs = parser.parse_args()\n\ndef model():\n x = tf.placeholder(tf.float32, [None, 784], name='x')\n gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth')\n with tf.variable_scope('layer1'):\n w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer())\n b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0))\n h1 = tf.nn.relu(tf.matmul(x, w1) + b1)\n with tf.variable_scope('layer2'):\n w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer())\n b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0))\n h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)\n with tf.variable_scope('layer3'):\n w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer())\n b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0))\n y = tf.matmul(h2, w3) + b3\n # losses\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y))\n # optimizer\n optimizer = tf.train.GradientDescentOptimizer(args.lr)\n # define one-step train ops\n train_op = optimizer.minimize(cross_entropy)\n return x, y, gt, train_op \n \nif __name__ == \"__main__\":\n max_train_step = args.max_train_step\n batch_size = args.batch_size\n mnist = input_data.read_data_sets(args.data_dir, one_hot=True)\n x, y, gt, train_op = model()\n \n # create saver\n saver = tf.train.Saver()\n if os.path.exists('./mnist'):\n print('=> directory is existed!')\n else:\n print('=> creating temporary directory ...')\n os.makedirs('./mnist')\n\n with tf.Session() as sess:\n if args.model_path == '':\n tf.global_variables_initializer().run()\n else:\n saver.restore(sess, args.model_path)\n\n for i in range(max_train_step):\n batch_x, batch_gt = mnist.train.next_batch(batch_size)\n sess.run(train_op, feed_dict={x: batch_x, gt: batch_gt})\n\n if i % 100 == 0:\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(gt, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print('=> accuracy: {}'.format(sess.run(accuracy, feed_dict={x: mnist.test.images, gt: mnist.test.labels})))\n saver.save(sess, 'mnist/mnist_{:02d}.ckpt'.format(int(i / 100) + 1))\n", "import tensorflow as tf\n\nw = tf.Variable(tf.random_normal(shape=(1, 4), stddev=0.35), name='weight')\nb = tf.Variable(tf.zeros([4]), name='bias')\nwith tf.Session() as sess:\n tf.global_variables_initializer().run()\n print(w.eval(), b.eval())\n\n# partial initializer\nwith tf.Session() as sess:\n tf.variables_initializer([w]).run()\n print(w.eval())\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.matmul", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.constant_initializer", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.variable_scope", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.random_normal_initializer", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets" ], [ "tensorflow.zeros", "tensorflow.variables_initializer", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
MalayAgr/DeepNeuralNetworksFromScratch
[ "ded75b148d9bb497014c016bfd2d7d0280c007ab", "ded75b148d9bb497014c016bfd2d7d0280c007ab" ]
[ "dnn/loss.py", "dnn/training/optimizers/sgd.py" ]
[ "from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\nfrom numba import njit\n\n\n@njit(cache=True)\ndef _clip(a: np.ndarray, epsilon: float) -> np.ndarray:\n if ((a == 1) | (a <= 0)).any():\n a = np.maximum(a, epsilon).astype(np.float32)\n a = np.minimum(1 - epsilon, a).astype(np.float32)\n return a\n\n\n@njit(cache=True)\ndef _binary_crossentropy(\n labels: np.ndarray, preds: np.ndarray, epsilon: float\n) -> float:\n preds = _clip(preds, epsilon)\n loss = labels * np.log(preds)\n loss += (1 - labels) * np.log(1 - preds)\n loss = np.sum(-loss)\n loss /= labels.shape[-1]\n return loss\n\n\n@njit(cache=True)\ndef _binary_crossentropy_derivative(\n labels: np.ndarray, preds: np.ndarray, epsilon: float\n) -> np.ndarray:\n preds = _clip(preds, epsilon)\n grad = 1 - labels\n grad /= 1 - preds\n grad -= labels / preds\n grad /= labels.shape[-1]\n return grad\n\n\n@njit(cache=True)\ndef _categorial_crossentropy(labels: np.ndarray, preds: np.ndarray) -> float:\n prod = labels * np.log(preds)\n bs = labels.shape[-1]\n loss = 0.0\n for idx in np.arange(bs):\n loss += -prod[..., idx].sum()\n loss /= bs\n return loss\n\n\n@njit(cache=True)\ndef _categorial_crossentropy_derivative(\n labels: np.ndarray, preds: np.ndarray\n) -> np.ndarray:\n grad = -labels\n grad /= preds\n grad /= labels.shape[-1]\n return grad\n\n\nclass Loss(ABC):\n names: list[str] = None\n REGISTRY: dict[str, type[Loss]] = {}\n ndim: int = None\n\n def __init_subclass__(cls, **kwargs) -> None:\n if (names := cls.names) is not None:\n Loss.REGISTRY.update({name: cls for name in names})\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}()\"\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def validate_input(self, labels: np.ndarray, preds: np.ndarray) -> None:\n if labels.shape != preds.shape:\n raise AttributeError(\n \"The labels and the predictions should have the same shape\"\n )\n\n if labels.ndim < self.ndim:\n raise AttributeError(\n f\"{self.__class__.__name__} expects at least {self.ndim}-dimensional inputs\"\n )\n\n def should_reshape(self, shape: tuple[int, ...]) -> bool:\n \"\"\"Method to determine if the labels and predictions should be reshaped.\"\"\"\n return False\n\n @staticmethod\n def reshape_labels_and_preds(\n labels: np.ndarray, preds: np.ndarray\n ) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"Method to reshape the labels and predictions if they should be reshaped.\"\"\"\n return labels, preds\n\n @abstractmethod\n def loss_func(self, labels: np.ndarray, preds: np.ndarray) -> float:\n \"\"\"\n The formula used to calculate the loss.\n Subclasses classes must implement this.\n\n If the loss is J with inputs preds and Y,\n this should return J(preds, Y).\n\n Arguments:\n preds: Numpy-array, the predictions to be used for calculating the loss.\n\n Returns:\n A float representing the loss.\n \"\"\"\n\n @abstractmethod\n def loss_derivative(self, labels: np.ndarray, preds: np.ndarray) -> np.ndarray:\n \"\"\"\n The formula used to calculate the derivative of the loss function\n With respect to preds.\n Subclasses classes must implement this.\n\n If the loss is J with inputs preds and Y,\n this should return J'(preds, Y).\n\n Arguments:\n preds: Numpy-array, the predictions to be used for calculating the derivatives.\n\n Returns:\n A Numpy-array with the calculated derivatives.\n \"\"\"\n\n def compute_loss(self, labels: np.ndarray, preds: np.ndarray) -> float:\n self.validate_input(labels, preds)\n\n if self.should_reshape(labels.shape):\n labels, 
preds = self.reshape_labels_and_preds(labels, preds)\n\n return self.loss_func(labels, preds)\n\n def compute_derivatives(self, labels: np.ndarray, preds: np.ndarray) -> np.ndarray:\n self.validate_input(labels, preds)\n\n old_shape = None\n\n if self.should_reshape(labels.shape):\n old_shape = labels.shape\n labels, preds = self.reshape_labels_and_preds(labels, preds)\n\n grad = self.loss_derivative(labels, preds).astype(np.float32)\n\n if old_shape is not None:\n grad.shape = old_shape\n\n return grad\n\n\nclass BinaryCrossEntropy(Loss):\n names = [\"binary_crossentropy\", \"bce\"]\n ndim = 2\n epsilon = 1e-15\n\n def should_reshape(self, shape: tuple[int, ...]) -> bool:\n return len(shape) > self.ndim or shape[0] != 1\n\n @staticmethod\n def reshape_labels_and_preds(\n labels: np.ndarray, preds: np.ndarray\n ) -> tuple[np.ndarray, np.ndarray]:\n return labels.reshape(1, -1), preds.reshape(1, -1)\n\n def loss_func(self, labels: np.ndarray, preds: np.ndarray) -> float:\n return _binary_crossentropy(labels=labels, preds=preds, epsilon=self.epsilon)\n\n def loss_derivative(self, labels: np.ndarray, preds: np.ndarray) -> np.ndarray:\n return _binary_crossentropy_derivative(\n labels=labels,\n preds=preds,\n epsilon=self.epsilon,\n )\n\n\nclass MeanSquaredError(Loss):\n names = [\"mean_squared_error\", \"mse\"]\n ndim = 2\n\n def should_reshape(self, shape: tuple) -> bool:\n return len(shape) > self.ndim or shape[0] != 1\n\n @staticmethod\n def reshape_labels_and_preds(\n labels: np.ndarray, preds: np.ndarray\n ) -> tuple[np.ndarray, np.ndarray]:\n return labels.reshape(1, -1), preds.reshape(1, -1)\n\n def loss_func(self, labels: np.ndarray, preds: np.ndarray) -> float:\n loss = preds - labels\n loss **= 2\n loss = np.sum(loss / labels.shape[-1])\n\n return np.squeeze(loss)\n\n def loss_derivative(self, labels: np.ndarray, preds: np.ndarray) -> np.ndarray:\n grad = preds - labels\n grad *= 2\n grad /= labels.shape[-1]\n\n return grad\n\n\nclass CategoricalCrossEntropy(Loss):\n names = [\"categorial_crossentropy\", \"cce\"]\n ndim = 2\n\n def should_reshape(self, shape: tuple) -> bool:\n return len(shape) > self.ndim\n\n @staticmethod\n def reshape_labels_and_preds(\n labels: np.ndarray, preds: np.ndarray\n ) -> tuple[np.ndarray, np.ndarray]:\n classes = labels.shape[0]\n return labels.reshape(classes, -1), preds.reshape(classes, -1)\n\n def loss_func(self, labels: np.ndarray, preds: np.ndarray) -> float:\n return _categorial_crossentropy(labels=labels, preds=preds)\n\n def loss_derivative(self, labels: np.ndarray, preds: np.ndarray) -> np.ndarray:\n return _categorial_crossentropy_derivative(labels=labels, preds=preds)\n", "from __future__ import annotations\n\n\nimport numpy as np\n\nfrom dnn.utils import StateVariable\n\nfrom ..schedulers import LearningRateType\nfrom .base_optimizer import Optimizer, StateVariable, WeightsGradientsType\n\n\nclass SGD(Optimizer):\n momentum = StateVariable()\n\n def __init__(\n self, learning_rate: LearningRateType = 1e-2, momentum: float = 0.0\n ) -> None:\n\n super().__init__(learning_rate=learning_rate)\n\n if not 0.0 <= momentum <= 1.0:\n raise ValueError(\"momentum should be between 0 and 1.\")\n\n self._momentum = momentum > 0.0\n\n self.momentum = momentum\n\n self._velocities: list[np.ndarray] = None\n\n def _update_velocity(self, grad: np.ndarray, velocity: np.ndarray) -> np.ndarray:\n mom = self.momentum\n one_minus_mom = 1 - mom\n\n velocity *= mom\n velocity += one_minus_mom * grad\n\n return velocity\n\n def pre_iteration_state(self, grads: 
WeightsGradientsType) -> None:\n super().pre_iteration_state(grads=grads)\n\n if self._momentum is True and self.iterations == 0:\n self._velocities = [np.zeros_like(weight) for weight, _ in grads]\n\n def _apply_gradient(\n self, weight: np.ndarray, gradient: np.ndarray, grad_idx: int\n ) -> None:\n\n update = gradient\n\n if self._momentum:\n velocity = self._velocities[grad_idx]\n update = self._update_velocity(gradient, velocity)\n\n weight -= self.lr * update\n" ]
[ [ "numpy.log", "numpy.maximum", "numpy.minimum", "numpy.arange", "numpy.squeeze", "numpy.sum" ], [ "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dmuraco3/CompuTradePython
[ "5c2bc4d1d3baabd68677b8c3d78b8caeed52ba28" ]
[ "build/lib/Quantuiti/_ema.py" ]
[ "import numpy as np\ndef ema(self, N):\n \"\"\"\n Simple Moving Average = (N - PeriodSum) / N\n N = number of days in a given period\n PeriodSum = sum of stock closing prices in that period\n \"\"\"\n name = 'ema_' + str(N)\n dependent = 'sma_' + str(N)\n try:\n return self.data[name][self.index]\n \n except Exception as error:\n self.sma(N)\n print(self.data.head())\n temp=[]\n for index, row in self.data.iterrows():\n # print(type(row[dependent]))\n print(np.isnan(row[dependent]))\n if np.isnan(row[dependent]):\n temp.append(row[dependent])\n # print(row[dependent])\n else:\n if np.isnan(temp[-1]):\n ema = (self.data['Close'][index] - self.data[dependent][index]) * (2 / (N + 1)) + self.data[dependent][index]\n else:\n ema = (self.data['Close'][index] - temp[-1]) * (2 / (N + 1)) + temp[-1]\n \n temp.append(ema)\n \n self.data[name] = temp\n return self.data[name][self.index]\n \n \n # setattr(self, name, [sma])" ]
[ [ "numpy.isnan" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
debowin/pytext
[ "91126bb34bd689f3513f25ca0d356ad374e004ab", "91126bb34bd689f3513f25ca0d356ad374e004ab" ]
[ "pytext/metrics/__init__.py", "pytext/models/semantic_parsers/rnng/rnng_data_structures.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport itertools\nfrom collections import defaultdict\nfrom json import dumps as json_dumps\nfrom typing import (\n Any,\n DefaultDict,\n Dict,\n List,\n NamedTuple,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nimport numpy as np\nfrom pytext.utils import cuda\nfrom pytext.utils.ascii_table import ascii_table, ascii_table_from_dict\n\n\nRECALL_AT_PRECISION_THRESHOLDS = [0.2, 0.4, 0.6, 0.8, 0.9]\n\n\"\"\"\nBasic metric classes and functions for single-label prediction problems.\nExtending to multi-label support\n\"\"\"\n\n\nclass LabelPrediction(NamedTuple):\n \"\"\"\n Label predictions of an example.\n\n Attributes:\n label_scores: Confidence scores that each label receives.\n predicted_label: Index of the predicted label. This is usually the label with\n the highest confidence score in label_scores.\n expected_label: Index of the true label.\n \"\"\"\n\n label_scores: List[float]\n predicted_label: int\n expected_label: int\n\n\nclass LabelListPrediction(NamedTuple):\n \"\"\"\n Label list predictions of an example.\n\n Attributes:\n label_scores: Confidence scores that each label receives.\n predicted_label: List of indices of the predicted label.\n expected_label: List of indices of the true label.\n \"\"\"\n\n label_scores: List[float]\n predicted_label: List[int]\n expected_label: List[int]\n\n\nclass PRF1Scores(NamedTuple):\n \"\"\"\n Precision/recall/F1 scores for a collection of predictions.\n\n Attributes:\n true_positives: Number of true positives.\n false_positives: Number of false positives.\n false_negatives: Number of false negatives.\n precision: TP / (TP + FP).\n recall: TP / (TP + FN).\n f1: 2 * TP / (2 * TP + FP + FN).\n \"\"\"\n\n true_positives: int\n false_positives: int\n false_negatives: int\n precision: float\n recall: float\n f1: float\n\n\nclass SoftClassificationMetrics(NamedTuple):\n \"\"\"\n Classification scores that are independent of thresholds.\n \"\"\"\n\n average_precision: float\n recall_at_precision: Dict[float, float]\n decision_thresh_at_precision: Dict[float, float]\n roc_auc: Optional[float]\n\n\nclass MacroPRF1Scores(NamedTuple):\n \"\"\"\n Macro precision/recall/F1 scores (averages across each label).\n\n Attributes:\n num_label: Number of distinct labels.\n precision: Equally weighted average of precisions for each label.\n recall: Equally weighted average of recalls for each label.\n f1: Equally weighted average of F1 scores for each label.\n \"\"\"\n\n num_labels: int\n precision: float\n recall: float\n f1: float\n\n\nclass MacroPRF1Metrics(NamedTuple):\n \"\"\"\n Aggregated metric class for macro precision/recall/F1 scores.\n\n Attributes:\n per_label_scores: Mapping from label string to the corresponding\n precision/recall/F1 scores.\n macro_scores: Macro precision/recall/F1 scores across the labels in\n `per_label_scores`.\n \"\"\"\n\n per_label_scores: Dict[str, PRF1Scores]\n macro_scores: MacroPRF1Scores\n\n def print_metrics(self, indentation=\"\") -> None:\n print(\n ascii_table(\n [\n {\n \"label\": label,\n \"precision\": f\"{metrics.precision:.2f}\",\n \"recall\": f\"{metrics.recall:.2f}\",\n \"f1\": f\"{metrics.f1:.2f}\",\n \"support\": metrics.true_positives + metrics.false_negatives,\n }\n for label, metrics in sorted(self.per_label_scores.items())\n ],\n human_column_names={\n \"label\": \"Label\",\n \"precision\": \"Precision\",\n \"recall\": \"Recall\",\n \"f1\": \"F1\",\n \"support\": \"Support\",\n },\n footer={\n \"label\": 
\"Overall macro scores\",\n \"precision\": f\"{self.macro_scores.precision:.2f}\",\n \"recall\": f\"{self.macro_scores.recall:.2f}\",\n \"f1\": f\"{self.macro_scores.f1:.2f}\",\n },\n alignments={\"label\": \"<\"},\n indentation=indentation,\n )\n )\n\n\nclass PRF1Metrics(NamedTuple):\n \"\"\"\n Metric class for all types of precision/recall/F1 scores.\n\n Attributes:\n per_label_scores: Map from label string to the corresponding precision/recall/F1\n scores.\n macro_scores: Macro precision/recall/F1 scores across the labels in\n `per_label_scores`.\n micro_scores: Micro (regular) precision/recall/F1 scores for the same\n collection of predictions.\n \"\"\"\n\n per_label_scores: Dict[str, PRF1Scores]\n macro_scores: MacroPRF1Scores\n micro_scores: PRF1Scores\n\n def print_metrics(self) -> None:\n res = (\n f\"\\t{'Per label scores':<40}\"\n f\"\\t{'Precision':<10}\"\n f\"\\t{'Recall':<10}\"\n f\"\\t{'F1':<10}\"\n f\"\\t{'Support':<10}\\n\\n\"\n )\n for label, label_metrics in self.per_label_scores.items():\n support = label_metrics.true_positives + label_metrics.false_negatives\n res += (\n f\"\\t{label:<40}\"\n f\"\\t{label_metrics.precision * 100:<10.3f}\"\n f\"\\t{label_metrics.recall * 100:<10.3f}\"\n f\"\\t{label_metrics.f1 * 100:<10.3f}\"\n f\"\\t{support:<10}\\n\"\n )\n support = self.micro_scores.true_positives + self.micro_scores.false_negatives\n res += (\n f\"\\n\\t{'Overall micro scores':<40}\"\n f\"\\t{self.micro_scores.precision * 100:<10.3f}\"\n f\"\\t{self.micro_scores.recall * 100:<10.3f}\"\n f\"\\t{self.micro_scores.f1 * 100:<10.3f}\"\n f\"\\t{support:<10}\\n\"\n )\n res += (\n f\"\\t{'Overall macro scores':<40}\"\n f\"\\t{self.macro_scores.precision * 100:<10.3f}\"\n f\"\\t{self.macro_scores.recall * 100:<10.3f}\"\n f\"\\t{self.macro_scores.f1 * 100:<10.3f}\\n\"\n )\n print(res)\n\n\nclass ClassificationMetrics(NamedTuple):\n \"\"\"\n Metric class for various classification metrics.\n\n Attributes:\n accuracy: Overall accuracy of predictions.\n macro_prf1_metrics: Macro precision/recall/F1 scores.\n per_label_soft_scores: Per label soft metrics.\n mcc: Matthews correlation coefficient.\n roc_auc: Area under the Receiver Operating Characteristic curve.\n loss: Training loss (only used for selecting best model, no need to print).\n \"\"\"\n\n accuracy: float\n macro_prf1_metrics: MacroPRF1Metrics\n per_label_soft_scores: Optional[Dict[str, SoftClassificationMetrics]]\n mcc: Optional[float]\n roc_auc: Optional[float]\n loss: float\n\n def print_metrics(self, report_pep=False) -> None:\n print(f\"Accuracy: {self.accuracy * 100:.2f}\")\n print(\"\\nSoft Metrics:\")\n if self.per_label_soft_scores:\n soft_scores = [\n {\n \"label\": label,\n \"avg_pr\": f\"{metrics.average_precision:.3f}\",\n \"roc_auc\": f\"{(metrics.roc_auc or 0.0):.3f}\",\n }\n for label, metrics in sorted(self.per_label_soft_scores.items())\n ]\n columns = {\n \"label\": \"Label\",\n \"avg_pr\": \"Average precision\",\n \"roc_auc\": \"ROC AUC\",\n }\n print(ascii_table(soft_scores, columns))\n all_thresholds = set(\n itertools.chain.from_iterable(\n metrics.recall_at_precision\n for metrics in self.per_label_soft_scores.values()\n )\n )\n print(\"\\nRecall at Precision\")\n print(\n ascii_table(\n (\n dict(\n {\"label\": label},\n **{\n str(p): f\"{r:.3f}\"\n for p, r in metrics.recall_at_precision.items()\n },\n )\n for label, metrics in sorted(self.per_label_soft_scores.items())\n ),\n dict(\n {\"label\": \"Label\"},\n **{str(t): f\"R@P {t}\" for t in all_thresholds},\n ),\n alignments={\"label\": 
\"<\"},\n )\n )\n if self.mcc:\n print(f\"\\nMatthews correlation coefficient: {self.mcc :.3f}\")\n if self.roc_auc:\n print(f\"\\nROC AUC: {self.roc_auc:.3f}\")\n if report_pep:\n self.print_pep()\n\n def print_pep(self):\n metrics = {\"Accuracy\": f\"{self.accuracy * 100:.2f}\"}\n if self.roc_auc:\n metrics[\"ROC AUC\"] = f\"{self.roc_auc :.3f}\"\n for key, value in metrics.items():\n info = {\"type\": \"NET\", \"metric\": key, \"unit\": \"None\", \"value\": value}\n print(\"PyTorchObserver \" + json_dumps(info))\n\n\nclass Confusions:\n \"\"\"\n Confusion information for a collection of predictions.\n\n Attributes:\n TP: Number of true positives.\n FP: Number of false positives.\n FN: Number of false negatives.\n \"\"\"\n\n __slots__ = \"TP\", \"FP\", \"FN\"\n\n def __init__(self, TP: int = 0, FP: int = 0, FN: int = 0) -> None:\n self.TP: int = TP\n self.FP: int = FP\n self.FN: int = FN\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, Confusions):\n return NotImplemented\n return self.TP == other.TP and self.FP == other.FP and self.FN == other.FN\n\n def __add__(self, other: \"Confusions\") -> \"Confusions\":\n return Confusions(\n TP=self.TP + other.TP, FP=self.FP + other.FP, FN=self.FN + other.FN\n )\n\n def __iadd__(self, other: \"Confusions\") -> \"Confusions\":\n self.TP += other.TP\n self.FP += other.FP\n self.FN += other.FN\n return self\n\n def _asdict(self) -> Dict:\n return {\"TP\": self.TP, \"FP\": self.FP, \"FN\": self.FN}\n\n def compute_metrics(self) -> PRF1Scores:\n precision, recall, f1 = compute_prf1(self.TP, self.FP, self.FN)\n return PRF1Scores(\n true_positives=self.TP,\n false_positives=self.FP,\n false_negatives=self.FN,\n precision=precision,\n recall=recall,\n f1=f1,\n )\n\n\nclass PerLabelConfusions:\n \"\"\"\n Per label confusion information.\n\n Attributes:\n label_confusions_map: Map from label string to the corresponding confusion\n counts.\n \"\"\"\n\n __slots__ = \"label_confusions_map\"\n\n def __init__(self) -> None:\n self.label_confusions_map: DefaultDict[str, Confusions] = defaultdict(\n Confusions\n )\n\n def update(self, label: str, item: str, count: int) -> None:\n \"\"\"\n Increase one of TP, FP or FN count for a label by certain amount.\n\n Args:\n label: Label to be modified.\n item: Type of count to be modified, should be one of \"TP\", \"FP\" or \"FN\".\n count: Amount to be added to the count.\n\n Returns:\n None\n \"\"\"\n confusions = self.label_confusions_map[label]\n setattr(confusions, item, getattr(confusions, item) + count)\n\n def compute_metrics(self) -> MacroPRF1Metrics:\n per_label_scores: Dict[str, PRF1Scores] = {}\n precision_sum, recall_sum, f1_sum = 0.0, 0.0, 0.0\n for label, confusions in sorted(self.label_confusions_map.items()):\n scores = confusions.compute_metrics()\n per_label_scores[label] = scores\n if confusions.TP + confusions.FN > 0:\n precision_sum += scores.precision\n recall_sum += scores.recall\n f1_sum += scores.f1\n num_labels = len(self.label_confusions_map)\n return MacroPRF1Metrics(\n per_label_scores=per_label_scores,\n macro_scores=MacroPRF1Scores(\n num_labels=num_labels,\n precision=safe_division(precision_sum, num_labels),\n recall=safe_division(recall_sum, num_labels),\n f1=safe_division(f1_sum, num_labels),\n ),\n )\n\n\nclass AllConfusions:\n \"\"\"\n Aggregated class for per label confusions.\n\n Attributes:\n per_label_confusions: Per label confusion information.\n confusions: Overall TP, FP and FN counts across the labels in\n `per_label_confusions`.\n \"\"\"\n\n __slots__ = 
\"per_label_confusions\", \"confusions\"\n\n def __init__(self) -> None:\n self.per_label_confusions = PerLabelConfusions()\n self.confusions = Confusions()\n\n def compute_metrics(self) -> PRF1Metrics:\n per_label_metrics = self.per_label_confusions.compute_metrics()\n return PRF1Metrics(\n per_label_scores=per_label_metrics.per_label_scores,\n macro_scores=per_label_metrics.macro_scores,\n micro_scores=self.confusions.compute_metrics(),\n )\n\n\nclass PairwiseRankingMetrics(NamedTuple):\n \"\"\"\n Metric class for pairwise ranking\n\n Attributes:\n num_examples (int): number of samples\n accuracy (float): how many times did we rank in the correct order\n average_score_difference (float): average score(higherRank) - score(lowerRank)\n\n \"\"\"\n\n num_examples: int\n accuracy: float\n average_score_difference: float\n\n def print_metrics(self) -> None:\n print(f\"RankingAccuracy: {self.accuracy * 100:.2f}\")\n print(f\"AvgScoreDiff: {self.average_score_difference}\")\n print(f\"NumExamples: {self.num_examples}\")\n\n\nclass RegressionMetrics(NamedTuple):\n \"\"\"\n Metrics for regression tasks.\n\n Attributes:\n num_examples (int): number of examples\n pearson_correlation (float): correlation between predictions and labels\n mse (float): mean-squared error between predictions and labels\n \"\"\"\n\n num_examples: int\n pearson_correlation: float\n mse: float\n\n def print_metrics(self):\n print(f\"Num examples: {self.num_examples}\")\n print(f\"Pearson correlation: {self.pearson_correlation:.3f}\")\n print(f\"Mean squared error: {self.mse:.3f}\")\n\n\nclass RealtimeMetrics(NamedTuple):\n \"\"\"\n Realtime Metrics for tracking training progress and performance.\n\n Attributes:\n samples (int): number of samples\n tps (float): tokens per second\n ups (float): updates per second\n \"\"\"\n\n samples: int\n tps: float\n ups: float\n\n def _format(self, key, value):\n if key in (\"tps\", \"ups\"):\n return round(value)\n return value\n\n def __str__(self):\n metrics = {\"num_gpus\": cuda.DISTRIBUTED_WORLD_SIZE}\n for key, value in self._asdict().items():\n if not value:\n continue\n metrics[key] = self._format(key, value)\n return str(metrics)\n\n\ndef safe_division(n: Union[int, float], d: int) -> float:\n return float(n) / d if d else 0.0\n\n\ndef compute_prf1(tp: int, fp: int, fn: int) -> Tuple[float, float, float]:\n precision = safe_division(tp, tp + fp)\n recall = safe_division(tp, tp + fn)\n f1 = safe_division(2 * tp, 2 * tp + fp + fn)\n return (precision, recall, f1)\n\n\ndef average_precision_score(\n y_true_sorted: np.ndarray, y_score_sorted: np.ndarray\n) -> float:\n \"\"\"\n Computes average precision, which summarizes the precision-recall curve as the\n precisions achieved at each threshold weighted by the increase in recall since the\n previous threshold.\n\n Args:\n y_true_sorted: Numpy array sorted according to decreasing confidence scores\n indicating whether each prediction is correct.\n y_score_sorted Numpy array of confidence scores for the predictions in\n decreasing order.\n\n Returns:\n Average precision score.\n\n TODO: This is too slow, improve the performance\n \"\"\"\n ap = 0.0\n tp = 0\n threshold = y_score_sorted[0]\n y_score_sorted = np.append(y_score_sorted[1:], np.NAN)\n total_positive = np.sum(y_true_sorted)\n added_positives = 0\n\n for k, (label, score) in enumerate(zip(y_true_sorted, y_score_sorted)):\n if label:\n added_positives += 1\n if score != threshold:\n threshold = score\n recall_diff = added_positives / total_positive\n tp += added_positives\n 
added_positives = 0\n p_at_tresh = tp / (k + 1)\n ap += p_at_tresh * recall_diff\n return float(ap)\n\n\ndef sort_by_score(y_true_list: Sequence[bool], y_score_list: Sequence[float]):\n y_true = np.array(y_true_list)\n y_score = np.array(y_score_list)\n sort_indices = np.argsort(y_score, kind=\"mergesort\")[::-1]\n y_true = y_true[sort_indices]\n y_score = y_score[sort_indices]\n return y_true, y_score\n\n\ndef recall_at_precision(\n y_true_sorted: np.ndarray, y_score_sorted: np.ndarray, thresholds: Sequence[float]\n) -> Dict[float, float]:\n \"\"\"\n Computes recall at various precision levels\n\n Args:\n y_true_sorted: Numpy array sorted according to decreasing confidence scores\n indicating whether each prediction is correct.\n y_score_sorted: Numpy array of confidence scores for the predictions in\n decreasing order.\n thresholds: Sequence of floats indicating the requested precision thresholds\n\n Returns:\n Dictionary of maximum recall at requested precision thresholds.\n \"\"\"\n y_score_shift = np.append(y_score_sorted[1:], np.nan)\n score_change = (y_score_sorted - y_score_shift) != 0\n cum_sum = np.cumsum(y_true_sorted)\n recall_at_precision_dict = {t: 0.0 for t in thresholds}\n decision_thresh_at_precision_dict = {t: 0.0 for t in thresholds}\n sum_y_true = y_true_sorted.sum()\n if sum_y_true == 0:\n return recall_at_precision_dict, decision_thresh_at_precision_dict\n recall = cum_sum / sum_y_true\n precision = cum_sum / np.array(range(1, len(y_true_sorted) + 1))\n\n for threshold in thresholds:\n meets_requirements = np.logical_and(precision >= threshold, score_change)\n if not np.any(meets_requirements):\n continue\n\n recall_at_precision_dict[threshold] = float(\n max(np.extract(meets_requirements, recall))\n )\n decision_thresh_at_precision_dict[threshold] = float(\n min(np.extract(meets_requirements, y_score_sorted))\n )\n\n return recall_at_precision_dict, decision_thresh_at_precision_dict\n\n\ndef compute_soft_metrics(\n predictions: Sequence[LabelPrediction],\n label_names: Sequence[str],\n recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,\n) -> Dict[str, SoftClassificationMetrics]:\n \"\"\"\n Computes soft classification metrics (for now, average precision) given a list of\n label predictions.\n\n Args:\n predictions: Label predictions, including the confidence score for each label.\n label_names: Indexed label names.\n recall_at_precision_thresholds: precision thresholds at which to calculate\n recall\n\n\n Returns:\n Dict from label strings to their corresponding soft metrics.\n \"\"\"\n soft_metrics = {}\n for i, label_name in enumerate(label_names):\n y_true = []\n y_score = []\n for label_scores, _, expected in predictions:\n y_true.append(expected == i)\n y_score.append(label_scores[i])\n y_true_sorted, y_score_sorted = sort_by_score(y_true, y_score)\n ap = average_precision_score(y_true_sorted, y_score_sorted)\n recall_at_precision_dict, decision_thresh_at_precision = recall_at_precision(\n y_true_sorted, y_score_sorted, recall_at_precision_thresholds\n )\n roc_auc = compute_roc_auc(predictions, target_class=i)\n soft_metrics[label_name] = SoftClassificationMetrics(\n average_precision=ap,\n recall_at_precision=recall_at_precision_dict,\n decision_thresh_at_precision=decision_thresh_at_precision,\n roc_auc=roc_auc,\n )\n return soft_metrics\n\n\ndef compute_multi_label_soft_metrics(\n predictions: Sequence[LabelListPrediction],\n label_names: Sequence[str],\n recall_at_precision_thresholds: Sequence[float] = 
RECALL_AT_PRECISION_THRESHOLDS,\n) -> Dict[str, SoftClassificationMetrics]:\n \"\"\"\n Computes multi-label soft classification metrics\n (for now, average precision)\n\n Args:\n predictions: multi-label predictions,\n including the confidence score for each label.\n label_names: Indexed label names.\n recall_at_precision_thresholds: precision thresholds at which to calculate\n recall\n\n\n Returns:\n Dict from label strings to their corresponding soft metrics.\n \"\"\"\n soft_metrics = {}\n for i, label_name in enumerate(label_names):\n y_true = []\n y_score = []\n for label_scores, _, expected in predictions:\n y_true.append(i in expected)\n y_score.append(label_scores[i])\n y_true_sorted, y_score_sorted = sort_by_score(y_true, y_score)\n ap = average_precision_score(y_true_sorted, y_score_sorted)\n recall_at_precision_dict, decision_thresh_at_precision = recall_at_precision(\n y_true_sorted, y_score_sorted, recall_at_precision_thresholds\n )\n roc_auc = compute_roc_auc(predictions, target_class=i)\n soft_metrics[label_name] = SoftClassificationMetrics(\n average_precision=ap,\n recall_at_precision=recall_at_precision_dict,\n decision_thresh_at_precision=decision_thresh_at_precision,\n roc_auc=roc_auc,\n )\n return soft_metrics\n\n\ndef compute_matthews_correlation_coefficients(\n TP: int, FP: int, FN: int, TN: int\n) -> float:\n \"\"\"\n Computes Matthews correlation coefficient, a way to summarize all four counts (TP,\n FP, FN, TN) in the confusion matrix of binary classification.\n\n Args:\n TP: Number of true positives.\n FP: Number of false positives.\n FN: Number of false negatives.\n TN: Number of true negatives.\n\n Returns:\n Matthews correlation coefficient, which is `sqrt((TP + FP) * (TP + FN) *\n (TN + FP) * (TN + FN))`.\n \"\"\"\n mcc = safe_division(\n (TP * TN) - (FP * FN),\n np.sqrt(float((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))),\n )\n return mcc\n\n\ndef compute_roc_auc(\n predictions: Sequence[LabelPrediction], target_class: int = 0\n) -> Optional[float]:\n \"\"\"\n Computes area under the Receiver Operating Characteristic curve, for binary\n classification. 
Implementation based off of (and explained at)\n https://www.ibm.com/developerworks/community/blogs/jfp/entry/Fast_Computation_of_AUC_ROC_score?lang=en.\n \"\"\"\n # Collect scores\n y_true = [expected == target_class for _, _, expected in predictions]\n y_score = [label_scores[target_class] for label_scores, _, _ in predictions]\n y_true_sorted, _ = sort_by_score(y_true, y_score)\n\n # Compute auc as probability that a positive example is scored higher than\n # a negative example.\n n_false = 0\n n_correct_pair_order = 0\n\n for y in reversed(y_true_sorted): # want low predicted to high predicted\n if y:\n n_correct_pair_order += n_false\n else:\n n_false += 1\n\n n_true = len(y_true) - n_false\n if n_true == 0 or n_false == 0:\n return None\n\n return float(n_correct_pair_order / (n_true * n_false))\n\n\ndef compute_classification_metrics(\n predictions: Sequence[LabelPrediction],\n label_names: Sequence[str],\n loss: float,\n average_precisions: bool = True,\n recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,\n) -> ClassificationMetrics:\n \"\"\"\n A general function that computes classification metrics given a list of label\n predictions.\n\n Args:\n predictions: Label predictions, including the confidence score for each label.\n label_names: Indexed label names.\n average_precisions: Whether to compute average precisions for labels or not.\n Defaults to True.\n recall_at_precision_thresholds: precision thresholds at which to calculate recall\n\n\n Returns:\n ClassificationMetrics which contains various classification metrics.\n \"\"\"\n num_correct = 0\n per_label_confusions = PerLabelConfusions()\n for _, predicted, expected in predictions:\n predicted_label = label_names[predicted]\n expected_label = label_names[expected]\n if predicted_label == expected_label:\n num_correct += 1\n per_label_confusions.update(expected_label, \"TP\", 1)\n else:\n per_label_confusions.update(expected_label, \"FN\", 1)\n per_label_confusions.update(predicted_label, \"FP\", 1)\n accuracy = safe_division(num_correct, len(predictions))\n macro_prf1_metrics = per_label_confusions.compute_metrics()\n\n soft_metrics = (\n compute_soft_metrics(predictions, label_names, recall_at_precision_thresholds)\n if average_precisions\n else None\n )\n\n if len(label_names) == 2:\n confusion_dict = per_label_confusions.label_confusions_map\n # Since MCC is symmetric, it doesn't matter which label is 0 and which is 1\n TP = confusion_dict[label_names[0]].TP\n FP = confusion_dict[label_names[0]].FP\n FN = confusion_dict[label_names[0]].FN\n TN = confusion_dict[label_names[1]].TP\n mcc: Optional[float] = compute_matthews_correlation_coefficients(TP, FP, FN, TN)\n roc_auc: Optional[float] = compute_roc_auc(predictions)\n else:\n mcc = None\n roc_auc = None\n\n return ClassificationMetrics(\n accuracy=accuracy,\n macro_prf1_metrics=macro_prf1_metrics,\n per_label_soft_scores=soft_metrics,\n mcc=mcc,\n roc_auc=roc_auc,\n loss=loss,\n )\n\n\ndef compute_multi_label_classification_metrics(\n predictions: Sequence[LabelListPrediction],\n label_names: Sequence[str],\n loss: float,\n average_precisions: bool = True,\n recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,\n) -> ClassificationMetrics:\n \"\"\"\n A general function that computes classification metrics given a list of multi-label\n predictions.\n\n Args:\n predictions: multi-label predictions,\n including the confidence score for each label.\n label_names: Indexed label names.\n average_precisions: Whether to 
compute average precisions for labels or not.\n Defaults to True.\n recall_at_precision_thresholds: precision thresholds at which\n to calculate recall\n\n\n Returns:\n ClassificationMetrics which contains various classification metrics.\n \"\"\"\n\n num_correct = 0\n num_expected_labels = 0\n per_label_confusions = PerLabelConfusions()\n for _, predicted, expected in predictions:\n # \"predicted\" is in the format of n_hot_encoding\n # Calculate TP & FN\n for true_label_idx in expected:\n if true_label_idx < 0:\n # padded label \"-1\"\n break\n num_expected_labels += 1\n expected_label = label_names[true_label_idx]\n if predicted[true_label_idx] == 1:\n num_correct += 1\n per_label_confusions.update(expected_label, \"TP\", 1)\n else:\n per_label_confusions.update(expected_label, \"FN\", 1)\n # Calculate FP\n for idx, pred in enumerate(predicted):\n if pred == 1 and idx not in expected:\n predicted_label = label_names[idx]\n per_label_confusions.update(predicted_label, \"FP\", 1)\n\n accuracy = safe_division(num_correct, num_expected_labels)\n macro_prf1_metrics = per_label_confusions.compute_metrics()\n\n soft_metrics = (\n compute_multi_label_soft_metrics(\n predictions, label_names, recall_at_precision_thresholds\n )\n if average_precisions\n else None\n )\n\n if len(label_names) == 2:\n confusion_dict = per_label_confusions.label_confusions_map\n # Since MCC is symmetric, it doesn't matter which label is 0 and which is 1\n TP = confusion_dict[label_names[0]].TP\n FP = confusion_dict[label_names[0]].FP\n FN = confusion_dict[label_names[0]].FN\n TN = confusion_dict[label_names[1]].TP\n mcc: Optional[float] = compute_matthews_correlation_coefficients(TP, FP, FN, TN)\n roc_auc: Optional[float] = compute_roc_auc(predictions)\n else:\n mcc = None\n roc_auc = None\n\n return ClassificationMetrics(\n accuracy=accuracy,\n macro_prf1_metrics=macro_prf1_metrics,\n per_label_soft_scores=soft_metrics,\n mcc=mcc,\n roc_auc=roc_auc,\n loss=loss,\n )\n\n\ndef compute_pairwise_ranking_metrics(\n predictions: Sequence[int], scores: Sequence[float]\n) -> PairwiseRankingMetrics:\n \"\"\"\n Computes metrics for pairwise ranking given sequences of predictions and scores\n\n Args:\n predictions : 1 if ranking was correct, 0 if ranking was incorrect\n scores : score(higher-ranked-sample) - score(lower-ranked-sample)\n\n Returns:\n PairwiseRankingMetrics object\n \"\"\"\n return PairwiseRankingMetrics(\n num_examples=len(predictions),\n accuracy=safe_division(sum(predictions), len(predictions)),\n average_score_difference=safe_division(sum(scores), len(predictions)),\n )\n\n\ndef compute_regression_metrics(\n predictions: Sequence[float], targets: Sequence[float]\n) -> RegressionMetrics:\n \"\"\"\n Computes metrics for regression tasks.abs\n\n Args:\n predictions: 1-D sequence of float predictions\n targets: 1-D sequence of float labels\n\n Returns:\n RegressionMetrics object\n \"\"\"\n preds, targs = np.array(predictions), np.array(targets)\n pred_mean, targ_mean = preds.mean(), targs.mean()\n covariance = (preds - pred_mean).dot(targs - targ_mean) / preds.size\n corr = covariance / preds.std() / targs.std()\n\n mse = np.square(preds - targs).mean()\n return RegressionMetrics(num_examples=len(preds), pearson_correlation=corr, mse=mse)\n", "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n\nfrom typing import Any, List, Sized, Tuple\n\nimport torch as torch\nimport torch.nn as nn\nfrom pytext.utils.cuda import FloatTensor\nfrom pytext.utils.tensor import xaviervar\nfrom pytext.utils.torch import reverse_tensor_list\n\n\nclass Element:\n \"\"\"\n Generic element representing a token / non-terminal / sub-tree on a stack.\n Used to compute valid actions in the RNNG parser.\n \"\"\"\n\n def __init__(self, node: Any) -> None:\n self.node = node\n\n def __eq__(self, other) -> bool:\n return self.node == other.node\n\n def __repr__(self) -> str:\n return str(self.node)\n\n\nclass StackLSTM(Sized):\n \"\"\"\n The Stack LSTM from Dyer et al: https://arxiv.org/abs/1505.08075\n \"\"\"\n\n def __init__(self, lstm: nn.LSTM):\n \"\"\"\n Shapes:\n initial_state: (lstm_layers, 1, lstm_hidden_dim) each\n \"\"\"\n self.lstm = lstm\n initial_state = (\n FloatTensor(lstm.num_layers, 1, lstm.hidden_size).fill_(0),\n FloatTensor(lstm.num_layers, 1, lstm.hidden_size).fill_(0),\n )\n # Stack of (state, (embedding, element))\n self.stack = [\n (initial_state, (self._lstm_output(initial_state), Element(\"Root\")))\n ]\n\n def _lstm_output(self, state: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:\n \"\"\"\n Shapes:\n state: (lstm_layers, 1, lstm_hidden_dim) each\n return value: (1, lstm_hidden_dim)\n \"\"\"\n return state[0][-1]\n\n def push(self, expression: torch.Tensor, element: Element) -> None:\n \"\"\"\n Shapes:\n expression: (1, lstm_input_dim)\n \"\"\"\n old_top_state = self.stack[-1][0]\n # Unsqueezing expression for sequence_length = 1\n _, new_top_state = self.lstm(expression.unsqueeze(0), old_top_state)\n # Push in (state, (embedding, element))\n self.stack.append((new_top_state, (self._lstm_output(new_top_state), element)))\n\n def pop(self) -> Tuple[torch.Tensor, Element]:\n \"\"\"\n Pops and returns tuple of output embedding (1, lstm_hidden_dim) and element\n \"\"\"\n\n return self.stack.pop()[1]\n\n def embedding(self) -> torch.Tensor:\n \"\"\"\n Shapes:\n return value: (1, lstm_hidden_dim)\n \"\"\"\n assert len(self.stack) > 0, \"stack size must be greater than 0\"\n\n top_state = self.stack[-1][0]\n return self._lstm_output(top_state)\n\n def element_from_top(self, index: int) -> Element:\n return self.stack[-(index + 1)][1][1]\n\n def __len__(self) -> int:\n return len(self.stack) - 1\n\n def __str__(self) -> str:\n return \"->\".join([str(x[1][1]) for x in self.stack])\n\n def copy(self):\n other = StackLSTM(self.lstm)\n other.stack = list(self.stack)\n return other\n\n\nclass CompositionalNN(torch.jit.ScriptModule):\n \"\"\"\n Combines a list / sequence of embeddings into one using a biLSTM\n \"\"\"\n\n __constants__ = [\"lstm_dim\", \"linear_seq\"]\n\n def __init__(self, lstm_dim: int):\n super().__init__()\n self.lstm_dim = lstm_dim\n self.lstm_fwd = nn.LSTM(lstm_dim, lstm_dim, num_layers=1)\n self.lstm_rev = nn.LSTM(lstm_dim, lstm_dim, num_layers=1)\n self.linear_seq = nn.Sequential(nn.Linear(2 * lstm_dim, lstm_dim), nn.Tanh())\n\n @torch.jit.script_method\n def forward(self, x: List[torch.Tensor], device: str = \"cpu\") -> torch.Tensor:\n \"\"\"\n Embed the sequence. 
If the input corresponds to [IN:GL where am I at]:\n - x will contain the embeddings of [at I am where IN:GL] in that order.\n - Forward LSTM will embed the sequence [IN:GL where am I at].\n - Backward LSTM will embed the sequence [IN:GL at I am where].\n The final hidden states are concatenated and then projected.\n\n Args:\n x: Embeddings of the input tokens in *reversed* order\n Shapes:\n x: (1, lstm_dim) each\n return value: (1, lstm_dim)\n \"\"\"\n # reset hidden state every time\n lstm_hidden_fwd = (\n xaviervar([1, 1, self.lstm_dim], device=device),\n xaviervar([1, 1, self.lstm_dim], device=device),\n )\n lstm_hidden_rev = (\n xaviervar([1, 1, self.lstm_dim], device=device),\n xaviervar([1, 1, self.lstm_dim], device=device),\n )\n nonterminal_element = x[-1]\n reversed_rest = x[:-1]\n # Always put nonterminal_element at the front\n fwd_input = [nonterminal_element] + reverse_tensor_list(reversed_rest)\n rev_input = [nonterminal_element] + reversed_rest\n stacked_fwd = self.lstm_fwd(torch.stack(fwd_input), lstm_hidden_fwd)[0][0]\n stacked_rev = self.lstm_rev(torch.stack(rev_input), lstm_hidden_rev)[0][0]\n combined = torch.cat([stacked_fwd, stacked_rev], dim=1)\n subtree_embedding = self.linear_seq(combined)\n return subtree_embedding\n\n\nclass CompositionalSummationNN(torch.jit.ScriptModule):\n \"\"\"\n Simpler version of CompositionalNN\n \"\"\"\n\n __constants__ = [\"lstm_dim\", \"linear_seq\"]\n\n def __init__(self, lstm_dim: int):\n super().__init__()\n self.lstm_dim = lstm_dim\n self.linear_seq = nn.Sequential(nn.Linear(lstm_dim, lstm_dim), nn.Tanh())\n\n @torch.jit.script_method\n def forward(self, x: List[torch.Tensor], device: str = \"cpu\") -> torch.Tensor:\n combined = torch.sum(torch.cat(x, dim=0), dim=0, keepdim=True)\n subtree_embedding = self.linear_seq(combined)\n return subtree_embedding\n\n\nclass ParserState:\n \"\"\"\n Maintains state of the Parser. Useful for beam search\n \"\"\"\n\n def __init__(self, parser=None):\n if not parser:\n return\n\n self.buffer_stackrnn = StackLSTM(parser.buff_rnn)\n self.stack_stackrnn = StackLSTM(parser.stack_rnn)\n self.action_stackrnn = StackLSTM(parser.action_rnn)\n\n self.predicted_actions_idx = []\n self.action_scores = []\n\n self.num_open_NT = 0\n self.is_open_NT: List[bool] = []\n self.found_unsupported = False\n self.action_p = torch.Tensor()\n\n # negative cumulative log prob so sort(states) is in descending order\n self.neg_prob = 0\n\n def finished(self):\n return len(self.stack_stackrnn) == 1 and len(self.buffer_stackrnn) == 0\n\n def copy(self):\n other = ParserState()\n other.buffer_stackrnn = self.buffer_stackrnn.copy()\n other.stack_stackrnn = self.stack_stackrnn.copy()\n other.action_stackrnn = self.action_stackrnn.copy()\n other.predicted_actions_idx = self.predicted_actions_idx.copy()\n other.action_scores = self.action_scores.copy()\n other.num_open_NT = self.num_open_NT\n other.is_open_NT = self.is_open_NT.copy()\n other.neg_prob = self.neg_prob\n other.found_unsupported = self.found_unsupported\n\n # detach to avoid making copies, only called in inference to share data\n other.action_p = self.action_p.detach()\n return other\n\n def __gt__(self, other):\n return self.neg_prob > other.neg_prob\n\n def __eq__(self, other):\n return self.neg_prob == other.neg_prob\n" ]
[ [ "numpy.square", "numpy.logical_and", "numpy.cumsum", "numpy.append", "numpy.extract", "numpy.any", "numpy.argsort", "numpy.array", "numpy.sum" ], [ "torch.Tensor", "torch.cat", "torch.nn.LSTM", "torch.nn.Tanh", "torch.nn.Linear", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Seb-Park/tensorflow-for-poets-2
[ "1ef4112553b25f1c687b40b6872d4a97b9d44762" ]
[ "scripts/label_image_flask.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nfrom flask import Flask, escape, request, jsonify\n\nimport argparse\nimport sys\nimport time\n\nimport numpy as np\nimport requests\nimport tensorflow as tf\n\napp = Flask(__name__)\n\[email protected]('/')\ndef mainServer():\n file_name = \"tf_files/flower_photos/daisy/3475870145_685a19116d.jpg\"\n model_file = \"../tf_files/retrained_graph.pb\"#add ../ at beginning if running from scripts folder. If running from classiefier folder\n #e.g.python -m scripts.label_image_flask --graph=tf_files/retrained_graph.pb --image=caterCard.png\n\n label_file = \"tf_files/retrained_labels.txt\"\n input_height = 224\n input_width = 224\n input_mean = 128\n input_std = 128\n input_layer = \"input\"\n output_layer = \"final_result\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image\", help=\"image to be processed\")\n # parser.add_argument(request.args.get('image'))\n parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n # testString = request.args.get('str')\n # graph = \"../tf_files/retrained_graph.pb\"\n\n args = parser.parse_args()\n\n if args.graph:\n model_file = args.graph\n # if args.image:\n # file_name = args.image\n file_name = request.args.get('img')\n if args.labels:\n label_file = args.labels\n if args.input_height:\n input_height = args.input_height\n if args.input_width:\n input_width = args.input_width\n if args.input_mean:\n input_mean = args.input_mean\n if args.input_std:\n input_std = args.input_std\n if args.input_layer:\n input_layer = args.input_layer\n if args.output_layer:\n output_layer = args.output_layer\n\n graph = load_graph(model_file)\n t = read_tensor_from_image_url(file_name,\n input_height=input_height,\n input_width=input_width,\n input_mean=input_mean,\n input_std=input_std)\n\n input_name = \"import/\" + input_layer\n output_name = \"import/\" + output_layer\n input_operation = graph.get_operation_by_name(input_name);\n output_operation = graph.get_operation_by_name(output_name);\n\n with tf.Session(graph=graph) as sess:\n start = time.time()\n results = sess.run(output_operation.outputs[0],\n {input_operation.outputs[0]: t})\n end=time.time()\n results = np.squeeze(results)\n\n top_k = 
results.argsort()[-5:][::-1]\n labels = load_labels(label_file)\n\n print('\\nEvaluation time (1-image): {:.3f}s\\n'.format(end-start))\n template = '\"name\":\"{}\", \"score\":\"{:0.5f}\"'\n stringToReturn = '{\"possible_pokemon\": ['\n listOfPossiblePokes = []\n for i in top_k:\n listOfPossiblePokes.append({'name':labels[i],'score':str(results[i])})\n # print(template.format(labels[i], results[i]))\n stringToReturn += \"{\" + template.format(labels[i], results[i])+\"},\"\n stringToReturn = stringToReturn[:-1]#THIS REMOVES THE LAST COMMA\n stringToReturn += ']}'\n # print(jsonify(listOfPossiblePokes))\n return jsonify({'possible_pokemon':listOfPossiblePokes})\n # return (stringToReturn)\n # return \"<h1>Label Image Server!</h1>\" + \"\\n<h2>enter</h2>\"\n\n\ndef load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.GraphDef()\n\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n\n return graph\n\ndef read_tensor_from_image_file(file_name, input_height=299, input_width=299,\n\t\t\t\tinput_mean=0, input_std=255):\n input_name = \"file_reader\"\n output_name = \"normalized\"\n file_reader = tf.read_file(file_name, input_name)\n if file_name.endswith(\".png\"):\n image_reader = tf.image.decode_png(file_reader, channels = 3,\n name='png_reader')\n elif file_name.endswith(\".gif\"):\n image_reader = tf.squeeze(tf.image.decode_gif(file_reader,\n name='gif_reader'))\n elif file_name.endswith(\".bmp\"):\n image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')\n else:\n image_reader = tf.image.decode_jpeg(file_reader, channels = 3,\n name='jpeg_reader')\n # image_reader = tf.image.decode_jpeg(\n # requests.get(file_name).content, channels=3, name=\"jpeg_reader\")\n float_caster = tf.cast(image_reader, tf.float32)\n dims_expander = tf.expand_dims(float_caster, 0);\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n # sess = tf.Session()\n result = sess.run(normalized)\n\n return result\n\ndef read_tensor_from_image_url(url, input_height=299, input_width=299,\n\t\t\t\tinput_mean=0, input_std=255):\n input_name = \"file_reader\"\n output_name = \"normalized\"\n file_reader = tf.read_file(url, input_name)\n if url.endswith(\".png\"):\n image_reader = tf.image.decode_png(requests.get(url).content, channels = 3,\n name='png_reader')\n elif url.endswith(\".gif\"):\n image_reader = tf.squeeze(tf.image.decode_gif(requests.get(url).content,\n name='gif_reader'))\n elif url.endswith(\".bmp\"):\n image_reader = tf.image.decode_bmp(requests.get(url).content, name='bmp_reader')\n else:\n image_reader = tf.image.decode_jpeg(\n requests.get(url).content, channels=3, name=\"jpeg_reader\")\n float_caster = tf.cast(image_reader, tf.float32)\n dims_expander = tf.expand_dims(float_caster, 0);\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n # sess = tf.Session()\n result = sess.run(normalized)\n\n return result\n\ndef load_labels(label_file):\n label = []\n proto_as_ascii_lines = tf.io.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return 
label\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=3008, host='0.0.0.0')\n # file_name = \"tf_files/flower_photos/daisy/3475870145_685a19116d.jpg\"\n # model_file = \"tf_files/retrained_graph.pb\"\n # label_file = \"tf_files/retrained_labels.txt\"\n # input_height = 224\n # input_width = 224\n # input_mean = 128\n # input_std = 128\n # input_layer = \"input\"\n # output_layer = \"final_result\"\n #\n # parser = argparse.ArgumentParser()\n # parser.add_argument(\"--image\", help=\"image to be processed\")\n # parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n # parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n # parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n # parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n # parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n # parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n # parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n # parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n # mainServer(parser)\n" ]
[ [ "tensorflow.Graph", "tensorflow.image.resize_bilinear", "tensorflow.import_graph_def", "tensorflow.read_file", "tensorflow.io.gfile.GFile", "numpy.squeeze", "tensorflow.cast", "tensorflow.image.decode_png", "tensorflow.expand_dims", "tensorflow.image.decode_bmp", "tensorflow.subtract", "tensorflow.ConfigProto", "tensorflow.image.decode_gif", "tensorflow.Session", "tensorflow.GraphDef", "tensorflow.image.decode_jpeg" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
sanweiliti/HMP
[ "abb37a553c9ebeccf746225331bd90ccc0e33df9" ]
[ "utils/Quaternions.py" ]
[ "import numpy as np\r\n\r\nclass Quaternions:\r\n \"\"\"\r\n Quaternions is a wrapper around a numpy ndarray\r\n that allows it to act as if it were an narray of\r\n a quaternion data type.\r\n \r\n Therefore addition, subtraction, multiplication,\r\n division, negation, absolute, are all defined\r\n in terms of quaternion operations such as quaternion\r\n multiplication.\r\n \r\n This allows for much neater code and many routines\r\n which conceptually do the same thing to be written\r\n in the same way for point data and for rotation data.\r\n \r\n The Quaternions class has been desgined such that it\r\n should support broadcasting and slicing in all of the\r\n usual ways.\r\n \"\"\"\r\n \r\n def __init__(self, qs):\r\n if isinstance(qs, np.ndarray):\r\n \r\n if len(qs.shape) == 1: qs = np.array([qs])\r\n self.qs = qs\r\n return\r\n \r\n if isinstance(qs, Quaternions):\r\n self.qs = qs.qs\r\n return\r\n \r\n raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))\r\n \r\n def __str__(self): return \"Quaternions(\"+ str(self.qs) + \")\"\r\n def __repr__(self): return \"Quaternions(\"+ repr(self.qs) + \")\"\r\n \r\n \"\"\" Helper Methods for Broadcasting and Data extraction \"\"\"\r\n \r\n @classmethod\r\n def _broadcast(cls, sqs, oqs, scalar=False):\r\n \r\n if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])\r\n \r\n ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])\r\n os = np.array(oqs.shape)\r\n \r\n if len(ss) != len(os):\r\n raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))\r\n \r\n if np.all(ss == os): return sqs, oqs\r\n \r\n if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):\r\n raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))\r\n \r\n sqsn, oqsn = sqs.copy(), oqs.copy()\r\n \r\n for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)\r\n for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)\r\n \r\n return sqsn, oqsn\r\n \r\n \"\"\" Adding Quaterions is just Defined as Multiplication \"\"\"\r\n \r\n def __add__(self, other): return self * other\r\n def __sub__(self, other): return self / other\r\n \r\n \"\"\" Quaterion Multiplication \"\"\"\r\n \r\n def __mul__(self, other):\r\n \"\"\"\r\n Quaternion multiplication has three main methods.\r\n \r\n When multiplying a Quaternions array by Quaternions\r\n normal quaternion multiplication is performed.\r\n \r\n When multiplying a Quaternions array by a vector\r\n array of the same shape, where the last axis is 3,\r\n it is assumed to be a Quaternion by 3D-Vector \r\n multiplication and the 3D-Vectors are rotated\r\n in space by the Quaternions.\r\n \r\n When multipplying a Quaternions array by a scalar\r\n or vector of different shape it is assumed to be\r\n a Quaternions by Scalars multiplication and the\r\n Quaternions are scaled using Slerp and the identity\r\n quaternions.\r\n \"\"\"\r\n \r\n \"\"\" If Quaternions type do Quaternions * Quaternions \"\"\"\r\n if isinstance(other, Quaternions):\r\n \r\n sqs, oqs = Quaternions._broadcast(self.qs, other.qs)\r\n \r\n q0 = sqs[...,0]; q1 = sqs[...,1]; \r\n q2 = sqs[...,2]; q3 = sqs[...,3]; \r\n r0 = oqs[...,0]; r1 = oqs[...,1]; \r\n r2 = oqs[...,2]; r3 = oqs[...,3]; \r\n \r\n qs = np.empty(sqs.shape)\r\n qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3\r\n qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2\r\n qs[...,2] = r0 * q2 + r1 * q3 + r2 
* q0 - r3 * q1\r\n qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0\r\n \r\n return Quaternions(qs)\r\n \r\n \"\"\" If array type do Quaternions * Vectors \"\"\"\r\n if isinstance(other, np.ndarray) and other.shape[-1] == 3:\r\n vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))\r\n return (self * (vs * -self)).imaginaries\r\n \r\n \"\"\" If float do Quaternions * Scalars \"\"\"\r\n if isinstance(other, np.ndarray) or isinstance(other, float):\r\n return Quaternions.slerp(Quaternions.id_like(self), self, other)\r\n \r\n raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))\r\n \r\n def __div__(self, other):\r\n \"\"\"\r\n When a Quaternion type is supplied, division is defined\r\n as multiplication by the inverse of that Quaternion.\r\n \r\n When a scalar or vector is supplied it is defined\r\n as multiplicaion of one over the supplied value.\r\n Essentially a scaling.\r\n \"\"\"\r\n \r\n if isinstance(other, Quaternions): return self * (-other)\r\n if isinstance(other, np.ndarray): return self * (1.0 / other)\r\n if isinstance(other, float): return self * (1.0 / other)\r\n raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))\r\n \r\n def __eq__(self, other): return self.qs == other.qs\r\n def __ne__(self, other): return self.qs != other.qs\r\n \r\n def __neg__(self):\r\n \"\"\" Invert Quaternions \"\"\"\r\n return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))\r\n \r\n def __abs__(self):\r\n \"\"\" Unify Quaternions To Single Pole \"\"\"\r\n qabs = self.normalized().copy()\r\n top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)\r\n bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)\r\n qabs.qs[top < bot] = -qabs.qs[top < bot]\r\n return qabs\r\n \r\n def __iter__(self): return iter(self.qs)\r\n def __len__(self): return len(self.qs)\r\n \r\n def __getitem__(self, k): return Quaternions(self.qs[k]) \r\n def __setitem__(self, k, v): self.qs[k] = v.qs\r\n \r\n @property\r\n def lengths(self):\r\n return np.sum(self.qs**2.0, axis=-1)**0.5\r\n \r\n @property\r\n def reals(self):\r\n return self.qs[...,0]\r\n \r\n @property\r\n def imaginaries(self):\r\n return self.qs[...,1:4]\r\n \r\n @property\r\n def shape(self): return self.qs.shape[:-1]\r\n \r\n def repeat(self, n, **kwargs):\r\n return Quaternions(self.qs.repeat(n, **kwargs))\r\n \r\n def normalized(self):\r\n return Quaternions(self.qs / self.lengths[...,np.newaxis])\r\n \r\n def log(self):\r\n norm = abs(self.normalized())\r\n imgs = norm.imaginaries\r\n lens = np.sqrt(np.sum(imgs**2, axis=-1))\r\n lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)\r\n return imgs * lens[...,np.newaxis]\r\n \r\n def constrained(self, axis):\r\n \r\n rl = self.reals\r\n im = np.sum(axis * self.imaginaries, axis=-1)\r\n \r\n t1 = -2 * np.arctan2(rl, im) + np.pi\r\n t2 = -2 * np.arctan2(rl, im) - np.pi\r\n \r\n top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))\r\n bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))\r\n img = self.dot(top) > self.dot(bot)\r\n \r\n ret = top.copy()\r\n ret[ img] = top[ img]\r\n ret[~img] = bot[~img]\r\n return ret\r\n \r\n def constrained_x(self): return self.constrained(np.array([1,0,0]))\r\n def constrained_y(self): return self.constrained(np.array([0,1,0]))\r\n def constrained_z(self): return self.constrained(np.array([0,0,1]))\r\n \r\n def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)\r\n \r\n def copy(self): return Quaternions(np.copy(self.qs))\r\n \r\n def 
reshape(self, s):\r\n self.qs.reshape(s)\r\n return self\r\n \r\n def interpolate(self, ws):\r\n return Quaternions.exp(np.average(abs(self).log, axis=0, weights=ws))\r\n \r\n def euler(self, order='xyz'):\r\n \r\n q = self.normalized().qs\r\n q0 = q[...,0]\r\n q1 = q[...,1]\r\n q2 = q[...,2]\r\n q3 = q[...,3]\r\n es = np.zeros(self.shape + (3,))\r\n \r\n if order == 'xyz':\r\n es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))\r\n es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))\r\n es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))\r\n elif order == 'yzx':\r\n es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)\r\n es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)\r\n es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))\r\n else:\r\n raise NotImplementedError('Cannot convert from ordering %s' % order)\r\n \r\n \"\"\"\r\n \r\n # These conversion don't appear to work correctly for Maya.\r\n # http://bediyap.com/programming/convert-quaternion-to-euler-rotations/\r\n \r\n if order == 'xyz':\r\n es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\r\n elif order == 'yzx':\r\n es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\r\n elif order == 'zxy':\r\n es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) \r\n elif order == 'xzy':\r\n es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)\r\n elif order == 'yxz':\r\n es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\r\n elif order == 'zyx':\r\n es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\r\n else:\r\n raise KeyError('Unknown ordering %s' % order)\r\n \r\n \"\"\"\r\n \r\n # https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp\r\n # Use this class and convert from matrix\r\n \r\n return es\r\n \r\n \r\n def average(self):\r\n \r\n if len(self.shape) == 1:\r\n \r\n import numpy.core.umath_tests as ut\r\n system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0)\r\n w, v = np.linalg.eigh(system)\r\n qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)\r\n return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))]) \r\n \r\n else:\r\n \r\n raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')\r\n\r\n def 
angle_axis(self):\r\n \r\n norm = self.normalized() \r\n s = np.sqrt(1 - (norm.reals**2.0))\r\n s[s == 0] = 0.001\r\n \r\n angles = 2.0 * np.arccos(norm.reals)\r\n axis = norm.imaginaries / s[...,np.newaxis]\r\n \r\n return angles, axis\r\n \r\n \r\n def transforms(self):\r\n \r\n qw = self.qs[...,0]\r\n qx = self.qs[...,1]\r\n qy = self.qs[...,2]\r\n qz = self.qs[...,3]\r\n \r\n x2 = qx + qx; y2 = qy + qy; z2 = qz + qz;\r\n xx = qx * x2; yy = qy * y2; wx = qw * x2;\r\n xy = qx * y2; yz = qy * z2; wy = qw * y2;\r\n xz = qx * z2; zz = qz * z2; wz = qw * z2;\r\n \r\n m = np.empty(self.shape + (3,3))\r\n m[...,0,0] = 1.0 - (yy + zz)\r\n m[...,0,1] = xy - wz\r\n m[...,0,2] = xz + wy \r\n m[...,1,0] = xy + wz\r\n m[...,1,1] = 1.0 - (xx + zz)\r\n m[...,1,2] = yz - wx \r\n m[...,2,0] = xz - wy\r\n m[...,2,1] = yz + wx\r\n m[...,2,2] = 1.0 - (xx + yy)\r\n \r\n return m\r\n \r\n def ravel(self):\r\n return self.qs.ravel()\r\n \r\n @classmethod\r\n def id(cls, n):\r\n \r\n if isinstance(n, tuple):\r\n qs = np.zeros(n + (4,))\r\n qs[...,0] = 1.0\r\n return Quaternions(qs)\r\n \r\n if isinstance(n, int) or isinstance(n, long):\r\n qs = np.zeros((n,4))\r\n qs[:,0] = 1.0\r\n return Quaternions(qs)\r\n \r\n raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))\r\n\r\n @classmethod\r\n def id_like(cls, a):\r\n qs = np.zeros(a.shape + (4,))\r\n qs[...,0] = 1.0\r\n return Quaternions(qs)\r\n \r\n @classmethod\r\n def exp(cls, ws):\r\n \r\n ts = np.sum(ws**2.0, axis=-1)**0.5\r\n ts[ts == 0] = 0.001\r\n ls = np.sin(ts) / ts\r\n \r\n qs = np.empty(ws.shape[:-1] + (4,))\r\n qs[...,0] = np.cos(ts)\r\n qs[...,1] = ws[...,0] * ls\r\n qs[...,2] = ws[...,1] * ls\r\n qs[...,3] = ws[...,2] * ls\r\n \r\n return Quaternions(qs).normalized()\r\n \r\n @classmethod\r\n def slerp(cls, q0s, q1s, a):\r\n \r\n fst, snd = cls._broadcast(q0s.qs, q1s.qs)\r\n fst, a = cls._broadcast(fst, a, scalar=True)\r\n snd, a = cls._broadcast(snd, a, scalar=True)\r\n \r\n len = np.sum(fst * snd, axis=-1)\r\n \r\n neg = len < 0.0\r\n len[neg] = -len[neg]\r\n snd[neg] = -snd[neg]\r\n \r\n amount0 = np.zeros(a.shape)\r\n amount1 = np.zeros(a.shape)\r\n\r\n linear = (1.0 - len) < 0.01\r\n omegas = np.arccos(len[~linear])\r\n sinoms = np.sin(omegas)\r\n \r\n amount0[ linear] = 1.0 - a[linear]\r\n amount1[ linear] = a[linear]\r\n amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms\r\n amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms\r\n \r\n return Quaternions(\r\n amount0[...,np.newaxis] * fst + \r\n amount1[...,np.newaxis] * snd)\r\n \r\n @classmethod\r\n def between(cls, v0s, v1s):\r\n a = np.cross(v0s, v1s)\r\n w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)\r\n return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()\r\n \r\n @classmethod\r\n def from_angle_axis(cls, angles, axis):\r\n axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis]\r\n sines = np.sin(angles / 2.0)[...,np.newaxis]\r\n cosines = np.cos(angles / 2.0)[...,np.newaxis]\r\n return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))\r\n \r\n @classmethod\r\n def from_euler(cls, es, order='xyz', world=False):\r\n \r\n axis = {\r\n 'x' : np.array([1,0,0]),\r\n 'y' : np.array([0,1,0]),\r\n 'z' : np.array([0,0,1]),\r\n }\r\n \r\n q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]])\r\n q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]])\r\n q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]])\r\n \r\n return (q2s * (q1s * 
q0s)) if world else (q0s * (q1s * q2s))\r\n \r\n @classmethod\r\n def from_transforms(cls, ts):\r\n \r\n d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]\r\n \r\n q0 = ( d0 + d1 + d2 + 1.0) / 4.0\r\n q1 = ( d0 - d1 - d2 + 1.0) / 4.0\r\n q2 = (-d0 + d1 - d2 + 1.0) / 4.0\r\n q3 = (-d0 - d1 + d2 + 1.0) / 4.0\r\n \r\n q0 = np.sqrt(q0.clip(0,None))\r\n q1 = np.sqrt(q1.clip(0,None))\r\n q2 = np.sqrt(q2.clip(0,None))\r\n q3 = np.sqrt(q3.clip(0,None))\r\n \r\n c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)\r\n c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)\r\n c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)\r\n c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)\r\n \r\n q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])\r\n q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])\r\n q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])\r\n \r\n q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])\r\n q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])\r\n q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0]) \r\n \r\n q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])\r\n q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])\r\n q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2]) \r\n \r\n q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])\r\n q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])\r\n q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2]) \r\n \r\n qs = np.empty(ts.shape[:-2] + (4,))\r\n qs[...,0] = q0\r\n qs[...,1] = q1\r\n qs[...,2] = q2\r\n qs[...,3] = q3\r\n \r\n return cls(qs)\r\n \r\n \r\n " ]
[ [ "numpy.sqrt", "numpy.cos", "numpy.arccos", "numpy.sin", "numpy.all", "numpy.sign", "numpy.arctan2", "numpy.copy", "numpy.where", "numpy.linalg.eigh", "numpy.concatenate", "numpy.cross", "numpy.ones", "numpy.core.umath_tests.matrix_multiply", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RingoIngo/gluon-ts
[ "62fb20c36025fc969653accaffaa783671709564", "62fb20c36025fc969653accaffaa783671709564", "62fb20c36025fc969653accaffaa783671709564", "62fb20c36025fc969653accaffaa783671709564" ]
[ "src/gluonts/nursery/tsbench/src/tsbench/surrogate/deepset.py", "src/gluonts/nursery/SCott/test/evaluation/test_evaluator.py", "src/gluonts/nursery/few_shot_prediction/src/meta/models/EcDc/components/feature.py", "src/gluonts/torch/model/deepar/estimator.py" ]
[ "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import cast, List, Literal, Optional\nimport numpy as np\nimport numpy.typing as npt\nimport pytorch_lightning as pl\nimport torch\nfrom lightkit.data import DataLoader\nfrom torch import nn\nfrom torch.utils.data import TensorDataset\nfrom tsbench.config import Config, EnsembleConfig\nfrom tsbench.evaluations.tracking import EnsembleTracker\nfrom tsbench.surrogate.torch.deepset import DeepSetModel\nfrom ._base import OutputNormalization, Surrogate\nfrom ._factory import register_ensemble_surrogate\nfrom .torch import DeepSetLightningModule, ListMLELoss\nfrom .transformers import EnsembleConfigTransformer\n\n\n@register_ensemble_surrogate(\"deepset\")\nclass DeepSetSurrogate(Surrogate[EnsembleConfig]):\n \"\"\"\n The DeepSet surrogate is similar to the MLP surrogate but makes predictions for ensembles\n rather than single models. Currently, it does not support the use of dataset features.\n \"\"\"\n\n trainer_: pl.Trainer\n models_: List[nn.Module]\n\n def __init__(\n self,\n tracker: EnsembleTracker,\n objective: Literal[\"regression\", \"ranking\"] = \"ranking\",\n discount: Optional[\n Literal[\"logarithmic\", \"linear\", \"quadratic\"]\n ] = \"linear\",\n hidden_layer_sizes: Optional[List[int]] = None,\n weight_decay: float = 0.01,\n dropout: float = 0.0,\n predict: Optional[List[str]] = None,\n output_normalization: OutputNormalization = None,\n impute_simulatable: bool = False,\n ):\n \"\"\"\n Args:\n tracker: A tracker that can be used to impute latency and number of model parameters\n into model performances. Also, it is required for some input features.\n objective: The optimization objective for the XGBoost estimators.\n discount: The discount to apply for the ranking loss. If provided, it focuses on\n correctly predicting the top values.\n hidden_layer_sizes: The dimensions of the hidden layers. Defaults to two hidden layers\n of size 32.\n weight_decay: The weight decay to apply during optimization.\n dropout: The dropout probability of dropout layers applied after every activation\n function.\n predict: The metrics to predict. All if not provided.\n output_normalization: The type of normalization to apply to the features of each\n dataset independently. 
`None` applies no normalization, \"quantile\" applies quantile\n normalization, and \"standard\" transforms data to have zero mean and unit variance.\n impute_simulatable: Whether the tracker should impute latency and number of model\n parameters into the returned performance object.\n \"\"\"\n\n super().__init__(\n tracker, predict, output_normalization, impute_simulatable\n )\n\n self.use_ranking = objective == \"ranking\"\n self.hidden_layer_sizes = hidden_layer_sizes or [32, 32]\n self.weight_decay = weight_decay\n self.dropout = dropout\n\n self.config_transformer = EnsembleConfigTransformer()\n\n if objective == \"regression\":\n self.loss = nn.MSELoss()\n elif objective == \"ranking\":\n self.loss = ListMLELoss(discount=discount)\n\n @property\n def required_cpus(self) -> int:\n return 4\n\n def _fit(\n self, X: List[Config[EnsembleConfig]], y: npt.NDArray[np.float32]\n ) -> None:\n # Fit transformers to infer dimensionality\n X_numpy_list = self.config_transformer.fit_transform(X)\n X_numpy = np.concatenate(X_numpy_list)\n X_lengths_numpy = np.array([x.shape[0] for x in X_numpy_list])\n\n input_dim = len(self.config_transformer.feature_names_)\n output_dim = y.shape[1]\n\n # For initializing data, we prepare group IDs for the datasets\n mapping = {d: i for i, d in enumerate({x.dataset for x in X})}\n\n # For each output variable, we need to train a separate model\n self.models_ = []\n for i in range(output_dim):\n model = self._init_model(input_dim)\n module = DeepSetLightningModule(\n model, self.loss, self.weight_decay\n )\n\n # Train on output variable i\n dataset = TensorDataset(\n torch.from_numpy(X_numpy).float(),\n torch.from_numpy(X_lengths_numpy).long(),\n torch.from_numpy(y[:, i : i + 1]).float(),\n torch.as_tensor(\n [mapping[x.dataset] for x in X], dtype=torch.long\n ),\n )\n train_loader = DataLoader(dataset, batch_size=len(dataset))\n self._trainer.fit(module, train_dataloaders=train_loader)\n\n # Add to models\n self.models_.append(model)\n\n def _predict(\n self, X: List[Config[EnsembleConfig]]\n ) -> npt.NDArray[np.float32]:\n # Get data\n X_numpy_list = self.config_transformer.transform(X)\n X_numpy = np.concatenate(X_numpy_list)\n X_lengths_numpy = np.array([x.shape[0] for x in X_numpy_list])\n\n dataset = TensorDataset(\n torch.from_numpy(X_numpy).float(),\n torch.from_numpy(X_lengths_numpy).long(),\n )\n test_loader = DataLoader(dataset, batch_size=len(dataset))\n\n # Run prediction\n predictions = []\n for model in self.models_:\n module = DeepSetLightningModule(model, self.loss)\n out = cast(\n List[torch.Tensor], self._trainer.predict(module, test_loader)\n )\n predictions.append(out[0].numpy())\n\n return np.concatenate(predictions, axis=-1)\n\n @property\n def _trainer(self) -> pl.Trainer:\n return pl.Trainer(\n max_epochs=1000,\n logger=False,\n enable_checkpointing=False,\n enable_model_summary=False,\n enable_progress_bar=False,\n gpus=int(torch.cuda.is_available()),\n )\n\n def _init_model(self, input_dim: int) -> nn.Module:\n return DeepSetModel(\n input_dim,\n self.hidden_layer_sizes[-1],\n 1,\n self.hidden_layer_sizes,\n self.hidden_layer_sizes,\n self.dropout,\n )\n", "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. 
This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Third-party imports\nimport numpy as np\nimport pandas as pd\nimport pytest\n\n# First-party imports\nfrom pts.evaluation import (\n Evaluator,\n MultivariateEvaluator,\n)\nfrom pts.feature import get_seasonality\nfrom pts.model.forecast import QuantileForecast, SampleForecast\n\nQUANTILES = [str(q / 10.0) for q in range(1, 10)]\n\n\ndef data_iterator(ts):\n \"\"\"\n :param ts: list of pd.Series or pd.DataFrame\n :return:\n \"\"\"\n for i in range(len(ts)):\n yield ts[i]\n\n\ndef fcst_iterator(fcst, start_dates, freq):\n \"\"\"\n :param fcst: list of numpy arrays with the sample paths\n :return:\n \"\"\"\n for i in range(len(fcst)):\n yield SampleForecast(\n samples=fcst[i], start_date=start_dates[i], freq=freq\n )\n\n\ndef iterator(it):\n \"\"\"\n Convenience function to toggle whether to consume dataset and forecasts as iterators or iterables.\n :param it:\n :return: it (as iterator)\n \"\"\"\n return iter(it)\n\n\ndef iterable(it):\n \"\"\"\n Convenience function to toggle whether to consume dataset and forecasts as iterators or iterables.\n :param it:\n :return: it (as iterable)\n \"\"\"\n return list(it)\n\n\ndef naive_forecaster(ts, prediction_length, num_samples=100, target_dim=0):\n \"\"\"\n :param ts: pandas.Series\n :param prediction_length:\n :param num_samples: number of sample paths\n :param target_dim: number of axes of target (0: scalar, 1: array, ...)\n :return: np.array with dimension (num_samples, prediction_length)\n \"\"\"\n\n # naive prediction: last observed value\n naive_pred = ts.values[-prediction_length - 1]\n assert len(naive_pred.shape) == target_dim\n return np.tile(\n naive_pred,\n (num_samples, prediction_length) + tuple(1 for _ in range(target_dim)),\n )\n\n\ndef naive_multivariate_forecaster(ts, prediction_length, num_samples=100):\n return naive_forecaster(ts, prediction_length, num_samples, target_dim=1)\n\n\ndef calculate_metrics(\n timeseries,\n evaluator,\n ts_datastructure,\n has_nans=False,\n forecaster=naive_forecaster,\n input_type=iterator,\n):\n num_timeseries = timeseries.shape[0]\n num_timestamps = timeseries.shape[1]\n\n if has_nans:\n timeseries[0, 1] = np.nan\n timeseries[0, 7] = np.nan\n\n num_samples = 100\n prediction_length = 3\n freq = \"1D\"\n\n ts_start_dates = (\n []\n ) # starting date of each time series - can be different in general\n pd_timeseries = [] # list of pandas.DataFrame\n samples = [] # list of forecast samples\n start_dates = [] # start date of the prediction range\n for i in range(num_timeseries):\n ts_start_dates.append(pd.Timestamp(year=2018, month=1, day=1, hour=1))\n index = pd.date_range(\n ts_start_dates[i], periods=num_timestamps, freq=freq\n )\n\n pd_timeseries.append(ts_datastructure(timeseries[i], index=index))\n samples.append(\n forecaster(pd_timeseries[i], prediction_length, num_samples)\n )\n start_dates.append(\n pd.date_range(\n ts_start_dates[i], periods=num_timestamps, freq=freq\n )[-prediction_length]\n )\n\n # data iterator\n data_iter = input_type(data_iterator(pd_timeseries))\n fcst_iter = input_type(fcst_iterator(samples, start_dates, freq))\n\n # evaluate\n agg_df, item_df = evaluator(data_iter, fcst_iter)\n return agg_df, item_df\n\n\nTIMESERIES_M4 = [\n np.array(\n [\n [\n 2.943_013,\n 2.822_251,\n 4.196_222,\n 1.328_664,\n 4.947_390,\n 3.333_131,\n 1.479_800,\n 
2.265_094,\n 3.413_493,\n 3.497_607,\n ],\n [\n -0.126_781_2,\n 3.057_412_2,\n 1.901_594_4,\n 2.772_549_5,\n 3.312_853_1,\n 4.411_818_0,\n 3.709_025_2,\n 4.322_028,\n 2.565_359,\n 3.074_308,\n ],\n [\n 2.542_998,\n 2.336_757,\n 1.417_916,\n 1.335_139,\n 2.523_035,\n 3.645_589,\n 3.382_819,\n 2.075_960,\n 2.643_869,\n 2.772_456,\n ],\n [\n 0.315_685_6,\n 1.892_312_1,\n 2.476_861_2,\n 3.511_628_6,\n 4.384_346_5,\n 2.960_685_6,\n 4.897_572_5,\n 3.280_125,\n 4.768_556,\n 4.958_616,\n ],\n [\n 2.205_877_3,\n 0.782_759_4,\n 2.401_420_8,\n 2.385_643_4,\n 4.845_818_2,\n 3.102_322_9,\n 3.567_723_7,\n 4.878_143,\n 3.735_245,\n 2.218_113,\n ],\n ]\n ),\n np.array(\n [\n [\n 13.11301,\n 13.16225,\n 14.70622,\n 12.00866,\n 15.79739,\n 14.35313,\n 12.66980,\n 13.62509,\n 14.94349,\n 15.19761,\n ],\n [\n 10.04322,\n 13.39741,\n 12.41159,\n 13.45255,\n 14.16285,\n 15.43182,\n 14.89903,\n 15.68203,\n 14.09536,\n 14.77431,\n ],\n [\n 12.71300,\n 12.67676,\n 11.92792,\n 12.01514,\n 13.37303,\n 14.66559,\n 14.57282,\n 13.43596,\n 14.17387,\n 14.47246,\n ],\n [\n 10.48569,\n 12.23231,\n 12.98686,\n 14.19163,\n 15.23435,\n 13.98069,\n 16.08757,\n 14.64012,\n 16.29856,\n 16.65862,\n ],\n [\n 12.37588,\n 11.12276,\n 12.91142,\n 13.06564,\n 15.69582,\n 14.12232,\n 14.75772,\n 16.23814,\n 15.26524,\n 13.91811,\n ],\n ]\n ),\n]\n\nRES_M4 = [\n {\n \"MASE\": 0.816_837_618,\n \"MAPE\": 0.324_517_430_685_928_1,\n \"sMAPE\": 0.326_973_268_4,\n \"seasonal_error\": np.array(\n [1.908_101, 1.258_838, 0.63018, 1.238_201, 1.287_771]\n ),\n },\n {\n \"MASE\": 0.723_948_2,\n \"MAPE\": 0.063_634_129_851_747_6,\n \"sMAPE\": 0.065_310_85,\n \"seasonal_error\": np.array(\n [1.867_847, 1.315_505, 0.602_587_4, 1.351_535, 1.339_179]\n ),\n },\n]\n\n\[email protected](\"timeseries, res\", zip(TIMESERIES_M4, RES_M4))\ndef test_MASE_sMAPE_M4(timeseries, res):\n ts_datastructure = pd.Series\n evaluator = Evaluator(quantiles=QUANTILES)\n agg_df, item_df = calculate_metrics(\n timeseries, evaluator, ts_datastructure\n )\n\n assert abs((agg_df[\"MASE\"] - res[\"MASE\"]) / res[\"MASE\"]) < 0.001, (\n \"Scores for the metric MASE do not match: \"\n \"\\nexpected: {} \\nobtained: {}\".format(res[\"MASE\"], agg_df[\"MASE\"])\n )\n assert abs((agg_df[\"MAPE\"] - res[\"MAPE\"]) / res[\"MAPE\"]) < 0.001, (\n \"Scores for the metric MAPE do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(res[\"MAPE\"], agg_df[\"MAPE\"])\n )\n assert abs((agg_df[\"sMAPE\"] - res[\"sMAPE\"]) / res[\"sMAPE\"]) < 0.001, (\n \"Scores for the metric sMAPE do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(res[\"sMAPE\"], agg_df[\"sMAPE\"])\n )\n assert (\n sum(abs(item_df[\"seasonal_error\"].values - res[\"seasonal_error\"]))\n < 0.001\n ), (\n \"Scores for the metric seasonal_error do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(\n res[\"seasonal_error\"], item_df[\"seasonal_error\"].values\n )\n )\n\n\nTIMESERIES = [\n np.ones((5, 10), dtype=np.float64),\n np.ones((5, 10), dtype=np.float64),\n np.arange(0, 50, dtype=np.float64).reshape(5, 10),\n np.arange(0, 50, dtype=np.float64).reshape(5, 10),\n np.array([[np.nan] * 10, [1.0] * 10]),\n]\n\nRES = [\n {\n \"MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 15.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"MAPE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n },\n {\n \"MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 14.0,\n \"abs_target_mean\": 1.0,\n 
\"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"MAPE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n },\n {\n \"MSE\": 4.666_666_666_666,\n \"abs_error\": 30.0,\n \"abs_target_sum\": 420.0,\n \"abs_target_mean\": 28.0,\n \"seasonal_error\": 1.0,\n \"MASE\": 2.0,\n \"MAPE\": 0.103_112_211_532_524_85,\n \"sMAPE\": 0.113_254_049_3,\n \"MSIS\": 80.0,\n \"RMSE\": 2.160_246_899_469_286_9,\n \"NRMSE\": 0.077_151_674_981_045_956,\n \"ND\": 0.071_428_571_428_571_42,\n \"MAE_Coverage\": 0.5,\n },\n {\n \"MSE\": 5.033_333_333_333_3,\n \"abs_error\": 29.0,\n \"abs_target_sum\": 413.0,\n \"abs_target_mean\": 28.1,\n \"seasonal_error\": 1.0,\n \"MASE\": 2.1,\n \"MAPE\": 0.113_032_846_453_159_77,\n \"sMAPE\": 0.125_854_781_903_299_57,\n \"MSIS\": 84.0,\n \"RMSE\": 2.243_509_156_061_845_6,\n \"NRMSE\": 0.079_840_183_489_745_39,\n \"ND\": 0.070_217_917_675_544_79,\n \"MAE_Coverage\": 0.5,\n },\n {\n \"MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 3.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"MAPE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n },\n]\n\nHAS_NANS = [False, True, False, True, True]\n\n\nINPUT_TYPE = [iterable, iterable, iterator, iterator, iterable]\n\n\[email protected](\n \"timeseries, res, has_nans, input_type\",\n zip(TIMESERIES, RES, HAS_NANS, INPUT_TYPE),\n)\ndef test_metrics(timeseries, res, has_nans, input_type):\n ts_datastructure = pd.Series\n evaluator = Evaluator(quantiles=QUANTILES, num_workers=0)\n agg_metrics, item_metrics = calculate_metrics(\n timeseries,\n evaluator,\n ts_datastructure,\n has_nans=has_nans,\n input_type=input_type,\n )\n\n for metric, score in agg_metrics.items():\n if metric in res.keys():\n assert abs(score - res[metric]) < 0.001, (\n \"Scores for the metric {} do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(metric, res[metric], score)\n )\n\n\[email protected](\n \"timeseries, res, has_nans, input_type\",\n zip(TIMESERIES, RES, HAS_NANS, INPUT_TYPE),\n)\ndef test_metrics_mp(timeseries, res, has_nans, input_type):\n ts_datastructure = pd.Series\n # Default will be multiprocessing evaluator\n evaluator = Evaluator(quantiles=QUANTILES, num_workers=4)\n agg_metrics, item_metrics = calculate_metrics(\n timeseries,\n evaluator,\n ts_datastructure,\n has_nans=has_nans,\n input_type=input_type,\n )\n\n for metric, score in agg_metrics.items():\n if metric in res.keys():\n assert abs(score - res[metric]) < 0.001, (\n \"Scores for the metric {} do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(metric, res[metric], score)\n )\n\n\nTIMESERIES_MULTIVARIATE = [\n np.ones((5, 10, 2), dtype=np.float64),\n np.ones((5, 10, 2), dtype=np.float64),\n np.ones((5, 10, 2), dtype=np.float64),\n np.stack(\n (\n np.arange(0, 50, dtype=np.float64).reshape(5, 10),\n np.arange(50, 100, dtype=np.float64).reshape(5, 10),\n ),\n axis=2,\n ),\n np.stack(\n (\n np.arange(0, 50, dtype=np.float64).reshape(5, 10),\n np.arange(50, 100, dtype=np.float64).reshape(5, 10),\n ),\n axis=2,\n ),\n np.stack(\n (\n np.arange(0, 50, dtype=np.float64).reshape(5, 10),\n np.arange(50, 100, dtype=np.float64).reshape(5, 10),\n ),\n axis=2,\n ),\n]\n\nRES_MULTIVARIATE = [\n {\n \"MSE\": 0.0,\n \"0_MSE\": 0.0,\n \"1_MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 15.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n 
\"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 0.0,\n },\n {\n \"MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 15.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 0.0,\n },\n {\n \"MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 30.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 0.0,\n },\n {\n \"MSE\": 4.666_666_666_666,\n \"abs_error\": 30.0,\n \"abs_target_sum\": 420.0,\n \"abs_target_mean\": 28.0,\n \"seasonal_error\": 1.0,\n \"MASE\": 2.0,\n \"sMAPE\": 0.113_254_049_3,\n \"MSIS\": 80.0,\n \"RMSE\": 2.160_246_899_469_286_9,\n \"NRMSE\": 0.077_151_674_981_045_956,\n \"ND\": 0.071_428_571_428_571_42,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 18.666_666_666_666,\n },\n {\n \"MSE\": 4.666_666_666_666,\n \"abs_error\": 30.0,\n \"abs_target_sum\": 1170.0,\n \"abs_target_mean\": 78.0,\n \"seasonal_error\": 1.0,\n \"MASE\": 2.0,\n \"sMAPE\": 0.026_842_301_756_499_45,\n \"MSIS\": 80.0,\n \"RMSE\": 2.160_246_899_469_286_9,\n \"NRMSE\": 0.027_695_473_070_119_065,\n \"ND\": 0.025_641_025_641_025_64,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 18.666_666_666_666,\n },\n {\n \"MSE\": 4.666_666_666_666,\n \"abs_error\": 60.0,\n \"abs_target_sum\": 1590.0,\n \"abs_target_mean\": 53.0,\n \"seasonal_error\": 1.0,\n \"MASE\": 2.0,\n \"sMAPE\": 0.070_048_175_528_249_73,\n \"MSIS\": 80.0,\n \"RMSE\": 2.160_246_899_469_286_9,\n \"NRMSE\": 0.040_759_375_461_684_65,\n \"ND\": 0.037_735_849_056_603_77,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 18.666_666_666_666,\n },\n]\n\nHAS_NANS_MULTIVARIATE = [False, False, False, False, False, False]\n\nEVAL_DIMS = [[0], [1], [0, 1], [0], [1], None]\n\nINPUT_TYPE = [iterable, iterable, iterator, iterator, iterable, iterator]\n\n\[email protected](\n \"timeseries, res, has_nans, eval_dims, input_type\",\n zip(\n TIMESERIES_MULTIVARIATE,\n RES_MULTIVARIATE,\n HAS_NANS_MULTIVARIATE,\n EVAL_DIMS,\n INPUT_TYPE,\n ),\n)\ndef test_metrics_multivariate(\n timeseries, res, has_nans, eval_dims, input_type\n):\n ts_datastructure = pd.DataFrame\n evaluator = MultivariateEvaluator(\n quantiles=QUANTILES,\n eval_dims=eval_dims,\n target_agg_funcs={\"sum\": np.sum},\n )\n\n agg_metrics, item_metrics = calculate_metrics(\n timeseries,\n evaluator,\n ts_datastructure,\n has_nans=has_nans,\n forecaster=naive_multivariate_forecaster,\n input_type=input_type,\n )\n\n for metric, score in agg_metrics.items():\n if metric in res.keys():\n assert abs(score - res[metric]) < 0.001, (\n \"Scores for the metric {} do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(metric, res[metric], score)\n )\n\n\ndef test_evaluation_with_QuantileForecast():\n start = \"2012-01-11\"\n target = [2.4, 1.0, 3.0, 4.4, 5.5, 4.9] * 11\n index = pd.date_range(start=start, freq=\"1D\", periods=len(target))\n ts = pd.Series(index=index, data=target)\n\n ev = Evaluator(quantiles=(\"0.1\", \"0.2\", \"0.5\"))\n\n fcst = [\n QuantileForecast(\n start_date=pd.Timestamp(\"2012-01-11\"),\n freq=\"D\",\n forecast_arrays=np.array([[2.4, 9.0, 3.0, 2.4, 5.5, 4.9] * 10]),\n forecast_keys=[\"0.5\"],\n )\n ]\n\n agg_metric, _ = ev(iter([ts]), iter(fcst))\n\n assert np.isfinite(agg_metric[\"wQuantileLoss[0.5]\"])\n\n\[email protected](\n \"freq, expected_seasonality\",\n [\n 
(\"1H\", 24),\n (\"H\", 24),\n (\"2H\", 12),\n (\"3H\", 8),\n (\"4H\", 6),\n (\"15H\", 1),\n (\"5B\", 1),\n (\"1B\", 5),\n (\"2W\", 1),\n (\"3M\", 4),\n (\"1D\", 1),\n (\"7D\", 1),\n (\"8D\", 1),\n ],\n)\ndef test_get_seasonality(freq, expected_seasonality):\n assert get_seasonality(freq) == expected_seasonality\n", "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom abc import ABC, abstractmethod\nimport torch\nfrom torch import nn\nfrom meta.data.batch import SeriesBatch\n\n\nclass FeatureExtractor(nn.Module, ABC):\n \"\"\"\n Base class for feature extractors.\n \"\"\"\n\n @abstractmethod\n def forward(self, series: SeriesBatch) -> SeriesBatch:\n \"\"\"\n Computes features for each time point for each time series.\n\n Parameters\n ----------\n series\n A SeriesBatch containing sequences of size [batch, sequence length, n_input],\n where n_input is typically 1 + * which corresponds to the univariate time series itself\n and additional time features, e.g. relative time.\n\n Returns\n -------\n SeriesBatch\n The transformed batch with sequences of size [batch, sequence length, n_features].\n \"\"\"\n\n\nclass IdentityFeatureExtractor(FeatureExtractor):\n \"\"\"\n Dummy feature extractor which implements the identity function.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.feature_extractor = nn.Identity()\n\n def forward(self, series: SeriesBatch) -> torch.Tensor:\n return self.feature_extractor(series)\n", "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import List, Optional, Iterable, Dict, Any\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom gluonts.core.component import validated\nfrom gluonts.dataset.common import Dataset\nfrom gluonts.dataset.field_names import FieldName\nfrom gluonts.itertools import Cyclic, PseudoShuffled, IterableSlice\nfrom gluonts.time_feature import (\n TimeFeature,\n time_features_from_frequency_str,\n)\nfrom gluonts.torch.modules.loss import DistributionLoss, NegativeLogLikelihood\nfrom gluonts.transform import (\n Transformation,\n Chain,\n RemoveFields,\n SetField,\n AsNumpyArray,\n AddObservedValuesIndicator,\n AddTimeFeatures,\n AddAgeFeature,\n VstackFeatures,\n InstanceSplitter,\n ValidationSplitSampler,\n TestSplitSampler,\n ExpectedNumInstanceSampler,\n SelectFields,\n)\nfrom gluonts.torch.util import (\n IterableDataset,\n)\nfrom gluonts.torch.model.estimator import PyTorchLightningEstimator\nfrom gluonts.torch.model.predictor import PyTorchPredictor\nfrom gluonts.torch.modules.distribution_output import (\n DistributionOutput,\n StudentTOutput,\n)\n\nfrom .module import DeepARModel\nfrom .lightning_module import DeepARLightningModule\n\nPREDICTION_INPUT_NAMES = [\n \"feat_static_cat\",\n \"feat_static_real\",\n \"past_time_feat\",\n \"past_target\",\n \"past_observed_values\",\n \"future_time_feat\",\n]\n\nTRAINING_INPUT_NAMES = PREDICTION_INPUT_NAMES + [\n \"future_target\",\n \"future_observed_values\",\n]\n\n\nclass DeepAREstimator(PyTorchLightningEstimator):\n @validated()\n def __init__(\n self,\n freq: str,\n prediction_length: int,\n context_length: Optional[int] = None,\n num_layers: int = 2,\n hidden_size: int = 40,\n dropout_rate: float = 0.1,\n num_feat_dynamic_real: int = 0,\n num_feat_static_cat: int = 0,\n num_feat_static_real: int = 0,\n cardinality: Optional[List[int]] = None,\n embedding_dimension: Optional[List[int]] = None,\n distr_output: DistributionOutput = StudentTOutput(),\n loss: DistributionLoss = NegativeLogLikelihood(),\n scaling: bool = True,\n lags_seq: Optional[List[int]] = None,\n time_features: Optional[List[TimeFeature]] = None,\n num_parallel_samples: int = 100,\n batch_size: int = 32,\n num_batches_per_epoch: int = 50,\n trainer_kwargs: Optional[Dict[str, Any]] = None,\n ) -> None:\n default_trainer_kwargs = {\n \"max_epochs\": 100,\n \"gradient_clip_val\": 10.0,\n }\n if trainer_kwargs is not None:\n default_trainer_kwargs.update(trainer_kwargs)\n super().__init__(trainer_kwargs=default_trainer_kwargs)\n\n self.freq = freq\n self.context_length = (\n context_length if context_length is not None else prediction_length\n )\n self.prediction_length = prediction_length\n self.distr_output = distr_output\n self.loss = loss\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.dropout_rate = dropout_rate\n self.num_feat_dynamic_real = num_feat_dynamic_real\n self.num_feat_static_cat = num_feat_static_cat\n self.num_feat_static_real = num_feat_static_real\n self.cardinality = (\n cardinality if cardinality and num_feat_static_cat > 0 else [1]\n )\n self.embedding_dimension = embedding_dimension\n self.scaling = scaling\n self.lags_seq = lags_seq\n self.time_features = (\n time_features\n if time_features is not None\n else time_features_from_frequency_str(self.freq)\n )\n\n self.num_parallel_samples = num_parallel_samples\n self.batch_size = batch_size\n self.num_batches_per_epoch = num_batches_per_epoch\n\n 
self.train_sampler = ExpectedNumInstanceSampler(\n num_instances=1.0, min_future=prediction_length\n )\n self.validation_sampler = ValidationSplitSampler(\n min_future=prediction_length\n )\n\n def create_transformation(self) -> Transformation:\n remove_field_names = []\n if self.num_feat_static_real == 0:\n remove_field_names.append(FieldName.FEAT_STATIC_REAL)\n if self.num_feat_dynamic_real == 0:\n remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)\n\n return Chain(\n [RemoveFields(field_names=remove_field_names)]\n + (\n [SetField(output_field=FieldName.FEAT_STATIC_CAT, value=[0])]\n if not self.num_feat_static_cat > 0\n else []\n )\n + (\n [\n SetField(\n output_field=FieldName.FEAT_STATIC_REAL, value=[0.0]\n )\n ]\n if not self.num_feat_static_real > 0\n else []\n )\n + [\n AsNumpyArray(\n field=FieldName.FEAT_STATIC_CAT,\n expected_ndim=1,\n dtype=int,\n ),\n AsNumpyArray(\n field=FieldName.FEAT_STATIC_REAL,\n expected_ndim=1,\n ),\n AsNumpyArray(\n field=FieldName.TARGET,\n # in the following line, we add 1 for the time dimension\n expected_ndim=1 + len(self.distr_output.event_shape),\n ),\n AddObservedValuesIndicator(\n target_field=FieldName.TARGET,\n output_field=FieldName.OBSERVED_VALUES,\n ),\n AddTimeFeatures(\n start_field=FieldName.START,\n target_field=FieldName.TARGET,\n output_field=FieldName.FEAT_TIME,\n time_features=self.time_features,\n pred_length=self.prediction_length,\n ),\n AddAgeFeature(\n target_field=FieldName.TARGET,\n output_field=FieldName.FEAT_AGE,\n pred_length=self.prediction_length,\n log_scale=True,\n ),\n VstackFeatures(\n output_field=FieldName.FEAT_TIME,\n input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]\n + (\n [FieldName.FEAT_DYNAMIC_REAL]\n if self.num_feat_dynamic_real > 0\n else []\n ),\n ),\n ]\n )\n\n def _create_instance_splitter(\n self, module: DeepARLightningModule, mode: str\n ):\n assert mode in [\"training\", \"validation\", \"test\"]\n\n instance_sampler = {\n \"training\": self.train_sampler,\n \"validation\": self.validation_sampler,\n \"test\": TestSplitSampler(),\n }[mode]\n\n return InstanceSplitter(\n target_field=FieldName.TARGET,\n is_pad_field=FieldName.IS_PAD,\n start_field=FieldName.START,\n forecast_start_field=FieldName.FORECAST_START,\n instance_sampler=instance_sampler,\n past_length=module.model._past_length,\n future_length=self.prediction_length,\n time_series_fields=[\n FieldName.FEAT_TIME,\n FieldName.OBSERVED_VALUES,\n ],\n dummy_value=self.distr_output.value_in_support,\n )\n\n def create_training_data_loader(\n self,\n data: Dataset,\n module: DeepARLightningModule,\n shuffle_buffer_length: Optional[int] = None,\n **kwargs,\n ) -> Iterable:\n transformation = self._create_instance_splitter(\n module, \"training\"\n ) + SelectFields(TRAINING_INPUT_NAMES)\n\n training_instances = transformation.apply(\n Cyclic(data)\n if shuffle_buffer_length is None\n else PseudoShuffled(\n Cyclic(data), shuffle_buffer_length=shuffle_buffer_length\n )\n )\n\n return IterableSlice(\n iter(\n DataLoader(\n IterableDataset(training_instances),\n batch_size=self.batch_size,\n **kwargs,\n )\n ),\n self.num_batches_per_epoch,\n )\n\n def create_validation_data_loader(\n self,\n data: Dataset,\n module: DeepARLightningModule,\n **kwargs,\n ) -> Iterable:\n transformation = self._create_instance_splitter(\n module, \"validation\"\n ) + SelectFields(TRAINING_INPUT_NAMES)\n\n validation_instances = transformation.apply(data)\n\n return DataLoader(\n IterableDataset(validation_instances),\n batch_size=self.batch_size,\n 
**kwargs,\n )\n\n def create_lightning_module(self) -> DeepARLightningModule:\n model = DeepARModel(\n freq=self.freq,\n context_length=self.context_length,\n prediction_length=self.prediction_length,\n num_feat_dynamic_real=(\n 1 + self.num_feat_dynamic_real + len(self.time_features)\n ),\n num_feat_static_real=max(1, self.num_feat_static_real),\n num_feat_static_cat=max(1, self.num_feat_static_cat),\n cardinality=self.cardinality,\n embedding_dimension=self.embedding_dimension,\n num_layers=self.num_layers,\n hidden_size=self.hidden_size,\n distr_output=self.distr_output,\n dropout_rate=self.dropout_rate,\n lags_seq=self.lags_seq,\n scaling=self.scaling,\n num_parallel_samples=self.num_parallel_samples,\n )\n\n return DeepARLightningModule(model=model, loss=self.loss)\n\n def create_predictor(\n self,\n transformation: Transformation,\n module: DeepARLightningModule,\n ) -> PyTorchPredictor:\n prediction_splitter = self._create_instance_splitter(module, \"test\")\n\n return PyTorchPredictor(\n input_transform=transformation + prediction_splitter,\n input_names=PREDICTION_INPUT_NAMES,\n prediction_net=module.model,\n batch_size=self.batch_size,\n freq=self.freq,\n prediction_length=self.prediction_length,\n device=torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n ),\n )\n" ]
[ [ "torch.from_numpy", "numpy.concatenate", "torch.cuda.is_available", "numpy.array", "torch.nn.MSELoss", "torch.as_tensor" ], [ "pandas.Series", "numpy.isfinite", "pandas.Timestamp", "numpy.arange", "numpy.ones", "pandas.date_range", "numpy.array" ], [ "torch.nn.Identity" ], [ "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Wayne-Mai/DynSLAM
[ "7b62e13d2a33ff58ca888a346433a4891a228a20" ]
[ "preprocessing/MaskRCNN/MaskRCNN_TUM.py" ]
[ "#!/usr/bin/env python3\n#\n# This file is part of https://github.com/martinruenz/maskfusion\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>\n#\n\n# To use this script, add the MaskRCNN directoy to your PYTHON_PATH\nimport sys\nimport os\n\nmask_rcnn_path = os.path.abspath(\"../Mask_RCNN\")\nsys.path.insert(0, mask_rcnn_path)\n\nimport random\nimport math\nimport numpy as np\nimport scipy.misc\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport argparse\nfrom samples.coco import coco\nfrom mrcnn import utils\nfrom mrcnn import model as modellib\nfrom mrcnn import visualize\nfrom PIL import Image\nfrom helpers import *\nimport time\nimport pytoml as toml\nimport scipy.misc\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", required=True, help=\"Input directory (all files are being processed)\")\nparser.add_argument(\"-c\", required=False, help=\"Optional config file, otherwise MsCoco is assumed\")\nparser.add_argument(\"-o\", required=True, help=\"Output directory\")\nparser.add_argument(\"--filter\", nargs='+', required=False,\n help=\"Specify which labels you would like to use (empty means all), example: --filter teddy_bear pizza baseball_bat\")\nargs = parser.parse_args()\n\n# FURTHER PARAMETERS\nEXTENSIONS = ['jpg', 'png']\nFILTER_IMAGE_NAME = \"\" # only use images, whose name contains this string (eg \"Color\")\nscore_threshold = 0.85\nSPECIAL_ASSIGNMENTS = {} #{'person': 255}\nSINGLE_INSTANCES = False\nOUTPUT_FRAMES = True\nSTORE_CLASS_IDS = True\nSTART_INDEX = 0\n\nIMAGE_DIR = args.i\nOUTPUT_DIR = args.o\nDATA_DIR = os.path.join(mask_rcnn_path, \"data\")\nMODEL_DIR = os.path.join(DATA_DIR, \"logs\")\nmodel_path = os.path.join(DATA_DIR, \"mask_rcnn_coco.h5\")\n\nfilter_classes = []\nif args.filter:\n filter_classes = args.filter\n filter_classes = [f.replace(\"_\", \" \") for f in filter_classes]\nclass_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush']\n\nif args.c:\n with open(args.c, 'rb') as toml_file:\n toml_config = toml.load(toml_file)\n class_names = toml_config[\"MaskRCNN\"][\"class_names\"]\n model_path = 
toml_config[\"MaskRCNN\"][\"model_path\"]\n filter_classes = toml_config[\"MaskRCNN\"][\"filter_classes\"]\n score_threshold = toml_config[\"MaskRCNN\"][\"score_threshold\"]\n\nfilter_classes = [class_names.index(x) for x in filter_classes]\nSPECIAL_ASSIGNMENTS = {class_names.index(x): SPECIAL_ASSIGNMENTS[x] for x in SPECIAL_ASSIGNMENTS}\n\nclass InferenceConfig(coco.CocoConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n NUM_CLASSES = len(class_names)\n\nconfig = InferenceConfig()\nconfig.display()\n\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(model_path, by_name=True)\n\nfile_names = [fn for fn in os.listdir(IMAGE_DIR) if any(fn.endswith(ext) for ext in EXTENSIONS)]\nfile_names.sort()\nif FILTER_IMAGE_NAME and FILTER_IMAGE_NAME != \"\":\n file_names = [fn for fn in file_names if FILTER_IMAGE_NAME in fn]\n\n# ALL TOGETHER:\n# print(\"Loading images...\")\n# loaded_images = [scipy.misc.imread(os.path.join(IMAGE_DIR, f)) for f in file_names]\n# print(\"Starting evaluation...\")\n# start_time = time.time()\n# results = model.detect(loaded_images, verbose=0)\n# duration = time.time() - start_time\n# print(\"Evaluation took {} seconds.\".format(duration))\n# for idx, result in enumerate(results):\n# out_path = os.path.join(\"/tmp/test\", \"{}.png\".format(idx))\n# output_mask_ids(result, out_path)\n\n\n# SEPARATELY\nfig = plt.figure()\nax = fig.add_subplot(111)\n# plt.show(block=False)\nplt.ion()\n#_, ax = plt.subplots(1, figsize=(16, 16))\nfor idx, file_name in enumerate(file_names):\n\n if idx < START_INDEX:\n continue\n\n base_name = str(idx).zfill(4)\n\n if os.path.isfile(os.path.join(OUTPUT_DIR, base_name + \".png\")):\n continue\n\n print(\"Starting to work on frame\", base_name)\n\n image = scipy.misc.imread(os.path.join(IMAGE_DIR, file_name))\n h, w = image.shape[:2]\n\n results = model.detect([image], verbose=0)\n r = results[0]\n\n if len(r['class_ids']) == 0:\n r['masks'] = np.empty(shape=[h, w, 0])\n r['scores'] = []\n r['class_ids'] = []\n r['rois'] = np.empty(shape=[0, 4])\n\n if SINGLE_INSTANCES:\n merge_instances(r)\n\n #out_path = os.path.join(OUTPUT_DIR, \"{}.png\".format(idx))\n id_image, exported_class_ids, exported_rois = generate_id_image(r, score_threshold, filter_classes, SPECIAL_ASSIGNMENTS)\n save_id_image(id_image, OUTPUT_DIR, base_name, exported_class_ids, STORE_CLASS_IDS, exported_rois)\n\n\n # Visualise\n ax.clear()\n filter_result(r, filter_classes)\n #visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n # class_names, r['scores'], score_threshold, ax=ax) # requires patched version\n visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'], ax=ax)\n fig.canvas.draw()\n if OUTPUT_FRAMES:\n plt.savefig(os.path.join(OUTPUT_DIR, base_name+\".jpg\"))\n" ]
[ [ "matplotlib.pyplot.ion", "numpy.empty", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AropJoe/milvus
[ "35612881e33ce19a7407628769f6b51a7518bfe9" ]
[ "tests/benchmark/milvus_benchmark/runners/utils.py" ]
[ "import os\nimport logging\nimport numpy as np\nimport sklearn.preprocessing\nimport h5py\nimport random\nfrom itertools import product\n\nfrom pymilvus import DataType\nfrom milvus_benchmark import config\n\nlogger = logging.getLogger(\"milvus_benchmark.runners.utils\")\n\nDELETE_INTERVAL_TIME = 2\n\nVECTORS_PER_FILE = 1000000\nSIFT_VECTORS_PER_FILE = 100000\nBINARY_VECTORS_PER_FILE = 2000000\n\nMAX_NQ = 10001\nFILE_PREFIX = \"binary_\"\n\nWARM_TOP_K = 1\nWARM_NQ = 1\nDEFAULT_DIM = 512\nDEFAULT_METRIC_TYPE = \"L2\"\n\nRANDOM_SRC_DATA_DIR = config.RAW_DATA_DIR + 'random/'\nSIFT_SRC_DATA_DIR = config.RAW_DATA_DIR + 'sift1b/'\nDEEP_SRC_DATA_DIR = config.RAW_DATA_DIR + 'deep1b/'\nJACCARD_SRC_DATA_DIR = config.RAW_DATA_DIR + 'jaccard/'\nHAMMING_SRC_DATA_DIR = config.RAW_DATA_DIR + 'hamming/'\nSTRUCTURE_SRC_DATA_DIR = config.RAW_DATA_DIR + 'structure/'\nBINARY_SRC_DATA_DIR = config.RAW_DATA_DIR + 'binary/'\nSIFT_SRC_GROUNDTRUTH_DATA_DIR = SIFT_SRC_DATA_DIR + 'gnd'\n\nDEFAULT_F_FIELD_NAME = 'float_vector'\nDEFAULT_B_FIELD_NAME = 'binary_vector'\nDEFAULT_INT_FIELD_NAME = 'int64'\nDEFAULT_FLOAT_FIELD_NAME = 'float'\nDEFAULT_DOUBLE_FIELD_NAME = \"double\"\n\nGROUNDTRUTH_MAP = {\n \"1000000\": \"idx_1M.ivecs\",\n \"2000000\": \"idx_2M.ivecs\",\n \"5000000\": \"idx_5M.ivecs\",\n \"10000000\": \"idx_10M.ivecs\",\n \"20000000\": \"idx_20M.ivecs\",\n \"50000000\": \"idx_50M.ivecs\",\n \"100000000\": \"idx_100M.ivecs\",\n \"200000000\": \"idx_200M.ivecs\",\n \"500000000\": \"idx_500M.ivecs\",\n \"1000000000\": \"idx_1000M.ivecs\",\n}\n\nMETRIC_MAP = {\n \"l2\": \"L2\",\n \"ip\": \"IP\",\n \"jaccard\": \"JACCARD\",\n \"hamming\": \"HAMMING\",\n \"sub\": \"SUBSTRUCTURE\",\n \"super\": \"SUPERSTRUCTURE\"\n}\n\n\ndef get_len_vectors_per_file(data_type, dimension):\n if data_type == \"random\":\n if dimension == 512:\n vectors_per_file = VECTORS_PER_FILE\n elif dimension == 4096:\n vectors_per_file = 100000\n elif dimension == 16384:\n vectors_per_file = 10000\n else:\n raise Exception(\"dimension: %s not supported\" % str(dimension))\n elif data_type == \"sift\":\n vectors_per_file = SIFT_VECTORS_PER_FILE\n elif data_type in [\"binary\"]:\n vectors_per_file = BINARY_VECTORS_PER_FILE\n elif data_type == \"local\":\n vectors_per_file = SIFT_VECTORS_PER_FILE\n else:\n raise Exception(\"data_type: %s not supported\" % data_type)\n return vectors_per_file\n\n\ndef get_vectors_from_binary(nq, dimension, data_type):\n # use the first file, nq should be less than VECTORS_PER_FILE 10001\n if nq > MAX_NQ:\n raise Exception(\"Over size nq\")\n if data_type == \"local\":\n return generate_vectors(nq, dimension)\n elif data_type == \"random\":\n file_name = RANDOM_SRC_DATA_DIR + 'query_%d.npy' % dimension\n elif data_type == \"sift\":\n file_name = SIFT_SRC_DATA_DIR + 'query.npy'\n elif data_type == \"deep\":\n file_name = DEEP_SRC_DATA_DIR + 'query.npy'\n elif data_type == \"binary\":\n file_name = BINARY_SRC_DATA_DIR + 'query.npy'\n else:\n raise Exception(\"There is no corresponding file for this data type %s.\" % str(data_type))\n data = np.load(file_name)\n vectors = data[0:nq].tolist()\n return vectors\n\n\ndef generate_vectors(nb, dim):\n return [[random.random() for _ in range(dim)] for _ in range(nb)]\n\n\ndef generate_values(data_type, vectors, ids):\n values = None\n if data_type in [DataType.INT32, DataType.INT64]:\n values = ids\n elif data_type in [DataType.FLOAT, DataType.DOUBLE]:\n values = [(i + 0.0) for i in ids]\n elif data_type in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]:\n values = vectors\n 
return values\n\n\ndef generate_entities(info, vectors, ids=None):\n entities = []\n for field in info[\"fields\"]:\n # if field[\"name\"] == \"_id\":\n # continue\n field_type = field[\"type\"]\n entities.append(\n {\"name\": field[\"name\"], \"type\": field_type, \"values\": generate_values(field_type, vectors, ids)})\n return entities\n\n\ndef metric_type_trans(metric_type):\n if metric_type in METRIC_MAP.keys():\n return METRIC_MAP[metric_type]\n else:\n raise Exception(\"metric_type: %s not in METRIC_MAP\" % metric_type)\n\n\ndef get_dataset(hdf5_file_path):\n \"\"\" Determine whether hdf5 file exists, and return the content of hdf5 file \"\"\"\n if not os.path.exists(hdf5_file_path):\n raise Exception(\"%s not existed\" % hdf5_file_path)\n dataset = h5py.File(hdf5_file_path)\n return dataset\n\n\ndef get_default_field_name(data_type=DataType.FLOAT_VECTOR):\n \"\"\" Return field name according to data type \"\"\"\n if data_type == DataType.FLOAT_VECTOR:\n field_name = DEFAULT_F_FIELD_NAME\n elif data_type == DataType.BINARY_VECTOR:\n field_name = DEFAULT_B_FIELD_NAME\n elif data_type == DataType.INT64:\n field_name = DEFAULT_INT_FIELD_NAME\n elif data_type == DataType.FLOAT:\n field_name = DEFAULT_FLOAT_FIELD_NAME\n else:\n logger.error(data_type)\n raise Exception(\"Not supported data type\")\n return field_name\n\n\ndef get_vector_type(data_type):\n \"\"\" Return vector type according to data type \"\"\"\n vector_type = ''\n if data_type in [\"random\", \"sift\", \"deep\", \"glove\", \"local\"]:\n vector_type = DataType.FLOAT_VECTOR\n elif data_type in [\"binary\"]:\n vector_type = DataType.BINARY_VECTOR\n else:\n raise Exception(\"Data type: %s not defined\" % data_type)\n return vector_type\n\n\ndef get_vector_type_from_metric(metric_type):\n if metric_type in [\"hamming\", \"jaccard\"]:\n vector_type = DataType.BINARY_VECTOR\n else:\n vector_type = DataType.FLOAT_VECTOR\n return vector_type\n\n\ndef normalize(metric_type, X):\n if metric_type == \"ip\":\n logger.info(\"Set normalize for metric_type: %s\" % metric_type)\n X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')\n X = X.astype(np.float32)\n elif metric_type == \"l2\":\n X = X.astype(np.float32)\n elif metric_type in [\"jaccard\", \"hamming\", \"sub\", \"super\"]:\n tmp = []\n for item in X:\n new_vector = bytes(np.packbits(item, axis=-1).tolist())\n tmp.append(new_vector)\n X = tmp\n return X\n\n\ndef generate_combinations(args):\n if isinstance(args, list):\n args = [el if isinstance(el, list) else [el] for el in args]\n return [list(x) for x in product(*args)]\n elif isinstance(args, dict):\n flat = []\n for k, v in args.items():\n if isinstance(v, list):\n flat.append([(k, el) for el in v])\n else:\n flat.append([(k, v)])\n return [dict(x) for x in product(*flat)]\n else:\n raise TypeError(\"No args handling exists for %s\" % type(args).__name__)\n\n\ndef gen_file_name(idx, dimension, data_type):\n s = \"%05d\" % idx\n fname = FILE_PREFIX + str(dimension) + \"d_\" + s + \".npy\"\n if data_type == \"random\":\n fname = RANDOM_SRC_DATA_DIR + fname\n elif data_type == \"sift\":\n fname = SIFT_SRC_DATA_DIR + fname\n elif data_type == \"deep\":\n fname = DEEP_SRC_DATA_DIR + fname\n elif data_type == \"jaccard\":\n fname = JACCARD_SRC_DATA_DIR + fname\n elif data_type == \"hamming\":\n fname = HAMMING_SRC_DATA_DIR + fname\n elif data_type == \"sub\" or data_type == \"super\":\n fname = STRUCTURE_SRC_DATA_DIR + fname\n return fname\n\n\ndef get_recall_value(true_ids, result_ids):\n \"\"\"\n Use the intersection 
length\n true_ids: neighbors taken from the dataset\n result_ids: ids returned by query\n \"\"\"\n sum_radio = 0.0\n for index, item in enumerate(result_ids):\n # tmp = set(item).intersection(set(flat_id_list[index]))\n\n # Get the value of true_ids and the returned value to do the intersection\n tmp = set(true_ids[index]).intersection(set(item))\n\n # Add up each ratio\n sum_radio = sum_radio + len(tmp) / len(item)\n # logger.debug(sum_radio)\n\n # Calculate the average ratio and take three digits after the decimal point\n return round(sum_radio / len(result_ids), 3)\n\n\ndef get_ground_truth_ids(collection_size):\n fname = GROUNDTRUTH_MAP[str(collection_size)]\n fname = SIFT_SRC_GROUNDTRUTH_DATA_DIR + \"/\" + fname\n a = np.fromfile(fname, dtype='int32')\n d = a[0]\n true_ids = a.reshape(-1, d + 1)[:, 1:].copy()\n return true_ids\n\n\ndef normalize(metric_type, X):\n if metric_type == \"ip\":\n logger.info(\"Set normalize for metric_type: %s\" % metric_type)\n X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')\n X = X.astype(np.float32)\n elif metric_type == \"l2\":\n X = X.astype(np.float32)\n elif metric_type in [\"jaccard\", \"hamming\", \"sub\", \"super\"]:\n tmp = []\n for item in X:\n new_vector = bytes(np.packbits(item, axis=-1).tolist())\n tmp.append(new_vector)\n X = tmp\n return X" ]
[ [ "numpy.load", "numpy.fromfile", "numpy.packbits" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gonzrubio/ML_Papers
[ "562f85c81b0afb8771708ff31063f722d838b9d2" ]
[ "GANs/WGANGP_Gulrajani_et_al_2017/driver.py" ]
[ "\"\"\"Improved Training of Wasserstein GANs.\n\nPapers:\n https://arxiv.org/abs/1701.07875\n https://arxiv.org/abs/1704.00028\n\nCreated on Tue Oct 26 15:17:08 2021\n\n@author: gonzo\n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nfrom torch.utils.data import DataLoader, ConcatDataset\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import transforms, datasets\nfrom torchvision.utils import make_grid\n\n\nfrom model import Generator, Critic\n\n\ndevice = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n\n\ndef gradient_penalty(critic, real, fake, device=device):\n BATCH_SIZE, C, H, W = real.shape\n alpha = torch.rand((BATCH_SIZE, 1, 1, 1)).repeat(1, C, H, W).to(device)\n interpolated_images = real * alpha + fake * (1 - alpha)\n\n # Calculate critic scores\n mixed_scores = critic(interpolated_images)\n\n # Take the gradient of the scores with respect to the images\n gradient = torch.autograd.grad(\n inputs=interpolated_images,\n outputs=mixed_scores,\n grad_outputs=torch.ones_like(mixed_scores),\n create_graph=True,\n retain_graph=True,\n )[0]\n gradient = gradient.view(gradient.shape[0], -1)\n gradient_norm = gradient.norm(2, dim=1)\n gradient_penalty = torch.mean((gradient_norm - 1) ** 2)\n\n return gradient_penalty\n\n\ndef main():\n\n # Data\n train_dataset = datasets.CIFAR10(\n root='./data/train',\n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[-0.0163, -0.0347, -0.1056],\n std=[0.4045, 0.3987, 0.4020]),\n ])\n )\n\n test_dataset = datasets.CIFAR10(\n root='./data/test/',\n train=False,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[-0.0163, -0.0347, -0.1056],\n std=[0.4045, 0.3987, 0.4020]),\n ])\n )\n\n dataset = ConcatDataset([train_dataset, test_dataset])\n\n # Hyperparameters\n epochs = 200\n critic_iterations = 5\n lambda_gp = 10\n z_dim = 100\n batch_size = 2 ** 9\n fixed_noise = torch.randn((batch_size, z_dim, 1, 1), device=device)\n\n generator = Generator().to(device)\n critic = Critic().to(device)\n\n total_params = sum(p.numel() for p in generator.parameters())\n total_params += sum(p.numel() for p in critic.parameters())\n print(f'Number of parameters: {total_params:,}')\n\n lr_G = 5e-4\n lr_D = 4e-6\n betas = (0.0, 0.9)\n\n optim_G = optim.Adam(generator.parameters(), lr=lr_G, betas=betas)\n optim_C = optim.Adam(critic.parameters(), lr=lr_D, betas=betas)\n sched_G = CosineAnnealingLR(optim_G, T_max=20, eta_min=0)\n sched_C = CosineAnnealingLR(optim_C, T_max=20, eta_min=0)\n\n loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n writer = SummaryWriter(\"logs/fake\")\n step = 0\n\n for epoch in range(epochs):\n for batch_idx, (real, label) in enumerate(loader):\n\n # real = real.reshape((-1, 3, 32, 32)).to(device)\n real = real.to(device)\n\n for iteration in range(critic_iterations):\n noise = torch.randn((real.shape[0], z_dim, 1, 1), device=device)\n fake = generator(noise)\n critic_real = critic(real).reshape(-1)\n critic_fake = critic(fake).reshape(-1)\n gp = gradient_penalty(critic, real, fake, device=device)\n loss_critic = torch.mean(critic_fake) - torch.mean(critic_real)\n loss_critic += lambda_gp * gp\n loss_C = torch.mean(critic_fake) - torch.mean(critic_real)\n critic.zero_grad(set_to_none=True)\n loss_C.backward(retain_graph=True)\n optim_C.step()\n sched_C.step()\n\n # Minimize Generator\n C_fake = 
critic(fake)\n loss_G = -torch.mean(C_fake)\n generator.zero_grad(set_to_none=True)\n loss_G.backward()\n optim_G.step()\n sched_G.step()\n\n if batch_idx % 25 == 0:\n\n print(f\"{epoch}.{batch_idx} {loss_C: .3e} {loss_G: .3e}\")\n\n generator.eval()\n with torch.no_grad():\n fake = generator(fixed_noise)\n img_grid = make_grid(fake, normalize=True)\n writer.add_image(\"Fake Images\", img_grid, global_step=step)\n step += 1\n generator.train()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.mean", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.randn", "torch.utils.data.DataLoader", "torch.utils.data.ConcatDataset", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.rand", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KireinaHoro/Ax
[ "16cb868911eecba323759e2e129df8833361e614" ]
[ "ax/modelbridge/factory.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom logging import Logger\nfrom typing import Any, Dict, List, Optional, Type\n\nimport torch\nfrom ax.core.data import Data\nfrom ax.core.experiment import Experiment\nfrom ax.core.multi_type_experiment import MultiTypeExperiment\nfrom ax.core.objective import MultiObjective\nfrom ax.core.observation import ObservationFeatures\nfrom ax.core.optimization_config import OptimizationConfig\nfrom ax.core.search_space import SearchSpace\nfrom ax.core.types import TConfig\nfrom ax.modelbridge.discrete import DiscreteModelBridge\nfrom ax.modelbridge.multi_objective_torch import MultiObjectiveTorchModelBridge\nfrom ax.modelbridge.random import RandomModelBridge\nfrom ax.modelbridge.registry import (\n Cont_X_trans,\n Models,\n MT_MTGP_trans,\n ST_MTGP_trans,\n Y_trans,\n)\nfrom ax.modelbridge.torch import TorchModelBridge\nfrom ax.modelbridge.transforms.base import Transform\nfrom ax.modelbridge.transforms.convert_metric_names import tconfig_from_mt_experiment\nfrom ax.models.torch.botorch import (\n BotorchModel,\n TAcqfConstructor,\n TModelConstructor,\n TModelPredictor,\n TOptimizer,\n)\nfrom ax.models.torch.botorch_defaults import (\n get_and_fit_model,\n get_NEI,\n predict_from_model,\n scipy_optimizer,\n)\nfrom ax.utils.common.logger import get_logger\nfrom ax.utils.common.typeutils import checked_cast\n\n\nlogger: Logger = get_logger(__name__)\n\n\nDEFAULT_TORCH_DEVICE = torch.device(\"cpu\")\nDEFAULT_EHVI_BATCH_LIMIT = 5\n\n\n\"\"\"\nModule containing functions that generate standard models, such as Sobol,\nGP+EI, etc.\n\nNote: a special case here is a composite generator, which requires an\nadditional ``GenerationStrategy`` and is able to delegate work to multiple models\n(for instance, to a random model to generate the first trial, and to an\noptimization model for subsequent trials).\n\n\"\"\"\n\n\ndef get_sobol(\n search_space: SearchSpace,\n seed: Optional[int] = None,\n deduplicate: bool = False,\n init_position: int = 0,\n scramble: bool = True,\n) -> RandomModelBridge:\n \"\"\"Instantiates a Sobol sequence quasi-random generator.\n\n Args:\n search_space: Sobol generator search space.\n kwargs: Custom args for sobol generator.\n\n Returns:\n RandomModelBridge, with SobolGenerator as model.\n \"\"\"\n return checked_cast(\n RandomModelBridge,\n Models.SOBOL(\n search_space=search_space,\n seed=seed,\n deduplicate=deduplicate,\n init_position=init_position,\n scramble=scramble,\n ),\n )\n\n\ndef get_uniform(\n search_space: SearchSpace, deduplicate: bool = False, seed: Optional[int] = None\n) -> RandomModelBridge:\n \"\"\"Instantiate uniform generator.\n\n Args:\n search_space: Uniform generator search space.\n kwargs: Custom args for uniform generator.\n\n Returns:\n RandomModelBridge, with UniformGenerator as model.\n \"\"\"\n return checked_cast(\n RandomModelBridge,\n Models.UNIFORM(search_space=search_space, seed=seed, deduplicate=deduplicate),\n )\n\n\ndef get_botorch(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,\n transform_configs: Optional[Dict[str, TConfig]] = None,\n model_constructor: TModelConstructor = get_and_fit_model,\n model_predictor: TModelPredictor = predict_from_model,\n 
acqf_constructor: TAcqfConstructor = get_NEI, # pyre-ignore[9]\n acqf_optimizer: TOptimizer = scipy_optimizer, # pyre-ignore[9]\n refit_on_cv: bool = False,\n refit_on_update: bool = True,\n optimization_config: Optional[OptimizationConfig] = None,\n) -> TorchModelBridge:\n \"\"\"Instantiates a BotorchModel.\"\"\"\n if data.df.empty: # pragma: no cover\n raise ValueError(\"`BotorchModel` requires non-empty data.\")\n return checked_cast(\n TorchModelBridge,\n Models.BOTORCH(\n experiment=experiment,\n data=data,\n search_space=search_space or experiment.search_space,\n torch_dtype=dtype,\n torch_device=device,\n transforms=transforms,\n transform_configs=transform_configs,\n model_constructor=model_constructor,\n model_predictor=model_predictor,\n acqf_constructor=acqf_constructor,\n acqf_optimizer=acqf_optimizer,\n refit_on_cv=refit_on_cv,\n refit_on_update=refit_on_update,\n optimization_config=optimization_config,\n ),\n )\n\n\ndef get_GPEI(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n) -> TorchModelBridge:\n \"\"\"Instantiates a GP model that generates points with EI.\"\"\"\n if data.df.empty: # pragma: no cover\n raise ValueError(\"GP+EI BotorchModel requires non-empty data.\")\n return checked_cast(\n TorchModelBridge,\n Models.BOTORCH(\n experiment=experiment,\n data=data,\n search_space=search_space or experiment.search_space,\n torch_dtype=dtype,\n torch_device=device,\n ),\n )\n\n\ndef get_GPKG(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n cost_intercept: float = 0.01,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,\n transform_configs: Optional[Dict[str, TConfig]] = None,\n **kwargs: Any,\n) -> TorchModelBridge:\n \"\"\"Instantiates a GP model that generates points with KG.\"\"\"\n if search_space is None:\n search_space = experiment.search_space\n if data.df.empty: # pragma: no cover\n raise ValueError(\"GP+KG BotorchModel requires non-empty data.\")\n\n inputs = {\n \"search_space\": search_space,\n \"experiment\": experiment,\n \"data\": data,\n \"cost_intercept\": cost_intercept,\n \"torch_dtype\": dtype,\n \"torch_device\": device,\n \"transforms\": transforms,\n \"transform_configs\": transform_configs,\n }\n\n if any(p.is_fidelity for k, p in experiment.parameters.items()):\n inputs[\"linear_truncated\"] = kwargs.get(\"linear_truncated\", True)\n return checked_cast(TorchModelBridge, Models.GPKG(**inputs)) # pyre-ignore: [16]\n\n\n# TODO[Lena]: how to instantiate MTGP through the enum? 
The Multi-type MTGP requires\n# a MultiTypeExperiment, so we would need validation for that, but more importantly,\n# we need to create `trial_index_to_type` as in the factory function below.\n# Maybe `MultiTypeExperiment` could have that mapping as a property?\ndef get_MTGP(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n trial_index: Optional[int] = None,\n) -> TorchModelBridge:\n \"\"\"Instantiates a Multi-task Gaussian Process (MTGP) model that generates\n points with EI.\n\n If the input experiment is a MultiTypeExperiment then a\n Multi-type Multi-task GP model will be instantiated.\n Otherwise, the model will be a Single-type Multi-task GP.\n \"\"\"\n\n if isinstance(experiment, MultiTypeExperiment):\n trial_index_to_type = {\n t.index: t.trial_type for t in experiment.trials.values()\n }\n transforms = MT_MTGP_trans\n transform_configs = {\n \"TrialAsTask\": {\"trial_level_map\": {\"trial_type\": trial_index_to_type}},\n \"ConvertMetricNames\": tconfig_from_mt_experiment(experiment),\n }\n else:\n # Set transforms for a Single-type MTGP model.\n transforms = ST_MTGP_trans\n transform_configs = None\n\n # Choose the status quo features for the experiment from the selected trial.\n # If trial_index is None, we will look for a status quo from the last\n # experiment trial to use as a status quo for the experiment.\n if trial_index is None:\n trial_index = len(experiment.trials) - 1\n elif trial_index >= len(experiment.trials):\n raise ValueError(\"trial_index is bigger than the number of experiment trials\")\n\n # pyre-fixme[16]: `ax.core.base_trial.BaseTrial` has no attribute `status_quo`.\n status_quo = experiment.trials[trial_index].status_quo\n if status_quo is None:\n status_quo_features = None\n else:\n status_quo_features = ObservationFeatures(\n parameters=status_quo.parameters, trial_index=trial_index\n )\n\n return TorchModelBridge(\n experiment=experiment,\n search_space=search_space or experiment.search_space,\n data=data,\n model=BotorchModel(),\n transforms=transforms,\n # pyre-fixme[6]: Expected `Optional[Dict[str, Dict[str,\n # typing.Union[botorch.acquisition.acquisition.AcquisitionFunction, float,\n # int, str]]]]` for 6th param but got `Optional[Dict[str,\n # typing.Union[Dict[str, Dict[str, Dict[int, Optional[str]]]], Dict[str,\n # typing.Union[botorch.acquisition.acquisition.AcquisitionFunction, float,\n # int, str]]]]]`.\n transform_configs=transform_configs,\n torch_dtype=torch.double,\n torch_device=DEFAULT_TORCH_DEVICE,\n status_quo_features=status_quo_features,\n )\n\n\ndef get_factorial(search_space: SearchSpace) -> DiscreteModelBridge:\n \"\"\"Instantiates a factorial generator.\"\"\"\n return checked_cast(\n DiscreteModelBridge,\n Models.FACTORIAL(search_space=search_space, fit_out_of_design=True),\n )\n\n\ndef get_empirical_bayes_thompson(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n num_samples: int = 10000,\n min_weight: Optional[float] = None,\n uniform_weights: bool = False,\n) -> DiscreteModelBridge:\n \"\"\"Instantiates an empirical Bayes / Thompson sampling model.\"\"\"\n if data.df.empty: # pragma: no cover\n raise ValueError(\"Empirical Bayes Thompson sampler requires non-empty data.\")\n return checked_cast(\n DiscreteModelBridge,\n Models.EMPIRICAL_BAYES_THOMPSON(\n experiment=experiment,\n data=data,\n search_space=search_space or experiment.search_space,\n num_samples=num_samples,\n min_weight=min_weight,\n uniform_weights=uniform_weights,\n 
fit_out_of_design=True,\n ),\n )\n\n\ndef get_thompson(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n num_samples: int = 10000,\n min_weight: Optional[float] = None,\n uniform_weights: bool = False,\n) -> DiscreteModelBridge:\n \"\"\"Instantiates a Thompson sampling model.\"\"\"\n if data.df.empty: # pragma: no cover\n raise ValueError(\"Thompson sampler requires non-empty data.\")\n return checked_cast(\n DiscreteModelBridge,\n Models.THOMPSON(\n experiment=experiment,\n data=data,\n search_space=search_space or experiment.search_space,\n num_samples=num_samples,\n min_weight=min_weight,\n uniform_weights=uniform_weights,\n fit_out_of_design=True,\n ),\n )\n\n\ndef get_GPMES(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n cost_intercept: float = 0.01,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,\n transform_configs: Optional[Dict[str, TConfig]] = None,\n **kwargs: Any,\n) -> TorchModelBridge:\n \"\"\"Instantiates a GP model that generates points with MES.\"\"\"\n if search_space is None:\n search_space = experiment.search_space\n if data.df.empty: # pragma: no cover\n raise ValueError(\"GP + MES BotorchModel requires non-empty data.\")\n\n inputs = {\n \"search_space\": search_space,\n \"experiment\": experiment,\n \"data\": data,\n \"cost_intercept\": cost_intercept,\n \"torch_dtype\": dtype,\n \"torch_device\": device,\n \"transforms\": transforms,\n \"transform_configs\": transform_configs,\n }\n\n if any(p.is_fidelity for k, p in experiment.parameters.items()):\n inputs[\"linear_truncated\"] = kwargs.get(\"linear_truncated\", True)\n return checked_cast(TorchModelBridge, Models.GPMES(**inputs)) # pyre-ignore: [16]\n\n\ndef get_MOO_EHVI(\n experiment: Experiment,\n data: Data,\n ref_point: Dict[str, float],\n search_space: Optional[SearchSpace] = None,\n dtype: torch.dtype = torch.double,\n device: torch.device = (\n torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n ),\n) -> MultiObjectiveTorchModelBridge:\n \"\"\"Instantiates a multi-objective model that generates points with EHVI.\n\n Requires a `ref_point`, a dictionary of the metric name to the reference point value\n for every objective being optimized. 
An arm only improves hypervolume if it is\n strictly better than this point in all metrics.\n \"\"\"\n # pyre-ignore: [16] `Optional` has no attribute `objective`.\n if not isinstance(experiment.optimization_config.objective, MultiObjective):\n raise ValueError(\"Multi-objective optimization requires multiple objectives.\")\n if data.df.empty: # pragma: no cover\n raise ValueError(\"MultiObjectiveOptimization requires non-empty data.\")\n return checked_cast(\n MultiObjectiveTorchModelBridge,\n Models.MOO(\n experiment=experiment,\n data=data,\n ref_point=ref_point,\n search_space=search_space or experiment.search_space,\n torch_dtype=dtype,\n torch_device=device,\n default_model_gen_options={\n \"acquisition_function_kwargs\": {\"sequential\": True},\n \"optimizer_kwargs\": {\n # having a batch limit is very important for avoiding\n # memory issues in the initialization\n \"batch_limit\": DEFAULT_EHVI_BATCH_LIMIT\n },\n },\n ),\n )\n\n\ndef get_MOO_PAREGO(\n experiment: Experiment,\n data: Data,\n ref_point: Optional[List[float]] = None,\n search_space: Optional[SearchSpace] = None,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n) -> MultiObjectiveTorchModelBridge:\n \"\"\"Instantiates a multi-objective model that generates points with ParEGO.\n\n qParEGO optimizes random augmented chebyshev scalarizations of the multiple\n objectives. This allows it to explore non-convex pareto frontiers.\n \"\"\"\n # pyre-ignore: [16] `Optional` has no attribute `objective`.\n if not isinstance(experiment.optimization_config.objective, MultiObjective):\n raise ValueError(\"Multi-Objective optimization requires multiple objectives\")\n if data.df.empty:\n raise ValueError(\"MultiObjectiveOptimization requires non-empty data.\")\n return checked_cast(\n MultiObjectiveTorchModelBridge,\n Models.MOO(\n experiment=experiment,\n data=data,\n ref_point=ref_point,\n search_space=search_space or experiment.search_space,\n torch_dtype=dtype,\n torch_device=device,\n acqf_constructor=get_NEI,\n default_model_gen_options={\n \"acquisition_function_kwargs\": {\n \"chebyshev_scalarization\": True,\n \"sequential\": True,\n }\n },\n ),\n )\n\n\ndef get_MOO_RS(\n experiment: Experiment,\n data: Data,\n ref_point: Optional[List[float]] = None,\n search_space: Optional[SearchSpace] = None,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n) -> MultiObjectiveTorchModelBridge:\n \"\"\"Instantiates a Random Scalarization multi-objective model.\n\n Chooses a different random linear scalarization of the objectives\n for generating each new candidate arm. This will only explore the\n convex hull of the pareto frontier.\n \"\"\"\n # pyre-ignore: [16] `Optional` has no attribute `objective`.\n if not isinstance(experiment.optimization_config.objective, MultiObjective):\n raise ValueError(\"Multi-Objective optimization requires multiple objectives\")\n if data.df.empty:\n raise ValueError(\"MultiObjectiveOptimization requires non-empty data.\")\n return checked_cast(\n MultiObjectiveTorchModelBridge,\n Models.MOO(\n experiment=experiment,\n data=data,\n ref_point=ref_point,\n search_space=search_space or experiment.search_space,\n torch_dtype=dtype,\n torch_device=device,\n acqf_constructor=get_NEI,\n default_model_gen_options={\n \"acquisition_function_kwargs\": {\n \"random_scalarization\": True,\n \"sequential\": True,\n }\n },\n ),\n )\n" ]
[ [ "torch.device", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maximilian-hoffmann/FINE
[ "62828f5feefefc2208dde0133435979d63398cc1" ]
[ "FINE/storage.py" ]
[ "from FINE.component import Component, ComponentModel\nfrom FINE import utils\nimport pyomo.environ as pyomo\nimport warnings\nimport pandas as pd\n\n\nclass Storage(Component):\n \"\"\"\n A Storage component can store a commodity and thus transfers it between time steps.\n \"\"\"\n def __init__(self, esM, name, commodity, chargeRate=1, dischargeRate=1,\n chargeEfficiency=1, dischargeEfficiency=1, selfDischarge=0, cyclicLifetime=None,\n stateOfChargeMin=0, stateOfChargeMax=1,\n hasCapacityVariable=True, capacityVariableDomain='continuous', capacityPerPlantUnit=1,\n hasIsBuiltBinaryVariable=False, bigM=None, doPreciseTsaModeling=False,\n chargeOpRateMax=None, chargeOpRateFix=None, chargeTsaWeight=1,\n dischargeOpRateMax=None, dischargeOpRateFix=None, dischargeTsaWeight=1,\n isPeriodicalStorage=False,\n locationalEligibility=None, capacityMin=None, capacityMax=None, sharedPotentialID=None,\n capacityFix=None, isBuiltFix=None,\n investPerCapacity=0, investIfBuilt=0, opexPerChargeOperation=0,\n opexPerDischargeOperation=0, opexPerCapacity=0, opexIfBuilt=0, interestRate=0.08, economicLifetime=10):\n \"\"\"\n Constructor for creating an Storage class instance.\n The Storage component specific input arguments are described below. The general component\n input arguments are described in the Component class.\n\n **Required arguments:**\n\n :param commodity: to the component related commodity.\n :type commodity: string\n\n **Default arguments:**\n\n :param chargeRate: ratio of the maximum storage inflow (in commodityUnit/hour) and the\n storage capacity (in commodityUnit). Example:\\n\n * A hydrogen salt cavern which can store 133 GWh_H2_LHV can be charged 0.45 GWh_H2_LHV during\n one hour. The chargeRate thus equals 0.45/133.\\n\n |br| * the default value is 1\n :type chargeRate: 0 <= float <=1\n\n :param dischargeRate: ratio of the maximum storage outflow (in commodityUnit/hour) and\n the storage capacity (in commodityUnit). Example:\\n\n * A hydrogen salt cavern which can store 133 GWh_H2_LHV can be discharged 0.45 GWh_H2_LHV during\n one hour. 
The dischargeRate thus equals 0.45/133.\\n\n |br| * the default value is 1\n :type dischargeRate: 0 <= float <=1\n\n :param chargeEfficiency: defines the efficiency with which the storage can be charged (equals\n the percentage of the injected commodity that is transformed into stored commodity).\n Enter 0.98 for 98% etc.\n |br| * the default value is 1\n :type chargeEfficiency: 0 <= float <=1\n\n :param dischargeEfficiency: defines the efficiency with which the storage can be discharged\n (equals the percentage of the withdrawn commodity that is transformed into stored commodity).\n Enter 0.98 for 98% etc.\n |br| * the default value is 1\n :type dischargeEfficiency: 0 <= float <=1\n\n :param selfDischarge: percentage of self-discharge from the storage during one hour\n |br| * the default value is 0\n :type selfDischarge: 0 <= float <=1\n\n :param cyclicLifetime: if specified, the total number of full cycle equivalents that are supported\n by the technology.\n |br| * the default value is None\n :type cyclicLifetime: positive float\n\n :param stateOfChargeMin: threshold (percentage) that the state of charge can not drop under\n |br| * the default value is 0\n :type stateOfChargeMin: 0 <= float <=1\n\n :param stateOfChargeMax: threshold (percentage) that the state of charge can not exceed\n |br| * the default value is 1\n :type stateOfChargeMax: 0 <= float <=1\n\n :param doPreciseTsaModeling: determines whether the state of charge is limited precisely (True) or\n with a simplified method (False). The error is small if the selfDischarge is small.\n |br| * the default value is False\n :type doPreciseTsaModeling: boolean\n\n :param chargeOpRateMax: if specified indicates a maximum charging rate for each location and each time\n step by a positive float. If hasCapacityVariable is set to True, the values are given relative\n to the installed capacities (i.e. in that case a value of 1 indicates a utilization of 100% of the\n capacity). If hasCapacityVariable is set to False, the values are given as absolute values in form\n of the commodityUnit, referring to the charged commodity (before multiplying the charging efficiency)\n during one time step.\n |br| * the default value is None\n :type chargeOpRateMax: None or Pandas DataFrame with positive (>= 0) entries. The row indices have\n to match the in the energy system model specified time steps. The column indices have to match the\n in the energy system model specified locations.\n\n :param chargeOpRateFix: if specified indicates a fixed charging rate for each location and each time\n step by a positive float. If hasCapacityVariable is set to True, the values are given relative\n to the installed capacities (i.e. in that case a value of 1 indicates a utilization of 100% of the\n capacity). If hasCapacityVariable is set to False, the values are given as absolute values in form\n of the commodity, referring to the charged commodity (before multiplying the charging efficiency)\n during one time step.\n |br| * the default value is None\n :type chargeOpRateFix: None or Pandas DataFrame with positive (>= 0) entries. The row indices have\n to match the in the energy system model specified time steps. 
The column indices have to match the\n in the energy system model specified locations.\n\n :param chargeTsaWeight: weight with which the chargeOpRate (max/fix) time series of the\n component should be considered when applying time series aggregation.\n |br| * the default value is 1\n :type chargeTsaWeight: positive (>= 0) float\n\n :param dischargeOpRateMax: if specified indicates a maximum discharging rate for each location and each\n time step by a positive float. If hasCapacityVariable is set to True, the values are given relative\n to the installed capacities (i.e. in that case a value of 1 indicates a utilization of 100% of the\n capacity). If hasCapacityVariable is set to False, the values are given as absolute values in form\n of the commodityUnit, referring to the discharged commodity (after multiplying the discharging\n efficiency) during one time step.\n |br| * the default value is None\n :type dischargeOpRateMax: None or Pandas DataFrame with positive (>= 0) entries. The row indices have\n to match the in the energy system model specified time steps. The column indices have to match the\n in the energy system model specified locations.\n\n :param dischargeOpRateFix: if specified indicates a fixed discharging rate for each location and each\n time step by a positive float. If hasCapacityVariable is set to True, the values are given relative\n to the installed capacities (i.e. in that case a value of 1 indicates a utilization of 100% of the\n capacity). If hasCapacityVariable is set to False, the values are given as absolute values in form\n of the commodityUnit, referring to the charged commodity (after multiplying the discharging\n efficiency) during one time step.\n |br| * the default value is None\n :type dischargeOpRateFix: None or Pandas DataFrame with positive (>= 0) entries. The row indices have\n to match the in the energy system model specified time steps. The column indices have to match the\n in the energy system model specified locations.\n\n :param dischargeTsaWeight: weight with which the dischargeOpRate (max/fix) time series of the\n component should be considered when applying time series aggregation.\n |br| * the default value is 1\n :type dischargeTsaWeight: positive (>= 0) float\n\n :param isPeriodicalStorage: indicates if the state of charge of the storage has to be at the same value\n after the end of each period. This is especially relevant when using daily periods where short term\n storage can be restrained to daily cycles. Benefits the run time of the model.\n |br| * the default value is False\n :type isPeriodicalStorage: boolean\n\n :param opexPerChargeOperation: cost which is directly proportional to the charge operation of the\n component is obtained by multiplying the opexPerOperation parameter with the annual sum of the\n operational time series of the components. The opexPerOperation can either be given as a float\n or a Pandas Series with location specific values.\n The cost unit in which the parameter is given has to match the one specified in the energy\n system model (i.e. 
Euro, Dollar, 1e6 Euro).\n |br| * the default value is 0\n :type opexPerChargeOperation: positive (>=0) float or Pandas Series with positive (>=0) values.\n The indices of the series have to equal the in the energy system model specified locations.\n\n :param opexPerDischargeOperation: cost which is directly proportional to the discharge operation\n of the component is obtained by multiplying the opexPerOperation parameter with the annual sum\n of the operational time series of the components. The opexPerOperation can either be given as\n a float or a Pandas Series with location specific values.\n The cost unit in which the parameter is given has to match the one specified in the energy\n system model (i.e. Euro, Dollar, 1e6 Euro).\n |br| * the default value is 0\n\n :type opexPerDischargeOperation: positive (>=0) float or Pandas Series with positive (>=0) values.\n The indices of the series have to equal the in the energy system model specified locations.\n component (in the physicalUnit of the component) and not of the specific operation itself are\n obtained by multiplying the capacity of the component at a location with the opexPerCapacity\n factor. The opexPerCapacity can either be given as a float or a Pandas Series with location\n specific values.\n \"\"\"\n Component. __init__(self, esM, name, dimension='1dim', hasCapacityVariable=hasCapacityVariable,\n capacityVariableDomain=capacityVariableDomain, capacityPerPlantUnit=capacityPerPlantUnit,\n hasIsBuiltBinaryVariable=hasIsBuiltBinaryVariable, bigM=bigM,\n locationalEligibility=locationalEligibility, capacityMin=capacityMin,\n capacityMax=capacityMax, sharedPotentialID=sharedPotentialID, capacityFix=capacityFix,\n isBuiltFix=isBuiltFix, investPerCapacity=investPerCapacity, investIfBuilt=investIfBuilt,\n opexPerCapacity=opexPerCapacity, opexIfBuilt=opexIfBuilt, interestRate=interestRate,\n economicLifetime=economicLifetime)\n\n # Set general storage component data\n utils.checkCommodities(esM, {commodity})\n self.commodity, self.commodityUnit = commodity, esM.commodityUnitsDict[commodity]\n # TODO unit and type checks\n self.chargeRate, self.dischargeRate = chargeRate, dischargeRate\n self.chargeEfficiency, self.dischargeEfficiency = chargeEfficiency, dischargeEfficiency\n self.selfDischarge = selfDischarge\n self.cyclicLifetime = cyclicLifetime\n self.stateOfChargeMin, self.stateOfChargeMax = stateOfChargeMin, stateOfChargeMax\n self.isPeriodicalStorage = isPeriodicalStorage\n self.doPreciseTsaModeling = doPreciseTsaModeling\n self.modelingClass = StorageModel\n\n # Set additional economic data\n self.opexPerChargeOperation = utils.checkAndSetCostParameter(esM, name, opexPerChargeOperation, '1dim',\n locationalEligibility)\n self.opexPerDischargeOperation = utils.checkAndSetCostParameter(esM, name, opexPerDischargeOperation, '1dim',\n locationalEligibility)\n\n # Set location-specific operation parameters (Charging rate, discharging rate, state of charge rate)\n # and time series aggregation weighting factor\n if chargeOpRateMax is not None and chargeOpRateFix is not None:\n chargeOpRateMax = None\n if esM.verbose < 2:\n warnings.warn('If chargeOpRateFix is specified, the chargeOpRateMax parameter is not required.\\n' +\n 'The chargeOpRateMax time series was set to None.')\n utils.checkOperationTimeSeriesInputParameters(esM, chargeOpRateMax, locationalEligibility)\n utils.checkOperationTimeSeriesInputParameters(esM, chargeOpRateFix, locationalEligibility)\n\n self.fullChargeOpRateMax = 
utils.setFormattedTimeSeries(chargeOpRateMax)\n self.aggregatedChargeOpRateMax = None\n self.chargeOpRateMax = None\n\n self.fullChargeOpRateFix = utils.setFormattedTimeSeries(chargeOpRateFix)\n self.aggregatedChargeOpRateFix = None\n self.chargeOpRateFix = None\n\n utils.isPositiveNumber(chargeTsaWeight)\n self.chargeTsaWeight = chargeTsaWeight\n\n if dischargeOpRateMax is not None and dischargeOpRateFix is not None:\n dischargeOpRateMax = None\n if esM.verbose < 2:\n warnings.warn('If dischargeOpRateFix is specified, the dischargeOpRateMax parameter is not required.\\n'\n + 'The dischargeOpRateMax time series was set to None.')\n utils.checkOperationTimeSeriesInputParameters(esM, dischargeOpRateMax, locationalEligibility)\n utils.checkOperationTimeSeriesInputParameters(esM, dischargeOpRateFix, locationalEligibility)\n\n self.fullDischargeOpRateMax = utils.setFormattedTimeSeries(dischargeOpRateMax)\n self.aggregatedDischargeOpRateMax = None\n self.dischargeOpRateMax = None\n\n self.fullDischargeOpRateFix = utils.setFormattedTimeSeries(dischargeOpRateFix)\n self.aggregatedDischargeOpRateFix = None\n self.dischargeOpRateFix = None\n\n utils.isPositiveNumber(dischargeTsaWeight)\n self.dischargeTsaWeight = dischargeTsaWeight\n\n # Set locational eligibility\n timeSeriesData = None\n tsNb = sum([0 if data is None else 1 for data in [chargeOpRateMax, chargeOpRateFix, dischargeOpRateMax,\n dischargeOpRateFix, ]])\n if tsNb > 0:\n timeSeriesData = sum([data for data in [chargeOpRateMax, chargeOpRateFix, dischargeOpRateMax,\n dischargeOpRateFix, ] if data is not None])\n self.locationalEligibility = \\\n utils.setLocationalEligibility(esM, self.locationalEligibility, self.capacityMax, self.capacityFix,\n self.isBuiltFix, self.hasCapacityVariable, timeSeriesData)\n\n def addToEnergySystemModel(self, esM):\n super().addToEnergySystemModel(esM)\n\n def setTimeSeriesData(self, hasTSA):\n self.chargeOpRateMax = self.aggregatedChargeOpRateMax if hasTSA else self.fullChargeOpRateMax\n self.chargeOpRateFix = self.aggregatedChargeOpRateFix if hasTSA else self.fullChargeOpRateFix\n self.dischargeOpRateMax = self.aggregatedChargeOpRateMax if hasTSA else self.fullDischargeOpRateMax\n self.dischargeOpRateFix = self.aggregatedChargeOpRateFix if hasTSA else self.fullDischargeOpRateFix\n\n def getDataForTimeSeriesAggregation(self):\n weightDict, data = {}, []\n I = [(self.fullChargeOpRateFix, self.fullChargeOpRateMax, 'chargeRate_', self.chargeTsaWeight),\n (self.fullDischargeOpRateFix, self.fullDischargeOpRateMax, 'dischargeRate_', self.dischargeTsaWeight)]\n\n for rateFix, rateMax, rateName, rateWeight in I:\n weightDict, data = self.prepareTSAInput(rateFix, rateMax, rateName, rateWeight, weightDict, data)\n return (pd.concat(data, axis=1), weightDict) if data else (None, {})\n\n def setAggregatedTimeSeriesData(self, data):\n\n self.aggregatedChargeOpRateFix = self.getTSAOutput(self.fullChargeOpRateFix, 'chargeRate_', data)\n self.aggregatedChargeOpRateMax = self.getTSAOutput(self.fullChargeOpRateMax, 'chargeRate_', data)\n\n self.aggregatedDischargeOpRateFix = self.getTSAOutput(self.fullDischargeOpRateFix, 'dischargeRate_', data)\n self.aggregatedDischargeOpRateMax = self.getTSAOutput(self.fullDischargeOpRateMax, 'dischargeRate_', data)\n\n\nclass StorageModel(ComponentModel):\n \"\"\" Doc \"\"\"\n\n def __init__(self):\n self.abbrvName = 'stor'\n self.dimension = '1dim'\n self.componentsDict = {}\n self.capacityVariablesOptimum, self.isBuiltVariablesOptimum = None, None\n self.chargeOperationVariablesOptimum, 
self.dischargeOperationVariablesOptimum = None, None\n self.stateOfChargeOperationVariablesOptimum = None\n self.optSummary = None\n\n ####################################################################################################################\n # Declare sparse index sets #\n ####################################################################################################################\n\n def declareSets(self, esM, pyM):\n \"\"\" Declares sets and dictionaries \"\"\"\n compDict = self.componentsDict\n\n # Declare design variable sets\n self.declareDesignVarSet(pyM)\n self.declareContinuousDesignVarSet(pyM)\n self.declareDiscreteDesignVarSet(pyM)\n self.declareDesignDecisionVarSet(pyM)\n\n if pyM.hasTSA:\n varSet = getattr(pyM, 'designDimensionVarSet_' + self.abbrvName)\n\n def initDesignVarSimpleTSASet(pyM):\n return ((loc, compName) for loc, compName in varSet if not compDict[compName].doPreciseTsaModeling)\n setattr(pyM, 'designDimensionVarSetSimple_' + self.abbrvName,\n pyomo.Set(dimen=2, initialize=initDesignVarSimpleTSASet))\n\n def initDesignVarPreciseTSASet(pyM):\n return ((loc, compName) for loc, compName in varSet if compDict[compName].doPreciseTsaModeling)\n setattr(pyM, 'designDimensionVarSetPrecise_' + self.abbrvName,\n pyomo.Set(dimen=2, initialize=initDesignVarPreciseTSASet))\n\n # Declare operation variable set\n self.declareOpVarSet(esM, pyM)\n\n # Declare sets for case differentiation of operating modes\n # * Charge operation\n self.declareOperationModeSets(pyM, 'chargeOpConstrSet', 'chargeOpRateMax', 'chargeOpRateFix')\n # * Discharge operation\n self.declareOperationModeSets(pyM, 'dischargeOpConstrSet', 'dischargeOpRateMax', 'dischargeOpRateFix')\n\n ####################################################################################################################\n # Declare variables #\n ####################################################################################################################\n\n def declareVariables(self, esM, pyM):\n \"\"\" Declares design and operation variables \"\"\"\n\n # Capacity variables in [commodityUnit*hour]\n self.declareCapacityVars(pyM)\n # (Continuous) numbers of installed components in [-]\n self.declareRealNumbersVars(pyM)\n # (Discrete/integer) numbers of installed components in [-]\n self.declareIntNumbersVars(pyM)\n # Binary variables [-] indicating if a component is considered at a location or not in [-]\n self.declareBinaryDesignDecisionVars(pyM)\n # Energy amount injected into a storage (before injection efficiency losses) between two time steps\n self.declareOperationVars(pyM, 'chargeOp')\n # Energy amount delivered from a storage (after delivery efficiency losses) between two time steps\n self.declareOperationVars(pyM, 'dischargeOp')\n\n # Inventory of storage components [commodityUnit*hour]\n if not pyM.hasTSA:\n # Energy amount stored at the beginning of a time step during the (one) period (the i-th state of charge\n # refers to the state of charge at the beginning of the i-th time step, the last index is the state of\n # charge after the last time step)\n setattr(pyM, 'stateOfCharge_' + self.abbrvName, pyomo.Var(getattr(pyM, 'designDimensionVarSet_' +\n self.abbrvName), pyM.interTimeStepsSet, domain=pyomo.NonNegativeReals))\n else:\n # (Virtual) energy amount stored during a period (the i-th state of charge refers to the state of charge at\n # the beginning of the i-th time step, the last index is the state of charge after the last time step)\n setattr(pyM, 'stateOfCharge_' + self.abbrvName, 
pyomo.Var(getattr(pyM, 'designDimensionVarSet_' +\n self.abbrvName), pyM.interTimeStepsSet, domain=pyomo.Reals))\n # (Virtual) minimum amount of energy stored within a period\n setattr(pyM, 'stateOfChargeMin_' + self.abbrvName, pyomo.Var(getattr(pyM, 'designDimensionVarSet_' +\n self.abbrvName), esM.typicalPeriods, domain=pyomo.Reals))\n # (Virtual) maximum amount of energy stored within a period\n setattr(pyM, 'stateOfChargeMax_' + self.abbrvName, pyomo.Var(getattr(pyM, 'designDimensionVarSet_' +\n self.abbrvName), esM.typicalPeriods, domain=pyomo.Reals))\n # (Real) energy amount stored at the beginning of a period between periods(the i-th state of charge refers\n # to the state of charge at the beginning of the i-th period, the last index is the state of charge after\n # the last period)\n setattr(pyM, 'stateOfChargeInterPeriods_' + self.abbrvName, pyomo.Var(getattr(pyM, 'designDimensionVarSet_'\n + self.abbrvName), esM.interPeriodTimeSteps, domain=pyomo.NonNegativeReals))\n\n ####################################################################################################################\n # Declare component constraints #\n ####################################################################################################################\n\n def connectSOCs(self, pyM, esM):\n \"\"\" Constraint for connecting the state of charge with the charge and discharge operation \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n SOC = getattr(pyM, 'stateOfCharge_' + abbrvName)\n chargeOp, dischargeOp = getattr(pyM, 'chargeOp_' + abbrvName), getattr(pyM, 'dischargeOp_' + abbrvName)\n opVarSet = getattr(pyM, 'operationVarSet_' + abbrvName)\n\n def connectSOCs(pyM, loc, compName, p, t):\n return (SOC[loc, compName, p, t+1] - SOC[loc, compName, p, t] *\n (1 - compDict[compName].selfDischarge) ** esM.hoursPerTimeStep ==\n chargeOp[loc, compName, p, t] * compDict[compName].chargeEfficiency -\n dischargeOp[loc, compName, p, t] / compDict[compName].dischargeEfficiency)\n setattr(pyM, 'ConstrConnectSOC_' + abbrvName, pyomo.Constraint(opVarSet, pyM.timeSet, rule=connectSOCs))\n\n def cyclicState(self, pyM, esM):\n \"\"\" Constraint for connecting the state of charge with the charge and discharge operation \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n opVarSet = getattr(pyM, 'operationVarSet_' + abbrvName)\n SOC = getattr(pyM, 'stateOfCharge_' + abbrvName)\n if not pyM.hasTSA:\n def cyclicState(pyM, loc, compName):\n return SOC[loc, compName, 0, 0] == SOC[loc, compName, 0, esM.timeStepsPerPeriod[-1] + 1]\n else:\n SOCInter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n def cyclicState(pyM, loc, compName):\n return SOCInter[loc, compName, 0] == SOCInter[loc, compName, esM.interPeriodTimeSteps[-1]]\n setattr(pyM, 'ConstrCyclicState_' + abbrvName, pyomo.Constraint(opVarSet, rule=cyclicState))\n\n def cyclicLifetime(self, pyM, esM):\n \"\"\" Constraint for limiting the number of full cycle equivalents to stay below cyclic lifetime \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n chargeOp, capVar = getattr(pyM, 'chargeOp_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n capVarSet = getattr(pyM, 'designDimensionVarSet_' + abbrvName)\n\n def cyclicLifetime(pyM, loc, compName):\n return (sum(chargeOp[loc, compName, p, t] * esM.periodOccurrences[p] for p, t in pyM.timeSet) /\n esM.numberOfYears <= capVar[loc, compName] *\n (compDict[compName].stateOfChargeMax - compDict[compName].stateOfChargeMin) *\n compDict[compName].cyclicLifetime / 
compDict[compName].economicLifetime[loc]\n if compDict[compName].cyclicLifetime is not None else pyomo.Constraint.Skip)\n setattr(pyM, 'ConstrCyclicLifetime_' + abbrvName, pyomo.Constraint(capVarSet, rule=cyclicLifetime))\n\n def connectInterPeriodSOC(self, pyM, esM):\n \"\"\"\n The state of charge at the end of each period is equivalent to the state of charge of the period\n before it (minus its self discharge) plus the change in the state of charge which happened during\n the typical period which was assigned to that period\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n opVarSet = getattr(pyM, 'operationVarSet_' + abbrvName)\n SOC = getattr(pyM, 'stateOfCharge_' + abbrvName)\n SOCInter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n\n def connectInterSOC(pyM, loc, compName, pInter):\n return SOCInter[loc, compName, pInter + 1] == \\\n SOCInter[loc, compName, pInter] * (1 - compDict[compName].selfDischarge) ** \\\n ((esM.timeStepsPerPeriod[-1] + 1) * esM.hoursPerTimeStep) + \\\n SOC[loc, compName, esM.periodsOrder[pInter], esM.timeStepsPerPeriod[-1] + 1]\n setattr(pyM, 'ConstrInterSOC_' + abbrvName, pyomo.Constraint(opVarSet, esM.periods, rule=connectInterSOC))\n\n def intraSOCstart(self, pyM, esM):\n \"\"\" The (virtual) state of charge at the beginning of a typical period is zero \"\"\"\n abbrvName = self.abbrvName\n opVarSet = getattr(pyM, 'operationVarSet_' + abbrvName)\n SOC = getattr(pyM, 'stateOfCharge_' + abbrvName)\n\n def intraSOCstart(pyM, loc, compName, p):\n return SOC[loc, compName, p, 0] == 0\n setattr(pyM, 'ConstrSOCPeriodStart_' + abbrvName,\n pyomo.Constraint(opVarSet, esM.typicalPeriods, rule=intraSOCstart))\n\n def equalInterSOC(self, pyM, esM):\n \"\"\" If periodic storage is selected, the states of charge between periods have the same value \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n opVarSet = getattr(pyM, 'operationVarSet_' + abbrvName)\n SOCInter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n\n def equalInterSOC(pyM, loc, compName, pInter):\n return (SOCInter[loc, compName, pInter] == SOCInter[loc, compName, pInter + 1]\n if compDict[compName].isPeriodicalStorage else pyomo.Constraint.Skip)\n setattr(pyM, 'ConstrEqualInterSOC_' + abbrvName, pyomo.Constraint(opVarSet, esM.periods, rule=equalInterSOC))\n\n def minSOC(self, pyM):\n \"\"\"\n The state of charge [energyUnit] has to be larger than the installed capacity [energyUnit] multiplied\n with the relative minimum state of charge\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n capVarSet = getattr(pyM, 'designDimensionVarSet_' + abbrvName)\n SOC, capVar = getattr(pyM, 'stateOfCharge_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n\n def SOCMin(pyM, loc, compName, p, t):\n return SOC[loc, compName, p, t] >= capVar[loc, compName] * compDict[compName].stateOfChargeMin\n setattr(pyM, 'ConstrSOCMin_' + abbrvName, pyomo.Constraint(capVarSet, pyM.timeSet, rule=SOCMin))\n\n def limitSOCwithSimpleTsa(self, pyM, esM):\n \"\"\"\n Simplified version of the state of charge limitation control.\n The error compared to the precise version is small in cases of small selfDischarge.\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n capVarSimpleSet = getattr(pyM, 'designDimensionVarSetSimple_' + abbrvName)\n SOC, capVar = getattr(pyM, 'stateOfCharge_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n SOCmax, SOCmin = getattr(pyM, 'stateOfChargeMax_' + abbrvName), getattr(pyM, 'stateOfChargeMin_' + abbrvName)\n SOCInter 
= getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n\n # The maximum (virtual) state of charge during a typical period is larger than all occurring (virtual)\n # states of charge in that period (the last time step is considered in the subsequent period for t=0)\n def SOCintraPeriodMax(pyM, loc, compName, p, t):\n return SOC[loc, compName, p, t] <= SOCmax[loc, compName, p]\n setattr(pyM, 'ConstSOCintraPeriodMax_' + abbrvName,\n pyomo.Constraint(capVarSimpleSet, pyM.timeSet, rule=SOCintraPeriodMax))\n\n # The minimum (virtual) state of charge during a typical period is smaller than all occurring (virtual)\n # states of charge in that period (the last time step is considered in the subsequent period for t=0)\n def SOCintraPeriodMin(pyM, loc, compName, p, t):\n return SOC[loc, compName, p, t] >= SOCmin[loc, compName, p]\n setattr(pyM, 'ConstSOCintraPeriodMin_' + abbrvName,\n pyomo.Constraint(capVarSimpleSet, pyM.timeSet, rule=SOCintraPeriodMin))\n\n # The state of charge at the beginning of one period plus the maximum (virtual) state of charge\n # during that period has to be smaller than the installed capacities multiplied with the relative maximum\n # state of charge\n def SOCMaxSimple(pyM, loc, compName, pInter):\n return (SOCInter[loc, compName, pInter] + SOCmax[loc, compName, esM.periodsOrder[pInter]]\n <= capVar[loc, compName] * compDict[compName].stateOfChargeMax)\n setattr(pyM, 'ConstrSOCMaxSimple_' + abbrvName,\n pyomo.Constraint(capVarSimpleSet, esM.periods, rule=SOCMaxSimple))\n\n # The state of charge at the beginning of one period plus the minimum (virtual) state of charge\n # during that period has to be larger than the installed capacities multiplied with the relative minimum\n # state of charge\n def SOCMinSimple(pyM, loc, compName, pInter):\n return (SOCInter[loc, compName, pInter] * (1 - compDict[compName].selfDischarge) **\n ((esM.timeStepsPerPeriod[-1] + 1) * esM.hoursPerTimeStep)\n + SOCmin[loc, compName, esM.periodsOrder[pInter]]\n >= capVar[loc, compName] * compDict[compName].stateOfChargeMin)\n setattr(pyM, 'ConstrSOCMinSimple_' + abbrvName,\n pyomo.Constraint(capVarSimpleSet, esM.periods, rule=SOCMinSimple))\n\n def operationModeSOC(self, pyM, esM):\n \"\"\"\n State of charge [energyUnit] limited by the installed capacity [powerUnit] and the relative maximum\n state of charge\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n opVar, capVar = getattr(pyM, 'stateOfCharge_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n constrSet = getattr(pyM, 'designDimensionVarSet_' + abbrvName)\n\n # Operation [energyUnit] limited by the installed capacity [powerUnit] multiplied by the hours per time step\n def op(pyM, loc, compName, p, t):\n return (opVar[loc, compName, p, t] <=\n esM.hoursPerTimeStep * compDict[compName].stateOfChargeMax * capVar[loc, compName])\n setattr(pyM, 'ConstrSOCMaxPrecise_' + abbrvName, pyomo.Constraint(constrSet, pyM.timeSet, rule=op))\n\n def operationModeSOCwithTSA(self, pyM, esM):\n \"\"\"\n State of charge [energyUnit] limited by the installed capacity [powerUnit] and the relative maximum\n state of charge\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n SOCinter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n SOC, capVar = getattr(pyM, 'stateOfCharge_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n constrSet = getattr(pyM, 'designDimensionVarSet_' + abbrvName)\n\n def SOCMaxPrecise(pyM, loc, compName, pInter, t):\n if compDict[compName].doPreciseTsaModeling:\n return (SOCinter[loc, 
compName, pInter] *\n ((1 - compDict[compName].selfDischarge) ** (t * esM.hoursPerTimeStep)) +\n SOC[loc, compName, esM.periodsOrder[pInter], t]\n <= capVar[loc, compName] * compDict[compName].stateOfChargeMax)\n else:\n return pyomo.Constraint.Skip\n setattr(pyM, 'ConstrSOCMaxPrecise_' + abbrvName,\n pyomo.Constraint(constrSet, esM.periods, esM.timeStepsPerPeriod, rule=SOCMaxPrecise))\n\n def minSOCwithTSAprecise(self, pyM, esM):\n \"\"\"\n The state of charge at each time step cannot be smaller than the installed capacity multiplied with the\n relative minimum state of charge\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n SOCinter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n SOC, capVar = getattr(pyM, 'stateOfCharge_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n capVarPreciseSet = getattr(pyM, 'designDimensionVarSetPrecise_' + abbrvName)\n\n def SOCMinPrecise(pyM, loc, compName, pInter, t):\n return (SOCinter[loc, compName, pInter] * ((1 - compDict[compName].selfDischarge) **\n (t * esM.hoursPerTimeStep)) + SOC[loc, compName, esM.periodsOrder[pInter], t]\n >= capVar[loc, compName] * compDict[compName].stateOfChargeMin)\n setattr(pyM, 'ConstrSOCMinPrecise_' + abbrvName,\n pyomo.Constraint(capVarPreciseSet, esM.periods, esM.timeStepsPerPeriod, rule=SOCMinPrecise))\n\n def declareComponentConstraints(self, esM, pyM):\n \"\"\" Declares time independent and dependent constraints\"\"\"\n\n ################################################################################################################\n # Declare time independent constraints #\n ################################################################################################################\n\n # Determine the components' capacities from the number of installed units\n self.capToNbReal(pyM)\n # Determine the components' capacities from the number of installed units\n self.capToNbInt(pyM)\n # Enforce the consideration of the binary design variables of a component\n self.bigM(pyM)\n # Enforce the consideration of minimum capacities for components with design decision variables\n self.capacityMinDec(pyM)\n # Sets, if applicable, the installed capacities of a component\n self.capacityFix(pyM)\n # Sets, if applicable, the binary design variables of a component\n self.designBinFix(pyM)\n\n ################################################################################################################\n # Declare time dependent constraints #\n ################################################################################################################\n\n # Constraint for connecting the state of charge with the charge and discharge operation\n self.connectSOCs(pyM, esM)\n\n # Constraints for enforcing charging operation modes #\n\n # Charging of storage [energyUnit] limited by the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the charging rate factor [powerUnit/energyUnit]\n self.operationMode1(pyM, esM, 'ConstrCharge', 'chargeOpConstrSet', 'chargeOp', 'chargeRate')\n # Charging of storage [energyUnit] limited by the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the charging operation time series [powerUnit/energyUnit]\n self.operationMode2(pyM, esM, 'ConstrCharge', 'chargeOpConstrSet', 'chargeOp')\n # Charging of storage [energyUnit] equal to the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the charging operation time series [powerUnit/energyUnit]\n self.operationMode3(pyM, esM, 
'ConstrCharge', 'chargeOpConstrSet', 'chargeOp')\n # Operation [energyUnit] limited by the operation time series [energyUnit]\n self.operationMode4(pyM, esM, 'ConstrCharge', 'chargeOpConstrSet', 'chargeOp')\n # Operation [energyUnit] equal to the operation time series [energyUnit]\n self.operationMode5(pyM, esM, 'ConstrCharge', 'chargeOpConstrSet', 'chargeOp')\n\n # Constraints for enforcing discharging operation modes #\n\n # Discharging of storage [energyUnit] limited by the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the discharging rate factor [powerUnit/energyUnit]\n self.operationMode1(pyM, esM, 'ConstrDischarge', 'dischargeOpConstrSet', 'dischargeOp', 'dischargeRate')\n # Discharging of storage [energyUnit] limited by the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the charging operation time series [powerUnit/energyUnit]\n self.operationMode2(pyM, esM, 'ConstrDischarge', 'dischargeOpConstrSet', 'dischargeOp')\n # Discharging of storage [energyUnit] equal to the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the charging operation time series [powerUnit/energyUnit]\n self.operationMode3(pyM, esM, 'ConstrDischarge', 'dischargeOpConstrSet', 'dischargeOp')\n # Operation [energyUnit] limited by the operation time series [energyUnit]\n self.operationMode4(pyM, esM, 'ConstrDischarge', 'dischargeOpConstrSet', 'dischargeOp')\n # Operation [energyUnit] equal to the operation time series [energyUnit]\n self.operationMode5(pyM, esM, 'ConstrDischarge', 'dischargeOpConstrSet', 'dischargeOp')\n\n # Cyclic constraint enforcing that all storages have the same state of charge at the the beginning of the first\n # and the end of the last time step\n self.cyclicState(pyM, esM)\n\n # Constraint for limiting the number of full cycle equivalents to stay below cyclic lifetime\n self.cyclicLifetime(pyM, esM)\n\n if pyM.hasTSA:\n # The state of charge at the end of each period is equivalent to the state of charge of the period before it\n # (minus its self discharge) plus the change in the state of charge which happened during the typical\n # # period which was assigned to that period\n self.connectInterPeriodSOC(pyM, esM)\n # The (virtual) state of charge at the beginning of a typical period is zero\n self.intraSOCstart(pyM, esM)\n # If periodic storage is selected, the states of charge between periods have the same value\n self.equalInterSOC(pyM, esM)\n\n # Ensure that the state of charge is within the operating limits of the installed capacities\n if not pyM.hasTSA:\n # Constraints for enforcing a state of charge operation mode within given limits #\n\n # State of charge [energyUnit] limited by the installed capacity [energyUnit] and the relative maximum\n # state of charge\n self.operationModeSOC(pyM, esM)\n\n # The state of charge [energyUnit] has to be larger than the installed capacity [energyUnit] multiplied\n # with the relative minimum state of charge\n self.minSOC(pyM)\n\n else:\n # Simplified version of the state of charge limitation control #\n # (The error compared to the precise version is small in cases of small selfDischarge) #\n self.limitSOCwithSimpleTsa(pyM, esM)\n\n # Precise version of the state of charge limitation control #\n\n # Constraints for enforcing a state of charge operation within given limits\n\n # State of charge [energyUnit] limited by the installed capacity [energyUnit] and the relative maximum\n # state of charge\n self.operationModeSOCwithTSA(pyM, esM)\n\n # 
The state of charge at each time step cannot be smaller than the installed capacity multiplied with the\n # relative minimum state of charge\n self.minSOCwithTSAprecise(pyM, esM)\n\n ####################################################################################################################\n # Declare component contributions to basic EnergySystemModel constraints and its objective function #\n ####################################################################################################################\n\n def getSharedPotentialContribution(self, pyM, key, loc):\n \"\"\" Gets contributions to shared location potential \"\"\"\n return super().getSharedPotentialContribution(pyM, key, loc)\n\n def hasOpVariablesForLocationCommodity(self, esM, loc, commod):\n return any([comp.commodity == commod and comp.locationalEligibility[loc] == 1\n for comp in self.componentsDict.values()])\n\n def getCommodityBalanceContribution(self, pyM, commod, loc, p, t):\n \"\"\" Gets contribution to a commodity balance \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n chargeOp, dischargeOp = getattr(pyM, 'chargeOp_' + abbrvName), getattr(pyM, 'dischargeOp_' + abbrvName)\n opVarDict = getattr(pyM, 'operationVarDict_' + abbrvName)\n return sum(dischargeOp[loc, compName, p, t] - chargeOp[loc, compName, p, t]\n for compName in opVarDict[loc] if commod == self.componentsDict[compName].commodity)\n\n def getObjectiveFunctionContribution(self, esM, pyM):\n \"\"\" Gets contribution to the objective function \"\"\"\n\n capexCap = self.getEconomicsTI(pyM, ['investPerCapacity'], 'cap', 'CCF')\n capexDec = self.getEconomicsTI(pyM, ['investIfBuilt'], 'designBin', 'CCF')\n opexCap = self.getEconomicsTI(pyM, ['opexPerCapacity'], 'cap')\n opexDec = self.getEconomicsTI(pyM, ['opexIfBuilt'], 'designBin')\n opexOp1 = self.getEconomicsTD(pyM, esM, ['opexPerChargeOperation'], 'chargeOp', 'operationVarDict')\n opexOp2 = self.getEconomicsTD(pyM, esM, ['opexPerDischargeOperation'], 'dischargeOp', 'operationVarDict')\n\n return capexCap + capexDec + opexCap + opexDec + opexOp1 + opexOp2\n\n ####################################################################################################################\n # Return optimal values of the component class #\n ####################################################################################################################\n\n def setOptimalValues(self, esM, pyM):\n compDict, abbrvName = self.componentsDict, self.abbrvName\n chargeOp, dischargeOp = getattr(pyM, 'chargeOp_' + abbrvName), getattr(pyM, 'dischargeOp_' + abbrvName)\n SOC = getattr(pyM, 'stateOfCharge_' + abbrvName)\n\n # Set optimal design dimension variables and get basic optimization summary\n optSummaryBasic = super().setOptimalValues(esM, pyM, esM.locations, 'commodityUnit', '*h')\n\n # Set optimal operation variables and append optimization summary\n props = ['operationCharge', 'operationDischarge', 'opexCharge', 'opexDischarge']\n units = ['[-]', '[-]', '[' + esM.costUnit + '/a]', '[' + esM.costUnit + '/a]']\n tuples = [(compName, prop, unit) for compName in compDict.keys() for prop, unit in zip(props, units)]\n tuples = list(map(lambda x: (x[0], x[1], '[' + compDict[x[0]].commodityUnit + '*h/a]')\n if x[1] == 'operationCharge' else x, tuples))\n tuples = list(map(lambda x: (x[0], x[1], '[' + compDict[x[0]].commodityUnit + '*h/a]')\n if x[1] == 'operationDischarge' else x, tuples))\n mIndex = pd.MultiIndex.from_tuples(tuples, names=['Component', 'Property', 'Unit'])\n optSummary = 
pd.DataFrame(index=mIndex, columns=sorted(esM.locations)).sort_index()\n\n # * charge variables and contributions\n optVal = utils.formatOptimizationOutput(chargeOp.get_values(), 'operationVariables', '1dim', esM.periodsOrder)\n self.chargeOperationVariablesOptimum = optVal\n\n if optVal is not None:\n opSum = optVal.sum(axis=1).unstack(-1)\n ox = opSum.apply(lambda op: op * compDict[op.name].opexPerChargeOperation[op.index], axis=1)\n optSummary.loc[[(ix, 'operationCharge', '[' + compDict[ix].commodityUnit + '*h/a]')\n for ix in opSum.index], opSum.columns] = opSum.values/esM.numberOfYears\n optSummary.loc[[(ix, 'opexCharge', '[' + esM.costUnit + '/a]') for ix in ox.index],\n ox.columns] = ox.values/esM.numberOfYears\n\n # * discharge variables and contributions\n optVal = utils.formatOptimizationOutput(dischargeOp.get_values(), 'operationVariables', '1dim',\n esM.periodsOrder)\n self.dischargeOperationVariablesOptimum = optVal\n\n if optVal is not None:\n opSum = optVal.sum(axis=1).unstack(-1)\n ox = opSum.apply(lambda op: op * compDict[op.name].opexPerDischargeOperation[op.index], axis=1)\n optSummary.loc[[(ix, 'operationDischarge', '[' + compDict[ix].commodityUnit + '*h/a]')\n for ix in opSum.index], opSum.columns] = opSum.values/esM.numberOfYears\n optSummary.loc[[(ix, 'opexDischarge', '[' + esM.costUnit + '/a]') for ix in ox.index],\n ox.columns] = ox.values/esM.numberOfYears\n\n # * set state of charge variables\n if not pyM.hasTSA:\n optVal = utils.formatOptimizationOutput(SOC.get_values(), 'operationVariables', '1dim', esM.periodsOrder)\n # Remove the last column (by applying the cycle constraint, the first and the last columns are equal to each\n # other)\n optVal = optVal.loc[:, :len(optVal.columns) - 2]\n self.stateOfChargeOperationVariablesOptimum = optVal\n utils.setOptimalComponentVariables(optVal, '_stateOfChargeVariablesOptimum', compDict)\n else:\n SOCinter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n stateOfChargeIntra = SOC.get_values()\n stateOfChargeInter = SOCinter.get_values()\n if stateOfChargeIntra is not None:\n # Convert dictionary to DataFrame, transpose, put the period column first and sort the index\n # Results in a one dimensional DataFrame\n stateOfChargeIntra = pd.DataFrame(stateOfChargeIntra, index=[0]).T.swaplevel(i=0, j=-2).sort_index()\n stateOfChargeInter = pd.DataFrame(stateOfChargeInter, index=[0]).T.swaplevel(i=0, j=1).sort_index()\n # Unstack time steps (convert to a two dimensional DataFrame with the time indices being the columns)\n stateOfChargeIntra = stateOfChargeIntra.unstack(level=-1)\n stateOfChargeInter = stateOfChargeInter.unstack(level=-1)\n # Get rid of the unnecessary 0 level\n stateOfChargeIntra.columns = stateOfChargeIntra.columns.droplevel()\n stateOfChargeInter.columns = stateOfChargeInter.columns.droplevel()\n # Concat data\n data = []\n for count, p in enumerate(esM.periodsOrder):\n data.append((stateOfChargeInter.loc[:, count] +\n stateOfChargeIntra.loc[p].loc[:, :esM.timeStepsPerPeriod[-1]].T).T)\n optVal = pd.concat(data, axis=1, ignore_index=True)\n else:\n optVal = None\n self.stateOfChargeOperationVariablesOptimum = optVal\n utils.setOptimalComponentVariables(optVal, '_stateOfChargeVariablesOptimum', compDict)\n\n # Append optimization summaries\n optSummary = optSummary.append(optSummaryBasic).sort_index()\n\n # Summarize all contributions to the total annual cost\n optSummary.loc[optSummary.index.get_level_values(1) == 'TAC'] = \\\n optSummary.loc[(optSummary.index.get_level_values(1) == 'TAC') |\n 
(optSummary.index.get_level_values(1) == 'opexCharge') |\n (optSummary.index.get_level_values(1) == 'opexDischarge')].groupby(level=0).sum().values\n\n self.optSummary = optSummary\n\n def getOptimalValues(self, name='all'):\n \"\"\"\n Returns optimal values of the components\n\n :param name: name of the variables of which the optimal values should be returned:\\n\n * 'capacityVariables',\n * 'isBuiltVariables',\n * 'chargeOperationVariablesOptimum',\n * 'dischargeOperationVariablesOptimum',\n * 'stateOfChargeOperationVariablesOptimum',\n * 'all' or another input: all variables are returned.\\n\n :type name: string\n \"\"\"\n if name == 'capacityVariablesOptimum':\n return {'values': self.capacityVariablesOptimum, 'timeDependent': False, 'dimension': self.dimension}\n elif name == 'isBuiltVariablesOptimum':\n return {'values': self.isBuiltVariablesOptimum, 'timeDependent': False, 'dimension': self.dimension}\n elif name == 'chargeOperationVariablesOptimum':\n return {'values': self.chargeOperationVariablesOptimum, 'timeDependent': True, 'dimension': self.dimension}\n elif name == 'dischargeOperationVariablesOptimum':\n return {'values': self.dischargeOperationVariablesOptimum, 'timeDependent': True, 'dimension':\n self.dimension}\n elif name == 'stateOfChargeOperationVariablesOptimum':\n return {'values': self.stateOfChargeOperationVariablesOptimum, 'timeDependent': True, 'dimension':\n self.dimension}\n else:\n return {'capacityVariablesOptimum': {'values': self.capacityVariablesOptimum, 'timeDependent': False,\n 'dimension': self.dimension},\n 'isBuiltVariablesOptimum': {'values': self.isBuiltVariablesOptimum, 'timeDependent': False,\n 'dimension': self.dimension},\n 'chargeOperationVariablesOptimum': {'values': self.chargeOperationVariablesOptimum,\n 'timeDependent': True, 'dimension': self.dimension},\n 'dischargeOperationVariablesOptimum': {'values': self.dischargeOperationVariablesOptimum,\n 'timeDependent': True, 'dimension': self.dimension},\n 'stateOfChargeOperationVariablesOptimum': {'values': self.stateOfChargeOperationVariablesOptimum,\n 'timeDependent': True, 'dimension': self.dimension}}\n" ]
[ [ "pandas.concat", "pandas.MultiIndex.from_tuples", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
yzR1991/Copulas
[ "72c0d9c398f7fe3eb075b56591911fea377cdc33", "72c0d9c398f7fe3eb075b56591911fea377cdc33" ]
[ "tests/unit/multivariate/test_gaussian.py", "tests/unit/univariate/test_selection.py" ]
[ "from unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nimport numpy as np\nimport pandas as pd\n\nfrom copulas import get_qualified_name\nfrom copulas.multivariate.gaussian import GaussianMultivariate\nfrom copulas.univariate import GaussianUnivariate\n\n\nclass TestGaussianMultivariate(TestCase):\n\n def setUp(self):\n \"\"\"Defines random variable to use on tests. \"\"\"\n\n self.data = pd.DataFrame({\n 'column1': np.array([\n 2641.16233666, 921.14476418, -651.32239137, 1223.63536668,\n 3233.37342355, 1373.22400821, 1959.28188858, 1076.99295365,\n 2029.25100261, 1835.52188141, 1170.03850556, 739.42628394,\n 1866.65810627, 3703.49786503, 1719.45232017, 258.90206528,\n 219.42363944, 609.90212377, 1618.44207239, 2323.2775272,\n 3251.78732274, 1430.63989981, -180.57028875, -592.84497457,\n ]),\n 'column2': np.array([\n 180.2425623, 192.35609972, 150.24830291, 156.62123653,\n 173.80311908, 191.0922843, 163.22252158, 190.73280428,\n 158.52982435, 163.0101334, 205.24904026, 175.42916046,\n 208.31821984, 178.98351969, 160.50981075, 163.19294974,\n 173.30395132, 215.18996298, 164.71141696, 178.84973821,\n 182.99902513, 217.5796917, 201.56983421, 174.92272693\n ]),\n 'column3': np.array([\n -1.42432446, -0.14759864, 0.66476302, -0.04061445, 0.64305762,\n 1.79615407, 0.70450457, -0.05886671, -0.36794788, 1.39331262,\n 0.39792831, 0.0676313, -0.96761759, 0.67286132, -0.55013279,\n -0.53118328, 1.23969655, -0.35985016, -0.03568531, 0.91456357,\n 0.49077378, -0.27428204, 0.45857406, 2.29614033\n ])\n })\n\n def test__transform_to_normal_numpy_1d(self):\n # Setup\n gm = GaussianMultivariate()\n dist_a = Mock()\n dist_a.cdf.return_value = np.array([0])\n dist_b = Mock()\n dist_b.cdf.return_value = np.array([0.3])\n gm.columns = ['a', 'b']\n gm.univariates = [dist_a, dist_b]\n\n # Run\n data = np.array([\n [3, 5],\n ])\n returned = gm._transform_to_normal(data)\n\n # Check\n # Failures may occurr on different cpytonn implementations\n # with different float precision values.\n # If that happens, atol might need to be increased\n expected = np.array([\n [-5.166579, -0.524401],\n ])\n np.testing.assert_allclose(returned, expected, atol=1e-6)\n\n assert dist_a.cdf.call_count == 1\n expected = np.array([3])\n passed = dist_a.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n assert dist_b.cdf.call_count == 1\n expected = np.array([5])\n passed = dist_b.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n def test__transform_to_normal_numpy_2d(self):\n # Setup\n gm = GaussianMultivariate()\n dist_a = Mock()\n dist_a.cdf.return_value = np.array([0, 0.5, 1])\n dist_b = Mock()\n dist_b.cdf.return_value = np.array([0.3, 0.5, 0.7])\n gm.columns = ['a', 'b']\n gm.univariates = [dist_a, dist_b]\n\n # Run\n data = np.array([\n [3, 5],\n [4, 6],\n [5, 7],\n ])\n returned = gm._transform_to_normal(data)\n\n # Check\n # Failures may occurr on different cpytonn implementations\n # with different float precision values.\n # If that happens, atol might need to be increased\n expected = np.array([\n [-5.166579, -0.524401],\n [0.0, 0.0],\n [5.166579, 0.524401]\n ])\n np.testing.assert_allclose(returned, expected, atol=1e-6)\n\n assert dist_a.cdf.call_count == 1\n expected = np.array([3, 4, 5])\n passed = dist_a.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n assert dist_b.cdf.call_count == 1\n expected = np.array([5, 6, 7])\n passed = dist_b.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n def 
test__transform_to_normal_series(self):\n # Setup\n gm = GaussianMultivariate()\n dist_a = Mock()\n dist_a.cdf.return_value = np.array([0])\n dist_b = Mock()\n dist_b.cdf.return_value = np.array([0.3])\n gm.columns = ['a', 'b']\n gm.univariates = [dist_a, dist_b]\n\n # Run\n data = pd.Series({'a': 3, 'b': 5})\n returned = gm._transform_to_normal(data)\n\n # Check\n # Failures may occurr on different cpytonn implementations\n # with different float precision values.\n # If that happens, atol might need to be increased\n expected = np.array([\n [-5.166579, -0.524401],\n ])\n np.testing.assert_allclose(returned, expected, atol=1e-6)\n\n assert dist_a.cdf.call_count == 1\n expected = np.array([3])\n passed = dist_a.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n assert dist_b.cdf.call_count == 1\n expected = np.array([5])\n passed = dist_b.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n def test__transform_to_normal_dataframe(self):\n # Setup\n gm = GaussianMultivariate()\n dist_a = Mock()\n dist_a.cdf.return_value = np.array([0, 0.5, 1])\n dist_b = Mock()\n dist_b.cdf.return_value = np.array([0.3, 0.5, 0.7])\n gm.columns = ['a', 'b']\n gm.univariates = [dist_a, dist_b]\n\n # Run\n data = pd.DataFrame({\n 'a': [3, 4, 5],\n 'b': [5, 6, 7]\n })\n returned = gm._transform_to_normal(data)\n\n # Check\n # Failures may occurr on different cpytonn implementations\n # with different float precision values.\n # If that happens, atol might need to be increased\n expected = np.array([\n [-5.166579, -0.524401],\n [0.0, 0.0],\n [5.166579, 0.524401]\n ])\n np.testing.assert_allclose(returned, expected, atol=1e-6)\n\n assert dist_a.cdf.call_count == 1\n expected = np.array([3, 4, 5])\n passed = dist_a.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n assert dist_b.cdf.call_count == 1\n expected = np.array([5, 6, 7])\n passed = dist_b.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n def test__get_covariance(self):\n \"\"\"_get_covariance computes the covariance matrix of normalized values.\"\"\"\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n expected_covariance = np.array([\n [1., -0.01261819, -0.19821644],\n [-0.01261819, 1., -0.16896087],\n [-0.19821644, -0.16896087, 1.]\n ])\n\n # Run\n covariance = copula._get_covariance(self.data)\n\n # Check\n assert np.isclose(covariance, expected_covariance).all().all()\n\n def test_fit_default_distribution(self):\n \"\"\"On fit, a distribution is created for each column along the covariance and means\"\"\"\n\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n for i, key in enumerate(self.data.columns):\n assert copula.columns[i] == key\n assert copula.univariates[i].__class__ == GaussianUnivariate\n assert copula.univariates[i]._params['loc'] == self.data[key].mean()\n assert copula.univariates[i]._params['scale'] == np.std(self.data[key])\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()\n\n def test_fit_distribution_arg(self):\n \"\"\"On fit, the distributions for each column use instances of copula.distribution.\"\"\"\n # Setup\n distribution = 'copulas.univariate.gaussian_kde.GaussianKDE'\n copula = GaussianMultivariate(distribution=distribution)\n\n # Run\n copula.fit(self.data)\n\n # Check\n assert copula.distribution == 'copulas.univariate.gaussian_kde.GaussianKDE'\n\n for i, key in enumerate(self.data.columns):\n assert 
copula.columns[i] == key\n assert get_qualified_name(copula.univariates[i].__class__) == copula.distribution\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()\n\n def test_fit_distribution_selector(self):\n \"\"\"\n On fit, it should use the correct distributions for those that are\n specified and default to using the base class otherwise.\n \"\"\"\n copula = GaussianMultivariate(distribution={\n 'column1': 'copulas.univariate.beta.BetaUnivariate',\n 'column2': 'copulas.univariate.gaussian_kde.GaussianKDE',\n })\n copula.fit(self.data)\n\n assert get_qualified_name(\n copula.univariates[0].__class__) == 'copulas.univariate.beta.BetaUnivariate'\n assert get_qualified_name(\n copula.univariates[1].__class__) == 'copulas.univariate.gaussian_kde.GaussianKDE'\n assert get_qualified_name(\n copula.univariates[2].__class__) == 'copulas.univariate.base.Univariate'\n\n def test_fit_numpy_array(self):\n \"\"\"Fit should work indistinctly with numpy arrays and pandas dataframes \"\"\"\n # Setup\n copula = GaussianMultivariate(\n distribution='copulas.univariate.gaussian.GaussianUnivariate')\n\n # Run\n copula.fit(self.data.values)\n\n # Check\n for key, (column, univariate) in enumerate(zip(self.data.columns, copula.univariates)):\n assert univariate._params['loc'] == np.mean(self.data[column])\n assert univariate._params['scale'] == np.std(self.data[column])\n\n expected_covariance = copula._get_covariance(pd.DataFrame(self.data.values))\n assert (copula.covariance == expected_covariance).all().all()\n\n def test_probability_density(self):\n \"\"\"Probability_density computes probability for the given values.\"\"\"\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 0.])\n expected_result = 0.032245296420409846\n\n # Run\n result = copula.probability_density(X)\n\n # Check\n self.assertAlmostEqual(result, expected_result)\n\n def test_cumulative_distribution_fit_df_call_np_array(self):\n \"\"\"Cumulative_density integrates the probability density along the given values.\"\"\"\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()\n\n def test_cumulative_distribution_fit_call_np_array(self):\n \"\"\"Cumulative_density integrates the probability density along the given values.\"\"\"\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.values)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()\n\n def test_cumulative_distribution_fit_call_pd(self):\n \"\"\"Cumulative_density integrates the probability density along the given values.\"\"\"\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.values)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()\n\n @patch('copulas.multivariate.gaussian.np.random.multivariate_normal')\n def test_sample(self, normal_mock):\n \"\"\"Sample use the inverse-transform method to generate new samples.\"\"\"\n # Setup\n instance = 
GaussianMultivariate(GaussianUnivariate)\n data = pd.DataFrame([\n {'A': 25, 'B': 75, 'C': 100},\n {'A': 30, 'B': 60, 'C': 250},\n {'A': 10, 'B': 65, 'C': 350},\n {'A': 20, 'B': 80, 'C': 150},\n {'A': 25, 'B': 70, 'C': 500}\n ])\n instance.fit(data)\n\n normal_mock.return_value = np.array([\n [0.1, 0.1, 0.1],\n [0.2, 0.2, 0.2],\n [0.4, 0.4, 0.4],\n [0.6, 0.6, 0.6],\n [0.8, 0.8, 0.8]\n ])\n\n expected_result = pd.DataFrame([\n {'A': 22.678232998312527, 'B': 70.70710678118655, 'C': 284.35270009440734},\n {'A': 23.356465996625055, 'B': 71.41421356237309, 'C': 298.7054001888146},\n {'A': 24.712931993250110, 'B': 72.82842712474618, 'C': 327.4108003776293},\n {'A': 26.069397989875164, 'B': 74.24264068711929, 'C': 356.116200566444},\n {'A': 27.425863986500215, 'B': 75.65685424949238, 'C': 384.8216007552586}\n ])\n\n # Run\n result = instance.sample(5)\n\n # Check\n assert result.equals(expected_result)\n\n assert normal_mock.called_once_with(\n np.zeros(instance.covariance.shape[0]),\n instance.covariance,\n 5\n )\n\n def test_sample_random_state(self):\n \"\"\"When random_state is set the samples are the same.\"\"\"\n # Setup\n instance = GaussianMultivariate(GaussianUnivariate, random_seed=0)\n data = pd.DataFrame([\n {'A': 25, 'B': 75, 'C': 100},\n {'A': 30, 'B': 60, 'C': 250},\n {'A': 10, 'B': 65, 'C': 350},\n {'A': 20, 'B': 80, 'C': 150},\n {'A': 25, 'B': 70, 'C': 500}\n ])\n instance.fit(data)\n\n expected_result = pd.DataFrame(\n np.array([\n [25.19031668, 61.96527251, 543.43595269],\n [31.50262306, 49.70971698, 429.06537124],\n [20.31636799, 64.3492326, 384.27561823],\n [25.00302427, 72.06019812, 415.85215123],\n [23.07525773, 66.70901743, 390.8226672]\n ]),\n columns=['A', 'B', 'C']\n )\n\n # Run\n result = instance.sample(5)\n\n # Check\n pd.testing.assert_frame_equal(result, expected_result, check_less_precise=True)\n\n def test_to_dict(self):\n \"\"\"To_dict returns the parameters to replicate the copula.\"\"\"\n # Setup\n copula = GaussianMultivariate()\n copula.fit(self.data)\n\n # Run\n result = copula.to_dict()\n\n # Asserts\n assert result['type'] == 'copulas.multivariate.gaussian.GaussianMultivariate'\n assert result['columns'] == ['column1', 'column2', 'column3']\n assert len(result['univariates']) == 3\n\n expected_cov = copula._get_covariance(self.data).tolist()\n np.testing.assert_equal(result['covariance'], expected_cov)\n\n for univariate, result_univariate in zip(copula.univariates, result['univariates']):\n assert univariate.to_dict() == result_univariate\n\n def test_from_dict(self):\n \"\"\"from_dict generates a new instance from its parameters.\"\"\"\n # Setup\n copula = GaussianMultivariate()\n copula.fit(self.data)\n copula_dict = copula.to_dict()\n\n # Run\n new_copula = GaussianMultivariate.from_dict(copula_dict)\n\n # Asserts\n assert isinstance(new_copula, GaussianMultivariate)\n assert new_copula.columns == ['column1', 'column2', 'column3']\n assert len(new_copula.univariates) == 3\n\n for new_univariate, old_univariate in zip(copula.univariates, new_copula.univariates):\n assert new_univariate.to_dict() == old_univariate.to_dict()\n\n def test_sample_constant_column(self):\n \"\"\"Gaussian copula can sample after being fit with a constant column.\n\n This process will raise warnings when computing the covariance matrix\n \"\"\"\n # Setup\n instance = GaussianMultivariate()\n X = np.array([\n [1.0, 2.0],\n [1.0, 3.0],\n [1.0, 4.0],\n [1.0, 5.0]\n ])\n instance.fit(X)\n\n # Run\n result = instance.sample(5)\n\n # Check\n assert result.shape == (5, 2)\n assert 
result[~result.isnull()].all().all()\n assert result.loc[:, 0].equals(pd.Series([1.0, 1.0, 1.0, 1.0, 1.0], name=0))\n\n # This is to check that the samples on the non constant column are not constant too.\n assert len(result.loc[:, 1].unique()) > 1\n\n covariance = instance.covariance\n assert (~pd.isnull(covariance)).all().all()\n", "from unittest import TestCase\n\nimport numpy as np\nfrom scipy.stats import truncnorm\n\nfrom copulas.univariate import GaussianKDE, GaussianUnivariate, TruncatedGaussian\nfrom copulas.univariate.selection import select_univariate\n\n\nclass TestSelectUnivariate(TestCase):\n\n def setUp(self):\n size = 1000\n np.random.seed(42)\n\n # Binary Data\n self.binary_data = np.random.randint(0, 2, size=10000)\n\n # Truncated Normal\n a, b, loc, scale = -1.0, 0.5, 0.0, 1.0\n self.truncated_data = truncnorm.rvs(a, b, loc=loc, scale=scale, size=10000)\n\n # Mixture of Normals\n mask = np.random.normal(size=size) > 0.5\n mode1 = np.random.normal(size=size) * mask\n mode2 = np.random.normal(size=size, loc=10) * (1.0 - mask)\n self.bimodal_data = mode1 + mode2\n\n def test_select_univariate(self):\n \"\"\"\n Suppose the data follows a bimodal distribution. The model selector should be able to\n figure out that the GaussianKDE is best.\n \"\"\"\n model = select_univariate(\n self.bimodal_data, [\n GaussianKDE, GaussianUnivariate, TruncatedGaussian])\n assert isinstance(model, GaussianKDE)\n\n def test_binary(self):\n \"\"\"\n Suppose the data follows a Bernoulli distribution. The KS statistic should be larger\n for a TruncatedGaussian model than a GaussianKDE model which can somewhat capture a\n Bernoulli distribution as it resembles a bimodal distribution.\n \"\"\"\n model = select_univariate(self.binary_data, [GaussianKDE(), TruncatedGaussian()])\n assert isinstance(model, GaussianKDE)\n\n def test_truncated(self):\n \"\"\"\n Suppose the data follows a truncated normal distribution. The KS statistic should be\n larger for a Gaussian model than a TruncatedGaussian model (since the fit is worse).\n \"\"\"\n model = select_univariate(self.truncated_data, [GaussianUnivariate(), TruncatedGaussian()])\n assert isinstance(model, TruncatedGaussian)\n" ]
[ [ "numpy.testing.assert_equal", "pandas.Series", "pandas.isnull", "pandas.DataFrame", "numpy.std", "pandas.testing.assert_frame_equal", "numpy.mean", "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros", "numpy.isclose" ], [ "numpy.random.normal", "scipy.stats.truncnorm.rvs", "numpy.random.seed", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Cheol-H-Jeong/Doridori-Counter
[ "c16da56dbbcccdc24033ddb9435d13506feb8b99" ]
[ "doridori.py" ]
[ "import cv2\nimport mediapipe as mp\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\nfrom scipy.signal import find_peaks\nfrom celluloid import Camera\nfrom tqdm import tqdm\n\nclass Doridori:\n def __init__(self,filepath):\n self.cap = cv2.VideoCapture(filepath)\n self.total_frame = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n self.df = np.array([])\n self.distance_list = np.array([])\n self.peaks = np.array([])\n \n def detect_face(self):\n frame_cnt = 0\n nose_x = list()\n nose_y = list()\n nose_z = list()\n mp_face_mesh = mp.solutions.face_mesh\n with mp_face_mesh.FaceMesh(\n static_image_mode=True,\n max_num_faces=1,\n min_detection_confidence=0.5) as face_mesh:\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret:\n frame_cnt += 1\n results = face_mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n if results.multi_face_landmarks:\n x, y, z = self.__getNose(results.multi_face_landmarks)\n nose_x.append(x)\n nose_y.append(y)\n nose_z.append(z)\n if frame_cnt >= self.total_frame:\n print(\"============End Video============\")\n self.df = np.array([nose_x, nose_y, nose_z]).T\n break\n self.cap.release()\n cv2.destroyAllWindows()\n return self.df\n\n def fit(self, data = np.array([]), threshold=0.004, min_peak_distance = 12, display_mode = True):\n distance_list = list()\n if data.size == 0:\n df = self.df\n else:\n df = data\n for i in range(1, len(df)):\n distance_list.append(distance.euclidean(df[i-1,:], df[i,:]))\n peaks_index = find_peaks(distance_list, distance=min_peak_distance)[0]\n low_peak_index = list()\n for i, j in enumerate (peaks_index):\n if distance_list[j] < threshold:\n low_peak_index.append(i)\n peaks_index= np.delete(peaks_index, low_peak_index)\n print(f\"total_doridori_count : {len(peaks_index)}\")\n peaks = list()\n for i, value in enumerate (distance_list):\n if i in peaks_index:\n peaks.append(value)\n else:\n peaks.append(np.nan)\n if display_mode:\n plt.figure(figsize=(25,8))\n plt.plot(distance_list)\n plt.plot(peaks, 'ro')\n \n self.distance_list = distance_list\n self.peaks = peaks\n \n return len(peaks_index)\n \n def save_video(self, filepath, display_frame = 100, frame_rate = 30.0, video_size=(25,8)):\n fig, ax = plt.subplots(figsize=video_size)\n camera = Camera(fig)\n padding_nan = np.empty(display_frame)\n padding_nan[:] = np.nan\n distance_with_nan = np.concatenate([padding_nan, self.distance_list])\n peaks_with_nan = np.concatenate([padding_nan, self.peaks])\n for i in tqdm(range(display_frame, len(distance_with_nan))):\n ax.plot(distance_with_nan[i-display_frame:i], c='blue')\n ax.plot(peaks_with_nan[i-display_frame:i], 'ro')\n camera.snap()\n print(f\"saving to {filepath}\")\n animation = camera.animate(interval=1000.0/frame_rate)\n animation.save(filepath)\n plt.close(fig)\n \n def __getNose(self, landmarks):\n x = 0\n y = 0\n z = 0\n landmark = list(landmarks)\n for mark in landmark:\n x = mark.landmark[0].x\n y = mark.landmark[0].y\n z = mark.landmark[0].z\n return x, y, z\n " ]
[ [ "scipy.signal.find_peaks", "matplotlib.pyplot.subplots", "scipy.spatial.distance.euclidean", "numpy.concatenate", "matplotlib.pyplot.plot", "numpy.delete", "matplotlib.pyplot.close", "numpy.array", "numpy.empty", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "1.5", "1.7", "1.2", "1.8" ], "tensorflow": [] } ]
MobTgZhang/CIAlgorithms
[ "3aa1b249f526d75fb8e9bf7f37516f18a025d50a" ]
[ "ACO/ACO.py" ]
[ "\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport urllib.request\nimport os\nimport time\ndef download(root_path,filename):\n    if not os.path.exists(root_path):\n        os.mkdir(root_path)\n    if not os.path.exists(os.path.join(root_path,filename)):\n        url = \"http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/\"+filename\n        urllib.request.urlretrieve(url,os.path.join(root_path,filename))\n        print(\"The data set: %s downloaded!\"%os.path.join(root_path,filename))\n    else:\n        print(\"The data set: %s already has downloaded!\"%os.path.join(root_path,filename))\ndef get_data(filename):\n    data_list = []\n    with open(filename,mode=\"r\") as f:\n        flag = False\n        while True:\n            line = f.readline()\n            if \"EOF\" in line:\n                break\n            elif \"NODE_COORD_SECTION\" in line:\n                flag = True\n            elif flag:\n                tmp = line.strip().split(\" \")\n                data_list.append([float(item) for item in tmp])\n    return np.array(data_list)\nclass ACO:\n    def __init__(self,ant_num,alpha,beta,rho,Q,epoches):\n        self.ant_num = ant_num\n        self.alpha = alpha\n        self.beta = beta\n        self.rho = rho\n        self.Q = Q\n        self.epoches = epoches\n        self.citys_mat = None\n        self.E_best = None\n        self.sol_best = None\n        self.length_list = None\n        self.name = time.strftime(\"%Y%m%d%H%M\", time.localtime(time.time()))\n    def solve(self,citys_mat):\n        self.citys_mat = citys_mat\n        citys_num = citys_mat.shape[0]\n        # Build the adjacency (distance) matrix of the cities\n        citys_x = citys_mat[:, 0].reshape(citys_num, 1).dot(np.ones((1, citys_num)))\n        citys_y = citys_mat[:, 1].reshape(citys_num, 1).dot(np.ones((1, citys_num)))\n        citys_distance = np.sqrt(np.square(citys_x - citys_x.T) + np.square(citys_y - citys_y.T))\n        # Initialize the heuristic function\n        Heu_f = 1.0/(citys_distance + np.diag([np.inf] * citys_num))\n        # Pheromone matrix\n        Tau_table = np.ones((citys_num,citys_num))\n        # Route record table of every ant in each iteration\n        Route_table = np.zeros((self.ant_num,citys_num),dtype=np.int)\n        # Best route of each iteration\n        Route_best = np.zeros((self.epoches,citys_num),dtype=np.int)\n        # Record of the best route length in each iteration\n        Length_best = np.zeros(self.epoches)\n        # Average route length of the ants in each iteration\n        Length_average = np.zeros(self.epoches)\n        # Current route length of each ant\n        Length_current = np.zeros(self.ant_num)\n        iter = 0\n        while iter <self.epoches:\n            # Generate the set of cities\n            # Randomly choose the starting city of each ant\n            Route_table[:,0]= self.randseed(citys_num)\n            # Update the pheromone\n            Delta_tau = np.zeros((citys_num, citys_num))\n            for k in range(self.ant_num):\n                # Set of cities the ant may visit next\n                # Cities the ant has already visited\n                tabu = [Route_table[k,0]]\n                allow_set = list(set(range(citys_num))-set(tabu))\n                city_index = Route_table[k,0]\n                for i in range(1,citys_num):\n                    # Initialize the transition probabilities between cities\n                    P_table = np.zeros(len(allow_set))\n                    # Compute the transition probabilities between cities\n                    for j in range(len(allow_set)):\n                        P_table[j] = np.power(Tau_table[city_index,allow_set[j]],self.alpha)*\\\n                            np.power(Heu_f[city_index,allow_set[j]],self.beta)\n                    P_table = P_table/np.sum(P_table)\n\n                    # Roulette-wheel selection of the next city to visit\n                    #out_prob = np.cumsum(P_table)\n                    while True:\n                        r = np.random.rand()\n                        index_need = np.where(P_table > r)[0]\n                        if len(index_need) >0:\n                            city_index2 = allow_set[index_need[0]]\n                            break\n                    Route_table[k,i] = city_index2\n                    tabu.append(city_index2)\n                    allow_set = list(set(range(0,citys_num))-set(tabu))\n                    city_index = city_index2\n                tabu.append(tabu[0])\n                # Compute the distance of the ant's route\n                for j in range(citys_num):\n                    Length_current[k] = Length_current[k] + citys_distance[tabu[j],tabu[j+1]]\n                for j in range(citys_num):\n                    Delta_tau[tabu[j],tabu[j+1]] = Delta_tau[tabu[j],tabu[j+1]] + self.Q / Length_current[k]\n            # Determine the shortest route, its length and the average route length\n            Length_best[iter] = np.min(Length_current)\n            index = np.where(Length_current == np.min(Length_current))[0][0]\n            Route_best[iter] = Route_table[index]\n            Length_average[iter] = np.mean(Length_current)\n            # Update the pheromone\n            Tau_table = (1-self.rho)*Tau_table + Delta_tau\n            #Route_table = np.zeros((self.ant_num,citys_num),dtype=np.int)\n            Length_current = np.zeros(self.ant_num)\n\n            print(\"epoches:%d,best value every epoches%.4f\"%(iter, Length_best[iter]))\n            iter = iter + 1\n        self.E_best = np.min(Length_best)\n        index = np.where(Length_best == np.min(Length_best))[0][0]\n        self.sol_best = Route_table[index]\n        self.length_list = Length_average\n    def randseed(self,citys_num):\n        if self.ant_num <citys_num:\n            initial_route = np.random.permutation(range(citys_num))[:self.ant_num]\n        else:\n            initial_route = np.zeros((self.ant_num,))\n            initial_route[:citys_num] = np.random.permutation(range(citys_num))\n            tmp_index = citys_num\n            while tmp_index + citys_num <= self.ant_num:\n                initial_route[tmp_index:citys_num + tmp_index] = np.random.permutation(range(citys_num))\n                tmp_index += citys_num\n            tmp_left = self.ant_num % citys_num\n            if tmp_left != 0:\n                initial_route[tmp_index:] = np.random.permutation(range(citys_num))[:tmp_left]\n        return initial_route\n    def draw(self):\n        print(self.sol_best)\n        print(self.E_best)\n        if not os.path.exists(\"log\"):\n            os.mkdir(\"log\")\n        # draw loss\n        x = np.linspace(0, len(self.length_list) - 1, len(self.length_list))\n        y = np.array(self.length_list)\n        plt.plot(x, y)\n        plt.title(label=\"loss\")\n        plt.savefig(os.path.join(\"log\", \"%s_loss.png\" % self.name))\n        plt.close()\n        # draw dots\n        for k in range(0, len(self.sol_best) - 1):\n            start = self.citys_mat[self.sol_best[k]]\n            end = self.citys_mat[self.sol_best[k + 1]]\n            plt.plot(start[0], start[1], \"bo\")\n            plt.plot(end[0], end[1], \"bo\")\n            plt.arrow(start[0], start[1], end[0] - start[0], end[1] - start[1],\n                      length_includes_head=True, head_width=0.2, head_length=0.3, lw=1,\n                      color=\"r\")\n        start = self.citys_mat[self.sol_best[-1]]\n        end = self.citys_mat[self.sol_best[0]]\n        plt.plot(start[0], start[1], \"bo\")\n        plt.plot(end[0], end[1], \"bo\")\n        plt.arrow(start[0], start[1], end[0] - start[0], end[1] - start[1],\n                  length_includes_head=True, head_width=0.2, head_length=0.3, lw=1,\n                  color=\"r\")\n        plt.title(label=\"length:%.2f\" % (self.E_best))\n        plt.savefig(os.path.join(\"log\", \"%s_route.png\" % self.name))\n        plt.show()\ndef main():\n    filename = \"eil51.tsp\"\n    root_path = \"data\"\n    download(root_path,filename)\n    data_list = get_data(os.path.join(root_path,filename))\n    ant_num = 500\n    alpha = 1\n    beta = 5\n    rho = 0.2\n    Q = 10\n    epoches = 20\n    model = ACO(ant_num, alpha, beta, rho, Q, epoches)\n    model.solve(data_list[:,1:])\n    model.draw()\nif __name__ == '__main__':\n    main()\n\n" ]
[ [ "numpy.square", "numpy.diag", "matplotlib.pyplot.title", "numpy.min", "numpy.power", "numpy.ones", "matplotlib.pyplot.plot", "numpy.mean", "numpy.random.rand", "matplotlib.pyplot.close", "numpy.where", "numpy.array", "numpy.zeros", "numpy.sum", "matplotlib.pyplot.show", "matplotlib.pyplot.arrow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
uwe-iben/torchphysics
[ "775d9aca71752a568f1fca972c958b99107f3b7c", "775d9aca71752a568f1fca972c958b99107f3b7c" ]
[ "src/torchphysics/utils/user_fun.py", "experiments/heat_equation/heat_equation_D_variable.py" ]
[ "\"\"\"Contains a class which extracts the needed arguments of an arbitrary \nmethode/function and wraps them for future usage. E.g correctly choosing \nthe needed arguments and passing them on to the original function.\n\"\"\"\nimport inspect\nimport copy \nimport torch\n\nfrom ..problem.spaces.points import Points\n\n\nclass UserFunction:\n \"\"\"Wraps a function, so that it can be called with arbitrary input arguments.\n \n Parameters\n ----------\n fun : callable\n The original function that should be wrapped.\n defaults : dict, optional\n Possible defaults arguments of the function. If none are specified will\n check by itself if there are any. \n args : dict, optional\n All arguments of the function. If none are specified will\n check by itself if there are any. \n\n Notes\n -----\n Uses inspect.getfullargspec(fun) to get the possible input arguments.\n When called just extracts the needed arguments and passes them to the \n original function. \n \"\"\"\n def __init__(self, fun, defaults={}, args={}):\n if isinstance(fun, (UserFunction, DomainUserFunction)):\n self.fun = fun.fun\n self.defaults = fun.defaults\n self.args = fun.args\n else:\n self._transform_to_user_function(fun, defaults, args)\n\n def _transform_to_user_function(self, fun, defaults, args):\n self.fun = fun\n self.defaults = defaults\n self.args = args\n if callable(self.fun) and self.defaults == {} and self.args == {}:\n self._set_input_args_for_function()\n\n def _set_input_args_for_function(self):\n f_args = inspect.getfullargspec(self.fun).args\n\n # we check that the function defines all needed parameters\n if inspect.getfullargspec(self.fun).varargs is not None or \\\n inspect.getfullargspec(self.fun).varkw is not None:\n raise ValueError(\"\"\"\n Variable arguments are not supported in\n UserFunctions. Please use keyword arguments.\n \"\"\")\n\n f_defaults = inspect.getfullargspec(self.fun).defaults\n f_kwonlyargs = inspect.getfullargspec(self.fun).kwonlyargs\n #f_kwonlydefaults = inspect.getfullargspec(self.fun).kwonlydefaults\n # NOTE: By above check, there should not be kwonlyargs. However, we still catch\n # this case here.\n self.args = f_args + f_kwonlyargs\n\n # defaults always align at the end of the args\n self.defaults = {}\n if not f_defaults is None:\n self.defaults = {self.args[-i]: f_defaults[-i] \n for i in range(len(f_defaults), 0, -1)}\n #if not f_kwonlydefaults is None:\n # self.defaults.update(f_kwonlydefaults)\n\n def __call__(self, args={}, vectorize=False):\n \"\"\"To evalute the function. 
Will automatically extract the needed arguments \n from the input data and will set the possible default values.\n\n Parameters\n ----------\n args : dict or torchphysics.Points\n The input data, where the function should be evaluated.\n vectorize : bool, optional\n If the original function can work with a batch of data, or\n a loop needs to be used to evaluate the function.\n default is False, which means that we assume the function\n can work with a batch of data.\n\n Returns\n -------\n torch.tensor\n The output values of the function.\n \"\"\"\n if isinstance(args, Points):\n args = args.coordinates\n # check that every necessary arg is given\n for key in self.necessary_args:\n assert key in args, \\\n f\"The argument '{key}' is necessary in {self.__name__} but not given.\"\n # if necessary, pass defaults\n inp = {key: args[key] for key in self.args if key in args}\n inp.update({key: self.defaults[key] for key in self.args if key not in args})\n if not vectorize:\n return self.evaluate_function(**inp)\n else:\n return self.apply_to_batch(inp)\n\n def evaluate_function(self, **inp):\n \"\"\"Evaluates the original input function. Should not be used directly, \n rather use the call-methode.\n \"\"\"\n if callable(self.fun):\n return self.fun(**inp)\n return self.fun\n\n def apply_to_batch(self, inp):\n \"\"\"Apply the function to a batch of elements by running a for-loop.\n we assume that all inputs either have batch (i.e. maximum) dimension or\n are a constant param.\n\n Parameters\n ----------\n inp : torchphysics.points\n The Points-object of the input data\n\n Returns\n -------\n torch.tensor\n The output values of the function, for each input.\n\n \"\"\"\n batch_size = max(len(inp[key]) for key in inp)\n out = []\n for i in range(batch_size):\n inp_i = {}\n for key in inp:\n if len(inp[key]) == batch_size:\n inp_i[key] = inp[key][i]\n else:\n inp_i[key] = inp[key]\n o = self.fun(**inp_i)\n if o is not None:\n out.append(o)\n return out\n\n def partially_evaluate(self, **args):\n \"\"\"(partially) evaluates a given function.\n\n Parameters\n ----------\n **args :\n The arguments where the function should be (partially) evaluated.\n\n Returns\n -------\n Out : value or UserFunction\n If the input arguments are enough to evalate the whole function, the \n corresponding output is returned. \n If some needed arguments are missing, a copy of this UserFunction will \n be returned. 
Whereby the values of **args will be added to the \n default values of the returned UserFunction.\n \"\"\"\n if callable(self.fun):\n if all(arg in args for arg in self.necessary_args):\n inp = {key: args[key] for key in self.args if key in args}\n inp.update({key: self.defaults[key] for key in self.args if key not in args})\n return self.fun(**inp)\n else:\n # to avoid manipulation of given param obj, we create a copy\n copy_self = copy.deepcopy(self)\n copy_self.set_default(**args)\n return copy_self\n return self.fun\n\n def __name__(self):\n \"\"\"The name of the function\n\n Returns\n -------\n str\n The name of the function\n \"\"\"\n return self.fun.__name__\n\n def set_default(self, **args):\n \"\"\"Sets a input argument to given value.\n\n Parameters\n ----------\n **args:\n The value the input should be set to.\n \"\"\"\n self.defaults.update({key: args[key] for key in args if key in self.args})\n\n def remove_default(self, *args, **kwargs):\n \"\"\"Removes an default value of a input argument.\n\n Parameters\n ----------\n *args, **kwargs:\n The arguments for which the default values should be deleted.\n \"\"\"\n for key in args:\n self.defaults.pop(key)\n for key in kwargs.keys():\n self.defaults.pop(key)\n\n def __deepcopy__(self, memo):\n \"\"\"Creates a copy of the function\n \"\"\"\n cls = self.__class__\n copy_object = cls.__new__(cls, self.fun)\n memo[id(self)] = copy_object\n for k, v in self.__dict__.items():\n setattr(copy_object, k, copy.deepcopy(v, memo))\n return copy_object\n\n @property\n def necessary_args(self):\n \"\"\"Returns the function arguments that are needed to evaluate this function.\n\n Returns\n -------\n list :\n The needed arguments.\n \"\"\"\n return [arg for arg in self.args if arg not in self.defaults]\n\n @property\n def optional_args(self):\n \"\"\"Returns the function arguments that are optional to evaluate this function.\n\n Returns\n -------\n list :\n The optional arguments.\n \"\"\"\n return [arg for arg in self.args if arg in self.defaults]\n\n\nclass DomainUserFunction(UserFunction):\n \"\"\"Extension of the original UserFunctions, that are used in the Domain-Class.\n \n Parameters\n ----------\n fun : callable\n The original function that should be wrapped.\n defaults : dict, optional\n Possible defaults arguments of the function. If none are specified will\n check by itself if there are any. \n args : dict, optional\n All arguments of the function. If none are specified will\n check by itself if there are any. \n\n Notes\n -----\n The only difference to normal UserFunction is how the evaluation \n of the original function is handled. Since all Domains use Pytorch, \n we check that the output always is a torch.tensor. In the case that the function\n is not constant, we also append an extra dimension to the output, so that the \n domains can work with it correctly. \n \"\"\"\n def __call__(self, args={}, device='cpu'):\n \"\"\"To evalute the function. 
Will automatically extract the needed arguments \n from the input data and will set the possible default values.\n\n Parameters\n ----------\n args : dict or torchphysics.Points\n The input data, where the function should be evaluated.\n device : str, optional\n The device on which the output of th efunction values should lay.\n Default is 'cpu'.\n\n Returns\n -------\n torch.tensor\n The output values of the function.\n \"\"\"\n if isinstance(args, Points):\n args = args.coordinates\n if len(args) != 0: # set the device correctly\n device = args[list(args.keys())[0]].device\n # check that every necessary arg is given\n for key in self.necessary_args:\n assert key in args, \\\n f\"The argument '{key}' is necessary in {self.__name__} but not given.\"\n # if necessary, pass defaults\n inp = {key: args[key] for key in self.args if key in args}\n inp.update({key: self.defaults[key] for key in self.args if key not in args})\n return self.evaluate_function(device=device, **inp)\n\n def evaluate_function(self, device='cpu', **inp):\n \"\"\"Evaluates the original input function. Should not be used directly, \n rather use the call-methode.\n\n Parameters\n ----------\n device : str, optional\n The device on which the output of th efunction values should lay.\n Default is 'cpu'.\n inp \n The input values.\n \"\"\"\n if callable(self.fun):\n fun_eval = self.fun(**inp)\n if not isinstance(fun_eval, torch.Tensor):\n fun_eval = torch.tensor(fun_eval, device=device)\n return fun_eval[:, None]\n else:\n if isinstance(self.fun, torch.Tensor):\n self.fun = self.fun.to(device)\n return self.fun\n else: \n return torch.tensor(self.fun, device=device).float()", "\"\"\"Example script to check how good the model\ncan approximate solutions with different thermal conductivity D\n\"\"\"\nimport os\nimport json\n\nimport torch\nimport numpy as np\nimport pytorch_lightning as pl\nfrom timeit import default_timer as timer\n\nfrom torchphysics.problem import (Variable,\n Setting)\nfrom torchphysics.problem.domain import (Rectangle,\n Interval)\nfrom torchphysics.problem.condition import (DirichletCondition,\n DiffEqCondition,\n DataCondition)\nfrom torchphysics.models import SimpleFCN\nfrom torchphysics import PINNModule\nfrom torchphysics.utils import laplacian, grad\nfrom torchphysics.utils.fdm import FDM, create_validation_data\nfrom torchphysics.utils.plot import Plotter\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\n#pl.seed_everything(43)\n\nw, h = 50, 50\nt0, tend = 0, 1\ntemp_hot = 10\nD_low, D_up = 5, 25 # set here the interval boundary for D\n\nx = Variable(name='x',\n order=2,\n domain=Rectangle(corner_dl=[0, 0],\n corner_dr=[w, 0],\n corner_tl=[0, h]),\n train_conditions={},\n val_conditions={})\nt = Variable(name='t',\n order=1,\n domain=Interval(low_bound=0,\n up_bound=tend),\n train_conditions={},\n val_conditions={})\nD = Variable(name='D',\n order=0,\n domain=Interval(low_bound=D_low,\n up_bound=D_up),\n train_conditions={},\n val_conditions={})\n\n\ndef x_dirichlet_fun(input):\n return torch.zeros_like(input['t'])\n\n\nnorm = torch.nn.MSELoss()\n\n\nx.add_train_condition(DirichletCondition(dirichlet_fun=x_dirichlet_fun,\n name='dirichlet',\n norm=norm,\n batch_size=500,\n dataset_size=500,\n num_workers=4,\n data_plot_variables=('x','t')))\n\n\ndef t_dirichlet_fun(input):\n return temp_hot*torch.sin(np.pi/w*input['x'][:, :1])*torch.sin(np.pi/h*input['x'][:, 1:])\n\n\nt.add_train_condition(DirichletCondition(dirichlet_fun=t_dirichlet_fun,\n name='dirichlet',\n norm=norm,\n batch_size=500,\n 
dataset_size=500,\n num_workers=4,\n boundary_sampling_strategy='lower_bound_only',\n data_plot_variables=('x','t')))\n\n\ndef pde(u, input):\n return grad(u, input['t']) - input['D']*laplacian(u, input['x'])\n\n\ntrain_cond = DiffEqCondition(pde=pde,\n norm=norm,\n batch_size=5000,\n dataset_size=5000,\n num_workers=8,\n data_plot_variables=('x','t'))\n\n# FDM:\ndomain_dic = {'x': [[0, w], [0, h]]}\ndx, dy = 0.5, 0.5\nstep_width_dict = {'x': [dx, dy]}\ntime_interval = [t0, tend]\n\n\ndef inital_condition(input):\n return temp_hot * np.sin(np.pi/w*input['x'][:, :1]) * np.sin(np.pi/h*input['x'][:, 1:])\n\n\nD_list = [5, 10, 15, 20, 25]\n# ^Here you can add many different values for D, e.g [18.8,2.5,20,....]\n# The FDM-Methode will compute solutions for all D.\n# For too many D this will become really memory expensive, since\n# the FDM uses a forward euler!\nfdm_start = timer()\ndomain, time, u = FDM(domain_dic, step_width_dict, time_interval,\n D_list, inital_condition)\nfdm_end = timer()\nprint('Time for FDM-Solution:', fdm_end-fdm_start)\ndata_x, data_u = create_validation_data(domain, time, u, D_list, D_is_input=True)\n# True: if D is input of the model\n\n\nclass InfNorm(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, input_a, input_b):\n return torch.max(torch.abs(input_a-input_b))\n\n\nmax_norm = InfNorm()\n\nval_cond = DataCondition(data_x=data_x,\n data_u=data_u,\n name='validation',\n norm=norm,\n batch_size=len(data_u[:, 0])//100,\n num_workers=16)\n\nsetup = Setting(variables=(x, t, D),\n train_conditions={'pde': train_cond},\n val_conditions={'validation': val_cond})\n\nplotter = Plotter(plot_variables=setup.variables['x'],\n points=400,\n dic_for_other_variables={'t': 1.0, 'D': 15.0},\n all_variables=setup.variables,\n log_interval=10)\n\nsolver = PINNModule(model=SimpleFCN(input_dim=4), # TODO: comput input_dim in setting\n problem=setup,\n #optimizer=torch.optim.Adam,\n #lr=1e-3,\n #log_plotter=plotter\n )\n\n#print(json.dumps(solver.serialize(), indent=2))\n\ntrainer = pl.Trainer(gpus=None,#'-1',\n #accelerator='ddp',\n #plugins=pl.plugins.DDPPlugin(find_unused_parameters=False),\n num_sanity_val_steps=2,\n check_val_every_n_epoch=100,\n log_every_n_steps=1,\n max_epochs=10000,\n # limit_val_batches=10, # The validation dataset is probably pretty big,\n # so you need to see how much you want to\n # check every validation\n # checkpoint_callback=False)\n )\n\ntrainer.fit(solver)\n" ]
[ [ "torch.tensor" ], [ "torch.abs", "torch.sin", "torch.zeros_like", "numpy.sin", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DevRx28/pokemon-type
[ "2f62d4b88856dcd9aff79bdda993a4ddc093d7b7" ]
[ "prepro.py" ]
[ "import numpy as np\nfrom scipy.optimize import fmin_l_bfgs_b\nimport time\nimport argparse\nimport cv2\nfrom tensorflow.keras.models import load_model\nimport csv \nimport sys\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nfrom keras.preprocessing.image import img_to_array\n\n\nimg = cv2.imread('pokemonimages/Groudon.jpg',cv2.COLOR_BGR2RGB)\nprint (img.shape)\nim = Image.open(\"pokemonimages/Groudon.jpg\")\nim1 = im.resize((200,200))\n#im1= img_to_array(im1, dtype='uint8')\nprint(im1)\n\n\n\ndef remove_transparency(im, bg_colour=(255, 255, 255)):\n\n    # Only process if image has transparency (http://stackoverflow.com/a/1963146)\n    if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):\n\n        # Need to convert to RGBA if LA format due to a bug in PIL (http://stackoverflow.com/a/1963146)\n        alpha = im.convert('RGBA').split()[-1]\n\n        # Create a new background image of our matt color.\n        # Must be RGBA because paste requires both images have the same format\n        # (http://stackoverflow.com/a/8720632 and http://stackoverflow.com/a/9459208)\n        bg = Image.new(\"RGBA\", im.size, bg_colour + (255,))\n        bg.paste(im, mask=alpha)\n        return bg\n\n    else:\n        return im\n\ny=remove_transparency(im1)\n\ny=y.convert(\"RGB\")\nprint(\"rgb\")\ny.show()\ny= img_to_array(y, dtype='uint8')\nprint(y.shape)\n\n\n\n#img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n\n\n\n\nmask = np.zeros(img.shape[:2],np.uint8)\n\nbgdModel = np.zeros((1,65),np.float64)\n\nfgdModel = np.zeros((1,65),np.float64)\nheight, width = img.shape[:2]\n\nrect = (0,0,width-10,height-10)\ncv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)\n\nmask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')\nimgnew= img*mask2[:,:,np.newaxis]\nbackground=img-imgnew\nbackground[np.where((background>[0,0,0]).all(axis=2))]=[255,255,255]\n\nfinal=background+imgnew\n#print mask2\n\n#plt.imshow(final)\n#plt.show()" ]
[ [ "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vishalbelsare/lasso-python
[ "319bf590599b4a4d50d9345e83e8030afe044aec" ]
[ "lasso/dyna/FemzipMapper.py" ]
[ "\r\nimport logging\r\nimport re\r\nimport traceback\r\nfrom typing import Dict, List, Set, Tuple, Union\r\n\r\nimport numpy as np\r\nfrom lasso.dyna.ArrayType import ArrayType\r\nfrom lasso.femzip.femzip_api import FemzipAPI, FemzipFileMetadata, VariableInfo\r\nfrom lasso.femzip.fz_config import (FemzipArrayType, FemzipVariableCategory,\r\n get_last_int_of_line)\r\n\r\nTRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE: Dict[Tuple[FemzipArrayType, FemzipVariableCategory], Set[str]] = {\r\n # GLOBAL\r\n (FemzipArrayType.global_data, FemzipVariableCategory.GLOBAL): {\r\n # ArrayType.global_timesteps,\r\n ArrayType.global_internal_energy,\r\n ArrayType.global_kinetic_energy,\r\n ArrayType.global_total_energy,\r\n ArrayType.global_velocity,\r\n },\r\n # PART\r\n (FemzipArrayType.part_results, FemzipVariableCategory.PART): {\r\n ArrayType.part_hourglass_energy,\r\n ArrayType.part_internal_energy,\r\n ArrayType.part_kinetic_energy,\r\n ArrayType.part_mass,\r\n ArrayType.part_velocity,\r\n },\r\n # NODE\r\n (FemzipArrayType.node_displacement, FemzipVariableCategory.NODE): {\r\n ArrayType.node_displacement\r\n },\r\n (FemzipArrayType.node_accelerations, FemzipVariableCategory.NODE): {\r\n ArrayType.node_acceleration\r\n },\r\n (FemzipArrayType.node_velocities, FemzipVariableCategory.NODE): {\r\n ArrayType.node_velocity\r\n },\r\n (FemzipArrayType.node_temperatures, FemzipVariableCategory.NODE): {\r\n ArrayType.node_temperature\r\n },\r\n (FemzipArrayType.node_heat_flux, FemzipVariableCategory.NODE): {\r\n ArrayType.node_heat_flux\r\n },\r\n (FemzipArrayType.node_mass_scaling, FemzipVariableCategory.NODE): {\r\n ArrayType.node_mass_scaling\r\n },\r\n (FemzipArrayType.node_temperature_gradient, FemzipVariableCategory.NODE): {\r\n ArrayType.node_temperature_gradient\r\n },\r\n # BEAM\r\n (FemzipArrayType.beam_axial_force, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_axial_force\r\n },\r\n (FemzipArrayType.beam_s_bending_moment, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_bending_moment\r\n },\r\n (FemzipArrayType.beam_t_bending_moment, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_bending_moment\r\n },\r\n (FemzipArrayType.beam_s_shear_resultant, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_shear_force\r\n },\r\n (FemzipArrayType.beam_t_shear_resultant, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_shear_force\r\n },\r\n (FemzipArrayType.beam_torsional_moment, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_torsion_moment\r\n },\r\n (FemzipArrayType.beam_axial_stress, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_axial_stress\r\n },\r\n (FemzipArrayType.beam_shear_stress_rs, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_shear_stress\r\n },\r\n (FemzipArrayType.beam_shear_stress_tr, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_shear_stress\r\n },\r\n (FemzipArrayType.beam_plastic_strain, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_plastic_strain\r\n },\r\n (FemzipArrayType.beam_axial_strain, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_axial_strain\r\n },\r\n # SHELL\r\n (FemzipArrayType.stress_x, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.stress_y, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.stress_z, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.stress_xy, FemzipVariableCategory.SHELL): {\r\n 
ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.stress_yz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.stress_xz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.eff_pstrain, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_effective_plastic_strain\r\n },\r\n (FemzipArrayType.history_vars, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_history_vars\r\n },\r\n (FemzipArrayType.bending_moment_mx, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_bending_moment\r\n },\r\n (FemzipArrayType.bending_moment_my, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_bending_moment\r\n },\r\n (FemzipArrayType.bending_moment_mxy, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_bending_moment\r\n },\r\n (FemzipArrayType.shear_force_x, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_shear_force\r\n },\r\n (FemzipArrayType.shear_force_y, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_shear_force\r\n },\r\n (FemzipArrayType.normal_force_x, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_normal_force\r\n },\r\n (FemzipArrayType.normal_force_y, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_normal_force\r\n },\r\n (FemzipArrayType.normal_force_xy, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_normal_force\r\n },\r\n (FemzipArrayType.thickness, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_thickness\r\n },\r\n (FemzipArrayType.unknown_1, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_unknown_variables\r\n },\r\n (FemzipArrayType.unknown_2, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_unknown_variables\r\n },\r\n (FemzipArrayType.strain_inner_x, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_inner_y, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_inner_z, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_inner_xy, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_inner_yz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_inner_xz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_x, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_y, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_z, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_xy, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_yz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_xz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.internal_energy, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_internal_energy\r\n },\r\n # THICK SHELL\r\n ((FemzipArrayType.stress_x, FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n ((FemzipArrayType.stress_y, FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n ((FemzipArrayType.stress_z, 
FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n ((FemzipArrayType.stress_xy, FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n ((FemzipArrayType.stress_yz, FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n ((FemzipArrayType.stress_xz, FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n (FemzipArrayType.eff_pstrain, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_effective_plastic_strain\r\n },\r\n (FemzipArrayType.strain_outer_x, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_outer_y, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_outer_z, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_outer_xy, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_outer_yz, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_outer_xz, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_x, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_y, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_z, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_xy, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_yz, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_xz, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n # SOLID\r\n (FemzipArrayType.stress_x, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.stress_y, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.stress_z, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.stress_xy, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.stress_yz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.stress_xz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.eff_pstrain, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_effective_plastic_strain\r\n },\r\n (FemzipArrayType.history_vars, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_history_variables\r\n },\r\n (FemzipArrayType.strain_x, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_y, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_z, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_xy, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_yz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_xz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n 
(FemzipArrayType.strain_x, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_y, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_z, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_xy, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_yz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_xz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n # AIRBAG\r\n (FemzipArrayType.airbag_state_geom, FemzipVariableCategory.CPM_AIRBAG): {\r\n ArrayType.airbag_n_active_particles,\r\n ArrayType.airbag_bag_volume,\r\n },\r\n # AIRBAG PARTICLES\r\n (FemzipArrayType.airbag_particle_gas_chamber_id, FemzipVariableCategory.CPM_INT_VAR): {\r\n ArrayType.airbag_particle_gas_id\r\n },\r\n (FemzipArrayType.airbag_particle_chamber_id, FemzipVariableCategory.CPM_INT_VAR): {\r\n ArrayType.airbag_particle_chamber_id\r\n },\r\n (FemzipArrayType.airbag_particle_leakage, FemzipVariableCategory.CPM_INT_VAR): {\r\n ArrayType.airbag_particle_leakage\r\n },\r\n (FemzipArrayType.airbag_particle_mass, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_mass\r\n },\r\n (FemzipArrayType.airbag_particle_pos_x, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_position\r\n },\r\n (FemzipArrayType.airbag_particle_pos_y, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_position\r\n },\r\n (FemzipArrayType.airbag_particle_pos_z, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_position\r\n },\r\n (FemzipArrayType.airbag_particle_vel_x, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_velocity\r\n },\r\n (FemzipArrayType.airbag_particle_vel_y, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_velocity\r\n },\r\n (FemzipArrayType.airbag_particle_vel_z, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_velocity\r\n },\r\n (FemzipArrayType.airbag_particle_radius, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_radius\r\n },\r\n (FemzipArrayType.airbag_particle_spin_energy, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_spin_energy\r\n },\r\n (FemzipArrayType.airbag_particle_tran_energy, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_translation_energy\r\n },\r\n (FemzipArrayType.airbag_particle_neighbor_dist, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_nearest_segment_distance\r\n },\r\n}\r\n\r\n# indexes for various femzip arrays\r\nstress_index = {\r\n FemzipArrayType.stress_x.value: 0,\r\n FemzipArrayType.stress_y.value: 1,\r\n FemzipArrayType.stress_z.value: 2,\r\n FemzipArrayType.stress_xy.value: 3,\r\n FemzipArrayType.stress_yz.value: 4,\r\n FemzipArrayType.stress_xz.value: 5,\r\n FemzipArrayType.normal_force_x.value: 0,\r\n FemzipArrayType.normal_force_y.value: 1,\r\n FemzipArrayType.normal_force_xy.value: 2,\r\n FemzipArrayType.shear_force_x.value: 0,\r\n FemzipArrayType.shear_force_y.value: 1,\r\n FemzipArrayType.strain_inner_x.value: 0,\r\n FemzipArrayType.strain_inner_y.value: 1,\r\n FemzipArrayType.strain_inner_z.value: 2,\r\n FemzipArrayType.strain_inner_xy.value: 3,\r\n FemzipArrayType.strain_inner_yz.value: 4,\r\n FemzipArrayType.strain_inner_xz.value: 5,\r\n 
FemzipArrayType.strain_outer_x.value: 0,\r\n FemzipArrayType.strain_outer_y.value: 1,\r\n FemzipArrayType.strain_outer_z.value: 2,\r\n FemzipArrayType.strain_outer_xy.value: 3,\r\n FemzipArrayType.strain_outer_yz.value: 4,\r\n FemzipArrayType.strain_outer_xz.value: 5,\r\n FemzipArrayType.beam_s_shear_resultant.value: 0,\r\n FemzipArrayType.beam_t_shear_resultant.value: 1,\r\n FemzipArrayType.beam_s_bending_moment.value: 0,\r\n FemzipArrayType.beam_t_bending_moment.value: 1,\r\n\r\n FemzipArrayType.strain_x.value: 0,\r\n FemzipArrayType.strain_y.value: 1,\r\n FemzipArrayType.strain_z.value: 2,\r\n FemzipArrayType.strain_xy.value: 3,\r\n FemzipArrayType.strain_yz.value: 4,\r\n FemzipArrayType.strain_xz.value: 5,\r\n\r\n FemzipArrayType.beam_shear_stress_rs.value: 0,\r\n FemzipArrayType.beam_shear_stress_tr.value: 1,\r\n\r\n FemzipArrayType.airbag_particle_pos_x.value: 0,\r\n FemzipArrayType.airbag_particle_pos_y.value: 1,\r\n FemzipArrayType.airbag_particle_pos_z.value: 2,\r\n FemzipArrayType.airbag_particle_vel_x.value: 0,\r\n FemzipArrayType.airbag_particle_vel_y.value: 1,\r\n FemzipArrayType.airbag_particle_vel_z.value: 2,\r\n\r\n FemzipArrayType.bending_moment_mx.value: 0,\r\n FemzipArrayType.bending_moment_my.value: 1,\r\n FemzipArrayType.bending_moment_mxy.value: 2,\r\n\r\n FemzipArrayType.unknown_1.value: 0,\r\n FemzipArrayType.unknown_2.value: 1,\r\n}\r\n\r\n\r\ndef femzip_to_d3plot(\r\n result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]\r\n ) -> Dict[str, np.ndarray]:\r\n \"\"\"Map femzip arrays to d3plot arrays\r\n\r\n Parameters\r\n ----------\r\n result_arrays:\r\n femzip arrays\r\n \"\"\"\r\n a = FemzipMapper()\r\n a.map(result_arrays)\r\n\r\n return a.d3plot_arrays\r\n\r\n\r\nclass ArrayShapeInfo:\r\n n_layers: Union[int, None] = None\r\n n_vars: Union[int, None] = None\r\n n_entries: Union[int, None] = None\r\n n_timesteps: Union[int, None] = None\r\n\r\n def _set_attr(self, attr_name: str, value: Union[int, None]) -> None:\r\n self_attr_value = getattr(self, attr_name)\r\n if value is not None:\r\n if self_attr_value is None:\r\n setattr(self, attr_name, value)\r\n else:\r\n setattr(self, attr_name, max(self_attr_value, value))\r\n\r\n def set_n_layers(self, n_layers: Union[int, None]) -> None:\r\n self._set_attr(\"n_layers\", n_layers)\r\n\r\n def set_n_vars(self, n_vars: Union[int, None]) -> None:\r\n self._set_attr(\"n_vars\", n_vars)\r\n\r\n def set_n_entries(self, n_entries: Union[int, None]) -> None:\r\n self._set_attr(\"n_entries\", n_entries)\r\n\r\n def set_n_timesteps(self, n_timesteps: Union[int, None]) -> None:\r\n self._set_attr(\"n_timesteps\", n_timesteps)\r\n\r\n def to_shape(self) -> Tuple[int, ...]:\r\n shape = [self.n_timesteps, self.n_entries]\r\n fortran_offset = 1\r\n if self.n_layers is not None:\r\n shape.append(self.n_layers + fortran_offset)\r\n if self.n_vars is not None:\r\n shape.append(self.n_vars + fortran_offset)\r\n return tuple(shape)\r\n\r\n\r\nclass D3plotArrayMapping:\r\n d3plot_array_type: str\r\n d3_layer_slice: Union[slice, int, None] = None\r\n d3_var_slice: Union[slice, int, None] = None\r\n\r\n fz_layer_slice: Union[slice, int, None] = None\r\n fz_var_slice: Union[slice, int, None] = None\r\n\r\n just_assign: bool = False\r\n\r\n def to_slice(self) -> Tuple[Union[int, slice], ...]:\r\n slices: List[Union[slice, int]] = [slice(None), slice(None)]\r\n if self.d3_layer_slice is not None:\r\n slices.append(self.d3_layer_slice)\r\n if self.d3_var_slice is not None:\r\n slices.append(self.d3_var_slice)\r\n return 
tuple(slices)\r\n\r\n\r\nclass FemzipArrayInfo:\r\n full_name: str = \"\"\r\n short_name: str = \"\"\r\n index: int = -1\r\n category: FemzipVariableCategory\r\n array_type: FemzipArrayType\r\n array: np.ndarray\r\n\r\n i_layer: Union[int, None] = None\r\n i_var: Union[int, None] = None\r\n\r\n mappings: List[D3plotArrayMapping]\r\n\r\n def __init__(self):\r\n self.mappings = []\r\n\r\n def __str__(self) -> str:\r\n return f\"\"\"FemzipArrayInfo:\r\n full_name = {self.full_name}\r\n short_name = {self.short_name}\r\n index = {self.index}\r\n category = {self.category}\r\n array_type = {self.array_type}>\r\n i_layer = {self.i_layer}\r\n i_var = {self.i_var}\"\"\"\r\n\r\n\r\nclass FemzipMapper():\r\n \"\"\"Class for mapping femzip variable data to d3plots.\r\n\r\n Takes no arguments.\r\n \"\"\"\r\n # regex pattern for reading variables\r\n name_separation_pattern = re.compile(r\"(^[^\\(\\n]+)(\\([^\\)]+\\))*\")\r\n\r\n FORTRAN_OFFSET: int = 1\r\n\r\n _d3plot_arrays: Dict[str, np.ndarray] = {}\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def map(self, result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]):\r\n \"\"\"Map femzip data to d3plot arrays.\r\n\r\n Parameters\r\n ----------\r\n result_arrays:\r\n femzip variable data\r\n \"\"\"\r\n self._d3plot_arrays = {}\r\n self._fz_array_slices = {}\r\n\r\n # convert to internal datastructure\r\n array_infos = self._convert(result_arrays)\r\n\r\n # build the array shapes\r\n d3plot_array_shapes = self._build(array_infos)\r\n\r\n # init the numpy arrays\r\n self._d3plot_arrays = self._allocate_d3plot_arrays(d3plot_array_shapes)\r\n\r\n # add all the data to its right place\r\n self._map_arrays(array_infos, self._d3plot_arrays)\r\n\r\n def _convert(self,\r\n result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]\r\n ) -> List[FemzipArrayInfo]:\r\n \"\"\" Convert femzip result arrays into array infos\r\n\r\n Parameters\r\n ----------\r\n result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]\r\n result arrays from femzip\r\n\r\n Returns\r\n -------\r\n array_infos: List[FemzipArrayInfo]\r\n infos about femzip arrays\r\n \"\"\"\r\n\r\n array_infos = []\r\n\r\n # convert\r\n for (fz_index, fz_name, fz_cat), array in result_arrays.items():\r\n femzip_array_info = FemzipArrayInfo()\r\n femzip_array_info.index = fz_index\r\n femzip_array_info.full_name = fz_name\r\n femzip_array_info.category = fz_cat\r\n femzip_array_info.array = array\r\n femzip_array_info.array_type = FemzipArrayType.from_string(fz_name)\r\n\r\n var_name, i_layer, i_stress, i_history = self._parse_femzip_name(\r\n fz_name, fz_cat)\r\n\r\n femzip_array_info.short_name = var_name\r\n femzip_array_info.i_layer = i_layer\r\n femzip_array_info.i_var = i_stress if i_stress is not None else i_history\r\n\r\n array_infos.append(femzip_array_info)\r\n\r\n return array_infos\r\n\r\n @staticmethod\r\n def _build(fz_arrays: List[FemzipArrayInfo]) -> Dict[str, Tuple[int, ...]]:\r\n \"\"\" Counts the occurence of all variables in the result array such as the\r\n number of layers and stresses.\r\n\r\n Paramters\r\n ---------\r\n fz_arrays: List[FemzipArrayInfo]\r\n infos about femzip arrays\r\n\r\n Returns\r\n -------\r\n d3plot_array_shapes:\r\n shapes of the d3plot arrays required to be allocated\r\n\r\n Notes\r\n -----\r\n Some variables only have partial stress results written for Sigma-x and Sigma-y\r\n and layers one to three for example.\r\n \"\"\"\r\n shape_infos: Dict[str, ArrayShapeInfo] = {}\r\n name_count: Dict[Tuple[str, 
FemzipVariableCategory], int] = {}\r\n\r\n for arr_info in fz_arrays:\r\n # print(arr_info)\r\n\r\n d3_array_types = TRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE[(\r\n arr_info.array_type, arr_info.category)]\r\n\r\n # var_name = var_name.strip()\r\n for array_type in d3_array_types:\r\n # print(array_type)\r\n array_shape_info = shape_infos.get(array_type) or ArrayShapeInfo()\r\n\r\n # beam layer vars always have same name but\r\n # must be counted up as layers\r\n if (arr_info.full_name, arr_info.category) in name_count:\r\n count = name_count[(arr_info.full_name, arr_info.category)]\r\n i_layer = count + 1\r\n name_count[(arr_info.full_name, arr_info.category)] = i_layer\r\n else:\r\n name_count[(arr_info.full_name, arr_info.category)] = 0\r\n\r\n # update shape\r\n array_shape_info.set_n_timesteps(arr_info.array.shape[0])\r\n array_shape_info.set_n_entries(arr_info.array.shape[1])\r\n array_shape_info.set_n_layers(arr_info.i_layer)\r\n array_shape_info.set_n_vars(arr_info.i_var)\r\n\r\n shape_infos[array_type] = array_shape_info\r\n\r\n # where to put it\r\n mapping = D3plotArrayMapping()\r\n mapping.d3plot_array_type = array_type\r\n if arr_info.i_layer is not None:\r\n mapping.d3_layer_slice = arr_info.i_layer\r\n if arr_info.i_var is not None:\r\n mapping.d3_var_slice = arr_info.i_var\r\n # arrays to copy:\r\n # - node displacement, veloctiy, acceleration\r\n # - airbag integer vars (so we don't need to cast)\r\n if arr_info.array.ndim == 3 \\\r\n or arr_info.category == FemzipVariableCategory.CPM_INT_VAR:\r\n mapping.just_assign = True\r\n\r\n arr_info.mappings.append(mapping)\r\n\r\n # correct layers\r\n # if a field has the same name for multiple\r\n # layers such as beam axial stress, we needed\r\n # to count in order to determine if it had layers\r\n # now we need to correct i_layers from None to 0 for them\r\n name_count2 = {}\r\n for arr_info in fz_arrays:\r\n count = name_count[(arr_info.full_name, arr_info.category)]\r\n\r\n if count != 0 and arr_info.i_layer is None:\r\n count2 = name_count2.get((arr_info.full_name, arr_info.category), -1)\r\n count2 += 1\r\n arr_info.i_layer = count2\r\n name_count2[(arr_info.full_name, arr_info.category)] = count2\r\n\r\n for mapping in arr_info.mappings:\r\n shape_info = shape_infos[mapping.d3plot_array_type]\r\n shape_info.set_n_layers(count)\r\n mapping.d3_layer_slice = count2\r\n\r\n # all arrays which are simply copied (slice has len 2 and only one target)\r\n # get a just assign flag\r\n if (len(arr_info.mappings) == 2 and\r\n len(arr_info.mappings[0].to_slice()) == 2):\r\n arr_info.mappings[0].just_assign = True\r\n\r\n d3_array_types = TRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE[(\r\n arr_info.array_type, arr_info.category)]\r\n\r\n for array_type in d3_array_types:\r\n del shape_infos[array_type]\r\n\r\n return {name: info.to_shape() for name, info in shape_infos.items()}\r\n\r\n def _map_arrays(self, array_infos: List[FemzipArrayInfo], d3plot_arrays: Dict[str, np.ndarray]):\r\n \"\"\"Allocate a femzip variable to its correct position in\r\n the d3plot array dictionary.\r\n\r\n Paramters\r\n ---------\r\n array_infos: List[FemzipArrayInfo]\r\n femzip variables stored in a dictionary\r\n d3plot_array: Dict[str, np.ndarray]\r\n d3plot arrays preallocated\r\n\r\n Notes\r\n -----\r\n The keys are the femzip array name (unparsed)\r\n and the category of the variable as an enum.\r\n \"\"\"\r\n for arr_info in array_infos:\r\n if arr_info.category == FemzipVariableCategory.CPM_AIRBAG:\r\n d3plot_arrays[ArrayType.airbag_n_active_particles] 
= arr_info.array[:, :, 0].view(\r\n np.int32)\r\n d3plot_arrays[ArrayType.airbag_bag_volume] = arr_info.array[:, :, 1]\r\n else:\r\n for mapping in arr_info.mappings:\r\n if mapping.just_assign:\r\n d3plot_arrays[mapping.d3plot_array_type] = arr_info.array\r\n continue\r\n\r\n slices = mapping.to_slice()\r\n d3plot_array = d3plot_arrays[mapping.d3plot_array_type]\r\n\r\n # for femzip arrays with same name first var_index is missing\r\n if d3plot_array.ndim == 3 and len(slices) == 2 and arr_info.array.ndim == 2:\r\n slices = (*slices, 0)\r\n\r\n d3plot_array[slices] = arr_info.array\r\n\r\n def _allocate_d3plot_arrays(self,\r\n array_shapes: Dict[str, Tuple[int, ...]]) -> Dict[str, np.ndarray]:\r\n \"\"\"Initialize all the d3plot arrays.\r\n\r\n Parameters\r\n ----------\r\n array_shapes: array_shapes: Dict[str, Tuple[int, ...]]\r\n array shapes required to be allocated\r\n\r\n Returns\r\n -------\r\n d3plot_arrays: Dict[str, np.ndarray]\r\n d3plot arrays preallocated\r\n \"\"\"\r\n d3plot_arrays = {}\r\n for key, shape in array_shapes.items():\r\n d3plot_arrays[key] = np.empty(shape, dtype=np.float32)\r\n return d3plot_arrays\r\n\r\n @ property\r\n def d3plot_arrays(self):\r\n \"\"\"Returns the mapped d3plot arrays.\r\n \"\"\"\r\n return self._d3plot_arrays\r\n\r\n def _parse_femzip_name(self,\r\n fz_name: str,\r\n var_type: FemzipVariableCategory) -> Tuple[str,\r\n Union[int, None],\r\n Union[int, None],\r\n Union[int, None]]:\r\n \"\"\"Parses the femzip variable names.\r\n\r\n Parameters\r\n ----------\r\n fz_name:\r\n cryptic femzip variable name we need to parse\r\n var_type:\r\n the category of this varialbe e.g. shells, parts, global etc.\r\n\r\n Returns\r\n -------\r\n var_name:\r\n femzip variable name without integration and layer info\r\n i_layer:\r\n layer index\r\n i_stress:\r\n stress index\r\n i_history:\r\n history variable index\r\n \"\"\"\r\n matches = self.name_separation_pattern.findall(fz_name)\r\n if not len(matches) == 1:\r\n err_msg = \"Could not match femzip array name: {0}\"\r\n raise ValueError(err_msg.format(fz_name))\r\n if not len(matches[0]) == 2:\r\n err_msg = \"Could not match femzip array name: {0}\"\r\n raise ValueError(err_msg.format(fz_name))\r\n\r\n (first_grp, second_grp) = matches[0]\r\n var_name, extra_value = get_last_int_of_line(first_grp)\r\n var_name = var_name.strip()\r\n\r\n # the slice 1:-1 leaves out the brackets '(' and ')'\r\n _, i_layer = get_last_int_of_line(\r\n second_grp[1:-1])\r\n\r\n if i_layer is not None:\r\n i_layer -= self.FORTRAN_OFFSET\r\n\r\n i_history: Union[int, None] = None\r\n\r\n if var_type != FemzipVariableCategory.PART or \\\r\n var_type != FemzipVariableCategory.GLOBAL:\r\n i_history = extra_value\r\n\r\n if i_history:\r\n i_history -= self.FORTRAN_OFFSET\r\n\r\n # set var name to the unformatted femzip array type name\r\n if \"Epsilon\" in var_name:\r\n var_name = fz_name.strip()\r\n if \"inner\" in var_name:\r\n i_layer = 0\r\n elif \"outer\" in var_name:\r\n i_layer = 1\r\n else:\r\n # solid strain\r\n i_layer = 0\r\n\r\n i_stress: Union[int, None] = stress_index.get(var_name, None)\r\n\r\n return var_name, i_layer, i_stress, i_history\r\n\r\n\r\ndef filter_femzip_variables(file_metadata: FemzipFileMetadata,\r\n d3plot_array_filter: Union[Set[str], None]) -> FemzipFileMetadata:\r\n \"\"\" Filters variable infos regarding d3plot array types\r\n\r\n Parameters\r\n ----------\r\n file_metadata: FemzipFileMetadata\r\n metadata of femzip file including contained variables\r\n d3plot_array_filter: Union[Set[str], 
None]\r\n array types to filter for if wanted\r\n\r\n Returns\r\n -------\r\n file_metadata: FemzipFileMetadata\r\n filtered array according to array types\r\n \"\"\"\r\n\r\n # find out which arrays we need and\r\n vars_to_copy: List[int] = list()\r\n\r\n for i_var in range(file_metadata.number_of_variables):\r\n try:\r\n var_info: VariableInfo = file_metadata.variable_infos[i_var]\r\n var_type: int = var_info.var_type\r\n var_index: int = var_info.var_index\r\n var_name: str = var_info.name.decode(\"utf-8\")\r\n\r\n logging.debug(f\"{var_type}, {var_index}, {var_name.strip()}\")\r\n\r\n if var_type == FemzipVariableCategory.GEOMETRY.value:\r\n continue\r\n\r\n # find out which array from name\r\n try:\r\n fz_array_type = FemzipArrayType.from_string(var_name)\r\n except ValueError:\r\n warn_msg = (\"Warning: lasso-python does not support femzip result\"\r\n \" field '{0}' category type '{1}'.\")\r\n logging.warning(warn_msg.format(var_name.strip(), var_type))\r\n continue\r\n\r\n # check if we asked for the array\r\n matching_array_types = TRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE[(\r\n fz_array_type, FemzipVariableCategory(var_type))]\r\n\r\n if d3plot_array_filter is not None:\r\n if not matching_array_types.intersection(d3plot_array_filter):\r\n continue\r\n vars_to_copy.append(i_var)\r\n except Exception:\r\n trb_msg = traceback.format_exc()\r\n err_msg = \"An error ocurred while preprocessing femzip variable information: {0}\"\r\n logging.warning(err_msg.format(trb_msg))\r\n\r\n # copy filtered data\r\n filtered_file_metadata = FemzipFileMetadata()\r\n FemzipAPI.copy_struct(file_metadata, filtered_file_metadata)\r\n filtered_file_metadata.number_of_variables = len(vars_to_copy)\r\n\r\n FilteredVariableInfoArrayType = len(vars_to_copy) * VariableInfo\r\n filtered_info_array_data = FilteredVariableInfoArrayType()\r\n\r\n for i_var, src_i_var in enumerate(vars_to_copy):\r\n FemzipAPI.copy_struct(\r\n file_metadata.variable_infos[src_i_var],\r\n filtered_info_array_data[i_var])\r\n filtered_file_metadata.variable_infos = filtered_info_array_data\r\n\r\n return filtered_file_metadata\r\n" ]
[ [ "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rewin123/NNPreprocessingTomography
[ "b630f4c2cb9705c3c8432480498e4307ed511edf" ]
[ "open_test.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import Sequential\n\n#model definition\nclass Unet1D(nn.Module):\n def __init__(self):\n super(Unet1D, self).__init__()\n \n ch = 32\n self.maxpool = nn.MaxPool2d((1,2))\n self.unpool = nn.Upsample(scale_factor=(1,2))\n self.startLayer = nn.Conv2d(1, ch, (1,3), padding=(0,1))\n self.endLayer = nn.Conv2d(ch, 1, (1,1))\n self.tb1 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.tb2 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.tb3 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.tb4 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.tb5 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n\n self.db1 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.db2 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.db3 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.db4 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.db5 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n\n\n def forward(self, x):\n data = self.startLayer(x)\n\n data1 = self.tb1(data)\n data2 = self.tb2(self.maxpool(data1))\n data3 = self.tb3(self.maxpool(data2))\n data4 = self.tb4(self.maxpool(data3))\n data5 = self.tb5(self.maxpool(data4))\n\n \n data5 = self.db5(data5)\n data4 = self.db4(torch.cat([data4, nn.Upsample(size=(data4.shape[2], data4.shape[3]))(data5)], dim=1))\n data3 = self.db3(torch.cat([data3, nn.Upsample(size=(data3.shape[2], data3.shape[3]))(data4)], dim=1))\n data2 = self.db2(torch.cat([data2, nn.Upsample(size=(data2.shape[2], data2.shape[3]))(data3)], dim=1))\n data1 = self.db1(torch.cat([data1, nn.Upsample(size=(data1.shape[2], data1.shape[3]))(data2)], dim=1))\n\n return self.endLayer(data1)\n\n#we use cuda for model\nmodel = torch.load(\"model_unet1d.pkl\").cpu()\n\nimport numpy as np\n#load train and val data\n#input sinograms with noise\nnoised_sin = torch.from_numpy(np.load(\"data/noised_sin.npy\")).unsqueeze(1)\n#filtered sinograms without noise\nfiltered_sin = torch.from_numpy(np.load(\"data/clear_sin.npy\")).unsqueeze(1)\n#groundtruth phantoms\nphantoms = torch.from_numpy(np.load(\"data/phantoms.npy\")).unsqueeze(1)\n\n\nimport odl\n#define radon scheme\ndetectors = 183\nangles = 128\nangles_parallel = np.linspace(0, 180, angles, False)\n\nreco_space = odl.uniform_discr(min_pt=[-20,-20], max_pt=[20,20], shape=[128, 128], dtype='float32')\n\nphantom = odl.phantom.shepp_logan(reco_space, modified=True)\n\nimport math\nl = 40 * math.sqrt(2)\n\nangle_partition = odl.uniform_partition(-np.pi / 2, np.pi / 2, angles)\ndetector_partition = odl.uniform_partition(-l / 2, l / 2, detectors)\ngeometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition)\nray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=\"astra_cuda\")\n\ndef ramp_op(ray_trafo):\n fourier = odl.trafos.FourierTransform(ray_trafo.range, axes=[1])\n # Create ramp in the detector direction\n ramp_function = fourier.range.element(lambda x: np.abs(x[1]) / (2 * np.pi))\n # Create ramp filter via the convolution formula with fourier transforms\n ramp_filter = fourier.inverse * ramp_function * fourier\n return ramp_filter\n\nramp = ramp_op(ray_trafo)\n\ntest_data_idx = 1000\n\ninp = noised_sin[test_data_idx:test_data_idx+1]\nf_sin = 
filtered_sin[test_data_idx]\ngroundtruth = phantoms[test_data_idx, 0].numpy()\n\n#plot and measure experiments\nimport matplotlib.pyplot as plt\n\nfig, axs = plt.subplots(2, 3)\nfig.set_figheight(15)\nfig.set_figwidth(15)\n\nproposed_rec = ray_trafo.adjoint(model(inp).detach().numpy()[0,0]).data\nproposed_rec *= (proposed_rec > 0)\nfbp_rec = ray_trafo.adjoint(ramp(inp[0,0])).data\nfbp_rec *= (fbp_rec > 0)\n\nproposed_diff = np.abs(proposed_rec - groundtruth)\nfbp_diff = np.abs(fbp_rec - groundtruth)\n\n# diff_max = max(np.max(proposed_diff), np.max(fbp_diff))\n# proposed_diff /= diff_max\n# fbp_diff /= diff_max\n\n\n#show phantom\nim_ph = axs[0,0].imshow(groundtruth, cmap='gray')\naxs[0,0].set_title('a) Фантом')\n\n#show fbp reconstruction\naxs[0,1].imshow(fbp_rec, cmap='gray')\naxs[0,1].set_title('б) FBP')\naxs[0,1].axhline(y=64, color='orange', ls='--')\n\n#show reconstruction of proposed models\naxs[0,2].imshow(proposed_rec, cmap='gray')\naxs[0,2].set_title('в) UNet1D')\naxs[0,2].axhline(y=64, color='blue', ls='--')\n\n\n#show diff slice\n# axs[1, 2].plot(groundtruth[64], label='Phantom')\naxs[1, 0].plot(proposed_rec[64], '-', label='UNet1D', color='blue')\naxs[1, 0].plot(fbp_rec[64], '--', label='FBP', color='orange')\naxs[1, 0].set_title('г) Срез реконструкции от FBP и UNet1D')\naxs[1, 0].grid()\naxs[1, 0].legend()\n\n#diff fbp to groundtruth\naxs[1,1].imshow(fbp_diff, vmax=groundtruth.max(), vmin=0, cmap='gray')\naxs[1,1].set_title('д) Разница между FBP и фантомом')\n\n#diff proposed to groundtruth\naxs[1,2].imshow(proposed_diff, vmax=groundtruth.max(), vmin=0, cmap='gray')\naxs[1,2].set_title('е) Разница между UNet1D и фантомом')\n\n\n\nfig.subplots_adjust(right=0.9)\ncbar_ax = fig.add_axes([0.91, 0.53, 0.02, 0.35])\nfig.colorbar(im_ph, cax=cbar_ax)\n\nplt.show()" ]
[ [ "numpy.abs", "numpy.linspace", "torch.load", "torch.nn.Conv2d", "matplotlib.pyplot.subplots", "torch.nn.MaxPool2d", "torch.nn.Upsample", "numpy.load", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rran9235/GazePal-Application
[ "88d6a74daeddd18ab37c0f2953a118f1f59e06a5" ]
[ "src/python/GazePal_PC.py" ]
[ "\"\"\"\nGazePal Application\nAuthor: Rishi Rangarajan\nYear: 2021 \n\nFile: GazePal_PC.py\nInfo: GazePal_PC class definition\n\"\"\"\n\n# Imports\nimport csv\nimport os\nimport pyautogui\nimport time\nimport torch\nimport torchvision\nimport cv2 as cv\nimport numpy as np\nimport torchvision.transforms as transforms\nfrom collections import Counter\nfrom threading import Thread\nfrom Class_CNN import CNN\n\n# Class declaration\nclass GazePal_PC:\n\n # Class constructor\n def __init__(self):\n \n # Initialise screen-based parameters\n self.screen_init()\n # Initialise gaze tracking camera\n self.camera_init()\n # Initialise CNN\n self.CNN_init()\n\n # Initialise timer\n self.timer = 0\n\n print(\"[INFO]: GazePal initialised.\") \n\n # Initialise screen-based parameters\n def screen_init(self):\n # Set FR names\n self.classes = [\"FR1\", \"FR2\", \"FR3\", \"FR4\", \"FR5\", \"FR6\", \"FR7\", \"FR8\", \"FR9\"]\n # Get screen resolution\n width_px, height_px = pyautogui.size()\n w_r = width_px/3\n h_r = height_px/3\n # Set xy coordinates for FRs\n self.regions = {\"FR1\" : [1*w_r/2, 1*h_r/2], \"FR2\" : [3*w_r/2, 1*h_r/2], \"FR3\" : [5*w_r/2, 1*h_r/2],\n \"FR4\" : [1*w_r/2, 3*h_r/2], \"FR5\" : [3*w_r/2, 3*h_r/2], \"FR6\" : [5*w_r/2, 3*h_r/2],\n \"FR7\" : [1*w_r/2, 5*h_r/2], \"FR8\" : [3*w_r/2, 5*h_r/2], \"FR9\" : [5*w_r/2, 5*h_r/2]}\n # Initialise buffer\n self.buffer = [4]*20\n # Initialise message\n self.old_FR = \"FR5\"\n\n # Initialise gaze tracking camera-based parameters\n def camera_init(self):\n # Create camera object\n self.GazePal_Camera = cv.VideoCapture(0)\n (self.status, self.frame) = self.GazePal_Camera.read()\n self.stopped = False\n self.thread = Thread(target=self.update, args=())\n self.thread.start()\n\n # Initialise CNN-based parameters\n def CNN_init(self):\n # Create CNN\n self.model_path = os.path.join(\"models\", \"GazePal-Latest.pth\")\n self.GazePal_CNN = CNN()\n self.GazePal_CNN.load_state_dict(torch.load(self.model_path))\n # Set image transforms\n self.img_transform = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((100,100)),\n transforms.Grayscale(3),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n \n # Create Haar Cascade classifier for Eyes\n self.haar_eye = cv.CascadeClassifier(os.path.join(\"models\", \"haarcascade_eye.xml\"))\n self.haar_face = cv.CascadeClassifier(os.path.join(\"models\", \"haarcascade_frontalface_default.xml\"))\n \n print(\"[INFO]: Created Haar Classifier.\")\n\n # Repeatedly acquire images from gaze tracking camera\n def update(self):\n # Loop infinitely\n while True:\n # If stopped then break from loop\n if self.stopped:\n break\n # Read image frame from camera\n (self.status, self.frame) = self.GazePal_Camera.read()\n\n # Returns acquired image frame\n def read(self):\n # Return frame\n return self.frame\n\n # Exit protocol\n def stop(self):\n self.stopped = True\n # Release gaze tracking camera object\n self.GazePal_Camera.release()\n # Join threads\n self.thread.join() \n # Close all OpenCV windows\n cv.destroyAllWindows()\n\n\n # Move cursor to within a FR\n def absolute_movement(self, FR):\n # Retrieve xy position inside FR\n pos_x = self.regions[FR][0]\n pos_y = self.regions[FR][1]\n # Move cursor to location\n pyautogui.moveTo(pos_x, pos_y, 0.5)\n\n # Initiate left-mouse button click\n def cursor_click(self):\n # Perform click\n pyautogui.leftClick()\n\n # Function to detect faces\n def detect_faces(self, img):\n # Convert to grayscale\n gray = cv.cvtColor(img, 
cv.COLOR_BGR2GRAY)\n # Detect face(s) in the image\n faces = self.haar_face.detectMultiScale(gray, 1.3, 5)\n \n # Loop through each face detected\n for (fx, fy, fw, fh) in faces:\n # Draw rectangle(s) over every face\n cv.rectangle(img, (fx,fy), (fx+fw,fy+fh), (225,0,0), 2)\n\n return faces\n\n # Detect eyes in image frame\n def detect_eyes(self, img):\n # Convert to grayscale \n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n # Detect eye(s) in the image\n eyes = self.haar_eye.detectMultiScale(gray)\n # Extract image dimensions\n height = np.size(img, 0)\n width = np.size(img, 1)\n # Initialise that no eyes are detected\n left_eye = None\n right_eye = None\n\n # Loop through each eye detected\n for (ex, ey, ew, eh) in eyes:\n # Check if detected eye is in bottom half of face\n if ey > height/2:\n pass\n else:\n # Centre between the eyes\n centre = ex + ew/2\n # Check if centre of eye is on the right or left\n if centre > width/2:\n # Draw rectangle(s) over every eye\n cv.rectangle(img, (ex,ey), (ex+ew,ey+eh), (225,0,0), 2)\n # Crop eye\n eye = img[ey:ey+eh, ex:ex+ew]\n\n # Return cropped image of left eye and right eye\n return eye\n\n # Predict the gaze for a given image frame\n def predict_gaze(self):\n \n # Reset previous time\n prev_time = time.time()\n\n # Read frame\n frame = self.read()\n\n # Flip frame\n frame = cv.flip(frame, 1)\n\n try:\n # Detect face\n faces = self.detect_faces(frame)\n # Crop face image\n face_img = frame[faces[0][1]:faces[0][1]+faces[0][3], faces[0][0]:faces[0][0]+faces[0][2]]\n # Detect the eyes in face image\n eye = self.detect_eyes(face_img) \n # Transform image before CNN pass-through\n torch_img = self.image_loader(eye)\n # Obtain CNN prediction distribution\n output = self.GazePal_CNN(torch_img)\n # Extract prediction with highest energy\n _, prediction = torch.max(output.data, 1)\n\n # Remove oldest prediction in buffer\n self.buffer.pop(0)\n # Append new prediction to buffer\n self.buffer.append(int(prediction))\n # Determine most frequently predicted FR\n prediction = max(self.buffer, key=self.buffer.count)\n # Obtain FR name\n FR = str(self.classes[prediction])\n\n # Check if FR is not same as previous FR\n if self.old_FR != FR:\n # If not, then move to new FR\n self.absolute_movement(FR)\n # Reset old FR value to current FR\n self.old_FR = FR\n # Reset timer\n self.timer = time.time()\n else:\n # If same, then check time elapsed\n if (time.time() - self.timer) > 2:\n # If more than 2 seconds on same FR,\n # then reset time\n self.timer = time.time()\n # Initiate cursor click\n self.cursor_click()\n except:\n # Process errors as FR not available\n FR = \"N/A\"\n\n # Print FR number in frame\n cv.putText(frame, FR, (20,50), cv.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)\n\n # Update new time\n new_time = time.time()\n # Compute and write fps\n time_taken = (new_time - prev_time)\n fps = \"FPS: {0:.1f}\".format(1/time_taken)\n cv.putText(frame, fps, (120,50), cv.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)\n # Update previous time\n prev_time = new_time\n \n # Show image frame\n cv.imshow(\"frame\", frame)\n \n # Repeatedly predict user gaze\n def gaze_tracking(self):\n # Loop infinitely\n while True:\n # Predict user gaze\n self.predict_gaze()\n\n # Wait for keystroke\n k = cv.waitKey(1)\n\n # If keystroke is ESC\n if k == 27:\n # Print message\n print(\"[INFO]: ESC pressed; quitting program.\")\n # Break from loop\n break\n\n # # If keystroke is SPACE\n # elif k == 32:\n # if self.logging:\n # self.logging = False\n # print(\"[INFO]: SPACE pressed; stopping 
logging.\")\n # else:\n # print(\"[INFO]: SPACE pressed; starting logging.\")\n # self.logging = True\n \n # Exit GazePal \n self.stop()\n\n # Transforms to image before CNN prediction\n def image_loader(self, img):\n\n # Transform image\n torch_img = self.img_transform(img).float()\n # Convert to tensor\n torch_img = torch.tensor(torch_img, requires_grad=True)\n # Unsqueeze\n torch_img = torch_img.unsqueeze(0)\n # Return new torch image\n return torch_img\n\n\n\n\n\n\n\n" ]
[ [ "numpy.size", "torch.load", "torch.max", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ICRC-BME/PySigView
[ "8ac60960dea0e5c70757c76545a896c76a95f68d", "8ac60960dea0e5c70757c76545a896c76a95f68d" ]
[ "pysigview/widgets/transforms/filters.py", "pysigview/core/source_manager.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 29 09:23:17 2017\n\nAnnotations plugin for pysigview\n\nIng.,Mgr. (MSc.) Jan Cimbálník\nBiomedical engineering\nInternational Clinical Research Center\nSt. Anne's University Hospital in Brno\nCzech Republic\n&\nMayo systems electrophysiology lab\nMayo Clinic\n200 1st St SW\nRochester, MN\nUnited States\n\"\"\"\n\n# Standard library imports\n\n# Third party imports\nfrom PyQt5.QtCore import pyqtSignal, Qt\nfrom PyQt5.QtWidgets import (QVBoxLayout,\n QWidget, QLineEdit,\n QComboBox, QLabel, QMessageBox, QPushButton)\n\nfrom scipy.signal import butter, filtfilt\n\n# Local imports\nfrom pysigview.core.plot_transform import BasePlotTransform\n\n\nclass FilterTransform(BasePlotTransform):\n\n def __init__(self):\n super().__init__()\n\n self.name = 'filter'\n self.a = None\n self.b = None\n\n def apply_transform(self, data):\n return filtfilt(self.b, self.a, data)\n\n @property\n def transform_variables(self):\n return (self.a, self.b)\n\n @transform_variables.setter\n def transforms_variables(self, a, b):\n self.a = a\n self.b = b\n\n\nclass Filters(QWidget):\n\n # Attributes\n CONF_SUBSECTION = 'filters'\n IMG_PATH = 'images'\n shortcut = None\n\n # Signals\n filters_transform_changed = pyqtSignal(name='filters_transform_changed')\n\n def __init__(self, parent):\n super(Filters, self).__init__(parent)\n\n self.transform_list_stack = self.parent()\n self.preview = self.transform_list_stack.parent().signal_preview\n self.main = self.transform_list_stack.main\n\n self.title = 'Filters'\n\n # Transform\n self.preview_transform = None\n\n # Master layout\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n\n # Filter design widget layout\n filter_layout = QVBoxLayout()\n filter_layout.setContentsMargins(0, 0, 0, 0)\n\n # Filter selector\n self.filter_selector_label = QLabel('Select filter type:', self)\n self.filter_selector = QComboBox(self)\n self.filter_selector.addItem('Butterworth')\n\n # Filter cut-offs\n self.low_cutoff_label = QLabel('Low cutoff:', self)\n self.low_cutoff_le = QLineEdit(self)\n\n self.high_cutoff_label = QLabel('High cutoff:', self)\n self.high_cutoff_le = QLineEdit(self)\n\n # Poles\n self.poles_label = QLabel('N poles:', self)\n self.poles_le = QLineEdit(self)\n\n # Set button\n self.set_button = QPushButton('Set', self)\n\n # Vipy canvas with axes for FFT\n\n #TODO - filter for number only in lineedits\n # Asseble the layout\n filter_layout.addWidget(self.filter_selector_label)\n filter_layout.addWidget(self.filter_selector)\n\n filter_layout.addWidget(self.low_cutoff_label)\n filter_layout.addWidget(self.low_cutoff_le)\n\n filter_layout.addWidget(self.high_cutoff_label)\n filter_layout.addWidget(self.high_cutoff_le)\n\n filter_layout.addWidget(self.poles_label)\n filter_layout.addWidget(self.poles_le)\n\n filter_layout.addWidget(self.set_button)\n\n layout.addLayout(filter_layout)\n layout.setAlignment(Qt.AlignTop)\n\n self.setLayout(layout)\n\n # Connect signals\n self.filter_selector.currentIndexChanged.connect(\n self.set_preview_transform)\n self.low_cutoff_le.returnPressed.connect(self.set_preview_transform)\n self.high_cutoff_le.returnPressed.connect(self.set_preview_transform)\n self.poles_le.returnPressed.connect(self.set_preview_transform)\n self.set_button.clicked.connect(self.set_preview_transform)\n\n def create_transform(self, vc):\n\n fs = vc.fsamp\n if fs is None:\n return\n\n # Design the filter\n selected_filter = self.filter_selector.currentText()\n low_fc_str 
= self.low_cutoff_le.text()\n high_fc_str = self.high_cutoff_le.text()\n poles_str = self.poles_le.text()\n\n if poles_str == '':\n QMessageBox.Warning('Number of poles must by specified')\n return\n else:\n poles = int(poles_str)\n\n if low_fc_str == '':\n low_fc = None\n else:\n low_fc = float(low_fc_str)\n if high_fc_str == '':\n high_fc = None\n else:\n high_fc = float(high_fc_str)\n\n if low_fc is not None and high_fc is not None and low_fc >= high_fc:\n QMessageBox.Warning('Low cut-off frequency cannot be higher',\n 'than high cut-off frequency')\n return\n\n if selected_filter == 'Butterworth':\n if low_fc and high_fc:\n b, a = butter(poles, [low_fc/(fs/2),\n high_fc/(fs/2)], 'bandpass')\n elif low_fc and not high_fc:\n b, a = butter(poles, low_fc/(fs/2), 'highpass')\n elif not low_fc and high_fc:\n b, a = butter(poles, high_fc/(fs/2), 'lowpass')\n else:\n return\n\n # Greate the transform object\n transform = FilterTransform()\n transform.a = a\n transform.b = b\n transform.name = (' / ' + selected_filter + '; '\n + '-'.join([low_fc_str, high_fc_str]) + 'Hz')\n\n return transform\n\n # ??? Should be part of transforms API??\n def set_preview_transform(self):\n\n vc = self.preview.preview_pvc\n self.preview.preview_temp_transform = self.create_transform(vc)\n self.preview.update_trans_sig()\n\n # ----- Transforms API -----\n def get_transform_title(self):\n \"\"\"Return widget title\"\"\"\n return self.title\n\n def register_transform(self):\n \"\"\"\n Register transform in Transforms plugin.\n \"\"\"\n\n # Connect signals\n\n return\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 22 13:14:09 2017\n\nIng.,Mgr. (MSc.) Jan Cimbálník, PhD.\nBiomedical engineering\nInternational Clinical Research Center\nSt. Anne's University Hospital in Brno\nCzech Republic\n&\nMayo systems electrophysiology lab\nMayo Clinic\n200 1st St SW\nRochester, MN\nUnited States\n\"\"\"\n\n# Std imports\n\n# Third pary imports\nimport numpy as np\n\n# Local imports\n\n# =============================================================================\n# Data maps\n# =============================================================================\n\n\nclass DataMap:\n \"\"\"\n Class to store channel maps and uutc maps\n \"\"\"\n def __init__(self):\n super(DataMap, self).__init__()\n\n self._map = np.array([], dtype=[('channels', object),\n ('ch_set', np.bool),\n ('uutc_ss', np.int64, 2)])\n\n def __getitem__(self, item):\n return self._map[item]\n\n def __setitem__(self, item, value):\n self._map[item] = value\n return\n\n def __len__(self):\n return len(self._map)\n\n# def __repr__(self):\n# return self.print_data_map()\n#\n# def __str__(self):\n# return self.print_data_map()\n#\n# def print_data_map(self):\n# header = self._map.dtype.names\n# row_format =\"{:>15}\" * (len(header) + 1)\n# print(row_format.format('', *header))\n# for i in range(len(self)):\n# self[i]\n\n def setup_data_map(self, dmap):\n self._map = np.copy(dmap)\n return\n\n def set_data_map(self, channels, uutc_sss):\n for channel, uutc_ss in zip(channels, uutc_sss):\n self.set_channel(channel, uutc_ss)\n return\n\n def reset_data_map(self):\n for channel in self._map['channels']:\n self.remove_channel(channel)\n return\n\n def remove_channel(self, channel):\n ci = np.in1d(self._map['channels'], channel)\n self._map['ch_set'][ci] = False\n self._map['uutc_ss'][ci] = [0, 0]\n return\n\n def set_channel(self, channel, uutc_ss):\n ci = np.in1d(self._map['channels'], channel)\n self._map['ch_set'][ci] = True\n 
self._map['uutc_ss'][ci] = uutc_ss\n return\n\n def get_active_channels(self):\n return self._map['channels'][self._map['ch_set']]\n\n def get_active_uutc_ss(self):\n return self._map['uutc_ss'][self._map['ch_set']]\n\n def get_active_largest_ss(self):\n uutc_ss = self.get_active_uutc_ss()\n if len(uutc_ss):\n return np.array([np.min(uutc_ss[:, 0]), np.max(uutc_ss[:, 1])])\n else:\n return np.array([0, 0])\n\n\n# =============================================================================\n# Data sources\n# =============================================================================\n\nclass DataSource:\n \"\"\"\n Superclass for data sources (files, clients, streams, buffers)\n \"\"\"\n def __init__(self):\n super(DataSource, self).__init__()\n\n# self.ODS_name = None\n self.data_map = DataMap()\n self.recording_info = None\n\n def load_metadata(self):\n return None\n\n def get_metadata(self):\n return None\n\n def get_data(self):\n return None\n\n\nclass FileDataSource(DataSource):\n \"\"\"\n Class for source that will read data directly from the disk\n \"\"\"\n def __init__(self):\n super(FileDataSource, self).__init__()\n\n self.extension = None\n self._path = None\n self._password = None\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, path):\n self._path = path\n\n @property\n def password(self):\n return self._password\n\n @password.setter\n def password(self, password):\n self._password = password\n\n\nclass ClientDataSource(DataSource):\n \"\"\"\n Class for source that will read from server\n \"\"\"\n def __init__(self, ip=None, port=None):\n super(ClientDataSource, self).__init__()\n\n self._ip = ip\n self._port = port\n\n @property\n def ip(self):\n return self._ip\n\n @ip.setter\n def ip(self, ip):\n self._ip = ip\n\n @property\n def port(self):\n return self._port\n\n @port.setter\n def port(self, port):\n self._port = port\n\n def connect(self):\n return None\n\n def get_directory_tree(self):\n return None\n\n\nclass StreamDataSource(DataSource):\n \"\"\"\n Class for source that will read from server\n \"\"\"\n def __init__(self):\n super(StreamDataSource, self).__init__()\n\n\nclass BufferDataSource(DataSource):\n \"\"\"\n Superclass for buffer data sources\n \"\"\"\n def __init__(self):\n super(BufferDataSource, self).__init__()\n\n def is_available(self, check_dm):\n \"\"\"\n Compares check_dm with the intrnal data map\n \"\"\"\n\n # First find out whether the check_dm channels are in\n if sum(check_dm['ch_set']\n & self.data_map['ch_set']) != sum(check_dm['ch_set']):\n return False\n\n # Now find out whether the check_dm uutc_sss are in\n for loc, check in zip(self.data_map['uutc_ss'][check_dm['ch_set']],\n check_dm['uutc_ss'][check_dm['ch_set']]):\n\n if not (loc[0] <= check[0] <= loc[1]\n and loc[0] <= check[1] <= loc[1]):\n return False\n\n return True\n\n\n# =============================================================================\n# Cross-module constants\n# =============================================================================\n\n# Original data source (i.e: file/stream/client)\nODS = DataSource()\n\n# Provider data source (i.e: the above + buffers)\nPDS = DataSource()\n" ]
[ [ "scipy.signal.butter", "scipy.signal.filtfilt" ], [ "numpy.min", "numpy.in1d", "numpy.max", "numpy.copy", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
monferrand/pandas
[ "a3477c769b3d2ea4950ae69f8867e3b291b743c1" ]
[ "pandas/core/arrays/datetimes.py" ]
[ "from datetime import datetime, time, timedelta\nfrom typing import Union\nimport warnings\n\nimport numpy as np\nfrom pytz import utc\n\nfrom pandas._libs import lib, tslib\nfrom pandas._libs.tslibs import (\n NaT,\n Timestamp,\n ccalendar,\n conversion,\n fields,\n iNaT,\n normalize_date,\n resolution as libresolution,\n timezones,\n tzconversion,\n)\nimport pandas._libs.tslibs.frequencies as libfrequencies\nfrom pandas.errors import PerformanceWarning\n\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n INT64_DTYPE,\n is_bool_dtype,\n is_categorical_dtype,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64_ns_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float_dtype,\n is_object_dtype,\n is_period_dtype,\n is_string_dtype,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.generic import ABCIndexClass, ABCPandasArray, ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core.algorithms import checked_add_with_arr\nfrom pandas.core.arrays import datetimelike as dtl\nfrom pandas.core.arrays._ranges import generate_regular_range\nimport pandas.core.common as com\n\nfrom pandas.tseries.frequencies import get_period_alias, to_offset\nfrom pandas.tseries.offsets import Day, Tick\n\n_midnight = time(0, 0)\n\n\ndef tz_to_dtype(tz):\n \"\"\"\n Return a datetime64[ns] dtype appropriate for the given timezone.\n\n Parameters\n ----------\n tz : tzinfo or None\n\n Returns\n -------\n np.dtype or Datetime64TZDType\n \"\"\"\n if tz is None:\n return DT64NS_DTYPE\n else:\n return DatetimeTZDtype(tz=tz)\n\n\ndef _field_accessor(name, field, docstring=None):\n def f(self):\n values = self.asi8\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n\n if field in self._bool_ops:\n if field.endswith((\"start\", \"end\")):\n freq = self.freq\n month_kw = 12\n if freq:\n kwds = freq.kwds\n month_kw = kwds.get(\"startingMonth\", kwds.get(\"month\", 12))\n\n result = fields.get_start_end_field(\n values, field, self.freqstr, month_kw\n )\n else:\n result = fields.get_date_field(values, field)\n\n # these return a boolean by-definition\n return result\n\n if field in self._object_ops:\n result = fields.get_date_name_field(values, field)\n result = self._maybe_mask_results(result, fill_value=None)\n\n else:\n result = fields.get_date_field(values, field)\n result = self._maybe_mask_results(\n result, fill_value=None, convert=\"float64\"\n )\n\n return result\n\n f.__name__ = name\n f.__doc__ = docstring\n return property(f)\n\n\nclass DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps):\n \"\"\"\n Pandas ExtensionArray for tz-naive or tz-aware datetime data.\n\n .. versionadded:: 0.24.0\n\n .. warning::\n\n DatetimeArray is currently experimental, and its API may change\n without warning. 
In particular, :attr:`DatetimeArray.dtype` is\n expected to change to always be an instance of an ``ExtensionDtype``\n subclass.\n\n Parameters\n ----------\n values : Series, Index, DatetimeArray, ndarray\n The datetime data.\n\n For DatetimeArray `values` (or a Series or Index boxing one),\n `dtype` and `freq` will be extracted from `values`.\n\n dtype : numpy.dtype or DatetimeTZDtype\n Note that the only NumPy dtype allowed is 'datetime64[ns]'.\n freq : str or Offset, optional\n The frequency.\n copy : bool, default False\n Whether to copy the underlying array of values.\n\n Attributes\n ----------\n None\n\n Methods\n -------\n None\n \"\"\"\n\n _typ = \"datetimearray\"\n _scalar_type = Timestamp\n _recognized_scalars = (datetime, np.datetime64)\n _is_recognized_dtype = is_datetime64_any_dtype\n\n # define my properties & methods for delegation\n _bool_ops = [\n \"is_month_start\",\n \"is_month_end\",\n \"is_quarter_start\",\n \"is_quarter_end\",\n \"is_year_start\",\n \"is_year_end\",\n \"is_leap_year\",\n ]\n _object_ops = [\"freq\", \"tz\"]\n _field_ops = [\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"weekofyear\",\n \"week\",\n \"weekday\",\n \"dayofweek\",\n \"dayofyear\",\n \"quarter\",\n \"days_in_month\",\n \"daysinmonth\",\n \"microsecond\",\n \"nanosecond\",\n ]\n _other_ops = [\"date\", \"time\", \"timetz\"]\n _datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops\n _datetimelike_methods = [\n \"to_period\",\n \"tz_localize\",\n \"tz_convert\",\n \"normalize\",\n \"strftime\",\n \"round\",\n \"floor\",\n \"ceil\",\n \"month_name\",\n \"day_name\",\n ]\n\n # ndim is inherited from ExtensionArray, must exist to ensure\n # Timestamp.__richcmp__(DateTimeArray) operates pointwise\n\n # ensure that operations with numpy arrays defer to our implementation\n __array_priority__ = 1000\n\n # -----------------------------------------------------------------\n # Constructors\n\n _dtype: Union[np.dtype, DatetimeTZDtype]\n _freq = None\n\n def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy=False):\n if isinstance(values, (ABCSeries, ABCIndexClass)):\n values = values._values\n\n inferred_freq = getattr(values, \"_freq\", None)\n\n if isinstance(values, type(self)):\n # validation\n dtz = getattr(dtype, \"tz\", None)\n if dtz and values.tz is None:\n dtype = DatetimeTZDtype(tz=dtype.tz)\n elif dtz and values.tz:\n if not timezones.tz_compare(dtz, values.tz):\n msg = (\n \"Timezone of the array and 'dtype' do not match. \"\n f\"'{dtz}' != '{values.tz}'\"\n )\n raise TypeError(msg)\n elif values.tz:\n dtype = values.dtype\n\n if freq is None:\n freq = values.freq\n values = values._data\n\n if not isinstance(values, np.ndarray):\n raise ValueError(\n f\"Unexpected type '{type(values).__name__}'. 'values' must be \"\n \"a DatetimeArray ndarray, or Series or Index containing one of those.\"\n )\n if values.ndim not in [1, 2]:\n raise ValueError(\"Only 1-dimensional input arrays are supported.\")\n\n if values.dtype == \"i8\":\n # for compat with datetime/timedelta/period shared methods,\n # we can sometimes get here with int64 values. These represent\n # nanosecond UTC (or tz-naive) unix timestamps\n values = values.view(DT64NS_DTYPE)\n\n if values.dtype != DT64NS_DTYPE:\n raise ValueError(\n \"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. 
\"\n f\"Got {values.dtype} instead.\"\n )\n\n dtype = _validate_dt64_dtype(dtype)\n\n if freq == \"infer\":\n raise ValueError(\n \"Frequency inference not allowed in DatetimeArray.__init__. \"\n \"Use 'pd.array()' instead.\"\n )\n\n if copy:\n values = values.copy()\n if freq:\n freq = to_offset(freq)\n if getattr(dtype, \"tz\", None):\n # https://github.com/pandas-dev/pandas/issues/18595\n # Ensure that we have a standard timezone for pytz objects.\n # Without this, things like adding an array of timedeltas and\n # a tz-aware Timestamp (with a tz specific to its datetime) will\n # be incorrect(ish?) for the array as a whole\n dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))\n\n self._data = values\n self._dtype = dtype\n self._freq = freq\n\n if inferred_freq is None and freq is not None:\n type(self)._validate_frequency(self, freq)\n\n @classmethod\n def _simple_new(cls, values, freq=None, dtype=DT64NS_DTYPE):\n assert isinstance(values, np.ndarray)\n if values.dtype != DT64NS_DTYPE:\n assert values.dtype == \"i8\"\n values = values.view(DT64NS_DTYPE)\n\n result = object.__new__(cls)\n result._data = values\n result._freq = freq\n result._dtype = dtype\n return result\n\n @classmethod\n def _from_sequence(\n cls,\n data,\n dtype=None,\n copy=False,\n tz=None,\n freq=lib.no_default,\n dayfirst=False,\n yearfirst=False,\n ambiguous=\"raise\",\n ):\n explicit_none = freq is None\n freq = freq if freq is not lib.no_default else None\n\n freq, freq_infer = dtl.maybe_infer_freq(freq)\n\n subarr, tz, inferred_freq = sequence_to_dt64ns(\n data,\n dtype=dtype,\n copy=copy,\n tz=tz,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n ambiguous=ambiguous,\n )\n\n freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)\n if explicit_none:\n freq = None\n\n dtype = tz_to_dtype(tz)\n result = cls._simple_new(subarr, freq=freq, dtype=dtype)\n\n if inferred_freq is None and freq is not None:\n # this condition precludes `freq_infer`\n cls._validate_frequency(result, freq, ambiguous=ambiguous)\n\n elif freq_infer:\n # Set _freq directly to bypass duplicative _validate_frequency\n # check.\n result._freq = to_offset(result.inferred_freq)\n\n return result\n\n @classmethod\n def _generate_range(\n cls,\n start,\n end,\n periods,\n freq,\n tz=None,\n normalize=False,\n ambiguous=\"raise\",\n nonexistent=\"raise\",\n closed=None,\n ):\n\n periods = dtl.validate_periods(periods)\n if freq is None and any(x is None for x in [periods, start, end]):\n raise ValueError(\"Must provide freq argument if no data is supplied\")\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n \"Of the four parameters: start, end, periods, \"\n \"and freq, exactly three must be specified\"\n )\n freq = to_offset(freq)\n\n if start is not None:\n start = Timestamp(start)\n\n if end is not None:\n end = Timestamp(end)\n\n if start is None and end is None:\n if closed is not None:\n raise ValueError(\n \"Closed has to be None if not both of start and end are defined\"\n )\n if start is NaT or end is NaT:\n raise ValueError(\"Neither `start` nor `end` can be NaT\")\n\n left_closed, right_closed = dtl.validate_endpoints(closed)\n\n start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)\n\n tz = _infer_tz_from_endpoints(start, end, tz)\n\n if tz is not None:\n # Localize the start and end arguments\n start = _maybe_localize_point(\n start,\n getattr(start, \"tz\", None),\n start,\n freq,\n tz,\n ambiguous,\n nonexistent,\n )\n end = _maybe_localize_point(\n 
end, getattr(end, \"tz\", None), end, freq, tz, ambiguous, nonexistent\n )\n if freq is not None:\n # We break Day arithmetic (fixed 24 hour) here and opt for\n # Day to mean calendar day (23/24/25 hour). Therefore, strip\n # tz info from start and day to avoid DST arithmetic\n if isinstance(freq, Day):\n if start is not None:\n start = start.tz_localize(None)\n if end is not None:\n end = end.tz_localize(None)\n\n values, _tz = generate_regular_range(start, end, periods, freq)\n index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))\n\n if tz is not None and index.tz is None:\n arr = conversion.tz_localize_to_utc(\n index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent\n )\n\n index = cls(arr)\n\n # index is localized datetime64 array -> have to convert\n # start/end as well to compare\n if start is not None:\n start = start.tz_localize(tz).asm8\n if end is not None:\n end = end.tz_localize(tz).asm8\n else:\n # Create a linearly spaced date_range in local time\n # Nanosecond-granularity timestamps aren't always correctly\n # representable with doubles, so we limit the range that we\n # pass to np.linspace as much as possible\n arr = (\n np.linspace(0, end.value - start.value, periods, dtype=\"int64\")\n + start.value\n )\n dtype = tz_to_dtype(tz)\n index = cls._simple_new(\n arr.astype(\"M8[ns]\", copy=False), freq=None, dtype=dtype\n )\n\n if not left_closed and len(index) and index[0] == start:\n index = index[1:]\n if not right_closed and len(index) and index[-1] == end:\n index = index[:-1]\n\n dtype = tz_to_dtype(tz)\n return cls._simple_new(index.asi8, freq=freq, dtype=dtype)\n\n # -----------------------------------------------------------------\n # DatetimeLike Interface\n\n def _unbox_scalar(self, value):\n if not isinstance(value, self._scalar_type) and value is not NaT:\n raise ValueError(\"'value' should be a Timestamp.\")\n if not isna(value):\n self._check_compatible_with(value)\n return value.value\n\n def _scalar_from_string(self, value):\n return Timestamp(value, tz=self.tz)\n\n def _check_compatible_with(self, other, setitem: bool = False):\n if other is NaT:\n return\n self._assert_tzawareness_compat(other)\n if setitem:\n # Stricter check for setitem vs comparison methods\n if not timezones.tz_compare(self.tz, other.tz):\n raise ValueError(f\"Timezones don't match. '{self.tz} != {other.tz}'\")\n\n def _maybe_clear_freq(self):\n self._freq = None\n\n # -----------------------------------------------------------------\n # Descriptive Properties\n\n @property\n def _box_func(self):\n return lambda x: Timestamp(x, freq=self.freq, tz=self.tz)\n\n @property\n def dtype(self) -> Union[np.dtype, DatetimeTZDtype]:\n \"\"\"\n The dtype for the DatetimeArray.\n\n .. warning::\n\n A future version of pandas will change dtype to never be a\n ``numpy.dtype``. 
Instead, :attr:`DatetimeArray.dtype` will\n always be an instance of an ``ExtensionDtype`` subclass.\n\n Returns\n -------\n numpy.dtype or DatetimeTZDtype\n If the values are tz-naive, then ``np.dtype('datetime64[ns]')``\n is returned.\n\n If the values are tz-aware, then the ``DatetimeTZDtype``\n is returned.\n \"\"\"\n return self._dtype\n\n @property\n def tz(self):\n \"\"\"\n Return timezone, if any.\n\n Returns\n -------\n datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None\n Returns None when the array is tz-naive.\n \"\"\"\n # GH 18595\n return getattr(self.dtype, \"tz\", None)\n\n @tz.setter\n def tz(self, value):\n # GH 3746: Prevent localizing or converting the index by setting tz\n raise AttributeError(\n \"Cannot directly set timezone. Use tz_localize() \"\n \"or tz_convert() as appropriate\"\n )\n\n @property\n def tzinfo(self):\n \"\"\"\n Alias for tz attribute\n \"\"\"\n return self.tz\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _timezone(self):\n \"\"\"\n Comparable timezone both for pytz / dateutil\n \"\"\"\n return timezones.get_timezone(self.tzinfo)\n\n @property # NB: override with cache_readonly in immutable subclasses\n def is_normalized(self):\n \"\"\"\n Returns True if all of the dates are at midnight (\"no time\")\n \"\"\"\n return conversion.is_date_array_normalized(self.asi8, self.tz)\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _resolution(self):\n return libresolution.resolution(self.asi8, self.tz)\n\n # ----------------------------------------------------------------\n # Array-Like / EA-Interface Methods\n\n def __array__(self, dtype=None) -> np.ndarray:\n if dtype is None and self.tz:\n # The default for tz-aware is object, to preserve tz info\n dtype = object\n\n return super().__array__(dtype=dtype)\n\n def __iter__(self):\n \"\"\"\n Return an iterator over the boxed values\n\n Yields\n ------\n tstamp : Timestamp\n \"\"\"\n\n # convert in chunks of 10k for efficiency\n data = self.asi8\n length = len(self)\n chunksize = 10000\n chunks = int(length / chunksize) + 1\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, length)\n converted = tslib.ints_to_pydatetime(\n data[start_i:end_i], tz=self.tz, freq=self.freq, box=\"timestamp\"\n )\n for v in converted:\n yield v\n\n def astype(self, dtype, copy=True):\n # We handle\n # --> datetime\n # --> period\n # DatetimeLikeArrayMixin Super handles the rest.\n dtype = pandas_dtype(dtype)\n\n if is_datetime64_ns_dtype(dtype) and not is_dtype_equal(dtype, self.dtype):\n # GH#18951: datetime64_ns dtype but not equal means different tz\n new_tz = getattr(dtype, \"tz\", None)\n if getattr(self.dtype, \"tz\", None) is None:\n return self.tz_localize(new_tz)\n result = self.tz_convert(new_tz)\n if copy:\n result = result.copy()\n if new_tz is None:\n # Do we want .astype('datetime64[ns]') to be an ndarray.\n # The astype in Block._astype expects this to return an\n # ndarray, but we could maybe work around it there.\n result = result._data\n return result\n elif is_datetime64tz_dtype(self.dtype) and is_dtype_equal(self.dtype, dtype):\n if copy:\n return self.copy()\n return self\n elif is_period_dtype(dtype):\n return self.to_period(freq=dtype.freq)\n return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)\n\n # -----------------------------------------------------------------\n # Rendering Methods\n\n def _format_native_types(self, na_rep=\"NaT\", date_format=None, **kwargs):\n from 
pandas.io.formats.format import _get_format_datetime64_from_values\n\n fmt = _get_format_datetime64_from_values(self, date_format)\n\n return tslib.format_array_from_datetime(\n self.asi8.ravel(), tz=self.tz, format=fmt, na_rep=na_rep\n ).reshape(self.shape)\n\n # -----------------------------------------------------------------\n # Comparison Methods\n\n def _has_same_tz(self, other):\n zzone = self._timezone\n\n # vzone shouldn't be None if value is non-datetime like\n if isinstance(other, np.datetime64):\n # convert to Timestamp as np.datetime64 doesn't have tz attr\n other = Timestamp(other)\n vzone = timezones.get_timezone(getattr(other, \"tzinfo\", \"__no_tz__\"))\n return zzone == vzone\n\n def _assert_tzawareness_compat(self, other):\n # adapted from _Timestamp._assert_tzawareness_compat\n other_tz = getattr(other, \"tzinfo\", None)\n if is_datetime64tz_dtype(other):\n # Get tzinfo from Series dtype\n other_tz = other.dtype.tz\n if other is NaT:\n # pd.NaT quacks both aware and naive\n pass\n elif self.tz is None:\n if other_tz is not None:\n raise TypeError(\n \"Cannot compare tz-naive and tz-aware datetime-like objects.\"\n )\n elif other_tz is None:\n raise TypeError(\n \"Cannot compare tz-naive and tz-aware datetime-like objects\"\n )\n\n # -----------------------------------------------------------------\n # Arithmetic Methods\n\n def _sub_datetime_arraylike(self, other):\n \"\"\"subtract DatetimeArray/Index or ndarray[datetime64]\"\"\"\n if len(self) != len(other):\n raise ValueError(\"cannot add indices of unequal length\")\n\n if isinstance(other, np.ndarray):\n assert is_datetime64_dtype(other)\n other = type(self)(other)\n\n if not self._has_same_tz(other):\n # require tz compat\n raise TypeError(\n f\"{type(self).__name__} subtraction must have the same \"\n \"timezones or no timezones\"\n )\n\n self_i8 = self.asi8\n other_i8 = other.asi8\n arr_mask = self._isnan | other._isnan\n new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask)\n if self._hasnans or other._hasnans:\n new_values[arr_mask] = iNaT\n return new_values.view(\"timedelta64[ns]\")\n\n def _add_offset(self, offset):\n if self.ndim == 2:\n return self.ravel()._add_offset(offset).reshape(self.shape)\n\n assert not isinstance(offset, Tick)\n try:\n if self.tz is not None:\n values = self.tz_localize(None)\n else:\n values = self\n result = offset.apply_index(values).tz_localize(self.tz)\n\n except NotImplementedError:\n warnings.warn(\n \"Non-vectorized DateOffset being applied to Series or DatetimeIndex\",\n PerformanceWarning,\n )\n result = self.astype(\"O\") + offset\n if not len(self):\n # GH#30336 _from_sequence won't be able to infer self.tz\n return type(self)._from_sequence(result).tz_localize(self.tz)\n\n return type(self)._from_sequence(result)\n\n def _sub_datetimelike_scalar(self, other):\n # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]\n assert isinstance(other, (datetime, np.datetime64))\n assert other is not NaT\n other = Timestamp(other)\n if other is NaT:\n return self - NaT\n\n if not self._has_same_tz(other):\n # require tz compat\n raise TypeError(\n \"Timestamp subtraction must have the same timezones or no timezones\"\n )\n\n i8 = self.asi8\n result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)\n result = self._maybe_mask_results(result)\n return result.view(\"timedelta64[ns]\")\n\n # -----------------------------------------------------------------\n # Timezone Conversion and Localization Methods\n\n def 
_local_timestamps(self):\n \"\"\"\n Convert to an i8 (unix-like nanosecond timestamp) representation\n while keeping the local timezone and not using UTC.\n This is used to calculate time-of-day information as if the timestamps\n were timezone-naive.\n \"\"\"\n return tzconversion.tz_convert(self.asi8, utc, self.tz)\n\n def tz_convert(self, tz):\n \"\"\"\n Convert tz-aware Datetime Array/Index from one time zone to another.\n\n Parameters\n ----------\n tz : str, pytz.timezone, dateutil.tz.tzfile or None\n Time zone for time. Corresponding timestamps would be converted\n to this time zone of the Datetime Array/Index. A `tz` of None will\n convert to UTC and remove the timezone information.\n\n Returns\n -------\n Array or Index\n\n Raises\n ------\n TypeError\n If Datetime Array/Index is tz-naive.\n\n See Also\n --------\n DatetimeIndex.tz : A timezone that has a variable offset from UTC.\n DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a\n given time zone, or remove timezone from a tz-aware DatetimeIndex.\n\n Examples\n --------\n With the `tz` parameter, we can change the DatetimeIndex\n to other time zones:\n\n >>> dti = pd.date_range(start='2014-08-01 09:00',\n ... freq='H', periods=3, tz='Europe/Berlin')\n\n >>> dti\n DatetimeIndex(['2014-08-01 09:00:00+02:00',\n '2014-08-01 10:00:00+02:00',\n '2014-08-01 11:00:00+02:00'],\n dtype='datetime64[ns, Europe/Berlin]', freq='H')\n\n >>> dti.tz_convert('US/Central')\n DatetimeIndex(['2014-08-01 02:00:00-05:00',\n '2014-08-01 03:00:00-05:00',\n '2014-08-01 04:00:00-05:00'],\n dtype='datetime64[ns, US/Central]', freq='H')\n\n With the ``tz=None``, we can remove the timezone (after converting\n to UTC if necessary):\n\n >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',\n ... periods=3, tz='Europe/Berlin')\n\n >>> dti\n DatetimeIndex(['2014-08-01 09:00:00+02:00',\n '2014-08-01 10:00:00+02:00',\n '2014-08-01 11:00:00+02:00'],\n dtype='datetime64[ns, Europe/Berlin]', freq='H')\n\n >>> dti.tz_convert(None)\n DatetimeIndex(['2014-08-01 07:00:00',\n '2014-08-01 08:00:00',\n '2014-08-01 09:00:00'],\n dtype='datetime64[ns]', freq='H')\n \"\"\"\n tz = timezones.maybe_get_tz(tz)\n\n if self.tz is None:\n # tz naive, use tz_localize\n raise TypeError(\n \"Cannot convert tz-naive timestamps, use tz_localize to localize\"\n )\n\n # No conversion since timestamps are all UTC to begin with\n dtype = tz_to_dtype(tz)\n return self._simple_new(self.asi8, dtype=dtype, freq=self.freq)\n\n def tz_localize(self, tz, ambiguous=\"raise\", nonexistent=\"raise\"):\n \"\"\"\n Localize tz-naive Datetime Array/Index to tz-aware\n Datetime Array/Index.\n\n This method takes a time zone (tz) naive Datetime Array/Index object\n and makes this time zone aware. It does not move the time to another\n time zone.\n Time zone localization helps to switch from time zone aware to time\n zone unaware objects.\n\n Parameters\n ----------\n tz : str, pytz.timezone, dateutil.tz.tzfile or None\n Time zone to convert timestamps to. Passing ``None`` will\n remove the time zone information preserving local time.\n ambiguous : 'infer', 'NaT', bool array, default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. 
In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False signifies a\n non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times.\n\n nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \\\ndefault 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST.\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Same type as self\n Array/Index converted to the specified time zone.\n\n Raises\n ------\n TypeError\n If the Datetime Array/Index is tz-aware and tz is not None.\n\n See Also\n --------\n DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from\n one time zone to another.\n\n Examples\n --------\n >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)\n >>> tz_naive\n DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',\n '2018-03-03 09:00:00'],\n dtype='datetime64[ns]', freq='D')\n\n Localize DatetimeIndex in US/Eastern time zone:\n\n >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')\n >>> tz_aware\n DatetimeIndex(['2018-03-01 09:00:00-05:00',\n '2018-03-02 09:00:00-05:00',\n '2018-03-03 09:00:00-05:00'],\n dtype='datetime64[ns, US/Eastern]', freq=None)\n\n With the ``tz=None``, we can remove the time zone information\n while keeping the local time (not converted to UTC):\n\n >>> tz_aware.tz_localize(None)\n DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',\n '2018-03-03 09:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n Be careful with DST changes. When there is sequential data, pandas can\n infer the DST time:\n\n >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.dt.tz_localize('CET', ambiguous='infer')\n 0 2018-10-28 01:30:00+02:00\n 1 2018-10-28 02:00:00+02:00\n 2 2018-10-28 02:30:00+02:00\n 3 2018-10-28 02:00:00+01:00\n 4 2018-10-28 02:30:00+01:00\n 5 2018-10-28 03:00:00+01:00\n 6 2018-10-28 03:30:00+01:00\n dtype: datetime64[ns, CET]\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly\n\n >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... '2018-10-28 03:46:00']))\n >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 0 2018-10-28 01:20:00+02:00\n 1 2018-10-28 02:36:00+02:00\n 2 2018-10-28 03:46:00+01:00\n dtype: datetime64[ns, CET]\n\n If the DST transition causes nonexistent times, you can shift these\n dates forward or backwards with a timedelta object or `'shift_forward'`\n or `'shift_backwards'`.\n\n >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',\n ... 
'2015-03-29 03:30:00']))\n >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n 0 2015-03-29 03:00:00+02:00\n 1 2015-03-29 03:30:00+02:00\n dtype: datetime64[ns, Europe/Warsaw]\n\n >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n 0 2015-03-29 01:59:59.999999999+01:00\n 1 2015-03-29 03:30:00+02:00\n dtype: datetime64[ns, Europe/Warsaw]\n\n >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n 0 2015-03-29 03:30:00+02:00\n 1 2015-03-29 03:30:00+02:00\n dtype: datetime64[ns, Europe/Warsaw]\n \"\"\"\n nonexistent_options = (\"raise\", \"NaT\", \"shift_forward\", \"shift_backward\")\n if nonexistent not in nonexistent_options and not isinstance(\n nonexistent, timedelta\n ):\n raise ValueError(\n \"The nonexistent argument must be one of 'raise', \"\n \"'NaT', 'shift_forward', 'shift_backward' or \"\n \"a timedelta object\"\n )\n\n if self.tz is not None:\n if tz is None:\n new_dates = tzconversion.tz_convert(self.asi8, timezones.UTC, self.tz)\n else:\n raise TypeError(\"Already tz-aware, use tz_convert to convert.\")\n else:\n tz = timezones.maybe_get_tz(tz)\n # Convert to UTC\n\n new_dates = conversion.tz_localize_to_utc(\n self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent\n )\n new_dates = new_dates.view(DT64NS_DTYPE)\n dtype = tz_to_dtype(tz)\n\n freq = None\n if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):\n # we can preserve freq\n # TODO: Also for fixed-offsets\n freq = self.freq\n elif tz is None and self.tz is None:\n # no-op\n freq = self.freq\n return self._simple_new(new_dates, dtype=dtype, freq=freq)\n\n # ----------------------------------------------------------------\n # Conversion Methods - Vectorized analogues of Timestamp methods\n\n def to_pydatetime(self) -> np.ndarray:\n \"\"\"\n Return Datetime Array/Index as object ndarray of datetime.datetime\n objects.\n\n Returns\n -------\n datetimes : ndarray\n \"\"\"\n return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)\n\n def normalize(self):\n \"\"\"\n Convert times to midnight.\n\n The time component of the date-time is converted to midnight i.e.\n 00:00:00. This is useful in cases, when the time does not matter.\n Length is unaltered. The timezones are unaffected.\n\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on Datetime Array/Index.\n\n Returns\n -------\n DatetimeArray, DatetimeIndex or Series\n The same type as the original data. Series will have the same\n name and index. DatetimeIndex will have the same name.\n\n See Also\n --------\n floor : Floor the datetimes to the specified freq.\n ceil : Ceil the datetimes to the specified freq.\n round : Round the datetimes to the specified freq.\n\n Examples\n --------\n >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',\n ... 
periods=3, tz='Asia/Calcutta')\n >>> idx\n DatetimeIndex(['2014-08-01 10:00:00+05:30',\n '2014-08-01 11:00:00+05:30',\n '2014-08-01 12:00:00+05:30'],\n dtype='datetime64[ns, Asia/Calcutta]', freq='H')\n >>> idx.normalize()\n DatetimeIndex(['2014-08-01 00:00:00+05:30',\n '2014-08-01 00:00:00+05:30',\n '2014-08-01 00:00:00+05:30'],\n dtype='datetime64[ns, Asia/Calcutta]', freq=None)\n \"\"\"\n if self.tz is None or timezones.is_utc(self.tz):\n not_null = ~self.isna()\n DAY_NS = ccalendar.DAY_SECONDS * 1_000_000_000\n new_values = self.asi8.copy()\n adjustment = new_values[not_null] % DAY_NS\n new_values[not_null] = new_values[not_null] - adjustment\n else:\n new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz)\n return type(self)(new_values)._with_freq(\"infer\").tz_localize(self.tz)\n\n def to_period(self, freq=None):\n \"\"\"\n Cast to PeriodArray/Index at a particular frequency.\n\n Converts DatetimeArray/Index to PeriodArray/Index.\n\n Parameters\n ----------\n freq : str or Offset, optional\n One of pandas' :ref:`offset strings <timeseries.offset_aliases>`\n or an Offset object. Will be inferred by default.\n\n Returns\n -------\n PeriodArray/Index\n\n Raises\n ------\n ValueError\n When converting a DatetimeArray/Index with non-regular values,\n so that a frequency cannot be inferred.\n\n See Also\n --------\n PeriodIndex: Immutable ndarray holding ordinal values.\n DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"y\": [1, 2, 3]},\n ... index=pd.to_datetime([\"2000-03-31 00:00:00\",\n ... \"2000-05-31 00:00:00\",\n ... \"2000-08-31 00:00:00\"]))\n >>> df.index.to_period(\"M\")\n PeriodIndex(['2000-03', '2000-05', '2000-08'],\n dtype='period[M]', freq='M')\n\n Infer the daily frequency\n\n >>> idx = pd.date_range(\"2017-01-01\", periods=2)\n >>> idx.to_period()\n PeriodIndex(['2017-01-01', '2017-01-02'],\n dtype='period[D]', freq='D')\n \"\"\"\n from pandas.core.arrays import PeriodArray\n\n if self.tz is not None:\n warnings.warn(\n \"Converting to PeriodArray/Index representation \"\n \"will drop timezone information.\",\n UserWarning,\n )\n\n if freq is None:\n freq = self.freqstr or self.inferred_freq\n\n if freq is None:\n raise ValueError(\n \"You must pass a freq argument as current index has none.\"\n )\n\n res = get_period_alias(freq)\n\n # https://github.com/pandas-dev/pandas/issues/33358\n if res is None:\n base, stride = libfrequencies.base_and_stride(freq)\n res = f\"{stride}{base}\"\n\n freq = res\n\n return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)\n\n def to_perioddelta(self, freq):\n \"\"\"\n Calculate TimedeltaArray of difference between index\n values and index converted to PeriodArray at specified\n freq. Used for vectorized offsets.\n\n Parameters\n ----------\n freq : Period frequency\n\n Returns\n -------\n TimedeltaArray/Index\n \"\"\"\n # TODO: consider privatizing (discussion in GH#23113)\n from pandas.core.arrays.timedeltas import TimedeltaArray\n\n i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8\n m8delta = i8delta.view(\"m8[ns]\")\n return TimedeltaArray(m8delta)\n\n # -----------------------------------------------------------------\n # Properties - Vectorized Timestamp Properties/Methods\n\n def month_name(self, locale=None):\n \"\"\"\n Return the month names of the DateTimeIndex with specified locale.\n\n .. 
versionadded:: 0.23.0\n\n Parameters\n ----------\n locale : str, optional\n Locale determining the language in which to return the month name.\n Default is English locale.\n\n Returns\n -------\n Index\n Index of month names.\n\n Examples\n --------\n >>> idx = pd.date_range(start='2018-01', freq='M', periods=3)\n >>> idx\n DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],\n dtype='datetime64[ns]', freq='M')\n >>> idx.month_name()\n Index(['January', 'February', 'March'], dtype='object')\n \"\"\"\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n else:\n values = self.asi8\n\n result = fields.get_date_name_field(values, \"month_name\", locale=locale)\n result = self._maybe_mask_results(result, fill_value=None)\n return result\n\n def day_name(self, locale=None):\n \"\"\"\n Return the day names of the DateTimeIndex with specified locale.\n\n .. versionadded:: 0.23.0\n\n Parameters\n ----------\n locale : str, optional\n Locale determining the language in which to return the day name.\n Default is English locale.\n\n Returns\n -------\n Index\n Index of day names.\n\n Examples\n --------\n >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)\n >>> idx\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],\n dtype='datetime64[ns]', freq='D')\n >>> idx.day_name()\n Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')\n \"\"\"\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n else:\n values = self.asi8\n\n result = fields.get_date_name_field(values, \"day_name\", locale=locale)\n result = self._maybe_mask_results(result, fill_value=None)\n return result\n\n @property\n def time(self):\n \"\"\"\n Returns numpy array of datetime.time. The time part of the Timestamps.\n \"\"\"\n # If the Timestamps have a timezone that is not UTC,\n # convert them into their i8 representation while\n # keeping their timezone and not using UTC\n if self.tz is not None and not timezones.is_utc(self.tz):\n timestamps = self._local_timestamps()\n else:\n timestamps = self.asi8\n\n return tslib.ints_to_pydatetime(timestamps, box=\"time\")\n\n @property\n def timetz(self):\n \"\"\"\n Returns numpy array of datetime.time also containing timezone\n information. The time part of the Timestamps.\n \"\"\"\n return tslib.ints_to_pydatetime(self.asi8, self.tz, box=\"time\")\n\n @property\n def date(self):\n \"\"\"\n Returns numpy array of python datetime.date objects (namely, the date\n part of Timestamps without timezone information).\n \"\"\"\n # If the Timestamps have a timezone that is not UTC,\n # convert them into their i8 representation while\n # keeping their timezone and not using UTC\n if self.tz is not None and not timezones.is_utc(self.tz):\n timestamps = self._local_timestamps()\n else:\n timestamps = self.asi8\n\n return tslib.ints_to_pydatetime(timestamps, box=\"date\")\n\n def isocalendar(self):\n \"\"\"\n Returns a DataFrame with the year, week, and day calculated according to\n the ISO 8601 standard.\n\n .. 
versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n with columns year, week and day\n\n See Also\n --------\n Timestamp.isocalendar\n datetime.date.isocalendar\n\n Examples\n --------\n >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)\n >>> idx.isocalendar()\n year week day\n 0 2019 52 7\n 1 2020 1 1\n 2 2020 1 2\n 3 2020 1 3\n >>> idx.isocalendar().week\n 0 52\n 1 1\n 2 1\n 3 1\n Name: week, dtype: UInt32\n \"\"\"\n from pandas import DataFrame\n\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n else:\n values = self.asi8\n sarray = fields.build_isocalendar_sarray(values)\n iso_calendar_df = DataFrame(\n sarray, columns=[\"year\", \"week\", \"day\"], dtype=\"UInt32\"\n )\n if self._hasnans:\n iso_calendar_df.iloc[self._isnan] = None\n return iso_calendar_df\n\n year = _field_accessor(\n \"year\",\n \"Y\",\n \"\"\"\n The year of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"Y\")\n ... )\n >>> datetime_series\n 0 2000-12-31\n 1 2001-12-31\n 2 2002-12-31\n dtype: datetime64[ns]\n >>> datetime_series.dt.year\n 0 2000\n 1 2001\n 2 2002\n dtype: int64\n \"\"\",\n )\n month = _field_accessor(\n \"month\",\n \"M\",\n \"\"\"\n The month as January=1, December=12.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"M\")\n ... )\n >>> datetime_series\n 0 2000-01-31\n 1 2000-02-29\n 2 2000-03-31\n dtype: datetime64[ns]\n >>> datetime_series.dt.month\n 0 1\n 1 2\n 2 3\n dtype: int64\n \"\"\",\n )\n day = _field_accessor(\n \"day\",\n \"D\",\n \"\"\"\n The day of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"D\")\n ... )\n >>> datetime_series\n 0 2000-01-01\n 1 2000-01-02\n 2 2000-01-03\n dtype: datetime64[ns]\n >>> datetime_series.dt.day\n 0 1\n 1 2\n 2 3\n dtype: int64\n \"\"\",\n )\n hour = _field_accessor(\n \"hour\",\n \"h\",\n \"\"\"\n The hours of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"h\")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 01:00:00\n 2 2000-01-01 02:00:00\n dtype: datetime64[ns]\n >>> datetime_series.dt.hour\n 0 0\n 1 1\n 2 2\n dtype: int64\n \"\"\",\n )\n minute = _field_accessor(\n \"minute\",\n \"m\",\n \"\"\"\n The minutes of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"T\")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:01:00\n 2 2000-01-01 00:02:00\n dtype: datetime64[ns]\n >>> datetime_series.dt.minute\n 0 0\n 1 1\n 2 2\n dtype: int64\n \"\"\",\n )\n second = _field_accessor(\n \"second\",\n \"s\",\n \"\"\"\n The seconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"s\")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:00:01\n 2 2000-01-01 00:00:02\n dtype: datetime64[ns]\n >>> datetime_series.dt.second\n 0 0\n 1 1\n 2 2\n dtype: int64\n \"\"\",\n )\n microsecond = _field_accessor(\n \"microsecond\",\n \"us\",\n \"\"\"\n The microseconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"us\")\n ... 
)\n >>> datetime_series\n 0 2000-01-01 00:00:00.000000\n 1 2000-01-01 00:00:00.000001\n 2 2000-01-01 00:00:00.000002\n dtype: datetime64[ns]\n >>> datetime_series.dt.microsecond\n 0 0\n 1 1\n 2 2\n dtype: int64\n \"\"\",\n )\n nanosecond = _field_accessor(\n \"nanosecond\",\n \"ns\",\n \"\"\"\n The nanoseconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"ns\")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00.000000000\n 1 2000-01-01 00:00:00.000000001\n 2 2000-01-01 00:00:00.000000002\n dtype: datetime64[ns]\n >>> datetime_series.dt.nanosecond\n 0 0\n 1 1\n 2 2\n dtype: int64\n \"\"\",\n )\n weekofyear = _field_accessor(\n \"weekofyear\",\n \"woy\",\n \"\"\"\n The week ordinal of the year.\n \"\"\",\n )\n week = weekofyear\n _dayofweek_doc = \"\"\"\n The day of the week with Monday=0, Sunday=6.\n\n Return the day of the week. It is assumed the week starts on\n Monday, which is denoted by 0 and ends on Sunday which is denoted\n by 6. This method is available on both Series with datetime\n values (using the `dt` accessor) or DatetimeIndex.\n\n Returns\n -------\n Series or Index\n Containing integers indicating the day number.\n\n See Also\n --------\n Series.dt.dayofweek : Alias.\n Series.dt.weekday : Alias.\n Series.dt.day_name : Returns the name of the day of the week.\n\n Examples\n --------\n >>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()\n >>> s.dt.dayofweek\n 2016-12-31 5\n 2017-01-01 6\n 2017-01-02 0\n 2017-01-03 1\n 2017-01-04 2\n 2017-01-05 3\n 2017-01-06 4\n 2017-01-07 5\n 2017-01-08 6\n Freq: D, dtype: int64\n \"\"\"\n dayofweek = _field_accessor(\"dayofweek\", \"dow\", _dayofweek_doc)\n weekday = dayofweek\n\n dayofyear = _field_accessor(\n \"dayofyear\",\n \"doy\",\n \"\"\"\n The ordinal day of the year.\n \"\"\",\n )\n quarter = _field_accessor(\n \"quarter\",\n \"q\",\n \"\"\"\n The quarter of the date.\n \"\"\",\n )\n days_in_month = _field_accessor(\n \"days_in_month\",\n \"dim\",\n \"\"\"\n The number of days in the month.\n \"\"\",\n )\n daysinmonth = days_in_month\n _is_month_doc = \"\"\"\n Indicates whether the date is the {first_or_last} day of the month.\n\n Returns\n -------\n Series or array\n For Series, returns a Series with boolean values.\n For DatetimeIndex, returns a boolean array.\n\n See Also\n --------\n is_month_start : Return a boolean indicating whether the date\n is the first day of the month.\n is_month_end : Return a boolean indicating whether the date\n is the last day of the month.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> s = pd.Series(pd.date_range(\"2018-02-27\", periods=3))\n >>> s\n 0 2018-02-27\n 1 2018-02-28\n 2 2018-03-01\n dtype: datetime64[ns]\n >>> s.dt.is_month_start\n 0 False\n 1 False\n 2 True\n dtype: bool\n >>> s.dt.is_month_end\n 0 False\n 1 True\n 2 False\n dtype: bool\n\n >>> idx = pd.date_range(\"2018-02-27\", periods=3)\n >>> idx.is_month_start\n array([False, False, True])\n >>> idx.is_month_end\n array([False, True, False])\n \"\"\"\n is_month_start = _field_accessor(\n \"is_month_start\", \"is_month_start\", _is_month_doc.format(first_or_last=\"first\")\n )\n\n is_month_end = _field_accessor(\n \"is_month_end\", \"is_month_end\", _is_month_doc.format(first_or_last=\"last\")\n )\n\n is_quarter_start = _field_accessor(\n \"is_quarter_start\",\n \"is_quarter_start\",\n \"\"\"\n Indicator for whether the date 
is the first day of a quarter.\n\n Returns\n -------\n is_quarter_start : Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n quarter : Return the quarter of the date.\n is_quarter_end : Similar property for indicating the quarter start.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> df = pd.DataFrame({'dates': pd.date_range(\"2017-03-30\",\n ... periods=4)})\n >>> df.assign(quarter=df.dates.dt.quarter,\n ... is_quarter_start=df.dates.dt.is_quarter_start)\n dates quarter is_quarter_start\n 0 2017-03-30 1 False\n 1 2017-03-31 1 False\n 2 2017-04-01 2 True\n 3 2017-04-02 2 False\n\n >>> idx = pd.date_range('2017-03-30', periods=4)\n >>> idx\n DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_quarter_start\n array([False, False, True, False])\n \"\"\",\n )\n is_quarter_end = _field_accessor(\n \"is_quarter_end\",\n \"is_quarter_end\",\n \"\"\"\n Indicator for whether the date is the last day of a quarter.\n\n Returns\n -------\n is_quarter_end : Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n quarter : Return the quarter of the date.\n is_quarter_start : Similar property indicating the quarter start.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> df = pd.DataFrame({'dates': pd.date_range(\"2017-03-30\",\n ... periods=4)})\n >>> df.assign(quarter=df.dates.dt.quarter,\n ... is_quarter_end=df.dates.dt.is_quarter_end)\n dates quarter is_quarter_end\n 0 2017-03-30 1 False\n 1 2017-03-31 1 True\n 2 2017-04-01 2 False\n 3 2017-04-02 2 False\n\n >>> idx = pd.date_range('2017-03-30', periods=4)\n >>> idx\n DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_quarter_end\n array([False, True, False, False])\n \"\"\",\n )\n is_year_start = _field_accessor(\n \"is_year_start\",\n \"is_year_start\",\n \"\"\"\n Indicate whether the date is the first day of a year.\n\n Returns\n -------\n Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n is_year_end : Similar property indicating the last day of the year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> dates = pd.Series(pd.date_range(\"2017-12-30\", periods=3))\n >>> dates\n 0 2017-12-30\n 1 2017-12-31\n 2 2018-01-01\n dtype: datetime64[ns]\n\n >>> dates.dt.is_year_start\n 0 False\n 1 False\n 2 True\n dtype: bool\n\n >>> idx = pd.date_range(\"2017-12-30\", periods=3)\n >>> idx\n DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_year_start\n array([False, False, True])\n \"\"\",\n )\n is_year_end = _field_accessor(\n \"is_year_end\",\n \"is_year_end\",\n \"\"\"\n Indicate whether the date is the last day of the year.\n\n Returns\n -------\n Series or DatetimeIndex\n The same type as the original data with boolean values. 
Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n is_year_start : Similar property indicating the start of the year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> dates = pd.Series(pd.date_range(\"2017-12-30\", periods=3))\n >>> dates\n 0 2017-12-30\n 1 2017-12-31\n 2 2018-01-01\n dtype: datetime64[ns]\n\n >>> dates.dt.is_year_end\n 0 False\n 1 True\n 2 False\n dtype: bool\n\n >>> idx = pd.date_range(\"2017-12-30\", periods=3)\n >>> idx\n DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_year_end\n array([False, True, False])\n \"\"\",\n )\n is_leap_year = _field_accessor(\n \"is_leap_year\",\n \"is_leap_year\",\n \"\"\"\n Boolean indicator if the date belongs to a leap year.\n\n A leap year is a year, which has 366 days (instead of 365) including\n 29th of February as an intercalary day.\n Leap years are years which are multiples of four with the exception\n of years divisible by 100 but not by 400.\n\n Returns\n -------\n Series or ndarray\n Booleans indicating if dates belong to a leap year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> idx = pd.date_range(\"2012-01-01\", \"2015-01-01\", freq=\"Y\")\n >>> idx\n DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],\n dtype='datetime64[ns]', freq='A-DEC')\n >>> idx.is_leap_year\n array([ True, False, False])\n\n >>> dates_series = pd.Series(idx)\n >>> dates_series\n 0 2012-12-31\n 1 2013-12-31\n 2 2014-12-31\n dtype: datetime64[ns]\n >>> dates_series.dt.is_leap_year\n 0 True\n 1 False\n 2 False\n dtype: bool\n \"\"\",\n )\n\n def to_julian_date(self):\n \"\"\"\n Convert Datetime Array to float64 ndarray of Julian Dates.\n 0 Julian date is noon January 1, 4713 BC.\n https://en.wikipedia.org/wiki/Julian_day\n \"\"\"\n\n # http://mysite.verizon.net/aesir_research/date/jdalg2.htm\n year = np.asarray(self.year)\n month = np.asarray(self.month)\n day = np.asarray(self.day)\n testarr = month < 3\n year[testarr] -= 1\n month[testarr] += 12\n return (\n day\n + np.fix((153 * month - 457) / 5)\n + 365 * year\n + np.floor(year / 4)\n - np.floor(year / 100)\n + np.floor(year / 400)\n + 1_721_118.5\n + (\n self.hour\n + self.minute / 60.0\n + self.second / 3600.0\n + self.microsecond / 3600.0 / 1e6\n + self.nanosecond / 3600.0 / 1e9\n )\n / 24.0\n )\n\n\n# -------------------------------------------------------------------\n# Constructor Helpers\n\n\ndef sequence_to_dt64ns(\n data,\n dtype=None,\n copy=False,\n tz=None,\n dayfirst=False,\n yearfirst=False,\n ambiguous=\"raise\",\n):\n \"\"\"\n Parameters\n ----------\n data : list-like\n dtype : dtype, str, or None, default None\n copy : bool, default False\n tz : tzinfo, str, or None, default None\n dayfirst : bool, default False\n yearfirst : bool, default False\n ambiguous : str, bool, or arraylike, default 'raise'\n See pandas._libs.tslibs.conversion.tz_localize_to_utc.\n\n Returns\n -------\n result : numpy.ndarray\n The sequence converted to a numpy array with dtype ``datetime64[ns]``.\n tz : tzinfo or None\n Either the user-provided tzinfo or one inferred from the data.\n inferred_freq : Tick or None\n The inferred frequency of the sequence.\n\n Raises\n ------\n TypeError : PeriodDType data is passed\n \"\"\"\n\n inferred_freq = None\n\n dtype = 
_validate_dt64_dtype(dtype)\n\n if not hasattr(data, \"dtype\"):\n # e.g. list, tuple\n if np.ndim(data) == 0:\n # i.e. generator\n data = list(data)\n data = np.asarray(data)\n copy = False\n elif isinstance(data, ABCSeries):\n data = data._values\n if isinstance(data, ABCPandasArray):\n data = data.to_numpy()\n\n if hasattr(data, \"freq\"):\n # i.e. DatetimeArray/Index\n inferred_freq = data.freq\n\n # if dtype has an embedded tz, capture it\n tz = validate_tz_from_dtype(dtype, tz)\n\n if isinstance(data, ABCIndexClass):\n if data.nlevels > 1:\n # Without this check, data._data below is None\n raise TypeError(\"Cannot create a DatetimeArray from a MultiIndex.\")\n data = data._data\n\n # By this point we are assured to have either a numpy array or Index\n data, copy = maybe_convert_dtype(data, copy)\n\n if is_object_dtype(data) or is_string_dtype(data):\n # TODO: We do not have tests specific to string-dtypes,\n # also complex or categorical or other extension\n copy = False\n if lib.infer_dtype(data, skipna=False) == \"integer\":\n data = data.astype(np.int64)\n else:\n # data comes back here as either i8 to denote UTC timestamps\n # or M8[ns] to denote wall times\n data, inferred_tz = objects_to_datetime64ns(\n data, dayfirst=dayfirst, yearfirst=yearfirst\n )\n tz = maybe_infer_tz(tz, inferred_tz)\n\n # `data` may have originally been a Categorical[datetime64[ns, tz]],\n # so we need to handle these types.\n if is_datetime64tz_dtype(data):\n # DatetimeArray -> ndarray\n tz = maybe_infer_tz(tz, data.tz)\n result = data._data\n\n elif is_datetime64_dtype(data):\n # tz-naive DatetimeArray or ndarray[datetime64]\n data = getattr(data, \"_data\", data)\n if data.dtype != DT64NS_DTYPE:\n data = conversion.ensure_datetime64ns(data)\n\n if tz is not None:\n # Convert tz-naive to UTC\n tz = timezones.maybe_get_tz(tz)\n data = conversion.tz_localize_to_utc(\n data.view(\"i8\"), tz, ambiguous=ambiguous\n )\n data = data.view(DT64NS_DTYPE)\n\n assert data.dtype == DT64NS_DTYPE, data.dtype\n result = data\n\n else:\n # must be integer dtype otherwise\n # assume this data are epoch timestamps\n if tz:\n tz = timezones.maybe_get_tz(tz)\n\n if data.dtype != INT64_DTYPE:\n data = data.astype(np.int64, copy=False)\n result = data.view(DT64NS_DTYPE)\n\n if copy:\n # TODO: should this be deepcopy?\n result = result.copy()\n\n assert isinstance(result, np.ndarray), type(result)\n assert result.dtype == \"M8[ns]\", result.dtype\n\n # We have to call this again after possibly inferring a tz above\n validate_tz_from_dtype(dtype, tz)\n\n return result, tz, inferred_freq\n\n\ndef objects_to_datetime64ns(\n data,\n dayfirst,\n yearfirst,\n utc=False,\n errors=\"raise\",\n require_iso8601=False,\n allow_object=False,\n):\n \"\"\"\n Convert data to array of timestamps.\n\n Parameters\n ----------\n data : np.ndarray[object]\n dayfirst : bool\n yearfirst : bool\n utc : bool, default False\n Whether to convert timezone-aware timestamps to UTC.\n errors : {'raise', 'ignore', 'coerce'}\n allow_object : bool\n Whether to return an object-dtype ndarray instead of raising if the\n data contains more than one timezone.\n\n Returns\n -------\n result : ndarray\n np.int64 dtype if returned values represent UTC timestamps\n np.datetime64[ns] if returned values represent wall times\n object if mixed timezones\n inferred_tz : tzinfo or None\n\n Raises\n ------\n ValueError : if data cannot be converted to datetimes\n \"\"\"\n assert errors in [\"raise\", \"ignore\", \"coerce\"]\n\n # if str-dtype, convert\n data = 
np.array(data, copy=False, dtype=np.object_)\n\n try:\n result, tz_parsed = tslib.array_to_datetime(\n data,\n errors=errors,\n utc=utc,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n require_iso8601=require_iso8601,\n )\n except ValueError as e:\n try:\n values, tz_parsed = conversion.datetime_to_datetime64(data)\n # If tzaware, these values represent unix timestamps, so we\n # return them as i8 to distinguish from wall times\n return values.view(\"i8\"), tz_parsed\n except (ValueError, TypeError):\n raise e\n\n if tz_parsed is not None:\n # We can take a shortcut since the datetime64 numpy array\n # is in UTC\n # Return i8 values to denote unix timestamps\n return result.view(\"i8\"), tz_parsed\n elif is_datetime64_dtype(result):\n # returning M8[ns] denotes wall-times; since tz is None\n # the distinction is a thin one\n return result, tz_parsed\n elif is_object_dtype(result):\n # GH#23675 when called via `pd.to_datetime`, returning an object-dtype\n # array is allowed. When called via `pd.DatetimeIndex`, we can\n # only accept datetime64 dtype, so raise TypeError if object-dtype\n # is returned, as that indicates the values can be recognized as\n # datetimes but they have conflicting timezones/awareness\n if allow_object:\n return result, tz_parsed\n raise TypeError(result)\n else: # pragma: no cover\n # GH#23675 this TypeError should never be hit, whereas the TypeError\n # in the object-dtype branch above is reachable.\n raise TypeError(result)\n\n\ndef maybe_convert_dtype(data, copy):\n \"\"\"\n Convert data based on dtype conventions, issuing deprecation warnings\n or errors where appropriate.\n\n Parameters\n ----------\n data : np.ndarray or pd.Index\n copy : bool\n\n Returns\n -------\n data : np.ndarray or pd.Index\n copy : bool\n\n Raises\n ------\n TypeError : PeriodDType data is passed\n \"\"\"\n if not hasattr(data, \"dtype\"):\n # e.g. collections.deque\n return data, copy\n\n if is_float_dtype(data.dtype):\n # Note: we must cast to datetime64[ns] here in order to treat these\n # as wall-times instead of UTC timestamps.\n data = data.astype(DT64NS_DTYPE)\n copy = False\n # TODO: deprecate this behavior to instead treat symmetrically\n # with integer dtypes. See discussion in GH#23675\n\n elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype):\n # GH#29794 enforcing deprecation introduced in GH#23539\n raise TypeError(f\"dtype {data.dtype} cannot be converted to datetime64[ns]\")\n elif is_period_dtype(data.dtype):\n # Note: without explicitly raising here, PeriodIndex\n # test_setops.test_join_does_not_recur fails\n raise TypeError(\n \"Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead\"\n )\n\n elif is_categorical_dtype(data.dtype):\n # GH#18664 preserve tz in going DTI->Categorical->DTI\n # TODO: cases where we need to do another pass through this func,\n # e.g. 
the categories are timedelta64s\n data = data.categories.take(data.codes, fill_value=NaT)._values\n copy = False\n\n elif is_extension_array_dtype(data.dtype) and not is_datetime64tz_dtype(data.dtype):\n # Includes categorical\n # TODO: We have no tests for these\n data = np.array(data, dtype=np.object_)\n copy = False\n\n return data, copy\n\n\n# -------------------------------------------------------------------\n# Validation and Inference\n\n\ndef maybe_infer_tz(tz, inferred_tz):\n \"\"\"\n If a timezone is inferred from data, check that it is compatible with\n the user-provided timezone, if any.\n\n Parameters\n ----------\n tz : tzinfo or None\n inferred_tz : tzinfo or None\n\n Returns\n -------\n tz : tzinfo or None\n\n Raises\n ------\n TypeError : if both timezones are present but do not match\n \"\"\"\n if tz is None:\n tz = inferred_tz\n elif inferred_tz is None:\n pass\n elif not timezones.tz_compare(tz, inferred_tz):\n raise TypeError(\n f\"data is already tz-aware {inferred_tz}, unable to \"\n f\"set specified tz: {tz}\"\n )\n return tz\n\n\ndef _validate_dt64_dtype(dtype):\n \"\"\"\n Check that a dtype, if passed, represents either a numpy datetime64[ns]\n dtype or a pandas DatetimeTZDtype.\n\n Parameters\n ----------\n dtype : object\n\n Returns\n -------\n dtype : None, numpy.dtype, or DatetimeTZDtype\n\n Raises\n ------\n ValueError : invalid dtype\n\n Notes\n -----\n Unlike validate_tz_from_dtype, this does _not_ allow non-existent\n tz errors to go through\n \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n if is_dtype_equal(dtype, np.dtype(\"M8\")):\n # no precision, disallowed GH#24806\n msg = (\n \"Passing in 'datetime64' dtype with no precision is not allowed. \"\n \"Please pass in 'datetime64[ns]' instead.\"\n )\n raise ValueError(msg)\n\n if (isinstance(dtype, np.dtype) and dtype != DT64NS_DTYPE) or not isinstance(\n dtype, (np.dtype, DatetimeTZDtype)\n ):\n raise ValueError(\n f\"Unexpected value for 'dtype': '{dtype}'. \"\n \"Must be 'datetime64[ns]' or DatetimeTZDtype'.\"\n )\n return dtype\n\n\ndef validate_tz_from_dtype(dtype, tz):\n \"\"\"\n If the given dtype is a DatetimeTZDtype, extract the implied\n tzinfo object from it and check that it does not conflict with the given\n tz.\n\n Parameters\n ----------\n dtype : dtype, str\n tz : None, tzinfo\n\n Returns\n -------\n tz : consensus tzinfo\n\n Raises\n ------\n ValueError : on tzinfo mismatch\n \"\"\"\n if dtype is not None:\n if isinstance(dtype, str):\n try:\n dtype = DatetimeTZDtype.construct_from_string(dtype)\n except TypeError:\n # Things like `datetime64[ns]`, which is OK for the\n # constructors, but also nonsense, which should be validated\n # but not by us. We *do* allow non-existent tz errors to\n # go through\n pass\n dtz = getattr(dtype, \"tz\", None)\n if dtz is not None:\n if tz is not None and not timezones.tz_compare(tz, dtz):\n raise ValueError(\"cannot supply both a tz and a dtype with a tz\")\n tz = dtz\n\n if tz is not None and is_datetime64_dtype(dtype):\n # We also need to check for the case where the user passed a\n # tz-naive dtype (i.e. datetime64[ns])\n if tz is not None and not timezones.tz_compare(tz, dtz):\n raise ValueError(\n \"cannot supply both a tz and a \"\n \"timezone-naive dtype (i.e. datetime64[ns])\"\n )\n\n return tz\n\n\ndef _infer_tz_from_endpoints(start, end, tz):\n \"\"\"\n If a timezone is not explicitly given via `tz`, see if one can\n be inferred from the `start` and `end` endpoints. 
If more than one\n of these inputs provides a timezone, require that they all agree.\n\n Parameters\n ----------\n start : Timestamp\n end : Timestamp\n tz : tzinfo or None\n\n Returns\n -------\n tz : tzinfo or None\n\n Raises\n ------\n TypeError : if start and end timezones do not agree\n \"\"\"\n try:\n inferred_tz = timezones.infer_tzinfo(start, end)\n except AssertionError as err:\n # infer_tzinfo raises AssertionError if passed mismatched timezones\n raise TypeError(\n \"Start and end cannot both be tz-aware with different timezones\"\n ) from err\n\n inferred_tz = timezones.maybe_get_tz(inferred_tz)\n tz = timezones.maybe_get_tz(tz)\n\n if tz is not None and inferred_tz is not None:\n if not timezones.tz_compare(inferred_tz, tz):\n raise AssertionError(\"Inferred time zone not equal to passed time zone\")\n\n elif inferred_tz is not None:\n tz = inferred_tz\n\n return tz\n\n\ndef _maybe_normalize_endpoints(start, end, normalize):\n _normalized = True\n\n if start is not None:\n if normalize:\n start = normalize_date(start)\n _normalized = True\n else:\n _normalized = _normalized and start.time() == _midnight\n\n if end is not None:\n if normalize:\n end = normalize_date(end)\n _normalized = True\n else:\n _normalized = _normalized and end.time() == _midnight\n\n return start, end, _normalized\n\n\ndef _maybe_localize_point(ts, is_none, is_not_none, freq, tz, ambiguous, nonexistent):\n \"\"\"\n Localize a start or end Timestamp to the timezone of the corresponding\n start or end Timestamp\n\n Parameters\n ----------\n ts : start or end Timestamp to potentially localize\n is_none : argument that should be None\n is_not_none : argument that should not be None\n freq : Tick, DateOffset, or None\n tz : str, timezone object or None\n ambiguous: str, localization behavior for ambiguous times\n nonexistent: str, localization behavior for nonexistent times\n\n Returns\n -------\n ts : Timestamp\n \"\"\"\n # Make sure start and end are timezone localized if:\n # 1) freq = a Timedelta-like frequency (Tick)\n # 2) freq = None i.e. generating a linspaced range\n if is_none is None and is_not_none is not None:\n # Note: We can't ambiguous='infer' a singular ambiguous time; however,\n # we have historically defaulted ambiguous=False\n ambiguous = ambiguous if ambiguous != \"infer\" else False\n localize_args = {\"ambiguous\": ambiguous, \"nonexistent\": nonexistent, \"tz\": None}\n if isinstance(freq, Tick) or freq is None:\n localize_args[\"tz\"] = tz\n ts = ts.tz_localize(**localize_args)\n return ts\n" ]
[ [ "pandas._libs.tslibs.conversion.datetime_to_datetime64", "pandas.tseries.frequencies.to_offset", "pandas.core.arrays.datetimelike.DatetimeLikeArrayMixin.astype", "pandas._libs.tslibs.Timestamp", "pandas._libs.tslibs.timezones.maybe_get_tz", "pandas.core.dtypes.common.is_datetime64_ns_dtype", "numpy.asarray", "numpy.linspace", "pandas._libs.tslibs.fields.get_start_end_field", "pandas.core.dtypes.dtypes.DatetimeTZDtype", "pandas.core.dtypes.common.is_dtype_equal", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.core.dtypes.common.is_datetime64tz_dtype", "pandas.DataFrame", "pandas._libs.tslibs.fields.get_date_field", "numpy.dtype", "pandas._libs.tslibs.conversion.ensure_datetime64ns", "pandas.core.dtypes.common.is_datetime64_dtype", "pandas._libs.tslibs.timezones.infer_tzinfo", "pandas._libs.tslib.ints_to_pydatetime", "numpy.fix", "pandas._libs.tslibs.fields.get_date_name_field", "pandas.core.arrays.datetimelike.validate_inferred_freq", "pandas._libs.tslibs.conversion.is_date_array_normalized", "pandas.core.arrays._ranges.generate_regular_range", "pandas.tseries.frequencies.get_period_alias", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.common.is_string_dtype", "pandas._libs.tslibs.conversion.normalize_i8_timestamps", "pandas._libs.tslibs.timezones.tz_compare", "pandas.core.arrays.datetimelike.maybe_infer_freq", "pandas.core.arrays.timedeltas.TimedeltaArray", "pandas.core.dtypes.common.is_categorical_dtype", "pandas._libs.tslibs.resolution.resolution", "pandas.core.dtypes.common.pandas_dtype", "pandas.core.algorithms.checked_add_with_arr", "pandas.core.dtypes.common.is_timedelta64_dtype", "pandas.core.arrays.datetimelike.validate_endpoints", "pandas.core.dtypes.common.is_period_dtype", "pandas._libs.tslibs.timezones.tz_standardize", "pandas.core.dtypes.dtypes.DatetimeTZDtype.construct_from_string", "pandas._libs.tslibs.tzconversion.tz_convert", "numpy.ndim", "pandas._libs.tslibs.frequencies.base_and_stride", "numpy.floor", "pandas._libs.tslibs.normalize_date", "numpy.array", "pandas.core.common.count_not_none", "pandas.core.dtypes.common.is_bool_dtype", "pandas._libs.tslib.array_to_datetime", "pandas.io.formats.format._get_format_datetime64_from_values", "pandas._libs.tslibs.timezones.is_utc", "pandas._libs.tslibs.timezones.get_timezone", "pandas.core.dtypes.common.is_object_dtype", "pandas._libs.tslibs.conversion.tz_localize_to_utc", "pandas._libs.tslibs.fields.build_isocalendar_sarray", "pandas.core.dtypes.missing.isna", "pandas._libs.lib.infer_dtype", "pandas.core.arrays.PeriodArray._from_datetime64", "pandas.core.arrays.datetimelike.validate_periods" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
steventan0110/multiDDS
[ "b77d0ad7b8f38d5b3b1b0e63e2671e0de0e3da00" ]
[ "fairseq/optim/lr_scheduler/inverse_square_root_decay_schedule.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom . import FairseqLRScheduler, register_lr_scheduler\nimport torch\n\n@register_lr_scheduler('inverse_sqrt_decay')\nclass InverseSquareRootDecaySchedule(FairseqLRScheduler):\n \"\"\"Decay the LR based on the inverse square root of the update number.\n\n We also support a warmup phase where we linearly increase the learning rate\n from some initial learning rate (``--warmup-init-lr``) until the configured\n learning rate (``--lr``). Thereafter we decay proportional to the number of\n updates, with a decay factor set to align with the configured learning rate.\n\n During warmup::\n\n lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)\n lr = lrs[update_num]\n\n After warmup::\n\n decay_factor = args.lr * sqrt(args.warmup_updates)\n lr = decay_factor / sqrt(update_num)\n \"\"\"\n\n def __init__(self, args, optimizer):\n super().__init__(args, optimizer)\n if len(args.lr) > 1:\n raise ValueError(\n 'Cannot use a fixed learning rate schedule with inverse_sqrt.'\n ' Consider --lr-scheduler=fixed instead.'\n )\n warmup_end_lr = args.lr[0]\n if args.warmup_init_lr < 0:\n args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr\n\n # linearly warmup for the first args.warmup_updates\n self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates\n\n # then, decay prop. to the inverse square root of the update number\n self.decay_factor = warmup_end_lr * args.warmup_updates**0.5\n\n # initial learning rate\n self.lr = args.warmup_init_lr\n self.optimizer.set_lr(self.lr)\n self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n self.optimizer.optimizer, patience=0, factor=args.lr_shrink,\n threshold=args.lr_threshold)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add arguments to the parser for this LR scheduler.\"\"\"\n # fmt: off\n parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',\n help='warmup the learning rate linearly for the first N updates')\n parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',\n help='initial learning rate during warmup phase; default is args.lr')\n parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',\n help='shrink factor for annealing, lr_new = (lr * lr_shrink)')\n parser.add_argument('--lr-threshold', default=1e-4, type=float, metavar='LT',\n help='Threshold for measuring the new optimum, \\\n to only focus on significant changes')\n # fmt: on\n\n def step(self, epoch, val_loss=None):\n \"\"\"Update the learning rate at the end of the given epoch.\"\"\"\n super().step(epoch, val_loss)\n if val_loss is not None:\n self.lr_scheduler.step(val_loss, epoch)\n else:\n self.lr_scheduler.last_epoch = epoch\n return self.optimizer.get_lr()\n\n def step_update(self, num_updates):\n \"\"\"Update the learning rate after each update.\"\"\"\n if num_updates < self.args.warmup_updates:\n self.lr = self.args.warmup_init_lr + num_updates*self.lr_step\n else:\n self.lr = self.decay_factor * num_updates**-0.5\n self.optimizer.set_lr(self.lr)\n return self.lr\n" ]
[ [ "torch.optim.lr_scheduler.ReduceLROnPlateau" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mariogeiger/jax
[ "7098088f4eb15cf750398889e4341dbc15cda1b3" ]
[ "tests/lax_numpy_indexing_test.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom contextlib import contextmanager\nimport enum\nfrom functools import partial\nimport itertools\nimport typing\nfrom typing import Any, Optional, Tuple\nimport warnings\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport numpy as np\n\nimport jax\nfrom jax import lax\nfrom jax import numpy as jnp\nfrom jax import ops\n\nfrom jax._src import dtypes\nfrom jax._src import test_util as jtu\nfrom jax._src import util\nfrom jax._src.lax import lax as lax_internal\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n\n# We disable the whitespace continuation check in this file because otherwise it\n# makes the test name formatting unwieldy.\n# pylint: disable=bad-continuation\n\n\nARRAY_MSG = r\"Using a non-tuple sequence for multidimensional indexing is not allowed.*arr\\[array\\(seq\\)\\]\"\nTUPLE_MSG = r\"Using a non-tuple sequence for multidimensional indexing is not allowed.*arr\\[tuple\\(seq\\)\\]\"\n\n\nfloat_dtypes = jtu.dtypes.floating\ndefault_dtypes = float_dtypes + jtu.dtypes.integer\nall_dtypes = default_dtypes + jtu.dtypes.boolean\n\nclass IndexSpec(typing.NamedTuple):\n shape: Tuple[int, ...]\n indexer: Any\n out_shape: Optional[Tuple[int, ...]] = None\n\n\ndef check_grads(f, args, order, atol=None, rtol=None, eps=None):\n # TODO(mattjj,dougalm): add higher-order check\n default_tol = 1e-6 if config.x64_enabled else 1e-2\n atol = atol or default_tol\n rtol = rtol or default_tol\n eps = eps or default_tol\n jtu.check_jvp(f, partial(jax.jvp, f), args, atol, rtol, eps)\n jtu.check_vjp(f, partial(jax.vjp, f), args, atol, rtol, eps)\n\n\nSTATIC_INDEXING_TESTS = [\n (\"OneIntIndex\", [\n IndexSpec(shape=(3,), indexer=1, out_shape=()),\n IndexSpec(shape=(3, 3), indexer=0, out_shape=(3,)),\n IndexSpec(shape=(3, 4, 5), indexer=2, out_shape=(4, 5)),\n IndexSpec(shape=(3,), indexer=-1, out_shape=()),\n IndexSpec(shape=(3,), indexer=-2, out_shape=()),\n ]),\n (\"TwoIntIndices\", [\n IndexSpec(shape=(3, 3), indexer=(2, 1), out_shape=()),\n IndexSpec(shape=(3, 4, 5), indexer=(1, 2), out_shape=(5,)),\n IndexSpec(shape=(3, 4, 5), indexer=(-1, 2), out_shape=(5,)),\n ]),\n (\"ThreeIntIndices\", [\n IndexSpec(shape=(3, 4, 5), indexer=(1, 2, 3), out_shape=()),\n ]),\n (\"OneSliceIndex\", [\n IndexSpec(shape=(10,), indexer=slice(1, 3), out_shape=(2,)),\n IndexSpec(shape=(10,), indexer=slice(1, -1), out_shape=(8,)),\n IndexSpec(shape=(10,), indexer=slice(None, -1), out_shape=(9,)),\n IndexSpec(shape=(10,), indexer=slice(None, None, None), out_shape=(10,)),\n IndexSpec(shape=(10, 8), indexer=slice(1, 3), out_shape=(2, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(1, None), out_shape=(9, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(None, 3), out_shape=(3, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(-3, None), out_shape=(3, 8)),\n ]),\n (\"OneSliceIndexNegativeStride\", [\n IndexSpec(shape=(10,), indexer=slice(3, 1, -1), out_shape=(2,)),\n IndexSpec(shape=(10,), 
indexer=slice(1, 8, -1), out_shape=(0,)),\n IndexSpec(shape=(10,), indexer=slice(None, 1, -2), out_shape=(4,)),\n IndexSpec(shape=(10,), indexer=slice(None, None, -1), out_shape=(10,)),\n IndexSpec(shape=(10, 8), indexer=slice(3, 1, -1), out_shape=(2, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(0, 8, -1), out_shape=(0, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(None, None, -1), out_shape=(10, 8)),\n ]),\n (\"OneSliceIndexNonUnitStride\", [\n IndexSpec(shape=(10,), indexer=slice(0, 8, 2), out_shape=(4,)),\n IndexSpec(shape=(10,), indexer=slice(0, 8, 3), out_shape=(3,)),\n IndexSpec(shape=(10,), indexer=slice(1, 3, 2), out_shape=(1,)),\n IndexSpec(shape=(10,), indexer=slice(1, None, 2), out_shape=(5,)),\n IndexSpec(shape=(10,), indexer=slice(None, 1, -2), out_shape=(4,)),\n IndexSpec(shape=(10, 8), indexer=slice(1, 8, 3), out_shape=(3, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(None, None, 2), out_shape=(5, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(None, 1, -2), out_shape=(4, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(None, None, -2), out_shape=(5, 8)),\n ]),\n (\"TwoSliceIndices\", [\n IndexSpec(shape=(10, 8), indexer=(slice(1, 3), slice(0, 2)),\n out_shape=(2, 2)),\n IndexSpec(shape=(10, 8), indexer=(slice(1, None), slice(None, 2)),\n out_shape=(9, 2)),\n IndexSpec(shape=(10, 8), indexer=(slice(None, None, -1), slice(None, 2)),\n out_shape=(10, 2)),\n IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, 2)),\n out_shape=(2, 2, 3)),\n IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, None)),\n out_shape=(2, 8, 3)),\n IndexSpec(shape=(10, 8, 3), indexer=(slice(1, None), slice(0, 2)),\n out_shape=(9, 2, 3)),\n ]),\n (\"OneColonIndex\", [\n IndexSpec(shape=(3,), indexer=slice(None), out_shape=(3,)),\n IndexSpec(shape=(3, 4), indexer=slice(None), out_shape=(3, 4)),\n ]),\n (\"MultipleColonIndices\", [\n IndexSpec(shape=(3, 4), indexer=(slice(None), slice(None)),\n out_shape=(3, 4)),\n IndexSpec(shape=(3, 4, 5), indexer=(slice(None), slice(None)),\n out_shape=(3, 4, 5)),\n ]),\n (\"MixedSliceIndices\", [\n IndexSpec(shape=(10, 4), indexer=(slice(None), slice(0, 2)),\n out_shape=(10, 2)),\n IndexSpec(shape=(10, 4), indexer=(1, slice(None)),\n out_shape=(4,)),\n ]),\n (\"EllipsisIndex\", [\n IndexSpec(shape=(3,), indexer=Ellipsis, out_shape=(3,)),\n IndexSpec(shape=(3, 4), indexer=Ellipsis, out_shape=(3, 4)),\n IndexSpec(shape=(3, 4, 5), indexer=(0, Ellipsis), out_shape=(4, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, 2, 3), out_shape=(3,)),\n ]),\n (\"NoneIndex\", [\n IndexSpec(shape=(), indexer=None, out_shape=(1,)),\n IndexSpec(shape=(), indexer=(None, None), out_shape=(1, 1)),\n IndexSpec(shape=(), indexer=(Ellipsis, None), out_shape=(1,)),\n IndexSpec(shape=(3,), indexer=None, out_shape=(1, 3)),\n IndexSpec(shape=(3, 4), indexer=None, out_shape=(1, 3, 4)),\n IndexSpec(shape=(3, 4), indexer=(Ellipsis, None), out_shape=(3, 4, 1)),\n IndexSpec(shape=(3, 4), indexer=(0, None, Ellipsis), out_shape=(1, 4)),\n IndexSpec(shape=(3, 4, 5), indexer=(1, None, Ellipsis), out_shape=(1, 4, 5)),\n ]),\n (\"EmptyIndex\", [\n IndexSpec(shape=(), indexer=(), out_shape=()),\n IndexSpec(shape=(3,), indexer=(), out_shape=(3,)),\n IndexSpec(shape=(3, 4), indexer=(), out_shape=(3, 4)),\n ]),\n (\"TupleOfIntAndSliceAndIntArray\", [\n IndexSpec(shape=(3, 2, 3), indexer=(0, slice(None), np.arange(3)),\n out_shape=(3, 2)),\n IndexSpec(shape=(3, 2, 3), indexer=(np.int32(1), slice(None), np.arange(3)),\n out_shape=(3, 2)),\n IndexSpec(shape=(3, 2, 3), indexer=(np.array(2), 
slice(None), np.arange(3)),\n out_shape=(3, 2)),\n ]),\n]\n\nSTATIC_INDEXING_OUT_OF_BOUNDS_TESTS = [\n (\"OneIntIndex\", [\n IndexSpec(shape=(3,), indexer=-4, out_shape=()),\n IndexSpec(shape=(3, 3), indexer=3, out_shape=(3,)),\n IndexSpec(shape=(3, 4, 5), indexer=4, out_shape=(4, 5)),\n ]),\n (\"TwoIntIndices\", [\n IndexSpec(shape=(3, 3), indexer=(2, -4), out_shape=()),\n IndexSpec(shape=(3, 4, 5), indexer=(3, 2), out_shape=()),\n IndexSpec(shape=(3, 4, 5), indexer=(-4, 4), out_shape=(5,)),\n ]),\n]\n\n\nADVANCED_INDEXING_TESTS = [\n (\"One1DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),\n IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 1]), out_shape=(3, 3)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 0, 1]),\n out_shape=(4, 4, 5)),\n IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),\n IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),\n IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32),\n out_shape=(0,)),\n ]),\n (\"One2DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([[0, 0]]),out_shape=(1, 2)),\n IndexSpec(shape=(3, 3), indexer=np.array([[1, 2, 1], [0, 1, -1]]),\n out_shape=(2, 3, 3)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([[0, 2, 0, 1], [-1, -2, 1, 0]]),\n out_shape=(2, 4, 4, 5)),\n ]),\n (\"Two1DIntArrayIndicesNoBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),\n out_shape=(2,)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2, 0, 1]), np.array([-1, 0, -1, 2])),\n out_shape=(4, 5)),\n ]),\n (\"Two1DIntArrayIndicesWithBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),\n out_shape=(1, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([[0, 2, 0, 1]]), np.array([-1, 0, -1, 2])),\n out_shape=(1, 4, 5)),\n ]),\n (\"ArrayOfInts\", [\n IndexSpec(shape=(3,), indexer=np.array([0, 1, 0]), out_shape=(3,)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([ 0, -1]), out_shape=(2, 4, 5)),\n ]),\n (\"TupleOfListsOfPythonInts\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0, 3]]),\n out_shape=(2, 4, 5)),\n ]),\n (\"TupleOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[2, 3, 0, 3]])),\n out_shape=(1, 4)),\n ]),\n (\"TupleOfListsOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),\n out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[2, 3, 0, 3]])),\n out_shape=(2, 4, 5)),\n ]),\n]\n\nADVANCED_INDEXING_TESTS_NO_REPEATS = [\n (\"One1DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),\n IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 0]), out_shape=(3, 3)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 1]),\n out_shape=(3, 4, 5)),\n IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),\n IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),\n IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32), out_shape=(0,)),\n ]),\n (\"One2DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([[0, 1]]), out_shape=(1, 2)),\n IndexSpec(shape=(6, 6), indexer=np.array([[1, 2, 0], [3, 4, -1]]),\n out_shape=(2, 3, 6)),\n ]),\n (\"Two1DIntArrayIndicesNoBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),\n out_shape=(2,)),\n IndexSpec(shape=(4, 
5, 6),\n indexer=(np.array([0, 2, 1, 3]), np.array([-1, 0, -2, 1])),\n out_shape=(4, 6)),\n ]),\n (\"Two1DIntArrayIndicesWithBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),\n out_shape=(1, 2)),\n IndexSpec(shape=(4, 5, 6),\n indexer=(np.array([[0, 2, -1, 1]]), np.array([-1, 0, -2, 2])),\n out_shape=(1, 4, 6)),\n ]),\n (\"ArrayOfInts\", [\n IndexSpec(shape=(3,), indexer=np.array([0, 2, 1]), out_shape=(3,)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([ 0, -1]), out_shape=(2, 4, 5)),\n ]),\n (\"TupleOfListsOfPythonInts\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0]]),\n out_shape=(2, 3, 5)),\n ]),\n (\"TupleOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[2, 3, 0]])),\n out_shape=(1, 3)),\n ]),\n (\"TupleOfListsOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),\n out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[2, 3, 0]])),\n out_shape=(2, 3, 5)),\n ]),\n]\n\nADVANCED_INDEXING_TESTS_NO_REPEATS_SORTED = [\n (\"One1DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),\n IndexSpec(shape=(3, 3), indexer=np.array([0, 1, 2]), out_shape=(3, 3)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 1, 2]),\n out_shape=(3, 4, 5)),\n IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),\n IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),\n IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32), out_shape=(0,)),\n ]),\n (\"One2DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([[0, 1]]), out_shape=(1, 2)),\n IndexSpec(shape=(6, 6), indexer=np.array([[-1, 0, 1],\n [ 2, 3, 4]]), out_shape=(2, 3, 6)),\n ]),\n (\"Two1DIntArrayIndicesNoBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),\n out_shape=(2,)),\n IndexSpec(shape=(4, 5, 6),\n indexer=(np.array([0, 1, 2, 3]), np.array([-2, -1, 0, 1])),\n out_shape=(4, 6)),\n ]),\n (\"Two1DIntArrayIndicesWithBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),\n out_shape=(1, 2)),\n IndexSpec(shape=(4, 5, 6),\n indexer=(np.array([[-1, 0, 1, 2]]), np.array([-2, -1, 0, 2])),\n out_shape=(1, 4, 6)),\n ]),\n (\"TupleOfListsOfPythonInts\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[0, 2, 3]]),\n out_shape=(2, 3, 5)),\n ]),\n (\"TupleOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[0, 2, 3]])),\n out_shape=(1, 3)),\n ]),\n (\"TupleOfListsOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),\n out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[0, 2, 3]])),\n out_shape=(2, 3, 5)),\n ]),\n]\n\n\nMIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS = [\n (\"SlicesAndOneIntArrayIndex\", [\n IndexSpec(shape=(2, 3), indexer=(np.array([0, 1]), slice(1, 2)),\n out_shape=(2, 1)),\n IndexSpec(shape=(2, 3), indexer=(slice(0, 2), np.array([0, 2])),\n out_shape=(2, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(Ellipsis, np.array([0, 2]), slice(None)),\n out_shape=(3, 2, 5)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(Ellipsis, np.array([[0, 2], [1, 3]]), slice(None)),\n 
out_shape=(3, 2, 2, 5)),\n ]),\n (\"SlicesAndTwoIntArrayIndices\", [\n IndexSpec(shape=(3, 4, 5),\n indexer=(Ellipsis, np.array([0, 2]), np.array([-1, 2])),\n out_shape=(3, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), Ellipsis, np.array([-1, 2])),\n out_shape=(2, 4)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), np.array([-1, 2]), Ellipsis),\n out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), np.array([-1, 2]), slice(1, 3)),\n out_shape=(2, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), slice(1, 3), np.array([-1, 2])),\n out_shape=(2, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([ 0, 2, -2]), slice(None, None, 2),\n np.array([-1, 2, 1])),\n out_shape=(3, 2)),\n ]),\n (\"NonesAndIntArrayIndices\", [\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), None, np.array([-1, 2])),\n out_shape=(2, 1, 5)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), None, None, np.array([-1, 2])),\n out_shape=(2, 1, 1, 5)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(Ellipsis, np.array([0, 2]), None, None,\n np.array([-1, 2])),\n out_shape=(2, 3, 1, 1)),\n ]),\n (\"IntArrayWithInt32Type\", [\n IndexSpec(shape=(3, 4), indexer=(Ellipsis, np.array(1, dtype=np.int32)),\n out_shape=(3,)),\n ]),\n]\n\n\nMIXED_ADVANCED_INDEXING_TESTS = MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS + [\n (\"SlicesAndOneIntArrayIndex\", [\n IndexSpec(shape=(3, 4, 5),\n indexer=(Ellipsis, np.array([[0, 2], [1, 1]]), slice(None)),\n out_shape=(3, 2, 2, 5)),\n ]),\n (\"SlicesAndTwoIntArrayIndices\", [\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([ 0, 2, -2]), slice(None, None, 2),\n np.array([-1, 2, -1])),\n out_shape=(3, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([[0, 2], [2, 0]]), Ellipsis,\n np.array([[1, 0], [1, 0]])),\n out_shape=(2, 2, 4)),\n ]),\n]\n\nMODES = [\"clip\", \"drop\", \"promise_in_bounds\"]\n\n\nclass IndexingTest(jtu.JaxTestCase):\n \"\"\"Tests for Numpy indexing translation rules.\"\"\"\n\n @parameterized.named_parameters(jtu.cases_from_list({\n \"testcase_name\": \"{}_inshape={}_indexer={}\".format(\n name, jtu.format_shape_dtype_string( shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer\n } for name, index_specs in STATIC_INDEXING_TESTS\n for shape, indexer, _ in index_specs\n for dtype in all_dtypes))\n def testStaticIndexing(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n np_fun = lambda x: np.asarray(x)[indexer]\n jnp_fun = lambda x: jnp.asarray(x)[indexer]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n # Tests x.at[...].get(...) 
as well.\n jnp_fun = lambda x: jnp.asarray(x).at[indexer].get()\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list({\n \"testcase_name\": f\"_{funcname}\", \"funcname\": funcname}\n for funcname in [\"negative\", \"sin\", \"cos\", \"square\", \"sqrt\", \"log\", \"exp\"]))\n def testIndexApply(self, funcname, size=10, dtype='float32'):\n rng = jtu.rand_default(self.rng())\n idx_rng = jtu.rand_int(self.rng(), -size, size)\n np_func = getattr(np, funcname)\n jnp_func = getattr(jnp, funcname)\n @jtu.ignore_warning(category=RuntimeWarning)\n def np_op(x, idx):\n y = x.copy()\n np_func.at(y, idx)\n return y\n def jnp_op(x, idx):\n return jnp.asarray(x).at[idx].apply(jnp_func)\n args_maker = lambda: [rng(size, dtype), idx_rng(size, int)]\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n\n @parameterized.named_parameters({\n \"testcase_name\":\n f\"{jtu.format_shape_dtype_string(shape, dtype)}_inshape={name}\"\n f\"_indexer={indexer}_mode={mode}\",\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer, \"mode\": mode\n }\n for mode in MODES\n for name, index_specs in (\n STATIC_INDEXING_TESTS if mode == \"promise_in_bounds\" else\n STATIC_INDEXING_TESTS + STATIC_INDEXING_OUT_OF_BOUNDS_TESTS)\n for shape, indexer, _ in index_specs\n for dtype in float_dtypes)\n def testStaticIndexingGrads(self, shape, dtype, indexer, mode):\n rng = jtu.rand_default(self.rng())\n tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None\n arg = rng(shape, dtype)\n # Use an arbitrary finite fill_value, since NaNs won't work in a numerical\n # gradient test.\n fun = lambda x: jnp.asarray(x).at[indexer].get(mode=mode, fill_value=7)**2\n check_grads(fun, (arg,), 2, tol, tol, tol)\n\n def _ReplaceSlicesWithTuples(self, idx):\n \"\"\"Helper method to replace slices with tuples for dynamic indexing args.\"\"\"\n if isinstance(idx, slice):\n triple = idx.start, idx.stop, idx.step\n isnone = [i for i, elt in enumerate(triple) if elt is None]\n zeros = itertools.repeat(0)\n nones = itertools.repeat(None)\n out = util.subvals(triple, zip(isnone, zeros))\n return out, lambda out: slice(*util.subvals(out, zip(isnone, nones)))\n elif isinstance(idx, (tuple, list)) and idx:\n t = type(idx)\n elts, packs = zip(*map(self._ReplaceSlicesWithTuples, idx))\n return elts, lambda elts: t((pack(i) for pack, i in zip(packs, elts)))\n else:\n return idx, lambda x: x\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in [\n (\"OneSliceIndex\",\n [IndexSpec(shape=(5,), indexer=slice(1, 3)),\n IndexSpec(shape=(5, 4), indexer=slice(1, 3))]),\n (\"TwoSliceIndices\",\n [IndexSpec(shape=(5, 4), indexer=(slice(1, 3), slice(0, 2))),\n IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, 2)))]),\n (\"NonUnitStrides\", [\n IndexSpec(shape=(3,), indexer=slice(None, None, -1)),\n IndexSpec(shape=(3, 3), indexer=slice(0, 3, -2)),\n IndexSpec(shape=(3, 4, 5), indexer=slice(0, 4, 2))\n ]),\n (\"OnlyStartOrStopDynamic\", [\n IndexSpec(shape=(5, 4), indexer=(slice(None, 3), slice(0, 2))),\n IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, None)))\n ]),\n ]\n for shape, indexer, _ in index_specs\n for dtype in all_dtypes)\n def testDynamicIndexingWithSlicesErrors(self, shape, dtype, indexer):\n rng = 
jtu.rand_default(self.rng())\n unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)\n\n @jax.jit\n def fun(x, unpacked_indexer):\n indexer = pack_indexer(unpacked_indexer)\n return x[indexer]\n\n args_maker = lambda: [rng(shape, dtype), unpacked_indexer]\n self.assertRaises(IndexError, lambda: fun(*args_maker()))\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in [\n (\"OneIntIndex\",\n [IndexSpec(shape=(3,), indexer=1),\n IndexSpec(shape=(3, 3), indexer=0),\n IndexSpec(shape=(3, 4, 5), indexer=2),\n IndexSpec(shape=(3,), indexer=-1),\n IndexSpec(shape=(3,), indexer=-2)]),\n (\"TwoIntIndices\",\n [IndexSpec(shape=(3, 3), indexer=(2, 1)),\n IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),\n IndexSpec(shape=(3, 4, 5), indexer=(-1, 2))]),\n (\"ThreeIntIndices\",\n [IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),\n ]\n for shape, indexer, _ in index_specs\n for dtype in all_dtypes)\n def testDynamicIndexingWithIntegers(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)\n\n def np_fun(x, unpacked_indexer):\n indexer = pack_indexer(unpacked_indexer)\n return np.asarray(x)[indexer]\n\n def jnp_fun(x, unpacked_indexer):\n indexer = pack_indexer(unpacked_indexer)\n return jnp.array(x)[indexer]\n\n args_maker = lambda: [rng(shape, dtype), unpacked_indexer]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in [\n (\"OneIntIndex\",\n [IndexSpec(shape=(3,), indexer=1),\n IndexSpec(shape=(3, 3), indexer=0),\n IndexSpec(shape=(3, 4, 5), indexer=2),\n IndexSpec(shape=(3,), indexer=-1),\n IndexSpec(shape=(3,), indexer=-2),\n ]),\n (\"TwoIntIndices\",\n [IndexSpec(shape=(3, 3), indexer=(2, 1)),\n IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),\n IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)),\n ]),\n (\"ThreeIntIndices\",\n [IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),\n ]\n for shape, indexer, _ in index_specs\n for dtype in float_dtypes)\n def testDynamicIndexingWithIntegersGrads(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None\n unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)\n\n @jax.jit\n def fun(unpacked_indexer, x):\n indexer = pack_indexer(unpacked_indexer)\n return x[indexer]\n\n arr = rng(shape, dtype)\n check_grads(partial(fun, unpacked_indexer), (arr,), 2, tol, tol, tol)\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in ADVANCED_INDEXING_TESTS\n for shape, indexer, _ in index_specs\n for dtype in all_dtypes)\n def testAdvancedIntegerIndexing(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), indexer]\n np_fun = lambda x, idx: np.asarray(x)[idx]\n jnp_fun = lambda x, idx: jnp.asarray(x)[idx]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, 
args_maker)\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"_{dtype}\", \"dtype\": dtype}\n for dtype in jtu.dtypes.unsigned + jtu.dtypes.integer)\n def testIndicesNormalizationByType(self, dtype):\n x = jnp.arange(10)\n jaxpr = jax.make_jaxpr(x.__getitem__)(jnp.arange(3, dtype=dtype))\n primitives = [eqn.primitive for eqn in jaxpr.eqns]\n if np.issubdtype(dtype, np.unsignedinteger):\n # Unsigned integers should not require lt, add, and select.\n self.assertEqual(primitives, [lax.convert_element_type_p, lax.broadcast_in_dim_p, lax.gather_p])\n else:\n # May or may not contain convert_element_type.\n self.assertIn(len(primitives), [5, 6])\n self.assertEqual(primitives[:3], [lax.lt_p, lax.add_p, lax.select_n_p])\n self.assertEqual(primitives[-2:], [lax.broadcast_in_dim_p, lax.gather_p])\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in [\n (\"One1DIntArrayIndex\",\n [IndexSpec(shape=(3,), indexer=np.array([0, 1])),\n IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 1])),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 0, 1])),\n IndexSpec(shape=(3,), indexer=np.array([-1, 1])),\n IndexSpec(shape=(3,), indexer=np.array([-2, -1])),\n ]),\n (\"One2DIntArrayIndex\",\n [IndexSpec(shape=(3,), indexer=np.array([[0, 0]])),\n IndexSpec(shape=(3, 3), indexer=np.array([[1, 2, 1],\n [0, 1, -1]])),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([[0, 2, 0, 1],\n [-1, -2, 1, 0]])),\n ]),\n (\"Two1DIntArrayIndicesNoBroadcasting\",\n [IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]),\n np.array([1, 2]))),\n IndexSpec(shape=(3, 4, 5), indexer=(np.array([0, 2, 0, 1]),\n np.array([-1, 0, -1, 2]))),\n ]),\n (\"Two1DIntArrayIndicesWithBroadcasting\",\n [IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]),\n np.array([1, 2]))),\n IndexSpec(shape=(3, 4, 5), indexer=(np.array([[0, 2, 0, 1]]),\n np.array([-1, 0, -1, 2]))),\n ]),\n (\"TupleOfPythonIntsAndIntArrays\",\n [IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1]))),\n IndexSpec(shape=(3, 4, 5), indexer=(0, 1,\n np.array([[2, 3, 0, 3]]))),\n ]),\n (\"TupleOfListsOfPythonIntsAndIntArrays\",\n [IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0]))),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]],\n np.array([[2, 3, 0, 3]]))),\n ]),\n ]\n for shape, indexer, _ in index_specs\n for dtype in float_dtypes)\n def testAdvancedIntegerIndexingGrads(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None\n arg = rng(shape, dtype)\n fun = lambda x: jnp.asarray(x)[indexer]\n check_grads(fun, (arg,), 2, tol, tol, eps=1.)\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS\n for shape, indexer, _ in index_specs\n for dtype in all_dtypes)\n def testMixedAdvancedIntegerIndexing(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n indexer_with_dummies = [e if isinstance(e, np.ndarray) else ()\n for e in indexer]\n substitutes = [(i, e) for i, e in enumerate(indexer)\n if not isinstance(e, np.ndarray)]\n args_maker = lambda: [rng(shape, dtype), indexer_with_dummies]\n\n def jnp_fun(x, indexer_with_dummies):\n idx = 
type(indexer)(util.subvals(indexer_with_dummies, substitutes))\n return jnp.asarray(x)[idx]\n\n def np_fun(x, indexer_with_dummies):\n idx = type(indexer)(util.subvals(indexer_with_dummies, substitutes))\n return np.asarray(x)[idx]\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testAdvancedIndexingManually(self):\n x = self.rng().randn(3, 4, 5)\n index_array = np.array([0, 2, -1, 0])\n\n op = lambda x, index_array: x[..., index_array, :]\n cop = jax.jit(op)\n\n a1 = op(x, index_array)\n a2 = cop(x, index_array)\n\n self.assertAllClose(a1, a2)\n\n op = lambda x, index_array: x[..., index_array, :, index_array, None]\n cop = jax.jit(op)\n\n a1 = op(x, index_array)\n a2 = cop(x, index_array)\n\n self.assertAllClose(a1, a2)\n\n op = lambda x, index_array: x[index_array, ..., index_array[:, None], None]\n cop = jax.jit(op)\n\n a1 = op(x, index_array)\n a2 = cop(x, index_array)\n\n self.assertAllClose(a1, a2)\n\n def testUnpacking(self):\n\n def foo(x):\n a, b, c = x\n return a + b + c\n\n cfoo = jax.jit(foo)\n\n a1 = foo(np.arange(3))\n a2 = cfoo(np.arange(3))\n\n self.assertAllClose(a1, a2)\n\n def testBooleanIndexingArray1D(self):\n idx = np.array([True, True, False])\n x = jax.device_put(np.arange(3))\n ans = x[idx]\n expected = np.arange(3)[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBooleanIndexingList1D(self):\n idx = [True, True, False]\n x = jax.device_put(np.arange(3))\n with self.assertRaisesRegex(TypeError, ARRAY_MSG):\n x[idx]\n\n def testBooleanIndexingArray2DBroadcast(self):\n idx = np.array([True, True, False, True])\n x = np.arange(8).reshape(4, 2)\n ans = jax.device_put(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBooleanIndexingList2DBroadcast(self):\n idx = [True, True, False, True]\n x = np.arange(8).reshape(4, 2)\n with self.assertRaisesRegex(TypeError, ARRAY_MSG):\n jax.device_put(x)[idx]\n\n def testBooleanIndexingArray2D(self):\n idx = np.array([[True, False],\n [False, True],\n [False, False],\n [True, True]])\n x = np.arange(8).reshape(4, 2)\n ans = jax.device_put(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBoolean1DIndexingWithEllipsis(self):\n # Regression test for https://github.com/google/jax/issues/8412\n x = np.arange(24).reshape(4, 3, 2)\n idx = (..., np.array([True, False]))\n ans = jnp.array(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBoolean1DIndexingWithEllipsis2(self):\n # Regression test for https://github.com/google/jax/issues/9050\n x = np.arange(3)\n idx = (..., np.array([True, False, True]))\n ans = jnp.array(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBoolean1DIndexingWithEllipsis3(self):\n x = np.arange(6).reshape(2, 3)\n idx = (0, ..., np.array([True, False, True]))\n ans = jnp.array(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBoolean2DIndexingWithEllipsis(self):\n x = np.arange(24).reshape(4, 3, 2)\n idx = (..., np.array([[True, False], [True, False], [False, False]]))\n ans = jnp.array(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBoolean1DIndexingWithTrailingEllipsis(self):\n x = np.arange(24).reshape(4, 3, 2)\n idx = (np.array([True, False, True, False]), ...)\n ans = jnp.array(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, 
check_dtypes=False)\n\n def testBooleanIndexingDynamicShapeError(self):\n x = np.zeros(3)\n i = np.array([True, True, False])\n self.assertRaises(IndexError, lambda: jax.jit(lambda x, i: x[i])(x, i))\n\n def testScalarBooleanIndexingNotImplemented(self):\n msg = \"JAX arrays do not support boolean scalar indices\"\n with self.assertRaisesRegex(TypeError, msg):\n jnp.arange(4)[True]\n with self.assertRaisesRegex(TypeError, msg):\n jnp.arange(4)[False]\n with self.assertRaisesRegex(TypeError, msg):\n jnp.arange(4)[..., True]\n\n def testIssue187(self):\n x = jnp.ones((5, 5))\n x[[0, 2, 4], [0, 2, 4]] # doesn't crash\n\n x = np.arange(25).reshape((5, 5))\n ans = jax.jit(lambda x: x[[0, 2, 4], [0, 2, 4]])(x)\n expected = x[[0, 2, 4], [0, 2, 4]]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testJVPOfGradOfIndexing(self):\n # Should return a value, even though we didn't pass a symbolic zero as the\n # index tangent.\n x = jnp.ones((3, 4), jnp.float32)\n i = jnp.ones((3,), jnp.int32)\n f = lambda x, i: jnp.sum(x[i])\n primals, tangents = jax.jvp(jax.grad(f), (x, i),\n (x, np.zeros(i.shape, dtypes.float0)))\n expected = np.broadcast_to(\n np.array([0, 3, 0], dtype=np.float32)[:, None], (3, 4))\n self.assertAllClose(expected, primals)\n self.assertAllClose(np.zeros_like(x), tangents)\n\n def testTrivialGatherIsntGenerated(self):\n # https://github.com/google/jax/issues/1621\n jaxpr = jax.make_jaxpr(lambda x: x[:, None])(np.arange(4))\n self.assertEqual(len(jaxpr.jaxpr.eqns), 1)\n self.assertNotIn('gather', str(jaxpr))\n\n jaxpr = jax.make_jaxpr(lambda x: x[0:6:1])(np.arange(4))\n self.assertEqual(len(jaxpr.jaxpr.eqns), 0)\n jaxpr = jax.make_jaxpr(lambda x: x[:4])(np.arange(4))\n self.assertEqual(len(jaxpr.jaxpr.eqns), 0)\n\n jaxpr = jax.make_jaxpr(lambda x: x[::-1])(np.arange(4))\n self.assertEqual(len(jaxpr.jaxpr.eqns), 1)\n self.assertEqual(jaxpr.jaxpr.eqns[0].primitive, lax.rev_p)\n\n def testIndexingEmptyDimension(self):\n # Issue 2671: XLA error when indexing into dimension of size 0\n x = jnp.ones((2, 0))\n # The following work, even on axis 1 of size 0\n with jax.numpy_rank_promotion('allow'):\n _ = x[0, :] + x[0, None] + x[0, 1:] + x[0, 1:3:2]\n\n with self.assertRaisesRegex(IndexError,\n \"index .* is out of bounds for axis .* with size 0\"):\n _ = np.ones((2, 0))[0, 0] # The numpy error\n with self.assertRaisesRegex(IndexError,\n \"index is out of bounds for axis .* with size 0\"):\n _ = x[0, 0] # JAX indexing\n with self.assertRaisesRegex(IndexError,\n \"index is out of bounds for axis .* with size 0\"):\n jax.jit(lambda i: x[0, i])(0) # JAX indexing under jit\n\n def testBooleanIndexingWithEmptyResult(self):\n # based on a TensorFlow Probability test that started failing after #1622\n x = jnp.array([-1])\n mask = jnp.array([False])\n ans = x[mask] # doesn't crash\n\n expected = np.array([-1])[np.array([False])]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBooleanIndexingShapeMismatch(self):\n # Regression test for https://github.com/google/jax/issues/7329\n x = jnp.arange(4)\n idx = jnp.array([True, False])\n with self.assertRaisesRegex(IndexError, \"boolean index did not match shape.*\"):\n x[idx]\n\n def testNontrivialBooleanIndexing(self):\n # Test nontrivial corner case in boolean indexing shape validation\n rng = jtu.rand_default(self.rng())\n index = (rng((2, 3), np.bool_), rng((6,), np.bool_))\n\n args_maker = lambda: [rng((2, 3, 6), np.int32)]\n np_fun = lambda x: np.asarray(x)[index]\n jnp_fun = lambda x: jnp.asarray(x)[index]\n\n 
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testFloatIndexingError(self):\n BAD_INDEX_TYPE_ERROR = \"Indexer must have integer or boolean type, got indexer with type\"\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jnp.zeros(2)[0.]\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jnp.zeros((2, 2))[(0, 0.)]\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jnp.zeros((2, 2))[(0, 0.)]\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jax.jit(lambda idx: jnp.zeros((2, 2))[idx])((0, 0.))\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jnp.zeros(2).at[0.].add(1.)\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jnp.zeros(2).at[0.].set(1.)\n\n def testIndexOutOfBounds(self): # https://github.com/google/jax/issues/2245\n x = jnp.arange(5, dtype=jnp.int32) + 1\n self.assertAllClose(x, x[:10])\n\n idx = jnp.array([-10, -6, -5, -4, 0, 3, 4, 5, 6, 100])\n self.assertArraysEqual(\n x.at[idx].get(mode=\"clip\"),\n jnp.array([1, 1, 1, 2, 1, 4, 5, 5, 5, 5], jnp.int32))\n nan = np.nan\n self.assertArraysEqual(\n x.astype(jnp.float32).at[idx].get(mode=\"fill\"),\n jnp.array([nan, nan, 1, 2, 1, 4, 5, nan, nan, nan], jnp.float32))\n imin = np.iinfo(np.int32).min\n self.assertArraysEqual(\n x.at[idx].get(mode=\"fill\"),\n jnp.array([imin, imin, 1, 2, 1, 4, 5, imin, imin, imin], jnp.int32))\n umax = np.iinfo(np.uint32).max\n self.assertArraysEqual(\n x.astype(np.uint32).at[idx].get(mode=\"fill\"),\n jnp.array([umax, umax, 1, 2, 1, 4, 5, umax, umax, umax], jnp.uint32))\n self.assertArraysEqual(\n x.at[idx].get(mode=\"fill\", fill_value=7),\n jnp.array([7, 7, 1, 2, 1, 4, 5, 7, 7, 7], jnp.int32))\n\n def testIndexingWeakTypes(self):\n x = lax_internal._convert_element_type(jnp.arange(5), float, weak_type=True)\n\n a = x.at[0].set(1.0)\n self.assertEqual(a.dtype, x.dtype)\n self.assertTrue(dtypes.is_weakly_typed(a))\n\n b = x.at[0].add(1.0)\n self.assertEqual(b.dtype, x.dtype)\n self.assertTrue(dtypes.is_weakly_typed(b))\n\n c = x.at[0].mul(1.0)\n self.assertEqual(c.dtype, x.dtype)\n self.assertTrue(dtypes.is_weakly_typed(c))\n\n def testIndexingTypePromotion(self):\n def _check(x_type, y_type):\n x = jnp.arange(5, dtype=x_type)\n y = y_type(0)\n out = x.at[0].set(y)\n self.assertEqual(x.dtype, out.dtype)\n\n @jtu.ignore_warning(category=np.ComplexWarning,\n message=\"Casting complex values to real\")\n def _check_warns(x_type, y_type, msg):\n with self.assertWarnsRegex(FutureWarning, msg):\n _check(x_type, y_type)\n\n def _check_raises(x_type, y_type, msg):\n with self.assertRaisesRegex(ValueError, msg):\n _check(x_type, y_type)\n\n # Matching dtypes are always OK\n _check(jnp.int32, jnp.int32)\n _check(jnp.float32, jnp.float32)\n _check(jnp.complex64, jnp.complex64)\n\n # Weakly-typed y values promote.\n _check(jnp.int32, int)\n _check(jnp.float32, int)\n _check(jnp.float32, float)\n _check(jnp.complex64, int)\n _check(jnp.complex64, float)\n _check(jnp.complex64, complex)\n\n # in standard promotion mode, strong types can promote.\n msg = \"scatter inputs have incompatible types\"\n with jax.numpy_dtype_promotion('standard'):\n _check(jnp.int32, jnp.int16)\n _check(jnp.float32, jnp.float16)\n _check(jnp.float32, jnp.int32)\n _check(jnp.complex64, jnp.int32)\n _check(jnp.complex64, jnp.float32)\n\n # TODO(jakevdp): make these _check_raises\n _check_warns(jnp.int16, jnp.int32, msg)\n _check_warns(jnp.int32, jnp.float32, msg)\n _check_warns(jnp.int32, 
jnp.complex64, msg)\n _check_warns(jnp.float16, jnp.float32, msg)\n _check_warns(jnp.float32, jnp.complex64, msg)\n\n # in strict promotion mode, strong types do not promote.\n msg = \"Input dtypes .* have no available implicit dtype promotion path\"\n with jax.numpy_dtype_promotion('strict'):\n _check_raises(jnp.int32, jnp.int16, msg)\n _check_raises(jnp.float32, jnp.float16, msg)\n _check_raises(jnp.float32, jnp.int32, msg)\n _check_raises(jnp.complex64, jnp.int32, msg)\n _check_raises(jnp.complex64, jnp.float32, msg)\n\n _check_raises(jnp.int16, jnp.int32, msg)\n _check_raises(jnp.int32, jnp.float32, msg)\n _check_raises(jnp.int32, jnp.complex64, msg)\n _check_raises(jnp.float16, jnp.float32, msg)\n _check_raises(jnp.float32, jnp.complex64, msg)\n\n\ndef _broadcastable_shapes(shape):\n \"\"\"Returns all shapes that broadcast to `shape`.\"\"\"\n def f(rshape):\n yield []\n if rshape:\n for s in f(rshape[1:]):\n yield rshape[0:1] + s\n if rshape[0] != 1:\n for s in f(rshape[1:]):\n yield [1] + s\n for x in f(list(reversed(shape))):\n yield list(reversed(x))\n\n\n# TODO(jakevdp): move this implementation to jax.dtypes & use in scatter?\ndef _can_cast(from_, to):\n return lax.dtype(to) == dtypes.result_type(from_, to)\n\n\ndef _compatible_dtypes(op, dtype, inexact=False):\n if op == UpdateOps.ADD:\n return [dtype]\n elif inexact:\n return [dt for dt in float_dtypes if _can_cast(dt, dtype)]\n else:\n return [dt for dt in all_dtypes if _can_cast(dt, dtype)]\n\n\nclass UpdateOps(enum.Enum):\n UPDATE = 0\n ADD = 1\n MUL = 2\n DIV = 3\n POW = 4\n MIN = 5\n MAX = 6\n\n def np_fn(op, indexer, x, y):\n x = x.copy()\n x[indexer] = {\n UpdateOps.UPDATE: lambda: y,\n UpdateOps.ADD: lambda: x[indexer] + y,\n UpdateOps.MUL: lambda: x[indexer] * y,\n UpdateOps.DIV: jtu.ignore_warning(category=RuntimeWarning)(\n lambda: x[indexer] / y.astype(x.dtype)),\n UpdateOps.POW: jtu.ignore_warning(category=RuntimeWarning)(\n lambda: x[indexer] ** y.astype(x.dtype)),\n UpdateOps.MIN: lambda: np.minimum(x[indexer], y),\n UpdateOps.MAX: lambda: np.maximum(x[indexer], y),\n }[op]()\n return x\n\n def jax_fn(op, indexer, x, y, indices_are_sorted=False,\n unique_indices=False, mode=None):\n x = jnp.array(x)\n return {\n UpdateOps.UPDATE: x.at[indexer].set,\n UpdateOps.ADD: x.at[indexer].add,\n UpdateOps.MUL: x.at[indexer].multiply,\n UpdateOps.DIV: x.at[indexer].divide,\n UpdateOps.POW: x.at[indexer].power,\n UpdateOps.MIN: x.at[indexer].min,\n UpdateOps.MAX: x.at[indexer].max,\n }[op](y, indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices, mode=mode)\n\n def dtypes(op):\n if op == UpdateOps.UPDATE:\n return all_dtypes\n elif op == UpdateOps.DIV or op == UpdateOps.POW:\n return jtu.dtypes.inexact\n else:\n return default_dtypes\n\ndef _update_tol(op):\n if op == UpdateOps.POW:\n tol = {np.complex64: 1e-4 if jtu.device_under_test() == \"tpu\" else 1e-5,\n np.complex128: 1e-14}\n else:\n tol = {np.complex128: 1e-14}\n return tol\n\n\nclass IndexedUpdateTest(jtu.JaxTestCase):\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\":\n f\"{name}_inshape={jtu.format_shape_dtype_string(shape, dtype)}\"\n f\"_indexer={indexer}\"\n f\"_update={jtu.format_shape_dtype_string(update_shape, update_dtype)}\"\n f\"_op={op.name}\",\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n \"op\": op, \"mode\": mode,\n } for name, index_specs in s(STATIC_INDEXING_TESTS)\n for shape, indexer, update_shape in 
s(index_specs)\n for op in s(UpdateOps)\n for dtype in s(UpdateOps.dtypes(op))\n for update_shape in s(_broadcastable_shapes(update_shape))\n for update_dtype in s(_compatible_dtypes(op, dtype))\n for mode in s(MODES))))\n def testStaticIndexing(self, shape, dtype, update_shape, update_dtype,\n indexer, op, mode):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]\n np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)\n jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y, mode=mode)\n self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, tol=_update_tol(op))\n self._CompileAndCheck(jax_fn, args_maker)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"{}_inshape={}_indexer={}_update={}_op={}\".format(\n name, jtu.format_shape_dtype_string(shape, dtype), indexer,\n jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n \"op\": op\n } for name, index_specs in s(ADVANCED_INDEXING_TESTS_NO_REPEATS)\n for shape, indexer, update_shape in s(index_specs)\n for op in s(UpdateOps)\n for dtype in s(UpdateOps.dtypes(op))\n for update_shape in s(_broadcastable_shapes(update_shape))\n for update_dtype in s(_compatible_dtypes(op, dtype)))))\n def testAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,\n indexer, op):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]\n np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)\n jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y,\n unique_indices=True)\n self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, tol=_update_tol(op))\n self._CompileAndCheck(jax_fn, args_maker)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"{}_inshape={}_indexer={}_update={}_op={}\".format(\n name, jtu.format_shape_dtype_string(shape, dtype), indexer,\n jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n \"op\": op\n } for name, index_specs in s(ADVANCED_INDEXING_TESTS_NO_REPEATS_SORTED)\n for shape, indexer, update_shape in s(index_specs)\n for op in s(UpdateOps)\n for dtype in s(UpdateOps.dtypes(op))\n for update_shape in s(_broadcastable_shapes(update_shape))\n for update_dtype in s(_compatible_dtypes(op, dtype)))))\n def testAdvancedIndexingSorted(self, shape, dtype, update_shape, update_dtype,\n indexer, op):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]\n np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)\n jax_fn = lambda x, y: UpdateOps.jax_fn(\n op, indexer, x, y, indices_are_sorted=True, unique_indices=True)\n self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, check_dtypes=True,\n tol=_update_tol(op))\n self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"{}_inshape={}_indexer={}_update={}_op={}\".format(\n name, jtu.format_shape_dtype_string(shape, dtype), indexer,\n jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n 
\"op\": op\n } for name, index_specs in s(MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS)\n for shape, indexer, update_shape in s(index_specs)\n for op in s(UpdateOps)\n for dtype in s(UpdateOps.dtypes(op))\n for update_shape in s(_broadcastable_shapes(update_shape))\n for update_dtype in s(_compatible_dtypes(op, dtype)))))\n def testMixedAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,\n indexer, op):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]\n np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)\n jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y)\n self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, tol=_update_tol(op))\n self._CompileAndCheck(jax_fn, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list({\n \"testcase_name\":\n f\"{name}_inshape={jtu.format_shape_dtype_string(shape, dtype)}\"\n f\"_indexer={indexer}\"\n f\"_update={jtu.format_shape_dtype_string(update_shape, update_dtype)}\"\n f\"_op={op.name}_mode={mode}\",\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n \"op\": op, \"mode\": mode,\n } for mode in [None] + MODES\n for name, index_specs in (\n STATIC_INDEXING_TESTS if mode == \"promise_in_bounds\" else\n STATIC_INDEXING_TESTS + STATIC_INDEXING_OUT_OF_BOUNDS_TESTS)\n for shape, indexer, update_shape in index_specs\n for op in [UpdateOps.ADD, UpdateOps.MUL, UpdateOps.UPDATE]\n for dtype in float_dtypes\n for update_shape in _broadcastable_shapes(update_shape)\n for update_dtype in _compatible_dtypes(op, dtype, inexact=True)))\n def testStaticIndexingGrads(self, shape, dtype, update_shape, update_dtype,\n indexer, op, mode):\n rng = jtu.rand_default(self.rng())\n jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y, mode=mode,\n unique_indices=True)\n x = rng(shape, dtype)\n y = rng(update_shape, update_dtype)\n check_grads(jax_fn, (x, y), 2, rtol=1e-3, atol=1e-3, eps=1.)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"{}_inshape={}_indexer={}_update={}_op={}\".format(\n name, jtu.format_shape_dtype_string(shape, dtype), indexer,\n jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n \"op\": op, \"unique_indices\": unique_indices,\n } for unique_indices in s([False, True])\n for name, index_specs in s(\n ADVANCED_INDEXING_TESTS_NO_REPEATS if unique_indices\n else ADVANCED_INDEXING_TESTS)\n for shape, indexer, update_shape in s(index_specs)\n for op in s(\n [UpdateOps.ADD, UpdateOps.MUL, UpdateOps.UPDATE] if unique_indices\n else [UpdateOps.ADD])\n for dtype in s(float_dtypes)\n for update_shape in s(_broadcastable_shapes(update_shape))\n for update_dtype in s(_compatible_dtypes(op, dtype, inexact=True)))))\n def testAdvancedIndexingGrads(self, shape, dtype, update_shape, update_dtype,\n indexer, op, unique_indices):\n rng = jtu.rand_default(self.rng())\n jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y,\n unique_indices=unique_indices)\n x = rng(shape, dtype)\n y = rng(update_shape, update_dtype)\n check_grads(jax_fn, (x, y), 2, rtol=1e-3, atol=1e-3, eps=1.)\n\n def testIndexMulGradFailsIfNotUnique(self):\n y = jnp.ones((10,), jnp.int32)\n f = lambda x, z: x.at[y].mul(z)\n\n x = jnp.ones((100,), jnp.float32)\n z = jnp.ones((10,), jnp.float32)\n with 
self.assertRaises(NotImplementedError,\n msg=\"scatter_mul gradients are only implemented if \"\n \"`unique_indices=True`\"):\n jax.jvp(f, (x, z), (x, z))\n\n def testSegmentSumBehavior(self):\n # testAdvancedIndexing compares against NumPy, and as a result doesn't check\n # repeated indices. This test is just a simple manual check, based on\n # https://www.tensorflow.org/api_docs/python/tf/math/segment_sum\n data = np.array([5, 1, 7, 2, 3, 4, 1, 3])\n segment_ids = np.array([0, 0, 0, 1, 2, 2, 3, 3])\n\n ans = jnp.zeros(np.max(segment_ids) + 1).at[segment_ids].add(data)\n expected = np.array([13, 2, 7, 4])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testSegmentSum(self):\n data = jnp.array([5, 1, 7, 2, 3, 4, 1, 3])\n segment_ids = jnp.array([0, 0, 0, 1, 2, 2, 3, 3])\n\n # test with explicit num_segments\n ans = ops.segment_sum(data, segment_ids, num_segments=4)\n expected = jnp.array([13, 2, 7, 4])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n # test with explicit num_segments larger than the higher index.\n ans = ops.segment_sum(data, segment_ids, num_segments=5)\n expected = jnp.array([13, 2, 7, 4, 0])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n # test without explicit num_segments\n ans = ops.segment_sum(data, segment_ids)\n expected = jnp.array([13, 2, 7, 4])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n # test with negative segment ids and segment ids larger than num_segments,\n # that will be wrapped with the `mod`.\n segment_ids = jnp.array([0, 4, 8, 1, 2, -6, -1, 3])\n ans = ops.segment_sum(data, segment_ids, num_segments=4)\n expected = jnp.array([5, 2, 3, 3])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n # test with negative segment ids and without without explicit num_segments\n # such as num_segments is defined by the smaller index.\n segment_ids = jnp.array([3, 3, 3, 4, 5, 5, -7, -6])\n ans = ops.segment_sum(data, segment_ids)\n expected = jnp.array([0, 0, 0, 13, 2, 7])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testSegmentSumOutOfBounds(self):\n def fn(data, segment_ids):\n return jax.ops.segment_sum(data, segment_ids, num_segments).sum()\n\n data = np.array([0, 0], dtype=np.float32)\n num_segments = 2\n segment_ids = np.array([2, 3])\n val, grad = jax.value_and_grad(fn)(data, segment_ids)\n self.assertAllClose(val, np.array(0., np.float32))\n self.assertAllClose(grad, np.array([0., 0.], np.float32))\n\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list({\n \"testcase_name\": \"_{}_{}_num_segments={}_bucket_size={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n reducer.__name__, num_segments, bucket_size),\n \"dtype\": dtype, \"shape\": shape,\n \"reducer\": reducer, \"op\": op, \"identity\": identity,\n \"num_segments\": num_segments, \"bucket_size\": bucket_size}\n for dtype in [np.bool_]\n for shape in [(8,), (7, 4), (6, 4, 2)]\n for bucket_size in [None, 2]\n for num_segments in [None, 1, 3])\n for reducer, op, identity in [\n (ops.segment_min, np.minimum, True),\n (ops.segment_max, np.maximum, False),\n ]))\n def testSegmentReduceBoolean(self, shape, dtype, reducer, op, identity, num_segments, bucket_size):\n rng = jtu.rand_default(self.rng())\n idx_rng = jtu.rand_int(self.rng(), low=-2, high=3)\n args_maker = lambda: [rng(shape, dtype), idx_rng(shape[:1], jnp.int32)]\n\n if np.issubdtype(dtype, np.integer):\n if np.isposinf(identity):\n identity = np.iinfo(dtype).max\n elif np.isneginf(identity):\n identity = 
np.iinfo(dtype).min\n\n jnp_fun = lambda data, segment_ids: reducer(\n data, segment_ids, num_segments=num_segments, bucket_size=bucket_size)\n\n def np_fun(data, segment_ids):\n size = num_segments if num_segments is not None else (segment_ids.max() + 1)\n out = np.full((size,) + shape[1:], identity, dtype)\n for i, val in zip(segment_ids, data):\n if 0 <= i < size:\n out[i] = op(out[i], val).astype(dtype)\n return out\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n if num_segments is not None:\n self._CompileAndCheck(jnp_fun, args_maker)\n\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list({\n \"testcase_name\": \"_{}_{}_num_segments={}_bucket_size={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n reducer.__name__, num_segments, bucket_size),\n \"dtype\": dtype, \"shape\": shape,\n \"reducer\": reducer, \"op\": op, \"identity\": identity,\n \"num_segments\": num_segments, \"bucket_size\": bucket_size}\n for dtype in default_dtypes\n for shape in [(8,), (7, 4), (6, 4, 2)]\n for bucket_size in [None, 2]\n for num_segments in [None, 1, 3])\n for reducer, op, identity in [\n (ops.segment_sum, np.add, 0),\n (ops.segment_prod, np.multiply, 1),\n (ops.segment_min, np.minimum, float('inf')),\n (ops.segment_max, np.maximum, -float('inf')),\n ]))\n def testSegmentReduce(self, shape, dtype, reducer, op, identity, num_segments, bucket_size):\n rng = jtu.rand_default(self.rng())\n idx_rng = jtu.rand_int(self.rng(), low=-2, high=3)\n args_maker = lambda: [rng(shape, dtype), idx_rng(shape[:1], jnp.int32)]\n\n if np.issubdtype(dtype, np.integer):\n if np.isposinf(identity):\n identity = np.iinfo(dtype).max\n elif np.isneginf(identity):\n identity = np.iinfo(dtype).min\n\n jnp_fun = lambda data, segment_ids: reducer(\n data, segment_ids, num_segments=num_segments, bucket_size=bucket_size)\n\n def np_fun(data, segment_ids):\n size = num_segments if num_segments is not None else (segment_ids.max() + 1)\n out = np.full((size,) + shape[1:], identity, dtype)\n for i, val in zip(segment_ids, data):\n if 0 <= i < size:\n out[i] = op(out[i], val).astype(dtype)\n return out\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n if num_segments is not None:\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testIndexDtypeError(self):\n # https://github.com/google/jax/issues/2795\n jnp.array(1) # get rid of startup warning\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"error\")\n jnp.zeros(5).at[::2].set(1)\n self.assertLen(w, 0)\n\n @contextmanager\n def assertNoWarnings(self):\n with warnings.catch_warnings(record=True) as caught_warnings:\n yield\n self.assertEmpty(caught_warnings)\n\n @parameterized.named_parameters(jtu.cases_from_list({\n \"testcase_name\": f\"idx={idx}\", \"idx\": idx, \"idx_type\": idx_type}\n for idx, idx_type in [\n ([0], \"array\"),\n ([0, 0], \"array\"),\n ([[0, 0]], \"tuple\"),\n ([0, [0, 1]], \"tuple\"),\n ([0, np.arange(2)], \"tuple\"),\n ([0, None], \"tuple\"),\n ([0, slice(None)], \"tuple\"),\n ]))\n def testIndexSequenceDeprecation(self, idx, idx_type):\n normalize = {\"array\": np.array, \"tuple\": tuple}[idx_type]\n msg = {\"array\": ARRAY_MSG, \"tuple\": TUPLE_MSG}[idx_type]\n x = jnp.arange(6).reshape(3, 2)\n\n with self.assertRaisesRegex(TypeError, msg):\n x[idx]\n with self.assertNoWarnings():\n x[normalize(idx)]\n\n with self.assertRaisesRegex(TypeError, msg):\n x.at[idx].set(0)\n with self.assertNoWarnings():\n x.at[normalize(idx)].set(0)\n\n def 
testIndexedUpdateAliasingBug(self):\n # https://github.com/google/jax/issues/7461\n fn = lambda x: x.at[1:].set(1 + x[:-1])\n y = jnp.zeros(8)\n self.assertArraysEqual(fn(y), jax.jit(fn)(y))\n\nif __name__ == \"__main__\":\n absltest.main(testLoader=jtu.JaxTestLoader())\n" ]
[ [ "numpy.minimum", "numpy.maximum", "numpy.asarray", "numpy.arange", "numpy.issubdtype", "numpy.isposinf", "numpy.int32", "numpy.full", "numpy.ones", "numpy.isneginf", "numpy.max", "numpy.zeros_like", "numpy.iinfo", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
archman/python-mpl4qt
[ "f84fefb95113492407899206269ff82b609279b2" ]
[ "mpl4qt/widgets/mplbasewidget.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nmplbasewidget.py\n\nBase class for matplotlib widget for PyQt.\n\nCopyright (C) 2018 Tong Zhang <[email protected]>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\"\"\"\n\nimport time\nimport numpy as np\nfrom collections import deque\nfrom collections import OrderedDict\nfrom functools import partial\n\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtCore import QVariant\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import pyqtProperty\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import pyqtSlot\n\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtGui import QFontDatabase\nfrom PyQt5.QtGui import QGuiApplication\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtGui import QPalette\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtGui import QResizeEvent\n\nfrom PyQt5.QtWidgets import QAction\nfrom PyQt5.QtWidgets import QFileDialog\nfrom PyQt5.QtWidgets import QMenu\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtWidgets import QSizePolicy\nfrom PyQt5.QtWidgets import QVBoxLayout\nfrom PyQt5.QtWidgets import QWidget\n\nimport matplotlib as mpl\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import Figure\nfrom matplotlib.lines import Line2D\nfrom matplotlib.ticker import AutoMinorLocator\nfrom matplotlib.ticker import NullLocator\n\nfrom mpl4qt.widgets.mpltoolbar import MToolbar\nfrom mpl4qt.widgets.utils import ALL_COLORMAPS\nfrom mpl4qt.widgets.utils import AUTOFORMATTER\nfrom mpl4qt.widgets.utils import AUTOFORMATTER_MATHTEXT\nfrom mpl4qt.widgets.utils import BOOTSTRAP_GREEN\nfrom mpl4qt.widgets.utils import BOOTSTRAP_RED\nfrom mpl4qt.widgets.utils import LINE_STY_VALS\nfrom mpl4qt.widgets.utils import LINE_DS_VALS\nfrom mpl4qt.widgets.utils import MatplotlibCurveWidgetSettings\nfrom mpl4qt.widgets.utils import SCALE_STY_VALS\nfrom mpl4qt.widgets.utils import cycle_list_next\nfrom mpl4qt.widgets.utils import mfont_to_qfont\nfrom mpl4qt.widgets.utils import mplcolor2hex\nfrom mpl4qt.widgets.utils import set_font\nfrom mpl4qt.widgets.utils import generate_formatter\nfrom mpl4qt.widgets.utils import is_cmap_valid\n\nMPL_VERSION = mpl.__version__\nDTMSEC = 500 # msec\nDTSEC = DTMSEC / 1000.0 # sec\n\n\nclass BasePlotWidget(QWidget):\n # combo keyshorts, keystring, timestamp\n keycombo_cached = pyqtSignal(str, float)\n\n # indices list of points selected by lasso tool,\n # ind: array, pts: array (selected)\n # for i,idx in enumerate(ind): idx, pts[i]\n selectedIndicesUpdated = pyqtSignal(QVariant, QVariant)\n\n # zoomed ROI changed\n zoom_roi_changed = pyqtSignal(tuple, tuple)\n\n # grid\n gridOnUpdated = pyqtSignal(bool)\n\n # legend\n legendOnUpdated = pyqtSignal(bool)\n\n # autoscale\n autoScaleOnUpdated = pyqtSignal(bool)\n\n # bg color\n bgColorChanged = pyqtSignal(QColor)\n\n # xy pos, x,y 
(default) or x,y,z\n xyposUpdated = pyqtSignal(list)\n\n # cross markers updated, is_new_marker?, x, y, mk_name\n markerUpdated = pyqtSignal(bool, float, float, 'QString')\n\n # selected point/line\n selectedPointChanged = pyqtSignal(float, float)\n selectedLineChanged = pyqtSignal(Line2D)\n\n # shaded area updated (mpltoolbar)\n shaded_area_updated = pyqtSignal(tuple, tuple)\n\n # xlimit is changed\n xlimitMinChanged = pyqtSignal(float)\n xlimitMaxChanged = pyqtSignal(float)\n # ylimit is changed\n ylimitMinChanged = pyqtSignal(float)\n ylimitMaxChanged = pyqtSignal(float)\n\n def __init__(self, parent=None, show_toolbar=True, **kws):\n super(BasePlotWidget, self).__init__(parent)\n self.widget_type = '__BasePlotWidget'\n self.figure = Figure()\n self.axes = self.figure.add_subplot(111)\n self.axes.set_picker(True)\n self.init_figure()\n self.canvas = FigureCanvas(self.figure)\n self.setParent(parent)\n self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.sys_bg_color = self.palette().color(QPalette.Background)\n self.sys_fg_color = self.palette().color(QPalette.Foreground)\n DEFAULT_FONTS = {\n 'title': QFontDatabase.systemFont(QFontDatabase.TitleFont),\n 'fixed': QFontDatabase.systemFont(QFontDatabase.FixedFont),\n 'general': QFontDatabase.systemFont(QFontDatabase.GeneralFont),\n }\n self.sys_label_font = DEFAULT_FONTS['general']\n self.sys_title_font = DEFAULT_FONTS['title']\n self.post_style_figure()\n # set up layout\n self.set_up_layout()\n\n self.adjustSize()\n self.set_context_menu()\n\n # track (x,y)\n self.canvas.mpl_connect('motion_notify_event', self.on_motion)\n\n # key press\n self.canvas.mpl_connect('key_press_event', self.on_key_press)\n\n # key release\n self.canvas.mpl_connect('key_release_event', self.on_key_release)\n\n # pick\n self.canvas.mpl_connect('pick_event', self.on_pick)\n\n # button\n self.canvas.mpl_connect('button_press_event', self.on_press)\n self.canvas.mpl_connect('button_release_event', self.on_release)\n\n self.canvas.mpl_connect('scroll_event', self.on_scroll)\n\n self.canvas.setFocusPolicy(Qt.ClickFocus)\n self.canvas.setFocus()\n\n # patches container: mk_area,\n # see draw_shade_area()\n self._patches = {}\n\n # dnd\n self.setAcceptDrops(True)\n\n # window/widget/dialog handlers\n self._handlers = {}\n\n # cross markers\n self._markers = OrderedDict() # list of {mk_name: [hl,vl,cp,pt,(x,y)]}\n self._to_add_marker = False\n self._added_marker = False # if added or not\n self._marker_id = 1 # initial marker id, always increase, even for deletion\n self._marker_with_xy = False # anote with (x,y)\n self._visible_hvlines = True # default visibility\n self.markerUpdated.connect(self.on_cross_markers_update)\n\n # pan\n self._pan_on = False\n\n # keypress cache\n self.dq_keycombo = deque([], 2)\n self.keycombo_cached.connect(self.on_update_keycombo_cache)\n\n # tb_toggle\n self._fig_tb_toggle = show_toolbar\n if self._fig_tb_toggle:\n # show mpltool\n self.__show_mpl_tools()\n\n #\n self.as_ann = None\n self.autoScaleOnUpdated.connect(self.on_autoscale_toggled)\n\n # add marker mpltool\n self._mk_add_hint_ann = None\n\n # [(lbl, (o,lw,mw))]\n self._last_sel_lines = {}\n\n def on_cross_markers_update(self):\n # cross markers updated.\n if len(self._markers) == 2:\n w = self._handlers.get('w_mpl_tools', None)\n if w is None:\n return # usually is not None\n w.on_show_mks()\n w.mk_view.close()\n\n def draw_shade_area(self, p1, p2, **kws):\n # see markers view\n from matplotlib.patches import Rectangle\n def f(p1, p2):\n x1, y1 = p1\n x2, 
y2 = p2\n pts = sorted([[x1, y1], [x2, y2], [x1, y2], [x2, y1]])\n p1, p4 = pts[0], pts[-1]\n return p1, p4[0] - p1[0], p4[1] - p1[1]\n p = Rectangle(*f(p1, p2), **kws)\n self._patches['mk_area'] = p\n self.axes.add_patch(p)\n self.update_figure()\n\n @pyqtSlot(bool)\n def on_autoscale_toggled(self, auto_scale_enabled):\n # if auto scale is enabled, put text label\n if auto_scale_enabled:\n if self.as_ann is None:\n self.as_ann = self.axes.annotate('AutoScale is Enabled',\n xy=(1.0, 1.01),\n ha='right', va='bottom',\n xycoords=('axes fraction'),\n color='w',\n bbox=dict(\n boxstyle='round,pad=0.3',\n fc=BOOTSTRAP_GREEN, ec=BOOTSTRAP_GREEN,\n lw=1.0, alpha=0.8))\n else:\n self.as_ann.set_visible(True)\n else:\n if self.as_ann is not None:\n self.as_ann.set_visible(False)\n self.update_figure()\n\n @pyqtSlot(bool, 'QString', bool)\n def on_marker_add_checked(self, is_checked, mk_name, update_flag):\n # Add marker tool is checked.\n if update_flag:\n text = \"Updating Marker ({}) is Activated, Finish by CTRL+M\\nStart New by CTRL+M\".format(mk_name)\n else:\n text = \"Adding Marker ({}) is Activated, Finish by CTRL+M\\nStart New by CTRL+M\".format(mk_name)\n if is_checked:\n if self._mk_add_hint_ann is None:\n self._mk_add_hint_ann = self.axes.annotate(\n text,\n xy=(0, 1.01),\n ha='left', va='bottom',\n xycoords=('axes fraction'),\n color='w',\n bbox=dict(\n boxstyle='round,pad=0.3',\n fc=BOOTSTRAP_RED, ec=BOOTSTRAP_RED,\n lw=1.0, alpha=0.8))\n else:\n self._mk_add_hint_ann.set_text(text)\n self._mk_add_hint_ann.set_visible(True)\n else:\n if self._mk_add_hint_ann is not None:\n self._mk_add_hint_ann.set_visible(False)\n self.update_figure()\n\n def get_crossmk_config(self, name):\n # get cross marker (w/ lines, text) config by name\n hl, _, cp, pt, _, = self._markers[name]\n return {'ls': hl.get_ls(), 'lw': hl.get_lw(),\n 'c': hl.get_c(),\n 'line_visible': hl.get_visible(),\n 'line_alpha': hl.get_alpha(),\n 'ms': cp.get_ms(), 'mk': cp.get_marker(),\n 'mew': cp.get_mew(), 'mec': cp.get_mec(),\n 'mfc': cp.get_mfc(),\n 'mk_visible': cp.get_visible(),\n 'mk_alpha': cp.get_alpha(),\n 'text_visible': pt.get_visible(),\n 'text_color': pt.get_color(),\n 'text_content': pt.get_text(),\n 'text_alpha': pt.get_bbox_patch().get_alpha(),}\n\n def draw_hvlines(self, x0, y0, name, mc=None):\n if name in self._markers:\n is_new_marker = False\n hl, vl, cp, pt, _ = self._markers[name]\n if mc is None:\n mc = hl.get_color()\n else:\n is_new_marker = True\n hl, vl, cp, pt = None, None, None, None\n assert mc is not None # mc must be given\n\n if hl is None:\n hl = self.axes.axhline(y0,\n alpha=0.8, color=mc, ls='--')\n hl.set_label('_H-Line {}'.format(name))\n else:\n hl.set_ydata([y0, y0])\n\n if vl is None:\n vl = self.axes.axvline(x0,\n alpha=0.8, color=mc, ls='--')\n vl.set_label('_V-Line {}'.format(name))\n else:\n vl.set_xdata([x0, x0])\n\n if cp is None:\n cp, = self.axes.plot([x0], [y0], 'o',\n mec=mc, mfc='w', mew=2.0, alpha=0.9)\n cp.set_label('_Cross-Point {}'.format(name))\n if self._marker_with_xy:\n text = '{0:g},{1:g}'.format(x0, y0)\n else:\n text = name\n pt = self.axes.annotate(text,\n color='#000000', xy=(x0, y0), xytext=(15, 15),\n xycoords=\"data\", textcoords=\"offset pixels\",\n bbox=dict(boxstyle=\"round\", fc='w'))\n pt.get_bbox_patch().set_alpha(0.5)\n else:\n cp.set_data([x0], [y0])\n pt.xy = (x0, y0)\n if self._marker_with_xy:\n pt.set_text('{0:g},{1:g}'.format(x0, y0))\n else:\n pt.set_text(name)\n self._markers[name][-1] = (x0, y0)\n\n if is_new_marker:\n self._markers[name] = 
[hl, vl, cp, pt, (x0, y0)]\n\n self.markerUpdated.emit(is_new_marker, x0, y0, name)\n self.update_figure()\n\n def set_visible_hvlines(self, flag=True):\n \"\"\"Set all markers visible (*flag* is True) or invisible (*flag* is False).\n \"\"\"\n self._visible_hvlines = flag\n for name, (hl, vl, cp, pt, _,) in self._markers.items():\n for o in (hl, vl, cp, pt):\n o.set_visible(flag)\n self.update_figure()\n\n def __show_mpl_tools(self):\n if 'w_mpl_tools' in self._handlers:\n w = self._handlers['w_mpl_tools']\n else:\n w = MToolbar(self.figure.canvas, self)\n self._handlers['w_mpl_tools'] = w\n w.selectedIndicesUpdated.connect(self.on_selected_indices)\n w.zoom_roi_changed.connect(self.on_zoom_roi_changed)\n w.shaded_area_updated.connect(self.on_shaded_area_updated)\n w.show_toolbar()\n w.floatable_changed.emit(False)\n\n @pyqtSlot(QVariant, QVariant)\n def on_selected_indices(self, ind, pts):\n self.selectedIndicesUpdated.emit(ind, pts)\n\n @pyqtSlot(tuple, tuple)\n def on_shaded_area_updated(self, xlim, ylim):\n self.shaded_area_updated.emit(xlim, ylim)\n\n @pyqtSlot(tuple, tuple)\n def on_zoom_roi_changed(self, xlim, ylim):\n # print(\"Zoomed Rect ROI: \", xlim, ylim)\n self.zoom_roi_changed.emit(xlim, ylim)\n\n def set_up_layout(self):\n self.vbox = QVBoxLayout()\n self.vbox.setContentsMargins(0, 0, 0, 0)\n self.vbox.addWidget(self.canvas, 1)\n self.setLayout(self.vbox)\n\n def post_style_figure(self):\n self.init_prop_settings()\n self.set_figure_color()\n\n def init_prop_settings(self):\n \"\"\"Initial settings for properties.\n \"\"\"\n ## fonts:\n # xy labels\n lbl = self.axes.xaxis.label\n self._fig_xylabel_font = mfont_to_qfont(lbl.get_fontproperties())\n self._fig_xylabel_visible = lbl.get_visible()\n # xy ticklabels\n tklbl = self.axes.get_xticklabels()[0]\n self._fig_xyticks_font = mfont_to_qfont(tklbl.get_fontproperties())\n # title\n title = self.axes.title\n self._fig_title_font = mfont_to_qfont(title.get_fontproperties())\n self._fig_title_visible = title.get_visible()\n\n ## border, if auto scale is enabled, style could not be changed.\n o = list(self.axes.spines.values())[0]\n # c, lw, ls, vis,\n self._fig_border_color = QColor(mplcolor2hex(o.get_ec()))\n self._fig_border_lw = o.get_linewidth()\n self._fig_border_ls = o.get_linestyle()\n self._fig_border_visible = o.get_visible()\n\n # aspect\n self._fig_aspect = str(self.axes.get_aspect())\n\n # tight?\n self._fig_tight_layout = False\n\n # lbls,title\n self._fig_title = ''\n self._fig_xlabel = ''\n self._fig_ylabel = ''\n\n # figure, w,h,dpi\n self._fig_width, self._fig_height = self.figure.get_size_inches()\n self._fig_dpi = self.figure.get_dpi()\n\n # bg color\n self._fig_bgcolor = self.sys_bg_color\n\n # grid color\n self._fig_grid_color = QColor('gray')\n # grid toggle\n self._fig_grid_toggle = False\n\n # mticks toggle\n self._fig_mticks_toggle = False\n\n # legend toggle\n self._legend_toggle = False\n\n # legend location\n self._legend_location = 0\n\n # xyticks angle\n self._fig_xticks_angle = 0\n self._fig_yticks_angle = 0\n\n # xyticks color\n self._fig_ticks_color = self.sys_fg_color\n\n # tick format\n self._fig_xtick_formatter_type = 'Auto'\n self._fig_xtick_formatter = None # placeholder only\n self._fig_xtick_cfmt = '' # c string format for FuncFormatter\n self._fig_ytick_formatter_type = 'Auto'\n self._fig_ytick_formatter = None # placeholder only\n self._fig_ytick_cfmt = '' # c string format for FuncFormatter\n self._fig_ticks_enable_mathtext = False # use math text or not\n\n # xy axis scale\n 
self._fig_xscale = 'linear'\n self._fig_yscale = 'linear'\n\n # xylimits\n self._xlim_min, self._xlim_max = self.axes.get_xlim()\n self._ylim_min, self._ylim_max = self.axes.get_ylim()\n\n # ticklabels visibility\n xtklbl = self.axes.get_xticklabels()[0]\n ytklbl = self.axes.get_yticklabels()[0]\n self._fig_xticks_visible = xtklbl.get_visible()\n self._fig_yticks_visible = ytklbl.get_visible()\n\n # auto scale\n self._fig_auto_scale = False # default disable autoscale\n\n def on_scroll(self, e):\n if e.inaxes is None:\n return\n if e.step < 0:\n f = 1.05 ** (-e.step)\n else:\n f = 0.95 ** e.step\n self.zoom(e, f)\n\n def zoom(self, e, factor):\n x0, y0 = e.xdata, e.ydata\n x_left, x_right = self.axes.get_xlim()\n y_bottom, y_up = self.axes.get_ylim()\n\n self.axes.set_xlim((x0 - (x0 - x_left) * factor,\n x0 + (x_right - x0) * factor))\n self.axes.set_ylim((y0 - (y0 - y_bottom) * factor,\n y0 + (y_up - y0) * factor))\n self.update_figure()\n\n def on_motion(self, evt):\n if evt.inaxes is None:\n return\n x_pos, y_pos = evt.xdata, evt.ydata\n self.xyposUpdated.emit([x_pos, y_pos])\n\n def on_key_press(self, e):\n k, t = e.key, time.time()\n self.keycombo_cached.emit(k, t)\n QTimer.singleShot(DTMSEC, partial(self._on_delay_pop, k, t))\n\n def on_key_release(self, e):\n if len(self.dq_keycombo) != 2:\n return\n (k1, t1) = self.dq_keycombo.popleft()\n (k2, t2) = self.dq_keycombo.popleft()\n if t2 - t1 < DTSEC:\n self.process_keyshort_combo(k1, k2)\n\n def on_pick(self, evt):\n o = evt.artist\n if isinstance(o, Line2D):\n lw0, mw0 = o.get_lw(), o.get_mew()\n x, y = o.get_data()\n ind = evt.ind\n x0, y0 = x[ind][0], y[ind][0]\n o.set_lw(lw0 * 2)\n o.set_mew(mw0 * 2)\n self._last_sel_lines.setdefault(\n o.get_label(),\n (o, lw0, mw0))\n self.selectedPointChanged.emit(x0, y0)\n self.selectedLineChanged.emit(o)\n self.update_figure()\n elif isinstance(evt.artist, Axes):\n if self._last_sel_lines:\n for lbl, (o, lw0, mw0) in self._last_sel_lines.items():\n o.set_lw(lw0)\n o.set_mew(mw0)\n self.update_figure()\n self._last_sel_lines = {}\n\n def on_press(self, e):\n if e.inaxes is None:\n return\n if e.button == 1 and self._to_add_marker:\n self.draw_hvlines(e.xdata, e.ydata, self._mk_name, self._current_mc)\n self.set_visible_hvlines(self._visible_hvlines)\n self._added_marker = True\n QGuiApplication.restoreOverrideCursor()\n\n def on_release(self, e):\n pass\n\n def dragEnterEvent(self, e):\n pass\n\n def dropEvent(self, e):\n pass\n\n def init_figure(self):\n raise NotImplementedError\n\n def update_figure(self):\n if self._fig_auto_scale:\n try:\n self.axes.relim()\n except:\n pass\n else:\n self.axes.autoscale()\n self.canvas.draw_idle()\n\n def contextMenuEvent(self, evt):\n self._create_ctxtmenu().exec_(self.mapToGlobal(evt.pos()))\n\n def _create_ctxtmenu(self):\n menu = QMenu(self)\n config_action = QAction(QIcon(QPixmap(\":/tools/config.png\")),\n \"Config\", menu)\n config_action.setShortcut(\"c,c\")\n config_action.setObjectName('config_action')\n export_action = QAction(QIcon(QPixmap(\":/tools/export.png\")),\n \"Export\", menu)\n import_action = QAction(QIcon(QPixmap(\":/tools/import.png\")),\n \"Import\", menu)\n reset_action = QAction(QIcon(QPixmap(\":/tools/reset.png\")),\n \"Reset\", menu)\n tb_action = QAction(QIcon(QPixmap(\":/tools/tools.png\")),\n \"Tools\", menu)\n tb_action.setObjectName('tb_action')\n tb_action.setShortcut(\"t,t\")\n fitting_action = QAction(QIcon(QPixmap(\":/tools/fitting.png\")),\n \"Fitting\", menu)\n export_data_action = 
QAction(QIcon(QPixmap(\":/tools/export.png\")),\n \"Export Data\", menu)\n info_action = QAction(QIcon(QPixmap(\":/tools/info.png\")),\n \"About\", menu)\n keyshort_action = QAction(QIcon(QPixmap(\":/tools/keyshort.png\")),\n \"Shortcuts\", menu)\n\n menu.addAction(config_action)\n menu.addAction(export_action)\n menu.addAction(import_action)\n menu.addAction(reset_action)\n menu.addSeparator()\n menu.addAction(tb_action)\n menu.addAction(fitting_action)\n menu.addAction(export_data_action)\n menu.addSeparator()\n menu.addAction(keyshort_action)\n menu.addAction(info_action)\n\n menu.setStyleSheet('QMenu {margin: 2px;}')\n\n config_action.triggered.connect(self.on_config)\n export_action.triggered.connect(self.on_export_config)\n import_action.triggered.connect(self.on_import_config)\n reset_action.triggered.connect(self.on_reset_config)\n tb_action.triggered.connect(self.toggle_mpl_tools)\n fitting_action.triggered.connect(self.on_fitting_data)\n export_data_action.triggered.connect(self.on_export_data)\n info_action.triggered.connect(self.on_info)\n keyshort_action.triggered.connect(self.kbd_help)\n\n return menu\n\n @pyqtSlot()\n def on_fitting_data(self):\n \"\"\"Fitting data.\n \"\"\"\n raise NotImplementedError(\"Fitting data is to be implemented.\")\n\n @pyqtSlot()\n def on_export_data(self):\n raise NotImplementedError(\"Export data is to be implemented.\")\n\n @pyqtSlot()\n def on_info(self):\n from ._info import get_pkg_info\n QMessageBox.about(self, 'About mpl4qt', get_pkg_info())\n\n @pyqtSlot()\n def toggle_mpl_tools(self):\n self.setToolbarToggle(not self.getToolbarToggle())\n\n @pyqtSlot()\n def on_reset_config(self):\n # apply default settings\n raise NotImplementedError(\"Reset config is to be implemented.\")\n\n @pyqtSlot()\n def on_config(self):\n raise NotImplementedError(\"Config panel is to be implemented.\")\n\n @pyqtSlot()\n def on_export_config(self):\n filepath, _ = QFileDialog.getSaveFileName(self,\n \"Save Settings\",\n \"./mpl_settings.json\",\n \"JSON Files (*.json)\")\n if not filepath:\n return\n try:\n s = self.get_mpl_settings()\n s.write(filepath, sort_keys=False)\n except:\n QMessageBox.warning(self, \"Warning\",\n \"Cannot export settings to {}\".format(filepath),\n QMessageBox.Ok)\n else:\n QMessageBox.information(self, \"Information\",\n \"Successfully export settings to {}\".format(filepath),\n QMessageBox.Ok)\n\n @pyqtSlot()\n def on_import_config(self):\n filepath, _ = QFileDialog.getOpenFileName(self,\n \"Open Settings\",\n \"./mpl_settings.json\",\n \"JSON Files (*.json)\")\n if not filepath:\n return\n self._import_mpl_settings(filepath)\n\n def apply_mpl_settings(self, settings):\n pass\n\n def _import_mpl_settings(self, filepath):\n try:\n s = MatplotlibCurveWidgetSettings(filepath)\n self.apply_mpl_settings(s)\n except:\n QMessageBox.warning(self, \"Warning\",\n \"Cannot import&apply settings with {}\".format(filepath),\n QMessageBox.Ok)\n else:\n QMessageBox.information(self, \"Information\",\n \"Successfully import&apply settings with {}\".format(filepath),\n QMessageBox.Ok)\n\n def get_mpl_settings(self):\n \"\"\"Return all the settings for the current figure.\n \"\"\"\n pass\n\n def resize_figure(self):\n \"\"\"Must be triggered for set fig size.\n \"\"\"\n self.canvas.resizeEvent(QResizeEvent(self.canvas.size(), self.canvas.size()))\n\n def set_figure_color(self, color=None):\n if color is None:\n color = self.sys_bg_color.getRgbF()\n self.figure.set_facecolor(color)\n self.figure.set_edgecolor(color)\n if MPL_VERSION > \"1.5.1\":\n 
self.axes.set_facecolor(color)\n else:\n self.axes.set_axis_bgcolor(color)\n\n def set_ticks_color(self, color=None):\n if color is None:\n color = self.sys_bg_color.getRgbF()\n all_lbls = self.axes.get_xticklabels() + self.axes.get_yticklabels()\n [lbl.set_color(color) for lbl in all_lbls]\n\n def set_ticks_visible(self, visible, xoy=\"x\"):\n if getattr(self, \"_fig_{}ticks_visible\".format(xoy)):\n tklbls = getattr(self.axes, 'get_{}ticklabels'.format(xoy))()\n # !hiding cannot be reversed!\n [i.set_visible(visible) for i in tklbls]\n else:\n getattr(self.axes, '{}axis'.format(xoy)).reset_ticks()\n self.rotate_ticks(self._fig_xticks_angle, 'x')\n self.rotate_ticks(self._fig_yticks_angle, 'y')\n\n def set_xticks(self, tks):\n self.axes.set_xticks(tks)\n [set_font(lbl, self._fig_xyticks_font) for lbl in self.axes.get_xticklabels()]\n self.update_figure()\n\n def set_yticks(self, tks):\n self.axes.set_yticks(tks)\n [set_font(lbl, self._fig_xyticks_font) for lbl in self.axes.get_yticklabels()]\n self.update_figure()\n\n def set_xticklabels(self, tklbls):\n self.axes.set_xticklabels(tklbls)\n self.update_figure()\n\n def set_yticklabels(self, tklbls):\n self.axes.set_yticklabels(tklbls)\n self.update_figure()\n\n def toggle_mticks(self, f):\n if f:\n self.axes.xaxis.set_minor_locator(AutoMinorLocator())\n self.axes.yaxis.set_minor_locator(AutoMinorLocator())\n else:\n self.axes.xaxis.set_minor_locator(NullLocator())\n self.axes.yaxis.set_minor_locator(NullLocator())\n\n def toggle_grid(self,\n toggle_checked=False,\n which='major',\n b=None,\n color=None,\n **kws):\n if toggle_checked:\n which = 'both' if kws.get('mticks', True) else 'major'\n self.axes.grid(which=which, color=color, linestyle='--')\n else:\n self.axes.grid(b=False, which='minor')\n self.axes.grid(b=False)\n\n def set_xylabel_font(self, font=None):\n if font is None:\n font = self.sys_label_font\n set_font(self.axes.xaxis.label, font)\n set_font(self.axes.yaxis.label, font)\n\n def set_xyticks_font(self, font=None):\n if font is None:\n font = self.sys_label_font\n all_lbls = self.axes.get_xticklabels() + self.axes.get_yticklabels()\n [set_font(lbl, font) for lbl in all_lbls]\n\n def set_title_font(self, font=None):\n if font is None:\n font = self.sys_title_font\n set_font(self.axes.title, font)\n\n def set_context_menu(self, ):\n self.setContextMenuPolicy(Qt.DefaultContextMenu)\n\n def clear_figure(self):\n self.axes.clear()\n self.update_figure()\n\n def clear_data(self):\n \"\"\"Set with empty canvas.\n \"\"\"\n pass\n\n @pyqtSlot('QString', float)\n def on_update_keycombo_cache(self, key, ts):\n self.dq_keycombo.append((key, ts))\n\n def _on_delay_pop(self, k, t):\n if (k, t) not in self.dq_keycombo:\n return\n self.process_keyshort(k)\n self.dq_keycombo.remove((k, t))\n\n def set_border_color(self, c):\n for _, o in self.axes.spines.items():\n o.set_color(c.getRgbF())\n\n def set_border_lw(self, x):\n for _, o in self.axes.spines.items():\n o.set_linewidth(x)\n\n def set_border_ls(self, s):\n for _, o in self.axes.spines.items():\n o.set_linestyle(s)\n\n def set_border_visible(self, f):\n for _, o in self.axes.spines.items():\n o.set_visible(f)\n\n def getFigureAutoScale(self):\n return self._fig_auto_scale\n\n @pyqtSlot(bool)\n def setFigureAutoScale(self, f):\n \"\"\"Set xy limits as autoscale or not.\n\n Parameters\n ----------\n f : bool\n Toggle for the autoscale.\n \"\"\"\n self._fig_auto_scale = f\n if f:\n self.set_autoscale()\n #\n self.autoScaleOnUpdated.emit(f)\n\n figureAutoScale = pyqtProperty(bool, 
getFigureAutoScale,\n setFigureAutoScale)\n\n def getFigureBorderColor(self):\n return self._fig_border_color\n\n @pyqtSlot(QColor)\n def setFigureBorderColor(self, c, **kws):\n \"\"\"Set color for the data boundaries.\n\n Parameters\n ----------\n c : QColor\n Color of the boundaries.\n \"\"\"\n self._fig_border_color = c\n self.set_border_color(c)\n self.update_figure()\n\n figureBorderColor = pyqtProperty(QColor, getFigureBorderColor,\n setFigureBorderColor)\n\n def getFigureBorderLineWidth(self):\n return self._fig_border_lw\n\n @pyqtSlot(float)\n def setFigureBorderLineWidth(self, x):\n \"\"\"Set line width for the border.\n\n Parameters\n ----------\n x : float\n Line width.\n \"\"\"\n self._fig_border_lw = x\n self.set_border_lw(x)\n self.update_figure()\n\n figureBorderLineWidth = pyqtProperty(float, getFigureBorderLineWidth,\n setFigureBorderLineWidth)\n\n def getFigureBorderLineStyle(self):\n return self._fig_border_ls\n\n @pyqtSlot('QString')\n def setFigureBorderLineStyle(self, s):\n \"\"\"Set line style for the border.\n\n Parameters\n ----------\n s : str\n String for the line style, see `line style <https://matplotlib.org/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_.\n \"\"\"\n if s not in LINE_STY_VALS:\n return\n self._fig_border_ls = s\n self.set_border_ls(s)\n self.update_figure()\n\n figureBorderLineStyle = pyqtProperty('QString',\n getFigureBorderLineStyle, setFigureBorderLineStyle)\n\n def getFigureBorderVisible(self):\n return self._fig_border_visible\n\n @pyqtSlot(bool)\n def setFigureBorderVisible(self, f):\n \"\"\"Set borders visible or not.\n\n Parameters\n ----------\n f : bool\n Line visible (True) or not (False).\n \"\"\"\n self._fig_border_visible = f\n self.set_border_visible(f)\n self.update_figure()\n\n figureBorderVisible = pyqtProperty(bool, getFigureBorderVisible,\n setFigureBorderVisible)\n\n def getFigureAspectRatio(self):\n return self._fig_aspect\n\n @pyqtSlot('QString')\n def setFigureAspectRatio(self, s):\n \"\"\"Set aspect ratio of the axes.\n\n Parameters\n ----------\n s : str\n Aspect ratio, 'auto', 'equal' and any number.\n \"\"\"\n try:\n float(s)\n except ValueError:\n if s in ('auto', 'equal'):\n self._fig_aspect = s\n else:\n return\n else:\n if float(s) <= 0:\n return\n self._fig_aspect = s\n finally:\n self.axes.set_aspect(self._fig_aspect)\n self.update_figure()\n\n figureAspectRatio = pyqtProperty('QString', getFigureAspectRatio,\n setFigureAspectRatio)\n\n def getTightLayoutToggle(self):\n return self._fig_tight_layout\n\n @pyqtSlot(bool)\n def setTightLayoutToggle(self, f):\n \"\"\"Toggle for the tight layout.\n\n Parameters\n ----------\n f : bool\n Tight layout toggle.\n \"\"\"\n self._fig_tight_layout = f\n if f:\n # self.figure.set_tight_layout({'pad': 0.1})\n self.figure.subplots_adjust(left=0.05, right=0.98, top=0.98, bottom=0.06)\n else:\n # self.figure.set_tight_layout({'pad': 1.2})\n self.figure.subplots_adjust(left=0.125, right=0.9, top=0.9, bottom=0.10)\n self.update_figure()\n\n figureTightLayout = pyqtProperty(bool, getTightLayoutToggle,\n setTightLayoutToggle)\n\n def getFigureXlabel(self):\n return self._fig_xlabel\n\n @pyqtSlot('QString')\n def setFigureXlabel(self, s):\n \"\"\"Set xlabel string.\n\n Parameters\n ----------\n s : str\n String for xlabel.\n \"\"\"\n self._fig_xlabel = s\n self.axes.set_xlabel(s)\n set_font(self.axes.xaxis.label, self._fig_xylabel_font)\n self.update_figure()\n\n figureXlabel = pyqtProperty('QString', getFigureXlabel, setFigureXlabel)\n\n def 
getFigureYlabel(self):\n return self._fig_ylabel\n\n @pyqtSlot('QString')\n def setFigureYlabel(self, s):\n \"\"\"Set ylabel string.\n\n Parameters\n ----------\n s : str\n String for ylabel.\n \"\"\"\n self._fig_ylabel = s\n self.axes.set_ylabel(s)\n set_font(self.axes.yaxis.label, self._fig_xylabel_font)\n self.update_figure()\n\n figureYlabel = pyqtProperty('QString', getFigureYlabel, setFigureYlabel)\n\n\n def getFigureXYlabelVisible(self):\n return self._fig_xylabel_visible\n\n @pyqtSlot(bool)\n def setFigureXYlabelVisible(self, f):\n \"\"\"Set figure xylabels visible or not.\n\n Parameters\n ----------\n f : bool\n Figure xylabels visible or not.\n \"\"\"\n self._fig_xylabel_visible = f\n self.axes.xaxis.label.set_visible(f)\n self.axes.yaxis.label.set_visible(f)\n self.update_figure()\n\n figureXYlabelVisible = pyqtProperty(bool, getFigureXYlabelVisible,\n setFigureXYlabelVisible)\n\n def getFigureTitleVisible(self):\n return self._fig_title_visible\n\n @pyqtSlot(bool)\n def setFigureTitleVisible(self, f):\n \"\"\"Set figure title visible or not.\n\n Parameters\n ----------\n f : bool\n Figure title visible or not.\n \"\"\"\n self._fig_title_visible = f\n self.axes.title.set_visible(f)\n self.update_figure()\n\n figureTitleVisible = pyqtProperty(bool, getFigureTitleVisible,\n setFigureTitleVisible)\n\n def getFigureTitle(self):\n return self._fig_title\n\n @pyqtSlot('QString')\n def setFigureTitle(self, s):\n \"\"\"Set figure title.\n\n Parameters\n ----------\n s : str\n Title for the figure.\n \"\"\"\n self._fig_title = s\n self.axes.set_title(s)\n set_font(self.axes.title, self._fig_title_font)\n self.update_figure()\n\n figureTitle = pyqtProperty('QString', getFigureTitle, setFigureTitle)\n\n def getFigureXYlabelFont(self):\n return self._fig_xylabel_font\n\n @pyqtSlot(QFont)\n def setFigureXYlabelFont(self, font):\n \"\"\"Set font for x and y labels.\n\n Parameters\n ----------\n font : QFont\n Font to set.\n \"\"\"\n self._fig_xylabel_font = font\n self.set_xylabel_font(font)\n self.update_figure()\n\n figureXYlabelFont = pyqtProperty(QFont, getFigureXYlabelFont,\n setFigureXYlabelFont)\n\n def getFigureTitleFont(self):\n return self._fig_title_font\n\n @pyqtSlot(QFont)\n def setFigureTitleFont(self, font):\n \"\"\"Set font for figure title.\n\n Parameters\n ----------\n font : QFont\n Font to set.\n \"\"\"\n self._fig_title_font = font\n self.set_title_font(font)\n self.update_figure()\n\n figureTitleFont = pyqtProperty(QFont, getFigureTitleFont,\n setFigureTitleFont)\n\n def getFigureWidth(self):\n return self._fig_width\n\n @pyqtSlot(float)\n def setFigureWidth(self, w):\n \"\"\"Set figure width in inch.\n\n Parameters\n ----------\n w : float\n Figure width in inch (>= 2.0).\n \"\"\"\n self._fig_width = max(w, 2.0)\n self.figure.set_size_inches([self._fig_width, self._fig_height])\n self.resize_figure()\n self.update_figure()\n\n figureWidth = pyqtProperty(float, getFigureWidth, setFigureWidth)\n\n def getFigureHeight(self):\n return self._fig_height\n\n @pyqtSlot(float)\n def setFigureHeight(self, h):\n \"\"\"Set figure height in inch.\n\n Parameters\n ----------\n h : float\n Figure height in inch (>= 2.0).\n \"\"\"\n self._fig_height = max(h, 2.0)\n self.figure.set_size_inches([self._fig_width, self._fig_height])\n self.resize_figure()\n self.update_figure()\n\n figureHeight = pyqtProperty(float, getFigureHeight, setFigureHeight)\n\n def getFigureDpi(self):\n return self._fig_dpi\n\n @pyqtSlot(float)\n def setFigureDpi(self, d):\n \"\"\"Set figure dpi.\n\n Parameters\n 
----------\n d : float\n Figure dpi in [50.0, 600.0].\n \"\"\"\n self._fig_dpi = min(600.0, max(d, 50.0))\n self.figure.set_dpi(d)\n self.resize_figure()\n self.update_figure()\n\n figureDPI = pyqtProperty(float, getFigureDpi, setFigureDpi)\n\n def getXLimitMin(self):\n return self._xlim_min\n\n @pyqtSlot(float)\n def setXLimitMin(self, x=None):\n \"\"\"Set minimum of xlimit.\n\n Parameters\n ----------\n x : float\n Minimum of xlimit.\n \"\"\"\n if x is None:\n x, _ = self._get_default_xlim()\n self._xlim_min = x\n xmin, xmax = self.get_xlim()\n if x < xmax:\n self.axes.set_xlim([x, xmax])\n self.update_figure()\n self.xlimitMinChanged.emit(x)\n\n figureXLimitMin = pyqtProperty(float, getXLimitMin, setXLimitMin)\n\n def getXLimitMax(self):\n return self._xlim_max\n\n @pyqtSlot(float)\n def setXLimitMax(self, x=None):\n \"\"\"Set maximum of xlimit.\n\n Parameters\n ----------\n x : float\n Maximum of xlimit.\n \"\"\"\n if x is None:\n _, x = self._get_default_xlim()\n self._xlim_max = x\n xmin, xmax = self.get_xlim()\n if x > xmin:\n self.axes.set_xlim([xmin, x])\n self.update_figure()\n self.xlimitMaxChanged.emit(x)\n\n figureXLimitMax = pyqtProperty(float, getXLimitMax, setXLimitMax)\n\n def getYLimitMin(self):\n return self._ylim_min\n\n @pyqtSlot(float)\n def setYLimitMin(self, y=None):\n \"\"\"Set minimum of ylimit.\n\n Parameters\n ----------\n y : float\n Minimum of ylimit.\n \"\"\"\n if y is None:\n y, _ = self._get_default_ylim()\n self._ylim_min = y\n ymin, ymax = self.get_ylim()\n if y < ymax:\n self.axes.set_ylim([y, ymax])\n self.update_figure()\n self.ylimitMinChanged.emit(y)\n\n figureYLimitMin = pyqtProperty(float, getYLimitMin, setYLimitMin)\n\n def getYLimitMax(self):\n return self._ylim_max\n\n @pyqtSlot(float)\n def setYLimitMax(self, y=None):\n \"\"\"Set maximum of ylimit.\n\n Parameters\n ----------\n y : float\n Maximum of ylimit.\n \"\"\"\n if y is None:\n _, y = self._get_default_ylim()\n self._ylim_max = y\n ymin, ymax = self.get_ylim()\n if y > ymin:\n self.axes.set_ylim([ymin, y])\n self.update_figure()\n self.ylimitMaxChanged.emit(y)\n\n figureYLimitMax = pyqtProperty(float, getYLimitMax, setYLimitMax)\n\n def getFigureXTicksVisible(self):\n return self._fig_xticks_visible\n\n @pyqtSlot(bool)\n def setFigureXTicksVisible(self, f):\n \"\"\"Set xticklabels visible or not.\n\n Parameters\n ----------\n f : bool\n Object visible (True) or not (False).\n \"\"\"\n self.set_ticks_visible(f, \"x\")\n self.update_figure()\n self._fig_xticks_visible = f\n\n figureXTicksVisible = pyqtProperty(bool, getFigureXTicksVisible,\n setFigureXTicksVisible)\n\n def getFigureYTicksVisible(self):\n return self._fig_yticks_visible\n\n @pyqtSlot(bool)\n def setFigureYTicksVisible(self, f):\n \"\"\"Set yticklabels visible or not.\n\n Parameters\n ----------\n f : bool\n Object visible (True) or not (False).\n \"\"\"\n self.set_ticks_visible(f, \"y\")\n self.update_figure()\n self._fig_yticks_visible = f\n\n figureYTicksVisible = pyqtProperty(bool, getFigureYTicksVisible,\n setFigureYTicksVisible)\n\n def getFigureBgColor(self):\n return self._fig_bgcolor\n\n @pyqtSlot(QColor)\n def setFigureBgColor(self, color):\n \"\"\"Set figure background color.\n\n Parameters\n ----------\n color : QColor\n Color to set.\n \"\"\"\n self._fig_bgcolor = color\n self.set_figure_color(color.getRgbF())\n self.update_figure()\n self.bgColorChanged.emit(color)\n\n figureBackgroundColor = pyqtProperty(QColor, getFigureBgColor,\n setFigureBgColor)\n\n def getFigureGridColor(self):\n return 
self._fig_grid_color\n\n @pyqtSlot(QColor)\n def setFigureGridColor(self, c, **kws):\n \"\"\"Set color for the grid line.\n\n Parameters\n ----------\n c : QColor\n Color of the grid line.\n \"\"\"\n self._fig_grid_color = c\n self.toggle_grid(\n toggle_checked=self._fig_grid_toggle,\n color=c.getRgbF(),\n **{\n k: v\n for k, v in kws.items() if k not in ('toggle_checked', 'color')\n })\n self.update_figure()\n\n figureGridColor = pyqtProperty(QColor, getFigureGridColor,\n setFigureGridColor)\n\n def getFigureGridToggle(self):\n return self._fig_grid_toggle\n\n @pyqtSlot(bool)\n def setFigureGridToggle(self, f, **kws):\n \"\"\"Toggle for the figure grid.\n\n Parameters\n ----------\n f : bool\n Figure grid toggle.\n \"\"\"\n self._fig_grid_toggle = f\n self.toggle_grid(\n toggle_checked=f,\n color=self._fig_grid_color.getRgbF(),\n **{\n k: v\n for k, v in kws.items() if k not in ('toggle_checked', 'color')\n })\n self.update_figure()\n #\n self.gridOnUpdated.emit(f)\n\n figureGridToggle = pyqtProperty(bool, getFigureGridToggle,\n setFigureGridToggle)\n\n def getFigureMTicksToggle(self):\n return self._fig_mticks_toggle\n\n @pyqtSlot(bool)\n def setFigureMTicksToggle(self, f):\n \"\"\"Toggle for the minor ticks.\n\n Note\n ----\n Before toggle on, be sure the axis scale is linear.\n\n Parameters\n ----------\n f : bool\n Minor ticks on/off toggle.\n \"\"\"\n self._fig_mticks_toggle = f\n\n xscale = self.getFigureXScale()\n yscale = self.getFigureYScale()\n if xscale != 'linear':\n self.setFigureXScale('linear')\n if yscale != 'linear':\n self.setFigureYScale('linear')\n self.toggle_mticks(f)\n if xscale != 'linear':\n self.setFigureXScale(xscale)\n if yscale != 'linear':\n self.setFigureYScale(yscale)\n\n self.update_figure()\n\n figureMTicksToggle = pyqtProperty(bool, getFigureMTicksToggle,\n setFigureMTicksToggle)\n\n def getLegendToggle(self):\n return self._legend_toggle\n\n @pyqtSlot(bool)\n def setLegendToggle(self, f):\n \"\"\"Toggle for figure legend.\n\n Parameters\n ----------\n f : bool\n Figure legend on/off toggle.\n \"\"\"\n self._legend_toggle = f\n if f:\n self._legend_box = self.axes.legend(loc=self._legend_location)\n else:\n try:\n self._legend_box.set_visible(False)\n except AttributeError:\n pass\n self.update_figure()\n #\n self.legendOnUpdated.emit(f)\n\n figureLegendToggle = pyqtProperty(bool, getLegendToggle, setLegendToggle)\n\n def getLegendLocation(self):\n return self._legend_location\n\n @pyqtSlot(int)\n def setLegendLocation(self, i):\n \"\"\"Set legend location.\n\n Parameters\n ----------\n i : int\n Index number of legend location,\n see `matplotlib.pyplot.legend <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>`_.\n \"\"\"\n self._legend_location = i\n if self._legend_toggle:\n self._legend_box = self.axes.legend(loc=i)\n self.update_figure()\n\n figureLegendLocation = pyqtProperty(int, getLegendLocation,\n setLegendLocation)\n\n def getFigureXTicksAngle(self):\n return self._fig_xticks_angle\n\n @pyqtSlot(float)\n def setFigureXTicksAngle(self, angle):\n \"\"\"Set rotation angle for the xtick labels.\n\n Parameters\n ----------\n angle : float\n Angle in degree to rotate.\n \"\"\"\n self._fig_xticks_angle = angle\n self.rotate_ticks(angle, 'x')\n self.update_figure()\n\n figureXTicksAngle = pyqtProperty(float, getFigureXTicksAngle,\n setFigureXTicksAngle)\n\n def getFigureYTicksAngle(self):\n return self._fig_yticks_angle\n\n @pyqtSlot(float)\n def setFigureYTicksAngle(self, angle):\n \"\"\"Set rotation angle for the ytick labels.\n\n 
Parameters\n ----------\n angle : float\n Angle in degree to rotate.\n \"\"\"\n self._fig_yticks_angle = angle\n self.rotate_ticks(angle, 'y')\n self.update_figure()\n\n figureYTicksAngle = pyqtProperty(float, getFigureYTicksAngle,\n setFigureYTicksAngle)\n\n def getFigureXYticksFont(self):\n return self._fig_xyticks_font\n\n @pyqtSlot(QFont)\n def setFigureXYticksFont(self, font):\n \"\"\"Set font for the tick labels.\n\n Parameters\n ----------\n font : QFont\n Font to set.\n \"\"\"\n self._fig_xyticks_font = font\n self.set_xyticks_font(font)\n self.update_figure()\n\n figureXYticksFont = pyqtProperty(QFont, getFigureXYticksFont,\n setFigureXYticksFont)\n\n def getFigureXYticksColor(self):\n return self._fig_ticks_color\n\n @pyqtSlot(QColor)\n def setFigureXYticksColor(self, color):\n \"\"\"Set color for the ticks.\n\n Parameters\n ----------\n color : QColor\n Color to set.\n \"\"\"\n self._fig_ticks_color = color\n self.set_ticks_color(color.getRgbF())\n self.update_figure()\n\n figureXYticksColor = pyqtProperty(QColor, getFigureXYticksColor,\n setFigureXYticksColor)\n\n def getFigureXScale(self):\n return self._fig_xscale\n\n @pyqtSlot('QString')\n def setFigureXScale(self, s):\n \"\"\"Set x-axis scale.\n\n Parameters\n ----------\n s : str\n Scale type, 'linear', 'log', 'symlog', 'logit', etc.\n \"\"\"\n self._fig_xscale = s\n self.axes.set_xscale(s)\n self.update_figure()\n\n figureXScale = pyqtProperty('QString', getFigureXScale, setFigureXScale)\n\n def getFigureYScale(self):\n return self._fig_yscale\n\n @pyqtSlot('QString')\n def setFigureYScale(self, s):\n \"\"\"Set y-axis scale.\n\n Parameters\n ----------\n s : str\n Scale type, 'linear', 'log', 'symlog', 'logit', etc.\n \"\"\"\n self._fig_yscale = s\n self.axes.set_yscale(s)\n self.update_figure()\n\n figureYScale = pyqtProperty('QString', getFigureYScale, setFigureYScale)\n\n def getToolbarToggle(self):\n return self._fig_tb_toggle\n\n @pyqtSlot(bool)\n def setToolbarToggle(self, f):\n \"\"\"Toggle for the mpl toolbar.\n\n Parameters\n ----------\n f : bool\n Turn on/off mpl toolbar.\n \"\"\"\n self._fig_tb_toggle = f\n w = self._handlers.get('w_mpl_tools', None)\n if w is not None and not f:\n w.floatable_changed.emit(True)\n w.close()\n else:\n self.__show_mpl_tools()\n self.update_figure()\n\n figureToolbarToggle = pyqtProperty(bool, getToolbarToggle, setToolbarToggle)\n\n def _get_default_xlim(self):\n \"\"\"limit range from data\n \"\"\"\n try:\n xmin, xmax = self._x_data.min(), self._x_data.max()\n except:\n xmin, xmax = self.axes.get_xlim()\n x0, xhw = (xmin + xmax) * 0.5, (xmax - xmin) * 0.5\n return x0 - xhw * 1.1, x0 + xhw * 1.1\n\n def get_xlim(self):\n return self.axes.get_xlim()\n\n def _get_default_ylim(self):\n \"\"\"limit range from data\n \"\"\"\n try:\n ymin, ymax = self._y_data.min(), self._y_data.max()\n except:\n ymin, ymax = self.axes.get_ylim()\n y0, yhw = (ymin + ymax) * 0.5, (ymax - ymin) * 0.5\n return y0 - yhw * 1.1, y0 + yhw * 1.1\n\n def get_ylim(self):\n return self.axes.get_ylim()\n\n @pyqtSlot('QString', 'QString')\n def setXTickFormat(self, ftype, cfmt):\n if ftype == 'Auto':\n self._setXTickAutoFormat(ftype)\n elif ftype == 'Custom':\n self._setXTickCustomFormat(ftype, cfmt)\n\n def _setXTickAutoFormat(self, ftype):\n \"\"\"Set x-axis ticks formatter with Auto style.\n\n Parameters\n ----------\n ftype : str\n Type of formatter, 'Auto'.\n \"\"\"\n self._fig_xtick_formatter_type = ftype\n if self._fig_ticks_enable_mathtext:\n formatter = AUTOFORMATTER_MATHTEXT\n else:\n formatter = 
AUTOFORMATTER\n self._fig_xtick_formatter = formatter\n self.axes.xaxis.set_major_formatter(formatter)\n self.update_figure()\n\n def _setXTickCustomFormat(self, ftype, cfmt):\n \"\"\"Set x-axis ticks formatter with custom style.\n\n Parameters\n ----------\n ftype : str\n Type of formatter, 'Custom'.\n cfmt : str\n C style string specifier.\n \"\"\"\n self._fig_xtick_formatter_type = ftype\n self._fig_xtick_cfmt = cfmt\n formatter = generate_formatter(cfmt, math_text=self._fig_ticks_enable_mathtext)\n self._fig_xtick_formatter = formatter\n self.axes.xaxis.set_major_formatter(formatter)\n self.update_figure()\n\n @pyqtSlot('QString', 'QString')\n def setYTickFormat(self, ftype, cfmt):\n if ftype == 'Auto':\n self._setYTickAutoFormat(ftype)\n elif ftype == 'Custom':\n self._setYTickCustomFormat(ftype, cfmt)\n\n def _setYTickAutoFormat(self, ftype):\n \"\"\"Set y-axis ticks formatter with Auto style.\n\n Parameters\n ----------\n ftype : str\n Type of formatter, 'Auto'.\n \"\"\"\n self._fig_ytick_formatter_type = ftype\n if self._fig_ticks_enable_mathtext:\n formatter = AUTOFORMATTER_MATHTEXT\n else:\n formatter = AUTOFORMATTER\n self._fig_ytick_formatter = formatter\n self.axes.yaxis.set_major_formatter(formatter)\n self.update_figure()\n\n def _setYTickCustomFormat(self, ftype, cfmt):\n \"\"\"Set y-axis ticks formatter with custom style.\n\n Parameters\n ----------\n ftype : str\n Type of formatter, 'Custom'.\n cfmt : str\n C style string specifier.\n \"\"\"\n self._fig_ytick_formatter_type = ftype\n self._fig_ytick_cfmt = cfmt\n formatter = generate_formatter(cfmt, math_text=self._fig_ticks_enable_mathtext)\n self._fig_ytick_formatter = formatter\n self.axes.yaxis.set_major_formatter(formatter)\n self.update_figure()\n\n def rotate_ticks(self, angle, axis):\n \"\"\"Rotate *axis* ticks by *angle* in degree.\n \"\"\"\n lbls = getattr(self.axes, \"get_{}ticklabels\".format(axis))()\n for o in lbls:\n o.set_rotation(angle)\n\n def set_autoscale(self, axis='both'):\n self.axes.relim(visible_only=True)\n self.axes.autoscale(axis=axis)\n self.update_figure()\n\n def process_keyshort_combo(self, k1, k2):\n \"\"\"Override this method to define combo keyshorts.\n \"\"\"\n # print(\"Capture key combo: \", k1, k2)\n if k1 == 'a' and k2 == 'x':\n # auto xscale\n self.set_autoscale('x')\n elif k1 == 'a' and k2 == 'y':\n # auto yscale\n self.set_autoscale('y')\n elif k1 == 'a' and k2 == 'a':\n if self.widget_type == 'image':\n self.setAutoColorLimit(not self.getAutoColorLimit())\n # turn on/off autoscale\n self.setFigureAutoScale(not self.getFigureAutoScale())\n elif k1 == 'a' and k2 == 'c' and self.widget_type == 'image':\n # auto color range\n self.on_auto_clim()\n elif k1 == 'shift' and k2 == '?':\n # help msgbox\n self.kbd_help()\n elif k1 == 'c' and k2 == 'c':\n self._create_ctxtmenu().findChild(QAction, 'config_action').triggered.emit()\n elif k1 == 't' and k2 == 't':\n self._create_ctxtmenu().findChild(QAction, 'tb_action').triggered.emit()\n elif k1 == 'd' and k2 == 's' and self.widget_type in ('curve', 'errorbar'):\n # circulate curve drawstyle\n self.setLineDrawStyle(\n cycle_list_next(list(LINE_DS_VALS), self.getLineDrawStyle()))\n\n def process_keyshort(self, k):\n \"\"\"Override this method to define keyshorts.\n \"\"\"\n # print(\"Capture key: \", k)\n if k == 'g':\n # turn on/off grid\n self.setFigureGridToggle(not self.getFigureGridToggle())\n elif k == 'a': # and self.widget_type != 'image':\n # autoscale\n self.set_autoscale()\n elif k == 'm':\n # turn on/off mticks\n 
self.setFigureMTicksToggle(not self.getFigureMTicksToggle())\n elif k == 't':\n # turn on/off tightlayout\n self.setTightLayoutToggle(not self.getTightLayoutToggle())\n elif k == 'l':\n # turn on/off legend\n self.setLegendToggle(not self.getLegendToggle())\n elif k == 'r':\n # force refresh\n self.force_update()\n elif k == 's' and self.widget_type != 'image':\n # circulate y-axis scale type\n self.setFigureYScale(\n cycle_list_next(SCALE_STY_VALS, self.getFigureYScale()))\n elif k == 'c' and self.widget_type == 'image':\n # circulate image colormap\n self.setColorMap(\n cycle_list_next(ALL_COLORMAPS, self.getColorMap()))\n\n def kbd_help(self):\n \"\"\"Help message box for keyboard shortcuts.\n \"\"\"\n from .kbdhelpdialog import KbdHelpDialog\n w = KbdHelpDialog(self)\n w.setWindowTitle(\"Keyboard Shortcuts Help\")\n w.exec_()\n\n def set_xlimit(self, *args):\n \"\"\"Set xlimit with new limit, e.g. `set_xlimit(xmin, xmax)`.\n\n See Also\n --------\n setXLimitMin, setXLimitMax\n \"\"\"\n self.axes.set_xlim(args)\n self.update_figure()\n\n def set_ylimit(self, *args):\n \"\"\"Set ylimit with new limit.\n\n See Also\n --------\n setYLimitMin, setYLimitMax\n \"\"\"\n self.axes.set_ylim(args)\n self.update_figure()\n\n\nclass MatplotlibBaseWidget(BasePlotWidget):\n \"\"\"MatplotlibBaseWidget(BasePlotWidget)\n \"\"\"\n\n def __init__(self, parent=None):\n super(MatplotlibBaseWidget, self).__init__(parent)\n self.widget_type = 'base'\n\n def init_figure(self):\n pass\n\n @pyqtSlot()\n def on_config(self):\n from .mplconfig import MatplotlibConfigPanel\n config_panel = MatplotlibConfigPanel(self)\n config_panel.exec_()\n\n def update_figure(self):\n if self._fig_auto_scale:\n try:\n self.axes.relim()\n except:\n pass\n else:\n self.axes.autoscale()\n self.canvas.draw_idle()\n\n\nclass MatplotlibCMapWidget(BasePlotWidget):\n def __init__(self, parent=None):\n super(MatplotlibCMapWidget, self).__init__(parent, False)\n self.figure.set_size_inches((self.getFigureWidth(), 0.2))\n self.figure.set_tight_layout(True)\n self.figure.subplots_adjust(\n top=0.9999, bottom=0.0001, left=0.0001, right=0.9999)\n self.axes.set_axis_off()\n\n # reverse cmap flag, '' or '_r'\n self._rcmap = ''\n\n def init_figure(self):\n gradient = np.linspace(0, 1, 256)\n gradient = np.vstack((gradient, gradient))\n self.im = self.axes.imshow(gradient, aspect='auto')\n\n def set_cmap(self, c):\n if not is_cmap_valid(c):\n return\n self._cmap = c\n self.im.set_cmap(self._cmap + self._rcmap)\n self.update_figure()\n\n def set_reverse_cmap(self, f):\n self._rcmap = '_r' if f else ''\n self.set_cmap(self._cmap)\n\n\nif __name__ == '__main__':\n from PyQt5.QtWidgets import QApplication\n\n app = QApplication([])\n window = MatplotlibBaseWidget()\n window.show()\n\n app.exec_()\n" ]
[ [ "numpy.linspace", "matplotlib.figure.Figure", "matplotlib.ticker.AutoMinorLocator", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "matplotlib.ticker.NullLocator", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
msusicky/ockovani-covid
[ "2835943b5796b04a3542782ecda125b6766cd317" ]
[ "app/fetcher/used_fetcher.py" ]
[ "import pandas as pd\n\nfrom app import db, app\nfrom app.fetcher.fetcher import Fetcher\nfrom app.models import OckovaniSpotreba, OckovaciMisto\n\n\nclass UsedFetcher(Fetcher):\n \"\"\"\n Class for updating used vaccines table.\n \"\"\"\n\n USED_CSV = 'https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/ockovani-spotreba.csv'\n\n def __init__(self):\n super().__init__(OckovaniSpotreba.__tablename__, self.USED_CSV)\n\n def fetch(self, import_id: int) -> None:\n df = pd.read_csv(self._url)\n\n df = df.rename(columns={'ockovaci_misto_kod': 'ockovaci_misto_id'})\n\n df['kraj_nuts_kod'] = df['kraj_nuts_kod'].fillna('-')\n\n df['pouzite_davky'] = df['pouzite_davky'].fillna(0).astype('int')\n df['znehodnocene_davky'] = df['znehodnocene_davky'].fillna(0).astype('int')\n\n df = df.groupby(['datum', 'ockovaci_misto_id', 'ockovaci_latka', 'vyrobce'], dropna=False).sum().reset_index()\n\n # filter out missing centers\n size = len(df)\n mista_ids = [r[0] for r in db.session.query(OckovaciMisto.id).all()]\n df = df[df['ockovaci_misto_id'].isin(mista_ids)]\n\n if size > len(df):\n app.logger.warning(\"Some centers doesn't exist - {} rows skipped.\".format(size - len(df)))\n\n self._truncate()\n\n df.to_sql(self._table, db.engine, if_exists='append', index=False, method=Fetcher._psql_insert_copy)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
rodrigodelazcano/AerialRobotics
[ "44d7929721eaf3c817cf7f70966e805b36f66981" ]
[ "assignment1/src/offboard_UMD/script/plot_position.py" ]
[ "import numpy as np\nimport rosbag\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom tf.transformations import euler_from_quaternion\n\n# Read bag file\nbag = rosbag.Bag('2021-09-21-19-57-22.bag')\n\nx = []\ny = []\nz = []\nroll = []\npitch = []\nyaw = []\ntime = []\ncycles = []\ncycle_time = []\n\ninit_time = 0\n\nfor topic, msg, t in bag.read_messages(topics=['/mavros/local_position/pose', '/mavros/path_cycle']):\n if topic == '/mavros/local_position/pose':\n current_time = t.to_sec()\n x.append(msg.pose.position.x)\n y.append(msg.pose.position.y)\n z.append(msg.pose.position.z)\n\n orientation_q = msg.pose.orientation\n orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]\n (r, p, ya) = euler_from_quaternion (orientation_list)\n\n roll.append(r)\n pitch.append(p)\n yaw.append(ya)\n\n if init_time == 0:\n time.append(0)\n init_time = current_time\n else:\n time.append(current_time - init_time)\n else:\n cycles.append(msg.cycle)\n cycle_time.append(t.to_sec() - init_time)\n\ndata = np.stack((x, y, z, roll, pitch, yaw, time))\n\ncycles.append(5)\ncycle_step = 0\ncycle_data = {}\npast_idx = 0\n\nfor idx, tim in enumerate(time):\n if cycle_time[cycle_step] < tim:\n cycle_data['cycle_{}'.format(cycle_step)] = data[:, past_idx:idx]\n cycle_step += 1\n past_idx = idx\n if cycle_step > 4:\n cycle_data['cycle_{}'.format(cycle_step)] = data[:, idx+1:]\n break\n\n## Plot position ##\n##################\n\nfig1, ax1 = plt.subplots(figsize=(20,20))\n\nax1.set_ylim([-3, 33])\nax1.set_xlim([0, 320])\nax1.plot(time, x, linewidth=2.5, label='x')\nax1.plot(time, y, linewidth=2.5, label='y')\nax1.plot(time, z, linewidth=2.5, label='z')\nax1.set_title(\"XYZ Position\", fontweight = 'heavy')\nax1.set(xlabel=\"Time [s]\", ylabel=\"Distance [m]\")\nax1.legend(shadow=True, fancybox=True, loc='upper right')\n\nfor value in [5, 10, 25]:\n ax1.axhline(y=value, color='k', linestyle='--', alpha=0.4)\n\n## Plot orientation ##\n######################\n\nfig2, ax2 = plt.subplots(figsize=(20,20))\n\nax2.set_ylim([-1, 1.5])\nax2.set_xlim([0, 320])\nax2.plot(time, roll, linewidth=2.5, label='roll')\nax2.plot(time, pitch, linewidth=2.5, label='pitch')\nax2.plot(time, yaw, linewidth=2.5, label='yaw')\nax2.set_title(\"RPY Orientation\", fontweight = 'heavy')\nax2.set(xlabel=\"Time [s]\", ylabel=\"Angle [rad]\")\nax2.legend(shadow=True, fancybox=True, loc='upper right')\n\nlast_tim = 0\nfor c, tim in enumerate(cycle_time):\n ax1.axvline(x=tim, color='k', linestyle='--', alpha=0.4)\n ax1.annotate(s='', xy=(last_tim,28), xytext=(tim,28), arrowprops=dict(arrowstyle='<->'))\n\n ax2.axvline(x=tim, color='k', linestyle='--', alpha=0.4)\n ax2.annotate(s='', xy=(last_tim,1), xytext=(tim,1), arrowprops=dict(arrowstyle='<->'))\n\n if c == 0:\n l = \"Takeoff\"\n else:\n l = \"Cycle {}\".format(c)\n\n ax1.text((tim-last_tim)/2 + last_tim, 29.1, l, horizontalalignment='center',\n verticalalignment='center', weight='bold')\n\n ax2.text((tim-last_tim)/2 + last_tim, 1.1, l, horizontalalignment='center',\n verticalalignment='center', weight='bold')\n\n \n last_tim = tim\n\nax1.annotate(s='', xy=(last_tim,28), xytext=(data[6, -1],28), arrowprops=dict(arrowstyle='<->'))\nax1.text((data[6, -1]-last_tim)/2 + last_tim, 29.5, 'Landing', horizontalalignment='center',\n verticalalignment='center', fontsize=10, weight='bold')\n\nax2.annotate(s='', xy=(last_tim,1), xytext=(data[6, -1],1), arrowprops=dict(arrowstyle='<->'))\nax2.text((data[6, -1]-last_tim)/2 + last_tim, 1.1, 'Landing', 
horizontalalignment='center',\n verticalalignment='center', fontsize=10, weight='bold')\n\n\n## Position 3D plot ##\n######################\n\nxs = [0, 0, 10, 10, 10]\nys = [0, 0, 0, 0, 5]\nzs = [0, 10, 10, 25, 25]\nfig3 = plt.figure(figsize=(20, 20))\nax3 = Axes3D(fig3, alpha=0.1)\nax3.set_title(\"3D XYZ Trajectory\", fontweight = 'heavy')\n\nfor c in cycles:\n data = cycle_data['cycle_{}'.format(c)]\n if c > 0 and c < 5:\n l = 'cycle_{}'.format(c)\n elif c == 0:\n l = 'takeoff'\n else:\n l = 'landing'\n ax3.plot3D(data[0, :], data[1, :], data[2, :], label=l, linewidth=2.5)\nax3.legend(shadow=True, fancybox=True)\nax3.scatter(xs, ys, zs, s=35, c='k')\nfor xt, yt, zt in zip(xs, ys, zs):\n ax3.text3D(xt + 0.1, yt + 0.1, zt + 0.1, '({},{},{})'.format(xt, yt, zt), \n fontsize=10, fontweight = 'heavy')\nax3.set(xlabel=\"X [m]\", ylabel=\"Y [m]\", zlabel=\"Z [m]\")\n\n## Plot trajectories in X-Y X-Z & Y-Z planes ##\n###############################################\n\nfig4 = plt.figure(figsize=(20,20))\nax4 = fig4.add_subplot(131)\nax5 = fig4.add_subplot(132)\nax6 = fig4.add_subplot(133)\n\nfor c in cycles:\n data = cycle_data['cycle_{}'.format(c)]\n if c > 0 and c < 5:\n l = 'cycle_{}'.format(c)\n elif c == 0:\n l = 'takeoff'\n else:\n l = 'landing'\n ax4.plot(data[0, :], data[1, :], label=l, linewidth=2.5)\n ax5.plot(data[0, :], data[2, :], label=l, linewidth=2.5)\n ax6.plot(data[1, :], data[2, :], label=l, linewidth=2.5)\n\nax4.set_title(\"Trajectory XY\", fontweight = 'heavy')\nax4.set(xlabel=\"X [m]\", ylabel=\"Y [m]\")\nax4.legend(shadow=True, fancybox=True, loc='upper left')\n\nax5.set_title(\"Trajectory XZ\", fontweight = 'heavy')\nax5.set(xlabel=\"X [m]\", ylabel=\"Z [m]\")\nax5.legend(shadow=True, fancybox=True, loc='lower right')\n\nax6.set_title(\"Trajectory YZ\", fontweight = 'heavy')\nax6.set(xlabel=\"Y [m]\", ylabel=\"Z [m]\")\nax6.legend(shadow=True, fancybox=True, loc='lower right')\n\nfor xt, yt, zt in zip(xs, ys, zs):\n ax4.text(xt + 0.2, yt + 0.2, '({},{})'.format(xt, yt), \n fontsize=10, fontweight = 'heavy')\n ax5.text(xt + 0.2, zt + 0.2, '({},{})'.format(xt, zt), \n fontsize=10, fontweight = 'heavy')\n ax6.text(yt + 0.2, zt + 0.2, '({},{})'.format(yt, zt), \n fontsize=10, fontweight = 'heavy')\n\nplt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "numpy.stack", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kizill/coremltools
[ "11e143089a66ee219ce3a2ed98aa1aae794d4794" ]
[ "coremltools/models/_graph_visualization.py" ]
[ "# Copyright (c) 2017, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\n\"\"\"\nFunctions related to graph visualization of mlmodels\n\"\"\"\n\nimport ast as _ast\nimport json as _json\nimport os as _os\nimport numpy as _np\nfrom ._infer_shapes_nn_mlmodel import infer_shapes as _infer_shapes\nfrom coremltools.proto import NeuralNetwork_pb2 as _NeuralNetwork_pb2\n\n\ndef _calculate_edges(cy_nodes, cy_edges, shape_dict=None):\n \"\"\"\n\n Parameters\n ----------\n cy_nodes: list of nodes for graph\n cy_edges: list of edges to be updated for graph\n shape_dict: shape_dict required for inferring shape information\n\n Returns\n -------\n\n cy_nodes: list of nodes for graph\n cy_edges: list of edges to be updated for graph\n\n \"\"\"\n node_len = len(cy_nodes)\n\n for upper_index in range(0, node_len):\n for lower_index in range(upper_index + 1, node_len):\n\n if 'outputs' in cy_nodes[upper_index]['data']['info'].keys() and \\\n 'inputs' in cy_nodes[upper_index]['data']['info'].keys() \\\n and 'outputs' in cy_nodes[lower_index]['data']['info'].keys() \\\n and 'inputs' in cy_nodes[lower_index]['data']['info'].keys():\n outputs = _ast.literal_eval(\n cy_nodes[upper_index]['data']['info']['outputs']\n )\n inputs = _ast.literal_eval(\n cy_nodes[lower_index]['data']['info']['inputs']\n )\n for output in outputs:\n if output in inputs:\n if shape_dict is None or output not in shape_dict.keys():\n label = None\n else:\n label = str(shape_dict[output])\n\n cy_edges.append(\n {\n 'data':{'id':\n '{}.{}.{}'.format(\n output,\n cy_nodes[upper_index]['data']['id'],\n cy_nodes[lower_index]['data']['id']\n ),\n 'source': cy_nodes[upper_index]['data']['id'],\n 'target': cy_nodes[lower_index]['data']['id'],\n 'label': label,\n 'shape': label\n }\n }\n )\n\n return cy_nodes, cy_edges\n\n\ndef _layer_specific_info(layer):\n \"\"\"\n\n Parameters\n ----------\n layer : Can be one of : 'activation', 'add', 'average', 'batchnorm',\n 'biDirectionalLSTM', 'bias', 'concat', 'convolution', 'crop', 'dot',\n 'embedding', 'flatten', 'gru', 'innerProduct', 'input', 'l2normalize',\n 'loadConstant', 'lrn', 'max', 'min', 'multiply', 'mvn', 'name', 'output',\n 'padding', permute', 'pooling', 'reduce', 'reorganizeData', 'reshape',\n 'scale', 'sequenceRepeat', 'simpleRecurrent', 'slice', 'softmax', 'split',\n 'unary', 'uniDirectionalLSTM', 'upsample'\n\n Returns\n -------\n info : info specific to layer type\n\n \"\"\"\n if layer.WhichOneof('layer') == 'convolution':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'outputChannels': _json.dumps(str(layer.convolution.outputChannels)),\n 'kernelChannels': _json.dumps(str(layer.convolution.kernelChannels)),\n 'groups': _json.dumps(str(layer.convolution.nGroups)),\n 'kernelSize': _json.dumps(str(layer.convolution.kernelSize)),\n 'stride': _json.dumps(str(layer.convolution.stride)),\n 'dilationFactor': _json.dumps(str(layer.convolution.dilationFactor)),\n 'isDeconvolution': _json.dumps(str(layer.convolution.isDeconvolution)),\n 'paddingType' : _json.dumps(layer.convolution.WhichOneof('ConvolutionPaddingType')),\n 'desc': 'A layer that performs spatial convolution'\n }\n if _json.dumps(layer.convolution.isDeconvolution) == 'true':\n info['type'] = 'deconvolution'\n info['desc'] = 'A layer that performs spatial deconvolution'\n\n elif layer.WhichOneof('layer') == 'activation':\n params = layer.activation\n act_type = 
params.WhichOneof('NonlinearityType')\n info = {\n 'type': layer.WhichOneof('layer'),\n 'activationType': act_type,\n 'desc': 'Applies specified type of activation function to input.'\n }\n if act_type == 'linear':\n info['alpha'] = _json.dumps(str(params.linear.alpha))\n info['beta'] = _json.dumps(str(params.linear.beta))\n if act_type == 'leakyReLU':\n info['alpha'] = _json.dumps(str(params.leakyReLU.alpha))\n if act_type == 'thresholdedReLU':\n info['alpha'] = _json.dumps(str(params.thresholdedReLU.alpha))\n if act_type == 'scaledTanh':\n info['alpha'] = _json.dumps(str(params.scaledTanh.alpha))\n info['beta'] = _json.dumps(str(params.scaledTanh.beta))\n if act_type == 'sigmoidHard':\n info['alpha'] = _json.dumps(str(params.sigmoidHard.alpha))\n info['beta'] = _json.dumps(str(params.sigmoidHard.beta))\n if act_type == 'ELU':\n info['alpha'] = _json.dumps(str(params.ELU.alpha))\n\n elif layer.WhichOneof('layer') == 'pooling':\n params = layer.pooling\n paddingType = params.WhichOneof('PoolingPaddingType')\n info = {\n 'type': layer.WhichOneof('layer'),\n 'desc': 'Spatial Pooling layer to reduce dimensions of input using the '\n 'specified kernel size and type.'\n }\n if params.globalPooling:\n info['globalPooling'] = 'True'\n info['poolingType'] = 'global pooling'\n else:\n info['poolingType'] = _json.dumps(_NeuralNetwork_pb2.PoolingLayerParams.PoolingType.Name(params.type))\n info['stride'] = _json.dumps(str(params.stride))\n info['kernelSize'] = _json.dumps(str(params.kernelSize))\n info['paddingType'] = _json.dumps(paddingType)\n\n elif layer.WhichOneof('layer') == 'add':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'alpha': _json.dumps(str(layer.add.alpha)),\n 'desc': 'A layer that performs elementwise addition.'\n }\n elif layer.WhichOneof('layer') == 'batchnorm':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'channels': _json.dumps(str(layer.batchnorm.channels)),\n 'computeMeanVar': _json.dumps(str(layer.batchnorm.computeMeanVar)),\n 'instanceNormalization': _json.dumps(str(layer.batchnorm.instanceNormalization)),\n 'desc': 'A layer that performs batch normalization, \\n'\n 'which is performed along the channel axis, \\n'\n 'and repeated along the other axes, if present.'\n }\n elif layer.WhichOneof('layer') == 'biDirectionalLSTM':\n forward_activations = \"\"\n for activation in layer.biDirectionalLSTM.activationsForwardLSTM:\n forward_activations += str(activation)[:-5] + \", \"\n backward_activations = \"\"\n for activation in layer.biDirectionalLSTM.activationsBackwardLSTM:\n backward_activations += str(activation)[:-5] + \", \"\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputVectorSize': _json.dumps(str(layer.biDirectionalLSTM.inputVectorSize)),\n 'outputVectorSize': _json.dumps(str(layer.biDirectionalLSTM.outputVectorSize)),\n 'forward_activations': _json.dumps(forward_activations),\n 'backward_activations': _json.dumps(backward_activations),\n 'lstm_params': _json.dumps(str(layer.biDirectionalLSTM.params)),\n 'desc': 'Bidirectional long short-term memory (LSTM) layer\\n'\n 'The first LSTM operates on the input sequence in the forward direction.\\n'\n 'The second LSTM operates on the input sequence in the reverse direction.'\n }\n elif layer.WhichOneof('layer') == 'uniDirectionalLSTM':\n activations = \"\"\n for activation in layer.uniDirectionalLSTM.activations:\n activations += str(activation)[:-5] + \", \"\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputVectorSize': _json.dumps(str(layer.uniDirectionalLSTM.inputVectorSize)),\n 'outputVectorSize': 
_json.dumps(str(layer.uniDirectionalLSTM.outputVectorSize)),\n 'activations': _json.dumps(activations),\n 'lstm_params': _json.dumps(str(layer.uniDirectionalLSTM.params)),\n 'reverse_input': _json.dumps(str(layer.uniDirectionalLSTM.reverseInput)),\n 'desc': 'A unidirectional long short-term memory (LSTM) layer.'\n\n }\n elif layer.WhichOneof('layer') == 'gru':\n activations = \"\"\n for activation in layer.gru.activations:\n activations += str(activation)[:-5] + \", \"\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputVectorSize': _json.dumps(str(layer.gru.inputVectorSize)),\n 'outputVectorSize': _json.dumps(str(layer.gru.outputVectorSize)),\n 'activations': _json.dumps(activations),\n 'hasBiasVectors': _json.dumps(str(layer.gru.hasBiasVectors)),\n 'reverseInput': _json.dumps(str(layer.gru.reverseInput)),\n 'sequenceOutput': _json.dumps(str(layer.gru.sequenceOutput)),\n 'desc': 'Gated-Recurrent Unit (GRU) Layer.\\n'\n\n }\n elif layer.WhichOneof('layer') == 'simpleRecurrent':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputVectorSize': _json.dumps(str(layer.simpleRecurrent.inputVectorSize)),\n 'outputVectorSize': _json.dumps(str(layer.simpleRecurrent.outputVectorSize)),\n 'activation': _json.dumps(str(layer.simpleRecurrent.activation)),\n 'hasBiasVector': _json.dumps(str(layer.simpleRecurrent.hasBiasVector)),\n 'reverseInput': _json.dumps(str(layer.simpleRecurrent.reverseInput)),\n 'sequenceOutput': _json.dumps(str(layer.simpleRecurrent.sequenceOutput)),\n 'desc': 'A simple recurrent layer.'\n }\n elif layer.WhichOneof('layer') == 'bias':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'shape': _json.dumps(str(layer.bias.shape)),\n 'desc': 'A layer that performs elementwise addition of a bias,\\n'\n 'which is broadcasted to match the input shape.'\n }\n elif layer.WhichOneof('layer') == 'concat':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'sequenceConcat': _json.dumps(str(layer.concat.sequenceConcat)),\n 'desc': 'A layer that concatenates along the channel axis (default) or sequence axis.'\n }\n elif layer.WhichOneof('layer') == 'crop':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'cropAmounts': _json.dumps(str(layer.crop.cropAmounts)),\n 'offset': _json.dumps(str(layer.crop.offset)),\n 'desc': 'A layer that crops the spatial dimensions of an input.\\n'\n 'If two inputs are provided, the shape of the second '\n 'input is used as the reference shape.'\n }\n elif layer.WhichOneof('layer') == 'dot':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'cosineSimilarity': _json.dumps(str(layer.dot.cosineSimilarity)),\n 'desc': 'If true, inputs are normalized first, '\n 'thereby computing the cosine similarity.'\n }\n elif layer.WhichOneof('layer') == 'embedding':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputDim': _json.dumps(str(layer.embedding.inputDim)),\n 'outputChannels': _json.dumps(str(layer.embedding.outputChannels)),\n 'hasBias': _json.dumps(str(layer.embedding.inputDim)),\n 'desc': 'A layer that performs a matrix lookup and optionally adds a bias.'\n }\n elif layer.WhichOneof('layer') == 'flatten':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'mode': _json.dumps(_NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Name(layer.flatten.mode)),\n 'desc': 'A layer that flattens the input.'\n }\n elif layer.WhichOneof('layer') == 'innerProduct':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputChannels': _json.dumps(str(layer.innerProduct.inputChannels)),\n 'outputChannels': _json.dumps(str(layer.innerProduct.outputChannels)),\n 'hasBias': 
_json.dumps(str(layer.innerProduct.hasBias)),\n 'desc': 'A layer that performs a matrix vector product.\\n'\n 'This is equivalent to a fully-connected, or dense layer.'\n }\n elif layer.WhichOneof('layer') == 'l2normalize':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'epsilon': _json.dumps(str(layer.l2normalize.epsilon)),\n 'desc': 'A layer that performs L2 normalization, i.e. divides by the \\n'\n 'the square root of the sum of squares of all elements of input.'\n }\n elif layer.WhichOneof('layer') == 'loadConstant':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'shape': _json.dumps(str(layer.loadConstant.shape)),\n 'desc': 'The shape of the constant to be loaded'\n }\n elif layer.WhichOneof('layer') == 'lrn':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'alpha': _json.dumps(str(layer.lrn.alpha)),\n 'beta': _json.dumps(str(layer.lrn.beta)),\n 'localSize': _json.dumps(str(layer.lrn.localSize)),\n 'k': _json.dumps(str(layer.lrn.k)),\n 'desc': 'A layer that performs local response normalization (LRN).'\n }\n elif layer.WhichOneof('layer') == 'multiply':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'alpha': _json.dumps(str(layer.multiply.alpha)),\n 'desc': 'A layer that performs elementwise multiplication.'\n }\n elif layer.WhichOneof('layer') == 'mvn':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'acrossChannels': _json.dumps(str(layer.mvn.acrossChannels)),\n 'normalizeVariance': _json.dumps(str(layer.mvn.normalizeVariance)),\n 'epsilon': _json.dumps(str(layer.mvn.epsilon)),\n 'desc': 'A layer that performs mean variance normalization.'\n }\n elif layer.WhichOneof('layer') == 'padding':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'paddingAmounts': _json.dumps(str(layer.padding.paddingAmounts)),\n 'paddingType': _json.dumps(str(layer.padding.WhichOneof('PaddingType'))),\n 'desc': 'Fill a constant value in the padded region.'\n }\n elif layer.WhichOneof('layer') == 'permute':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'axis': _json.dumps(str(layer.permute.axis)),\n 'desc': 'A layer that rearranges the dimensions and data of an input.'\n }\n elif layer.WhichOneof('layer') == 'reduce':\n params = layer.reduce\n info = {\n 'type': layer.WhichOneof('layer'),\n 'mode': _json.dumps(str(params.mode)),\n 'epsilon': _json.dumps(str(params.epsilon)),\n 'axis': _json.dumps(_NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Name(params.axis)),\n 'desc': 'A layer that reduces the input using a specified operation.'\n }\n elif layer.WhichOneof('layer') == 'reorganizeData':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'mode': _json.dumps(_NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Name(layer.reorganizeData.mode)),\n 'blockSize': _json.dumps(str(layer.reorganizeData.blockSize)),\n 'desc': 'A layer that reorganizes data in the input in: \\n'\n '1. SPACE_TO_DEPTH\\n'\n '2. 
DEPTH_TO_SPACE'\n }\n elif layer.WhichOneof('layer') == 'reshape':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'mode': _json.dumps(_NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Name(layer.reshape.mode)),\n 'targetShape': _json.dumps(str(layer.reshape.targetShape)),\n 'desc': 'A layer that recasts the input into a new shape.'\n }\n elif layer.WhichOneof('layer') == 'scale':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'shapeScale': _json.dumps(str(layer.scale.shapeScale)),\n 'hasBias': _json.dumps(str(layer.scale.hasBias)),\n 'shapeBias': _json.dumps(str(layer.scale.shapeBias)),\n 'desc': 'A layer that performs elmentwise multiplication by a scale factor\\n'\n 'and optionally adds a bias;'\n }\n elif layer.WhichOneof('layer') == 'sequenceRepeat':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'nRepetitions': _json.dumps(str(layer.sequenceRepeat.nRepetitions)),\n 'desc': 'A layer that repeats a sequence.'\n }\n elif layer.WhichOneof('layer') == 'slice':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'startIndex': _json.dumps(str(layer.slice.startIndex)),\n 'endIndex': _json.dumps(str(layer.slice.endIndex)),\n 'stride': _json.dumps(str(layer.slice.stride)),\n 'axis': _json.dumps(_NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Name(layer.slice.axis)),\n 'desc': 'A layer that slices the input data along a given axis.'\n }\n elif layer.WhichOneof('layer') == 'split':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'nOutputs': _json.dumps(str(layer.split.nOutputs)),\n 'desc': 'A layer that uniformly splits across the channel dimension\\n'\n 'to produce a specified number of outputs.'\n }\n elif layer.WhichOneof('layer') == 'unary':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'unary_type': _json.dumps(_NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Name(layer.unary.type)),\n 'alpha': _json.dumps(str(layer.unary.alpha)),\n 'epsilon': _json.dumps(str(layer.unary.epsilon)),\n 'shift': _json.dumps(str(layer.unary.shift)),\n 'scale': _json.dumps(str(layer.unary.scale)),\n 'desc': 'A layer that applies a unary function.'\n }\n elif layer.WhichOneof('layer') == 'upsample':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'scalingFactor': _json.dumps(str(layer.upsample.scalingFactor)),\n 'mode': _json.dumps(_NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Name(layer.upsample.mode)),\n 'desc': 'A layer that scales up spatial dimensions.\\n'\n 'It supports two modes: '\n 'nearest neighbour (default) and bilinear.'\n }\n elif layer.WhichOneof('layer') == 'max':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'desc': 'A layer that computes the elementwise maximum '\n 'over the inputs.'\n }\n elif layer.WhichOneof('layer') == 'min':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'desc': 'A layer that computes the elementwise minimum '\n 'over the inputs.'\n }\n elif layer.WhichOneof('layer') == 'average':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'desc': 'A layer that computes the elementwise average '\n 'of the inputs.'\n }\n elif layer.WhichOneof('layer') == 'softmax':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'desc': 'A layer that performs softmax normalization.\\n'\n 'Normalization is done along the channel axis.'\n }\n elif layer.WhichOneof('layer') == 'custom':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'className': layer.custom.className,\n 'desc': 'A custom layer'\n }\n if layer.custom.parameters != {}:\n for key in layer.custom.parameters.keys():\n value = _get_custom_layer_value(layer.custom.parameters[key])\n info[key] = value\n 
if layer.custom.description:\n info['desc'] = layer.custom.description\n\n else:\n info = {\n 'type': layer.WhichOneof('layer')\n }\n\n info['inputs'] = str(layer.input)\n info['outputs'] = str(layer.output)\n\n return info\n\ndef _get_custom_layer_value(parameter):\n\n if 'intValue' in str(parameter):\n return str(parameter.intValue)\n elif 'doubleValue' in str(parameter):\n return str(parameter.doubleValue)\n elif 'boolValue' in str(parameter):\n return str(parameter.boolValue)\n elif 'longValue' in str(parameter):\n return str(parameter.longValue)\n elif 'stringValue' in str(parameter):\n return str(parameter.stringValue)\n\n\n\ndef _pipeline_component_info(model, info):\n \"\"\"\n\n Parameters\n ----------\n model : pipeline model\n info : info dict to dump model related info into\n\n model can be one of 'arrayFeatureExtractor', 'categoricalMapping',\n 'dictVectorizer', 'featureVectorizer', 'glmClassifier', 'glmRegressor',\n 'identity', 'imputer', 'neuralNetwork', 'neuralNetworkClassifier',\n 'neuralNetworkRegressor', 'normalizer', 'oneHotEncoder', 'scaler',\n 'supportVectorClassifier', 'supportVectorRegressor',\n 'treeEnsembleClassifier', 'treeEnsembleRegressor'\n\n Returns\n -------\n info : info dict with required info for model\n\n \"\"\"\n model_type = model.WhichOneof('Type')\n if model_type == 'arrayFeatureExtractor':\n info[\"desc\"] = 'Given an index, extracts the value at ' \\\n 'that index from its array input.\\n' \\\n 'Indexes are zero-based.'\n elif model_type == 'categoricalMapping':\n info[\"mappingType\"] = _json.dumps(str(model.categoricalMapping.WhichOneof('MappingType')))\n info[\"valueOnUnknown\"] = _json.dumps(str(model.categoricalMapping.WhichOneof('ValueOnUnknown')))\n info[\"desc\"] = 'This allows conversion from integers ' \\\n 'to strings, or from strings to integers.'\n elif model_type == 'dictVectorizer':\n info[\"map\"] = _json.dumps(str(model.dictVectorizer.WhichOneof('Map')))\n info[\"desc\"] = 'Uses an index mapping to convert a dictionary ' \\\n 'to an array.\\n The output array will be equal in ' \\\n 'length to the index mapping vector parameter.\\n' \\\n 'All keys in the input dictionary must be present in ' \\\n 'the index mapping vector.'\n elif model_type == 'featureVectorizer':\n info[\"inputList\"] = _json.dumps(str(model.featureVectorizer.inputList))\n info[\"desc\"] = 'A FeatureVectorizer puts one or more features into a single' \\\n ' array.\\n The ordering of features in the output array is ' \\\n 'determined by inputList.'\n elif model_type == 'glmClassifier':\n info[\"offset\"] = _json.dumps(str(model.glmClassifier.offset))\n info[\"postEvaluationTransform\"] = _json.dumps(str(model.glmClassifier.postEvaluationTransform))\n info[\"classEncoding\"] = _json.dumps(str(model.glmClassifier.classEncoding))\n info[\"classLabels\"] = _json.dumps(str(model.glmClassifier.WhichOneof('ClassLabels')))\n info[\"desc\"] = 'A generalized linear model classifier.'\n elif model_type == 'glmRegressor':\n info[\"offset\"] = _json.dumps(str(model.glmRegressor.offset))\n info[\"postEvaluationTransform\"] = _json.dumps(str(model.glmRegressor.postEvaluationTransform))\n info[\"desc\"] = 'A generalized linear model regressor.'\n elif model_type == 'imputer':\n info[\"ImputedValue\"] = _json.dumps(str(model.imputer.WhichOneof('ImputedValue')))\n info[\"desc\"] = 'A transformer that replaces missing values with a ' \\\n 'default value,\\n such as a statistically-derived ' \\\n 'value.\\nIf ``ReplaceValue`` is set, then missing ' \\\n 'values of that type are\\n 
replaced with the ' \\\n 'corresponding value.'\n elif model_type == 'normalizer':\n info[\"normType\"] = _json.dumps(str(model.normalizer.normType))\n info[\"desc\"] = 'A normalization preprocessor.There are three normalization modes\\n' \\\n '1. Max\\n' \\\n '2. L1\\n' \\\n '3. L2'\n elif model_type == 'oneHotEncoder':\n info[\"CategoryType\"] = _json.dumps(str(model.oneHotEncoder.WhichOneof('CategoryType')))\n info[\"outputSparse\"] = _json.dumps(str(model.oneHotEncoder.outputSparse))\n info[\"handleUnknown\"] = _json.dumps(str(model.oneHotEncoder.handleUnknown))\n info[\"desc\"] = 'Transforms a categorical feature into an array. The array will be all\\n' \\\n 'zeros expect a single entry of one.\\n' \\\n 'Each categorical value will map to an index, this mapping is given by\\n' \\\n 'either the ``stringCategories`` parameter or the ``int64Categories``\\n' \\\n 'parameter.'\n elif model_type == 'scaler':\n info[\"shiftValue\"] = _json.dumps(str(model.scaler.shiftValue))\n info[\"scaleValue\"] = _json.dumps(str(model.scaler.scaleValue))\n info[\"desc\"] = 'A scaling operation.\\n' \\\n 'f(x) = scaleValue \\cdot (x + shiftValue)'\n elif model_type == 'supportVectorClassifier':\n info[\"kernel\"] = _json.dumps(str(model.supportVectorClassifier.kernel))\n info[\"numberOfSupportVectorsPerClass\"] = _json.dumps(str(model.supportVectorClassifier.numberOfSupportVectorsPerClass))\n info[\"rho\"] = _json.dumps(str(model.supportVectorClassifier.rho))\n info[\"probA\"] = _json.dumps(str(model.supportVectorClassifier.probA))\n info[\"probB\"] = _json.dumps(str(model.supportVectorClassifier.probB))\n info[\"ClassLabels\"] = _json.dumps(str(model.supportVectorClassifier.WhichOneof('ClassLabels')))\n info[\"desc\"] = 'Support Vector Machine Classifier with one of ' \\\n 'Linear, RBF, Polynomial or Sigmoid ' \\\n 'kernels available'\n elif model_type == 'supportVectorRegressor':\n info[\"kernel\"] = _json.dumps(str(model.supportVectorRegressor.kernel))\n info[\"numberOfSupportVectorsPerClass\"] = _json.dumps(\n str(model.supportVectorRegressor.numberOfSupportVectorsPerClass))\n info[\"rho\"] = _json.dumps(str(model.supportVectorRegressor.rho))\n info[\"desc\"] = 'Support Vector Machine Regressor with one of ' \\\n 'Linear, RBF, Polynomial or Sigmoid kernels available'\n elif model_type == 'treeEnsembleClassifier':\n info[\"treeEnsemble\"] = _json.dumps(str(model.treeEnsembleClassifier.treeEnsemble))\n info[\"postEvaluationTransform\"] = _json.dumps(str(model.treeEnsembleClassifier.postEvaluationTransform))\n info[\"ClassLabels\"] = _json.dumps(str(model.treeEnsembleClassifier.WhichOneof('ClassLabels')))\n info[\"desc\"] = 'Each tree is a collection of nodes, each of which is identified ' \\\n 'by a unique identifier.\\nEach node is either a branch or a leaf node.' \\\n ' A branch node evaluates a value according to a behavior;\\n' \\\n 'A tree must have exactly one root node, which has no parent node.'\n elif model_type == 'treeEnsembleRegressor':\n info[\"treeEnsemble\"] = _json.dumps(str(model.treeEnsembleRegressor.treeEnsemble))\n info[\"postEvaluationTransform\"] = _json.dumps(str(model.treeEnsembleRegressor.postEvaluationTransform))\n info[\"desc\"] = 'Each tree is a collection of nodes, each of which is identified' \\\n ' by a unique identifier.\\nEach node is either a branch or a leaf' \\\n ' node. 
A branch node evaluates a value according to a behavior;\\n' \\\n 'A tree must have exactly one root node, which has no parent node.'\n return info\n\n\ndef _neural_network_node_info(nn_spec, cy_nodes, child=False, parent=None):\n \"\"\"\n\n Parameters\n ----------\n nn_spec : Neural Network spec of mlmodel\n cy_nodes: list of nodes to update with nn layers\n child: If child of a parent pipeline component\n parent : Parent node of the Neural Network spec\n\n Returns\n -------\n\n cy_nodes: Updated with layer specific information\n\n \"\"\"\n layers = nn_spec.layers\n for layer in layers:\n info = _layer_specific_info(layer)\n if child:\n info[\"name\"] = layer.name\n cy_nodes.append({\n 'data': {\n 'id': layer.name,\n 'name': info[\"type\"],\n 'info': info,\n 'parent': parent\n },\n 'classes': info[\"type\"],\n })\n else:\n info[\"name\"] = layer.name\n cy_nodes.append({\n 'data': {\n 'id': layer.name,\n 'name': info[\"type\"],\n 'info': info\n },\n 'classes': info[\"type\"],\n })\n\n return cy_nodes\n\n\ndef _neural_network_nodes_and_edges(nn_spec,\n cy_nodes,\n cy_edges,\n spec_outputs,\n input_spec,\n input_shape_dict=None\n ):\n \"\"\"\n\n Parameters\n ----------\n nn_spec : Neural Network Spec\n cy_nodes : list to add nn nodes to\n cy_edges : list to add edges for nn nodes to\n spec_outputs : outputs of nn spec\n input_spec : input spec of Neural Network\n\n Returns\n -------\n\n cy_data : concatenated list of updated cy_nodes and cy_edges\n\n \"\"\"\n cy_nodes = _neural_network_node_info(nn_spec, cy_nodes)\n cy_nodes.append({\n 'data': {\n 'id': 'output_node',\n 'name': '',\n 'info': {\n 'type': 'output node'\n },\n 'classes': 'output',\n\n }\n })\n\n for model_output, output_type in spec_outputs:\n cy_nodes.append({\n 'data': {\n 'id': str(model_output),\n 'name': str(model_output),\n 'info': {\n 'type': \"\\n\".join(str(output_type).split(\"\\n\")),\n 'inputs': str([model_output]),\n 'outputs': str([])\n },\n 'parent': 'output_node'\n },\n 'classes': 'output'\n })\n\n shape_dict = _infer_shapes(nn_spec, input_spec, input_shape_dict=input_shape_dict)\n cy_nodes, cy_edges = _calculate_edges(cy_nodes, cy_edges, shape_dict)\n\n cy_data = cy_nodes + cy_edges\n return cy_data\n\n\ndef _pipeline_nodes_and_edges(cy_nodes, cy_edges, pipeline_spec, spec_outputs):\n \"\"\"\n\n Parameters\n ----------\n cy_nodes : list to add nn nodes to\n cy_edges : list to add edges for nn nodes to\n pipeline_spec: Spec of pipeline mlmodel\n spec_outputs: spec outputs of pipeline mlmodel\n\n Returns\n -------\n\n cy_data : concatenated list of updated cy_nodes and cy_edges\n\n \"\"\"\n i = 1\n nn_model_types = ['neuralNetwork', 'neuralNetworkClassifier', 'neuralNetworkRegressor']\n models = pipeline_spec.models\n shape_dict = None\n for model in models:\n sub_model_type = model.WhichOneof('Type')\n if not sub_model_type:\n sub_model_type = 'input'\n info = {}\n input_names = []\n output_names = []\n info['Pipeline Component'] = sub_model_type.upper()\n for model_input in model.description.input:\n input_names.append(model_input.name)\n info['inputs'] = str(input_names)\n\n for model_output in model.description.output:\n output_names.append(model_output.name)\n info['outputs'] = str(output_names)\n\n info = _pipeline_component_info(model, info)\n\n if sub_model_type in nn_model_types:\n cy_nodes.append({\n 'data': {\n 'id': \"{}_{}\".format(sub_model_type, i),\n 'name': sub_model_type,\n 'info': info\n },\n 'classes': 'parent',\n })\n if sub_model_type == 'neuralNetwork':\n nn_spec = model.neuralNetwork\n 
elif sub_model_type == 'neuralNetworkClassifier':\n nn_spec = model.neuralNetworkClassifier\n elif sub_model_type == 'neuralNetworkRegressor':\n nn_spec = model.neuralNetworkRegressor\n cy_nodes = _neural_network_node_info(nn_spec, cy_nodes, child=True, parent=\"{}_{}\".format(sub_model_type, i))\n shape_dict = _infer_shapes(nn_spec, model.description.input)\n else:\n cy_nodes.append({\n 'data': {\n 'id': \"{}_{}\".format(sub_model_type, i),\n 'name': sub_model_type,\n 'info': info\n },\n 'classes': sub_model_type\n })\n i += 1\n\n cy_nodes.append({\n 'data': {\n 'id': 'output_node',\n 'name': '',\n 'info': {\n 'type': 'output node'\n },\n 'classes': 'output',\n\n }\n })\n\n for model_output, output_type in spec_outputs:\n cy_nodes.append({\n 'data': {\n 'id': str(model_output),\n 'name': str(model_output),\n 'info': {\n 'type': \"\\n\".join(str(output_type).split(\"\\n\")),\n 'inputs': str([model_output]),\n 'outputs': str([])\n },\n 'parent' : 'output_node'\n },\n 'classes': 'output'\n })\n\n\n cy_nodes, cy_edges = _calculate_edges(cy_nodes, cy_edges, shape_dict)\n\n cy_data = cy_nodes + cy_edges\n return cy_data\n\n\ndef _start_server(port, web_dir):\n \"\"\"\n\n Parameters\n ----------\n port : localhost port to start server on\n web_dir: directory containing server files\n\n Returns\n -------\n\n None\n\n \"\"\"\n import subprocess\n import webbrowser\n if port is None:\n port = _np.random.randint(8000, 9000)\n import sys\n if sys.version_info[0] < 3:\n http_server = 'SimpleHTTPServer'\n else:\n http_server = 'http.server'\n subprocess.Popen([sys.executable, '-m', http_server, str(port)], cwd=web_dir)\n webbrowser.open_new_tab('localhost:{}'.format(str(port)))\n return True\n" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]