repo_name (string, 8-130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
m30m/dgl | [
"2190c39d674f76c65db9ee8da7b43d3021f19c29"
] | [
"python/dgl/backend/pytorch/tensor.py"
] | [
"from __future__ import absolute_import\n\nfrom distutils.version import LooseVersion\n\nimport scipy # Weird bug in new pytorch when import scipy after import torch\nimport torch as th\nimport builtins\nfrom torch.utils import dlpack\n\nfrom ... import ndarray as nd\nfrom ... import kernel as K\nfrom ...function.base import TargetCode\nfrom ...base import dgl_warning\n\nif LooseVersion(th.__version__) < LooseVersion(\"1.2.0\"):\n dgl_warning(\"Detected an old version of PyTorch. Suggest using torch>=1.2.0 \"\n \"for the best experience.\")\n\ndef data_type_dict():\n return {'float16' : th.float16,\n 'float32' : th.float32,\n 'float64' : th.float64,\n 'uint8' : th.uint8,\n 'int8' : th.int8,\n 'int16' : th.int16,\n 'int32' : th.int32,\n 'int64' : th.int64,\n 'bool' : th.bool}\n\ndef cpu():\n return th.device('cpu')\n\ndef tensor(data, dtype=None):\n return th.tensor(data, dtype=dtype)\n\ndef as_scalar(data):\n return data.item()\n\ndef get_preferred_sparse_format():\n \"\"\"Get the preferred sparse matrix format supported by the backend.\n\n Different backends have their preferred backend. This info is useful when\n constructing a sparse matrix.\n \"\"\"\n return \"coo\"\n\ndef sparse_matrix(data, index, shape, force_format=False):\n fmt = index[0]\n if fmt != 'coo':\n raise TypeError('Pytorch backend only supports COO format. But got %s.' % fmt)\n spmat = th.sparse_coo_tensor(index[1], data, shape)\n return spmat, None\n\ndef sparse_matrix_indices(spmat):\n return ('coo', spmat._indices())\n\ndef is_tensor(obj):\n return isinstance(obj, th.Tensor)\n\ndef shape(input):\n return input.shape\n\ndef dtype(input):\n return input.dtype\n\ndef ndim(input):\n return input.dim()\n\ndef context(input):\n return input.device\n\ndef device_type(ctx):\n return ctx.type\n\ndef device_id(ctx):\n if ctx.index is None:\n return 0\n else:\n return ctx.index\n\ndef astype(input, ty):\n return input.type(ty)\n\ndef asnumpy(input):\n if isinstance(input, th.sparse.FloatTensor):\n return input.to_dense().cpu().detach().numpy()\n else:\n return input.cpu().detach().numpy()\n\ndef copy_to(input, ctx):\n if ctx.type == 'cpu':\n return input.cpu()\n elif ctx.type == 'cuda':\n if ctx.index is not None:\n th.cuda.set_device(ctx.index)\n return input.cuda()\n else:\n raise RuntimeError('Invalid context', ctx)\n\ndef sum(input, dim, keepdims=False):\n return th.sum(input, dim=dim, keepdim=keepdims)\n\ndef reduce_sum(input):\n return input.sum()\n\ndef mean(input, dim):\n return th.mean(input, dim=dim)\n\ndef reduce_mean(input):\n return input.mean()\n\ndef max(input, dim):\n # NOTE: the second argmax array is not returned\n return th.max(input, dim=dim)[0]\n\ndef reduce_max(input):\n return input.max()\n\ndef min(input, dim):\n # NOTE: the second argmin array is not returned\n return th.min(input, dim=dim)[0]\n\ndef reduce_min(input):\n return input.min()\n\ndef argsort(input, dim, descending):\n return th.argsort(input, dim=dim, descending=descending)\n\ndef topk(input, k, dim, descending=True):\n return th.topk(input, k, dim, largest=descending)[0]\n\ndef argtopk(input, k, dim, descending=True):\n return th.topk(input, k, dim, largest=descending)[1]\n\ndef exp(input):\n return th.exp(input)\n\ndef softmax(input, dim=-1):\n return th.softmax(input, dim=dim)\n\ndef cat(seq, dim):\n return th.cat(seq, dim=dim)\n\ndef stack(seq, dim):\n return th.stack(seq, dim=dim)\n\ndef split(input, sizes_or_sections, dim):\n return th.split(input, sizes_or_sections, dim)\n\ndef repeat(input, repeats, dim):\n # return 
th.repeat_interleave(input, repeats, dim) # PyTorch 1.1\n if dim < 0:\n dim += input.dim()\n return th.flatten(th.stack([input] * repeats, dim=dim+1), dim, dim+1)\n\ndef gather_row(data, row_index):\n return th.index_select(data, 0, row_index)\n\ndef slice_axis(data, axis, begin, end):\n return th.narrow(data, axis, begin, end - begin)\n\ndef take(data, indices, dim):\n new_shape = data.shape[:dim] + indices.shape + data.shape[dim+1:]\n return th.index_select(data, dim, indices.view(-1)).view(new_shape)\n\ndef narrow_row(x, start, stop):\n return x[start:stop]\n\ndef scatter_row(data, row_index, value):\n return data.index_copy(0, row_index, value)\n\ndef scatter_row_inplace(data, row_index, value):\n data[row_index] = value\n\ndef squeeze(input, dim):\n return th.squeeze(input, dim)\n\ndef unsqueeze(input, dim):\n return th.unsqueeze(input, dim)\n\ndef reshape(input, shape):\n return th.reshape(input ,shape)\n\ndef swapaxes(input, axis1, axis2):\n return th.transpose(input, axis1, axis2)\n\ndef zeros(shape, dtype, ctx):\n return th.zeros(shape, dtype=dtype, device=ctx)\n\ndef zeros_like(input):\n return th.zeros_like(input)\n\ndef ones(shape, dtype, ctx):\n return th.ones(shape, dtype=dtype, device=ctx)\n\ndef uniform(shape, dtype, ctx, low, high):\n return th.empty(shape, dtype=dtype, device=ctx).uniform_(low, high)\n\ndef pad_packed_tensor(input, lengths, value, l_min=None):\n old_shape = input.shape\n if isinstance(lengths, th.Tensor):\n max_len = as_scalar(lengths.max())\n else:\n max_len = builtins.max(lengths)\n\n if l_min is not None:\n max_len = builtins.max(max_len, l_min)\n\n batch_size = len(lengths)\n device = input.device\n x = input.new(batch_size * max_len, *old_shape[1:])\n x.fill_(value)\n index = []\n for i, l in enumerate(lengths):\n index.extend(range(i * max_len, i * max_len + l))\n index = th.tensor(index).to(device)\n return scatter_row(x, index, input).view(batch_size, max_len, *old_shape[1:])\n\ndef pack_padded_tensor(input, lengths):\n batch_size, max_len = input.shape[:2]\n device = input.device\n index = []\n for i, l in enumerate(lengths):\n index.extend(range(i * max_len, i * max_len + l))\n index = th.tensor(index).to(device)\n return gather_row(input.view(batch_size * max_len, -1), index)\n\ndef unsorted_1d_segment_sum(input, seg_id, n_segs, dim):\n y = th.zeros(n_segs, *input.shape[1:]).to(input)\n seg_id = seg_id.view((-1,) + (1,) * (input.dim() - 1)).expand_as(input)\n y = y.scatter_add_(dim, seg_id, input)\n return y\n\ndef unsorted_1d_segment_mean(input, seg_id, n_segs, dim):\n w = unsorted_1d_segment_sum(th.ones_like(seg_id), seg_id, n_segs, 0).to(input)\n w = w.clamp(min=1) # remove 0 entries\n y = unsorted_1d_segment_sum(input, seg_id, n_segs, dim)\n y = y / w.view((-1,) + (1,) * (y.dim() - 1))\n return y\n\ndef boolean_mask(input, mask):\n return input[mask]\n\ndef equal(x, y):\n return x == y\n\ndef logical_not(input):\n return ~input\n\ndef clone(input):\n return input.clone()\n\ndef unique(input):\n return th.unique(input)\n\ndef full_1d(length, fill_value, dtype, ctx):\n return th.full((length,), fill_value, dtype=dtype, device=ctx)\n\ndef nonzero_1d(input):\n x = th.nonzero(input).squeeze()\n return x if x.dim() == 1 else x.view(-1)\n\ndef sort_1d(input):\n return th.sort(input)\n\ndef arange(start, stop):\n return th.arange(start, stop, dtype=th.int64)\n\ndef rand_shuffle(arr):\n idx = th.randperm(len(arr))\n return arr[idx]\n\ndef zerocopy_to_dlpack(input):\n return dlpack.to_dlpack(input.contiguous())\n\ndef 
zerocopy_from_dlpack(dlpack_tensor):\n return dlpack.from_dlpack(dlpack_tensor)\n\ndef zerocopy_to_numpy(input):\n # NOTE: not zerocopy\n return asnumpy(input)\n\ndef zerocopy_from_numpy(np_array):\n return th.as_tensor(np_array)\n\ndef zerocopy_to_dgl_ndarray(input):\n return nd.from_dlpack(dlpack.to_dlpack(input.contiguous()))\n\ndef zerocopy_from_dgl_ndarray(input):\n return dlpack.from_dlpack(input.to_dlpack())\n\n\n\nclass BinaryReduce(th.autograd.Function):\n @staticmethod\n def forward(ctx, reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data, out_data,\n out_size, lhs_map, rhs_map, out_map):\n lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)\n rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)\n feat_shape = K.infer_binary_feature_shape(binary_op, lhs_data_nd, rhs_data_nd)\n out_shape = feat_shape\n if binary_op == 'dot':\n out_shape = feat_shape[:-1]\n out_data_nd = zerocopy_to_dgl_ndarray(out_data)\n K.binary_op_reduce(\n reducer if reducer != 'mean' else 'sum',\n binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,\n out_data_nd, lhs_map[0], rhs_map[0], out_map[0])\n # normalize if mean reducer\n # NOTE(zihao): this is a temporary hack and we should have better solution in the future.\n if reducer == 'mean':\n degs = lhs_data.new_empty((out_data.shape[0],))\n degs_nd = zerocopy_to_dgl_ndarray(degs)\n if lhs != TargetCode.DST: # src or edge\n target = lhs\n n = lhs_data.shape[0]\n in_map = lhs_map[0]\n else: # rhs != TargetCode.DST\n target = rhs\n n = rhs_data.shape[0]\n in_map = rhs_map[0]\n in_ones = lhs_data.new_ones((n,))\n in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)\n K.copy_reduce(\n 'sum', graph, target, in_ones_nd, degs_nd, in_map, out_map[0])\n # reshape\n degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.dim() - 1)).clamp(min=1)\n out_data = out_data / degs\n else:\n degs = None\n # save_for_backward can only save variables\n ctx.backward_cache = (reducer, binary_op, graph, lhs, rhs, lhs_map,\n rhs_map, out_map, feat_shape, degs)\n ctx.save_for_backward(lhs_data, rhs_data, out_data)\n return out_data\n\n @staticmethod\n def backward(ctx, grad_out):\n reducer, binary_op, graph, lhs, rhs, lhs_map, rhs_map, out_map, \\\n feat_shape, degs = ctx.backward_cache\n lhs_data, rhs_data, out_data = ctx.saved_tensors\n lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)\n rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)\n out_data_nd = zerocopy_to_dgl_ndarray(out_data)\n grad_lhs = None\n grad_rhs = None\n if reducer == 'mean':\n grad_out = grad_out / degs\n grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)\n if ctx.needs_input_grad[5]:\n grad_lhs = grad_out.new_empty((lhs_data_nd.shape[0],) + feat_shape)\n K.backward_lhs_binary_op_reduce(\n reducer if reducer != 'mean' else 'sum',\n binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,\n out_data_nd, grad_out_nd, zerocopy_to_dgl_ndarray(grad_lhs),\n lhs_map[1], rhs_map[1], out_map[1])\n grad_lhs = _reduce_grad(grad_lhs, lhs_data_nd.shape)\n if ctx.needs_input_grad[6]:\n grad_rhs = grad_out.new_empty((rhs_data_nd.shape[0],) + feat_shape)\n K.backward_rhs_binary_op_reduce(\n reducer if reducer != 'mean' else 'sum',\n binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,\n out_data_nd, grad_out_nd, zerocopy_to_dgl_ndarray(grad_rhs),\n lhs_map[1], rhs_map[1], out_map[1])\n grad_rhs = _reduce_grad(grad_rhs, rhs_data_nd.shape)\n\n return None, None, None, None, None, grad_lhs, grad_rhs, None, None, None, \\\n None, None\n\n\ndef binary_reduce(reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data,\n out_size, lhs_map=(None, 
None), rhs_map=(None, None), out_map=(None, None)):\n lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)\n rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)\n feat_shape = K.infer_binary_feature_shape(binary_op, lhs_data_nd, rhs_data_nd)\n\n out_shape = feat_shape\n if binary_op == 'dot':\n out_shape = feat_shape[:-1]\n out_data = lhs_data.new_empty((out_size,) + out_shape)\n\n return BinaryReduce.apply(\n reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data, out_data,\n out_size, lhs_map, rhs_map, out_map)\n\n\nclass CopyReduce(th.autograd.Function):\n @staticmethod\n def forward(ctx, reducer, graph, target, in_data, out_data, out_size, in_map,\n out_map):\n in_data_nd = zerocopy_to_dgl_ndarray(in_data)\n out_data_nd = zerocopy_to_dgl_ndarray(out_data)\n K.copy_reduce(\n reducer if reducer != 'mean' else 'sum',\n graph, target, in_data_nd, out_data_nd, in_map[0], out_map[0])\n # normalize if mean reducer\n # NOTE(zihao): this is a temporary hack and we should have better solution in the future.\n if reducer == 'mean':\n in_ones = in_data.new_ones((in_data.shape[0],))\n degs = in_data.new_empty((out_data.shape[0],))\n in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)\n degs_nd = zerocopy_to_dgl_ndarray(degs)\n K.copy_reduce(\n 'sum', graph, target, in_ones_nd, degs_nd, in_map[0], out_map[0])\n # reshape\n degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.dim() - 1)).clamp(min=1)\n out_data = out_data / degs\n else:\n degs = None\n # save_for_backward can only save variables\n ctx.backward_cache = (reducer, graph, target, in_map, out_map, degs)\n ctx.save_for_backward(in_data, out_data)\n return out_data\n\n @staticmethod\n def backward(ctx, grad_out):\n reducer, graph, target, in_map, out_map, degs = ctx.backward_cache\n in_data, out_data = ctx.saved_tensors\n in_data_nd = zerocopy_to_dgl_ndarray(in_data)\n out_data_nd = zerocopy_to_dgl_ndarray(out_data)\n grad_in = None\n if reducer == 'mean':\n grad_out = grad_out / degs\n grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)\n if ctx.needs_input_grad[3]:\n grad_in = grad_out.new_empty(in_data_nd.shape)\n K.backward_copy_reduce(\n reducer if reducer != 'mean' else 'sum',\n graph, target, in_data_nd, out_data_nd, grad_out_nd,\n zerocopy_to_dgl_ndarray(grad_in), in_map[1], out_map[1])\n return None, None, None, grad_in, None, None, None, None\n\n\ndef copy_reduce(reducer, graph, target, in_data, out_size, in_map=(None, None),\n out_map=(None, None)):\n out_data = in_data.new_empty((out_size,) + in_data.shape[1:])\n return CopyReduce.apply(reducer, graph, target, in_data, out_data, out_size, in_map, out_map)\n\n\ndef _reduce_grad(grad, shape):\n \"\"\"Reduce gradient on the broadcast dimension\n\n If there is broadcast in forward pass, gradients need to be reduced on\n broadcast dimension. This function checks the input tensor shape and\n gradient shape and perform the reduction.\n\n Parameters\n ----------\n grad: Tensor\n Gradient tensor\n shape: tuple\n Shape of input tensor\n\n Returns\n -------\n Tensor\n \"\"\"\n grad_shape = grad.shape[1:]\n in_shape = shape[1:]\n if in_shape == grad_shape:\n # no need to reduce\n return grad\n num_to_squeeze = len(grad_shape) - len(in_shape)\n # pad inshape\n in_shape = (1,) * num_to_squeeze + in_shape\n reduce_idx = th.nonzero(th.tensor(grad_shape) - th.tensor(in_shape))\n reduce_idx += 1 # skip batch dim\n grad = grad.sum(dim=tuple(reduce_idx), keepdim=True)\n return grad.view(shape)\n\ndef sync():\n # Pytorch performs computation synchronously, so no need for synchronization.\n pass\n"
] | [
[
"torch.empty",
"torch.min",
"torch.stack",
"torch.nonzero",
"torch.as_tensor",
"torch.argsort",
"torch.max",
"torch.utils.dlpack.from_dlpack",
"torch.cat",
"torch.softmax",
"torch.arange",
"torch.unique",
"torch.index_select",
"torch.device",
"torch.mean",
"torch.sort",
"torch.unsqueeze",
"torch.cuda.set_device",
"torch.ones",
"torch.ones_like",
"torch.tensor",
"torch.reshape",
"torch.transpose",
"torch.sum",
"torch.split",
"torch.sparse_coo_tensor",
"torch.zeros_like",
"torch.full",
"torch.exp",
"torch.topk",
"torch.narrow",
"torch.zeros",
"torch.squeeze"
]
] |
echaussidon/desispec | [
"8a8bd59653861509dd630ffc8e1cd6c67f6cdd51",
"8a8bd59653861509dd630ffc8e1cd6c67f6cdd51",
"8a8bd59653861509dd630ffc8e1cd6c67f6cdd51"
] | [
"py/desispec/pipeline/db.py",
"py/desispec/scripts/humidity_corrected_fiberflat.py",
"py/desispec/qa/qa_quicklook.py"
] | [
"#\n# See top-level LICENSE.rst file for Copyright information\n#\n# -*- coding: utf-8 -*-\n\"\"\"\ndesispec.pipeline.db\n===========================\n\nPipeline processing database\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\n\nimport re\nfrom collections import OrderedDict\n\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom desiutil.log import get_logger\n\nfrom .. import io\n\nimport fitsio\n\nfrom .defs import (task_states, task_int_to_state, task_state_to_int, task_name_sep)\n\n\ndef all_task_types():\n \"\"\"Get the list of possible task types that are supported.\n\n Returns:\n list: The list of supported task types.\n\n \"\"\"\n from . import tasks\n from .tasks.base import default_task_chain\n ttypes = [\"fibermap\", \"rawdata\"]\n ttypes.extend(tasks.base.default_task_chain)\n # Insert qadata after cframe\n idx = ttypes.index('cframe')\n ttypes.insert(idx+1, 'qadata')\n return ttypes\n\n\ndef task_sort(tasks):\n \"\"\"Sort a list of tasks by type.\n\n This takes a list of arbitrary tasks and sorts them by type. The result\n is placed in an ordered dictionary of lists in run order.\n\n Args:\n tasks (list): the list of input tasks.\n\n Returns:\n (OrderedDict): ordered dictionary of tasks sorted by type.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n sort = dict()\n ttypes = all_task_types()\n for tp in ttypes:\n sort[tp] = list()\n\n for tsk in tasks:\n sort[task_type(tsk)].append(tsk)\n\n ret = OrderedDict()\n for tp in ttypes:\n if len(sort[tp]) > 0:\n ret[tp] = sort[tp]\n return ret\n\n\ndef all_tasks(night, nside, expid=None):\n \"\"\"Get all possible tasks for a single night.\n\n This uses the filesystem to query the raw data for a particular night and\n return a dictionary containing all possible tasks for each task type. For\n objects which span multiple nights (e.g. 
spectra, redrock), this returns the\n tasks which are touched by the given night.\n\n Args:\n night (str): The night to scan for tasks.\n nside (int): The HEALPix NSIDE value to use.\n expid (int): Only get tasks for this single exposure.\n\n Returns:\n dict: a dictionary whose keys are the task types and where each value\n is a list of task properties.\n\n \"\"\"\n import desimodel.footprint\n\n log = get_logger()\n\n log.debug(\"io.get_exposures night={}\".format(night))\n\n expids = io.get_exposures(night, raw=True)\n\n full = dict()\n for t in all_task_types():\n full[t] = list()\n\n healpix_frames = []\n\n if expid is not None:\n if expid not in expids:\n raise RuntimeError(\"exposure ID {} not valid for night {}\"\\\n .format(expid, night))\n expids = [ expid ]\n\n for ex in sorted(expids):\n\n # get the fibermap for this exposure\n fibermap = io.get_raw_files(\"fibermap\", night, ex)\n\n log.debug(\"read {}\".format(fibermap))\n\n fmdata = io.read_fibermap(fibermap)\n header = fmdata.meta\n\n # fmdata, header = fitsio.read(fibermap, 'FIBERMAP', header=True)\n flavor = header[\"FLAVOR\"].strip().lower()\n if flavor not in [\"arc\",\"flat\",\"science\"] :\n log.error(\"Do not know what do to with fibermap flavor '{}' for file '{}\".format(flavor,fibermap))\n raise ValueError(\"Do not know what do to with fibermap flavor '{}' for file '{}\".format(flavor,fibermap))\n\n fmpix = dict()\n if (flavor != \"arc\") and (flavor != \"flat\"):\n # This will be used to track which healpix pixels are\n # touched by fibers from each spectrograph.\n ra = np.array(fmdata[\"TARGET_RA\"], dtype=np.float64)\n dec = np.array(fmdata[\"TARGET_DEC\"], dtype=np.float64)\n\n # rm NaN (possible depending on versions of fiberassign)\n valid_coordinates = (np.isnan(ra)==False)&(np.isnan(dec)==False)\n\n for spectro in np.unique( fmdata[\"SPECTROID\"] ) :\n ii=np.where(fmdata[\"SPECTROID\"][valid_coordinates]==spectro)[0]\n if ii.size == 0 : continue\n pixels = desimodel.footprint.radec2pix(nside, ra[valid_coordinates][ii], dec[valid_coordinates][ii])\n for pixel in np.unique(pixels) :\n props = dict()\n props[\"night\"] = int(night)\n props[\"expid\"] = int(ex)\n props[\"spec\"] = spectro\n props[\"nside\"] = nside\n props[\"pixel\"] = pixel\n props[\"ntargets\"] = np.sum(pixels==pixel)\n healpix_frames.append(props)\n # all spectro at once\n pixels = np.unique(desimodel.footprint.radec2pix(nside, ra[valid_coordinates], dec[valid_coordinates]))\n for pixel in pixels :\n props = dict()\n props[\"pixel\"] = pixel\n props[\"nside\"] = nside\n props[\"state\"] = \"waiting\"\n exists=False\n for entry in full[\"spectra\"] :\n if entry[\"pixel\"]==props[\"pixel\"] :\n exists=True\n break\n if not exists : full[\"spectra\"].append(props)\n exists=False\n for entry in full[\"redshift\"] :\n if entry[\"pixel\"]==props[\"pixel\"] :\n exists=True\n break\n if not exists : full[\"redshift\"].append(props)\n\n fmprops = dict()\n fmprops[\"night\"] = int(night)\n fmprops[\"expid\"] = int(ex)\n fmprops[\"flavor\"] = flavor\n fmprops[\"state\"] = \"done\"\n\n full[\"fibermap\"].append(fmprops)\n\n rdprops = dict()\n rdprops[\"night\"] = int(night)\n rdprops[\"expid\"] = int(ex)\n rdprops[\"flavor\"] = flavor\n rdprops[\"state\"] = \"done\"\n\n full[\"rawdata\"].append(rdprops)\n\n # Add the preprocessed pixel files\n for band in ['b', 'r', 'z']:\n # need to open the rawdata file to see how many spectros\n # and cameras are there\n for spec in np.unique( fmdata[\"SPECTROID\"] ) :\n pixprops = dict()\n pixprops[\"night\"] = 
int(night)\n pixprops[\"band\"] = band\n pixprops[\"spec\"] = spec\n pixprops[\"expid\"] = int(ex)\n pixprops[\"flavor\"] = flavor\n pixprops[\"state\"] = \"ready\"\n full[\"preproc\"].append(pixprops)\n\n if flavor == \"arc\" :\n # Add the PSF files\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"expid\"] = int(ex)\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"psf\"].append(props)\n\n # Add a PSF night file if does not exist\n exists=False\n for entry in full[\"psfnight\"] :\n if entry[\"night\"]==props[\"night\"] \\\n and entry[\"band\"]==props[\"band\"] \\\n and entry[\"spec\"]==props[\"spec\"] :\n exists=True\n break\n if not exists :\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"psfnight\"].append(props)\n\n if flavor != \"arc\" :\n # Add extractions\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"expid\"] = int(ex)\n props[\"state\"] = \"waiting\" # see defs.task_states\n\n # Add traceshift\n full[\"traceshift\"].append(props)\n\n # Add extractions\n full[\"extract\"].append(props)\n\n if flavor == \"flat\" :\n # Add a fiberflat task\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"expid\"] = int(ex)\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"fiberflat\"].append(props)\n # Add a fiberflat night file if does not exist\n exists=False\n for entry in full[\"fiberflatnight\"] :\n if entry[\"night\"]==props[\"night\"] \\\n and entry[\"band\"]==props[\"band\"] \\\n and entry[\"spec\"]==props[\"spec\"] :\n exists=True\n break\n if not exists :\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"fiberflatnight\"].append(props)\n\n if flavor != \"arc\" and flavor != \"flat\":\n # Add sky\n props = dict()\n props[\"night\"] = int(night)\n props[\"band\"] = band\n props[\"spec\"] = spec\n props[\"expid\"] = int(ex)\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"sky\"].append(props)\n # Add fluxcalib\n full[\"fluxcalib\"].append(props)\n # Add cframe\n full[\"cframe\"].append(props)\n # Add QA\n full[\"qadata\"].append(props)\n\n # Add starfit if does not exist\n exists=False\n for entry in full[\"starfit\"] :\n if entry[\"night\"]==props[\"night\"] \\\n and entry[\"expid\"]==props[\"expid\"] \\\n and entry[\"spec\"]==props[\"spec\"] :\n exists=True\n break\n if not exists :\n props = dict()\n props[\"night\"] = int(night)\n props[\"expid\"] = int(ex)\n props[\"spec\"] = spec\n props[\"state\"] = \"waiting\" # see defs.task_states\n full[\"starfit\"].append(props)\n\n log.debug(\"done\")\n return full , healpix_frames\n\n\ndef check_tasks(tasklist, db=None, inputs=None):\n \"\"\"Check a list of tasks and return their state.\n\n If the database is specified, it is used to check the state of the tasks\n and their dependencies. 
Otherwise the filesystem is checked.\n\n Args:\n tasklist (list): list of tasks.\n db (pipeline.db.DB): The optional database to use.\n inputs (dict): optional dictionary containing the only input\n dependencies that should be considered.\n\n Returns:\n dict: The current state of all tasks.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n states = dict()\n\n if db is None:\n # Check the filesystem to see which tasks are done. Since we don't\n # have a DB, we can only distinguish between \"waiting\", \"ready\", and\n # \"done\" states.\n for tsk in tasklist:\n tasktype = task_type(tsk)\n st = \"waiting\"\n\n # Check dependencies\n deps = task_classes[tasktype].deps(tsk, db=db, inputs=inputs)\n\n if len(deps)==0 :\n # do not set state to ready of tasks with 0 dependencies\n ready = False\n else :\n ready = True\n for k, v in deps.items():\n if not isinstance(v, list):\n v = [ v ]\n for dp in v:\n deptype = task_type(dp)\n depfiles = task_classes[deptype].paths(dp)\n for odep in depfiles:\n if not os.path.isfile(odep):\n ready = False\n break\n if ready:\n st = \"ready\"\n\n done = True\n # Check outputs\n outfiles = task_classes[tasktype].paths(tsk)\n for out in outfiles:\n if not os.path.isfile(out):\n done = False\n break\n if done:\n st = \"done\"\n\n states[tsk] = st\n else:\n states = db.get_states(tasklist)\n\n return states\n\n\nclass DataBase:\n \"\"\"Class for tracking pipeline processing objects and state.\n \"\"\"\n def __init__(self):\n self._conn = None\n return\n\n\n def get_states_type(self, tasktype, tasks):\n \"\"\"Efficiently get the state of many tasks of a single type.\n\n Args:\n tasktype (str): the type of these tasks.\n tasks (list): list of task names.\n\n Returns:\n dict: the state of each task.\n\n \"\"\"\n states = None\n namelist = \",\".join([ \"'{}'\".format(x) for x in tasks ])\n\n log = get_logger()\n log.debug(\"opening db\")\n\n with self.cursor() as cur:\n log.debug(\"selecting in db\")\n cur.execute(\\\n 'select name, state from {} where name in ({})'.format(tasktype,\n namelist))\n st = cur.fetchall()\n log.debug(\"done\")\n states = { x[0] : task_int_to_state[x[1]] for x in st }\n return states\n\n\n def count_task_states(self, tasktype):\n \"\"\"Return a dictionary of how many tasks are in each state\n\n Args:\n tasktype (str): the type of these tasks.\n\n Returns:\n dict: keyed by state, values are number of tasks in that state0\n \"\"\"\n state_count = OrderedDict()\n for state in task_states:\n state_count[state] = 0\n\n with self.cursor() as cur:\n cur.execute( 'select name, state from {}'.format(tasktype))\n for name, intstate in cur.fetchall():\n state_count[task_int_to_state[intstate]] += 1\n\n return state_count\n\n\n def get_states(self, tasks):\n \"\"\"Efficiently get the state of many tasks at once.\n\n Args:\n tasks (list): list of task names.\n\n Returns:\n dict: the state of each task.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n\n # Sort by type\n taskbytype = task_sort(tasks)\n\n # Get state of each type\n states = dict()\n for t, tlist in taskbytype.items():\n states.update(self.get_states_type(t, tlist))\n\n return states\n\n\n def set_states_type(self, tasktype, tasks, postprocessing=True):\n \"\"\"Efficiently get the state of many tasks of a single type.\n\n Args:\n tasktype (str): the type of these tasks.\n tasks (list): list of tuples containing the task name and the\n state to set.\n\n Returns:\n Nothing.\n\n \"\"\"\n from .tasks.base import task_classes\n\n log = get_logger()\n log.debug(\"opening 
db\")\n\n with self.cursor() as cur:\n log.debug(\"updating in db\")\n for tsk in tasks:\n cur.execute(\"update {} set state = {} where name = '{}'\".format(tasktype, task_state_to_int[tsk[1]], tsk[0]))\n if postprocessing and tsk[1]==\"done\" :\n task_classes[tasktype].postprocessing(db=self,name=tsk[0],cur=cur)\n log.debug(\"done\")\n return\n\n\n def set_states(self, tasks):\n \"\"\"Efficiently set the state of many tasks at once.\n\n Args:\n tasks (list): list of tuples containing the task name and the\n state to set.\n\n Returns:\n Nothing.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n # First find the type of each task.\n ttypes = dict()\n for tsk in tasks:\n ttypes[tsk[0]] = task_type(tsk[0])\n\n # Sort tasks into types\n taskbytype = dict()\n for t in all_task_types():\n taskbytype[t] = list()\n for tsk in tasks:\n taskbytype[ttypes[tsk[0]]].append(tsk)\n\n # Process each type\n for t, tlist in taskbytype.items():\n if len(tlist) > 0:\n self.set_states_type(t, tlist)\n return\n\n\n def get_submitted(self, tasks):\n \"\"\"Return the submitted flag for the list of tasks.\n\n Args:\n tasks (list): list of task names.\n\n Returns:\n (dict): the boolean submitted state of each task (True means that\n the task has been submitted).\n\n \"\"\"\n from .tasks.base import task_type\n # Sort by type\n taskbytype = task_sort(tasks)\n\n # Process each type\n submitted = dict()\n for t, tlist in taskbytype.items():\n if (t == \"spectra\") or (t == \"redshift\"):\n raise RuntimeError(\"spectra and redshift tasks do not have submitted flag.\")\n namelist = \",\".join([ \"'{}'\".format(x) for x in tlist ])\n with self.cursor() as cur:\n cur.execute(\\\n 'select name, submitted from {} where name in ({})'.format(t, namelist))\n sb = cur.fetchall()\n submitted.update({ x[0] : x[1] for x in sb })\n return submitted\n\n\n def set_submitted_type(self, tasktype, tasks, unset=False):\n \"\"\"Flag a list of tasks of a single type as submitted.\n\n Args:\n tasktype (str): the type of these tasks.\n tasks (list): list of task names.\n unset (bool): if True, invert the behavior and unset the submitted\n flag for these tasks.\n\n Returns:\n Nothing.\n\n \"\"\"\n val = 1\n if unset:\n val = 0\n with self.cursor() as cur:\n for tsk in tasks:\n cur.execute(\"update {} set submitted = {} where name = '{}'\".format(tasktype, val, tsk))\n return\n\n\n def set_submitted(self, tasks, unset=False):\n \"\"\"Flag a list of tasks as submitted.\n\n Args:\n tasks (list): list of task names.\n unset (bool): if True, invert the behavior and unset the submitted\n flag for these tasks.\n\n Returns:\n Nothing.\n\n \"\"\"\n from .tasks.base import task_type\n # Sort by type\n taskbytype = task_sort(tasks)\n\n # Process each type\n for t, tlist in taskbytype.items():\n if (t == \"spectra\") or (t == \"redshift\"):\n raise RuntimeError(\"spectra and redshift tasks do not have submitted flag.\")\n self.set_submitted_type(tlist, unset=unset)\n return\n\n\n def update(self, night, nside, expid=None):\n \"\"\"Update DB based on raw data.\n\n This will use the usual io.meta functions to find raw exposures. 
For\n each exposure, the fibermap and all following objects will be added to\n the DB.\n\n Args:\n night (str): The night to scan for updates.\n nside (int): The current NSIDE value used for pixel grouping.\n expid (int): Only update the DB for this exposure.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n\n log = get_logger()\n\n alltasks, healpix_frames = all_tasks(night, nside, expid=expid)\n\n with self.cursor() as cur:\n # insert or ignore all healpix_frames\n log.debug(\"updating healpix_frame ...\")\n for entry in healpix_frames:\n # see if we already have this entry\n cmd = \"select exists(select 1 from healpix_frame where (expid = {} and spec = {} and nside = {} and pixel = {} ))\".format(entry[\"expid\"], entry[\"spec\"], entry[\"nside\"], entry[\"pixel\"])\n cur.execute(cmd)\n have_row = cur.fetchone()[0]\n\n if not have_row:\n cur.execute(\"insert into healpix_frame (night,expid,spec,nside,pixel,ntargets,state) values({},{},{},{},{},{},{})\".format(entry[\"night\"],entry[\"expid\"],entry[\"spec\"],entry[\"nside\"],entry[\"pixel\"],entry[\"ntargets\"],0))\n\n # read what is already in db\n tasks_in_db = {}\n for tt in all_task_types():\n cur.execute(\"select name from {}\".format(tt))\n tasks_in_db[tt] = [ x for (x, ) in cur.fetchall()]\n\n for tt in all_task_types():\n log.debug(\"updating {} ...\".format(tt))\n for tsk in alltasks[tt]:\n tname = task_classes[tt].name_join(tsk)\n if tname not in tasks_in_db[tt] :\n log.debug(\"adding {}\".format(tname))\n task_classes[tt].insert(cur, tsk)\n\n return\n\n\n def sync(self, night, specdone=False):\n \"\"\"Update states of tasks based on filesystem.\n\n Go through all tasks in the DB for the given night and determine their\n state on the filesystem. Then update the DB state to match.\n\n Args:\n night (str): The night to scan for updates.\n specdone: If true, set spectra to done if files exist.\n \"\"\"\n from .tasks.base import task_classes\n log = get_logger()\n\n # Get the list of task types excluding spectra and redshifts,\n # which will be handled separately.\n ttypes = [ t for t in all_task_types() if (t != \"spectra\") \\\n and (t != \"redshift\") ]\n\n tasks_in_db = None\n # Grab existing nightly tasks\n with self.cursor() as cur:\n tasks_in_db = {}\n for tt in ttypes:\n cur.execute(\"select name from {} where night = {}\"\\\n .format(tt, night))\n tasks_in_db[tt] = [ x for (x, ) in cur.fetchall() ]\n\n # For each task type, check status WITHOUT the DB, then set state.\n # Save out the cframe states for later use with the healpix_frame table\n cfstates = None\n for tt in ttypes:\n tstates = check_tasks(tasks_in_db[tt], db=None)\n st = [ (x, tstates[x]) for x in tasks_in_db[tt] ]\n self.set_states_type(tt, st)\n if tt == \"cframe\":\n cfstates = tstates.copy()\n\n # Now examine the spectra and redshift files. If the files exist,\n # we assume they are done and completely up to date. 
If the files\n # are not up to date, they must be manually deleted in order for the\n # sync to correctly reconstruct the database state.\n\n pixrows = self.select_healpix_frame({\"night\" : night})\n # First check the existence of the files touched by this night\n spec_exists = dict()\n red_exists = dict()\n for row in pixrows:\n if row[\"pixel\"] in spec_exists:\n continue\n spec_name = task_classes[\"spectra\"].name_join(row)\n red_name = task_classes[\"redshift\"].name_join(row)\n\n # Check spectra outputs\n outfiles = task_classes[\"spectra\"].paths(spec_name)\n spec_exists[row[\"pixel\"]] = True\n for out in outfiles:\n if not os.path.isfile(out):\n spec_exists[row[\"pixel\"]] = False\n break\n\n # Check redshift outputs\n outfiles = task_classes[\"redshift\"].paths(red_name)\n red_exists[row[\"pixel\"]] = True\n for out in outfiles:\n if not os.path.isfile(out):\n red_exists[row[\"pixel\"]] = False\n break\n\n # Now use all this info. Some internal helpers to avoid code\n # duplication\n def set_hpx_frame_0(row, spec, red, cur):\n self.update_healpix_frame_state(row, 0, cur)\n task_classes[\"spectra\"].state_set(\n self, spec, \"waiting\", cur)\n task_classes[\"redshift\"].state_set(\n self, red, \"waiting\", cur)\n return\n\n def set_hpx_frame_1(row, spec, red, cur):\n self.update_healpix_frame_state(row, 1, cur)\n # getready() will do this for us:\n #task_classes[\"spectra\"].state_set(\n # self, spec, \"ready\", cur)\n task_classes[\"redshift\"].state_set(\n self, red, \"waiting\", cur)\n return\n\n def set_hpx_frame_2(row, spec, red, cur):\n self.update_healpix_frame_state(row, 2, cur)\n task_classes[\"spectra\"].state_set(\n self, spec, \"done\", cur)\n # getready() will do this:\n #task_classes[\"redshift\"].state_set(\n # self, red, \"ready\", cur)\n return\n\n def set_hpx_frame_3(row, spec, red, cur):\n self.update_healpix_frame_state(row, 3, cur)\n task_classes[\"spectra\"].state_set(\n self, spec, \"done\", cur)\n task_classes[\"redshift\"].state_set(\n self, red, \"done\", cur)\n return\n\n with self.cursor() as cur:\n for row in pixrows:\n cfdone = True\n cfprops = row.copy()\n for band in [\"b\", \"r\", \"z\"]:\n cfprops[\"band\"] = band\n cf_name = task_classes[\"cframe\"].name_join(cfprops)\n if cfstates[cf_name] != \"done\":\n cfdone = False\n\n spec_name = task_classes[\"spectra\"].name_join(row)\n red_name = task_classes[\"redshift\"].name_join(row)\n\n if (not cfdone) and (not specdone) :\n # The cframes do not exist, so reset the state of the\n # spectra and redshift tasks.\n set_hpx_frame_0(row, spec_name, red_name, cur)\n else:\n # The cframe exists...\n if spec_exists[row[\"pixel\"]]:\n if red_exists[row[\"pixel\"]]:\n # We are all done (state 3)\n set_hpx_frame_3(row, spec_name, red_name, cur)\n else:\n # We are only at state 2\n set_hpx_frame_2(row, spec_name, red_name, cur)\n else:\n # We are at just at state 1\n set_hpx_frame_1(row, spec_name, red_name, cur)\n\n # Update ready state of tasks\n self.getready(night=night)\n\n return\n\n\n def cleanup(self, tasktypes=None, expid=None, cleanfailed=False,\n cleansubmitted=False):\n \"\"\"Reset states of tasks.\n\n Any tasks that are marked as \"running\" will have their\n state reset to \"ready\". This can be called if a job dies before\n completing all tasks.\n\n Args:\n tasktypes (list): if not None, clean up only tasks of these types.\n expid (int): if not None, only clean tasks related to this\n exposure ID. 
Note that tasks which are independent of\n an expid (psfnight, fiberflatnight, spectra, redshift)\n will be ignored if this option is given.\n cleanfailed (bool): if True, also reset failed tasks to ready.\n cleansubmitted (bool): if True, set submitted flag to False.\n\n \"\"\"\n tasks_running = None\n\n alltypes = all_task_types()\n ttypes = None\n if tasktypes is None:\n ttypes = alltypes\n else:\n for tt in tasktypes:\n if tt not in alltypes:\n raise RuntimeError(\"Cannot clean invalid task type {}\"\\\n .format(tt))\n ttypes = tasktypes\n\n # Grab existing nightly tasks\n with self.cursor() as cur:\n tasks_running = {}\n for tt in ttypes:\n hasexpid = (tt not in [\"psfnight\", \"fiberflatnight\", \"spectra\",\n \"redshift\"])\n if hasexpid:\n # This task type has an expid property.\n cmd = None\n if expid is not None:\n # We are cleaning only a single exposure.\n cmd = \"select name from {} where expid = {} and ( state = {}\".format(tt, expid, task_state_to_int[\"running\"])\n else:\n # We are cleaning all exposures for this task type.\n cmd = \"select name from {} where ( state = {}\".format(tt, task_state_to_int[\"running\"])\n if cleanfailed:\n cmd = \"{} or state = {} )\".format(cmd,\n task_state_to_int[\"failed\"])\n else:\n cmd = \"{} )\".format(cmd)\n cur.execute(cmd)\n tasks_running[tt] = [ x for (x, ) in cur.fetchall() ]\n if cleansubmitted:\n if expid is not None:\n cmd = \"update {} set submitted = 0 where expid = {}\".format(tt, expid)\n else:\n cmd = \"update {} set submitted = 0\".format(tt)\n cur.execute(cmd)\n else:\n # This task type has no concept of an exposure ID\n if expid is not None:\n # We specified an exposure ID, which makes no sense\n # for this task type. Skip it.\n tasks_running[tt] = list()\n continue\n else:\n # cleanup this task type.\n cmd = \"select name from {} where ( state = {}\".format(tt, task_state_to_int[\"running\"])\n if cleanfailed:\n cmd = \"{} or state = {} )\".format(cmd,\n task_state_to_int[\"failed\"])\n else:\n cmd = \"{} )\".format(cmd)\n cur.execute(cmd)\n tasks_running[tt] = [ x for (x, ) in cur.fetchall() ]\n if cleansubmitted:\n if (tt != \"spectra\") and (tt != \"redshift\"):\n cmd = \"update {} set submitted = 0\".format(tt)\n cur.execute(cmd)\n\n for tt in ttypes:\n if len(tasks_running[tt]) > 0:\n st = [ (x, \"waiting\") for x in tasks_running[tt] ]\n self.set_states_type(tt, st)\n\n self.getready()\n\n return\n\n\n def getready(self, night=None):\n \"\"\"Update DB, changing waiting to ready depending on status of dependencies .\n\n Args:\n night (str): The night to process.\n\n \"\"\"\n from .tasks.base import task_classes, task_type\n log = get_logger()\n\n # Get the list of task types excluding spectra and redshifts,\n # which will be handled separately.\n ttypes = [ t for t in all_task_types() if (t != \"spectra\") \\\n and (t != \"redshift\") ]\n\n with self.cursor() as cur:\n for tt in ttypes:\n # for each type of task, get the list of tasks in waiting mode\n cmd = \"select name from {} where state = {}\".format(tt, task_state_to_int[\"waiting\"])\n if night is not None:\n cmd = \"{} and night = {}\".format(cmd, night)\n cur.execute(cmd)\n tasks = [ x for (x, ) in cur.fetchall()]\n if len(tasks) > 0:\n log.debug(\"checking {} {} tasks ...\".format(len(tasks),tt))\n for tsk in tasks:\n task_classes[tt].getready(db=self, name=tsk, cur=cur)\n\n for tt in [ \"spectra\" , \"redshift\" ]:\n if tt == \"spectra\":\n required_healpix_frame_state = 1\n # means we have a cframe\n elif tt == \"redshift\":\n 
required_healpix_frame_state = 2\n # means we have an updated spectra file\n\n cur.execute('select nside,pixel from healpix_frame where state = {}'.format(required_healpix_frame_state))\n entries = cur.fetchall()\n for entry in entries :\n log.debug(\"{} of pixel {} is ready to run\".format(tt,entry[1]))\n cur.execute('update {} set state = {} where nside = {} and pixel = {}'.format(tt,task_state_to_int[\"ready\"],entry[0],entry[1]))\n\n log.debug(\"checking waiting {} tasks to see if they are done...\".format(tt))\n cmd = \"select pixel from {} where state = {}\".format(tt, task_state_to_int[\"waiting\"])\n cur.execute(cmd)\n pixels = [ x for (x, ) in cur.fetchall()]\n if len(pixels) > 0:\n log.debug(\"checking {} {} ...\".format(len(pixels),tt))\n if tt == \"spectra\":\n required_healpix_frame_state = 2\n elif tt == \"redshift\":\n required_healpix_frame_state = 3\n for pixel in pixels:\n cur.execute('select pixel from healpix_frame where pixel = {} and state != {}'.format(pixel,required_healpix_frame_state))\n entries = cur.fetchall()\n if len(entries)==0 :\n log.debug(\"{} task of pixel {} is done\".format(tt,pixel))\n cur.execute('update {} set state = {} where pixel = {}'.format(tt,task_state_to_int[\"done\"],pixel))\n return\n\n\n def update_healpix_frame_state(self, props, state, cur):\n if \"expid\" in props :\n # update from a cframe\n cmd = \"update healpix_frame set state = {} where expid = {} and spec = {} and state = {}\".format(state,props[\"expid\"],props[\"spec\"],props[\"state\"])\n else :\n # update from a spectra or redshift task\n cmd = \"update healpix_frame set state = {} where nside = {} and pixel = {} and state = {}\".format(state,props[\"nside\"],props[\"pixel\"],props[\"state\"])\n\n if cur is None :\n with self.cursor() as cur:\n cur.execute(cmd)\n else :\n cur.execute(cmd)\n return\n\n\n def select_healpix_frame(self, props):\n res = []\n with self.cursor() as cur:\n cmd = \"select * from healpix_frame where \"\n first=True\n for k in props.keys() :\n if not first : cmd += \" and \"\n first=False\n cmd += \"{}={}\".format(k,props[k])\n cur.execute(cmd)\n entries = cur.fetchall()\n # convert that to list of dictionaries\n for entry in entries :\n tmp = dict()\n for i, k in enumerate([\"night\", \"expid\", \"spec\", \"nside\",\n \"pixel\", \"ntargets\", \"state\"]):\n tmp[k] = entry[i]\n res.append(tmp)\n return res\n\n\n def create_healpix_frame_table(self) :\n with self.cursor() as cur:\n cmd = \"create table healpix_frame (night integer, expid integer, spec integer, nside integer, pixel integer, ntargets integer, state integer, unique(expid, spec, nside, pixel))\"\n cur.execute(cmd)\n\n return\n\n\nclass DataBaseSqlite(DataBase):\n \"\"\"Pipeline database using sqlite3 as the backend.\n\n Args:\n path (str): the filesystem path of the database to open. If None, then\n a temporary database is created in memory.\n mode (str): if \"r\", the database is open in read-only mode. 
If \"w\",\n the database is open in read-write mode and created if necessary.\n\n \"\"\"\n def __init__(self, path, mode):\n super(DataBaseSqlite, self).__init__()\n\n self._path = path\n self._mode = mode\n\n create = True\n if (self._path is not None) and os.path.exists(self._path):\n create = False\n\n if self._mode == 'r' and create:\n raise RuntimeError(\"cannot open a non-existent DB in read-only \"\n \" mode\")\n\n self._connstr = None\n\n # This timeout is in seconds\n self._busytime = 1000\n\n # Journaling options\n self._journalmode = \"persist\"\n self._syncmode = \"normal\"\n\n if create:\n self.initdb()\n return\n\n\n def _open(self):\n import sqlite3\n\n if self._path is None:\n # We are opening an in-memory DB\n self._conn = sqlite3.connect(\":memory:\")\n else:\n try:\n # only python3 supports uri option\n if self._mode == 'r':\n self._connstr = 'file:{}?mode=ro'.format(self._path)\n else:\n self._connstr = 'file:{}?mode=rwc'.format(self._path)\n self._conn = sqlite3.connect(self._connstr, uri=True,\n timeout=self._busytime)\n except:\n self._conn = sqlite3.connect(self._path, timeout=self._busytime)\n if self._mode == 'w':\n # In read-write mode, set the journaling\n self._conn.execute(\"pragma journal_mode={}\"\\\n .format(self._journalmode))\n self._conn.execute(\"pragma synchronous={}\".format(self._syncmode))\n # Other tuning options\n self._conn.execute(\"pragma temp_store=memory\")\n self._conn.execute(\"pragma page_size=4096\")\n self._conn.execute(\"pragma cache_size=4000\")\n return\n\n\n def _close(self):\n del self._conn\n self._conn = None\n return\n\n\n @contextmanager\n def cursor(self):\n import sqlite3\n self._open()\n cur = self._conn.cursor()\n cur.execute(\"begin transaction\")\n try:\n yield cur\n except sqlite3.DatabaseError as err:\n log = get_logger()\n log.error(err)\n cur.execute(\"rollback\")\n raise err\n else:\n try:\n cur.execute(\"commit\")\n except sqlite3.OperationalError:\n #- sqlite3 in py3.5 can't commit a read-only finished transaction\n pass\n finally:\n del cur\n self._close()\n\n\n def initdb(self):\n \"\"\"Create DB tables for all tasks if they do not exist.\n \"\"\"\n # check existing tables\n tables_in_db = None\n with self.cursor() as cur:\n cur.execute(\"select name FROM sqlite_master WHERE type='table'\")\n tables_in_db = [x for (x, ) in cur.fetchall()]\n\n # Create a table for every task type\n from .tasks.base import task_classes, task_type\n for tt, tc in task_classes.items():\n if tt not in tables_in_db:\n tc.create(self)\n\n if \"healpix_frame\" not in tables_in_db:\n self.create_healpix_frame_table()\n return\n\n\nclass DataBasePostgres(DataBase):\n \"\"\"Pipeline database using PostgreSQL as the backend.\n\n Args:\n host (str): The database server.\n port (int): The connection port.\n dbname (str): The database to connect.\n user (str): The user name for the connection. The password should be\n stored in the ~/.pgpass file.\n schema (str): The schema within the database. If this is specified,\n then the database is assumed to exist. 
Otherwise the schema is\n computed from a hash of the production location and will be\n created.\n authorize (str): If creating the schema, this is the list of\n additional roles that should be granted access.\n\n \"\"\"\n def __init__(self, host, port, dbname, user, schema=None, authorize=None):\n super(DataBasePostgres, self).__init__()\n\n self._schema = schema\n self._user = user\n self._dbname = dbname\n self._host = host\n self._port = port\n self._authorize = authorize\n\n self._proddir = os.path.abspath(io.specprod_root())\n\n create = False\n if self._schema is None:\n create = True\n self._schema = self._compute_schema()\n\n if create:\n self.initdb()\n return\n\n\n def _compute_schema(self):\n import hashlib\n md = hashlib.md5()\n md.update(self._proddir.encode())\n return \"pipe_{}\".format(md.hexdigest())\n\n\n def _open(self):\n import psycopg2 as pg2\n import time\n import numpy.random\n\n # Open connection. If psycopg2 raises an exception, then sleep\n # for a random time interval and keep trying.\n maxtry = 10\n ntry = 0\n while True:\n try:\n self._conn = pg2.connect(host=self._host, port=self._port,\n user=self._user, dbname=self._dbname)\n except pg2.OperationalError as err:\n log = get_logger()\n log.debug(\"PostgreSQL connection failed with '{}', will sleep and retry\".format(err))\n if ntry > maxtry:\n log.error(err)\n break\n numpy.random.seed(int(time.time()))\n sec = numpy.random.uniform() * 3.0\n time.sleep(sec)\n ntry += 1\n else:\n break\n\n return\n\n\n def _close(self):\n del self._conn\n self._conn = None\n return\n\n\n @property\n def schema(self):\n return self._schema\n\n\n def _have_schema(self, cur):\n com = \"select exists(select 1 from pg_namespace where nspname = '{}')\".format(self._schema)\n cur.execute(com)\n return cur.fetchone()[0]\n\n\n @contextmanager\n def cursor(self, skipcheck=False):\n import psycopg2\n self._open()\n cur = self._conn.cursor()\n if not skipcheck:\n have_schema = self._have_schema(cur)\n if not have_schema:\n raise RuntimeError(\"Postgres schema for production {} does\"\n \" not exist. Make sure you create the production with\"\n \" postgres options and source the top-level setup.sh\"\n \" file.\".format(self._proddir))\n cur.execute(\"set search_path to '{}'\".format(self._schema))\n cur.execute(\"begin transaction\")\n try:\n yield cur\n except psycopg2.DatabaseError as err:\n log = get_logger()\n log.error(err)\n cur.execute(\"rollback\")\n raise err\n else:\n cur.execute(\"commit\")\n finally:\n del cur\n self._close()\n\n\n def initdb(self):\n \"\"\"Create DB tables for all tasks if they do not exist.\n \"\"\"\n log = get_logger()\n # Check existence of the schema. 
If we were not passed the schema\n # in the constructor, it means that we are creating a new prod, so any\n # existing schema should be wiped and recreated.\n tables_in_db = None\n with self.cursor(skipcheck=True) as cur:\n # See if our schema already exists...\n have_schema = self._have_schema(cur)\n if have_schema:\n # We need to wipe it first\n com = \"drop schema {} cascade\".format(self._schema)\n log.debug(com)\n cur.execute(com)\n com = \"create schema {} authorization {}\"\\\n .format(self._schema, self._user)\n log.debug(com)\n cur.execute(com)\n\n if self._authorize is not None:\n com = \"grant usage on schema {} to {}\"\\\n .format(self._schema, self._authorize)\n log.debug(com)\n cur.execute(com)\n\n com = \"alter default privileges in schema {} grant select on tables to {}\".format(self._schema, self._authorize)\n log.debug(com)\n cur.execute(com)\n\n com = \"alter default privileges in schema {} grant select,usage on sequences to {}\".format(self._schema, self._authorize)\n log.debug(com)\n cur.execute(com)\n\n com = \"alter default privileges in schema {} grant execute on functions to {}\".format(self._schema, self._authorize)\n log.debug(com)\n cur.execute(com)\n\n com = \"alter default privileges in schema {} grant usage on types to {}\".format(self._schema, self._authorize)\n log.debug(com)\n cur.execute(com)\n\n # Create a table of information about this prod\n com = \"create table {}.info (key text unique, val text)\"\\\n .format(self._schema)\n log.debug(com)\n cur.execute(com)\n com = \"insert into {}.info values ('{}', '{}')\"\\\n .format(self._schema, \"path\", self._proddir)\n log.debug(com)\n cur.execute(com)\n if 'USER' in os.environ:\n com = \"insert into {}.info values ('{}', '{}')\"\\\n .format(self._schema, \"created_by\", os.environ['USER'])\n log.debug(com)\n cur.execute(com)\n\n # check existing tables\n cur.execute(\"select tablename from pg_tables where schemaname = '{}'\".format(self.schema))\n tables_in_db = [x for (x, ) in cur.fetchall()]\n\n # Create a table for every task type\n from .tasks.base import task_classes, task_type\n for tt, tc in task_classes.items():\n if tt not in tables_in_db:\n tc.create(self)\n\n if \"healpix_frame\" not in tables_in_db:\n self.create_healpix_frame_table()\n\n return\n\n\ndef load_db(dbstring, mode=\"w\", user=None):\n \"\"\"Load a database from a connection string.\n\n This instantiates either an sqlite or postgresql database using a string.\n If this string begins with \"postgresql:\", then it is taken to be the\n information needed to connect to a postgres server. Otherwise it is\n assumed to be a filesystem path to use with sqlite. The mode is only\n meaningful when using sqlite. 
Postgres permissions are controlled through\n the user permissions.\n\n Args:\n dbstring (str): either a filesystem path (sqlite) or a colon-separated\n string of connection properties in the form\n \"postresql:<host>:<port>:<dbname>:<user>:<schema>\".\n mode (str): for sqlite, the mode.\n user (str): for postgresql, an alternate user name for opening the DB.\n This can be used to connect as a user with read-only access.\n\n Returns:\n DataBase: a derived database class of the appropriate type.\n\n \"\"\"\n if re.search(r\"postgresql:\", dbstring) is not None:\n props = dbstring.split(\":\")\n host = props[1]\n port = int(props[2])\n dbname = props[3]\n username = props[4]\n if user is not None:\n username = user\n schema = None\n if len(props) > 5:\n # Our DB string also contains the name of an existing\n # schema.\n schema = props[5]\n return DataBasePostgres(host=host, port=port, dbname=dbname,\n user=username, schema=schema)\n else:\n return DataBaseSqlite(dbstring, mode)\n",
"\nfrom __future__ import absolute_import, division\n\nimport os\nimport fitsio\nimport argparse\nimport numpy as np\n\nfrom desiutil.log import get_logger\n\nfrom desispec.io import read_fiberflat,write_fiberflat,findfile,read_frame\nfrom desispec.io.fiberflat_vs_humidity import get_humidity,read_fiberflat_vs_humidity\nfrom desispec.calibfinder import CalibFinder\nfrom desispec.fiberflat_vs_humidity import compute_humidity_corrected_fiberflat\n\ndef parse(options=None):\n parser = argparse.ArgumentParser(description=\"Compute a fiberflat corrected for variations with humidity.\")\n\n parser.add_argument('-i','--infile', type = str, default = None, required=True,\n help = 'path of DESI exposure frame fits file')\n parser.add_argument('--fiberflat', type = str, default = None, required=True,\n help = 'path of DESI fiberflat fits file')\n parser.add_argument('--use-sky-fibers', action = 'store_true',\n help = 'use sky fibers to improve the correction')\n parser.add_argument('-o','--outfile', type = str, default = None, required=True,\n help = 'path of output fiberflar file')\n args = None\n if options is None:\n args = parser.parse_args()\n else:\n args = parser.parse_args(options)\n return args\n\n\ndef main(args) :\n\n log = get_logger()\n\n # just read frame header in case we don't need to do anything\n frame_header = fitsio.read_header(args.infile,\"FLUX\")\n\n if args.use_sky_fibers :\n # need full frame to adjust correction on data\n frame = read_frame(args.infile)\n else :\n frame = None\n\n cfinder = CalibFinder([frame_header])\n if not cfinder.haskey(\"FIBERFLATVSHUMIDITY\"):\n log.info(\"No information on fiberflat vs humidity for camera {}, simply link the input fiberflat\".format(frame_header[\"CAMERA\"]))\n if not os.path.islink(args.outfile) :\n relpath=os.path.relpath(args.fiberflat,os.path.dirname(args.outfile))\n os.symlink(relpath,args.outfile)\n return 0\n\n # read fiberflat\n calib_fiberflat = read_fiberflat(args.fiberflat)\n\n # read mean fiberflat vs humidity\n filename = cfinder.findfile(\"FIBERFLATVSHUMIDITY\")\n log.info(f\"reading {filename}\")\n mean_fiberflat_vs_humidity , humidity_array, ffh_wave, ffh_header = read_fiberflat_vs_humidity(filename)\n assert(np.allclose(calib_fiberflat.wave,ffh_wave))\n\n # now need to find the humidity for this frame and for this fiberflat\n night=frame_header[\"NIGHT\"]\n camera=frame_header[\"CAMERA\"]\n current_frame_humidity =get_humidity(night=night,expid=frame_header[\"EXPID\"],camera=camera)\n log.info(\"humidity during current exposure={:.2f}\".format(current_frame_humidity))\n\n\n\n # we can compute the correction now that we have everything in hand\n improved_fiberflat = compute_humidity_corrected_fiberflat(calib_fiberflat, mean_fiberflat_vs_humidity , humidity_array, current_frame_humidity, frame = frame)\n\n # add telemetry humidity for the dome flats for the record\n # try to read the night exposure table to get the list of flats\n first_expid = calib_fiberflat.header[\"EXPID\"]\n calib_night = calib_fiberflat.header[\"NIGHT\"]\n calib_humidity=[ get_humidity(calib_night,first_expid,camera) ]\n fiberflat_expid=[ first_expid]\n for expid in range(first_expid+1,first_expid+40) :\n filename=findfile(\"raw\",calib_night,expid)\n if not os.path.isfile(filename): continue\n head=fitsio.read_header(filename,1)\n if not \"OBSTYPE\" in head.keys() or head[\"OBSTYPE\"]!=\"FLAT\" :\n break\n fiberflat_expid.append(expid)\n calib_humidity.append(get_humidity(calib_night,expid,camera))\n log.debug(\"calib 
expids={}\".format(fiberflat_expid))\n log.debug(\"calib humidities={}\".format(calib_humidity))\n calib_humidity=np.mean(calib_humidity)\n if np.isnan(calib_humidity) :\n log.warning(\"missing humidity info for fiber flat, use link to input\")\n calib_humidity=0.\n else :\n log.info(\"mean humidity during calibration exposures={:.2f}\".format(calib_humidity))\n fit_humidity = improved_fiberflat.header[\"CALFHUM\"]\n if np.abs(fit_humidity-calib_humidity)>10 :\n message=\"large difference between best fit humidity during dome flats ({:.1f}) and value from telemetry ({:.1f})\".format(fit_humidity,calib_humidity)\n if np.abs(fit_humidity-calib_humidity)>20 :\n log.error(message)\n raise RuntimeError(message)\n log.warning(message)\n\n improved_fiberflat.header[\"CALTHUM\"] = (calib_humidity,\"dome flat humidity from telemetry\")\n\n # write it\n write_fiberflat(args.outfile,improved_fiberflat)\n log.info(\"wrote humidity corrected flat {}\".format(args.outfile))\n\n return 0\n",
"\"\"\" \nMonitoring algorithms for Quicklook pipeline\n\"\"\"\n\nimport os,sys\nimport datetime\nimport numpy as np\nimport scipy.ndimage\nimport yaml\nimport re\nimport astropy.io.fits as fits\nimport desispec.qa.qa_plots_ql as plot\nimport desispec.quicklook.qlpsf\nimport desispec.qa.qa_plots_ql as fig\nfrom desispec.quicklook.qas import MonitoringAlg, QASeverity\nfrom desispec.quicklook import qlexceptions\nfrom desispec.quicklook import qllogger\nfrom desispec.quicklook.palib import resample_spec\nfrom astropy.time import Time\nfrom desispec.qa import qalib\nfrom desispec.io import qa, read_params\nfrom desispec.io.meta import findfile\nfrom desispec.io.sky import read_sky\nfrom desispec.image import Image as im\nfrom desispec.frame import Frame as fr\nfrom desispec.preproc import parse_sec_keyword\nfrom desispec.util import runcmd\nfrom desispec.qproc.qframe import QFrame\nfrom desispec.fluxcalibration import isStdStar\nfrom desitarget.targetmask import desi_mask\nimport astropy\nfrom astropy.io import fits\n\nqlog=qllogger.QLLogger(\"QuickLook\",0)\nlog=qlog.getlog()\n\ndef get_inputs(*args,**kwargs):\n '''\n Get inputs required for each QA\n '''\n inputs={}\n inputs[\"camera\"]=kwargs[\"camera\"]\n\n if \"paname\" not in kwargs: inputs[\"paname\"]=None\n else: inputs[\"paname\"]=kwargs[\"paname\"]\n\n if \"ReferenceMetrics\" in kwargs: inputs[\"refmetrics\"]=kwargs[\"ReferenceMetrics\"]\n else: inputs[\"refmetrics\"]=None\n\n inputs[\"amps\"]=False\n if \"amps\" in kwargs: inputs[\"amps\"]=kwargs[\"amps\"]\n\n if \"param\" in kwargs: inputs[\"param\"]=kwargs[\"param\"]\n else: inputs[\"param\"]=None\n\n inputs[\"psf\"]=None\n if \"PSFFile\" in kwargs: inputs[\"psf\"]=kwargs[\"PSFFile\"]\n\n inputs[\"fibermap\"]=None\n if \"FiberMap\" in kwargs: inputs[\"fibermap\"]=kwargs[\"FiberMap\"]\n\n if \"Peaks\" in kwargs: inputs[\"Peaks\"]=kwargs[\"Peaks\"]\n\n if \"qafile\" in kwargs: inputs[\"qafile\"] = kwargs[\"qafile\"]\n else: inputs[\"qafile\"]=None\n\n if \"qafig\" in kwargs: inputs[\"qafig\"]=kwargs[\"qafig\"]\n else: inputs[\"qafig\"]=None\n\n if \"plotconf\" in kwargs: inputs[\"plotconf\"]=kwargs[\"plotconf\"]\n else: inputs[\"plotconf\"]=None\n\n if \"hardplots\" in kwargs: inputs[\"hardplots\"]=kwargs[\"hardplots\"]\n else: inputs[\"hardplots\"]=False\n\n return inputs\n\ndef get_image(filetype,night,expid,camera,specdir):\n '''\n Make image object from file if in development mode\n '''\n #- Find correct file for QA\n imagefile = findfile(filetype,int(night),int(expid),camera,specprod_dir=specdir)\n\n #- Create necessary input for desispec.image\n image = fits.open(imagefile)\n pix = image['IMAGE'].data\n ivar = image['IVAR'].data\n mask = image['MASK'].data\n readnoise = image['READNOISE'].data\n meta = image['IMAGE'].header\n\n #- Create image object\n imageobj = im(pix,ivar,mask=mask,readnoise=readnoise,camera=camera,meta=meta)\n return imageobj\n\ndef get_frame(filetype,night,expid,camera,specdir):\n '''\n Make frame object from file if in development mode\n '''\n #- Find correct file for QA\n framefile = findfile(filetype,int(night),int(expid),camera,specprod_dir=specdir)\n\n #- Create necessary input for desispec.frame\n frame = fits.open(framefile)\n wave = frame['WAVE'].data\n flux = frame['FLUX'].data\n ivar = frame['IVAR'].data\n fibermap = frame['FIBERMAP'].data\n fibers = fibermap['FIBER']\n meta = frame['FLUX'].header\n\n #- Create frame object\n frameobj = QFrame(wave,flux,ivar,fibers=fibers,fibermap=fibermap,meta=meta)\n\n return frameobj\n\n\nclass 
Check_HDUs(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"CHECKHDUS\"\n import astropy\n rawtype=astropy.io.fits.hdu.hdulist.HDUList\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"CHECKHDUS\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"CHECKHDUS_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n MonitoringAlg.__init__(self,name,rawtype,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n\n if kwargs[\"singleqa\"] == 'Check_HDUs':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n rawfile = findfile('raw',int(night),int(expid),camera,rawdata_dir=kwargs[\"rawdir\"])\n raw = fits.open(rawfile)\n else: raw=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(raw,inputs)\n\n def run_qa(self,raw,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n\n rawimage=raw[camera.upper()].data\n header=raw[camera.upper()].header\n \n retval={}\n retval[\"EXPID\"]= '{0:08d}'.format(header[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"FLAVOR\"] = header[\"FLAVOR\"]\n #SE: quicklook to crash when a mismatched config file with the one in fits header\n from desispec.scripts import quicklook\n \n args=quicklook.parse() \n ad,fl = args.config.split(\"qlconfig_\")\n flvr = fl.split(\".yaml\")[0]\n #if flvr in ['darksurvey','graysurvey','brightsurvey']: flvr = 'science'\n if header[\"FLAVOR\"] == 'science': \n flvr = flvr.split(\"survey\")[0]\n if (header[\"FLAVOR\"] == flvr or header[\"FLAVOR\"] == format(flvr.upper()) or flvr == 'test'):\n log.info(\"The correct configuration file is being used!\")\n else:\n log.critical(\"Wrong configuration file is being used!\")\n sys.exit(\"Wrong configuration file! use the one for \"+str(header[\"FLAVOR\"]))\n\n elif (header[\"FLAVOR\"] == flvr or flvr == 'test'): \n log.info(\"The correct configuration file is being used!\")\n else: \n log.critical(\"Wrong configuration file is being used!\")\n sys.exit(\"Wrong configuration file! 
use the one for \"+str(header[\"FLAVOR\"]))\n \n\n if retval[\"FLAVOR\"] == 'science':\n retval[\"PROGRAM\"] = header[\"PROGRAM\"]\n else:\n pass\n retval[\"NIGHT\"] = header[\"NIGHT\"]\n kwargs=self.config['kwargs']\n \n\n HDUstat = \"NORMAL\" \n EXPNUMstat = \"NORMAL\" \n \n param['EXPTIME'] = header[\"EXPTIME\"]\n\n if camera != header[\"CAMERA\"]:\n log.critical(\"The raw FITS file is missing camera \"+camera)\n sys.exit(\"QuickLook Abort: CHECK THE RAW FITS FILE :\"+rawfile)\n HDUstat = 'ALARM'\n \n if header[\"EXPID\"] != kwargs['expid'] : \n log.critical(\"The raw FITS file is missing camera \"+camera)\n sys.exit(\"QuickLook Abort: EXPOSURE NUMBER DOES NOT MATCH THE ONE IN THE HEADER\") \n EXPNUMstat = \"ALARM\"\n \n \n \n if header[\"FLAVOR\"] != \"science\" :\n \n retval[\"METRICS\"] = {\"CHECKHDUS_STATUS\":HDUstat,\"EXPNUM_STATUS\":EXPNUMstat}\n\n else :\n retval[\"METRICS\"] = {\"CHECKHDUS_STATUS\":HDUstat,\"EXPNUM_STATUS\":EXPNUMstat}\n param['SEEING'] = header[\"SEEING\"]\n param['AIRMASS'] = header[\"AIRMASS\"]\n param['PROGRAM'] = header[\"PROGRAM\"]\n \n \n retval[\"PARAMS\"] = param \n \n if 'INHERIT' in header and header['INHERIT']:\n h0 = raw[0].header\n for key in h0:\n if key not in header:\n header[key] = h0[key]\n \n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Trace_Shifts(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"XYSHIFTS\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"XYSHIFTS\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"XYSHIFTS_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n if \"XYSHIFTS_WARN_RANGE\" in parms and \"XYSHIFTS_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"XYSHIFTS_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"XYSHIFTS_NORMAL_RANGE\"]),QASeverity.NORMAL)]\n MonitoringAlg.__init__(self,name,im,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Trace_Shifts':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n image = get_image('preproc',night,expid,camera,kwargs[\"specdir\"])\n else: image=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(image,inputs)\n\n def run_qa(self,image,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n \n #- qa dictionary \n retval={}\n retval[\"PANAME\" ]= paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = expid = '{0:08d}'.format(image.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = image.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if image.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n \n \n retval[\"NIGHT\"] = night = image.meta[\"NIGHT\"]\n \n\n if param is None:\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n # create xytraceset object\n \n from desispec.calibfinder import findcalibfile\n from desispec.xytraceset import XYTraceSet\n #SE: all next lines till the dashed line exist just so that we get the psf name without hardcoding any address -> there must be a better way\n rawfile = findfile('raw',int(night),int(expid),camera,rawdata_dir=os.environ[\"QL_SPEC_DATA\"])\n hdulist=fits.open(rawfile)\n primary_header=hdulist[0].header\n camera_header =hdulist[camera].header\n hdulist.close()\n #--------------------------------------------------------\n psffile=findcalibfile([camera_header,primary_header],\"PSF\")\n psf=fits.open(psffile)\n xcoef=psf['XTRACE'].data\n ycoef=psf['YTRACE'].data\n wavemin=psf[\"XTRACE\"].header[\"WAVEMIN\"]\n wavemax=psf[\"XTRACE\"].header[\"WAVEMAX\"]\n npix_y=image.meta['NAXIS2']\n psftrace=XYTraceSet(xcoef,ycoef,wavemin,wavemax,npix_y=npix_y)\n\n # compute dx and dy\n from desispec.trace_shifts import compute_dx_from_cross_dispersion_profiles as compute_dx\n from desispec.trace_shifts import compute_dy_using_boxcar_extraction as compute_dy\n fibers=np.arange(500) #RS: setting nfibers to 500 for now\n ox,oy,odx,oex,of,ol=compute_dx(xcoef,ycoef,wavemin,wavemax,image,fibers=fibers)\n x_for_dy,y_for_dy,ody,ey,fiber_for_dy,wave_for_dy=compute_dy(psftrace,image,fibers)\n\n # return average shifts in x and y\n dx=np.mean(odx)\n dy=np.mean(ody)\n xyshift=np.array([dx,dy])\n\n retval[\"METRICS\"]={\"XYSHIFTS\":xyshift}\n retval[\"PARAMS\"]=param\n\n #get_outputs(qafile,qafig,retval,'plot_traceshifts')\n# outfile = qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Bias_From_Overscan(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"BIAS_OVERSCAN\"\n\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"BIAS_AMP\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"BIAS_AMP_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in 
r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"BIAS_WARN_RANGE\" in parms and \"BIAS_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"BIAS_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"BIAS_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe \n\n MonitoringAlg.__init__(self,name,im,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n \n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Bias_From_Overscan':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n image = get_image('preproc',night,expid,camera,kwargs[\"specdir\"])\n else: image=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(image,inputs)\n\n def run_qa(self,image,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n retval={}\n retval[\"EXPID\"] = '{0:08d}'.format(image.meta[\"EXPID\"])\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"CAMERA\"] = camera\n retval[\"NIGHT\"] = image.meta[\"NIGHT\"]\n retval[\"FLAVOR\"] = flavor = image.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if image.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n retval[\"EXPTIME\"] = image.meta[\"EXPTIME\"]\n \n\n if retval[\"FLAVOR\"] == 'arc':\n pass\n else:\n retval[\"FLAVOR\"] = image.meta[\"FLAVOR\"]\n retval[\"NIGHT\"] = image.meta[\"NIGHT\"]\n kwargs=self.config['kwargs']\n \n #SE: this would give the desispec version stored in DEPVER07 key of the raw simulated fits file :0.16.0.dev1830\n #RS: don't look for this if not using simulated files, differences in simulated headers vs. 
data headers cause this to crash\n if flavor == 'science':\n param['FITS_DESISPEC_VERSION'] = image.meta['DEPVER07'] \n import desispec\n from desispec import quicklook\n param['PROC_DESISPEC_VERSION']= desispec.__version__\n param['PROC_QuickLook_VERSION']= quicklook.__qlversion__\n \n \n if 'INHERIT' in image.meta and image.meta['INHERIT']:\n\n h0 = image.meta\n #h0 = header\n for key in h0:\n if key not in image.meta:\n image.meta[key] = h0[key]\n\n #RS: look for values in simulated data, if not found try finding data values\n try:\n bias_overscan = [image.meta['OVERSCN1'],image.meta['OVERSCN2'],image.meta['OVERSCN3'],image.meta['OVERSCN4']]\n except:\n bias_overscan = [image.meta['OVERSCNA'],image.meta['OVERSCNB'],image.meta['OVERSCNC'],image.meta['OVERSCND']]\n\n bias = np.mean(bias_overscan)\n\n if param is None:\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n \n\n retval[\"PARAMS\"] = param\n\n if amps:\n bias_amps=np.array(bias_overscan)\n retval[\"METRICS\"]={'BIAS_AMP':bias_amps}\n else:\n #retval[\"METRICS\"]={'BIAS':bias,\"DIFF1SIG\":diff1sig,\"DIFF2SIG\":diff2sig,\"DIFF3SIG\":diff3sig,\"DATA5SIG\":data5sig,\"BIAS_ROW\":mean_row}\n retval[\"METRICS\"]={}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_bias_overscan(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Get_RMS(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"RMS\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"NOISE_AMP\" \n status=kwargs['statKey'] if 'statKey' in kwargs else \"NOISE_AMP_STATUS\" \n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n \n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n \n if \"NOISE_WARN_RANGE\" in parms and \"NOISE_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"NOISE_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"NOISE_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe \n \n MonitoringAlg.__init__(self,name,im,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Get_RMS':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n image = get_image('preproc',night,expid,camera,kwargs[\"specdir\"])\n else: image=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(image,inputs)\n\n def run_qa(self,image,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n retval={}\n retval[\"EXPID\"] = '{0:08d}'.format(image.meta[\"EXPID\"])\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = flavor = image.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if flavor == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = image.meta[\"NIGHT\"]\n\n # return rms values in rms/sqrt(exptime)\n #rmsccd=qalib.getrms(image.pix/np.sqrt(image.meta[\"EXPTIME\"])) #- should we add dark current and/or readnoise to this as well?\n #rmsccd = np.mean([image.meta['RDNOISE1'],image.meta['RDNOISE2'],image.meta['RDNOISE3'],image.meta['RDNOISE4']]) #--> \"NOISE\":rmsccd\n \n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\") \n\n retval[\"PARAMS\"] = param\n\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # SE: this section is moved from BIAS_FROM_OVERSCAN to header\n\n data=[]\n row_data_amp1=[]\n row_data_amp2=[]\n row_data_amp3=[]\n row_data_amp4=[]\n bias_patnoise=[]\n #bias_overscan=[] \n #RS: loop through amps based on header info\n loop_amps = get_amp_ids(image.meta)\n exptime=image.meta[\"EXPTIME\"]\n if exptime == 0.:\n exptime = 1.\n for kk in loop_amps:\n sel=parse_sec_keyword(image.meta['BIASSEC'+kk])\n #- Obtain counts/second in bias region\n# pixdata=image[sel]/header[\"EXPTIME\"]\n pixdata=image.pix[sel]/exptime\n if kk == '1' or kk == 'A':\n for i in range(pixdata.shape[0]):\n row_amp1=pixdata[i]\n row_data_amp1.append(row_amp1)\n if kk == '2' or kk == 'B':\n \n for i in range(pixdata.shape[0]):\n row_amp2=pixdata[i]\n row_data_amp2.append(row_amp2)\n if kk == '3' or kk == 'C':\n \n for i in range(pixdata.shape[0]):\n row_amp3=pixdata[i]\n row_data_amp3.append(row_amp3)\n if kk == '4' or kk == 'D':\n \n for i in range(pixdata.shape[0]):\n row_amp4=pixdata[i]\n row_data_amp4.append(row_amp4)\n #- Compute statistics of the bias region that only reject\n # the 0.5% of smallest and largest values. 
(from sdssproc) \n isort=np.sort(pixdata.ravel())\n nn=isort.shape[0]\n bias=np.mean(isort[int(0.005*nn) : int(0.995*nn)])\n #bias_overscan.append(bias)\n data.append(isort)\n\n #- Combine data from each row per amp and take average\n # BIAS_ROW = mean_row \n median_row_amp1=[]\n for i in range(len(row_data_amp1)):\n median=np.median(row_data_amp1[i])\n median_row_amp1.append(median)\n \n rms_median_row_amp1= np.std(median_row_amp1)\n try:\n noise1 = image.meta['RDNOISE1']\n except:\n noise1 = image.meta['OBSRDNA']\n bias_patnoise.append(rms_median_row_amp1/noise1)\n \n median_row_amp2=[]\n for i in range(len(row_data_amp2)):\n median=np.median(row_data_amp2[i])\n median_row_amp2.append(median)\n \n rms_median_row_amp2= np.std(median_row_amp2)\n try:\n noise2 = image.meta['RDNOISE2']\n except:\n noise2 = image.meta['OBSRDNB']\n bias_patnoise.append(rms_median_row_amp2/noise2)\n \n \n median_row_amp3=[]\n for i in range(len(row_data_amp3)):\n median=np.median(row_data_amp3[i])\n median_row_amp3.append(median)\n \n rms_median_row_amp3= np.std(median_row_amp3)\n try:\n noise3 = image.meta['RDNOISE3']\n except:\n noise3 = image.meta['OBSRDNC']\n bias_patnoise.append(rms_median_row_amp3/noise3)\n \n median_row_amp4=[]\n for i in range(len(row_data_amp4)):\n median=np.median(row_data_amp4[i])\n median_row_amp4.append(median)\n \n rms_median_row_amp4= np.std(median_row_amp4)\n try:\n noise4 = image.meta['RDNOISE4']\n except:\n noise4 = image.meta['OBSRDND']\n bias_patnoise.append(rms_median_row_amp4/noise4)\n\n\n #- Calculate upper and lower bounds of 1, 2, and 3 sigma \n full_data=np.concatenate((data[0],data[1],data[2],data[3])).ravel()\n sig1_lo = np.percentile(full_data,50.-(param['PERCENTILES'][0]/2.))\n sig1_hi = np.percentile(full_data,50.+(param['PERCENTILES'][0]/2.))\n sig2_lo = np.percentile(full_data,50.-(param['PERCENTILES'][1]/2.))\n sig2_hi = np.percentile(full_data,50.+(param['PERCENTILES'][1]/2.))\n sig3_lo = np.percentile(full_data,50.-(param['PERCENTILES'][2]/2.))\n sig3_hi = np.percentile(full_data,50.+(param['PERCENTILES'][2]/2.))\n\n #- Find difference between upper and lower sigma bounds\n # DIFF1SIG: The number of counts separating the 1 sigma percentiles in the noise distribution (from the overscan region)\n diff1sig = sig1_hi - sig1_lo\n # DIFF2SIG: The number of counts separating 2 or 3 sigma in the noise distribution\n diff2sig = sig2_hi - sig2_lo\n diff3sig = sig3_hi - sig3_lo\n\n #-DATA5SIG: number of pixels more than 5 sigma below the bias level\n sig5_value = np.percentile(full_data,3e-5)\n data5sig = len(np.where(full_data <= sig5_value)[0])\n \n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n if amps:\n rms_over_amps = [noise1,noise2,noise3,noise4]\n try:\n rms_amps = [image.meta['OBSRDN1'],image.meta['OBSRDN2'],image.meta['OBSRDN3'],image.meta['OBSRDN4']]\n except:\n rms_amps = [image.meta['OBSRDNA'],image.meta['OBSRDNB'],image.meta['OBSRDNC'],image.meta['OBSRDND']]\n retval[\"METRICS\"]={\"NOISE_AMP\":np.array(rms_amps),\"NOISE_OVERSCAN_AMP\":np.array(rms_over_amps),\"DIFF1SIG\":diff1sig,\"DIFF2SIG\":diff2sig,\"DATA5SIG\":data5sig,\"BIAS_PATNOISE\":bias_patnoise}#,\"NOISE_ROW\":noise_row,\"EXPNUM_WARN\":expnum,\"NOISE_OVER\":rmsover\n else:\n retval[\"METRICS\"]={\"DIFF1SIG\":diff1sig,\"DIFF2SIG\":diff2sig,\"DATA5SIG\":data5sig, \"BIAS_PATNOISE\":bias_patnoise} # Dropping \"NOISE_OVER\":rmsover,\"NOISE_ROW\":noise_row,\"EXPNUM_WARN\":expnum\n\n\n 
###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_RMS(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval \n\n def get_default_config(self):\n return {}\n\n\nclass Calc_XWSigma(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"XWSIGMA\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"XWSIGMA\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"XWSIGMA_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"XWSIGMA_WARN_RANGE\" in parms and \"XWSIGMA_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"XWSIGMA_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"XWSIGMA_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,im,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Calc_XWSigma':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n image = get_image('preproc',night,expid,camera,kwargs[\"specdir\"])\n else: image=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(image,inputs)\n\n def run_qa(self,image,inputs):\n import desispec.quicklook.qlpsf\n from scipy.optimize import curve_fit\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n psffile=inputs[\"psf\"]\n psf=desispec.quicklook.qlpsf.PSF(psffile)\n amps=inputs[\"amps\"]\n allpeaks=inputs[\"Peaks\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat() \n retval[\"EXPID\"] = '{0:08d}'.format(image.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = image.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if image.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=program=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = image.meta[\"NIGHT\"]\n\n if param is None:\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n \n retval[\"PARAMS\"] = param\n #- Ensure that the QA will run even if 500 spectra aren't present\n if fibermap['FIBER'].shape[0] >= 500:\n fibers = 500\n else:\n fibers = fibermap['FIBER'].shape[0]\n\n #- Define number of pixels to be fit\n dp=param['PIXEL_RANGE']/2\n #- Get wavelength ranges around peaks\n peaks=allpeaks['{}_PEAKS'.format(camera[0].upper())]\n #- Maximum allowed fit sigma value\n maxsigma=param['MAX_SIGMA']\n\n xfails=[]\n wfails=[]\n xsigma=[]\n wsigma=[]\n xsigma_amp1=[]\n wsigma_amp1=[]\n xsigma_amp2=[]\n wsigma_amp2=[]\n xsigma_amp3=[]\n wsigma_amp3=[]\n xsigma_amp4=[]\n wsigma_amp4=[]\n \n for fiber in range(fibers):\n \n xs = -1 # SE: this prevents crash in \"XWSIGMA_AMP\" for when xs or ws is empty list -> try b9 of 20200515/00000001 \n ws = -1\n xsig=[]\n wsig=[]\n for peak in range(len(peaks)):\n #- Use psf information to convert wavelength to pixel values\n xpixel=desispec.quicklook.qlpsf.PSF.x(psf,ispec=fiber,wavelength=peaks[peak])[0][0]\n ypixel=desispec.quicklook.qlpsf.PSF.y(psf,ispec=fiber,wavelength=peaks[peak])[0][0]\n #- Find x and y pixel values around sky lines\n xpix_peak=np.arange(int(xpixel-dp),int(xpixel+dp),1)\n ypix_peak=np.arange(int(ypixel-dp),int(ypixel+dp),1)\n #- Fit gaussian to counts in pixels around sky line\n #- If any values fail, store x/w, wavelength, and fiber\n try:\n xpopt,xpcov=curve_fit(qalib.gauss,np.arange(len(xpix_peak)),image.pix[int(ypixel),xpix_peak])\n xs=np.abs(xpopt[2])\n if xs <= maxsigma:\n xsig.append(xs)\n else:\n xfail=[fiber,peaks[peak]]\n xfails.append(xfail)\n except:\n xfail=[fiber,peaks[peak]]\n xfails.append(xfail)\n pass\n try:\n wpopt,wpcov=curve_fit(qalib.gauss,np.arange(len(ypix_peak)),image.pix[ypix_peak,int(xpixel)])\n ws=np.abs(wpopt[2])\n if ws <= maxsigma:\n wsig.append(ws)\n else:\n wfail=[fiber,peaks[peak]]\n wfails.append(wfail)\n except:\n wfail=[fiber,peaks[peak]]\n wfails.append(wfail)\n pass\n\n #- Excluding fibers 240-260 in case some fibers overlap amps\n #- Excluding peaks in the center of image in case peak overlaps two amps\n #- This shouldn't cause a significant loss of information \n \n if amps:\n\n if fibermap['FIBER'][fiber]<240:\n if ypixel < 2000.:\n xsigma_amp1.append(xs)\n wsigma_amp1.append(ws)\n if ypixel > 2100.:\n xsigma_amp3.append(xs)\n wsigma_amp3.append(ws)\n\n if fibermap['FIBER'][fiber]>260:\n if ypixel < 2000.:\n xsigma_amp2.append(xs)\n wsigma_amp2.append(ws)\n if ypixel > 2100.:\n xsigma_amp4.append(xs)\n wsigma_amp4.append(ws)\n \n\n if len(xsig)!=0:\n xsigma.append(np.mean(xsig))\n if len(wsig)!=0:\n wsigma.append(np.mean(wsig))\n\n if fibermap['FIBER'].shape[0]<260:\n xsigma_amp2=[]\n xsigma_amp4=[]\n wsigma_amp2=[]\n wsigma_amp4=[]\n\n #- Calculate desired output metrics \n xsigma_med=np.median(np.array(xsigma))\n wsigma_med=np.median(np.array(wsigma))\n xsigma_amp=np.array([np.median(xsigma_amp1),np.median(xsigma_amp2),np.median(xsigma_amp3),np.median(xsigma_amp4)])\n wsigma_amp=np.array([np.median(wsigma_amp1),np.median(wsigma_amp2),np.median(wsigma_amp3),np.median(wsigma_amp4)])\n xwfails=np.array([xfails,wfails])\n\n\n #SE: mention the example here when the next lines are ineffective and when they are effective in removing the NaN from XWSIGMA_AMP--> XWSIGMA itself no longer includes any NaN value. 
As we both know, this is not the way to properly deal with NaNs -->let's see if switching to non-scipy fuction would bring about a better solution\n if len(xsigma)==0:\n xsigma=[param['XWSIGMA_{}_REF'.format(program.upper())][0]]\n\n if len(wsigma)==0:\n wsigma=[param['XWSIGMA_{}_REF'.format(program.upper())][1]]\n\n #- Combine metrics for x and w\n xwsigma_fib=np.array((xsigma,wsigma)) #- (2,nfib)\n xwsigma_med=np.array((xsigma_med,wsigma_med)) #- (2)\n xwsigma_amp=np.array((xsigma_amp,wsigma_amp))\n\n if amps:\n #if len(xsigma_amp1)==0 :\n #xsigma_amp1 = [param['XWSIGMA_REF'][0]]\n #if len(xsigma_amp2)==0 :\n #xsigma_amp2 = [param['XWSIGMA_REF'][0]]\n #if len(xsigma_amp3)==0 :\n #xsigma_amp3 = [param['XWSIGMA_REF'][0]]\n #if len(xsigma_amp4)==0 :\n #xsigma_amp4 = [param['XWSIGMA_REF'][0]]\n\n #if len(wsigma_amp1)==0 :\n #wsigma_amp1 = [param['XWSIGMA_REF'][1]]\n #if len(wsigma_amp2)==0 :\n #wsigma_amp2 = [param['XWSIGMA_REF'][1]]\n #if len(wsigma_amp3)==0 :\n #wsigma_amp3 = [param['XWSIGMA_REF'][1]]\n #if len(wsigma_amp4)==0 :\n #wsigma_amp4 = [param['XWSIGMA_REF'][1]]\n\n retval[\"METRICS\"]={\"XWSIGMA\":xwsigma_med,\"XWSIGMA_FIB\":xwsigma_fib,\"XWSIGMA_AMP\":xwsigma_amp}#,\"XWSHIFT\":xwshift,\"XWSHIFT_AMP\":xwshift_amp,\"XWSIGMA_SHIFT\": xwsigma_shift}\n else:\n retval[\"METRICS\"]={\"XWSIGMA\":xwsigma_med,\"XWSIGMA_FIB\":xwsigma_fib}#,\"XWSHIFT\":xwshift,\"XWSIGMA_SHIFT\": xwsigma_shift}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_XWSigma(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n \n def get_default_config(self):\n return {}\n\n\nclass Count_Pixels(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"COUNTPIX\"\n from desispec.image import Image as im\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"LITFRAC_AMP\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"LITFRAC_AMP_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n \n if \"LITFRAC_AMP_WARN_RANGE\" in parms and \"LITFRAC_AMP_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"LITFRAC_AMP_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"LITFRAC_AMP_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,im,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Count_Pixels':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n image = get_image('preproc',night,expid,camera,kwargs[\"specdir\"])\n else: image=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(image,inputs)\n\n def run_qa(self,image,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(image.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = image.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if image.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n \n retval[\"NIGHT\"] = image.meta[\"NIGHT\"]\n \n\n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n\n\n retval[\"PARAMS\"] = param\n\n #- get the counts for each amp\n npix_amps=[]\n litfrac_amps=[]\n\n from desispec.preproc import parse_sec_keyword\n #RS: loop through amps based on header info\n try:\n header_test=parse_sec_keyword(image.meta['CCDSEC1'])\n loop_amps=['1','2','3','4']\n except:\n loop_amps=['A','B','C','D']\n #- get amp boundary in pixels\n for kk in loop_amps:\n ampboundary=parse_sec_keyword(image.meta[\"CCDSEC\"+kk])\n try:\n rdnoise_thisamp=image.meta[\"RDNOISE\"+kk]\n except:\n rdnoise_thisamp=image.meta[\"OBSRDN\"+kk]\n npix_thisamp= image.pix[ampboundary][image.pix[ampboundary] > param['CUTPIX'] * rdnoise_thisamp].size #- no of pixels above threshold\n npix_amps.append(npix_thisamp)\n size_thisamp=image.pix[ampboundary].size\n litfrac_thisamp=round(np.float64(npix_thisamp)/size_thisamp,2) #- fraction of pixels getting light above threshold\n litfrac_amps.append(litfrac_thisamp)\n\t# retval[\"METRICS\"]={\"NPIX_AMP\",npix_amps,'LITFRAC_AMP': litfrac_amps}\n retval[\"METRICS\"]={\"LITFRAC_AMP\": litfrac_amps}\t\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_countpix(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass CountSpectralBins(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"COUNTBINS\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"NGOODFIB\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"NGOODFIB_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n 
kwargs[\"REFERENCE\"]=r[key]\n\n if \"NGOODFIB_WARN_RANGE\" in parms and \"NGOODFIB_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"NGOODFIB_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"NGOODFIB_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe \n\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'CountSpectralBins':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('frame',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n psf=inputs[\"psf\"]\n qafile=inputs[\"qafile\"]\n qafig=None #inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n #- qa dictionary \n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n\n grid=np.gradient(frame.wave)\n if not np.all(grid[0]==grid[1:]): \n log.debug(\"grid_size is NOT UNIFORM\")\n\n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n\n retval[\"PARAMS\"] = param\n #- get the effective readnoise for the fibers \n #- readnoise per fib = readnoise per pix * sqrt(box car width)* sqrt(no. of bins in the amp) * binsize/pix size scale\n nspec=frame.nspec\n rdnoise_fib=np.zeros(nspec)\n if nspec > 250: #- upto 250 - amp 1 and 3, beyond that 2 and 4\n rdnoise_fib[:250]=[(frame.meta['RDNOISE1']+frame.meta['RDNOISE3'])*np.sqrt(5.)*np.sqrt(frame.flux.shape[1]/2)*frame.meta['WAVESTEP']/0.5]*250\n rdnoise_fib[250:]=[(frame.meta['RDNOISE2']+frame.meta['RDNOISE4'])*np.sqrt(5.)*np.sqrt(frame.flux.shape[1]/2)*frame.meta['WAVESTEP']/0.5]*(nspec-250)\n else:\n rdnoise_fib=[(frame.meta['RDNOISE1']+frame.meta['RDNOISE3'])*np.sqrt(5.)*np.sqrt(frame.flux.shape[1]/2)*frame.meta['WAVESTEP']/0.5]*nspec\n threshold=[param['CUTBINS']*ii for ii in rdnoise_fib]\n #- compare the flux sum to threshold\n \n totcounts=frame.flux.sum(axis=1)\n passfibers=np.where(totcounts>threshold)[0] \n ngoodfibers=passfibers.shape[0]\n good_fibers=np.array([0]*frame.nspec)\n good_fibers[passfibers]=1 #- assign 1 for good fiber\n\n #- leaving the amps granularity needed for caching as defunct. 
If needed in future, this needs to be propagated through.\n amps=False\n leftmax=None\n rightmax=None\n bottommax=None\n topmin=None\n\n if amps: #- leaving this for now\n leftmax,rightmin,bottommax,topmin = qalib.fiducialregion(frame,psf)\n retval[\"LEFT_MAX_FIBER\"]=int(leftmax)\n retval[\"RIGHT_MIN_FIBER\"]=int(rightmin)\n retval[\"BOTTOM_MAX_WAVE_INDEX\"]=int(bottommax)\n retval[\"TOP_MIN_WAVE_INDEX\"]=int(topmin)\n\n retval[\"METRICS\"]={\"NGOODFIB\": ngoodfibers, \"GOOD_FIBERS\": good_fibers, \"TOTCOUNT_FIB\": totcounts}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_countspectralbins(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Sky_Continuum(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"SKYCONT\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"SKYCONT\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"SKYCONT_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"SKYCONT_WARN_RANGE\" in parms and \"SKYCONT_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"SKYCONT_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"SKYCONT_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Sky_Continuum':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('fframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n #- qa dictionary \n retval={}\n retval[\"PANAME\" ]= paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n\n camera=frame.meta[\"CAMERA\"]\n\n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n\n wrange1=param[\"{}_CONT\".format(camera[0].upper())][0]\n wrange2=param[\"{}_CONT\".format(camera[0].upper())][1]\n\n retval[\"PARAMS\"] = param\n\n skyfiber, contfiberlow, contfiberhigh, meancontfiber, skycont = qalib.sky_continuum(\n frame, wrange1, wrange2)\n \n \n retval[\"METRICS\"]={\"SKYFIBERID\": skyfiber.tolist(), \"SKYCONT\":skycont, \"SKYCONT_FIBER\":meancontfiber}\n \n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_sky_continuum(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Sky_Rband(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"SKYRBAND\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"SKYRBAND\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"SKYRBAND_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"SKYRBAND_WARN_RANGE\" in parms and \"SKYRBAND_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"SKYRBAND_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"SKYRBAND_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is found for this QA\")\n sys.exit(\"Update the configuration file for the parameters\")\n\n if not 
self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Sky_Rband':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('cframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n #- qa dictionary \n retval={}\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n retval[\"PANAME\" ]= paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n\n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=program=fibmap[1].header['PROGRAM']\n\n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n\n retval[\"PARAMS\"] = param\n\n #- Find sky fibers\n objects=frame.fibermap['OBJTYPE']\n skyfibers=np.where(objects==\"SKY\")[0]\n\n flux=frame.flux\n wave=frame.wave\n #- Set appropriate filter and zero point\n if camera[0].lower() == 'r':\n responsefilter='decam2014-r'\n\n #- Get filter response information from speclite\n try:\n from pkg_resources import resource_filename\n responsefile=resource_filename('speclite','data/filters/{}.ecsv'.format(responsefilter))\n #- Grab wavelength and response information from file\n rfile=np.genfromtxt(responsefile)\n rfile=rfile[1:] # remove wavelength/response labels\n rwave=np.zeros(rfile.shape[0])\n response=np.zeros(rfile.shape[0])\n for i in range(rfile.shape[0]):\n rwave[i]=10.*rfile[i][0] # convert to angstroms\n response[i]=rfile[i][1]\n except:\n log.critical(\"Could not find filter response file, can't compute spectral magnitudes\")\n\n #- Convole flux with response information \n res=np.zeros(frame.wave.shape)\n for w in range(response.shape[0]):\n if w >= 1 and w<= response.shape[0]-2:\n ind=np.abs(frame.wave-rwave[w]).argmin()\n lo=(rwave[w]-rwave[w-1])/2\n wlo=rwave[w]-lo\n indlo=np.abs(frame.wave-wlo).argmin()\n hi=(rwave[w+1]-rwave[w])/2\n whi=rwave[w]+hi\n indhi=np.abs(frame.wave-whi).argmin()\n res[indlo:indhi]=response[w]\n skyrflux=res*flux[skyfibers]\n\n #- Calculate integrals for sky fibers\n integrals=[]\n for ii in range(len(skyrflux)):\n integrals.append(qalib.integrate_spec(frame.wave,skyrflux[ii]))\n integrals=np.array(integrals)\n\n #- Convert calibrated flux to fiber magnitude\n specmags=np.zeros(integrals.shape)\n specmags[integrals>0]=21.1-2.5*np.log10(integrals[integrals>0]/frame.meta[\"EXPTIME\"])\n avg_skyrband=np.mean(specmags[specmags>0])\n\n retval[\"METRICS\"]={\"SKYRBAND_FIB\":specmags,\"SKYRBAND\":avg_skyrband}\n\n #- If not in r channel, set reference and metrics to zero\n else:\n 
retval[\"PARAMS\"][\"SKYRBAND_{}_REF\".format(program.upper())]=[0.]\n zerospec=np.zeros_like(skyfibers)\n zerorband=0.\n retval[\"METRICS\"]={\"SKYRBAND_FIB\":zerospec,\"SKYRBAND\":zerorband}\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Sky_Peaks(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"PEAKCOUNT\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"PEAKCOUNT\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"PEAKCOUNT_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"PEAKCOUNT_WARN_RANGE\" in parms and \"PEAKCOUNT_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"PEAKCOUNT_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"PEAKCOUNT_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Sky_Peaks':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('fframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n from desispec.qa.qalib import sky_peaks\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n allpeaks=inputs[\"Peaks\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n\n # Parameters\n if param is None:\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n \n param['B_PEAKS']=allpeaks['B_PEAKS']\n param['R_PEAKS']=allpeaks['R_PEAKS']\n param['Z_PEAKS']=allpeaks['Z_PEAKS']\n\n #nspec_counts, sky_counts, tgt_counts, tgt_counts_rms = sky_peaks(param, frame)\n nspec_counts, sky_counts, skyfibers, nskyfib= sky_peaks(param, frame)\n rms_nspec = np.std(nspec_counts)#qalib.getrms(nspec_counts)\n rms_skyspec = np.std(sky_counts)#qalib.getrms(sky_counts) \n \n sumcount_med_sky=np.median(sky_counts)\n\n retval[\"PARAMS\"] = param\n\n fiberid=frame.fibermap['FIBER']\n\n retval[\"METRICS\"]={\"FIBERID\":fiberid,\"PEAKCOUNT\":sumcount_med_sky,\"PEAKCOUNT_NOISE\":rms_skyspec,\"PEAKCOUNT_FIB\":nspec_counts,\"SKYFIBERID\":skyfibers, \"NSKY_FIB\":nskyfib}#,\"PEAKCOUNT_TGT\":tgt_counts,\"PEAKCOUNT_TGT_NOISE\":tgt_counts_rms}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_sky_peaks(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Sky_Residual(MonitoringAlg):\n \"\"\" \n Use offline sky_residual function to calculate sky residuals\n \"\"\"\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"RESIDUAL\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"RESIDNOISE\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"RESID_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"RESID_WARN_RANGE\" in parms and \"RESID_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"RESID_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"RESID_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe \n\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Sky_Residual':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('sframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n skymodel=args[1]\n\n return self.run_qa(frame,skymodel,inputs)\n\n def run_qa(self,frame,skymodel,inputs):\n from desispec.sky import qa_skysub\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n \n if skymodel is None:\n raise IOError(\"Must have skymodel to find residual. It can't be None\")\n #- return values\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=fibmap[1].header['PROGRAM']\n \n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n \n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n retval[\"PARAMS\"] = param\n\n qadict=qalib.sky_resid(param,frame,skymodel,quick_look=True)\n\n retval[\"METRICS\"] = {}\n for key in qadict.keys():\n retval[\"METRICS\"][key] = qadict[key]\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_residuals(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\n\nclass Integrate_Spec(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"INTEG\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"DELTAMAG_TGT\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"DELTAMAG_TGT_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"DELTAMAG_WARN_RANGE\" in parms and \"DELTAMAG_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"DELTAMAG_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"DELTAMAG_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Integrate_Spec':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('cframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n flux=frame.flux\n ivar=frame.ivar\n wave=frame.wave\n\n retval={}\n retval[\"PANAME\" ] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n retval[\"EXPID\"] = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=program=fibmap[1].header['PROGRAM']\n\n retval[\"NIGHT\"] = frame.meta[\"NIGHT\"]\n\n flux=frame.flux\n wave=frame.wave\n #- Grab magnitudes for appropriate filter\n if camera[0].lower() == 'b':\n band = 'G'\n responsefilter='decam2014-g'\n elif camera[0].lower() == 'r':\n band = 'R'\n responsefilter='decam2014-r'\n elif camera[0].lower() == 'z':\n band = 'Z'\n responsefilter='decam2014-z'\n else:\n raise ValueError(\"Camera {} not in b, r, or z channels...\".format(camera))\n\n #- Find fibers per target type\n elgfibers = np.where((frame.fibermap['DESI_TARGET'] & desi_mask.ELG) != 0)[0]\n lrgfibers = np.where((frame.fibermap['DESI_TARGET'] & desi_mask.LRG) != 0)[0]\n qsofibers = np.where((frame.fibermap['DESI_TARGET'] & desi_mask.QSO) != 0)[0]\n bgsfibers = np.where((frame.fibermap['DESI_TARGET'] & desi_mask.BGS_ANY) != 0)[0]\n mwsfibers = np.where((frame.fibermap['DESI_TARGET'] & desi_mask.MWS_ANY) != 0)[0]\n stdfibers = np.where(isStdStar(frame.fibermap))[0]\n skyfibers = np.where(frame.fibermap['OBJTYPE'] == 'SKY')[0]\n\n #- Setup target fibers per program\n if program == 'dark':\n objfibers = [elgfibers,lrgfibers,qsofibers,stdfibers]\n elif program == 'gray':\n objfibers = [elgfibers,stdfibers]\n elif program == 'bright':\n objfibers = [bgsfibers,mwsfibers,stdfibers]\n\n magnitudes=np.zeros(frame.nspec)\n key = 'FLUX_'+band\n magnitudes = 22.5 - 2.5*np.log10(frame.fibermap[key])\n #- Set objects with zero flux to 30 mag\n zeroflux = np.where(frame.fibermap[key]==0.)[0]\n magnitudes[zeroflux] = 30.\n\n #- Get filter response information from speclite\n try:\n from pkg_resources import resource_filename\n responsefile=resource_filename('speclite','data/filters/{}.ecsv'.format(responsefilter))\n #- Grab wavelength and response information from file\n rfile=np.genfromtxt(responsefile)\n rfile=rfile[1:] # remove wavelength/response labels\n rwave=np.zeros(rfile.shape[0])\n response=np.zeros(rfile.shape[0])\n for i in range(rfile.shape[0]):\n rwave[i]=10.*rfile[i][0] # convert to 
angstroms\n response[i]=rfile[i][1]\n except:\n log.critical(\"Could not find filter response file, can't compute spectral magnitudes\")\n\n #- Convole flux with response information \n res=np.zeros(frame.wave.shape)\n for w in range(response.shape[0]):\n if w >= 1 and w<= response.shape[0]-2:\n ind=np.abs(frame.wave-rwave[w]).argmin()\n lo=(rwave[w]-rwave[w-1])/2\n wlo=rwave[w]-lo\n indlo=np.abs(frame.wave-wlo).argmin()\n hi=(rwave[w+1]-rwave[w])/2\n whi=rwave[w]+hi\n indhi=np.abs(frame.wave-whi).argmin()\n res[indlo:indhi]=response[w]\n rflux=res*flux\n\n #- Calculate integrals for all fibers\n integrals=[]\n for ii in range(len(rflux)):\n integrals.append(qalib.integrate_spec(frame.wave,rflux[ii]))\n integrals=np.array(integrals)\n\n #- Convert calibrated flux to spectral magnitude\n specmags=np.zeros(integrals.shape)\n specmags[integrals>0]=21.1-2.5*np.log10(integrals[integrals>0]/frame.meta[\"EXPTIME\"])\n\n #- Save number of negative flux fibers\n negflux=np.where(specmags==0.)[0]\n num_negflux=len(negflux)\n\n #- Set sky and negative flux fibers to 30 mag\n specmags[skyfibers]=30.\n specmags[negflux]=30.\n\n #- Calculate integrals for each target type\n tgt_specmags=[]\n for T in objfibers:\n if num_negflux != 0:\n T=np.array(list(set(T) - set(negflux)))\n obj_integ=[]\n for ii in range(len(rflux[T])):\n obj_integ.append(qalib.integrate_spec(frame.wave,rflux[T][ii]))\n obj_integ = np.array(obj_integ)\n\n #- Convert calibrated flux to spectral magnitude per terget type\n #- Using ST magnitude system because frame flux is in units ergs/s/cm**2/A\n obj_specmags = np.zeros(obj_integ.shape)\n obj_specmags[obj_integ>0] = 21.1-2.5*np.log10(obj_integ[obj_integ>0]/frame.meta[\"EXPTIME\"])\n tgt_specmags.append(obj_specmags)\n\n tgt_specmags = np.array(tgt_specmags)\n\n #- Fiber magnitudes per target type\n tgt_mags=[]\n for obj in objfibers:\n if num_negflux != 0:\n obj=np.array(list(set(obj) - set(negflux)))\n tgt_mags.append(magnitudes[obj])\n\n tgt_mags = np.array(tgt_mags)\n\n #- Calculate delta mag, remove sky/negative flux fibers first\n remove_fib = np.array(list(set(skyfibers) | set(negflux)))\n nosky_specmags = np.delete(specmags,remove_fib)\n nosky_mags = np.delete(magnitudes,remove_fib)\n deltamag = nosky_specmags - nosky_mags\n\n #- Calculate avg delta mag per target type\n deltamag_tgt = tgt_specmags - tgt_mags \n deltamag_tgt_avg=[]\n for tgt in range(len(deltamag_tgt)):\n deltamag_tgt_avg.append(np.mean(deltamag_tgt[tgt]))\n\n if param is None:\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n\n retval[\"PARAMS\"] = param\n\n fiberid=frame.fibermap['FIBER']\n\n #SE: should not have any nan or inf at this point but let's keep it for safety measures here \n retval[\"METRICS\"]={\"FIBERID\":fiberid,\"NFIBNOTGT\":num_negflux,\"SPEC_MAGS\":specmags, \"DELTAMAG\":np.nan_to_num(deltamag), \"STD_FIBERID\":stdfibers, \"DELTAMAG_TGT\":np.nan_to_num(deltamag_tgt_avg)}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_integral(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval \n\n def get_default_config(self):\n return {}\n \nclass Calculate_SNR(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"SNR\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"FIDSNR_TGT\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"FIDSNR_TGT_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"FIDSNR_TGT_WARN_RANGE\" in parms and \"FIDSNR_TGT_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"FIDSNR_TGT_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"FIDSNR_TGT_NORMAL_RANGE\"]),QASeverity.NORMAL)]# sorted by most severe to least severe\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Calculate_SNR':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n frame = get_frame('sframe',night,expid,camera,kwargs[\"specdir\"])\n else: frame=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(frame,inputs)\n\n def run_qa(self,frame,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n if isinstance(frame,QFrame):\n frame = frame.asframe()\n\n #- return values\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = expid = '{0:08d}'.format(frame.meta[\"EXPID\"])\n retval[\"CAMERA\"] = camera\n retval[\"FLAVOR\"] = frame.meta[\"FLAVOR\"]\n kwargs=self.config['kwargs']\n \n if frame.meta[\"FLAVOR\"] == 'science':\n fibmap =fits.open(kwargs['FiberMap'])\n retval[\"PROGRAM\"]=program=fibmap[1].header['PROGRAM']\n\n objlist=[]\n if program == 'dark':\n objlist = ['ELG','LRG','QSO','STAR']\n elif program == 'gray':\n objlist = ['ELG','STAR']\n elif program == 'bright':\n objlist = ['BGS','MWS','STAR']\n\n retval[\"NIGHT\"] = night = frame.meta[\"NIGHT\"]\n\n ra = fibermap[\"TARGET_RA\"]\n dec = fibermap[\"TARGET_DEC\"]\n\n #- select band for mag, using DECAM_R if present\n if param is None:\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n\n fidboundary=None\n\n qadict,fitsnr = qalib.orig_SNRFit(frame,night,camera,expid,param,fidboundary=fidboundary)\n\n #- Check for inf and nans in missing magnitudes for json support of QLF #TODO review this later\n\n for obj in range(len(qadict[\"SNR_MAG_TGT\"])):\n for mag in [qadict[\"SNR_MAG_TGT\"][obj]]:\n k=np.where(~np.isfinite(mag))[0]\n if len(k) > 0:\n log.warning(\"{} objects have no or unphysical magnitudes\".format(len(k)))\n mag=np.array(mag)\n mag[k]=26. 
#- Putting 26, so as to make sure within reasonable range for plots.\n retval[\"METRICS\"] = qadict\n retval[\"PARAMS\"] = param\n\n rescut=param[\"RESIDUAL_CUT\"]\n sigmacut=param[\"SIGMA_CUT\"]\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_SNR(retval,qafig,objlist,fitsnr,rescut,sigmacut,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\nclass Check_Resolution(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"CHECKARC\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"CHECKARC\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"CHECKARC_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"CHECKARC_WARN_RANGE\" in parms and \"CHECKARC_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"CHECKARC_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"CHECKARC_NORMAL_RANGE\"]),QASeverity.NORMAL)]\n\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. 
Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Check_Resolution':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n #- Finding psf file for QA\n #file_psf = get_psf('psf',night,expid,camera,kwargs[\"specdir\"])\n else: file_psf = args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(file_psf,inputs)\n\n def run_qa(self,file_psf,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n plotconf=inputs[\"plotconf\"]\n hardplots=inputs[\"hardplots\"]\n\n retval={}\n retval['PANAME'] = paname\n kwargs=self.config['kwargs']\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"EXPID\"] = '{:08d}'.format(kwargs['expid'])\n retval[\"CAMERA\"] = camera\n retval[\"PROGRAM\"] = 'ARC'\n retval[\"FLAVOR\"] = 'arc'\n retval[\"NIGHT\"] = kwargs['night']\n \n\n # file_psf.ycoeff is not the wsigma_array.\n # FIX later.TEST QA with file_psf.ycoeff\n \n wsigma_array = file_psf.ysig_vs_wave_traceset._coeff\n p0 = wsigma_array[0:, 0:1]\n p1 = wsigma_array[0:, 1:2]\n p2 = wsigma_array[0:, 2:3]\n\n #- Save array of ones and zeros for good/no fits\n nfib = len(p0)\n nofit = np.where(p0 == 0.)[0]\n allfibs=np.ones(nfib)\n allfibs[nofit] = 0.\n #- Total number of fibers fit used as scalar metric\n ngoodfits = len(np.where(allfibs == 1.)[0])\n\n # Medians of Legendre Coeffs to be used as 'Model'\n medlegpolcoef = np.median(wsigma_array,axis = 0)\n\n wsigma_rms = np.sqrt(np.mean((wsigma_array - medlegpolcoef)**2,axis = 0))\n\n # Check how many of each parameter are outside of +- 2 RMS of the median.\n toperror = np.array([medlegpolcoef[val] + 2*wsigma_rms[val] for val in [0,1,2]])\n bottomerror = np.array([medlegpolcoef[val] - 2*wsigma_rms[val] for val in [0,1,2]])\n\n badparamrnum0 = list(np.where(np.logical_or(p0>toperror[0], p0<bottomerror[0]))[0])\n badparamrnum1 = list(np.where(np.logical_or(p1>toperror[1], p1<bottomerror[1]))[0])\n badparamrnum2 = list(np.where(np.logical_or(p2>toperror[2], p2<bottomerror[2]))[0])\n nbadparam = np.array([len(badparamrnum0), len(badparamrnum1), len(badparamrnum2)])\n\n retval[\"METRICS\"]={\"CHECKARC\":ngoodfits, \"GOODPSFS\":allfibs, \"CHECKPSF\":nbadparam}\n retval[\"DATA\"]={\"LPolyCoef0\":p0, \"LPolyCoef1\":p1, \"LPolyCoef2\":p2}\n\n if param is None:\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n \n retval[\"PARAMS\"] = param\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n# if qafile is not None:\n# outfile=qa.write_qa_ql(qafile,retval)\n# log.debug(\"Output QA data is in {}\".format(outfile))\n if qafig is not None:\n fig.plot_lpolyhist(retval,qafig,plotconf=plotconf,hardplots=hardplots)\n log.debug(\"Output QA fig {}\".format(qafig))\n\n return retval\n\n def get_default_config(self):\n return {}\n\nclass Check_FiberFlat(MonitoringAlg):\n def __init__(self,name,config,logger=None):\n if name is None or name.strip() == \"\":\n name=\"CHECKFLAT\"\n kwargs=config['kwargs']\n parms=kwargs['param']\n key=kwargs['refKey'] if 'refKey' in kwargs else \"CHECKFLAT\"\n status=kwargs['statKey'] if 'statKey' in kwargs else \"CHECKFLAT_STATUS\"\n kwargs[\"RESULTKEY\"]=key\n kwargs[\"QASTATUSKEY\"]=status\n\n if \"ReferenceMetrics\" in kwargs:\n r=kwargs[\"ReferenceMetrics\"]\n if key in r:\n kwargs[\"REFERENCE\"]=r[key]\n\n if \"CHECKFLAT_WARN_RANGE\" in parms and \"CHECKFLAT_NORMAL_RANGE\" in parms:\n kwargs[\"RANGES\"]=[(np.asarray(parms[\"CHECKFLAT_WARN_RANGE\"]),QASeverity.WARNING),\n (np.asarray(parms[\"CHECKFLAT_NORMAL_RANGE\"]),QASeverity.NORMAL)]\n\n MonitoringAlg.__init__(self,name,fr,config,logger)\n def run(self,*args,**kwargs):\n if len(args) == 0 :\n log.critical(\"No parameter is given for this QA! \")\n sys.exit(\"Check the configuration file\")\n \n if not self.is_compatible(type(args[0])):\n #raise qlexceptions.ParameterException(\"Incompatible input. Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n log.critical(\"Incompatible input!\")\n sys.exit(\"Was expecting {} got {}\".format(type(self.__inpType__),type(args[0])))\n\n if kwargs[\"singleqa\"] == 'Check_FiberFlat':\n night = kwargs['night']\n expid = '{:08d}'.format(kwargs['expid'])\n camera = kwargs['camera']\n else: fibflat=args[0]\n inputs=get_inputs(*args,**kwargs)\n\n return self.run_qa(fibflat,inputs)\n\n def run_qa(self,fibflat,inputs):\n camera=inputs[\"camera\"]\n paname=inputs[\"paname\"]\n fibermap=inputs[\"fibermap\"]\n amps=inputs[\"amps\"]\n qafile=inputs[\"qafile\"]\n qafig=inputs[\"qafig\"]\n param=inputs[\"param\"]\n refmetrics=inputs[\"refmetrics\"]\n \n kwargs=self.config['kwargs']\n retval={}\n retval[\"PANAME\"] = paname\n retval[\"QATIME\"] = datetime.datetime.now().isoformat()\n retval[\"PROGRAM\"] = 'FLAT'\n retval[\"FLAVOR\"] = 'flat'\n retval[\"NIGHT\"] = kwargs['night']\n retval[\"CAMERA\"] = fibflat.header['CAMERA']\n retval[\"EXPID\"] = '{:08d}'.format(kwargs['expid'])\n\n if param is None:\n log.critical(\"No parameter is given for this QA! 
\")\n sys.exit(\"Check the configuration file\")\n\n retval[\"PARAMS\"] = param\n\n #- Calculate mean and rms fiberflat value for each fiber\n fiberflat = fibflat.fiberflat\n avg_fiberflat=[]\n rms=[]\n for fib in range(len(fiberflat)):\n avg_fiberflat.append(np.mean(fiberflat[fib]))\n rms.append(np.std(fiberflat[fib]))\n\n #- Calculate mean of the fiber means for scalar metric\n avg_all = np.mean(avg_fiberflat)\n\n retval['METRICS'] = {\"FLATMEAN\":avg_fiberflat, \"FLATRMS\":rms, \"CHECKFLAT\":avg_all}\n\n ###############################################################\n # This section is for adding QA metrics for plotting purposes #\n ###############################################################\n\n ###############################################################\n\n return retval\n\n def get_default_config(self):\n return {}\n"
] | [
[
"numpy.sum",
"numpy.isnan",
"numpy.array",
"numpy.where",
"numpy.unique"
],
[
"numpy.allclose",
"numpy.abs",
"numpy.mean",
"numpy.isnan"
],
[
"numpy.ones",
"numpy.logical_or",
"numpy.asarray",
"numpy.nan_to_num",
"numpy.float64",
"numpy.isfinite",
"numpy.abs",
"numpy.genfromtxt",
"numpy.log10",
"numpy.delete",
"numpy.where",
"numpy.mean",
"numpy.sqrt",
"numpy.zeros",
"numpy.median",
"numpy.arange",
"numpy.all",
"numpy.std",
"numpy.array",
"numpy.percentile",
"numpy.zeros_like",
"numpy.gradient",
"numpy.concatenate"
]
] |
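The Integrate_Spec QA in the record above converts response-weighted flux integrals into ST-style spectral magnitudes (21.1 - 2.5 log10(integral / EXPTIME)), derives imaging magnitudes from catalog fluxes (22.5 - 2.5 log10(flux)), and removes sky and non-positive-flux fibers before differencing. The snippet below is a minimal standalone sketch of that delta-magnitude step only; the function name `delta_mags` and its argument names are hypothetical, and this is not the desispec implementation.

```python
import numpy as np


def delta_mags(integrals, catalog_flux, exptime, sky_fibers):
    """Toy delta-magnitude calculation in the spirit of the Integrate_Spec QA.

    integrals    : per-fiber integrated, response-weighted flux
    catalog_flux : per-fiber imaging flux from the fibermap
    exptime      : exposure time in seconds
    sky_fibers   : indices of sky fibers to exclude
    """
    integrals = np.asarray(integrals, dtype=float)
    catalog_flux = np.asarray(catalog_flux, dtype=float)

    # Imaging magnitudes: 22.5 - 2.5 log10(flux); zero flux -> 30 mag sentinel
    mags = np.full_like(catalog_flux, 30.0)
    good = catalog_flux > 0
    mags[good] = 22.5 - 2.5 * np.log10(catalog_flux[good])

    # ST-like spectral magnitudes from the integrated spectra
    specmags = np.full_like(integrals, 30.0)
    pos = integrals > 0
    specmags[pos] = 21.1 - 2.5 * np.log10(integrals[pos] / exptime)

    # Drop sky fibers and fibers with non-positive integrals before differencing
    bad = set(sky_fibers) | set(np.where(~pos)[0])
    keep = np.array([i for i in range(len(integrals)) if i not in bad], dtype=int)
    deltamag = specmags[keep] - mags[keep]
    return np.nan_to_num(deltamag)


# Illustrative call: three object fibers plus one sky fiber (index 2)
print(delta_mags([120.0, 95.0, 0.0, 80.0], [10.0, 8.0, 12.0, 9.0],
                 exptime=900.0, sky_fibers=[2]))
```

As in the QA code, fibers that cannot yield a physical magnitude are pushed to a 30-mag sentinel rather than propagated as NaN or -inf.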
IRC-SPHERE/SklearnHyperStream | [
"7799e0ea15135fe5cb2935bdd39b471c53ccf0ff"
] | [
"online_learning_plugins/sklearn/tools/dataset/2017-08-22_v0.0.1.py"
] | [
"# The MIT License (MIT)\n# Copyright (c) 2014-2017 University of Bristol\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\n# OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom hyperstream import Tool, StreamInstance\nfrom hyperstream.utils import check_input_stream_count\n\nfrom datetime import datetime, timedelta\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import label_binarize\nimport numpy as np\nfrom pytz import UTC\n\n\nclass Dataset(Tool):\n def __init__(self, dataset, shuffle=True, epochs=1, seed=None):\n \"\"\"\n Converts a static dataset into a stream with timestamps\n\n Parameters\n ==========\n dataset: data structure with the following attributes\n data: matrix\n Matrix with one row per sample and one column per feature\n target: array of int\n Array of integers with one label per sample\n\n shuffle: boolean\n Value indicating if the data needs to be shuffled\n\n epochs: Integer\n Number of iterations that the data will be repeated\n\n seed: Integer\n seed for the shuffling process\n \"\"\"\n super(Dataset, self).__init__(dataset=dataset, shuffle=shuffle,\n epochs=epochs, seed=seed)\n\n @check_input_stream_count(0)\n def _execute(self, sources, alignment_stream, interval):\n \"\"\"\n Processes the input data and produces streamed data\n\n yelds\n =====\n stream : with date and dictionary with following entries\n x_tr: array of float\n Training values for the given data stream\n y_tr: array of int\n Training binary label corresponding to the given data stream\n x_te: array of float\n Test values for the given data stream\n y_te: array of int\n Test binary label corresponding to the given data stream\n \"\"\"\n x = self.dataset.data\n y = self.dataset.target\n # Binarize data\n classes = np.unique(y)\n y = label_binarize(y, classes)\n\n j = 0\n start_dt = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)\n for i in range(self.epochs):\n X_tr, X_te, Y_tr, Y_te = train_test_split(\n x, y, shuffle=self.shuffle, train_size=0.5, stratify=y,\n random_state=self.seed)\n for x_tr, y_tr in zip(X_tr, Y_tr):\n x_te, y_te = X_te[j % len(X_te)], Y_te[j % len(Y_te)]\n j += 1\n dt = (start_dt + timedelta(minutes=j)).replace(tzinfo=UTC)\n yield StreamInstance(dt, dict(x_tr=x_tr.reshape(1, -1),\n x_te=x_te.reshape(1, -1),\n y_tr=y_tr.reshape(1, -1),\n y_te=y_te.reshape(1, -1)))\n"
] | [
[
"sklearn.preprocessing.label_binarize",
"sklearn.model_selection.train_test_split",
"numpy.unique"
]
] |
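The Dataset tool in the record above combines `label_binarize` and `train_test_split` to turn a static scikit-learn dataset into a timestamped stream of train/test pairs. Below is a minimal sketch of the same idea as a plain generator, without the HyperStream `Tool`/`StreamInstance` machinery; the function name `stream_dataset` is hypothetical.

```python
from datetime import datetime, timedelta

import numpy as np
from pytz import UTC
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize


def stream_dataset(dataset, epochs=1, seed=None):
    """Yield (timestamp, sample) pairs from a static sklearn-style dataset."""
    x, y = dataset.data, dataset.target
    y = label_binarize(y, classes=np.unique(y))  # one binary column per class

    start = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
    j = 0
    for _ in range(epochs):
        # 50/50 stratified split, re-drawn once per epoch
        x_tr, x_te, y_tr, y_te = train_test_split(
            x, y, train_size=0.5, stratify=y, random_state=seed)
        for xi, yi in zip(x_tr, y_tr):
            # cycle through the test half so every training sample has a test pair
            xe, ye = x_te[j % len(x_te)], y_te[j % len(y_te)]
            j += 1
            ts = start + timedelta(minutes=j)  # synthetic, evenly spaced timestamps
            yield ts, dict(x_tr=xi.reshape(1, -1), y_tr=yi.reshape(1, -1),
                           x_te=xe.reshape(1, -1), y_te=ye.reshape(1, -1))


for ts, sample in stream_dataset(load_iris(), epochs=1, seed=42):
    print(ts, sample['x_tr'].shape, sample['y_tr'].shape)
    break
```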
AHammoudeh/Flow_AH | [
"16c5641be3e9e85511756f75efd002478edaee9b"
] | [
"flow/visualize/time_space_diagram.py"
] | [
"\"\"\"Generate a time space diagram for some networks.\n\nThis method accepts as input a csv file containing the sumo-formatted emission\nfile, and then uses this data to generate a time-space diagram, with the x-axis\nbeing the time (in seconds), the y-axis being the position of a vehicle, and\ncolor representing the speed of te vehicles.\n\nIf the number of simulation steps is too dense, you can plot every nth step in\nthe plot by setting the input `--steps=n`.\n\nNote: This script assumes that the provided network has only one lane on the\neach edge, or one lane on the main highway in the case of MergeNetwork.\n\nUsage\n-----\n::\n python time_space_diagram.py </path/to/emission>.csv </path/to/params>.json\n\"\"\"\nfrom flow.utils.rllib import get_flow_params\nfrom flow.networks import RingNetwork, FigureEightNetwork, MergeNetwork, I210SubNetwork, HighwayNetwork\n\nimport argparse\nimport csv\ntry:\n from matplotlib import pyplot as plt\nexcept ImportError:\n import matplotlib\n matplotlib.use('TkAgg')\n from matplotlib import pyplot as plt\nfrom matplotlib.collections import LineCollection\nimport matplotlib.colors as colors\nimport numpy as np\n\n\n# networks that can be plotted by this method\nACCEPTABLE_NETWORKS = [\n RingNetwork,\n FigureEightNetwork,\n MergeNetwork,\n I210SubNetwork,\n HighwayNetwork\n]\n\n\ndef import_data_from_emission(fp):\n r\"\"\"Import relevant data from the predefined emission (.csv) file.\n\n Parameters\n ----------\n fp : str\n file path (for the .csv formatted file)\n\n Returns\n -------\n dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n \"\"\"\n # initialize all output variables\n veh_id, t, edge, rel_pos, vel, lane = [], [], [], [], [], []\n\n # import relevant data from emission file\n for record in csv.DictReader(open(fp)):\n veh_id.append(record['id'])\n t.append(record['time'])\n edge.append(record['edge_id'])\n rel_pos.append(record['relative_position'])\n vel.append(record['speed'])\n lane.append(record['lane_number'])\n\n # we now want to separate data by vehicle ID\n ret = {key: {'time': [], 'edge': [], 'pos': [], 'vel': [], 'lane': []}\n for key in np.unique(veh_id)}\n for i in range(len(veh_id)):\n ret[veh_id[i]]['time'].append(float(t[i]))\n ret[veh_id[i]]['edge'].append(edge[i])\n ret[veh_id[i]]['pos'].append(float(rel_pos[i]))\n ret[veh_id[i]]['vel'].append(float(vel[i]))\n ret[veh_id[i]]['lane'].append(float(lane[i]))\n\n return ret\n\n\ndef get_time_space_data(data, params):\n r\"\"\"Compute the unique inflows and subsequent outflow statistics.\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters, including:\n\n * \"network\" (str): name of the network that was used when generating\n the emission file. Must be one of the network names mentioned in\n ACCEPTABLE_NETWORKS,\n * \"net_params\" (flow.core.params.NetParams): network-specific\n parameters. This is used to collect the lengths of various network\n links.\n\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. 
Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. Set to zero if the vehicle is not present in the network at\n that time step.\n as_array\n a (n_steps,) vector representing the unique time steps in the\n simulation\n\n Raises\n ------\n AssertionError\n if the specified network is not supported by this method\n \"\"\"\n # check that the network is appropriate\n assert params['network'] in ACCEPTABLE_NETWORKS, \\\n 'Network must be one of: ' + ', '.join(ACCEPTABLE_NETWORKS)\n\n # switcher used to compute the positions based on the type of network\n # switcher used to compute the positions based on the type of network\n switcher = {\n RingNetwork: _ring_road,\n MergeNetwork: _merge,\n FigureEightNetwork: _figure_eight,\n I210SubNetwork: _i210_subnetwork,\n HighwayNetwork: _highway,\n }\n\n # Collect a list of all the unique times.\n all_time = []\n for veh_id in data.keys():\n all_time.extend(data[veh_id]['time'])\n all_time = np.sort(np.unique(all_time))\n\n # Get the function from switcher dictionary\n func = switcher[params['network']]\n\n # Execute the function\n pos, speed, all_time = func(data, params, all_time)\n\n return pos, speed, all_time\n\n\ndef _merge(data, params, all_time):\n r\"\"\"Generate position and speed data for the merge.\n\n This only include vehicles on the main highway, and not on the adjacent\n on-ramp.\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters\n all_time : array_like\n a (n_steps,) vector representing the unique time steps in the\n simulation\n\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. 
Set to zero if the vehicle is not present in the network at\n that time step.\n \"\"\"\n # import network data from flow params\n inflow_edge_len = 100\n premerge = params['net'].additional_params['pre_merge_length']\n postmerge = params['net'].additional_params['post_merge_length']\n\n # generate edge starts\n edgestarts = {\n 'inflow_highway': 0,\n 'left': inflow_edge_len + 0.1,\n 'center': inflow_edge_len + premerge + 22.6,\n 'inflow_merge': inflow_edge_len + premerge + postmerge + 22.6,\n 'bottom': 2 * inflow_edge_len + premerge + postmerge + 22.7,\n ':left_0': inflow_edge_len,\n ':center_0': inflow_edge_len + premerge + 0.1,\n ':center_1': inflow_edge_len + premerge + 0.1,\n ':bottom_0': 2 * inflow_edge_len + premerge + postmerge + 22.6\n }\n\n # compute the absolute position\n for veh_id in data.keys():\n data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],\n data[veh_id]['pos'], edgestarts)\n\n # prepare the speed and absolute position in a way that is compatible with\n # the space-time diagram, and compute the number of vehicles at each step\n pos = np.zeros((all_time.shape[0], len(data.keys())))\n speed = np.zeros((all_time.shape[0], len(data.keys())))\n for i, veh_id in enumerate(sorted(data.keys())):\n for spd, abs_pos, ti, edge in zip(data[veh_id]['vel'],\n data[veh_id]['abs_pos'],\n data[veh_id]['time'],\n data[veh_id]['edge']):\n # avoid vehicles outside the main highway\n if edge in ['inflow_merge', 'bottom', ':bottom_0']:\n continue\n ind = np.where(ti == all_time)[0]\n pos[ind, i] = abs_pos\n speed[ind, i] = spd\n\n return pos, speed, all_time\n\n\ndef _highway(data, params, all_time):\n r\"\"\"Generate position and speed data for the highway subnetwork.\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters\n all_time : array_like\n a (n_steps,) vector representing the unique time steps in the\n simulation\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. Set to zero if the vehicle is not present in the network at\n that time step.\n \"\"\"\n length = params['net'].additional_params['length']\n num_edges = params['net'].additional_params['num_edges']\n edge_len = length / num_edges\n edge_starts = {}\n for i in range(num_edges):\n edge_starts.update({\"highway_{}\".format(i): i * edge_len, \":edge_{}_0\".format(i): i * edge_len})\n\n # compute the absolute position\n for veh_id in data.keys():\n data[veh_id]['abs_pos'] = _get_abs_pos_1_edge(data[veh_id]['edge'],\n data[veh_id]['pos'],\n edge_starts)\n\n # track only vehicles that were around during this time period\n # create the output variables\n pos = np.zeros((all_time.shape[0], len(data.keys())))\n speed = np.zeros((all_time.shape[0], len(data.keys())))\n observed_row_list = []\n for i, veh_id in enumerate(sorted(data.keys())):\n for spd, abs_pos, ti, edge, lane in zip(data[veh_id]['vel'],\n data[veh_id]['abs_pos'],\n data[veh_id]['time'],\n data[veh_id]['edge'],\n data[veh_id]['lane']):\n # avoid vehicles not on the relevant edges. 
Also only check the second to\n # last lane\n if edge not in edge_starts.keys() or ti not in all_time:\n continue\n else:\n if i not in observed_row_list:\n observed_row_list.append(i)\n ind = np.where(ti == all_time)[0]\n pos[ind, i] = abs_pos\n speed[ind, i] = spd\n\n pos = pos[:, observed_row_list]\n speed = speed[:, observed_row_list]\n\n return pos, speed, all_time\n\n\ndef _ring_road(data, params, all_time):\n r\"\"\"Generate position and speed data for the ring road.\n\n Vehicles that reach the top of the plot simply return to the bottom and\n continue.\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters\n all_time : array_like\n a (n_steps,) vector representing the unique time steps in the\n simulation\n\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. Set to zero if the vehicle is not present in the network at\n that time step.\n \"\"\"\n # import network data from flow params\n ring_length = params['net'].additional_params[\"length\"]\n junction_length = 0.1 # length of inter-edge junctions\n\n edgestarts = {\n \"bottom\": 0,\n \":right_0\": 0.25 * ring_length,\n \"right\": 0.25 * ring_length + junction_length,\n \":top_0\": 0.5 * ring_length + junction_length,\n \"top\": 0.5 * ring_length + 2 * junction_length,\n \":left_0\": 0.75 * ring_length + 2 * junction_length,\n \"left\": 0.75 * ring_length + 3 * junction_length,\n \":bottom_0\": ring_length + 3 * junction_length\n }\n\n # compute the absolute position\n for veh_id in data.keys():\n data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],\n data[veh_id]['pos'], edgestarts)\n\n # create the output variables\n pos = np.zeros((all_time.shape[0], len(data.keys())))\n speed = np.zeros((all_time.shape[0], len(data.keys())))\n for i, veh_id in enumerate(sorted(data.keys())):\n for spd, abs_pos, ti in zip(data[veh_id]['vel'],\n data[veh_id]['abs_pos'],\n data[veh_id]['time']):\n ind = np.where(ti == all_time)[0]\n pos[ind, i] = abs_pos\n speed[ind, i] = spd\n\n return pos, speed, all_time\n\n\ndef _i210_subnetwork(data, params, all_time):\n r\"\"\"Generate position and speed data for the i210 subnetwork.\n\n We only look at the second to last lane of edge 119257908#1-AddedOnRampEdge\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters\n all_time : array_like\n a (n_steps,) vector representing the unique time steps in the\n simulation\n\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. 
Set to zero if the vehicle is not present in the network at\n that time step.\n \"\"\"\n # import network data from flow params\n #\n # edge_starts = {\"119257908#0\": 0,\n # \"119257908#1-AddedOnRampEdge\": 686.98}\n desired_lane = 1\n edge_starts = {\"119257914\": 0,\n \"119257908#0\": 61.58,\n \"119257908#1-AddedOnRampEdge\": 686.98 + 61.58}\n # edge_starts = {\"119257908#0\": 0}\n # edge_starts = {\"119257908#1-AddedOnRampEdge\": 0}\n # desired_lane = 5\n\n # compute the absolute position\n for veh_id in data.keys():\n data[veh_id]['abs_pos'] = _get_abs_pos_1_edge(data[veh_id]['edge'],\n data[veh_id]['pos'],\n edge_starts)\n\n # create the output variables\n # TODO(@ev) handle subsampling better than this\n low_time = int(0 / params['sim'].sim_step)\n high_time = int(1600 / params['sim'].sim_step)\n all_time = all_time[low_time:high_time]\n\n # track only vehicles that were around during this time period\n observed_row_list = []\n pos = np.zeros((all_time.shape[0], len(data.keys())))\n speed = np.zeros((all_time.shape[0], len(data.keys())))\n for i, veh_id in enumerate(sorted(data.keys())):\n for spd, abs_pos, ti, edge, lane in zip(data[veh_id]['vel'],\n data[veh_id]['abs_pos'],\n data[veh_id]['time'],\n data[veh_id]['edge'],\n data[veh_id]['lane']):\n # avoid vehicles not on the relevant edges. Also only check the second to\n # last lane\n if edge not in edge_starts.keys() or ti not in all_time or lane != desired_lane:\n continue\n else:\n if i not in observed_row_list:\n observed_row_list.append(i)\n ind = np.where(ti == all_time)[0]\n pos[ind, i] = abs_pos\n speed[ind, i] = spd\n\n pos = pos[:, observed_row_list]\n speed = speed[:, observed_row_list]\n\n return pos, speed, all_time\n\n\ndef _figure_eight(data, params, all_time):\n r\"\"\"Generate position and speed data for the figure eight.\n\n The vehicles traveling towards the intersection from one side will be\n plotted from the top downward, while the vehicles from the other side will\n be plotted from the bottom upward.\n\n Parameters\n ----------\n data : dict of dict\n Key = \"veh_id\": name of the vehicle \\n Elements:\n\n * \"time\": time step at every sample\n * \"edge\": edge ID at every sample\n * \"pos\": relative position at every sample\n * \"vel\": speed at every sample\n params : dict\n flow-specific parameters\n all_time : array_like\n a (n_steps,) vector representing the unique time steps in the\n simulation\n\n Returns\n -------\n as_array\n n_steps x n_veh matrix specifying the absolute position of every\n vehicle at every time step. Set to zero if the vehicle is not present\n in the network at that time step.\n as_array\n n_steps x n_veh matrix specifying the speed of every vehicle at every\n time step. 
Set to zero if the vehicle is not present in the network at\n that time step.\n \"\"\"\n # import network data from flow params\n net_params = params['net']\n ring_radius = net_params.additional_params['radius_ring']\n ring_edgelen = ring_radius * np.pi / 2.\n intersection = 2 * ring_radius\n junction = 2.9 + 3.3 * net_params.additional_params['lanes']\n inner = 0.28\n\n # generate edge starts\n edgestarts = {\n 'bottom': inner,\n 'top': intersection / 2 + junction + inner,\n 'upper_ring': intersection + junction + 2 * inner,\n 'right': intersection + 3 * ring_edgelen + junction + 3 * inner,\n 'left': 1.5 * intersection + 3 * ring_edgelen + 2 * junction + 3 * inner,\n 'lower_ring': 2 * intersection + 3 * ring_edgelen + 2 * junction + 4 * inner,\n ':bottom_0': 0,\n ':center_1': intersection / 2 + inner,\n ':top_0': intersection + junction + inner,\n ':right_0': intersection + 3 * ring_edgelen + junction + 2 * inner,\n ':center_0': 1.5 * intersection + 3 * ring_edgelen + junction + 3 * inner,\n ':left_0': 2 * intersection + 3 * ring_edgelen + 2 * junction + 3 * inner,\n # for aimsun\n 'bottom_to_top': intersection / 2 + inner,\n 'right_to_left': junction + 3 * inner,\n }\n\n # compute the absolute position\n for veh_id in data.keys():\n data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'],\n data[veh_id]['pos'], edgestarts)\n\n # create the output variables\n pos = np.zeros((all_time.shape[0], len(data.keys())))\n speed = np.zeros((all_time.shape[0], len(data.keys())))\n for i, veh_id in enumerate(sorted(data.keys())):\n for spd, abs_pos, ti in zip(data[veh_id]['vel'],\n data[veh_id]['abs_pos'],\n data[veh_id]['time']):\n ind = np.where(ti == all_time)[0]\n pos[ind, i] = abs_pos\n speed[ind, i] = spd\n\n # reorganize data for space-time plot\n figure_eight_len = 6 * ring_edgelen + 2 * intersection + 2 * junction + 10 * inner\n intersection_loc = [edgestarts[':center_1'] + intersection / 2,\n edgestarts[':center_0'] + intersection / 2]\n pos[pos < intersection_loc[0]] += figure_eight_len\n pos[np.logical_and(pos > intersection_loc[0], pos < intersection_loc[1])] \\\n += - intersection_loc[1]\n pos[pos > intersection_loc[1]] = \\\n - pos[pos > intersection_loc[1]] + figure_eight_len + intersection_loc[0]\n\n return pos, speed, all_time\n\n\ndef _get_abs_pos(edge, rel_pos, edgestarts):\n \"\"\"Compute the absolute positions from edges and relative positions.\n\n This is the variable we will ultimately use to plot individual vehicles.\n\n Parameters\n ----------\n edge : list of str\n list of edges at every time step\n rel_pos : list of float\n list of relative positions at every time step\n edgestarts : dict\n the absolute starting position of every edge\n\n Returns\n -------\n list of float\n the absolute positive for every sample\n \"\"\"\n ret = []\n for edge_i, pos_i in zip(edge, rel_pos):\n ret.append(pos_i + edgestarts[edge_i])\n return ret\n\n\ndef _get_abs_pos_1_edge(edges, rel_pos, edge_starts):\n \"\"\"Compute the absolute positions from a subset of edges.\n\n This is the variable we will ultimately use to plot individual vehicles.\n\n Parameters\n ----------\n edges : list of str\n list of edges at every time step\n rel_pos : list of float\n list of relative positions at every time step\n edge_starts : dict\n the absolute starting position of every edge\n\n Returns\n -------\n list of float\n the absolute positive for every sample\n \"\"\"\n ret = []\n for edge_i, pos_i in zip(edges, rel_pos):\n if edge_i in edge_starts.keys():\n ret.append(pos_i + edge_starts[edge_i])\n 
else:\n ret.append(-1)\n return ret\n\n\ndef make_ts_diagram(flow_params, emission_path, min_speed, max_speed, start, stop, title):\n # flow_params is imported as a dictionary\n if '.json' in flow_params:\n flow_params = get_flow_params(flow_params)\n else:\n module = __import__(\"examples.exp_configs.non_rl\", fromlist=[flow_params])\n flow_params = getattr(module, flow_params).flow_params\n\n # import data from the emission.csv file\n emission_data = import_data_from_emission(emission_path)\n\n # compute the position and speed for all vehicles at all times\n pos, speed, time = get_time_space_data(emission_data, flow_params)\n\n # some plotting parameters\n cdict = {\n 'red': ((0, 0, 0), (0.2, 1, 1), (0.6, 1, 1), (1, 0, 0)),\n 'green': ((0, 0, 0), (0.2, 0, 0), (0.6, 1, 1), (1, 1, 1)),\n 'blue': ((0, 0, 0), (0.2, 0, 0), (0.6, 0, 0), (1, 0, 0))\n }\n my_cmap = colors.LinearSegmentedColormap('my_colormap', cdict, 1024)\n\n # perform plotting operation\n fig = plt.figure(figsize=(16, 9))\n ax = plt.axes()\n norm = plt.Normalize(min_speed, max_speed)\n cols = []\n\n xmin = max(time[0], start)\n xmax = min(time[-1], stop)\n xbuffer = (xmax - xmin) * 0.025 # 2.5% of range\n ymin, ymax = np.amin(pos), np.amax(pos)\n ybuffer = (ymax - ymin) * 0.025 # 2.5% of range\n\n ax.set_xlim(xmin - xbuffer, xmax + xbuffer)\n ax.set_ylim(ymin - ybuffer, ymax + ybuffer)\n\n for indx_car in range(pos.shape[1]):\n unique_car_pos = pos[:, indx_car]\n\n if flow_params['network'] == I210SubNetwork or flow_params['network'] == HighwayNetwork:\n indices = np.where(pos[:, indx_car] != 0)[0]\n unique_car_speed = speed[indices, indx_car]\n points = np.array([time[indices], pos[indices, indx_car]]).T.reshape(-1, 1, 2)\n else:\n\n # discontinuity from wraparound\n disc = np.where(np.abs(np.diff(unique_car_pos)) >= 10)[0] + 1\n unique_car_time = np.insert(time, disc, np.nan)\n unique_car_pos = np.insert(unique_car_pos, disc, np.nan)\n unique_car_speed = np.insert(speed[:, indx_car], disc, np.nan)\n #\n points = np.array(\n [unique_car_time, unique_car_pos]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lc = LineCollection(segments, cmap=my_cmap, norm=norm)\n\n # Set the values used for color mapping\n lc.set_array(unique_car_speed)\n lc.set_linewidth(1.75)\n cols.append(lc)\n\n plt.title(title, fontsize=25)\n plt.ylabel('Position (m)', fontsize=20)\n plt.xlabel('Time (s)', fontsize=20)\n\n for col in cols:\n line = ax.add_collection(col)\n cbar = plt.colorbar(line, ax=ax, norm=norm)\n cbar.set_label('Velocity (m/s)', fontsize=20)\n cbar.ax.tick_params(labelsize=18)\n\n plt.xticks(fontsize=18)\n plt.yticks(fontsize=18)\n\n ###########################################################################\n # Note: For MergeNetwork only #\n if flow_params['network'] == 'MergeNetwork': #\n plt.plot(time, [0] * pos.shape[0], linewidth=3, color=\"white\") #\n plt.plot(time, [-0.1] * pos.shape[0], linewidth=3, color=\"white\") #\n ###########################################################################\n\n plt.show()\n\n\nif __name__ == '__main__':\n # create the parser\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='[Flow] Generates time space diagrams for flow networks.',\n epilog='python time_space_diagram.py </path/to/emission>.csv '\n '</path/to/flow_params>.json')\n\n # required arguments\n parser.add_argument('emission_path', type=str,\n help='path to the csv file.')\n parser.add_argument('flow_params', type=str,\n help='path to 
the flow_params json file.')\n\n # optional arguments\n parser.add_argument('--steps', type=int, default=1,\n help='rate at which steps are plotted.')\n parser.add_argument('--title', type=str, default='Time Space Diagram',\n help='rate at which steps are plotted.')\n parser.add_argument('--max_speed', type=int, default=8,\n help='The maximum speed in the color range.')\n parser.add_argument('--min_speed', type=int, default=0,\n help='The minimum speed in the color range.')\n parser.add_argument('--start', type=float, default=0,\n help='initial time (in sec) in the plot.')\n parser.add_argument('--stop', type=float, default=float('inf'),\n help='final time (in sec) in the plot.')\n\n args = parser.parse_args()\n\n make_ts_diagram(args.flow_params, args.emission_path, args.min_speed,\n args.max_speed, args.start, args.stop, args.title)\n"
] | [
[
"matplotlib.colors.LinearSegmentedColormap",
"numpy.diff",
"numpy.insert",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.ylabel",
"numpy.amax",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"numpy.logical_and",
"matplotlib.pyplot.title",
"matplotlib.use",
"numpy.where",
"numpy.unique",
"matplotlib.pyplot.colorbar",
"matplotlib.collections.LineCollection",
"matplotlib.pyplot.Normalize",
"numpy.amin",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.concatenate",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel"
]
] |
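The time-space diagram script in the record above colors each vehicle trajectory by speed with a `LineCollection` and breaks the line with NaNs wherever the position wraps around the ring. A self-contained sketch of that plotting pattern on synthetic trajectories (all numbers below are made up purely for illustration) might look like this:

```python
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection

# Synthetic trajectories: 200 time steps, 5 vehicles on a 300 m ring
time = np.linspace(0, 100, 200)
dt = time[1] - time[0]
speed = 4.0 + 3.0 * np.sin(0.1 * time)[:, None] * np.ones((1, 5))      # 1-7 m/s
pos = (np.cumsum(speed, axis=0) * dt + 60.0 * np.arange(5)) % 300.0    # wrapped positions

fig, ax = plt.subplots(figsize=(8, 5))
norm = plt.Normalize(0, 8)
line = None

for k in range(pos.shape[1]):
    # Insert NaNs at wraparound discontinuities so the line is broken there
    disc = np.where(np.abs(np.diff(pos[:, k])) >= 10)[0] + 1
    t_k = np.insert(time, disc, np.nan)
    p_k = np.insert(pos[:, k], disc, np.nan)
    s_k = np.insert(speed[:, k], disc, np.nan)

    # Build (N-1) segments and color each one by the local speed
    points = np.array([t_k, p_k]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    lc = LineCollection(segments, cmap='viridis', norm=norm)
    lc.set_array(s_k[:-1])
    lc.set_linewidth(1.75)
    line = ax.add_collection(lc)

ax.set_xlim(time[0], time[-1])
ax.set_ylim(0.0, 300.0)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Position (m)')
plt.colorbar(line, ax=ax, label='Velocity (m/s)')
plt.show()
```

Breaking the trajectory with NaNs is what prevents matplotlib from drawing a vertical line across the whole plot every time a vehicle re-enters at position zero.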
jonassagild/Track-to-Track-Fusion | [
"6bb7fbe6a6e2d9a2713c47f211899226485eee79"
] | [
"scripts/plot_results_kf_dependence.py"
] | [
"\"\"\"plot_stuff script to plot things\n\nJust temporary code to plot things. Not for producing results, but for testing code.\n\"\"\"\nimport numpy as np\nimport scipy\nfrom stonesoup.types.state import GaussianState\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Ellipse\n\n\nfrom trackers.kalman_filter_dependent_fusion import kalman_filter_dependent_fusion\n\nfrom utils.scenario_generator import generate_scenario_2\nfrom utils import open_object\nfrom utils.save_figures import save_figure\n\n# run dependent fusion and plot\n\nseed = 1996\n\nsigma_process = 1\nsigma_meas_radar = 5\nsigma_meas_ais = 10\nnum_steps = 15\n\nsave_fig = True\n\ngenerate_scenario_2(seed=seed, permanent_save=False, sigma_process=sigma_process, sigma_meas_radar=sigma_meas_radar,\n sigma_meas_ais=sigma_meas_ais, timesteps=num_steps)\n\nfolder = \"temp\" # temp instead of seed, as it is not a permanent save\n\n# load ground truth and the measurements\ndata_folder = \"../scenarios/scenario2/\" + folder + \"/\"\nground_truth = open_object.open_object(data_folder + \"ground_truth.pk1\")\nmeasurements_radar = open_object.open_object(data_folder + \"measurements_radar.pk1\")\nmeasurements_ais = open_object.open_object(data_folder + \"measurements_ais.pk1\")\n\n# load start_time\nstart_time = open_object.open_object(data_folder + \"start_time.pk1\")\n\n# prior\nprior = GaussianState([0, 1, 0, 1], np.diag([1.5, 0.5, 1.5, 0.5]) ** 2, timestamp=start_time)\n\n# tracker\nkf_dependent_fusion = kalman_filter_dependent_fusion(measurements_radar, measurements_ais, start_time, prior,\n sigma_process_radar=sigma_process,\n sigma_process_ais=sigma_process,\n sigma_meas_radar=sigma_meas_radar,\n sigma_meas_ais=sigma_meas_ais)\n\n# hacky way; just so its easy to reuse code\nmeasurement_model_radar = kf_dependent_fusion.measurement_model_radar\nmeasurement_model_ais = measurement_model_radar\n\ntracks_fused, tracks_ais, tracks_radar = kf_dependent_fusion.track()\n\n# plot\nfig = plt.figure(figsize=(10, 6))\nax = fig.add_subplot(1, 1, 1)\nax.set_xlabel(\"$x$\")\nax.set_ylabel(\"$y$\")\nax.axis('equal')\nax.plot([state.state_vector[0] for state in ground_truth],\n [state.state_vector[2] for state in ground_truth],\n linestyle=\"--\",\n label='Ground truth')\nax.scatter([state.state_vector[0] for state in measurements_radar],\n [state.state_vector[1] for state in measurements_radar],\n color='b',\n label='Measurements Radar')\nax.scatter([state.state_vector[0] for state in measurements_ais],\n [state.state_vector[1] for state in measurements_ais],\n color='r',\n label='Measurements AIS')\n\n# add ellipses to the posteriors\nfor state in tracks_radar:\n w, v = np.linalg.eig(measurement_model_radar.matrix() @ state.covar @ measurement_model_radar.matrix().T)\n max_ind = np.argmax(w)\n min_ind = np.argmin(w)\n orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),\n width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n angle=np.rad2deg(orient),\n alpha=0.2,\n color='b')\n ax.add_artist(ellipse)\n\nfor state in tracks_ais:\n w, v = np.linalg.eig(measurement_model_ais.matrix() @ state.covar @ measurement_model_ais.matrix().T)\n max_ind = np.argmax(w)\n min_ind = np.argmin(w)\n orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),\n width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n angle=np.rad2deg(orient),\n alpha=0.2,\n color='r')\n ax.add_patch(ellipse)\n\nfor 
track_fused in tracks_fused:\n w, v = np.linalg.eig(measurement_model_ais.matrix() @ track_fused.covar @ measurement_model_ais.matrix().T)\n max_ind = np.argmax(w)\n min_ind = np.argmin(w)\n orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n ellipse = Ellipse(xy=(track_fused.state_vector[0], track_fused.state_vector[2]),\n width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n angle=np.rad2deg(orient),\n alpha=0.5,\n color='green')\n ax.add_patch(ellipse)\n\n# add ellipses to add legend todo do this less ugly\nellipse = Ellipse(xy=(0, 0),\n width=0,\n height=0,\n color='r',\n alpha=0.2,\n label='Posterior AIS')\nax.add_patch(ellipse)\nellipse = Ellipse(xy=(0, 0),\n width=0,\n height=0,\n color='b',\n alpha=0.2,\n label='Posterior Radar')\nax.add_patch(ellipse)\nellipse = Ellipse(xy=(0, 0),\n width=0,\n height=0,\n color='green',\n alpha=0.5,\n label='Posterior Fused')\nax.add_patch(ellipse)\n\nax.legend(prop={'size': 12})\ntitle = \"Scenario 1 with $\\sigma_{AIS} = \" + str(sigma_meas_ais) + \"$, $\\sigma_{radar} = \" + str(sigma_meas_radar) + \\\n \"$ and $\\sigma_{process} = \" + str(sigma_process) + \\\n \"$. \\n Fusion is performed accounting for the common process noise.\"\nax.set_title(title, fontsize=20)\nfig.show()\nif save_fig:\n save_figure(\"../results/final_results/scenario_examples\", \"scenario1_example.pdf\", fig)\n\n# # plot estimate for estimate\n# # plot\n# fig_2 = plt.figure(figsize=(10, 6))\n# ax = fig_2.add_subplot(1, 1, 1)\n# ax.set_xlabel(\"$x$\")\n# ax.set_ylabel(\"$y$\")\n# ax.axis('equal')\n# ax.plot([state.state_vector[0] for state in ground_truth],\n# [state.state_vector[2] for state in ground_truth],\n# linestyle=\"--\",\n# label='Ground truth')\n# # ax.scatter([state.state_vector[0] for state in measurements_radar],\n# # [state.state_vector[1] for state in measurements_radar],\n# # color='b',\n# # label='Measurements Radar')\n# # ax.scatter([state.state_vector[0] for state in measurements_ais],\n# # [state.state_vector[1] for state in measurements_ais],\n# # color='r',\n# # label='Measurements AIS')\n#\n# # for i in range(0, len(tracks_fused)):\n# # # plot measurements\n# # ax.scatter([measurements_radar[i + 1].state_vector[0]],\n# # [measurements_radar[i + 1].state_vector[1]],\n# # color='b',\n# # label='Measurements Radar')\n# # ax.scatter([measurements_ais[i + 1].state_vector[0]],\n# # [measurements_ais[i + 1].state_vector[1]],\n# # color='r',\n# # label='Measurements AIS')\n# #\n# # # plot one and one estimate\n# # state_radar = tracks_radar[i + 1]\n# # w, v = np.linalg.eig(measurement_model_radar.matrix() @ state_radar.covar @ measurement_model_radar.matrix().T)\n# # max_ind = np.argmax(w)\n# # min_ind = np.argmin(w)\n# # orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# # ellipse = Ellipse(xy=(state_radar.state_vector[0], state_radar.state_vector[2]),\n# # width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# # angle=np.rad2deg(orient),\n# # alpha=0.2,\n# # color='b')\n# # ax.add_artist(ellipse)\n# #\n# # state_ais = tracks_ais[i + 1]\n# # w, v = np.linalg.eig(measurement_model_ais.matrix() @ state_ais.covar @ measurement_model_ais.matrix().T)\n# # max_ind = np.argmax(w)\n# # min_ind = np.argmin(w)\n# # orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# # ellipse = Ellipse(xy=(state_ais.state_vector[0], state_ais.state_vector[2]),\n# # width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# # angle=np.rad2deg(orient),\n# # alpha=0.2,\n# # color='r')\n# # ax.add_patch(ellipse)\n# #\n# # state_fused = 
tracks_fused[i]\n# # w, v = np.linalg.eig(measurement_model_ais.matrix() @ state_fused.covar @ measurement_model_ais.matrix().T)\n# # max_ind = np.argmax(w)\n# # min_ind = np.argmin(w)\n# # orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# # ellipse = Ellipse(xy=(state_fused.state_vector[0], state_fused.state_vector[2]),\n# # width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# # angle=np.rad2deg(orient),\n# # alpha=0.5,\n# # color='green')\n# # ax.add_patch(ellipse)\n# #\n# # fig_2.show()\n# # input(\"Press Enter to continue...\")\n\n\n#\n# # add ellipses to the posteriors\n# for state in tracks_radar:\n# w, v = np.linalg.eig(measurement_model_radar.matrix() @ state.covar @ measurement_model_radar.matrix().T)\n# max_ind = np.argmax(w)\n# min_ind = np.argmin(w)\n# orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),\n# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# angle=np.rad2deg(orient),\n# alpha=0.2,\n# color='b')\n# ax.add_artist(ellipse)\n#\n# for state in tracks_ais:\n# w, v = np.linalg.eig(measurement_model_ais.matrix() @ state.covar @ measurement_model_ais.matrix().T)\n# max_ind = np.argmax(w)\n# min_ind = np.argmin(w)\n# orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),\n# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# angle=np.rad2deg(orient),\n# alpha=0.2,\n# color='r')\n# ax.add_patch(ellipse)\n#\n# for track_fused in tracks_fused:\n# w, v = np.linalg.eig(measurement_model_ais.matrix() @ track_fused[1] @ measurement_model_ais.matrix().T)\n# max_ind = np.argmax(w)\n# min_ind = np.argmin(w)\n# orient = np.arctan2(v[1, max_ind], v[0, max_ind])\n# ellipse = Ellipse(xy=(track_fused[0][0], track_fused[0][2]),\n# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),\n# angle=np.rad2deg(orient),\n# alpha=0.5,\n# color='green')\n# ax.add_patch(ellipse)\n\n# fig_2.show()\n"
] | [
[
"numpy.arctan2",
"numpy.diag",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.figure",
"numpy.argmin",
"numpy.rad2deg",
"numpy.argmax",
"numpy.sqrt"
]
] |
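The plotting script in the record above repeatedly builds covariance ellipses from the eigen-decomposition of each track's projected covariance (width and height proportional to the square roots of the eigenvalues, orientation from the leading eigenvector). A small helper that factors out that repeated pattern could look like the sketch below; `covariance_ellipse` is a hypothetical name and is not part of Stone Soup or the repository.

```python
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse


def covariance_ellipse(mean, cov, n_std=1.0, **kwargs):
    """Return a matplotlib Ellipse for a 2x2 covariance matrix.

    Axes lengths are 2*n_std*sqrt(eigenvalue) along the principal directions,
    and the rotation angle comes from the leading eigenvector, mirroring the
    per-track plotting loops above.
    """
    w, v = np.linalg.eig(cov)
    max_ind, min_ind = np.argmax(w), np.argmin(w)
    orient = np.arctan2(v[1, max_ind], v[0, max_ind])
    return Ellipse(xy=mean,
                   width=2 * n_std * np.sqrt(w[max_ind]),
                   height=2 * n_std * np.sqrt(w[min_ind]),
                   angle=np.rad2deg(orient),
                   **kwargs)


fig, ax = plt.subplots()
cov = np.array([[4.0, 1.5],
                [1.5, 1.0]])
ax.add_patch(covariance_ellipse((0.0, 0.0), cov, alpha=0.3, color='b'))
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.set_aspect('equal')
plt.show()
```

The `n_std` argument lets the same helper draw 1-sigma, 2-sigma, or 3-sigma contours, which the original script does implicitly by fixing the factor at 2*sqrt(eigenvalue).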
gkuling/BIRADS_BERT | [
"f218d05283df90e536b210efbb4fab1d6dff082d"
] | [
"examples/MLM_Training_transformers.py"
] | [
"'''\nCopyright (c) 2020, Martel Lab, Sunnybrook Research Institute\nCodes inspired by Hugging Face Transformers package code run_mlm.py\nhttps://github.com/huggingface/transformers/blob/main/examples/pytorch/\nlanguage-modeling/run_mlm.py\n\nDescription: Training code used to train a BERT embedding in Masked Language\nModeling for BERTFineTuning Codes.\n\nInput: train and test folders filled with .txt documents holding a list of\nsentences. These .txt files can be created with TextPReProcessingBERTModel.py\nfile.\nOutput: A saved Transformer model based on Huggingface Transformers package.\nThis includes a cnofig.json, eval_results.txt, pytorch_model.bin,\ntrianing_args.bin, and vocab.txt.\n'''\nimport sys\nsys.path.append('.')\n\nimport argparse\nimport os\nimport torch\nimport logging\nimport random\nimport numpy as np\nfrom transformers import BertConfig, BertForMaskedLM, AdamW, \\\n get_linear_schedule_with_warmup, BertTokenizer\nfrom torch.utils.data import DataLoader, SequentialSampler, RandomSampler\nfrom tqdm import tqdm, trange\nfrom tokenizers.implementations import BertWordPieceTokenizer\nfrom transformers.data.data_collator import DataCollatorForLanguageModeling\nfrom transformers.data.datasets import TextDataset\n\nfrom datetime import datetime as dt\n\ntic = dt.now()\n\nparser = argparse.ArgumentParser()\nlogger = logging.getLogger(__name__)\n# Required parameters\nparser.add_argument(\"--train_data_file\", default=None, type=str,\n required=True,\n help=\"The input training data in a .txt file\"\n \"files.\")\nparser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions \"\n \"and checkpoints will be written.\")\nparser.add_argument('--overwrite_output_dir', action='store_true',\n help=\"Overwrite the content of the output directory\")\nparser.add_argument(\"--per_gpu_train_batch_size\", default=16, type=int,\n help=\"Batch size per GPU/CPU for training.\")\nparser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\nparser.add_argument(\"--eval_data_file\", default=None, type=str,\n required=False,\n help=\"The input training data in a .txt file\"\n \"files.\")\nparser.add_argument(\"--num_train_epochs\", default=1.0, type=float,\n help=\"Total number of training epochs to perform.\")\nparser.add_argument(\"--warmup_steps\", default=2000, type=int,\n help=\"Linear warmup over warmup_steps.\")\nparser.add_argument('--save_steps', type=int, default=10000,\n help=\"Save checkpoint every X updates steps.\")\nparser.add_argument('--data_portion', type=float, default=1.0,\n help=\"The portion of the training data you wish to load. \"\n \"(1.0 for all data, >1.0 for a portion\")\nparser.add_argument('--logging_steps', type=int, default=10000,\n help=\"Log every X updates steps.\")\nparser.add_argument('--block_size', type=int, default=32,\n help=\"Max sequence length used in tokenizer and dataset.\")\nparser.add_argument(\"--start_from_checkpoint\", action='store_true',\n help=\"Start training from latest checkpoint.\")\nparser.add_argument(\"--preliminary_model\", type=str, default='fromScratch',\n help='Choice to start the model from a previously trained '\n 'model or start from scratch. Used with '\n 'model.from_pretrained(preliminary_model. 
')\nargs = parser.parse_args()\n\ndef set_seed(sd):\n random.seed(sd)\n np.random.seed(sd)\n torch.manual_seed(sd)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(sd)\n\ndef evaluate(args, model, eval_dataset, tokenizer, step, prefix=\"\"):\n \"\"\"\n Evaluation of model\n :param args: input arguments from parser\n :param model: pytorch model to be evaluated\n :param eval_dataset: dataset used for evaluation\n :param tokenizer: tokenizer used by the model\n :param step: the current step in training\n :param prefix: prescript to be added to the beginning of save file\n :return: results of evaluation\n \"\"\"\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n print('')\n eval_output_dir = args.output_dir\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n eval_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n data_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer,\n mlm=True,\n mlm_probability=0.15\n )\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,\n batch_size=eval_batch_size,\n collate_fn=data_collator\n )\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n\n for batch in tqdm(eval_dataloader,\n desc=\"Evaluating\",\n position=0,\n leave=True):\n\n with torch.no_grad():\n outputs = model(input_ids=batch['input_ids'].to(args.device),\n labels=batch['labels'].to(args.device))\n loss = outputs['loss']\n eval_loss += loss.mean().item()\n\n nb_eval_steps += 1\n\n eval_loss /= nb_eval_steps\n perplexity = torch.exp(torch.tensor(eval_loss))\n\n result = {\n \"perplexity\": perplexity,\n 'loss': eval_loss,\n \"Iteration\": str(step)\n }\n\n output_eval_file = os.path.join(eval_output_dir, prefix,\n \"eval_results.txt\")\n with open(output_eval_file, \"a\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n writer.write('\\n')\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s, \" % (key, str(result[key])))\n\n writer.close()\n\n return result\n\ndef train(args, train_dataset, model, tokenizer, eval_dataset=None):\n \"\"\"\n Train the model\n :param args: input arguments from parser\n :param train_dataset: dataset used for training\n :param model: pytorch model to be evaluated\n :param tokenizer: tokenizer used by the model\n :param eval_dataset: dataset used for evaluation\n :return:\n \"\"\"\n\n data_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer,\n mlm=True,\n mlm_probability=0.15\n )\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler,\n batch_size=args.train_batch_size,\n collate_fn=data_collator\n )\n\n init_total = len(\n train_dataloader) * args.num_train_epochs\n\n # loading a modle from a checkpoint if neccesary\n if args.start_from_checkpoint:\n chk_pt_fdlr = [fldr for fldr in os.listdir(args.output_dir) if\n fldr.startswith('checkpoint')]\n chk_pt_fdlr.sort()\n logger.info(\"***** Running training from checkpoint: \" + str(\n chk_pt_fdlr[-1]) + \"*****\")\n global_step = int(''.join([chr for chr in chk_pt_fdlr[-1]\n if 
chr.isdigit()]))\n it_total = init_total - global_step\n args.num_train_epochs = np.round(it_total / len(train_dataloader))\n # model = BertForMaskedLM(config=config)\n model = BertForMaskedLM.from_pretrained(args.output_dir + '/' +\n chk_pt_fdlr[-1])\n model.to(args.device)\n\n logger.info('Loaded checkpoint model. Beginning training.')\n else:\n global_step = 0\n it_total = init_total\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if\n not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01},\n {'params': [p for n, p in model.named_parameters() if\n any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5,\n eps=1e-8)\n if global_step > args.warmup_steps:\n scheduler = \\\n get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=init_total)\n for _ in range(global_step):\n scheduler.step()\n logger.info('Initialized LR Scheduler and brought it to current step.')\n else:\n scheduler = \\\n get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=it_total)\n # multi-gpu training\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\",\n args.per_gpu_train_batch_size)\n logger.info(\" Total optimization steps = %d\", it_total)\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\")\n set_seed(seed) # Added here for reproducibility (even between python 2\n # and 3)\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader,\n desc=\"Iteration\",\n position=0,\n leave=True)\n epoch_iterator.set_postfix({'loss': 'Initialized'})\n for step, batch in enumerate(epoch_iterator):\n model.train()\n outputs = model(input_ids=batch['input_ids'].to(args.device),\n labels=batch['labels'].to(args.device))\n # model outputs are always tuple in transformers (see doc)\n loss = outputs['loss']\n\n if args.n_gpu > 1:\n # mean() to average on multi-gpu parallel training\n loss = loss.mean()\n\n loss.backward()\n\n tr_loss += loss.item()\n epoch_iterator.set_postfix({'loss': loss.item()})\n\n torch.nn.utils.clip_grad_norm_(model.parameters(),\n 1.0)\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n results = evaluate(args, model, eval_dataset, tokenizer,\n step=global_step)\n\n if args.save_steps > 0 and global_step % args.save_steps == 0:\n checkpoint_prefix = 'checkpoint'\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir,\n '{}-{}'.format(checkpoint_prefix,\n global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = model.module \\\n if hasattr(model, 'module') \\\n else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n torch.save(args,\n os.path.join(output_dir, 'training_args.bin'))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n return global_step, tr_loss / global_step, model\n\n\nargs.mlm = True\n\nif 
os.path.exists(args.output_dir) and os.listdir(\n args.output_dir) and not args.overwrite_output_dir:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. Use \"\n \"--overwrite_output_dir to overcome.\".format(\n args.output_dir))\n\n# Setup CUDA, GPU & distributed training\ndevice = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\")\nargs.n_gpu = torch.cuda.device_count()\n\nargs.device = device\n\n# Setup logging\nlogging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger.info(\n \"Device: %s, n_gpu: %s\", device, args.n_gpu)\n\n# Set seed\nseed = 20210325\nset_seed(seed)\n\nlogger.info(\"Beginning Tokenizer Training on data in \" + args.train_data_file)\npaths = args.train_data_file\nargs.vocab_size = int(''.join([char for char in args.train_data_file.split(\n '/')[-1] if char.isnumeric()]))\nif not args.preliminary_model != 'fromScratch' and \\\n not args.start_from_checkpoint:\n # Building custom Tokenizer\n tokenizer = BertWordPieceTokenizer(\n clean_text=True,\n strip_accents=True,\n lowercase=True,\n )\n tokenizer.train(\n paths,\n vocab_size=args.vocab_size + 5,\n min_frequency=2,\n show_progress=True,\n special_tokens=[\"[PAD]\", \"[UNK]\", \"[CLS]\", \"[SEP]\", \"[MASK]\"],\n limit_alphabet=1000,\n wordpieces_prefix=\"##\",\n )\n tokenizer.save_model(args.output_dir)\n\nif args.preliminary_model != 'fromScratch':\n tokenizer = BertTokenizer.from_pretrained(args.preliminary_model)\nelse:\n tokenizer = BertTokenizer.from_pretrained(args.output_dir)\n\nconfig = BertConfig.from_pretrained('bert-base-cased')\nconfig.vocab_size = tokenizer.vocab_size\nif args.preliminary_model != 'fromScratch':\n model = BertForMaskedLM.from_pretrained(args.preliminary_model)\nelse:\n model = BertForMaskedLM(config=config)\nmodel.to(args.device)\n\ntrain_dataset = TextDataset(\n tokenizer=tokenizer,\n file_path=args.train_data_file,\n block_size=32,\n overwrite_cache=args.overwrite_output_dir\n)\n\neval_dataset = TextDataset(\n tokenizer=tokenizer,\n file_path=args.eval_data_file,\n block_size=32,\n overwrite_cache=args.overwrite_output_dir\n)\nif args.data_portion < 1.0:\n train_dataset.examples = train_dataset.examples[:int(len(\n train_dataset.examples)*args.data_portion)]\n eval_dataset.examples = eval_dataset.examples[:int(len(\n eval_dataset.examples)*args.data_portion)]\n logger.info(\"Training and validation set limited to \" + str(\n args.data_portion) + \" portion of original data.\")\n\nlogger.info(\"Training/evaluation parameters %s\", args)\n\nglobal_step, tr_loss, model = train(args,\n train_dataset,\n model,\n tokenizer,\n eval_dataset=eval_dataset)\nlogger.info(\" global_step = %s, average loss = %s\", global_step,\n tr_loss)\n\n# Do the saving\n# Create output directory if needed\nif not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\nlogger.info(\"Saving model checkpoint to %s\", args.output_dir)\n# Save a trained model, configuration and tokenizer using `save_pretrained()`.\n# They can then be reloaded using `from_pretrained()`\n# Take care of parallel training\nmodel_to_save = model.module if hasattr(model,\n 'module') else model\nmodel_to_save.save_pretrained(args.output_dir)\n\n# Good practice: save your training arguments together with the trained model\ntorch.save(args, os.path.join(args.output_dir, 'training_args.bin'))\n\n# Load a trained model and vocabulary that you have fine-tuned\nmodel = 
BertForMaskedLM.from_pretrained(args.output_dir)\nif args.preliminary_model != 'fromScratch':\n tokenizer = BertTokenizer.from_pretrained(args.preliminary_model)\nelse:\n tokenizer = BertTokenizer.from_pretrained(args.output_dir)\nmodel.to(args.device)\n\n# Evaluation\nresults = {}\nif args.do_eval:\n checkpoints = [args.output_dir]\n\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split('-')[-1] if len(\n checkpoints) > 1 else \"\"\n prefix = checkpoint.split('/')[-1] if checkpoint.find(\n 'checkpoint') != -1 else \"\"\n\n model = BertForMaskedLM.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, model, eval_dataset, tokenizer, step='TestSet')\n result = dict(\n (k + '_{}'.format(global_step), v) for k, v in result.items())\n results.update(result)\ntoc = dt.now()\nprint(\"End of MLM_Training_transformers.py Script.\")\nprint('Total Script Runtime: ' + str(toc-tic))\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"numpy.random.seed",
"torch.tensor",
"torch.cuda.device_count",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.utils.data.RandomSampler"
]
] |
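A minimal sketch (assuming transformers >= 4.x and PyTorch are installed) of the masked-language-modeling collation pattern that MLM_Training_transformers.py in the row above builds its train and eval loaders around; the checkpoint name and toy sentences are placeholders, not taken from the file.

```python
# Sketch only: the 'bert-base-uncased' checkpoint and the two sentences are
# illustrative stand-ins for the report text the script actually trains on.
from transformers import BertTokenizer
from transformers.data.data_collator import DataCollatorForLanguageModeling

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer, mlm=True, mlm_probability=0.15)

# Tokenize two toy sentences and let the collator build one masked batch.
examples = [tokenizer("screening mammogram was negative"),
            tokenizer("dense breast tissue noted bilaterally")]
batch = collator([{"input_ids": e["input_ids"]} for e in examples])
print(batch["input_ids"].shape, batch["labels"].shape)  # padded (2, seq_len) tensors
```

The collator pads the examples and replaces roughly 15% of the tokens with [MASK], returning the masked input_ids together with labels that are -100 everywhere except at the masked positions.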
PurplePean/AIX360 | [
"0a71cfe372b91078dd7887d7597371e09d84f968"
] | [
"aix360/data/ted_data/GenerateData.py"
] | [
"# This file will generate a synthetic dataset to predict employee attrition\r\n# Like most datasets it will have a feature vector and a Y label for each instance.\r\n# However, unlike most datasets it will also have an Explanation (E) for each instance, encoded as an non-negative integer.\r\n# This is motivated by the TED framework, but can be used by other explainability algorithms as a metric for explainability\r\n# See the AIES'19 paper by Hind et al for more information on the TED framework.\r\n# See the tutorial notebook TED_Cartesian_test for information about how to use this dataset and the TED framework.\r\n# The comments in this code also provide some insight into how this dataset is generated\r\n\r\nimport random\r\nfrom random import choices\r\nimport pandas as pd\r\n\r\nAny = -99 # This is only applicable in the rule\r\nLow = -1 # These 3, Low, Med, High, can be values in the dataset and are used in the rules\r\nMed = -2\r\nHigh = -3\r\nYes = -10 # This is the positive Y label\r\nNo = -11 # This is the negative Y label\r\nRandom = -12 # This signfiies a random choice should be made for the Y label (either Yes or No) ]\r\n\r\n# Features, values, and distribution, details below\r\nfeatureThresholds = [\r\n # 1 Position: 4(5%), 3(20%), 2(30%), 1(45%)\r\n [4, [0.05, 0.20, 0.30, 0.45]],\r\n\r\n # 2 Organization \"Org\": 3(30%); 2(30%); 1(40%)\r\n [3, [0.30, 0.30, 0.40]],\r\n\r\n # 3 Potential \"Pot\": Yes (50%), No (50%)\r\n [2, [0.50, 0.50]],\r\n\r\n # 4 Rating value \"Rat\": High(15%), Med(80%), Low(5%)\r\n [3, [0.15, 0.80, 0.05]],\r\n\r\n # 5 Rating Slope \"Slope\": High (15%), Med(80%), Low(5%)\r\n [3, [0.15, 0.80, 0.05]],\r\n\r\n # 6 Salary Competitiveness \"Sal\": High (10%); Med(70%); Low(20%)\r\n [3, [0.10, 0.70, 0.20]],\r\n\r\n # 7 Tenure Low \"TenL\" & High Values \"TenH\": [0..360], 30% in 0..24; 30% in 25..60; 40% in 61..360\r\n [3, [0.30, 0.30, 0.40], [[0, 24], [25, 60], [61, 360]]],\r\n\r\n # 8 Position Tenure Low \"BTenL\" & High Values \"BTenH\": [0..360], 70% in 0..12; 20% in 13..24; 10% in 25..360\r\n # Position tenure needs to be lower than tenure, ensured in generation code below\r\n [3, [0.70, 0.20, 0.10], [[0, 12], [13, 24], [25, 360]]]\r\n]\r\n\r\n# Some convenient population lists\r\nHighMedLowPopulation = [High, Med, Low]\r\nYesNoPopulation = [Yes, No]\r\nIndex3Population = [0, 1, 2]\r\nInteger4Population = [4, 3, 2, 1]\r\nInteger3Population = [3, 2, 1]\r\n\r\n# Rules used to label a feature vector with a label and an explanation\r\n# Format: features, label, explanation #, Explanation String \r\nRetentionRules = [ \r\n #POS ORG Pot RAT Slope SALC TENL H BTEN LH \r\n [Any, 1, Any, High, Any,\tLow, Any, Any, Any, Any, #0\r\n Yes, 2, \"Seeking Higher Salary in Org 1\"],\r\n [1, 1,\t Any, Any, Any,\tAny, Any, Any, 15, Any,\t#1\r\n Yes, 3, \"Promotion Lag, Org 1, Position 1\"],\r\n [2, 1,\t Any, Any, Any,\tAny, Any, Any, 15, Any,\t#2\r\n Yes, 3, \"Promotion Lag, Org 1, Position 2\"],\r\n [3, 1,\t Any, Any, Any,\tAny, Any, Any, 15, Any,\t#3\r\n Yes, 3, \"Promotion Lag, Org 1, Position 3\"],\r\n [1, 2,\t Any, Any, Any,\tAny, Any, Any, 20, Any,\t#4\r\n Yes, 4, \"Promotion Lag, Org 2, Position 1\"],\r\n [2, 2,\t Any, Any, Any,\tAny, Any, Any, 20, Any,\t#5\r\n Yes, 4, \"Promotion Lag, Org 2, Position 2\"],\r\n [3, 2, Any, Any, Any,\tAny, Any, Any, 30, Any,\t#6\r\n Yes, 5, \"Promotion Lag, Org 2, Position 3\"],\r\n [1, 3, Any, Any, Any,\tAny, Any, Any, 20, Any,\t#7\r\n Yes, 6, \"Promotion Lag, Org 3, Position 1\"],\r\n [2, 3,\t Any, Any, Any,\tAny, Any, Any, 30, 
Any,\t#8\r\n Yes, 7, \"Promotion Lag, Org 3, Position 2\"],\r\n [3, 3,\t Any, Any, Any,\tAny, Any, Any, 30, Any,\t#9\r\n Yes, 7, \"Promotion Lag, Org 3, Position 3\"],\r\n [1, 1, Any, Any, Any,\tAny, 0, 12, Any, Any,\t#10\r\n Yes, 8, \"New employee, Org 1, Position 1\"],\r\n [2, 1, Any, Any, Any,\tAny, 0, 12, Any, Any,\t#11\r\n Yes, 8, \"New employee, Org 1, Position 2\"],\r\n [3, 1, Any, Any, Any,\tAny, 0, 30, Any, Any,\t#12\r\n Yes, 9, \"New employee, Org 1, Position 3\"],\r\n [1, 2, Any, Any, Any,\tAny, 0, 24, Any, Any,\t#13\r\n Yes, 10, \"New employee, Org 2, Position 1\"],\r\n [2, 2, Any, Any, Any,\tAny, 0, 30, Any, Any,\t#14\r\n Yes, 11, \"New employee, Org 2, Position 2\"],\r\n [Any, 1, Any, Low, High, Any, Any, Any, Any, Any,\t#15\r\n Yes, 13, \"Disappointing evaluation, Org 1\"],\r\n [Any, 2, Any, Low, High, Any, Any, Any, Any, Any,\t#16\r\n Yes, 14, \"Disappointing evaluation, Org 2\"],\r\n [Any, Any, Yes, Med, High, Low, Any, Any, Any, Any,\t#17\r\n Yes, 15, \"Compensation doesn't match evaluations, Med rating\"],\r\n [Any, Any, Yes, High, High, Low, Any, Any, Any, Any,\t#18\r\n Yes, 15, \"Compensation doesn't match evaluations, High rating\"],\r\n [Any, 1, Yes, Med, High, Med, Any, Any, Any, Any,\t#19\r\n\t Yes, 16, \"Compensation doesn't match evaluations, Org 1, Med rating\"],\r\n [Any, 2, Yes, Med, High, Med, Any, Any, Any, Any,\t#20\r\n\t Yes, 16, \"Compensation doesn't match evaluations, Org 2, Med rating\"],\r\n [Any, 1, Yes, High, High, Med, Any, Any, Any, Any,\t#21\r\n\t Yes, 16, \"Compensation doesn't match evaluations, Org 1, High rating\"],\r\n [Any, 2, Yes, High, High, Med, Any, Any, Any, Any,\t#22\r\n\t Yes, 16, \"Compensation doesn't match evaluations, Org 2, High rating\"],\r\n [Any, 1, Any, Any, Med,\tMed, 120, 180, Any, Any,\t#23\r\n\t Yes, 17, \"Mid-career crisis, Org 1\"],\r\n [Any, 2, Yes, Any, Any,\tMed, 130, 190, Any, Any,\t#24\r\n\t Yes, 18, \"Mid-career crisis, Org 2\"]\r\n]\r\n\r\ndef ruleValToString(val):\r\n \"\"\" Convert the value passed into a string \"\"\"\r\n if val == Any :\r\n return \"Any\"\r\n elif val == Low :\r\n return \"Low\"\r\n elif val == Med :\r\n return \"Med\"\r\n elif val == High :\r\n return \"High\"\r\n elif val == Yes :\r\n return \"Yes\"\r\n elif val == No :\r\n return \"No\"\r\n elif val == Random :\r\n return \"Random\"\r\n else :\r\n return str(val)\r\n\r\ndef printFeatureStringHeader() :\r\n \"\"\" Print the feature headings \"\"\"\r\n print(\" Feature Headings\")\r\n print(\"[Pos, Org, Pot, Rating, Slope, Salary Competitiveness, Tenure, Position Tenure]\")\r\n \r\ndef featuresToString(featureVector) :\r\n \"\"\" Convert a feature vector into is string format\"\"\"\r\n val = \"[\"\r\n for i in range(0, 2) : # These features are just ints, Position, Organization\r\n val += str(featureVector[i])\r\n val += \" \" \r\n for i in range(2, 6) : # show encoding for these: Potential, Rating, Rating Slope, Salary Competiveness\r\n val += ruleValToString(featureVector[i]) \r\n val += \" \"\r\n for i in range(6, 8) : # These features are just ints: Tenure and Position Tenure\r\n val += str(featureVector[i])\r\n val += \" \" \r\n val += \"]\"\r\n return val\r\n\r\ndef printRule(rule) :\r\n \"\"\" Print the passed rule \"\"\"\r\n print(\"Rule: \", end='')\r\n for i in rule[0:1]: # ints or Any: Position and Organization\r\n if i == Any:\r\n print(ruleValToString(i) + \", \", end='')\r\n\r\n for i in rule[2:5]: # encoded: Potentional, Rating, Rating Slope, Salary Competitiveness\r\n print(ruleValToString(i) + \", \", end='')\r\n\r\n for i 
in rule[6:9]: # next 4 are ints or ANY: Tenure Low, Tenure High, Position Tenure Low, Position Tenure High\r\n if i == Any :\r\n print(ruleValToString(i) + \", \", end='')\r\n else :\r\n print(str(i) + \", \", end='') \r\n print(\"==> \"+ ruleValToString(rule[10]) + \"[\" + str(rule[11]) + \"] \" + str(rule[12]))\r\n\r\ndef printRules(rules) :\r\n \"\"\" print all rules\"\"\"\r\n for r in rules:\r\n printRule(r)\r\n\r\n########################################################################\r\n\r\ndef chooseRangeValue(thresholds, rangeList):\r\n \"\"\" Generate a random value based on the probability weights (thresholds) and list of ranges passed\r\n Args: \r\n thresholds : list of probalities for each choice\r\n rangeList: a list of pair lists giving the lower and upper bounds to choose value from \r\n \"\"\"\r\n\r\n # pick a number 1..3 from weights\r\n rangeVal = choices(Index3Population, thresholds)\r\n\r\n # get the appropriate range given rangeVal\r\n interval = rangeList[rangeVal[0]]\r\n\r\n # construct a population list from the result\r\n intervalPopulation = list(range(interval[0], interval[1]))\r\n\r\n # construct a equally prob weights list\r\n numElements = interval[1] - interval[0]\r\n probVal = 1.0 / numElements\r\n probList = [probVal] * numElements\r\n\r\n # now choose the value from the population based on the weights\r\n val = choices(intervalPopulation, probList)\r\n return val[0]\r\n\r\n\r\ndef chooseValueAndAppend(instance, population, weights) :\r\n \"\"\" Choose a random value from the population using weights list and append it to the passed instance\r\n \"\"\"\r\n val = choices(population, weights)\r\n instance.append(val[0])\r\n\r\ndef generateFeatures(numInstances) :\r\n \"\"\" generate the features (X) values for the dataset\r\n Args:\r\n numInstances (int) : number of instances to genreate\r\n Returns:\r\n dataset (list of lists) : the dataset with features, but no labels or explanations yet\r\n \"\"\"\r\n assert(numInstances > 0)\r\n\r\n dataset = []\r\n for i in range(numInstances) :\r\n instance = []\r\n\r\n #POS ORG Pot Rating Slope SALC TENL H BTEN LH \r\n chooseValueAndAppend(instance, Integer4Population, featureThresholds[0][1]) # Position\r\n chooseValueAndAppend(instance, Integer3Population, featureThresholds[1][1]) # Org\r\n chooseValueAndAppend(instance, YesNoPopulation, featureThresholds[2][1]) # Potential\r\n chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[3][1]) # Rating\r\n chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[4][1]) # Rating slope\r\n chooseValueAndAppend(instance, HighMedLowPopulation, featureThresholds[5][1]) # Sal competitiveness\r\n\r\n val1 = chooseRangeValue(featureThresholds[6][1], featureThresholds[6][2]) # Tenure\r\n instance.append(val1)\r\n\r\n # Position tenure needs to be <= Tenure\r\n val2 = chooseRangeValue(featureThresholds[7][1], featureThresholds[7][2]) # Pos Tenure\r\n if val2 > val1 :\r\n val2 = val1\r\n instance.append(val2)\r\n dataset.append(instance)\r\n \r\n return dataset\r\n\r\n#####################################################################################################\r\n\r\ndef match(ruleVal, featureVal) :\r\n \"\"\" Check if passed ruleVal matches the featureVal or if ruleVal is Any, which matches everything \r\n \"\"\"\r\n\r\n # print(\"Match called: \"+ ruleValToString(ruleVal) + \" \" + ruleValToString(featureVal))\r\n if ruleVal == Any :\r\n return True\r\n return (ruleVal == featureVal)\r\n\r\ndef intervalMatch(ruleValLower, ruleValUpper, 
featureVal) :\r\n \"\"\" Check to see if featureVal is in the interval defined by [ruleValLower, ruleValUpper)\r\n \"\"\"\r\n\r\n # Any in lower bound matches all values, (upper bound doesn't matter)\r\n if ruleValLower == Any :\r\n return True\r\n\r\n if ruleValLower <= featureVal :\r\n # Any in upper bound means infinitity\r\n if featureVal < ruleValUpper or ruleValUpper == Any :\r\n return True\r\n \r\n return False\r\n\r\ndef ruleMatch(rule, featureVector) :\r\n \"\"\" Determine if the passed featureVector matches the passed rule \r\n \"\"\"\r\n if (False) :\r\n print(\"ruleMatch called, \", end=\"\")\r\n printRule(rule)\r\n print(\" feature vector: \" + featuresToString(featureVector) )\r\n\r\n for i in range(0, 6) : # loop over first 6 features, 0..5\r\n if not match(rule[i], featureVector[i]) : # if we don't find a feature match, the rule doesn't match\r\n # print(\"Didn't match feature #\", i, ruleValToString(featureVector[i]))\r\n return False\r\n \r\n # These features are interval-based, so need a different matching routine\r\n if not intervalMatch(rule[6], rule[7], featureVector[6]) : # rule[6] and rule[7] have the lower and upper bounds of interval\r\n # print(\"Didn't match feature # 6: \", featureVector[6])\r\n return False\r\n if not intervalMatch(rule[8], rule[9], featureVector[7]) : # rule[8] and rule[9] have the lower and upper bounds of interval\r\n # print(\"Didn't match feature # 7: \", featureVector[7])\r\n return False\r\n \r\n # print(\"Matched all features\")\r\n return True # if we didn't find a non-match by now, we found a match\r\n\r\ndef findRule(instance, ruleSet) :\r\n \"\"\" find the rule(s) that matches the feture vector passed\r\n \"\"\"\r\n\r\n # print(\"*Looking for rule match for Feature vector: \" + featuresToString(instance))\r\n ruleNumber = 0 # counter to track rule number\r\n ruleMatches = [] # will hold all rule numbers that matched\r\n for rule in ruleSet :\r\n if (ruleMatch(rule, instance)) :\r\n ruleMatches.append(ruleNumber)\r\n counts[ruleNumber] += 1 # update global histogram of rule matches for stats reporting\r\n\r\n if (False) :\r\n print(\" ruleMatch found at rule #\" + str(ruleNumber))\r\n print(\" \", end=\"\")\r\n printRule(rule)\r\n\r\n ruleNumber += 1\r\n\r\n return ruleMatches\r\n\r\ndef countAnys(rule) :\r\n \"\"\" Count the number of Anys in the passed rule. 
An \"Any\" is a wildcard that matches all values\r\n \"\"\"\r\n count = 0\r\n for feature in RetentionRules[rule] :\r\n if feature == Any :\r\n count += 1\r\n\r\n return count\r\n\r\ndef pickBestRule(ruleList) :\r\n \"\"\" Choose the rule with the least number of Any's in it\r\n \"\"\"\r\n assert(len(ruleList) > 0)\r\n\r\n # print(\"ruleList: \", ruleList)\r\n minAnys = len(RetentionRules[0]) + 1 # initialize to a value larger than possible # of Anys in a rule\r\n bestRule = -1\r\n for rule in ruleList :\r\n # Count # of Any's in rule # rule\r\n count = countAnys(rule)\r\n if count < minAnys :\r\n minAnys = count\r\n bestRule = rule\r\n\r\n assert(bestRule != -1) # We should find a best rule\r\n return bestRule\r\n\r\ndef addLabelsAndExplanations(dataset, rules) :\r\n \"\"\" This function will use a ruleset to add labels (Y) and explanations/rules (E) to a passed dataset\r\n Arg:\r\n dataset (list of lists) : a list of feature vectors (list)\r\n rules (list of lists) : a list of rules\r\n \"\"\"\r\n\r\n noMatches = 0 # Counters to record how often there are no (Yes) matches, 1 (Yes) match, and multiple (Yes) matches\r\n multiMatches = 0\r\n oneMatches = 0\r\n for instance in dataset :\r\n ruleMatches = findRule(instance, rules)\r\n\r\n if len(ruleMatches) == 0 : # We didn't match a (Yes) rule, so this ia No situation\r\n rule = NoRiskRuleNum\r\n label = No\r\n noMatches +=1\r\n elif len(ruleMatches) > 1 : # Matched multiple Yes rules, need to pick one\r\n rule = pickBestRule(ruleMatches)\r\n assert(rule >= 0 and rule < len(rules)) # Ensure rule number is valid\r\n label = Yes\r\n multiMatches += 1\r\n else : # Found 1 Yes rule match, it's the winner\r\n rule = ruleMatches[0]\r\n label = Yes\r\n oneMatches += 1\r\n assert(rule >= 0 and rule < len(rules)) # Ensure rule number is valid\r\n\r\n # print(\"Label: \" + ruleValToString(label) + \", Rule: \" + ruleValToString(rule))\r\n\r\n instance.append(label)\r\n instance.append(rule) # add the label and explanation (rule #) to the featureVector\r\n\r\n if (True) :\r\n print(\"\\nRule matching statistics: \")\r\n totalYes = oneMatches + multiMatches\r\n total = oneMatches + multiMatches + noMatches\r\n print(\" Yes Labels: {}/{} ({:.2f}%)\".format(totalYes, total, totalYes/total*100))\r\n print(\" Matched 1 Yes rule: {}/{} ({:.2f}%)\".format(oneMatches, totalYes, oneMatches/totalYes*100))\r\n print(\" Matched multiple Yes rules: {}/{} ({:.2f}%)\".format(multiMatches, totalYes, multiMatches/totalYes*100))\r\n print(\" No Laels: {}/{} ({:.2f}%)\".format(noMatches, total, noMatches/total*100))\r\n\r\ndef printRuleUsage(counts, total) :\r\n print(\"\\nHistogram of rule usage:\")\r\n ruleNum = 0\r\n for num in counts :\r\n print(\" Rule {} was used {} times, {:.2f}%\".format(ruleNum, num, num/total*100))\r\n ruleNum += 1\r\n\r\n \r\nnumRentionRules = len(RetentionRules)\r\ncounts = [0]*numRentionRules\r\nNoRiskRuleNum = numRentionRules # the No Risk to leave rule is 1 more than than the total rules [0..]\r\n\r\nrandom.seed(1)\r\n# printFeatureStringHeader()\r\nnumInstances = 10000\r\ndataset = generateFeatures(numInstances)\r\n\r\naddLabelsAndExplanations(dataset, RetentionRules)\r\n\r\nprintRuleUsage(counts, numInstances)\r\n\r\n# insert TED headers\r\nNumFeatures = len(featureThresholds)\r\nheader = list(range(NumFeatures))\r\nheader.append(\"Y\")\r\nheader.append(\"E\")\r\ndataset.insert(0, header)\r\n\r\n# write to csv file\r\nmy_df = pd.DataFrame(dataset)\r\nmy_df.to_csv('Retention.csv', index=False, header=False)\r\n"
] | [
[
"pandas.DataFrame"
]
] |
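A short sketch of the weighted-sampling and CSV-export pattern GenerateData.py in the row above relies on; the single "Rating" feature, its weights, and the output filename are illustrative stand-ins for the full feature table in the file.

```python
# Sketch only: one feature drawn with random.choices and written out via
# pandas.DataFrame, mirroring chooseValueAndAppend() and the final to_csv call.
import random
from random import choices
import pandas as pd

random.seed(1)
ratings = [-3, -2, -1]            # High, Med, Low encodings used by the file above
weights = [0.15, 0.80, 0.05]      # probability of drawing each rating

rows = [[choices(ratings, weights)[0]] for _ in range(5)]
df = pd.DataFrame(rows, columns=["Rating"])
df.to_csv("toy_retention.csv", index=False)
```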
asadziach/CarND-Semantic-Segmentation | [
"c3431ab5dc3878b82bfc66e7384005f7a93fcb16"
] | [
"main.py"
] | [
"import os.path\nimport tensorflow as tf\nimport helper\nimport ImageProcessor\nimport warnings\nfrom distutils.version import LooseVersion\nimport project_tests as tests\nimport scipy.misc\nfrom glob import glob\nfrom moviepy.editor import VideoFileClip\nimport time\nimport timeit\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion(\n '1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn(\n 'No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n \"\"\"\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing \"variables/\" and \"saved_model.pb\"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n \"\"\"\n # Use tf.saved_model.loader.load to load the model and weights\n vgg_tag = 'vgg16'\n vgg_input_tensor_name = 'image_input:0'\n vgg_keep_prob_tensor_name = 'keep_prob:0'\n vgg_layer3_out_tensor_name = 'layer3_out:0'\n vgg_layer4_out_tensor_name = 'layer4_out:0'\n vgg_layer7_out_tensor_name = 'layer7_out:0'\n\n tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)\n graph = tf.get_default_graph()\n image_input = graph.get_tensor_by_name(vgg_input_tensor_name)\n keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)\n layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)\n layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)\n layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)\n\n return image_input, keep_prob, layer3_out, layer4_out, layer7_out\n\n\ntests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n \"\"\"\n Create the layers for a fully convolutional network. 
Build skip-layers using the vgg layers.\n :param vgg_layer3_out: TF Tensor for VGG Layer 3 tensor\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 tensor\n :param vgg_layer7_out: TF Tensor for VGG Layer 7 tensor\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of tensor\n \"\"\"\n # Outputs of pooling layers 3 and 4 are scaled before they are fed into\n # the 1x1 convolutions.\n vgg_layer3_out = tf.multiply(vgg_layer3_out, 0.0001)\n vgg_layer4_out = tf.multiply(vgg_layer4_out, 0.01)\n\n regularizer = tf.contrib.layers.l2_regularizer(1e-3)\n conv_1x1_l3 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same',\n kernel_regularizer=regularizer)\n conv_1x1_l4 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same',\n kernel_regularizer=regularizer)\n conv_1x1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same',\n kernel_regularizer=regularizer)\n\n tensor = tf.layers.conv2d_transpose(\n conv_1x1_l7, num_classes, 4, strides=(2, 2), padding='same', kernel_regularizer=regularizer)\n tensor = tf.add(tensor, conv_1x1_l4)\n tensor = tf.layers.conv2d_transpose(\n tensor, num_classes, 4, strides=(2, 2), padding='same', kernel_regularizer=regularizer)\n tensor = tf.add(tensor, conv_1x1_l3)\n tensor = tf.layers.conv2d_transpose(\n tensor, num_classes, 16, strides=(8, 8), padding='same', kernel_regularizer=regularizer)\n\n return tensor\n\n\ntests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n \"\"\"\n Build the TensorFLow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n \"\"\"\n logits = tf.reshape(nn_last_layer, (-1, num_classes))\n labels = tf.reshape(correct_label, (-1, num_classes))\n\n cross_entropy_loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))\n\n '''\n When adding l2-regularization, setting a regularizer in the arguments of \n the tf.layers is not enough. Regularization loss terms must be manually \n added to your loss function. otherwise regularization is not implemented.\n '''\n regularization_losses = tf.get_collection(\n tf.GraphKeys.REGULARIZATION_LOSSES)\n\n cross_entropy_loss = tf.add(cross_entropy_loss, sum(regularization_losses))\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(cross_entropy_loss)\n\n return logits, train_op, cross_entropy_loss\n\n\ntests.test_optimize(optimize)\n\n\ndef train_nn(sess, step, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate, saver=None):\n \"\"\"\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. 
Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n \"\"\"\n\n for image, label in (get_batches_fn(batch_size)):\n _, loss = sess.run(\n [train_op, cross_entropy_loss], feed_dict={input_image: image, correct_label: label,\n keep_prob: 1.0, learning_rate: 1e-4})\n print('Epoch: {} loss: {:.3f}'.format(step + 1, loss))\n \n if saver:\n saver.save(sess, \"./ckpts/model.ckpt\", global_step=step)\n \n return loss\n\n\n#tests.test_train_nn(train_nn)\n\n\ndef run():\n batches = 13\n epochs = 80\n restore_model = True\n training = True\n compute_iou = True\n save_inference_samples = True\n do_exteranl_tests = False\n save_graph = True\n\n image_shape = (160, 576)\n data_dir = './data'\n runs_dir = './runs'\n # Change following to switch datasets\n dataset = helper.KittiDataset(data_dir, image_shape)\n num_classes = dataset.get_num_classes()\n tests.test_for_kitti_dataset(data_dir)\n\n # Download pretrained vgg model\n helper.maybe_download_pretrained_vgg(data_dir)\n\n with tf.Session() as sess:\n correct_label = tf.placeholder(\n tf.int32, [None, None, None, num_classes])\n learning_rate = tf.placeholder(tf.float32)\n\n # Path to vgg model\n vgg_path = os.path.join(data_dir, 'vgg')\n # Create function to get batches\n get_batches_fn = dataset.gen_batch_function()\n\n input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(\n sess, vgg_path)\n tensor = layers(layer3_out, layer4_out, layer7_out, num_classes)\n logits, optimizer, cross_entropy_loss = optimize(tensor, correct_label, learning_rate,\n num_classes)\n\n if compute_iou:\n predictions = tf.argmax(tf.nn.softmax(tensor), axis=-1)\n gt = tf.argmax(correct_label, axis=-1)\n mean_iou, iou_update_op = tf.metrics.mean_iou(\n gt, predictions, num_classes)\n\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n\n saver = tf.train.Saver(max_to_keep=2, keep_checkpoint_every_n_hours=1)\n restore_path = tf.train.latest_checkpoint('./ckpts/')\n if restore_path and restore_model:\n print(\"Resotring model from: %s \" % restore_path)\n saver.restore(sess, restore_path)\n\n for step in range(epochs):\n if training:\n print(\"Training...\")\n start_time = timeit.default_timer()\n loss = train_nn(sess, step, batches, get_batches_fn, optimizer, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate, saver)\n elapsed = timeit.default_timer() - start_time\n print('Epoch: {} loss: {:.3f} time: {:.3f}'.format(step + 1, loss, elapsed))\n \n if save_inference_samples:\n print(\"Saving inference samples...\")\n dataset.save_inference_samples(\n runs_dir, sess, logits, keep_prob, input_image)\n\n #compute mean_iou on traning images\n if compute_iou:\n print(\"Computing IOU...\")\n mean_ious = []\n for image, label in (get_batches_fn(batches)):\n sess.run([predictions, iou_update_op], feed_dict={\n input_image: image, correct_label: label, keep_prob: 1})\n # Avoiding headaches\n # http://ronny.rest/blog/post_2017_09_11_tf_metrics/\n mean_ious.append(sess.run(mean_iou))\n print(\"Mean IOU: {:.3f}\".format(sum(mean_ious) / len(mean_ious)))\n \n if do_exteranl_tests:\n print(\"Processing test images...\")\n processor = ImageProcessor.ImageProcessor(\n image_shape, 
sess, logits, keep_prob, input_image)\n for idx, image_file in enumerate(glob(\"./test_images/*.jpg\")):\n image = scipy.misc.imread(image_file)\n image = processor.process_image(image)\n scipy.misc.imsave(os.path.join(\n \"output_images\", str(idx) + \".png\"), image)\n\n print(\"Processing test video...\")\n videoname = 'test_video'\n output_file = videoname + '_output.mp4'\n input_file = videoname + '.mp4'\n\n clip = VideoFileClip(input_file)\n video_clip = clip.fl_image(processor.process_image)\n video_clip.write_videofile(output_file, audio=False)\n\n if save_graph:\n print(\"Saving graph...\")\n # Save GraphDef\n tf.train.write_graph(sess.graph_def,'.','graph.pb', as_text=False)\n \n print(\"Done.\")\n\n\nif __name__ == '__main__':\n run()\n"
] | [
[
"tensorflow.layers.conv2d",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.reshape",
"tensorflow.nn.softmax",
"tensorflow.train.write_graph",
"tensorflow.global_variables_initializer",
"tensorflow.multiply",
"tensorflow.metrics.mean_iou",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.layers.conv2d_transpose",
"tensorflow.test.gpu_device_name",
"tensorflow.get_collection",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.saved_model.loader.load",
"tensorflow.local_variables_initializer",
"tensorflow.placeholder",
"tensorflow.train.AdamOptimizer",
"tensorflow.add",
"tensorflow.train.latest_checkpoint",
"tensorflow.get_default_graph",
"tensorflow.argmax"
]
] |
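A sketch of the FCN-8-style decoder built by layers() in main.py above, written against TensorFlow 1.x (tf.placeholder / tf.layers); the placeholder shapes are toy values chosen so the three upsampling stages line up with the 160x576 input used in run(), and the l2 regularizer is omitted for brevity.

```python
# Sketch only (TF 1.x API): 1x1 convolutions on the VGG pool3/pool4/conv7
# outputs, upsampled with transposed convolutions and added as skip connections.
import tensorflow as tf

num_classes = 2
pool3 = tf.placeholder(tf.float32, [None, 20, 72, 256])   # toy VGG layer3_out shape
pool4 = tf.placeholder(tf.float32, [None, 10, 36, 512])   # toy VGG layer4_out shape
conv7 = tf.placeholder(tf.float32, [None, 5, 18, 4096])   # toy VGG layer7_out shape

c3 = tf.layers.conv2d(pool3, num_classes, 1, padding='same')
c4 = tf.layers.conv2d(pool4, num_classes, 1, padding='same')
c7 = tf.layers.conv2d(conv7, num_classes, 1, padding='same')

x = tf.layers.conv2d_transpose(c7, num_classes, 4, strides=(2, 2), padding='same')
x = tf.add(x, c4)                                          # first skip connection
x = tf.layers.conv2d_transpose(x, num_classes, 4, strides=(2, 2), padding='same')
x = tf.add(x, c3)                                          # second skip connection
logits = tf.layers.conv2d_transpose(x, num_classes, 16, strides=(8, 8), padding='same')
```

The final 8x transposed convolution brings the toy 20x72 feature map back to the 160x576 image resolution, matching the image_shape the script trains with.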
huilin16/PaddleRS | [
"ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a"
] | [
"tools/mask2geojson.py"
] | [
"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport codecs\r\nimport argparse\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport geojson\r\nfrom geojson import Polygon, Feature, FeatureCollection\r\n\r\nfrom utils import Raster, Timer\r\n\r\n\r\ndef _gt_convert(x, y, geotf):\r\n x_geo = geotf[0] + x * geotf[1] + y * geotf[2]\r\n y_geo = geotf[3] + x * geotf[4] + y * geotf[5]\r\n return x_geo, y_geo\r\n\r\n\r\n@Timer\r\ndef convert_data(mask_path, save_path, epsilon=0):\r\n raster = Raster(mask_path)\r\n img = raster.getArray()\r\n ext = save_path.split(\".\")[-1]\r\n if ext != \"json\" and ext != \"geojson\":\r\n raise ValueError(\"The ext of `save_path` must be `json` or `geojson`, not {}.\".format(ext))\r\n geo_writer = codecs.open(save_path, \"w\", encoding=\"utf-8\")\r\n clas = np.unique(img)\r\n cv2_v = (cv2.__version__.split(\".\")[0] == \"3\")\r\n feats = []\r\n if not isinstance(epsilon, (int, float)):\r\n epsilon = 0\r\n for iclas in range(1, len(clas)):\r\n tmp = np.zeros_like(img).astype(\"uint8\")\r\n tmp[img == iclas] = 1\r\n # TODO: Detect internal and external contour\r\n results = cv2.findContours(tmp, cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_TC89_KCOS)\r\n contours = results[1] if cv2_v else results[0]\r\n # hierarchys = results[2] if cv2_v else results[1]\r\n if len(contours) == 0:\r\n continue\r\n for contour in contours:\r\n contour = cv2.approxPolyDP(contour, epsilon, True)\r\n polys = []\r\n for point in contour:\r\n x, y = point[0]\r\n xg, yg = _gt_convert(x, y, raster.geot)\r\n polys.append((xg, yg))\r\n polys.append(polys[0])\r\n feat = Feature(\r\n geometry=Polygon([polys]), properties={\"class\": int(iclas)})\r\n feats.append(feat)\r\n gjs = FeatureCollection(feats)\r\n geo_writer.write(geojson.dumps(gjs))\r\n geo_writer.close()\r\n\r\n\r\nparser = argparse.ArgumentParser(description=\"input parameters\")\r\nparser.add_argument(\"--mask_path\", type=str, required=True, \\\r\n help=\"The path of mask tif.\")\r\nparser.add_argument(\"--save_path\", type=str, required=True, \\\r\n help=\"The path to save the results, file suffix is `*.json/geojson`.\")\r\nparser.add_argument(\"--epsilon\", type=float, default=0, \\\r\n help=\"The CV2 simplified parameters, `0` is the default.\")\r\n\r\nif __name__ == \"__main__\":\r\n args = parser.parse_args()\r\n convert_data(args.mask_path, args.save_path, args.epsilon)\r\n"
] | [
[
"numpy.zeros_like",
"numpy.unique"
]
] |
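A minimal sketch of the mask-to-polygon step inside convert_data() in mask2geojson.py above: OpenCV extracts the outer contours of one class and each ring is wrapped as a GeoJSON feature. The 8x8 mask is synthetic and the geotransform conversion is omitted here.

```python
# Sketch only: synthetic binary mask instead of a raster band; no _gt_convert step.
import cv2
import numpy as np
import geojson
from geojson import Polygon, Feature, FeatureCollection

mask = np.zeros((8, 8), dtype="uint8")
mask[2:6, 2:6] = 1                                   # one square "class 1" region

results = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
contours = results[0] if len(results) == 2 else results[1]   # OpenCV 4 vs 3 return shape

feats = []
for contour in contours:
    ring = [tuple(int(v) for v in pt[0]) for pt in contour]
    ring.append(ring[0])                             # close the ring, as the script does
    feats.append(Feature(geometry=Polygon([ring]), properties={"class": 1}))

print(geojson.dumps(FeatureCollection(feats)))
```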
Lufeifeina/models | [
"d7d260d4c690e5163070e21d75df372ab559ea23"
] | [
"official/core/train_lib.py"
] | [
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TFM common training driver library.\"\"\"\n# pytype: disable=attribute-error\nimport os\nfrom typing import Any, Mapping, Optional, Tuple, List\n\n# Import libraries\n\nfrom absl import logging\nimport orbit\nimport tensorflow as tf\n\nfrom official.core import actions\nfrom official.core import base_task\nfrom official.core import base_trainer\nfrom official.core import config_definitions\nfrom official.core import train_utils\n\nmaybe_create_best_ckpt_exporter = train_utils.maybe_create_best_ckpt_exporter\n\n\ndef run_experiment(\n distribution_strategy: tf.distribute.Strategy,\n task: base_task.Task,\n mode: str,\n params: config_definitions.ExperimentConfig,\n model_dir: str,\n run_post_eval: bool = False,\n save_summary: bool = True,\n train_actions: Optional[List[orbit.Action]] = None,\n eval_actions: Optional[List[orbit.Action]] = None,\n trainer: Optional[base_trainer.Trainer] = None,\n controller_cls=orbit.Controller\n) -> Tuple[tf.keras.Model, Mapping[str, Any]]:\n \"\"\"Runs train/eval configured by the experiment params.\n\n Args:\n distribution_strategy: A distribution distribution_strategy.\n task: A Task instance.\n mode: A 'str', specifying the mode. Can be 'train', 'eval', 'train_and_eval'\n or 'continuous_eval'.\n params: ExperimentConfig instance.\n model_dir: A 'str', a path to store model checkpoints and summaries.\n run_post_eval: Whether to run post eval once after training, metrics logs\n are returned.\n save_summary: Whether to save train and validation summary.\n train_actions: Optional list of Orbit train actions.\n eval_actions: Optional list of Orbit eval actions.\n trainer: the base_trainer.Trainer instance. 
It should be created within the\n strategy.scope().\n controller_cls: The controller class to manage the train and eval process.\n Must be a orbit.Controller subclass.\n\n Returns:\n A 2-tuple of (model, eval_logs).\n model: `tf.keras.Model` instance.\n eval_logs: returns eval metrics logs when run_post_eval is set to True,\n otherwise, returns {}.\n \"\"\"\n\n with distribution_strategy.scope():\n if not trainer:\n trainer = train_utils.create_trainer(\n params,\n task,\n train='train' in mode,\n evaluate=('eval' in mode) or run_post_eval,\n checkpoint_exporter=maybe_create_best_ckpt_exporter(\n params, model_dir))\n\n if trainer.checkpoint:\n if model_dir is None:\n raise ValueError('model_dir must be specified, but got None')\n checkpoint_manager = tf.train.CheckpointManager(\n trainer.checkpoint,\n directory=model_dir,\n max_to_keep=params.trainer.max_to_keep,\n step_counter=trainer.global_step,\n checkpoint_interval=params.trainer.checkpoint_interval,\n init_fn=trainer.initialize)\n else:\n checkpoint_manager = None\n\n train_actions = [] if not train_actions else train_actions\n train_actions += actions.get_train_actions(\n params, trainer, model_dir, checkpoint_manager=checkpoint_manager)\n\n eval_actions = [] if not eval_actions else eval_actions\n eval_actions += actions.get_eval_actions(params, trainer, model_dir)\n\n controller = controller_cls(\n strategy=distribution_strategy,\n trainer=trainer if 'train' in mode else None,\n evaluator=trainer,\n global_step=trainer.global_step,\n steps_per_loop=params.trainer.steps_per_loop,\n checkpoint_manager=checkpoint_manager,\n summary_dir=os.path.join(model_dir, 'train') if (save_summary) else None,\n eval_summary_dir=os.path.join(model_dir,\n params.trainer.validation_summary_subdir) if\n (save_summary) else None,\n summary_interval=params.trainer.summary_interval if\n (save_summary) else None,\n train_actions=train_actions,\n eval_actions=eval_actions)\n\n logging.info('Starts to execute mode: %s', mode)\n with distribution_strategy.scope():\n if mode == 'train' or mode == 'train_and_post_eval':\n controller.train(steps=params.trainer.train_steps)\n elif mode == 'train_and_eval':\n controller.train_and_evaluate(\n train_steps=params.trainer.train_steps,\n eval_steps=params.trainer.validation_steps,\n eval_interval=params.trainer.validation_interval)\n elif mode == 'eval':\n controller.evaluate(steps=params.trainer.validation_steps)\n elif mode == 'continuous_eval':\n\n def timeout_fn():\n if trainer.global_step.numpy() >= params.trainer.train_steps:\n return True\n return False\n\n controller.evaluate_continuously(\n steps=params.trainer.validation_steps,\n timeout=params.trainer.continuous_eval_timeout,\n timeout_fn=timeout_fn)\n else:\n raise NotImplementedError('The mode is not implemented: %s' % mode)\n\n num_params = train_utils.try_count_params(trainer.model)\n if num_params is not None:\n logging.info('Number of trainable params in model: %f Millions.',\n num_params / 10.**6)\n\n flops = train_utils.try_count_flops(trainer.model)\n if flops is not None:\n logging.info('FLOPs (multi-adds) in model: %f Billions.',\n flops / 10.**9 / 2)\n\n if run_post_eval or mode == 'train_and_post_eval':\n with distribution_strategy.scope():\n return trainer.model, controller.evaluate(\n steps=params.trainer.validation_steps)\n else:\n return trainer.model, {}\n"
] | [
[
"tensorflow.train.CheckpointManager"
]
] |
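A small sketch (toy model, placeholder directory) of the tf.train.CheckpointManager wiring that run_experiment() in train_lib.py above sets up for the trainer checkpoint; the step counter and checkpoint interval values here are arbitrary.

```python
# Sketch only: a throwaway Keras model and /tmp directory stand in for the
# trainer checkpoint and model_dir used by the real driver.
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4)])
global_step = tf.Variable(0, dtype=tf.int64)
ckpt = tf.train.Checkpoint(model=model, global_step=global_step)

manager = tf.train.CheckpointManager(
    ckpt, directory="/tmp/toy_model_dir", max_to_keep=3,
    step_counter=global_step, checkpoint_interval=100)

global_step.assign_add(100)
manager.save(checkpoint_number=int(global_step.numpy()))  # writes ckpt-100
print(manager.latest_checkpoint)
```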
szabi-luxonis/openvino | [
"c8dd831fc3ba68a256ab47edb4f6bf3cb5e804be"
] | [
"model-optimizer/extensions/middle/UpsampleToResample.py"
] | [
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging as log\nimport math\nfrom typing import Dict\n\nimport numpy as np\n\nfrom extensions.ops.Cast import Cast\nfrom extensions.ops.elementwise import Mul\nfrom extensions.ops.interpolate import Interpolate\nfrom mo.front.common.layout import get_height_dim, get_width_dim, get_depth_dim\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.front.tf.graph_utils import create_op_with_const_inputs, create_op_node_with_second_input\nfrom mo.graph.graph import Graph, Node\nfrom mo.middle.replacement import MiddleReplacementPattern\nfrom mo.ops.shape import Shape\nfrom mo.ops.strided_slice import StridedSlice\n\n\nclass UpsampleToResample(MiddleReplacementPattern):\n enabled = True\n force_clean_up = True\n\n def run_after(self):\n from extensions.middle.pass_separator import MiddleStart\n return [MiddleStart]\n\n def run_before(self):\n from extensions.middle.pass_separator import MiddleFinish\n return [MiddleFinish]\n\n def pattern(self):\n return dict(\n nodes=[\n ('upsample', dict(kind='op', op='Upsample')),\n ('output', dict(kind='data'))],\n edges=[('upsample', 'output')]\n )\n\n def replace_pattern(self, graph: Graph, match: Dict[str, Node]):\n log.debug('UpsampleToResample is triggered')\n upsample = match['upsample']\n upsample_name = upsample.soft_get('name', upsample.id)\n input_shape = upsample.in_port(0).data.get_shape()\n input_shape_rank = len(input_shape)\n if input_shape_rank not in [4, 5]:\n log.warning('The input shape is not 4D or 5D for op {}'.format(upsample.soft_get('name')))\n return\n\n depth_scale = None\n if len(upsample.in_nodes()) == 2:\n if upsample.in_node(1).value is None:\n return\n scales = upsample.in_node(1).value\n assert len(scales) in (4, 5), 'Supported scales rank is 4 or 5, but it is {} for node {}'.format(\n len(scales), upsample_name)\n if not (math.isclose(scales[0], 1, rel_tol=1e-5) and math.isclose(scales[1], 1, rel_tol=1e-5)):\n return\n height_scale = scales[2]\n width_scale = scales[3]\n if len(scales) == 5:\n depth_scale = scales[4]\n else:\n height_scale = upsample['height_scale']\n width_scale = upsample['width_scale']\n\n if 1 in upsample.in_ports() and not upsample.in_port(1).disconnected():\n upsample.in_port(1).disconnect()\n\n shape = Shape(graph, {'name': upsample_name + '/0_port'}).create_node()\n\n layout = graph.graph['layout']\n\n if input_shape_rank == 4:\n begin_value = int64_array([get_height_dim(layout, input_shape_rank)])\n factor_value = np.array([height_scale, width_scale])\n else:\n begin_value = int64_array([get_depth_dim(layout, input_shape_rank)])\n factor_value = np.array([depth_scale, height_scale, width_scale])\n\n ss = create_op_with_const_inputs(graph, StridedSlice,\n {1: begin_value,\n 2: int64_array([get_width_dim(layout, input_shape_rank) + 1]),\n 3: int64_array([1])\n },\n {'name': upsample_name + '/ss_0_port',\n 'begin_mask': int64_array([1]),\n 'end_mask': int64_array([1]),\n 
'new_axis_mask': int64_array([0]),\n 'shrink_axis_mask': int64_array([0]),\n 'ellipsis_mask': int64_array([0])\n }\n )\n\n mul = create_op_node_with_second_input(graph, Mul, factor_value, {'name': upsample_name + '/factor_mul_'})\n\n source = upsample.in_port(0).get_connection().get_source()\n source.connect(shape.in_port(0))\n shape.out_port(0).connect(ss.in_port(0))\n\n ss.out_port(0).connect(mul.in_port(0))\n\n # Create Interpolate operation\n if input_shape_rank == 4:\n axes = int64_array([get_height_dim(layout, input_shape_rank),\n get_width_dim(layout, input_shape_rank)])\n else:\n axes = int64_array([get_depth_dim(layout, input_shape_rank),\n get_height_dim(layout, input_shape_rank),\n get_width_dim(layout, input_shape_rank)])\n\n resample_op = Interpolate(graph, dict(name=upsample_name + '/Interpolate',\n axes=axes, mode=upsample.attrs()['mode'],\n antialias=0, convert_to_resample=True)).create_node()\n\n upsample.add_input_port(1, skip_if_exist=True)\n assert upsample.in_port(1).disconnected()\n mul.out_port(0).connect(resample_op.in_port(1))\n\n upsample.in_port(0).get_connection().set_destination(resample_op.in_port(0))\n upsample.out_port(0).get_connection().set_source(resample_op.out_port(0))\n\n convert_to_float = Cast(graph, dict(dst_type=np.float32)).create_node()\n convert_to_int = Cast(graph, dict(dst_type=np.int64)).create_node()\n\n mul.in_port(0).get_connection().insert_node(convert_to_float)\n mul.out_port(0).get_connection().insert_node(convert_to_int)\n"
] | [
[
"numpy.array"
]
] |
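A tiny NumPy sketch of the shape arithmetic UpsampleToResample.replace_pattern() performs before creating the Interpolate node: slice off the spatial dimensions of the input shape and multiply them by the per-axis scales. The concrete NCHW shape and scale values here are illustrative.

```python
# Sketch only: mirrors the Shape -> StridedSlice -> Mul subgraph with plain numpy.
import numpy as np

input_shape = np.array([1, 3, 24, 32])          # N, C, H, W
scales = np.array([1.0, 1.0, 2.0, 2.0])         # Upsample scales per axis

spatial_dims = input_shape[2:]                  # what the StridedSlice keeps (H, W)
factor = scales[2:]                             # height_scale, width_scale
target_spatial = (spatial_dims * factor).astype(np.int64)   # fed to Interpolate
print(target_spatial)                           # [48 64]
```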
Pandinosaurus/gala | [
"975ed783a6cb3c0afe24a921afdacf2f27184fcf"
] | [
"tests/test_watershed.py"
] | [
"import os\nimport time\nimport numpy as np\nfrom scipy import ndimage as nd\nfrom numpy.testing import assert_array_equal, assert_array_less\n\nfrom gala import morpho\n\nrundir = os.path.dirname(__file__)\n\ndef time_me(function):\n def wrapped(*args, **kwargs):\n start = time.time()\n r = function(*args, **kwargs)\n end = time.time()\n return r, (end-start)*1000\n return wrapped\n\n\ntest_idxs = list(range(4))\nnum_tests = len(test_idxs)\nfns = [os.path.join(rundir, 'toy-data/test-%02i-probabilities.txt' % i)\n for i in test_idxs]\nprobs = list(map(np.loadtxt, fns))\nfns = [os.path.join(rundir, 'toy-data/test-%02i-watershed.txt' % i)\n for i in test_idxs]\nresults = [np.loadtxt(fn, dtype=np.int32) for fn in fns]\nlandscape = np.array([1,0,1,2,1,3,2,0,2,4,1,0])\n\n\ndef test_watershed_images():\n wss = [morpho.watershed(probs[i], dams=(i == 0)) for i in range(2)]\n for i, (ws, res) in enumerate(zip(wss, results)):\n yield (assert_array_equal, ws, res,\n 'Image watershed test %i failed.' % i)\n\n\ndef test_watershed():\n regular_watershed_result = np.array([1,1,1,0,2,0,3,3,3,0,4,4])\n regular_watershed = morpho.watershed(landscape, dams=True)\n assert_array_equal(regular_watershed, regular_watershed_result)\n\n\ndef test_watershed_nodams():\n nodam_watershed_result = np.array([1,1,1,2,2,2,3,3,3,4,4,4])\n nodam_watershed = morpho.watershed(landscape, dams=False)\n assert_array_equal(nodam_watershed, nodam_watershed_result)\n\n\ndef test_watershed_seeded():\n seeds_bool = (landscape == 0)\n seeds_unique = nd.label(seeds_bool)[0]\n seeded_watershed_result = np.array([1,1,1,1,1,0,2,2,2,0,3,3])\n seeded_watershed1 = morpho.watershed(landscape, seeds_bool, dams=True)\n seeded_watershed2 = morpho.watershed(landscape, seeds_unique, dams=True)\n assert_array_equal(seeded_watershed1, seeded_watershed_result)\n assert_array_equal(seeded_watershed2, seeded_watershed_result)\n\n\ndef test_watershed_seeded_nodams():\n seeds_bool = landscape==0\n seeded_nodam_ws_result = np.array([1,1,1,1,1,1,2,2,2,3,3,3])\n seeded_nodam_ws = morpho.watershed(landscape,\n seeds=seeds_bool, override_skimage=True, dams=False)\n assert_array_equal(seeded_nodam_ws, seeded_nodam_ws_result)\n\n\ndef test_watershed_saddle_basin():\n saddle_landscape = np.array([[0,0,3],[2,1,2],[0,0,3]])\n saddle_result = np.array([[1,1,1],[0,0,0],[2,2,2]])\n saddle_ws = morpho.watershed(saddle_landscape, dams=True)\n assert_array_equal(saddle_ws, saddle_result)\n\n\ndef test_watershed_plateau_performance():\n \"\"\"Test time taken by watershed on plateaus is acceptable.\n \n Versions prior to 2d319e performed redundant computations in the\n idxs_adjacent_to_labels queue which resulted in an explosion in \n runtime on plateaus. This test checks against that behavior.\n \"\"\"\n plat = np.ones((11,11))\n plat[5,5] = 0\n timed_watershed = time_me(morpho.watershed)\n time_taken = timed_watershed(plat)[1]\n assert_array_less(time_taken, 100, 'watershed plateau too slow')\n\n\nif __name__ == '__main__':\n from numpy import testing\n testing.run_module_suite()\n"
] | [
[
"numpy.ones",
"scipy.ndimage.label",
"numpy.testing.assert_array_less",
"numpy.testing.run_module_suite",
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.loadtxt"
]
] |
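A small sketch of the seed-preparation step the seeded-watershed tests above rely on: a boolean mask of the landscape minima is converted into uniquely labelled seeds with scipy.ndimage.label before being passed to morpho.watershed.

```python
# Sketch only: reproduces the seeds_bool -> seeds_unique conversion from
# test_watershed_seeded on the same toy landscape.
import numpy as np
from scipy import ndimage as nd
from numpy.testing import assert_array_equal

landscape = np.array([1, 0, 1, 2, 1, 3, 2, 0, 2, 4, 1, 0])
seeds_bool = (landscape == 0)
seeds_unique, num_seeds = nd.label(seeds_bool)

assert num_seeds == 3
assert_array_equal(seeds_unique, [0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 3])
```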
xiangruhuang/OpenPCDet | [
"d82d9594a0629ffed0c457aedc304e0805e93221"
] | [
"pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py"
] | [
"import math\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules\nfrom ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils\nfrom ....utils import common_utils\n\n\ndef bilinear_interpolate_torch(im, x, y):\n \"\"\"\n Args:\n im: (H, W, C) [y, x]\n x: (N)\n y: (N)\n\n Returns:\n\n \"\"\"\n x0 = torch.floor(x).long()\n x1 = x0 + 1\n\n y0 = torch.floor(y).long()\n y1 = y0 + 1\n\n x0 = torch.clamp(x0, 0, im.shape[1] - 1)\n x1 = torch.clamp(x1, 0, im.shape[1] - 1)\n y0 = torch.clamp(y0, 0, im.shape[0] - 1)\n y1 = torch.clamp(y1, 0, im.shape[0] - 1)\n\n Ia = im[y0, x0]\n Ib = im[y1, x0]\n Ic = im[y0, x1]\n Id = im[y1, x1]\n\n wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)\n wb = (x1.type_as(x) - x) * (y - y0.type_as(y))\n wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)\n wd = (x - x0.type_as(x)) * (y - y0.type_as(y))\n ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)\n return ans\n\n\ndef sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):\n \"\"\"\n Args:\n rois: (M, 7 + C)\n points: (N, 3)\n sample_radius_with_roi:\n num_max_points_of_part:\n\n Returns:\n sampled_points: (N_out, 3)\n \"\"\"\n if points.shape[0] < num_max_points_of_part:\n distance = (points[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)\n min_dis, min_dis_roi_idx = distance.min(dim=-1)\n roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)\n point_mask = min_dis < roi_max_dim + sample_radius_with_roi\n else:\n start_idx = 0\n point_mask_list = []\n while start_idx < points.shape[0]:\n distance = (points[start_idx:start_idx + num_max_points_of_part, None, :] - rois[None, :, 0:3]).norm(dim=-1)\n min_dis, min_dis_roi_idx = distance.min(dim=-1)\n roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)\n cur_point_mask = min_dis < roi_max_dim + sample_radius_with_roi\n point_mask_list.append(cur_point_mask)\n start_idx += num_max_points_of_part\n point_mask = torch.cat(point_mask_list, dim=0)\n\n if point_mask.sum() == 0:\n point_mask = torch.zeros(points.shape[0], dtype=torch.bool, device=points.device)\n point_mask[0] = True\n assert point_mask.sum() > 0\n sampled_points = points[point_mask, :]\n\n return sampled_points, point_mask\n\n\ndef sector_fps(points, num_sampled_points, num_sectors, seg_labels=None):\n \"\"\"\n Args:\n points: (N, 3)\n num_sampled_points: int\n num_sectors: int\n\n Returns:\n sampled_points: (N_out, 3)\n \"\"\"\n sector_size = np.pi * 2 / num_sectors\n point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi\n sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors)\n xyz_points_list = []\n xyz_batch_cnt = []\n num_sampled_points_list = []\n for k in range(num_sectors):\n mask = (sector_idx == k)\n cur_num_points = mask.sum().item()\n if cur_num_points > 0:\n xyz_points_list.append(points[mask])\n xyz_batch_cnt.append(cur_num_points)\n ratio = cur_num_points / points.shape[0]\n num_sampled_points_list.append(\n min(cur_num_points, math.ceil(ratio * num_sampled_points))\n )\n\n if len(xyz_batch_cnt) == 0:\n xyz_points_list.append(points)\n xyz_batch_cnt.append(len(points))\n num_sampled_points_list.append(num_sampled_points)\n print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')\n\n xyz = torch.cat(xyz_points_list, dim=0)\n xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int()\n 
sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()\n\n sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(\n xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt\n ).long()\n\n sampled_points = xyz[sampled_pt_idxs]\n if seg_labels is not None:\n seg_labels = seg_labels[sampled_pt_idxs]\n return sampled_points, seg_labels\n else:\n return sampled_points\n\n\nclass VoxelSetAbstraction(nn.Module):\n def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,\n num_rawpoint_features=None, **kwargs):\n super().__init__()\n self.model_cfg = model_cfg\n self.voxel_size = voxel_size\n self.point_cloud_range = point_cloud_range\n self.on_seg = model_cfg.get(\"ON_SEG\", False)\n self.suffix = model_cfg.get(\"SUFFIX\", '')\n\n SA_cfg = self.model_cfg.SA_LAYER\n\n self.SA_layers = nn.ModuleList()\n self.SA_layer_names = []\n self.downsample_times_map = {}\n c_in = 0\n for src_name in self.model_cfg.FEATURES_SOURCE:\n if src_name in ['bev', 'raw_points']:\n continue\n self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR\n\n if SA_cfg[src_name].get('INPUT_CHANNELS', None) is None:\n input_channels = SA_cfg[src_name].MLPS[0][0] \\\n if isinstance(SA_cfg[src_name].MLPS[0], list) else SA_cfg[src_name].MLPS[0]\n else:\n input_channels = SA_cfg[src_name]['INPUT_CHANNELS']\n\n cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(\n input_channels=input_channels, config=SA_cfg[src_name]\n )\n self.SA_layers.append(cur_layer)\n self.SA_layer_names.append(src_name)\n\n c_in += cur_num_c_out\n \n\n if 'bev' in self.model_cfg.FEATURES_SOURCE:\n c_bev = num_bev_features\n c_in += c_bev\n\n if 'raw_points' in self.model_cfg.FEATURES_SOURCE:\n self.SA_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module(\n input_channels=num_rawpoint_features - 3, config=SA_cfg['raw_points']\n )\n\n c_in += cur_num_c_out\n\n self.vsa_point_feature_fusion = nn.Sequential(\n nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),\n nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),\n nn.ReLU(),\n )\n self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES\n self.num_point_features_before_fusion = c_in\n\n def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):\n \"\"\"\n Args:\n keypoints: (N1 + N2 + ..., 4)\n bev_features: (B, C, H, W)\n batch_size:\n bev_stride:\n\n Returns:\n point_bev_features: (N1 + N2 + ..., C)\n \"\"\"\n x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0]\n y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1]\n\n x_idxs = x_idxs / bev_stride\n y_idxs = y_idxs / bev_stride\n\n point_bev_features_list = []\n for k in range(batch_size):\n bs_mask = (keypoints[:, 0] == k)\n\n cur_x_idxs = x_idxs[bs_mask]\n cur_y_idxs = y_idxs[bs_mask]\n cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)\n point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)\n point_bev_features_list.append(point_bev_features)\n\n point_bev_features = torch.cat(point_bev_features_list, dim=0) # (N1 + N2 + ..., C)\n return point_bev_features\n\n def sectorized_proposal_centric_sampling(self, roi_boxes, points, seg_labels=None):\n \"\"\"\n Args:\n roi_boxes: (M, 7 + C)\n points: (N, 3)\n\n Returns:\n sampled_points: (N_out, 3)\n \"\"\"\n\n sampled_points, point_mask = sample_points_with_roi(\n rois=roi_boxes, points=points,\n 
sample_radius_with_roi=self.model_cfg.SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI,\n num_max_points_of_part=self.model_cfg.SPC_SAMPLING.get('NUM_POINTS_OF_EACH_SAMPLE_PART', 200000)\n )\n if seg_labels is not None:\n seg_labels = seg_labels[point_mask]\n return sector_fps(\n points=sampled_points, num_sampled_points=self.model_cfg.NUM_KEYPOINTS,\n num_sectors=self.model_cfg.SPC_SAMPLING.NUM_SECTORS, seg_labels=seg_labels\n )\n\n def get_sampled_points(self, batch_dict):\n \"\"\"\n Args:\n batch_dict:\n\n Returns:\n keypoints: (N1 + N2 + ..., 4), where 4 indicates [bs_idx, x, y, z]\n \"\"\"\n batch_size = batch_dict['batch_size']\n on_seg = self.on_seg and (\"seg_labels\" in batch_dict)\n if self.model_cfg.POINT_SOURCE == 'raw_points':\n src_points = batch_dict['points'][:, 1:4]\n if on_seg:\n src_seg_labels = batch_dict['seg_labels'][:, 1] # segmentation labels\n batch_indices = batch_dict['points'][:, 0].long()\n elif self.model_cfg.POINT_SOURCE == 'voxel_centers':\n src_points = common_utils.get_voxel_centers(\n batch_dict['voxel_coords'][:, 1:4],\n downsample_times=1,\n voxel_size=self.voxel_size,\n point_cloud_range=self.point_cloud_range\n )\n batch_indices = batch_dict['voxel_coords'][:, 0].long()\n else:\n raise NotImplementedError\n keypoints_list = []\n if on_seg:\n keypoint_labels_list = []\n\n if isinstance(self.model_cfg.SAMPLE_METHOD, list):\n use_fps = 'FPS' in self.model_cfg.SAMPLE_METHOD\n use_spc = 'SPC' in self.model_cfg.SAMPLE_METHOD\n else:\n use_fps = 'FPS' == self.model_cfg.SAMPLE_METHOD\n use_spc = 'SPC' == self.model_cfg.SAMPLE_METHOD\n\n for bs_idx in range(batch_size):\n bs_mask = (batch_indices == bs_idx)\n sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)\n keypoints = []\n if on_seg:\n sampled_seg_labels = src_seg_labels[bs_mask].unsqueeze(dim=0) # (1, N, 3)\n keypoint_labels = []\n if use_fps:\n cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample(\n sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS\n ).long()\n\n if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:\n times = int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1\n non_empty = cur_pt_idxs[0, :sampled_points.shape[1]]\n cur_pt_idxs[0] = non_empty.repeat(times)[:self.model_cfg.NUM_KEYPOINTS]\n\n cur_keypoints = sampled_points[0][cur_pt_idxs[0]]\n bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx\n keypoints.append(torch.cat((bs_idxs[:, None], cur_keypoints), dim=1))\n if on_seg:\n keypoint_labels.append(sampled_seg_labels[0][cur_pt_idxs[0]])\n\n if use_spc:\n if on_seg:\n cur_keypoints, cur_keypoint_labels = self.sectorized_proposal_centric_sampling(\n roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0],\n seg_labels=sampled_seg_labels[0]\n )\n bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx\n keypoints.append(torch.cat((bs_idxs[:, None], cur_keypoints), dim=1))\n keypoint_labels.append(cur_keypoint_labels)\n else:\n cur_keypoints = self.sectorized_proposal_centric_sampling(\n roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0]\n )\n bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx\n keypoints.append(torch.cat((bs_idxs[:, None], cur_keypoints), dim=1))\n\n keypoints = torch.cat(keypoints, axis=0) if isinstance(keypoints, list) else keypoints[0] # [x, N, 3]\n keypoints_list.append(keypoints)\n if on_seg:\n keypoint_labels = torch.cat(keypoint_labels, axis=0) if isinstance(keypoint_labels, list) else keypoint_labels[0] # [x, N, 3]\n 
keypoint_labels_list.append(keypoint_labels)\n\n keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3) or (N1 + N2 + ..., 4)\n\n if on_seg:\n keypoint_labels = torch.cat(keypoint_labels_list, dim=0).view(-1)\n return keypoints, keypoint_labels\n else:\n return keypoints\n\n @staticmethod\n def aggregate_keypoint_features_from_one_source(\n batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt,\n filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None\n ):\n \"\"\"\n\n Args:\n aggregate_func:\n xyz: (N, 3)\n xyz_features: (N, C)\n xyz_bs_idxs: (N)\n new_xyz: (M, 3)\n new_xyz_batch_cnt: (batch_size), [N1, N2, ...]\n\n filter_neighbors_with_roi: True/False\n radius_of_neighbor: float\n num_max_points_of_part: int\n rois: (batch_size, num_rois, 7 + C)\n Returns:\n\n \"\"\"\n xyz_batch_cnt = xyz.new_zeros(batch_size).int()\n if filter_neighbors_with_roi:\n point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz\n point_features_list = []\n for bs_idx in range(batch_size):\n bs_mask = (xyz_bs_idxs == bs_idx)\n _, valid_mask = sample_points_with_roi(\n rois=rois[bs_idx], points=xyz[bs_mask],\n sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part,\n )\n point_features_list.append(point_features[bs_mask][valid_mask])\n xyz_batch_cnt[bs_idx] = valid_mask.sum()\n\n valid_point_features = torch.cat(point_features_list, dim=0)\n xyz = valid_point_features[:, 0:3]\n xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None\n else:\n for bs_idx in range(batch_size):\n xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum()\n\n pooled_points, pooled_features = aggregate_func(\n xyz=xyz.contiguous(),\n xyz_batch_cnt=xyz_batch_cnt,\n new_xyz=new_xyz,\n new_xyz_batch_cnt=new_xyz_batch_cnt,\n features=xyz_features.contiguous(),\n )\n return pooled_features\n\n def forward(self, batch_dict):\n \"\"\"\n Args:\n batch_dict:\n batch_size:\n keypoints: (B, num_keypoints, 3)\n multi_scale_3d_features: {\n 'x_conv4': ...\n }\n points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]\n spatial_features: optional\n spatial_features_stride: optional\n\n Returns:\n point_features: (N, C)\n point_coords: (N, 4)\n\n \"\"\"\n on_seg = self.on_seg and (\"seg_labels\" in batch_dict)\n if on_seg:\n keypoints, keypoint_labels = self.get_sampled_points(batch_dict)\n else:\n keypoints = self.get_sampled_points(batch_dict)\n\n point_features_list = []\n if 'bev' in self.model_cfg.FEATURES_SOURCE:\n point_bev_features = self.interpolate_from_bev_features(\n keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],\n bev_stride=batch_dict['spatial_features_stride']\n )\n point_features_list.append(point_bev_features)\n\n batch_size = batch_dict['batch_size']\n\n new_xyz = keypoints[:, 1:4].contiguous()\n new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int()\n for k in range(batch_size):\n new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum()\n\n if 'raw_points' in self.model_cfg.FEATURES_SOURCE:\n raw_points = batch_dict['points']\n\n pooled_features = self.aggregate_keypoint_features_from_one_source(\n batch_size=batch_size, aggregate_func=self.SA_rawpoints,\n xyz=raw_points[:, 1:4],\n xyz_features=raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None,\n xyz_bs_idxs=raw_points[:, 0],\n new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,\n filter_neighbors_with_roi=self.model_cfg.SA_LAYER['raw_points'].get('FILTER_NEIGHBOR_WITH_ROI', 
False),\n radius_of_neighbor=self.model_cfg.SA_LAYER['raw_points'].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),\n rois=batch_dict.get('rois', None)\n )\n point_features_list.append(pooled_features)\n\n for k, src_name in enumerate(self.SA_layer_names):\n cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices\n cur_features = batch_dict['multi_scale_3d_features'][src_name].features.contiguous()\n\n xyz = common_utils.get_voxel_centers(\n cur_coords[:, 1:4], downsample_times=self.downsample_times_map[src_name],\n voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range\n )\n\n pooled_features = self.aggregate_keypoint_features_from_one_source(\n batch_size=batch_size, aggregate_func=self.SA_layers[k],\n xyz=xyz.contiguous(), xyz_features=cur_features, xyz_bs_idxs=cur_coords[:, 0],\n new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt,\n filter_neighbors_with_roi=self.model_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False),\n radius_of_neighbor=self.model_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None),\n rois=batch_dict.get('rois', None)\n )\n\n point_features_list.append(pooled_features)\n\n point_features = torch.cat(point_features_list, dim=-1)\n\n if on_seg:\n batch_dict[f'point_seg_labels{self.suffix}'] = keypoint_labels # (BXN, 1)\n batch_dict[f'point_features_before_fusion{self.suffix}'] = point_features.view(-1, point_features.shape[-1])\n point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1]))\n\n batch_dict[f'point_features{self.suffix}'] = point_features # (BxN, C)\n batch_dict[f'point_coords{self.suffix}'] = keypoints # (BxN, 4)\n\n return batch_dict\n"
] | [
[
"torch.nn.Linear",
"torch.nn.BatchNorm1d",
"torch.tensor",
"torch.t",
"torch.nn.ReLU",
"torch.nn.ModuleList",
"torch.floor",
"torch.zeros",
"torch.cat",
"torch.clamp",
"torch.atan2"
]
] |
dxs/neighbour-analyser | [
"609c220b1352f9c3e64ea96ff43007584712efb0"
] | [
"testing/car_record.py"
] | [
"import cv2 as cv\nimport argparse\nimport sys\nimport numpy as np \nimport os.path \n\n#set constants\nFRONT_CAMERA = 1\nBACK_CAMERA = 0\ni = 0\n\nconfThreshold = 0.5 #Confidence threshold\nnmsThreshold = 0.4 #Non-maximum suppression threshold\ninpWidth = 416 #Width of network's input image\ninpHeight = 416 #Height of network's input image\n\n# Load names of classes\nclassesFile = 'coco.names'\nclasses = None\nwith open(classesFile, 'rt') as f:\n classes = f.read().rstrip('\\n').split('\\n')\n\n# LOAD MODEL AND CLASSES\n# Give the configuration and weight files for the model and load the network using them.\nmodelConfiguration = 'yolov3-tiny.cfg' # Network configuration\nmodelWeights = 'yolov3-tiny.weights' #Pre-trained network's weights\n\nnet = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\nnet.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)\nnet.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)\n\n# Get the names of the output layers\ndef getOutputsNames(net):\n # Get the names of all the layers in the network\n layersNames = net.getLayerNames()\n # Get the names of the output layers, i.e. the layers with unconnected outputs\n return [layersNames[i[0] -1] for i in net.getUnconnectedOutLayers()]\n\n# Process inputs\noutputFile = 'cars.avi'\n\ncap = cv.VideoCapture(BACK_CAMERA)\n\n# Get the video writer initialized to save the output video when needed\nvideo_writer = cv.VideoWriter(outputFile, cv.VideoWriter_fourcc('M','J','P','G'), 30, (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)), round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))\n\ncounter_seen_car_ago = 0\n\nwhile cv.waitKey(1) < 0:\n # Get frame from the video\n hasFrame, frame = cap.read()\n\n if not hasFrame:\n print('Done processing !!!')\n cv.waitKey(3000)\n break\n\n if counter_seen_car_ago > 0:\n counter_seen_car_ago = counter_seen_car_ago-1\n video_writer.write(frame.astype(np.uint8))\n continue\n \n # Create a 4D blob from a frame\n blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)\n\n # Sets the input to the network\n net.setInput(blob)\n\n # Runs the forward pass to get output of the output layers\n outs = net.forward(getOutputsNames(net))\n for out in outs:\n for detection in out:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > confThreshold:\n if(classes[classId] == 'car'):\n video_writer.write(frame.astype(np.uint8))\n i = i + 1\n counter_seen_car_ago = 10\n print('save img {0}'.format(i))\n \n"
] | [
[
"numpy.argmax"
]
] |
Thomas-Schatz/scone-phobia | [
"55577d150ff71fd1f1c52073143c64e242b28600"
] | [
"scone_phobia/utils/apply_analyses.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 20 10:59:28 2018\n\n@author: Thomas Schatz\n\nCode for managing the analysis of a set of ABXpy minimal-pair scores \nfor ABX tasks with the following structure:\n ON phone BY speaker, previous and following phonetic context)\n\nThe main function is apply_analysis, see readme.md for usage example.\n\nIn the current implementation, the metadata associated with each minimal-pair\nfile can be stored directly in the name of the file, following a scheme described below.\nNote that this scheme is parameterized by the 'primary-metadata' section of\nthe config file.\nAt minima, the filename should be sufficient to deduce all relevant metadata for carrying\nout the analyses and plots.\nThe scone_phobia/metadata folder can be used to store information associating filename\ncomponents with further metadata. It can be useful to keep filenames from getting too long.\n\nThis code requires a folder where pickles containing the minimal-pair scores are stored.\nIf resampling of the scores is needed (e.g. to obtain estimate of variability for\nthe analysis results), this folder should also contain a 'resampling' subfolder\nwhere pickles containing resample of the minimal-pair scores are stored.\n\nThese minimal-pair scores pickles can be obtained with precompute_mp_scores.py\nand resample of those with resample_mp_scores.py. Note that both these scripts\nwill name pickles based on the name of the original ABXpy results filename,\nso it's probably a good idea to name those original results files in accordance\nwith the naming scheme described below.\n\nPart of this code could probably be generalized to analysing results from \nother ABX tasks. If we need to do that, Not sure if we should try to increase\nthe scope of the current library, if we should do two independent libraries with\nsome (a lot of?) redundant code or if we should have an independent abstract\nlibrary being called by several libraries applied to particular tasks.\n\"\"\"\n\nimport pandas\nimport os\nimport os.path as path\nimport scone_phobia.utils.mp_scores as mp_scores\nimport yaml\n\n\ndef load_cfg_from_file(f):\n # decorator that will load keyword cfg argument\n # from \"../config.yml\" unless it is specified explicitly\n def wrapper(*args, **kwargs):\n if not('cfg' in kwargs) or (kwargs['cfg'] is None):\n dir = path.dirname(path.realpath(__file__))\n cfg_file = path.join(dir, \"..\", \"config.yml\")\n with open(cfg_file, 'r') as ymlfile:\n kwargs['cfg'] = yaml.load(ymlfile, Loader=yaml.Loader)['primary-metadata']\n return f(*args, **kwargs)\n return wrapper\n\n\n\"\"\"\nFilename parsing utilities\n\nParse filenames for ABX results files and derivatives based on the 'primary-metadata'\nspecified in '../config.yml'.\n\nFilenames (without the extension) should be of the form:\n\n Property1valueProperty1key__Property2valueProperty2key__...___PropertyNvaluePropertyNkey.extension\n\nwhere the property values and keys should not contain any double underscores and\nwhere the property keys should correspond to the keys in the 'primary-metadata' \nsection of the '../config.yml' file. The extension can be whatever file extension\nis appropriate. 
For example the following would be valid filenames for the 'primary-metadata'\nsection of the config file template ('../config.yml.example'):\n\n HMM-GMMmodel__WSJtrain__CSJtest__KLdis.pickle\n HMM-GMMmodel__WSJtrain__CSJtest__KLdis.txt\n MFCCmodel__Nonetrain__CSJtest__COSdis.pickle\n\nThe first one, for example, would be parsed into the following list of pairs:\n\n [('model type', 'HMM-GMM'),\n ('training set', 'WSJ'),\n ('test set', 'CSJ'),\n ('dissimilarity', 'KL')]\n\nFor bootstrap related files, filenames will look like:\n\n HMM-GMMmodel__WSJtrain__CSJtest__KLdis__batchsize50__batch3.pickle\n\nwhere the number after 'batchsize' indicates the size of the resampling\nbatches and the number after 'batch' is the batch ID for this particular file.\nFor these files, the resampling batch size and batch ID are also\nreturned.\n\"\"\"\n\ndef suffix_split(token, cfg, err_message):\n \"\"\"\n Helper function for parsing filenames.\n Looking for a key from cfg that would be\n a suffix of token. There should be one\n and only one.\n \"\"\"\n matches = []\n for key in cfg:\n if len(token) >= len(key):\n if token[-len(key):] == key:\n matches.append((key, token[:-len(key)]))\n assert len(matches) == 1, err_message\n return matches[0]\n\n\n@load_cfg_from_file\ndef parse_res_fname(fpath, cfg=None):\n name, _ = path.splitext(path.split(fpath)[1])\n err_message = (\"Results filename {} is not correctly formatted.\"\n \" Check your config file and \"\n \"formatting instructions in analyze_mp_scores.py.\"\n ).format(name)\n N = len(cfg)\n tokens = name.split('__')\n assert len(tokens) == N, err_message\n used_keys = []\n res = []\n for token in tokens:\n key, value = suffix_split(token, cfg, err_message)\n assert not key in used_keys, err_message\n used_keys.append(key)\n res.append((cfg[key], value))\n return res\n\n\n@load_cfg_from_file\ndef parse_bootres_fname(fpath, cfg=None):\n name, _ = path.splitext(path.split(fpath)[1])\n err_message = (\"Bootstrap results filename filename {} is not correctly\"\n \" formatted. 
Check your config file and \"\n \"formatting instructions in analyze_mp_scores.py.\"\n ).format(name)\n N = len(cfg)\n tokens = name.split('__')\n assert len(tokens) == N+2, err_message\n properties = parse_res_fname('__'.join(tokens[:N]), cfg=cfg)\n batch = tokens[-1]\n assert len(batch) >= 5 and batch[:5] == 'batch', batch\n batch = int(batch[5:])\n batchsize = tokens[-2]\n assert len(batchsize) >= 9 and batchsize[:9] == 'batchsize', batchsize\n batchsize = int(batchsize[9:])\n properties.append(('batch ID', batch))\n properties.append(('batch size', batchsize))\n return properties\n\n\n\n############################\n## Fetch and analyse data #\n############################\n\ndef fetch_data(analysis, mp_folder, filt=None, encoding=None,\n add_metadata=None):\n \"\"\"Use the above to get just the right data\"\"\"\n get_metadata = lambda x, parse=parse_res_fname: parse(x)\n df = mp_scores.load_mp_errors(mp_folder,\n get_metadata,\n filt=filt,\n encoding=encoding) # load all mp scores in a big df\n if not(add_metadata is None):\n df = add_metadata(df)\n df = analysis(df)\n return df\n\n\ndef fetch_resampled_data(analysis,\n resampling_file=None,\n resampled_mp_folder=None,\n filt=None,\n encoding=None,\n add_metadata=None,\n verbose=0):\n # Getting resampled minimal-pair scores to estimate variability.\n # This can take time so if resampling_file is not None,\n # results are saved once they are computed\n get_metadata = lambda x, parse=parse_bootres_fname: parse(x)\n if resampling_file is None:\n boot_dfs = mp_scores.resample_analysis(analysis,\n resampled_mp_folder,\n get_metadata,\n filt=filt,\n encoding=encoding,\n add_metadata=add_metadata,\n verbose=verbose)\n else:\n boot_dfs = mp_scores.resample_analysis_cached(resampling_file,\n analysis,\n resampled_mp_folder,\n get_metadata,\n filt=filt,\n encoding=encoding,\n add_metadata=add_metadata,\n verbose=verbose)\n boot_df = pandas.concat(boot_dfs)\n return boot_df\n\n\ndef resampling_filts(resample_caching_scheme, mp_folder, user_filt=None):\n \"\"\"\n Function used to specify various way of caching resamples of analysis\n results.\n It is the responsibility of this function to ensure that all\n caching filters are consistent with user_filt.\n See apply_analysis below.\n TODO? 
Could add a scheme where caching is done by type of model.\n \"\"\"\n caching_filts = []\n mp_files = [path.splitext(e)[0] for e in os.listdir(mp_folder)\n if path.splitext(e)[1] == '.pickle']\n if resample_caching_scheme == 'mp_file':\n for mp_fname in mp_files:\n # only use caching filters useful given user-provided filt\n if (user_filt is None) or user_filt(mp_fname):\n # use second arg default value to avoid scope issues \n filt = lambda boot_mp_fname, mp_fname=mp_fname:\\\n mp_fname in boot_mp_fname\n caching_filts.append((mp_fname, filt))\n elif resample_caching_scheme == 'sametestset_mp_filepairs':\n # analysis not assumed symmetric, so we loop over all\n # pairs\n for mp_fname1 in mp_files:\n # only use caching filters useful given user-provided filt\n if user_filt(mp_fname1):\n metadata1 = dict(parse_res_fname(mp_fname1))\n for mp_fname2 in mp_files:\n # only use caching filters useful given user-provided filt\n if user_filt(mp_fname2):\n metadata2 = dict(parse_res_fname(mp_fname2))\n if metadata1['test set'] == metadata2['test set']:\n filt_name = mp_fname1 + '___' + mp_fname2 # hacky\n # use args default values to avoid scope issues\n filt = lambda bname, n1=mp_fname1, n2=mp_fname2: \\\n n1 in bname or n2 in bname\n caching_filts.append((filt_name, filt))\n else:\n raise ValueError(('Unsupported resample caching scheme '\n '{}'.format(resample_caching_scheme)))\n return caching_filts\n\n\ndef apply_analysis(analysis, mp_folder,\n filt=None,\n add_metadata=None,\n resampling=False,\n resample_caching_scheme=None,\n analysis_folder=None,\n pickle_encoding=None,\n resampled_pickle_encoding=\"latin1\",\n verbose=0):\n \"\"\"\n analysis: function that takes a pandas dataframe containing all\n required minimal-pair scores and returns the analysis results\n of interest (in a pandas dataframe if resampling=True).\n mp_folder: folder where the pickles containing minimal-pair scores are stored\n if resampling=True, mp_folder should also contain a 'resampling' subfolder\n where pickles containing resampled versions of the minimal-pair scores\n are stored.\n filt: string -> bool function, that takes the name of a file in mp_folder\n and returns True iff that file should be included in the analysis. If\n set to None, all available files are included.\n add_metadata: pandas.Dataframe -> pandas.Dataframe function, that takes a\n raw mp_scores Dataframe (containing only 'contrast', 'error' and primary\n metadata columns, where primary metadata is as specified in the\n config.yml file) and adds some additional metadata columns to it\n resampling: whether or not to use resampling. Currently, this is only\n supported for minimal pairs averaged on speaker first then on context.\n This returns the full bootstrapped data and also adds resampling-based\n standard deviation estimate to the analysis results.\n resample_caching_scheme: if resampling is True, determines whether and how\n to cache resampled analysis results. 
Caching results on disk is useful:\n - if applying the analysis on resamples takes too long (if there are\n N resamples, the duration required for the analysis will be\n multiplied by N compared to applying the analysis without\n resampling)\n - if loading resampled minimal pair scores at once for all relevant\n files in mp_folder (as determined by filt) exhausts the available\n memory\n Currently there is only three supported values for\n resample_caching_scheme:\n - None: no caching\n - 'mp_file': will create one cache file per (non-resampled)\n minimal-pair scores file.\n ** This should only be used for analyses which can be applied\n independently for each set of minimal pair scores obtained\n in the same ABX task with the same features\n and dissimilarity function **\n - 'sametestset_mp_filepairs': will create one cache file\n per (ordered) pair of (non-resampled) minimal-pair scores\n files sharing the same test set.\n ** This should only be used for analyses comparing patterns of\n discriminability in the same ABX task for pairs of \n (features/dissimilarity function couples). **\n analysis_folder: currently only used if resampling=True and\n resample_caching_scheme is not None, to specify where to store cached\n analysis resamples.\n pickle_encoding and resampled_pickle_encoding: useful to ensure pickles\n containing minimal pair scores, resp. resampled versions of those, will be\n read correctly, for example if they have been computed under a different \n python environment than the current one.\n \"\"\"\n if filt is None:\n filt = lambda mp_fname: True \n df = fetch_data(analysis, mp_folder, filt=filt, encoding=pickle_encoding,\n add_metadata=add_metadata)\n if resampling:\n boot_dfs = []\n resampled_mp_folder = path.join(mp_folder, 'resampling')\n if resample_caching_scheme is None:\n resampling_file = None\n boot_dfs.append(\n fetch_resampled_data(analysis, resampling_file,\n resampled_mp_folder,\n filt=filt,\n encoding=resampled_pickle_encoding,\n add_metadata=add_metadata,\n verbose=verbose))\n else:\n caching_filts = resampling_filts(resample_caching_scheme,\n mp_folder,\n user_filt=filt)\n assert not(analysis_folder is None)\n for filt_name, caching_filt in caching_filts:\n # keep and_filt in case we add other resampling caching schemes\n # where the caching filts are defined more coarsely than some\n # possible user-provided filters.\n and_filt = lambda mp_fname, f1=filt, f2=caching_filt:\\\n f1(mp_fname) and f2(mp_fname)\n resampling_file = path.join(analysis_folder,\n '{}.pickle'.format(filt_name))\n boot_dfs.append(\n fetch_resampled_data(analysis, resampling_file,\n resampled_mp_folder,\n filt=and_filt,\n encoding=resampled_pickle_encoding,\n add_metadata=add_metadata,\n verbose=verbose))\n boot_df = pandas.concat(boot_dfs)\n # Add resulting standard deviation estimates to main dataframe \n df = mp_scores.estimate_std(df, boot_df)\n # TODO: permutation tests\n return df, boot_df\n else:\n return df\n"
] | [
[
"pandas.concat"
]
] |
mwittgen/rogue | [
"4be0e9a4d17bdd3987a268f54ad195ee1093190d"
] | [
"tests/test_list_memory.py"
] | [
"#!/usr/bin/env python3\n#-----------------------------------------------------------------------------\n# This file is part of the rogue software platform. It is subject to\n# the license terms in the LICENSE.txt file found in the top-level directory\n# of this distribution and at:\n# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.\n# No part of the rogue software platform, including this file, may be\n# copied, modified, propagated, or distributed except according to the terms\n# contained in the LICENSE.txt file.\n#-----------------------------------------------------------------------------\n\n# Comment added by rherbst for demonstration purposes.\nimport pyrogue as pr\nimport pyrogue.interfaces.simulation\nimport rogue.interfaces.memory\nimport numpy as np\nimport random\nimport time\n\n#rogue.Logging.setLevel(rogue.Logging.Warning)\n#import logging\n#logger = logging.getLogger('pyrogue')\n#logger.setLevel(logging.DEBUG)\n\nclass ListDevice(pr.Device):\n\n # Last comment added by rherbst for demonstration.\n def __init__(\n self,\n name = 'ListDevice',\n description = 'List Device Test',\n **kwargs):\n\n super().__init__(\n name = name,\n description = description,\n **kwargs)\n\n ##############################\n # Variables\n ##############################\n\n self.add(pr.RemoteVariable(\n name = 'UInt32List',\n offset = 0x0000,\n bitSize = 32 * 32,\n bitOffset = 0x0000,\n base = pr.UInt,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 32,\n valueStride = 32\n ))\n\n self.add(pr.RemoteVariable(\n name = 'Int32List',\n offset = 0x1000,\n bitSize = 32 * 32,\n bitOffset = 0x0000,\n base = pr.Int,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 32,\n valueStride = 32\n ))\n\n self.add(pr.RemoteVariable(\n name = 'UInt48List',\n offset = 0x2000,\n bitSize = 48 * 32,\n bitOffset = 0x0000,\n base = pr.UInt,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 48,\n valueStride = 48\n ))\n\n self.add(pr.RemoteVariable(\n name = 'FloatList',\n offset = 0x3000,\n bitSize = 32 * 32,\n bitOffset = 0x0000,\n base = pr.Float,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 32,\n valueStride = 32\n ))\n\n self.add(pr.RemoteVariable(\n name = 'DoubleList',\n offset = 0x4000,\n bitSize = 64 * 32,\n bitOffset = 0x0000,\n base = pr.Double,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 64,\n valueStride = 64\n ))\n\n self.add(pr.RemoteVariable(\n name = 'UInt16List',\n offset = 0x5000,\n bitSize = 16 * 32,\n bitOffset = 0x0000,\n base = pr.UInt,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 16,\n valueStride = 16\n ))\n\n self.add(pr.RemoteVariable(\n name = 'UInt21List',\n offset = 0x6000,\n bitSize = 32 * 32,\n bitOffset = 0x0000,\n base = pr.UInt,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 21,\n valueStride = 32\n ))\n\n self.add(pr.RemoteVariable(\n name = 'BoolList',\n offset = 0x7000,\n bitSize = 32,\n bitOffset = 0x0000,\n base = pr.Bool,\n mode = 'RW',\n disp = '{}',\n numValues = 32,\n valueBits = 1,\n valueStride = 1\n ))\n\nclass DummyTree(pr.Root):\n\n def __init__(self):\n pr.Root.__init__(self,\n name='dummyTree',\n description=\"Dummy tree for example\",\n timeout=2.0,\n pollEn=False)\n #serverPort=None)\n\n # Use a memory space emulator\n sim = rogue.interfaces.memory.Emulate(4,0x1000)\n self.addInterface(sim)\n\n self.add(ListDevice(\n offset = 0,\n memBase = sim\n ))\n\ndef test_memory():\n\n UInt32ListARaw = [int(random.random()*1000) for i in range(32)]\n 
Int32ListARaw = [int(random.random()*1000) for i in range(32)]\n UInt48ListARaw = [int(random.random()*1000) for i in range(32)]\n FloatListARaw = [random.random()*1000 for i in range(32)]\n DoubleListARaw = [random.random()*1000 for i in range(32)]\n UInt16ListARaw = [int(random.random()*1000) for i in range(32)]\n UInt21ListARaw = [int(random.random()*1000) for i in range(32)]\n BoolListARaw = [int(random.random()*1000)%2==0 for i in range(32)]\n\n UInt32ListA = np.array(UInt32ListARaw,np.uint32)\n Int32ListA = np.array(Int32ListARaw,np.int32)\n UInt48ListA = np.array(UInt48ListARaw,np.uint64)\n FloatListA = np.array(FloatListARaw,np.float32)\n DoubleListA = np.array(DoubleListARaw,np.float64)\n UInt16ListA = np.array(UInt16ListARaw,np.uint32)\n UInt21ListA = np.array(UInt21ListARaw,np.uint32)\n BoolListA = np.array(BoolListARaw,bool)\n\n UInt32ListB = [int(random.random()*1000) for i in range(32)]\n Int32ListB = [int(random.random()*1000) for i in range(32)]\n UInt48ListB = [int(random.random()*1000) for i in range(32)]\n FloatListB = [random.random()*1000 for i in range(32)]\n DoubleListB = [random.random()*1000 for i in range(32)]\n UInt16ListB = [int(random.random()*1000) for i in range(32)]\n UInt21ListB = [int(random.random()*1000) for i in range(32)]\n BoolListB = [int(random.random()*1000)%2==0 for i in range(32)]\n\n with DummyTree() as root:\n\n with root.updateGroup():\n root.ListDevice.UInt32List.set(UInt32ListARaw)\n root.ListDevice.Int32List.set(Int32ListARaw)\n root.ListDevice.UInt48List.set(UInt48ListARaw)\n root.ListDevice.FloatList.set(FloatListARaw)\n root.ListDevice.DoubleList.set(DoubleListARaw)\n root.ListDevice.UInt16List.set(UInt16ListARaw)\n root.ListDevice.UInt21List.set(UInt21ListARaw)\n root.ListDevice.BoolList.set(BoolListARaw)\n\n UInt32ListAA = root.ListDevice.UInt32List.get()\n Int32ListAA = root.ListDevice.Int32List.get()\n UInt48ListAA = root.ListDevice.UInt48List.get()\n FloatListAA = root.ListDevice.FloatList.get()\n DoubleListAA = root.ListDevice.DoubleList.get()\n UInt16ListAA = root.ListDevice.UInt16List.get()\n UInt21ListAA = root.ListDevice.UInt21List.get()\n BoolListAA = root.ListDevice.BoolList.get()\n\n UInt32ListAB = np.array([0] * 32,np.uint32)\n Int32ListAB = np.array([0] * 32,np.int32)\n UInt48ListAB = np.array([0] * 32,np.uint64)\n FloatListAB = np.array([0] * 32,np.float32)\n DoubleListAB = np.array([0] * 32,np.float64)\n UInt16ListAB = np.array([0] * 32,np.uint32)\n UInt21ListAB = np.array([0] * 32,np.uint32)\n BoolListAB = np.array([0] * 32,bool)\n\n for i in range(32):\n UInt32ListAB[i] = root.ListDevice.UInt32List.get(index=i)\n Int32ListAB[i] = root.ListDevice.Int32List.get(index=i)\n UInt48ListAB[i] = root.ListDevice.UInt48List.get(index=i)\n FloatListAB[i] = root.ListDevice.FloatList.get(index=i)\n DoubleListAB[i] = root.ListDevice.DoubleList.get(index=i)\n UInt16ListAB[i] = root.ListDevice.UInt16List.get(index=i)\n UInt21ListAB[i] = root.ListDevice.UInt21List.get(index=i)\n BoolListAB[i] = root.ListDevice.BoolList.get(index=i)\n\n for i in range(32):\n if UInt32ListAA[i] != UInt32ListA[i]:\n raise AssertionError(f'Verification Failure for UInt32ListAA at position {i}')\n\n if Int32ListAA[i] != Int32ListA[i]:\n raise AssertionError(f'Verification Failure for Int32ListAA at position {i}')\n\n if UInt48ListAA[i] != UInt48ListA[i]:\n raise AssertionError(f'Verification Failure for UInt48ListAA at position {i}')\n\n if abs(FloatListAA[i] - FloatListA[i]) > 0.001:\n raise AssertionError(f'Verification Failure for FloatListAA at position 
{i}')\n\n if abs(DoubleListAA[i] - DoubleListA[i]) > 0.001:\n raise AssertionError(f'Verification Failure for DoubleListAA at position {i}')\n\n if UInt16ListAA[i] != UInt16ListA[i]:\n raise AssertionError(f'Verification Failure for UInt16ListAA at position {i}')\n\n if UInt21ListAA[i] != UInt21ListA[i]:\n raise AssertionError(f'Verification Failure for UInt21ListAA at position {i}')\n\n if BoolListAA[i] != BoolListA[i]:\n raise AssertionError(f'Verification Failure for BoolListAA at position {i}')\n\n if UInt32ListAB[i] != UInt32ListA[i]:\n raise AssertionError(f'Verification Failure for UInt32ListAB at position {i}')\n\n if UInt48ListAB[i] != UInt48ListA[i]:\n raise AssertionError(f'Verification Failure for UInt48ListAB at position {i}')\n\n if abs(FloatListAB[i] - FloatListA[i]) > 0.001:\n raise AssertionError(f'Verification Failure for FloatListAB at position {i}')\n\n if abs(DoubleListAB[i] - DoubleListA[i]) > 0.001:\n raise AssertionError(f'Verification Failure for DoubleListAB at position {i}')\n\n if UInt16ListAB[i] != UInt16ListA[i]:\n raise AssertionError(f'Verification Failure for UInt16ListAB at position {i}')\n\n if UInt21ListAB[i] != UInt21ListA[i]:\n raise AssertionError(f'Verification Failure for UInt21ListAB at position {i}')\n\n if BoolListAB[i] != BoolListA[i]:\n raise AssertionError(f'Verification Failure for BoolListAB at position {i}')\n\n for i in range(32):\n root.ListDevice.UInt32List.set(UInt32ListB[i],index=i)\n root.ListDevice.Int32List.set(Int32ListB[i],index=i)\n root.ListDevice.UInt48List.set(UInt48ListB[i],index=i)\n root.ListDevice.FloatList.set(FloatListB[i],index=i)\n root.ListDevice.DoubleList.set(DoubleListB[i],index=i)\n root.ListDevice.UInt16List.set(UInt16ListB[i],index=i)\n root.ListDevice.UInt21List.set(UInt21ListB[i],index=i)\n root.ListDevice.BoolList.set(BoolListB[i],index=i)\n\n UInt32ListBA = root.ListDevice.UInt32List.get()\n Int32ListBA = root.ListDevice.Int32List.get()\n UInt48ListBA = root.ListDevice.UInt48List.get()\n FloatListBA = root.ListDevice.FloatList.get()\n DoubleListBA = root.ListDevice.DoubleList.get()\n UInt16ListBA = root.ListDevice.UInt16List.get()\n UInt21ListBA = root.ListDevice.UInt21List.get()\n BoolListBA = root.ListDevice.BoolList.get()\n\n UInt32ListBB = np.array([0] * 32,np.uint32)\n Int32ListBB = np.array([0] * 32,np.int32)\n UInt48ListBB = np.array([0] * 32,np.uint64)\n FloatListBB = np.array([0] * 32,np.float32)\n DoubleListBB = np.array([0] * 32,np.float64)\n UInt16ListBB = np.array([0] * 32,np.uint32)\n UInt21ListBB = np.array([0] * 32,np.uint32)\n BoolListBB = np.array([0] * 32,bool)\n\n for i in range(32):\n UInt32ListBB[i] = root.ListDevice.UInt32List.get(index=i)\n Int32ListBB[i] = root.ListDevice.Int32List.get(index=i)\n UInt48ListBB[i] = root.ListDevice.UInt48List.get(index=i)\n FloatListBB[i] = root.ListDevice.FloatList.get(index=i)\n DoubleListBB[i] = root.ListDevice.DoubleList.get(index=i)\n UInt16ListBB[i] = root.ListDevice.UInt16List.get(index=i)\n UInt21ListBB[i] = root.ListDevice.UInt21List.get(index=i)\n BoolListBB[i] = root.ListDevice.BoolList.get(index=i)\n\n for i in range(32):\n if UInt32ListBA[i] != UInt32ListB[i]:\n raise AssertionError(f'Verification Failure for UInt32ListBA at position {i}')\n\n if Int32ListBA[i] != Int32ListB[i]:\n raise AssertionError(f'Verification Failure for Int32ListBA at position {i}')\n\n if UInt48ListBA[i] != UInt48ListB[i]:\n raise AssertionError(f'Verification Failure for UInt48ListBA at position {i}')\n\n if abs(FloatListBA[i] - FloatListB[i]) > 0.001:\n raise 
AssertionError(f'Verification Failure for FloatListBA at position {i}')\n\n if abs(DoubleListBA[i] != DoubleListB[i]) > 0.001:\n raise AssertionError(f'Verification Failure for DoubleListBA at position {i}')\n\n if UInt16ListBA[i] != UInt16ListB[i]:\n raise AssertionError(f'Verification Failure for UInt16ListBA at position {i}')\n\n if UInt21ListBA[i] != UInt21ListB[i]:\n raise AssertionError(f'Verification Failure for UInt21ListBA at position {i}')\n\n if BoolListBA[i] != BoolListB[i]:\n raise AssertionError(f'Verification Failure for BoolListBA at position {i}')\n\n if UInt32ListBB[i] != UInt32ListB[i]:\n raise AssertionError(f'Verification Failure for UInt32ListBB at position {i}')\n\n if abs(FloatListBB[i] - FloatListB[i]) > 0.001:\n raise AssertionError(f'Verification Failure for FloatListBB at position {i}')\n\n if abs(DoubleListBB[i] - DoubleListB[i]) > 0.001:\n raise AssertionError(f'Verification Failure for DoubleListBB at position {i}')\n\n if UInt16ListBB[i] != UInt16ListB[i]:\n raise AssertionError(f'Verification Failure for UInt16ListBB at position {i}')\n\n if UInt21ListBB[i] != UInt21ListB[i]:\n raise AssertionError(f'Verification Failure for UInt21ListBB at position {i}')\n\n if BoolListBB[i] != BoolListB[i]:\n raise AssertionError(f'Verification Failure for BoolListBB at position {i}')\n\n\n root.ListDevice.UInt32List.set(UInt32ListA)\n root.ListDevice.Int32List.set(Int32ListA)\n\n root.ListDevice.UInt32List.set(np.array([1,2,3],np.uint32),index=7)\n root.ListDevice.Int32List.set([1,-22,-33],index=5)\n\n resA = root.ListDevice.UInt32List.get()\n resB = root.ListDevice.Int32List.get()\n\n UInt32ListA[7:10] = [1,2,3]\n Int32ListA[5:8] = [1,-22,-33]\n\n # Verify update\n for i in range(32):\n\n if resA[i] != UInt32ListA[i]:\n raise AssertionError(f'Stripe Verification Failure for UInt32ListA at position {i}')\n\n if resB[i] != Int32ListA[i]:\n raise AssertionError(f'Stripe Verification Failure for Int32ListA at position {i}')\n\n # Test value shift\n _ = resA[0] >> 5\n\ndef run_gui():\n import pyrogue.pydm\n\n with DummyTree() as root:\n pyrogue.pydm.runPyDM(root=root,title='test123',sizeX=1000,sizeY=500)\n\nif __name__ == \"__main__\":\n test_memory()\n #run_gui()\n\n"
] | [
[
"numpy.array"
]
] |
event-driven-robotics/models | [
"a8b6e2a83d4842eb99878d3fa53cd92f4c6b3db8"
] | [
"nxsdk_modules_ncl/dnn/composable/composable_dnn.py"
] | [
"# \n# Copyright © 2020 Intel Corporation.\n# \n# This software and the related documents are Intel copyrighted\n# materials, and your use of them is governed by the express \n# license under which they were provided to you (License). Unless\n# the License provides otherwise, you may not use, modify, copy, \n# publish, distribute, disclose or transmit this software or the\n# related documents without Intel's prior written permission.\n# \n# This software and the related documents are provided as is, with\n# no express or implied warranties, other than those that are \n# expressly stated in the License.\n\n\"\"\"A wrapper around NxModel to make it a composable\"\"\"\n\nimport os\nfrom typing import List\n\nimport numpy as np\nfrom jinja2 import Environment, FileSystemLoader\nimport atexit\n\nfrom nxsdk import get_logger\nfrom nxsdk.composable.abstract_composable import AbstractComposable\nfrom nxsdk.composable.collections import Processes\nfrom nxsdk.composable.interfaces.composable_enums import ResourceMapType\nfrom nxsdk.composable.interfaces.process import Process\nfrom nxsdk.composable.interfaces.process_aggregator_interface import AbstractProcessAggregator\nfrom nxsdk.composable.port_impl import StateInputPort\nfrom nxsdk.composable.resource_map import ResourceMapFactory\nfrom nxsdk.graph.graph import Graph\nfrom nxsdk.graph.monitor.probes import SpikeProbeCondition\nfrom nxsdk.graph.processes.phase_enums import Phase\nfrom nxsdk_modules_ncl.dnn.src.dnn_layers import ProbableStates, InputModes\nfrom nxsdk_modules_ncl.dnn.tests.test_softreset import printLayerMappings, \\\n printLayers\n\n\nclass ComposableDNN(AbstractComposable):\n \"\"\"A DNN that is composable. See nxsdk_modules_ncl.dnn.src.dnn_layers.NxModel which is the underlying DNN Model\"\"\"\n def __init__(self, model: 'NxModel', num_steps_per_img: int, enable_reset: bool = True):\n \"\"\"\n Wraps a DNNModel and makes it composable\n\n :param model (nxsdk_modules_ncl.dnn.src.dnn_layers.NxModel): The underlying DNN Model created from NxTF Layers\n :param num_steps_per_img: Number of steps to run for each image\n :param enable_reset: Whether to reset states after ``num_steps_per_img``.\n \"\"\"\n super().__init__()\n\n self._logger = get_logger(\"NET.DNN\")\n\n self._build(model=model, num_steps_per_img=num_steps_per_img, enableReset=enable_reset)\n\n def _build(self, *args, **kwargs):\n \"\"\"Builds the ports, probes and snips for the composable. This method is called from base class constructor\"\"\"\n # Stores a reference to the underlying model\n self._dnn = kwargs[\"model\"]\n self._addPorts()\n self._addProcesses()\n self._num_steps_per_img = kwargs[\"num_steps_per_img\"]\n self._enableReset = kwargs['enableReset']\n\n def _addPorts(self):\n \"\"\"Adds ports to the composable\"\"\"\n # Create and add input port. 
This will be delegated to the input layer\n self.addPort(StateInputPort(name=\"input\"))\n\n def _addProcesses(self):\n \"\"\"Adds processes/snips associated with DNN Composable\"\"\"\n snipDir = os.path.join(os.path.dirname(__file__), '..', 'snips', 'reset_model_states')\n\n # Init snip to populate number of cores and reset interval\n init = Process(\n name='init',\n cFilePath=snipDir + \"/snip_init.c\",\n includeDir=snipDir,\n funcName='init_1',\n phase=Phase.EMBEDDED_INIT,\n lmtId=0)\n self.addProcess(init)\n\n # Todo : Profile and measure to see if spreading readout and/or reset across lmts helps.\n # Reset SNIP\n reset_snip = Process(\n name='reset',\n cFilePath=snipDir + \"/snip_reset.c\",\n includeDir=snipDir,\n guardName='do_reset',\n funcName='reset',\n phase=Phase.EMBEDDED_MGMT,\n lmtId=0)\n self.addProcess(reset_snip)\n\n readout_spike_activity_snip_dir = os.path.join(os.path.dirname(__file__),\n '..', 'snips', 'readout_spike_activity')\n\n # This is an example of lazily creating a process. The C file does not exist yet and will\n # only be generated post map phase when output layer has been mapped to neurocores.\n\n # Class Readout SNIP\n readout_snip = Process(\n name='readout',\n cFilePath=readout_spike_activity_snip_dir + \"/snip_class_readout.c\",\n includeDir=readout_spike_activity_snip_dir,\n guardName='do_readout',\n funcName='readout',\n phase=Phase.EMBEDDED_MGMT,\n lmtId=0)\n self.addProcess(readout_snip)\n\n def partition(self, board: Graph) -> AbstractComposable:\n \"\"\"Partition the dnn model. We ignore this step and delegate it to map which invokes compileModel\"\"\"\n return self\n\n def map(self, board: Graph) -> AbstractComposable:\n \"\"\"Invoke partition and mapping of the dnn model\"\"\"\n mapper = self._dnn.compileModel(board)\n\n printLayerInfo = False\n if printLayerInfo:\n printLayerMappings(self._dnn.layers, mapper, synapses=True, inputAxons=True)\n printLayers(self._dnn.layers)\n\n self._createSnips(board)\n self._createReadoutSnip()\n return self\n\n def updatePorts(self, board: Graph) -> AbstractComposable:\n \"\"\"Updates resourceMap to input and output ports\"\"\"\n inputLayer = self._dnn.layers[0]\n\n if inputLayer.inputMode == InputModes.AEDAT:\n self.ports.input.resourceMap = ResourceMapFactory.createExplicit(\n ResourceMapType.INPUT_AXON, inputLayer.inputAxonResourceMap)\n else:\n # Return input compartments for multi-compartment neurons\n neuronSize = 2 if inputLayer.resetMode == 'soft' else 1\n cxResourceMap = inputLayer.cxResourceMap[::neuronSize]\n self.ports.input.resourceMap = ResourceMapFactory.createExplicit(\n ResourceMapType.COMPARTMENT, cxResourceMap)\n # self.ports.output.resourceMap = CompartmentResourceMap(self._dnn.layers[-1].cxResourceMap)\n return self\n\n def completeConnectivity(self, board: Graph, processAggregator: AbstractProcessAggregator) -> AbstractComposable:\n \"\"\"Create channel to communicate data to init snip\"\"\"\n # Should pipe to resourceMap indices for output layer\n self._createInitializationChannel(board, processAggregator)\n self._createReadoutChannel(board, processAggregator)\n return self\n\n def _createSnips(self, board: Graph):\n \"\"\"Create clones of reset and init snips based on number of chips used by input layer.\"\"\"\n processes = Processes()\n for chip_id in range(board.numChips):\n # init snip\n initProcess = self.processes.init\n initProcessWithChipId = initProcess.clone(name=initProcess.name + str(chip_id),\n params={'chipId': chip_id})\n processes.add(initProcessWithChipId)\n\n # reset 
snip\n resetProcess = self.processes.reset\n resetProcessWithChipId = resetProcess.clone(name=resetProcess.name + str(chip_id),\n params={'chipId': chip_id})\n processes.add(resetProcessWithChipId)\n\n # Todo : Enable readout for output layers distributed across multiple chips.\n # readout\n chip_id = self._dnn.layers[-1].cxResourceMap[0, 0]\n assert len(np.unique(self._dnn.layers[-1].cxResourceMap[:, 0])) == 1\n\n readoutProcess = self.processes.readout\n readoutProcessWithChipId = readoutProcess.clone(name=readoutProcess.name,\n params={'chipId': chip_id})\n processes.add(readoutProcessWithChipId)\n self.processes = processes\n\n def _createReadoutSnip(self):\n \"\"\"Create readout snip for compartment of the output layer.\n\n The voltage is readout when using an output layer with a softmax\n activation, otherwise, spikes are readout by creating spike counters\n at the lakemonts.\n \"\"\"\n probeDt = 1\n probeStart = 100000000\n\n # Get the output layer from the spiking model\n output_layer = self._dnn.layers[-1]\n\n NUM_CLASSES = int(np.prod(output_layer.output_shape[1:]))\n\n # Return output compartments for multi-compartment neurons.\n neuronSize = 2 if output_layer.resetMode == 'soft' else 1\n offset = 1 if output_layer.resetMode == 'soft' else 0\n\n # Determine whether to read spikes or voltages based on activation.\n readSpikes = True\n if hasattr(output_layer, 'activation') and \\\n output_layer.activation.__name__ == 'softmax':\n offset = 0\n readSpikes = False\n\n lmt_spike_counters = []\n\n if readSpikes:\n for i in range(NUM_CLASSES):\n spike_probe = output_layer[i * neuronSize + offset].probe(\n state=ProbableStates.SPIKE,\n probeCondition=SpikeProbeCondition(dt=probeDt, tStart=probeStart))\n lmt_spike_counters.append(spike_probe.counterId)\n cores = cxIds = np.zeros_like(lmt_spike_counters).tolist()\n else:\n rm = output_layer.cxResourceMap\n cores = rm[offset::neuronSize, 1].tolist()\n cxIds = rm[offset::neuronSize, 2].tolist()\n lmt_spike_counters = np.zeros_like(cxIds).tolist()\n\n # Now that lmt_spike_counters are known, generate the snip_class_readout.c\n self._generateReadOutSnipCFileFromJinjaTemplate(readSpikes=readSpikes,\n num_classes=NUM_CLASSES,\n lmt_output_spike_counter_ids=lmt_spike_counters,\n cores=cores,\n cxIds=cxIds)\n\n @staticmethod\n def _cleanup():\n readout_spike_activity_snip_dir = os.path.join(os.path.dirname(__file__),\n '..', 'snips', 'readout_spike_activity')\n cFilePath = os.path.join(readout_spike_activity_snip_dir, \"snip_class_readout.c\")\n if os.path.exists(cFilePath):\n os.remove(cFilePath)\n\n def _generateReadOutSnipCFileFromJinjaTemplate(self,\n readSpikes: bool,\n num_classes: int,\n lmt_output_spike_counter_ids: List[int],\n cores: List[int],\n cxIds: List[int]):\n atexit.register(ComposableDNN._cleanup)\n\n readout_spike_activity_snip_dir = os.path.join(os.path.dirname(__file__),\n '..', 'snips', 'readout_spike_activity')\n\n context = {\n \"READ_SPIKES\": int(readSpikes),\n \"NUM_CLASSES\": num_classes,\n \"NUM_STEPS_PER_IMG\": self._num_steps_per_img,\n \"LMT_OUTPUT_SPIKE_COUNTER_IDS\": \"{\" + str(lmt_output_spike_counter_ids)[1:-1] + \"}\",\n \"CORE_IDS\": \"{\" + str(cores)[1:-1] + \"}\",\n \"CX_IDS\": \"{\" + str(cxIds)[1:-1] + \"}\"\n }\n\n env = Environment(loader=FileSystemLoader(os.path.join(readout_spike_activity_snip_dir, \"templates\")),\n trim_blocks=True)\n\n c_template = env.get_template(\"snip_class_readout.c.template\")\n c_contents = c_template.render(context)\n with 
open(os.path.join(readout_spike_activity_snip_dir, \"snip_class_readout.c\"), 'w') as cFile:\n cFile.write(c_contents)\n\n def _createInitializationChannel(self, board: Graph, processAggregator: AbstractProcessAggregator):\n \"\"\"Creates a channel and connects it to init snip\"\"\"\n\n for chip_id in range(board.numChips):\n init_process = self.processes['init' + str(chip_id)]\n processKey = init_process.getProcessKey()\n snip_init_1 = processAggregator.getEmbeddedSnipForProcessKey(processKey)\n name = 'channel_init_ch{}_lmt0'.format(chip_id)\n setattr(self,\n name,\n board.createChannel(bytes(name, 'utf-8'), \"int\", 3))\n\n getattr(self, name).connect(None, snip_init_1)\n\n def _createReadoutChannel(self, board: Graph, processAggregator: AbstractProcessAggregator):\n \"\"\"Create a readout channel to read the classification values from spike counters\"\"\"\n readout_process = self.processes.readout\n processKey = readout_process.getProcessKey()\n snip_readout = processAggregator.getEmbeddedSnipForProcessKey(processKey)\n self.readout_channel = board.createChannel(bytes('readout', 'utf-8'), \"int\", numElements=100000)\n self.readout_channel.connect(snip_readout, None)\n\n @staticmethod\n def load(path: str, board: Graph = None) -> 'AbstractComposable':\n \"\"\"Not Implemented\"\"\"\n raise NotImplementedError\n\n def save(self, path: str):\n \"\"\"Not Implemented\"\"\"\n raise NotImplementedError\n\n def start(self, board: Graph, *args, **kwargs):\n \"\"\"Writes initial configuration settings (num_cores_per_chip, num_steps_per_img, enableReset) to init channel\"\"\"\n num_cores_per_chip = [board.n2Chips[i].numCores for i in range(board.numChips)]\n for chip_id in range(board.numChips):\n name = 'channel_init_ch{}_lmt0'.format(chip_id)\n getattr(self, name).write(3, [num_cores_per_chip[chip_id], self._num_steps_per_img, self._enableReset])\n"
] | [
[
"numpy.zeros_like",
"numpy.unique",
"numpy.prod"
]
] |
sixhobbits/prefect | [
"bf7a6b95ab592ad4808415f295163a64e38f1419"
] | [
"src/prefect/engine/serializers.py"
] | [
"import base64\nimport bz2\nimport gzip\nimport io\nimport json\nimport lzma\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Tuple\nimport zlib\n\nimport cloudpickle\nimport pendulum\n\nif TYPE_CHECKING:\n import pandas as pd\n\n__all__ = (\n \"Serializer\",\n \"PickleSerializer\",\n \"JSONSerializer\",\n \"DateTimeSerializer\",\n \"PandasSerializer\",\n \"CompressedSerializer\",\n)\n\nCOMPRESSION_FORMATS: Dict[str, Tuple[Callable[..., bytes], Callable[..., bytes]]] = {\n \"bz2\": (bz2.compress, bz2.decompress),\n \"gzip\": (gzip.compress, gzip.decompress),\n \"lzma\": (lzma.compress, lzma.decompress),\n \"zlib\": (zlib.compress, zlib.decompress),\n}\n\n\nclass Serializer:\n \"\"\"\n Serializers are used by Results to handle the transformation of Python\n objects to and from bytes.\n\n Subclasses should implement `serialize` and `deserialize`.\n \"\"\"\n\n def __eq__(self, other: Any) -> bool:\n return type(self) == type(other)\n\n def serialize(self, value: Any) -> bytes:\n \"\"\"\n Serialize an object to bytes.\n\n Args:\n - value (Any): the value to serialize\n\n Returns:\n - bytes: the serialized value\n \"\"\"\n raise NotImplementedError\n\n def deserialize(self, value: bytes) -> Any:\n \"\"\"\n Deserialize an object from bytes.\n\n Args:\n - value (bytes): the value to deserialize\n\n Returns:\n - Any: the deserialized value\n \"\"\"\n raise NotImplementedError\n\n\nclass PickleSerializer(Serializer):\n \"\"\"A `Serializer` that uses cloudpickle to serialize Python objects.\"\"\"\n\n def serialize(self, value: Any) -> bytes:\n \"\"\"\n Serialize an object to bytes using cloudpickle.\n\n Args:\n - value (Any): the value to serialize\n\n Returns:\n - bytes: the serialized value\n \"\"\"\n return cloudpickle.dumps(value)\n\n def deserialize(self, value: bytes) -> Any:\n \"\"\"\n Deserialize an object from bytes using cloudpickle.\n\n Args:\n - value (bytes): the value to deserialize\n\n Returns:\n - Any: the deserialized value\n \"\"\"\n try:\n return cloudpickle.loads(value)\n except Exception as exc:\n try:\n # old versions of Core encoded pickles with base64\n return cloudpickle.loads(base64.b64decode(value))\n except Exception:\n # if there's an error with the backwards-compatible step,\n # reraise the original exception\n raise exc\n\n\nclass JSONSerializer(Serializer):\n \"\"\"A Serializer that uses JSON to serialize objects\"\"\"\n\n def serialize(self, value: Any) -> bytes:\n \"\"\"\n Serialize an object to JSON\n\n Args:\n - value (Any): the value to serialize\n\n Returns:\n - bytes: the serialized value\n \"\"\"\n return json.dumps(value).encode()\n\n def deserialize(self, value: bytes) -> Any:\n \"\"\"\n Deserialize an object from JSON\n\n Args:\n - value (bytes): the value to deserialize\n\n Returns:\n - Any: the deserialized value\n \"\"\"\n return json.loads(value)\n\n\nclass DateTimeSerializer(Serializer):\n \"\"\"A Serializer for working with human-readable datetimes\"\"\"\n\n def serialize(self, value: Any) -> bytes:\n \"\"\"\n Serialize a datetime to human-readable bytes\n\n Args:\n - value (Any): the value to serialize\n\n Returns:\n - bytes: the serialized value\n \"\"\"\n return pendulum.instance(value).to_iso8601_string().encode()\n\n def deserialize(self, value: bytes) -> Any:\n \"\"\"\n Deserialize an datetime from human-readable bytes\n\n Args:\n - value (bytes): the value to deserialize\n\n Returns:\n - Any: the deserialized value\n \"\"\"\n return pendulum.parse(value.decode())\n\n\nclass PandasSerializer(Serializer):\n \"\"\"A Serializer for Pandas 
DataFrames.\n\n Args:\n - file_type (str): The type you want the resulting file to be\n saved as, e.g. \"csv\" or \"parquet\". Must match a type used\n in a `DataFrame.to_` method and a `pd.read_` function.\n - deserialize_kwargs (dict, optional): Keyword arguments to pass to the\n serialization method.\n - serialize_kwargs (dict, optional): Keyword arguments to pass to the\n deserialization method.\n \"\"\"\n\n def __init__(\n self,\n file_type: str,\n deserialize_kwargs: dict = None,\n serialize_kwargs: dict = None,\n ) -> None:\n self.file_type = file_type\n\n # Fails fast if user specifies a format that Pandas can't deal with.\n self._get_deserialize_method()\n self._get_serialize_method()\n\n self.deserialize_kwargs = (\n {} if deserialize_kwargs is None else deserialize_kwargs\n )\n self.serialize_kwargs = {} if serialize_kwargs is None else serialize_kwargs\n\n def serialize(self, value: \"pd.DataFrame\") -> bytes: # noqa: F821\n \"\"\"\n Serialize a Pandas DataFrame to bytes.\n\n Args:\n - value (DataFrame): the DataFrame to serialize\n\n Returns:\n - bytes: the serialized value\n \"\"\"\n serialization_method = self._get_serialize_method(dataframe=value)\n buffer = io.BytesIO()\n try:\n serialization_method(buffer, **self.serialize_kwargs)\n return buffer.getvalue()\n except TypeError:\n # there are some weird bugs with several of the Pandas serialization\n # methods when trying to serialize to bytes directly. This is a\n # workaround. See https://github.com/pandas-dev/pandas/pull/35129\n string_buffer = io.StringIO()\n serialization_method(string_buffer, **self.serialize_kwargs)\n return string_buffer.getvalue().encode()\n\n def deserialize(self, value: bytes) -> \"pd.DataFrame\": # noqa: F821\n \"\"\"\n Deserialize an object to a Pandas DataFrame\n\n Args:\n - value (bytes): the value to deserialize\n\n Returns:\n - DataFrame: the deserialized DataFrame\n \"\"\"\n deserialization_method = self._get_deserialize_method()\n buffer = io.BytesIO(value)\n deserialized_data = deserialization_method(buffer, **self.deserialize_kwargs)\n return deserialized_data\n\n def __eq__(self, other: Any) -> bool:\n if type(self) == type(other):\n return (\n self.file_type == other.file_type\n and self.serialize_kwargs == other.serialize_kwargs\n and self.deserialize_kwargs == other.deserialize_kwargs\n )\n return False\n\n # _get_read_method and _get_write_method are constructed as they are both to\n # limit copy/paste but also to make it easier for potential future extension to serialization\n # methods that do not map to the \"to_{}/read_{}\" interface.\n def _get_deserialize_method(self) -> Callable:\n import pandas as pd\n\n try:\n return getattr(pd, \"read_{}\".format(self.file_type))\n except AttributeError as exc:\n raise ValueError(\n \"Could not find deserialization methods for {}\".format(self.file_type)\n ) from exc\n\n def _get_serialize_method(self, dataframe: \"pd.DataFrame\" = None) -> Callable:\n import pandas as pd\n\n if dataframe is None:\n # If you just want to test if the method exists, create an empty dataframe\n dataframe = pd.DataFrame()\n try:\n return getattr(dataframe, \"to_{}\".format(self.file_type))\n except AttributeError as exc:\n raise ValueError(\n \"Could not find serialization methods for {}\".format(self.file_type)\n ) from exc\n\n\nclass CompressedSerializer(Serializer):\n \"\"\"\n A Serializer that wraps another Serializer and a compression function to serialize\n Python objects with compression.\n\n Args:\n - serializer (Serializer): the serializer that this 
serializer wraps\n - format (str): name of the selected pre-defined compression format (bz2, gzip,\n lzma, or zlib)\n - compress (Callable[..., bytes]): the custom compression function\n - decompress (Callable[..., bytes]): the custom decompression function\n - compress_kwargs (Dict[str, Any]): keyword arguments to be passed to the\n compression function\n - decompress_kwargs (Dict[str, Any]): keyword arguments to be passed to the\n decompression function\n \"\"\"\n\n def __init__(\n self,\n serializer: Serializer,\n format: str = None,\n compress: Callable[..., bytes] = None,\n decompress: Callable[..., bytes] = None,\n compress_kwargs: Dict[str, Any] = None,\n decompress_kwargs: Dict[str, Any] = None,\n ):\n self._serializer = serializer\n\n if format and (compress or decompress):\n raise ValueError(\n \"You must specify either `format` or `compress`/`decompress`, \"\n \"but not both.\"\n )\n elif format:\n try:\n self._compress, self._decompress = COMPRESSION_FORMATS[format]\n except KeyError as e:\n raise ValueError(\n \"`format` must be one of: {}\".format(\", \".join(COMPRESSION_FORMATS))\n ) from e\n elif compress and decompress:\n self._compress = compress\n self._decompress = decompress\n else:\n raise ValueError(\n \"You must specify either `format` or `compress`/`decompress`.\"\n )\n\n self._compress_kwargs = compress_kwargs or {}\n self._decompress_kwargs = decompress_kwargs or {}\n\n def __eq__(self, other: Any) -> bool:\n return (\n type(self) == type(other)\n and self._serializer == other._serializer\n and self._compress == other._compress\n and self._decompress == other._decompress\n and self._compress_kwargs == other._compress_kwargs\n and self._decompress_kwargs == other._decompress_kwargs\n )\n\n def serialize(self, value: Any) -> bytes:\n \"\"\"\n Serialize an object to compressed bytes.\n\n Args:\n - value (Any): the value to serialize\n\n Returns:\n - bytes: the compressed serialized value\n \"\"\"\n return self._compress(\n self._serializer.serialize(value), **self._compress_kwargs\n )\n\n def deserialize(self, value: bytes) -> Any:\n \"\"\"\n Deserialize an object from compressed bytes.\n\n Args:\n - value (bytes): the compressed value to deserialize\n\n Returns:\n - Any: the deserialized value\n \"\"\"\n return self._serializer.deserialize(\n self._decompress(value, **self._decompress_kwargs)\n )\n"
] | [
[
"pandas.DataFrame"
]
] |
seekindark/helloworld | [
"00fe439fdbd98add53f3bec7eac2b1ba1dc817a7"
] | [
"python/matplotlib/ee.py"
] | [
"import matplotlib.pyplot as plt\r\nx = [1, 2, 3, 4, 5]\r\ny = [2.3, 3.4, 1.2, 6.6, 7.0]\r\nplt.scatter(x, y, color='r', marker='+')\r\nplt.show()"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter"
]
] |
Adib234/AugLy | [
"35a6a5de07e64f465b8979e3257218551929e57a"
] | [
"augly/video/helpers/ffmpeg.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport io\nimport math\nimport os\nimport shutil\nfrom typing import Any, Dict, Optional, Union\n\nimport augly.audio.utils as audutils\nimport ffmpeg\nimport numpy as np\nfrom augly.utils import pathmgr, SILENT_AUDIO_PATH\nfrom augly.utils.ffmpeg import FFMPEG_PATH, FFPROBE_PATH\nfrom ffmpeg.nodes import FilterableStream\n\n\ndef combine_frames_and_audio_to_file(\n raw_frames: str,\n audio: Optional[Union[str, io.BytesIO]],\n output_path: str,\n framerate: float,\n) -> None:\n frame_dir = os.path.dirname(raw_frames)\n if not os.path.isdir(frame_dir):\n raise RuntimeError(\n f\"Got raw frames glob path of {raw_frames}, but {frame_dir} is not \"\n \"a directory\"\n )\n\n video_stream = ffmpeg.input(raw_frames, pattern_type=\"glob\", framerate=framerate)\n video_stream = video_stream.filter(\n \"pad\", **{\"width\": \"ceil(iw/2)*2\", \"height\": \"ceil(ih/2)*2\"}\n )\n merge_video_and_audio(video_stream, audio, output_path)\n\n\ndef extract_audio_to_file(video_path: str, output_audio_path: str) -> None:\n audio_info = get_audio_info(video_path)\n sample_rate = str(audio_info[\"sample_rate\"])\n codec = audio_info[\"codec_name\"]\n\n if os.path.splitext(output_audio_path)[-1] == \".aac\":\n (\n ffmpeg.input(video_path, loglevel=\"quiet\")\n .output(output_audio_path, acodec=codec, ac=1)\n .overwrite_output()\n .run(cmd=FFMPEG_PATH)\n )\n else:\n out, err = (\n ffmpeg.input(video_path, loglevel=\"quiet\")\n .output(\"-\", format=\"f32le\", acodec=\"pcm_f32le\", ac=1, ar=sample_rate)\n .run(cmd=FFMPEG_PATH, capture_stdout=True, capture_stderr=True)\n )\n audio = np.frombuffer(out, np.float32)\n audutils.ret_and_save_audio(audio, output_audio_path, int(sample_rate))\n\n\ndef extract_frames_to_dir(\n video_path: str,\n output_dir: str,\n output_pattern: str = \"raw_frame%08d.jpg\",\n quality: int = 0,\n scale: float = 1,\n) -> None:\n video_info = get_video_info(video_path)\n\n (\n ffmpeg.input(video_path, ss=0, loglevel=\"quiet\")\n .filter(\"scale\", f\"iw*{scale}\", f\"ih*{scale}\")\n .output(\n os.path.join(output_dir, output_pattern),\n vframes=video_info[\"nb_frames\"],\n **{\"qscale:v\": quality},\n )\n .overwrite_output()\n .run(cmd=FFMPEG_PATH)\n )\n\n\ndef get_audio_info(media_path: str) -> Dict[str, Any]:\n \"\"\"\n Returns whatever ffprobe returns. Of particular use are things such as the\n encoder (\"codec_name\") used for audio encoding, the sample rate (\"sample_rate\"),\n and length in seconds (\"duration\")\n\n Accepts as input either an audio or video path.\n \"\"\"\n try:\n local_media_path = pathmgr.get_local_path(media_path)\n except RuntimeError:\n raise FileNotFoundError(f\"Provided media path {media_path} does not exist\")\n\n probe = ffmpeg.probe(local_media_path, cmd=FFPROBE_PATH)\n audio_info = next(\n (stream for stream in probe[\"streams\"] if stream[\"codec_type\"] == \"audio\"),\n None,\n )\n\n assert (\n audio_info is not None\n ), \"Error retrieving audio metadata, please verify that an audio stream exists\"\n\n return audio_info\n\n\ndef get_video_fps(video_path: str) -> Optional[float]:\n video_info = get_video_info(video_path)\n\n try:\n frame_rate = video_info[\"avg_frame_rate\"]\n # ffmpeg often returns fractional framerates, e.g. 
225480/7523\n if \"/\" in frame_rate:\n num, denom = (float(f) for f in frame_rate.split(\"/\"))\n return num / denom\n else:\n return float(frame_rate)\n except Exception:\n return None\n\n\ndef get_video_info(video_path: str) -> Dict[str, Any]:\n \"\"\"\n Returns whatever ffprobe returns. Of particular use are things such as the FPS\n (\"avg_frame_rate\"), number of raw frames (\"nb_frames\"), height and width of each\n frame (\"height\", \"width\") and length in seconds (\"duration\")\n \"\"\"\n try:\n local_video_path = pathmgr.get_local_path(video_path)\n except RuntimeError:\n raise FileNotFoundError(f\"Provided video path {video_path} does not exist\")\n\n probe = ffmpeg.probe(local_video_path, cmd=FFPROBE_PATH)\n video_info = next(\n (stream for stream in probe[\"streams\"] if stream[\"codec_type\"] == \"video\"),\n None,\n )\n\n assert (\n video_info is not None\n ), \"Error retrieving video metadata, please verify that the video file exists\"\n\n return video_info\n\n\ndef has_audio_stream(video_path: str) -> bool:\n streams = ffmpeg.probe(video_path, cmd=FFPROBE_PATH)[\"streams\"]\n for stream in streams:\n if stream[\"codec_type\"] == \"audio\":\n return True\n return False\n\n\ndef add_silent_audio(\n video_path: str,\n output_path: Optional[str] = None,\n duration: Optional[float] = None,\n) -> None:\n local_video_path = pathmgr.get_local_path(video_path)\n if local_video_path != video_path:\n assert (\n output_path is not None\n ), \"If remote video_path is provided, an output_path must be provided\"\n video_path = local_video_path\n output_path = output_path or video_path\n\n if has_audio_stream(video_path):\n if video_path != output_path:\n shutil.copy(video_path, output_path)\n return\n\n duration = duration or float(get_video_info(video_path)[\"duration\"])\n video = ffmpeg.input(video_path).video\n silent_audio_path = pathmgr.get_local_path(SILENT_AUDIO_PATH)\n audio = ffmpeg.input(silent_audio_path, stream_loop=math.ceil(duration)).audio\n output = ffmpeg.output(video, audio, output_path, pix_fmt=\"yuv420p\", t=duration)\n output.overwrite_output().run(cmd=FFMPEG_PATH)\n\n\ndef merge_video_and_audio(\n video_stream: FilterableStream,\n audio: Optional[Union[str, io.BytesIO]],\n output_path: str,\n) -> None:\n kwargs = {\"c:v\": \"libx264\", \"c:a\": \"copy\", \"bsf:a\": \"aac_adtstoasc\"}\n if audio:\n audio_stream = ffmpeg.input(audio, loglevel=\"quiet\")\n output = ffmpeg.output(\n video_stream, audio_stream, output_path, pix_fmt=\"yuv420p\", **kwargs\n ).overwrite_output()\n else:\n output = ffmpeg.output(\n video_stream, output_path, pix_fmt=\"yuv420p\", **kwargs\n ).overwrite_output()\n\n output.run(cmd=FFMPEG_PATH)\n"
] | [
[
"numpy.frombuffer"
]
] |
ueshin/mars | [
"0b542974243be4e0ff239eaf49ab0fb2935f3361"
] | [
"mars/lib/sparse/matrix.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom collections.abc import Iterable\n\nfrom .core import issparse, get_array_module, cp, cps, \\\n get_sparse_module, naked, sps, splinalg\nfrom .array import SparseNDArray, SparseArray\n\n\ndef zeros_sparse_matrix(shape, dtype=float, gpu=False):\n m = sps if not gpu else cps\n return SparseMatrix(m.csr_matrix(shape, dtype=np.dtype(dtype)))\n\n\ndef diag_sparse_matrix(v, k=0, gpu=False):\n v = naked(v)\n if gpu and get_array_module(v) is not cp:\n v = cp.asarray(v)\n if not gpu and get_array_module(v) is not np:\n v = v.get()\n\n if v.ndim == 1:\n sparse_m = sps if not gpu else cps\n m = n = v.size + k\n mat = sparse_m.spdiags(v[None], [k], m, n, format='csr')\n return SparseMatrix(mat)\n else:\n assert v.ndim == 2\n sparse_m = sps if not gpu else cps\n sparse_eye = sparse_m.eye(v.shape[0], v.shape[1], k=k)\n mat = sparse_eye.multiply(v).tocoo()\n size = sparse_eye.nnz\n col = mat.col - max(k, 0)\n row = get_array_module(col).zeros((len(col),))\n return SparseNDArray(sparse_m.csr_matrix((mat.data, (row, col)), shape=(1, size)),\n shape=(size,))\n\n\ndef eye_sparse_matrix(N, M=None, k=0, dtype=float, gpu=False):\n m = sps if not gpu else cps\n return SparseMatrix(m.eye(N, n=M, k=k, dtype=dtype, format='csr'))\n\n\ndef triu_sparse_matrix(m, k=0, gpu=False):\n m = naked(m)\n if gpu and get_array_module(m) is not cp:\n m = cp.asarray(m)\n if not gpu and get_array_module(m) is not np:\n m = m.get()\n\n sparse_m = sps if not gpu else cps\n mat = sparse_m.triu(m, k=k)\n return SparseMatrix(mat)\n\n\ndef tril_sparse_matrix(m, k=0, gpu=False):\n m = naked(m)\n if gpu and get_array_module(m) is not cp:\n m = cp.asarray(m)\n if not gpu and get_array_module(m) is not np:\n m = m.get()\n\n sparse_m = sps if not gpu else cps\n mat = sparse_m.tril(m, k=k)\n return SparseMatrix(mat)\n\n\ndef where(cond, x, y):\n cond, x, y = [SparseMatrix(i) if issparse(i) else i\n for i in (cond, x, y)]\n return cond * x + (cond * (-y) + y)\n\n\ndef lu_sparse_matrix(a):\n a = naked(a)\n a = a.tocsc()\n super_lu = splinalg.splu(a, permc_spec=\"NATURAL\", diag_pivot_thresh=0, options={\"SymmetricMode\": True})\n l = super_lu.L\n u = super_lu.U\n p = sps.lil_matrix(a.shape)\n p[super_lu.perm_r.copy(), np.arange(a.shape[1])] = 1\n return SparseMatrix(p), SparseMatrix(l), SparseMatrix(u),\n\n\ndef solve_triangular_sparse_matrix(a, b, lower=False, sparse=True):\n a = naked(a)\n b = b.toarray() if issparse(b) else b\n\n x = splinalg.spsolve_triangular(a, b, lower=lower)\n if sparse:\n spx = sps.csr_matrix(x).reshape(x.shape[0], 1) if len(x.shape) == 1 else sps.csr_matrix(x)\n return SparseNDArray(spx, shape=x.shape)\n else:\n return x\n\n\nclass SparseMatrix(SparseArray):\n __slots__ = 'spmatrix',\n\n def __init__(self, spmatrix, shape=()):\n if shape and len(shape) != 2:\n raise ValueError('Only accept 2-d array')\n if isinstance(spmatrix, SparseMatrix):\n 
self.spmatrix = spmatrix.spmatrix\n else:\n self.spmatrix = spmatrix.tocsr()\n\n @property\n def shape(self):\n return self.spmatrix.shape\n\n def transpose(self, axes=None):\n assert axes is None or tuple(axes) == (1, 0)\n return SparseMatrix(self.spmatrix.transpose())\n\n @property\n def T(self):\n return SparseMatrix(self.spmatrix.T)\n\n def dot(self, other, sparse=True):\n other_shape = other.shape\n try:\n other = naked(other)\n except TypeError:\n return NotImplemented\n\n if sparse:\n if len(other_shape) == 1:\n x = self.spmatrix.dot(other.T)\n else:\n x = self.spmatrix.dot(other)\n else:\n a = self.spmatrix.toarray()\n if issparse(other):\n other = other.toarray().reshape(other_shape)\n x = a.dot(other)\n if issparse(x):\n shape = (x.shape[0],) if len(other_shape) == 1 else x.shape\n return SparseNDArray(x, shape=shape)\n return get_array_module(x).asarray(x)\n\n def concatenate(self, other, axis=0):\n try:\n other = naked(other)\n except TypeError:\n return NotImplemented\n\n if issparse(other):\n xps = get_sparse_module(self.spmatrix)\n if axis not in (0, 1):\n raise ValueError('axis can only be 0 or 1')\n method = xps.vstack if axis == 0 else xps.hstack\n x = method((self.spmatrix, other))\n else:\n xp = get_array_module(self.spmatrix)\n x = xp.concatenate((self.spmatrix.toarray(), other), axis=axis)\n\n if issparse(x):\n return SparseMatrix(x)\n return get_array_module(x).asarray(x)\n\n def _reduction(self, method_name, axis=None, dtype=None, keepdims=None, todense=False, **kw):\n # TODO: support keepdims\n if isinstance(axis, tuple):\n if sorted(axis) != [0, 1]:\n assert len(axis) == 1\n axis = axis[0]\n else:\n axis = None\n\n if todense:\n x = self.spmatrix.toarray()\n x = getattr(get_array_module(x), method_name)(x, axis=axis, **kw)\n else:\n x = getattr(self.spmatrix, method_name)(axis=axis, **kw)\n if not isinstance(axis, Iterable):\n axis = (axis,)\n axis = list(range(len(self.shape))) if axis is None else axis\n shape = tuple(s if i not in axis else 1 for i, s in enumerate(self.shape)\n if keepdims or i not in axis)\n m = get_array_module(x)\n if issparse(x):\n return SparseNDArray(x, shape=shape)\n if m.isscalar(x):\n if keepdims:\n return m.array([x])[0].reshape((1,) * self.ndim)\n else:\n return m.array([x])[0]\n else:\n return m.asarray(x).reshape(shape)\n"
] | [
[
"numpy.arange",
"numpy.dtype"
]
] |
iwangjian/ByteCup2018 | [
"348bdee3215c146ef7d6e4fe1fecbe4598798c8a"
] | [
"model/dropout.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\nclass LockedDropout(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x, dropout=0.5, seq_lens=None):\n if not self.training or not dropout:\n return x\n if seq_lens == None:\n m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)\n mask = Variable(m, requires_grad=False) / (1 - dropout)\n mask = mask.expand_as(x)\n return mask * x\n else:\n x, _ = nn.utils.rnn.pad_packed_sequence(x)\n m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)\n mask = Variable(m, requires_grad=False) / (1 - dropout)\n mask = mask.expand_as(x)\n x = mask * x\n return nn.utils.rnn.pack_padded_sequence(x, seq_lens)\n\n\nclass WeightDropout(nn.Module):\n def __init__(self, module, weights, dropout=0, variational=False):\n super(WeightDropout, self).__init__()\n self.module = module\n self.weights = weights\n self.dropout = dropout\n self.variational = variational\n if hasattr(module, 'batch_first'):\n self.batch_first = module.batch_first\n else:\n self.batch_first = False\n self._setup()\n\n def widget_demagnetizer_y2k_edition(*args, **kwargs):\n # We need to replace flatten_parameters with a nothing function\n # It must be a function rather than a lambda as otherwise pickling explodes\n # We can't write boring code though, so ... WIDGET DEMAGNETIZER Y2K EDITION!\n return\n\n def _setup(self):\n # Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN\n if issubclass(type(self.module), nn.RNNBase):\n self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition\n\n for name_w in self.weights:\n print('Applying weight drop of {} to {}'.format(self.dropout, name_w))\n w = getattr(self.module, name_w)\n del self.module._parameters[name_w]\n self.module.register_parameter(name_w + '_raw', nn.Parameter(w.data))\n\n def _setweights(self):\n for name_w in self.weights:\n raw_w = getattr(self.module, name_w + '_raw')\n w = None\n if self.variational:\n mask = Variable(torch.ones(raw_w.size(0), 1))\n if raw_w.is_cuda: mask = mask.cuda()\n mask = nn.functional.dropout(mask, p=self.dropout, training=True)\n w = mask.expand_as(raw_w) * raw_w\n else:\n w = nn.functional.dropout(raw_w, p=self.dropout, training=self.training)\n setattr(self.module, name_w, w)\n\n def forward(self, *args):\n self._setweights()\n return self.module.forward(*args)\n"
] | [
[
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.functional.dropout",
"torch.autograd.Variable",
"torch.nn.Parameter",
"torch.nn.utils.rnn.pad_packed_sequence"
]
] |
kkelchte/pilot | [
"e3c3b753351efac30323af57465abe360973653a"
] | [
"pilot/models/alex_net_v4.py"
] | [
"\"\"\"\nVersion of Alexnet with smaller input size and less weights\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# input downscaled to 128x128x1\ndef alexnet(inputs,\n num_outputs=1,\n dropout_rate=0,\n reuse=None,\n is_training=False,\n verbose=False):\n \"\"\"A basic alex net.\"\"\"\n end_points={}\n \n # TOWER ONE\n end_point = 'conv_1'\n l1 = tf.layers.conv2d(inputs, 32, kernel_size=[11,11], strides=4, padding='valid', activation=None, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l1: {}\".format(l1.shape))\n end_points[end_point]=l1\n end_point='bn_1'\n bn1 = tf.layers.batch_normalization(l1, axis=-1, momentum=0.999, epsilon=0.00001, center=True, scale=False, training=is_training, name=end_point, reuse=reuse)\n end_points[end_point]=bn1\n end_point='relu_1'\n relu1 = tf.nn.relu(bn1, name=end_point)\n end_points[end_point]=relu1 \n end_point = 'pool_1'\n p1=tf.layers.max_pooling2d(relu1, pool_size=3, strides=2, padding='valid',name=end_point)\n if verbose: print(\"shape p1: {}\".format(p1.shape))\n end_points[end_point]=p1\n \n # TOWER TWO\n end_point = 'conv_2'\n l2=tf.layers.conv2d(p1, 64, kernel_size=[5,5], strides=1, padding='same', activation=None, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l2: {}\".format(l2.shape))\n end_points[end_point]=l2\n end_point='bn_2'\n bn2 = tf.layers.batch_normalization(l2, axis=-1, momentum=0.999, epsilon=0.00001, center=True, scale=False, training=is_training, name=end_point, reuse=reuse)\n end_points[end_point]=bn2\n end_point='relu_2'\n relu2 = tf.nn.relu(bn2, name=end_point)\n end_points[end_point]=relu2 \n end_point = 'pool_2'\n p2=tf.layers.max_pooling2d(relu2, pool_size=3, strides=2, padding='valid',name=end_point)\n if verbose: print(\"shape p2: {}\".format(p2.shape))\n end_points[end_point]=p2\n\n # TOWER THREE\n end_point = 'conv_3'\n l3=tf.layers.conv2d(p2, 64, kernel_size=[3,3], strides=1, padding='same', activation=tf.nn.relu, use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l3: {}\".format(l3.shape))\n end_points[end_point]=l3\n end_point = 'conv_4'\n l4=tf.layers.conv2d(l3, 64, kernel_size=[3,3], strides=1, padding='same', activation=tf.nn.relu, use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l4: {}\".format(l4.shape))\n end_points[end_point]=l4\n end_point = 'conv_5'\n l5=tf.layers.conv2d(l4, 64, kernel_size=[3,3], strides=1, padding='same', activation=tf.nn.relu, use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l5: {}\".format(l5.shape))\n end_points[end_point]=l5\n end_point = 'pool_5'\n p5=tf.layers.max_pooling2d(l5, pool_size=3, strides=1 , padding='valid', name=end_point)\n if verbose: print(\"shape p5: {}\".format(p5.shape))\n end_points[end_point]=p5\n p5 = tf.reshape(p5, (-1,1,4*4*64))\n \n if dropout_rate != 0:\n end_point = 'dropout_5'\n p5 = tf.layers.dropout(p5, dropout_rate)\n end_points[end_point]=p5\n \n end_point = 'fc_6'\n l6=tf.layers.conv1d(p5, filters=1024, kernel_size=1, strides=1, padding='valid', activation=tf.nn.relu, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l6: {}\".format(l6.shape))\n 
end_points[end_point]=l6\n \n if dropout_rate != 0:\n end_point = 'dropout_6'\n l6 = tf.layers.dropout(l6, dropout_rate)\n end_points[end_point]=l6\n \n end_point = 'fc_7'\n l7=tf.layers.conv1d(l6, filters=1024, kernel_size=1, strides=1, padding='valid', activation=tf.nn.relu, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l7: {}\".format(l7.shape))\n end_points[end_point]=l7\n\n end_point = 'fc_8'\n l8=tf.layers.conv1d(l7, filters=num_outputs, kernel_size=1, strides=1, padding='valid', activation=tf.nn.tanh if num_outputs == 1 else None, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n # l8=tf.layers.conv1d(l7, filters=num_outputs, kernel_size=1, strides=1, padding='valid', activation=tf.nn.tanh if num_outputs == 1 else tf.nn.relu, use_bias=False, kernel_initializer=tf.contrib.layers.xavier_initializer(), name=end_point, reuse=reuse)\n if verbose: print(\"shape l8: {}\".format(l8.shape))\n end_points[end_point]=l8\n end_point = 'outputs'\n outputs = tf.squeeze(l8, [1], name=end_point)\n if verbose: print(\"shape outputs: {}\".format(outputs.shape))\n end_points[end_point]=outputs\n \n return end_points\n\n# default_image_size=[227,227,3]\n# default_image_size=[127,127,3]\ndefault_image_size=[127,127,1]\n"
] | [
[
"tensorflow.reshape",
"tensorflow.layers.batch_normalization",
"tensorflow.squeeze",
"tensorflow.layers.max_pooling2d",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.nn.relu",
"tensorflow.layers.dropout"
]
] |
goldnimrod/IML.HUJI | [
"4fe39f597e1fc9eb188ca12daa2b3111bae92ee9"
] | [
"IMLearn/learners/classifiers/decision_stump.py"
] | [
"from __future__ import annotations\nfrom typing import Tuple, NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\nfrom ...metrics import misclassification_error\nfrom itertools import product\n\n\nclass DecisionStump(BaseEstimator):\n \"\"\"\n A decision stump classifier for {-1,1} labels according to the CART algorithm\n\n Attributes\n ----------\n self.threshold_ : float\n The threshold by which the data is split\n\n self.j_ : int\n The index of the feature by which to split the data\n\n self.sign_: int\n The label to predict for samples where the value of the j'th feature is about the threshold\n \"\"\"\n\n def __init__(self) -> DecisionStump:\n \"\"\"\n Instantiate a Decision stump classifier\n \"\"\"\n super().__init__()\n self.threshold_, self.j_, self.sign_ = None, None, None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n fits a decision stump to the given data\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n \"\"\"\n min_error = 1\n # Run on all sign combinations instead of determining the majority\n for feature_index, sign in product(range(X.shape[1]),\n np.unique(np.sign(y))):\n threshold, error = self._find_threshold(X[:, feature_index], y,\n sign)\n if error <= min_error:\n min_error = error\n self.threshold_ = threshold\n self.sign_ = sign\n self.j_ = feature_index\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n\n Notes\n -----\n Feature values strictly below threshold are predicted as `-sign` whereas values which equal\n to or above the threshold are predicted as `sign`\n \"\"\"\n return np.where(X[:, self.j_] < self.threshold_, -self.sign_,\n self.sign_)\n\n def _find_threshold(self, values: np.ndarray, labels: np.ndarray,\n sign: int) -> Tuple[float, float]:\n \"\"\"\n Given a feature vector and labels, find a threshold by which to perform a split\n The threshold is found according to the value minimizing the misclassification\n error along this feature\n\n Parameters\n ----------\n values: ndarray of shape (n_samples,)\n A feature vector to find a splitting threshold for\n\n labels: ndarray of shape (n_samples,)\n The labels to compare against\n\n sign: int\n Predicted label assigned to values equal to or above threshold\n\n Returns\n -------\n thr: float\n Threshold by which to perform split\n\n thr_err: float between 0 and 1\n Misclassificaiton error of returned threshold\n\n Notes\n -----\n For every tested threshold, values strictly below threshold are predicted as `-sign` whereas values\n which equal to or above the threshold are predicted as `sign`\n \"\"\"\n sorted_values = values[values.argsort()]\n sorted_labels = labels[values.argsort()]\n error_count = np.sum(np.abs(sorted_labels[\n np.not_equal(np.sign(sorted_labels),\n np.ones(\n sorted_values.shape[\n 0]) * sign)]))\n\n def calc_thr_value_error(i):\n \"\"\"\n Calculates the misclassificaiton error of the threshold with\n The value in index i\n\n Parameters\n ----------\n i: int\n The index of the value in the sorted_values array\n\n Returns\n -------\n thr_err: float between 0 and 1\n Misclassificaiton error of the threshold\n\n \"\"\"\n 
nonlocal error_count\n if i == 0:\n return error_count\n if np.sign(sorted_labels[i - 1]) == -sign:\n error_count -= np.abs(sorted_labels[i - 1])\n else:\n error_count += np.abs(sorted_labels[i - 1])\n return error_count\n\n errors = np.vectorize(calc_thr_value_error)(\n np.arange(sorted_values.shape[0]))\n min_error_index = np.argmin(errors)\n return sorted_values[min_error_index], errors[min_error_index]\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under misclassification loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under missclassification loss function\n \"\"\"\n return misclassification_error(np.sign(y), np.sign(self.predict(X)))\n"
] | [
[
"numpy.ones",
"numpy.sign",
"numpy.vectorize",
"numpy.argmin",
"numpy.abs",
"numpy.arange",
"numpy.where"
]
] |
dataubc/number_-of_clicks_prediction | [
"738c2e15dc627b247ad5692ab80d3713fe5c8f3e"
] | [
"xgboost_model.py"
] | [
"import pandas as pd\nimport numpy as np\nimport xgboost\n\n# reading data\nhotel_data = pd.read_csv('cleaned_train.csv')\nX = hotel_data.drop(columns=['n_clicks', 'hotel_id'])\n# let's also add the new feature avg_saving_cash\nX['avg_saving_cash'] = X['avg_price'] * X['avg_saving_percent']\ny = hotel_data['n_clicks']\n\n# let's create trained data for xgboost\ndtrain = xgboost.DMatrix(X, label=y)\n\nparams = {'max_depth': 6, 'min_child_weight': 3, 'eta': .1, 'subsample': 1, 'colsample_bytree': 0.7,\n 'objective': 'reg:squarederror', 'eval_metric': \"rmse\"}\nnum_boost_round = 999\nprint('Training phase has started')\n\n# training best model on the optimized hyper-parameters.\nbest_model = xgboost.train(\n params,\n dtrain,\n num_boost_round=num_boost_round,\n)\nprint('Saving the model as best_model.model')\nbest_model.save_model(\"best_model.model\")\nprint('Reading test data')\n\n# reading test data\nX_test = pd.read_csv('cleaned_test.csv')\ndtest = xgboost.DMatrix(X_test.drop(columns=['hotel_id']))\npredicted_y = best_model.predict(dtest)\nX_test['n_clicks'] = predicted_y\n# getting all negative prediction to 0\nX_test['n_clicks'] = np.where(X_test['n_clicks'] < 0, 0, X_test['n_clicks'])\nfinal_result = X_test[['hotel_id', 'n_clicks']]\nprint('Saving the prediction as predictions.csv')\n# saving the result\nfinal_result.to_csv('predictions.csv')\n"
] | [
[
"pandas.read_csv",
"numpy.where"
]
] |
PacktPublishing/Machine-Learning-Algorithms-Second-Edition | [
"b25d3607e9d5cc388bcf5f1a029bae39bb2b837b"
] | [
"Chapter10/birch.py"
] | [
"from __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_blobs\nfrom sklearn.cluster import Birch\nfrom sklearn.metrics import adjusted_rand_score\n\n\n# Set random seed for reproducibility\nnp.random.seed(1000)\n\n\nnb_samples = 2000\nbatch_size = 80\n\n\nif __name__ == '__main__':\n # Create the dataset\n X, Y = make_blobs(n_samples=nb_samples, n_features=2, centers=5, cluster_std=1.5, random_state=1000)\n\n # Create an instance of BIRCH\n birch = Birch(n_clusters=5, threshold=0.15, branching_factor=100)\n\n # Train the model\n X_batch = []\n Y_preds = []\n\n for i in range(0, nb_samples, batch_size):\n birch.partial_fit(X[i:i + batch_size])\n X_batch.append(X[:i + batch_size])\n Y_preds.append(birch.predict(X[:i + batch_size]))\n\n print(adjusted_rand_score(birch.predict(X), Y))\n\n # Show the training steps\n fig, ax = plt.subplots(5, 5, figsize=(20, 12))\n\n for i in range(5):\n for j in range(5):\n idx = (i * 5) + j\n\n for k in range(5):\n ax[i][j].scatter(X_batch[idx][Y_preds[idx] == k, 0], X_batch[idx][Y_preds[idx] == k, 1], s=3)\n\n ax[i][j].set_xticks([])\n ax[i][j].set_yticks([])\n ax[i][j].set_title('{} samples'.format(batch_size * (idx + 1)))\n\n plt.show()\n\n\n"
] | [
[
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"sklearn.cluster.Birch",
"sklearn.datasets.make_blobs"
]
] |
rniranjan93/tensorflow | [
"2d22f93b04cd137d2480528a80b45ea5306ca9b3"
] | [
"tensorflow/python/compat/compat.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for API compatibility between TensorFlow release versions.\n\nSee [Version\nCompatibility](https://tensorflow.org/guide/version_compat#backward_forward)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\n\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# This value changes every day with an automatic CL. It can be modified in code\n# via `forward_compatibility_horizon()` or with the environment variable\n# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.\n_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 3, 25)\n_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = \"TF_FORWARD_COMPATIBILITY_DELTA_DAYS\"\n_FORWARD_COMPATIBILITY_DATE_NUMBER = None\n\n\ndef _date_to_date_number(year, month, day):\n return (year << 9) | (month << 5) | day\n\n\ndef _update_forward_compatibility_date_number(date_to_override=None):\n \"\"\"Update the base date to compare in forward_compatible function.\"\"\"\n\n global _FORWARD_COMPATIBILITY_DATE_NUMBER\n\n if date_to_override:\n date = date_to_override\n else:\n date = _FORWARD_COMPATIBILITY_HORIZON\n delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)\n if delta_days:\n date += datetime.timedelta(days=int(delta_days))\n\n if date < _FORWARD_COMPATIBILITY_HORIZON:\n logging.warning(\"Trying to set the forward compatibility date to the past\"\n \" date %s. This will be ignored by TensorFlow.\" % (date))\n return\n _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(\n date.year, date.month, date.day)\n\n\n_update_forward_compatibility_date_number()\n\n\n@tf_export(\"compat.forward_compatible\")\ndef forward_compatible(year, month, day):\n \"\"\"Return true if the forward compatibility window has expired.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n Forward-compatibility refers to scenarios where the producer of a TensorFlow\n model (a GraphDef or SavedModel) is compiled against a version of the\n TensorFlow library newer than what the consumer was compiled against. The\n \"producer\" is typically a Python program that constructs and trains a model\n while the \"consumer\" is typically another program that loads and serves the\n model.\n\n TensorFlow has been supporting a 3 week forward-compatibility window for\n programs compiled from source at HEAD.\n\n For example, consider the case where a new operation `MyNewAwesomeAdd` is\n created with the intent of replacing the implementation of an existing Python\n wrapper - `tf.add`. 
The Python wrapper implementation should change from\n something like:\n\n ```python\n def add(inputs, name=None):\n return gen_math_ops.add(inputs, name)\n ```\n\n to:\n\n ```python\n from tensorflow.python.compat import compat\n\n def add(inputs, name=None):\n if compat.forward_compatible(year, month, day):\n # Can use the awesome new implementation.\n return gen_math_ops.my_new_awesome_add(inputs, name)\n # To maintain forward compatibility, use the old implementation.\n return gen_math_ops.add(inputs, name)\n ```\n\n Where `year`, `month`, and `day` specify the date beyond which binaries\n that consume a model are expected to have been updated to include the\n new operations. This date is typically at least 3 weeks beyond the date\n the code that adds the new operation is committed.\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Returns:\n True if the caller can expect that serialized TensorFlow graphs produced\n can be consumed by programs that are compiled with the TensorFlow library\n source code after (year, month, day).\n \"\"\"\n return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(\n year, month, day)\n\n\n@tf_export(\"compat.forward_compatibility_horizon\")\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n \"\"\"Context manager for testing forward compatibility of generated graphs.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n To ensure forward compatibility of generated graphs (see `forward_compatible`)\n with older binaries, new features can be gated with:\n\n ```python\n if compat.forward_compatible(year=2018, month=08, date=01):\n generate_graph_with_new_features()\n else:\n generate_graph_so_older_binaries_can_consume_it()\n ```\n\n However, when adding new features, one may want to unittest it before\n the forward compatibility window expires. This context manager enables\n such tests. For example:\n\n ```python\n from tensorflow.python.compat import compat\n\n def testMyNewFeature(self):\n with compat.forward_compatibility_horizon(2018, 08, 02):\n # Test that generate_graph_with_new_features() has an effect\n ```\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Yields:\n Nothing.\n \"\"\"\n try:\n _update_forward_compatibility_date_number(datetime.date(year, month, day))\n yield\n finally:\n _update_forward_compatibility_date_number()\n"
] | [
[
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.util.tf_export.tf_export"
]
] |
quantify-os/quantify-scheduler | [
"9dee17ca9345560b998b52f956c23b79a9ab287f"
] | [
"tests/scheduler/test_waveforms.py"
] | [
"# pylint: disable=missing-function-docstring\n\n\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\nfrom quantify_scheduler.waveforms import (\n square,\n drag,\n staircase,\n modulate_wave,\n rotate_wave,\n)\n\n\ndef test_square_wave():\n amped_sq = square(np.arange(50), 2.44)\n npt.assert_array_equal(amped_sq, np.linspace(2.44, 2.44, 50))\n\n amped_sq_iq = square(np.arange(20), 6.88)\n npt.assert_array_equal(amped_sq_iq.real, np.linspace(6.88, 6.88, 20))\n npt.assert_array_equal(amped_sq_iq.imag, np.linspace(0, 0, 20))\n\n\ndef test_staircase():\n t = np.linspace(0, 1e-6, 20)\n sig = staircase(t, -1, 2, 4)\n answer = np.array(\n [\n -1.0,\n -1.0,\n -1.0,\n -1.0,\n -1.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 1.0,\n 1.0,\n 1.0,\n 1.0,\n 1.0,\n 2.0,\n 2.0,\n 2.0,\n 2.0,\n 2.0,\n ]\n )\n npt.assert_array_equal(sig, answer)\n\n\ndef test_drag_ns():\n duration = 20e-9\n nr_sigma = 3\n G_amp = 0.5\n D_amp = 1\n\n times = np.arange(0, duration, 1e-9) # sampling rate set to 1 GSPs\n mu = times[0] + duration / 2\n sigma = duration / (2 * nr_sigma)\n gauss_env = G_amp * np.exp(-(0.5 * ((times - mu) ** 2) / sigma ** 2))\n deriv_gauss_env = D_amp * -1 * (times - mu) / (sigma ** 1) * gauss_env\n exp_waveform = gauss_env + 1j * deriv_gauss_env\n\n # quantify\n waveform = drag(\n times,\n G_amp=G_amp,\n D_amp=D_amp,\n duration=duration,\n nr_sigma=nr_sigma,\n subtract_offset=\"none\",\n )\n\n np.testing.assert_array_almost_equal(waveform, exp_waveform, decimal=3)\n assert pytest.approx(np.max(waveform), 0.5)\n\n with pytest.raises(ValueError):\n drag(times, 0.5, D_amp, duration, subtract_offset=\"bad!\")\n\n waveform = drag(\n times,\n G_amp=G_amp,\n D_amp=D_amp,\n duration=duration,\n nr_sigma=nr_sigma,\n subtract_offset=\"average\",\n )\n exp_waveform.real -= np.mean([exp_waveform.real[0], exp_waveform.real[-1]])\n exp_waveform.imag -= np.mean([exp_waveform.imag[0], exp_waveform.imag[-1]])\n np.testing.assert_array_almost_equal(waveform, exp_waveform, decimal=3)\n\n\ndef test_rotate_wave():\n\n I = np.ones(10) # noqa # Q component is zero\n Q = np.zeros(10) # noqa # not used as input, only used for testing\n\n rot_wf = rotate_wave(I, 0)\n\n npt.assert_array_almost_equal(I, rot_wf.real)\n npt.assert_array_almost_equal(I.imag, rot_wf.imag)\n\n rot_wf = rotate_wave(I, 90)\n\n npt.assert_array_almost_equal(I, rot_wf.imag)\n npt.assert_array_almost_equal(Q, -rot_wf.real)\n\n rot_wf = rotate_wave(I, 180)\n\n npt.assert_array_almost_equal(I, -rot_wf.real)\n npt.assert_array_almost_equal(Q, -rot_wf.imag)\n\n rot_wf = rotate_wave(I, 360)\n\n npt.assert_array_almost_equal(I, rot_wf.real)\n npt.assert_array_almost_equal(Q, rot_wf.imag)\n\n\ndef test_modulate():\n fs = 100\n f = 4\n t = np.arange(fs)\n I = np.sin(2 * np.pi * f * (t / fs)) # noqa\n Q = np.sin(2 * np.pi * f * (t / fs) + (np.pi / 2)) # noqa\n wf = I + 1j * Q\n\n mod_wf = modulate_wave(np.linspace(0, 1, fs), wf, 2)\n npt.assert_array_almost_equal(\n mod_wf.real, np.sin(2 * np.pi * (f + 2) * (t / fs)), decimal=1\n )\n\n mod_wf = modulate_wave(np.linspace(0, 1, fs), wf, -2)\n npt.assert_array_almost_equal(\n mod_wf.imag, np.sin(2 * np.pi * (f - 2) * (t / fs) + (np.pi / 2)), decimal=1\n )\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.testing.assert_array_equal",
"numpy.exp",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"numpy.max",
"numpy.array",
"numpy.sin",
"numpy.linspace",
"numpy.mean"
]
] |
surchs/lab-documentation | [
"9d71a4710b66da5341e7c3c67108d175f8a9fe0d"
] | [
"source/tutorials/files/mnist.py"
] | [
"import tensorflow as tf\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(),\n tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(),\n tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(),\n tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\nloss_fn = tf.keras.losses.SparseCategoricalCrossentropy()\nmodel.compile(optimizer='adam',\n loss=loss_fn,\n metrics=['accuracy'])\nmodel.fit(x_train, y_train, epochs=5)\n\nmodel.evaluate(x_test, y_test, verbose=2)"
] | [
[
"tensorflow.keras.datasets.cifar10.load_data",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPool2D"
]
] |
kahilah/hpc-python | [
"5d2efa08076ed2706c81ca255c7e4574c937557c"
] | [
"demos/mpi-collective.py"
] | [
"from mpi4py import MPI\nfrom numpy import arange, empty\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nn = 10\ndata = empty(n, float)\nif rank == 0:\n data = arange(n, dtype=float)\n\ncomm.Bcast(data, 0)\n\nif rank == 1:\n print(\"Received: \" + str(data))\n\n"
] | [
[
"numpy.arange",
"numpy.empty"
]
] |
yangyuethz/qutip | [
"7f5682b5edfd4c906b2e89f69cf0a8be4bfd529b"
] | [
"qutip/propagator.py"
] | [
"# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n__all__ = ['propagator', 'propagator_steadystate']\n\nimport types\nimport numpy as np\nimport scipy.linalg as la\nimport functools\nimport scipy.sparse as sp\nfrom qutip.qobj import Qobj\nfrom qutip.tensor import tensor\nfrom qutip.operators import qeye\nfrom qutip.rhs_generate import (rhs_generate, rhs_clear, _td_format_check)\nfrom qutip.superoperator import (vec2mat, mat2vec,\n vector_to_operator, operator_to_vector)\nfrom qutip.sparse import sp_reshape\nfrom qutip.cy.sparse_utils import unit_row_norm\nfrom qutip.mesolve import mesolve\nfrom qutip.sesolve import sesolve\nfrom qutip.states import basis\nfrom qutip.solver import Options, _solver_safety_check, config\nfrom qutip.parallel import parallel_map, _default_kwargs\nfrom qutip.ui.progressbar import BaseProgressBar, TextProgressBar\n\n\ndef propagator(H, t, c_op_list=[], args={}, options=None,\n unitary_mode='batch', parallel=False,\n progress_bar=None, _safe_mode=True,\n **kwargs):\n r\"\"\"\n Calculate the propagator U(t) for the density matrix or wave function such\n that :math:`\\psi(t) = U(t)\\psi(0)` or\n :math:`\\rho_{\\mathrm vec}(t) = U(t) \\rho_{\\mathrm vec}(0)`\n where :math:`\\rho_{\\mathrm vec}` is the vector representation of the\n density matrix.\n\n Parameters\n ----------\n H : qobj or list\n Hamiltonian as a Qobj instance of a nested list of Qobjs and\n coefficients in the list-string or list-function format for\n time-dependent Hamiltonians (see description in :func:`qutip.mesolve`).\n\n t : float or array-like\n Time or list of times for which to evaluate the propagator.\n\n c_op_list : list\n List of qobj collapse operators.\n\n args : list/array/dictionary\n Parameters to callback functions for time-dependent Hamiltonians and\n collapse operators.\n\n options : 
:class:`qutip.Options`\n with options for the ODE solver.\n\n unitary_mode = str ('batch', 'single')\n Solve all basis vectors simulaneously ('batch') or individually\n ('single').\n\n parallel : bool {False, True}\n Run the propagator in parallel mode. This will override the\n unitary_mode settings if set to True.\n\n progress_bar: BaseProgressBar\n Optional instance of BaseProgressBar, or a subclass thereof, for\n showing the progress of the simulation. By default no progress bar\n is used, and if set to True a TextProgressBar will be used.\n\n Returns\n -------\n a : qobj\n Instance representing the propagator :math:`U(t)`.\n\n \"\"\"\n kw = _default_kwargs()\n if 'num_cpus' in kwargs:\n num_cpus = kwargs['num_cpus']\n else:\n num_cpus = kw['num_cpus']\n\n if progress_bar is None:\n progress_bar = BaseProgressBar()\n elif progress_bar is True:\n progress_bar = TextProgressBar()\n\n if options is None:\n options = Options()\n options.rhs_reuse = True\n rhs_clear()\n\n if isinstance(t, (int, float, np.integer, np.floating)):\n tlist = [0, t]\n else:\n tlist = t\n\n if _safe_mode:\n _solver_safety_check(H, None, c_ops=c_op_list, e_ops=[], args=args)\n\n td_type = _td_format_check(H, c_op_list, solver='me')\n\n if isinstance(H, (types.FunctionType, types.BuiltinFunctionType,\n functools.partial)):\n H0 = H(0.0, args)\n if unitary_mode =='batch':\n # batch don't work with function Hamiltonian\n unitary_mode = 'single'\n elif isinstance(H, list):\n H0 = H[0][0] if isinstance(H[0], list) else H[0]\n else:\n H0 = H\n\n if len(c_op_list) == 0 and H0.isoper:\n # calculate propagator for the wave function\n\n N = H0.shape[0]\n dims = H0.dims\n\n if parallel:\n unitary_mode = 'single'\n u = np.zeros([N, N, len(tlist)], dtype=complex)\n output = parallel_map(_parallel_sesolve, range(N),\n task_args=(N, H, tlist, args, options),\n progress_bar=progress_bar, num_cpus=num_cpus)\n for n in range(N):\n for k, t in enumerate(tlist):\n u[:, n, k] = output[n].states[k].full().T\n else:\n if unitary_mode == 'single':\n output = sesolve(H, qeye(dims[0]), tlist, [], args, options,\n _safe_mode=False)\n if len(tlist) == 2:\n return output.states[-1]\n else:\n return output.states\n\n elif unitary_mode =='batch':\n u = np.zeros(len(tlist), dtype=object)\n _rows = np.array([(N+1)*m for m in range(N)])\n _cols = np.zeros_like(_rows)\n _data = np.ones_like(_rows, dtype=complex)\n psi0 = Qobj(sp.coo_matrix((_data, (_rows, _cols))).tocsr())\n if td_type[1] > 0 or td_type[2] > 0:\n H2 = []\n for k in range(len(H)):\n if isinstance(H[k], list):\n H2.append([tensor(qeye(N), H[k][0]), H[k][1]])\n else:\n H2.append(tensor(qeye(N), H[k]))\n else:\n H2 = tensor(qeye(N), H)\n options.normalize_output = False\n output = sesolve(H2, psi0, tlist, [],\n args=args, options=options,\n _safe_mode=False)\n for k, t in enumerate(tlist):\n u[k] = sp_reshape(output.states[k].data, (N, N))\n unit_row_norm(u[k].data, u[k].indptr, u[k].shape[0])\n u[k] = u[k].T.tocsr()\n\n else:\n raise Exception('Invalid unitary mode.')\n\n\n elif len(c_op_list) == 0 and H0.issuper:\n # calculate the propagator for the vector representation of the\n # density matrix (a superoperator propagator)\n unitary_mode = 'single'\n N = H0.shape[0]\n sqrt_N = int(np.sqrt(N))\n dims = H0.dims\n\n u = np.zeros([N, N, len(tlist)], dtype=complex)\n\n if parallel:\n output = parallel_map(_parallel_mesolve,range(N * N),\n task_args=(\n sqrt_N, H, tlist, c_op_list, args,\n options),\n progress_bar=progress_bar, num_cpus=num_cpus)\n for n in range(N * N):\n for k, t in 
enumerate(tlist):\n u[:, n, k] = mat2vec(output[n].states[k].full()).T\n else:\n rho0 = qeye(N,N)\n rho0.dims = [[sqrt_N, sqrt_N], [sqrt_N, sqrt_N]]\n output = mesolve(H, psi0, tlist, [], args, options,\n _safe_mode=False)\n if len(tlist) == 2:\n return output.states[-1]\n else:\n return output.states\n\n else:\n # calculate the propagator for the vector representation of the\n # density matrix (a superoperator propagator)\n unitary_mode = 'single'\n N = H0.shape[0]\n dims = [H0.dims, H0.dims]\n\n u = np.zeros([N * N, N * N, len(tlist)], dtype=complex)\n\n if parallel:\n output = parallel_map(_parallel_mesolve, range(N * N),\n task_args=(\n N, H, tlist, c_op_list, args, options),\n progress_bar=progress_bar, num_cpus=num_cpus)\n for n in range(N * N):\n for k, t in enumerate(tlist):\n u[:, n, k] = mat2vec(output[n].states[k].full()).T\n else:\n progress_bar.start(N * N)\n for n in range(N * N):\n progress_bar.update(n)\n col_idx, row_idx = np.unravel_index(n, (N, N))\n rho0 = Qobj(sp.csr_matrix(([1], ([row_idx], [col_idx])),\n shape=(N,N), dtype=complex))\n output = mesolve(H, rho0, tlist, c_op_list, [], args, options,\n _safe_mode=False)\n for k, t in enumerate(tlist):\n u[:, n, k] = mat2vec(output.states[k].full()).T\n progress_bar.finished()\n\n if len(tlist) == 2:\n if unitary_mode == 'batch':\n return Qobj(u[-1], dims=dims)\n else:\n return Qobj(u[:, :, 1], dims=dims)\n else:\n if unitary_mode == 'batch':\n return np.array([Qobj(u[k], dims=dims)\n for k in range(len(tlist))], dtype=object)\n else:\n return np.array([Qobj(u[:, :, k], dims=dims)\n for k in range(len(tlist))], dtype=object)\n\n\ndef _get_min_and_index(lst):\n \"\"\"\n Private function for obtaining min and max indicies.\n \"\"\"\n minval, minidx = lst[0], 0\n for i, v in enumerate(lst[1:]):\n if v < minval:\n minval, minidx = v, i + 1\n return minval, minidx\n\n\ndef propagator_steadystate(U):\n \"\"\"Find the steady state for successive applications of the propagator\n :math:`U`.\n\n Parameters\n ----------\n U : qobj\n Operator representing the propagator.\n\n Returns\n -------\n a : qobj\n Instance representing the steady-state density matrix.\n\n \"\"\"\n\n evals, evecs = la.eig(U.full())\n\n shifted_vals = np.abs(evals - 1.0)\n ev_idx = np.argmin(shifted_vals)\n ev_min = shifted_vals[ev_idx]\n evecs = evecs.T\n rho = Qobj(vec2mat(evecs[ev_idx]), dims=U.dims[0])\n rho = rho * (1.0 / rho.tr())\n rho = 0.5 * (rho + rho.dag()) # make sure rho is herm\n rho.isherm = True\n return rho\n\n\ndef _parallel_sesolve(n, N, H, tlist, args, options):\n psi0 = basis(N, n)\n output = sesolve(H, psi0, tlist, [], args, options, _safe_mode=False)\n return output\n\ndef _parallel_mesolve(n, N, H, tlist, c_op_list, args, options):\n col_idx, row_idx = np.unravel_index(n, (N, N))\n rho0 = Qobj(sp.csr_matrix(([1], ([row_idx], [col_idx])),\n shape=(N,N), dtype=complex))\n output = mesolve(H, rho0, tlist, c_op_list, [], args, options,\n _safe_mode=False)\n return output\n"
] | [
[
"numpy.zeros_like",
"numpy.argmin",
"scipy.sparse.csr_matrix",
"numpy.abs",
"numpy.ones_like",
"scipy.sparse.coo_matrix",
"numpy.sqrt",
"numpy.unravel_index"
]
] |
dfarrow0/flu-contest | [
"8356cf48910a76d2643d105651342288076a9377"
] | [
"src/epicast/fc_epicast_analysis.py"
] | [
"from statistics import median_low\nimport mysql.connector\nimport numpy as np\nimport scipy.stats\nfrom ..forecasters.fc_abstract import Forecaster\nfrom delphi.epidata.client.delphi_epidata import Epidata\nimport delphi.operations.secrets as secrets\nimport delphi.utils.epiweek as flu\nfrom ..utils.forecast_type import ForecastType\n\n\nclass Epicast(Forecaster):\n\n def __init__(self, test_season, locations, forecast_type, verbose=False, users=None):\n super().__init__('epicast', test_season, locations, forecast_type, smooth_weeks_bw=0, smooth_wili_bw=0)\n self.verbose = verbose\n self.users = users\n\n @staticmethod\n def fit_distribution(values, num_bins, bin_size, first_value, unbounded, num_users):\n values = [v for v in values if v is not None]\n if len(values) == 0:\n values = [0]\n mu = np.median(values)\n if len(values) == 1:\n sigma = 0\n else:\n sigma = np.std(values, ddof=1)\n sigma = max(sigma, 1e-3)\n df = max(1, num_users - 1)\n cdf = scipy.stats.t(df, mu, sigma).cdf\n dist = []\n for i in range(num_bins):\n a = first_value + i * bin_size\n if unbounded and i == num_bins - 1:\n b = float('inf')\n else:\n b = a + bin_size\n dist.append(cdf(b) - cdf(a))\n dist = np.array(dist)\n mass = sum(dist)\n if mass > 0:\n dist /= mass\n return dist\n\n @staticmethod\n def get_week_forecast(num_users):\n def _forecast(first_epiweek, num_bins, indices, uniform_weight, smooth_bw, allow_none):\n if smooth_bw > 0:\n print(' [EC] warning: epicast doesnt smooth week bins, but smooth_bw = %.3f' % smooth_bw)\n num_none = indices.count(None)\n if num_none > 0 and not allow_none:\n raise Exception('target does not allow None, but None given')\n dist = Epicast.fit_distribution(indices, num_bins, 1, -0.5, False, num_users)\n dist *= len(indices) - num_none\n extra = [num_none] if allow_none else []\n dist = Forecaster.Utils.normalize(list(dist) + extra)\n dist = Forecaster.Utils.blend(dist, uniform_weight)\n if allow_none:\n dist, none = dist[:-1], dist[-1]\n else:\n none = None\n possibilities = [i for i in indices if i is not None]\n if len(possibilities) == 0:\n possibilities = [0]\n point = flu.add_epiweeks(first_epiweek, int(np.median(possibilities)))\n return (dist, none, point)\n return _forecast\n\n @staticmethod\n def get_wili_forecast(num_users):\n def _forecast(bin_size, num_bins, wili, uniform_weight, smooth_bw):\n if smooth_bw > 0:\n print(' [EC] warning: epicast doesnt smooth wILI bins, but smooth_bw = %.3f' % smooth_bw)\n dist = Epicast.fit_distribution(wili, num_bins, bin_size, 0, True, num_users)\n dist = Forecaster.Utils.normalize(dist)\n dist = Forecaster.Utils.blend(dist, uniform_weight)\n point = np.median(wili)\n return (dist, point)\n return _forecast\n\n\n def extractUsers(self, region, epiweek_now):\n self.cur = self.cnx.cursor(buffered=True)\n\n # 1. 
load forecast, with dimensions [location, user, ew2 (+1, 2, 3, 4)]\n # Get all user_id\n self.cur.execute(\"select distinct(user_id) from ec_fluv_forecast_mturk where epiweek_now = %d\" % epiweek_now)\n num_users = 0\n user_ids = []\n for user_id in self.cur:\n user_id = user_id[0]\n if user_id not in [45, 312, 539, 670, 145, 410, 411, 1, 2, 3, 4, 5, 6, 7, 8]:\n num_users += 1\n user_ids.append(user_id)\n\n # Get forecasts\n forecast = {}\n region_ids = [i for i in range(1, 24)] + [i for i in range(25, 30)] + [i for i in range(31, 62)]\n region_user_map = {}\n for r in region_ids:\n forecast[r] = {}\n region_user_map[r] = {}\n for ew2 in range(epiweek_now + 1, epiweek_now + 5):\n forecast[r][ew2] = {}\n\n self.cur.execute(\"\"\"\n select f.user_id, f.region_id, f.epiweek_now, f.epiweek, f.wili from ec_fluv_forecast_mturk f \n JOIN ec_fluv_submissions_mturk s ON f.user_id = s.user_id AND f.region_id = s.region_id AND\n f.epiweek_now = s.epiweek_now where f.epiweek_now = %d and f.epiweek <= 201920\"\"\" % epiweek_now)\n\n num_predictions = 0\n for (u, r, ew1, ew2, wili) in self.cur:\n if ew1 == epiweek_now:\n try:\n forecast[r][ew2][u] = wili\n region_user_map[r][u] = 1\n num_predictions += 1\n except:\n pass\n\n # 2. for each location and epiweek, compute the median\n medians = {}\n for r in region_ids:\n medians[r] = {}\n for ew2 in range(epiweek_now + 1, epiweek_now + 5):\n # print('forecast for this region and ew2: ', list(forecast[r][ew2].keys()))\n medians[r][ew2] = np.median(list(forecast[r][ew2].values()))\n\n # 3. for each location, for each user, get the sum of distance of the 4 weeks' forecasts\n errors = {}\n for r in region_ids:\n errors[r] = {}\n for user_id in region_user_map[r]:\n errors[r][user_id] = 0\n for ew2 in range(epiweek_now + 1, epiweek_now + 5):\n errors[r][user_id] += abs(medians[r][ew2] - forecast[r][ew2][user_id])\n\n # 4. 
for each region, rank the users and take the upper half\n topWorkers = {}\n for r in region_ids:\n ranks = []\n topWorkers[r] = []\n for user_id in region_user_map[r]:\n error = errors[r][user_id]\n ranks.append({'user_id': user_id, 'error': error})\n sorted_users = sorted(ranks, key=lambda x: x['error'])\n numTopHalf = len(sorted_users) // 2\n tmp = sorted_users[:numTopHalf]\n for worker in tmp:\n topWorkers[r].append(worker['user_id'])\n\n # get region id from region (which is fluview_name)\n region = \"'\" + region + \"'\"\n self.cur.execute(\"select id from ec_fluv_regions where fluview_name = %s\" % region)\n print(self.cur)\n for id in self.cur:\n region = id[0]\n return topWorkers[region]\n\n\n def fetch_submissions(self, region, epiweek_now):\n topUsers = self.extractUsers(region, epiweek_now)\n print(topUsers)\n final_week = flu.join_epiweek(self.test_season + 1, 20)\n self.cur = self.cnx.cursor()\n self.cur.execute(\"\"\"\n SELECT\n u.`id` `user_id`, f.`epiweek`, f.`wili`\n FROM (\n SELECT\n u.*\n FROM\n `ec_fluv_users_mturk_2019` u\n JOIN\n `ec_fluv_defaults` d\n ON\n TRUE\n LEFT JOIN\n `ec_fluv_user_preferences` p\n ON\n p.`user_id` = u.`id` AND p.`name` = d.`name`\n WHERE\n d.`name` = '_debug' AND coalesce(p.`value`, d.`value`) = '0'\n ) u\n JOIN\n `ec_fluv_submissions_mturk` s\n ON\n s.`user_id` = u.`id`\n JOIN\n `ec_fluv_forecast_mturk` f\n ON\n f.`user_id` = u.`id` AND f.`region_id` = s.`region_id` AND f.`epiweek_now` = s.`epiweek_now`\n JOIN\n `ec_fluv_regions` r\n ON\n r.`id` = s.`region_id`\n WHERE\n r.`fluview_name` = %s AND s.`epiweek_now` = %s AND f.`epiweek` <= %s AND f.`wili` > 0\n ORDER BY\n u.`id` ASC, f.`epiweek` ASC\n \"\"\", (region, epiweek_now, final_week))\n submissions = {}\n for (user, epiweek, wili) in self.cur:\n if self.users is not None and user not in self.users:\n continue\n # only get performance from top users\n if user in topUsers:\n if user not in submissions:\n submissions[user] = []\n submissions[user].append(wili)\n self.cur.close()\n curves = []\n expected_weeks = flu.delta_epiweeks(epiweek_now, final_week)\n for user in submissions:\n if len(submissions[user]) != expected_weeks:\n print(' [EC] warning: missing data in user submission [%d|%s|%d]' % (user, region, epiweek_now))\n else:\n curves.append(submissions[user])\n\n print(region, curves)\n return curves\n\n def _init(self):\n if self.test_season == 2014:\n db = 'epicast'\n elif self.test_season >= 2015:\n db = 'epicast2'\n else:\n raise Exception('invalid epicast season [%d]' % self.test_season)\n u, p = secrets.db.epi\n self.cnx = mysql.connector.connect(user=u, password=p, database=db)\n\n def _fini(self):\n self.cnx.commit()\n self.cnx.close()\n\n def _train(self, region):\n pass\n\n def _forecast(self, region, epiweek):\n # season setup and sanity check\n ew1 = flu.join_epiweek(self.test_season, 40)\n ew2 = flu.join_epiweek(self.test_season + 1, 20)\n if not ew1 <= epiweek <= ew2:\n raise Exception('`epiweek` outside of `test_season`')\n # get past values (left half) from the Epidata API\n epidata = Forecaster.Utils.decode(Epidata.fluview(region, Epidata.range(ew1, epiweek), issues=epiweek))\n pinned = [row['wili'] for row in epidata]\n if len(pinned) != flu.delta_epiweeks(ew1, epiweek) + 1:\n raise Exception('missing ILINet data')\n # get the user submissions (right half) from the database\n submissions = self.fetch_submissions(region, epiweek)\n self._num_users = len(submissions)\n print(' [EC] %d users found for %s on %d' % (len(submissions), region, epiweek))\n # concatenate 
observed data and user submissions\n return [pinned + sub for sub in submissions]\n"
] | [
[
"numpy.array",
"numpy.std",
"numpy.median"
]
] |
C3RV1/LaytonEditor | [
"51e1a9a372a8acdaa4183ae008235a721dc56cdc"
] | [
"formats/sound/sample_transform.py"
] | [
"import math\nimport numpy as np\n\n\ndef change_sample_rate(buffer: np.ndarray, current, target) -> np.ndarray:\n shape = [0, 0]\n shape[0] = buffer.shape[0]\n\n # RATEo = SAMPLESo\n # RATEm = (SAMPLESo / RATEo) * RATEm\n extend = target / current\n shape[1] = int(math.ceil(buffer.shape[1] * extend))\n converted = np.zeros(shape, dtype=buffer.dtype)\n\n for channel in range(shape[0]):\n for dst_i in range(shape[1]):\n converted[channel][dst_i] = buffer[channel][int(dst_i // extend)]\n\n return converted\n\ndef change_channels(buffer: np.ndarray, target: int) -> np.ndarray:\n converted = np.ndarray(shape=(target, buffer.shape[1]), dtype=buffer.dtype)\n for i in range(target):\n if i < buffer.shape[0]:\n converted[i] = buffer[i]\n else:\n converted[i] = buffer[-1]\n return converted\n"
] | [
[
"numpy.ndarray",
"numpy.zeros"
]
] |
sunmengnan/city_brain | [
"478f0b974f4491b4201956f37b83ce6860712bc8"
] | [
"algorithms/02-edge-subdivision/osmnx_hz/district.py"
] | [
"import pandas as pd\nimport osmnx\nimport numpy as np\n\nfix = {'西湖区,杭州市,浙江省,中国': 2}\ncity_query = [\n '杭州市,浙江省,中国',\n]\ndistrict_query = [\n '上城区,杭州市,浙江省,中国',\n '下城区,杭州市,浙江省,中国',\n '江干区,杭州市,浙江省,中国',\n '西湖区,杭州市,浙江省,中国',\n '拱墅区,杭州市,浙江省,中国',\n '滨江区,杭州市,浙江省,中国',\n]\n\n\ndef query_str_to_dic(query_str):\n result = query_str.split(',')\n if len(result) == 3:\n result.insert(0, '')\n query_dic = {\n 'district': result[0],\n 'city': result[1],\n 'province': result[2],\n }\n return query_dic\n\n\ndef process_query(q):\n query_dic = query_str_to_dic(q)\n limit = fix.get(q, 1)\n nominatim_response = osmnx.osm_polygon_download(q, limit=limit)\n response_json = nominatim_response[limit - 1]\n result_dic = {}\n result_dic.update(response_json)\n result_dic.update(query_dic)\n result_dic['q'] = q\n return result_dic\n\n\ndistrict_df = pd.DataFrame()\nq_result_list = []\nfor q in district_query:\n q_result = process_query(q)\n q_result_list.append(q_result)\ndistrict_df = pd.DataFrame(q_result_list)\nprint(district_df)\n"
] | [
[
"pandas.DataFrame"
]
] |
shun60s/impulse-response | [
"4bdf8ef671ed0b55afd452a12b43f6fde6cdf3ac"
] | [
"ola_convolve.py"
] | [
"#coding:utf-8\r\n\r\n# overlap-add convolve with impulse response waveform\r\n\r\n\r\nimport sys\r\nimport os\r\nimport argparse\r\nimport numpy as np\r\nfrom scipy import signal\r\nfrom scipy.io.wavfile import read as wavread\r\nfrom scipy.io.wavfile import write as wavwrite\r\n\r\n# Check version\r\n# Python 3.6.4 on win32 (Windows 10)\r\n# numpy 1.16.3\r\n# scipy 1.4.1\r\n\r\ndef load_wav( path0, force_mono=False):\r\n # return \r\n # yg: wav data\r\n # sr: sampling rate\r\n try:\r\n sr, y = wavread(path0)\r\n except:\r\n print ('error: wavread ', path0)\r\n sys.exit()\r\n else:\r\n yg= y / (2 ** 15)\r\n if force_mono :\r\n if yg.ndim == 2: # if stereo\r\n yg= np.average(yg, axis=1)\r\n \r\n print ('file ', path0)\r\n print ('sampling rate ', sr)\r\n print ('length ', yg.shape)\r\n print ('yg.max', np.amax( np.abs(yg)))\r\n return yg,sr\r\n \r\ndef load_wav32( path0, wave_len, yg_in):\r\n #\r\n # wave_len: impluse effective length time [sec]\r\n # return \r\n # yg: wav data (stereo)\r\n # sr: sampling rate\r\n try:\r\n sr, y = wavread(path0)\r\n except:\r\n print ('error: wavread ', path0)\r\n sys.exit()\r\n else:\r\n len0= int(wave_len * sr)\r\n yg= y[sr : sr+len0] # / (2 ** 31)\r\n \r\n if yg_in.ndim == 2:\r\n yg2=np.hstack((yg,yg)).reshape( 2, len(yg) ).T\r\n else:\r\n yg2=yg.copy()\r\n \r\n print ('file ', path0)\r\n print ('sampling rate ', sr)\r\n print ('yg2.shape', yg2.shape)\r\n print ('yg.max', np.amax( np.abs(yg)), yg[0],yg[-1])\r\n return yg2, sr\r\n\r\ndef save_wav( path0, data, sr=44100, normalize=False):\r\n #\r\n print ('file ', path0)\r\n \r\n amplitude = np.iinfo(np.int16).max\r\n max_data = np.amax(np.abs(data)) # normalize, max level is 16bit full bit\r\n if max_data < (1.0 / amplitude):\r\n max_data=1.0\r\n \r\n try:\r\n if normalize :\r\n wavwrite(path0, sr, np.array( (amplitude / max_data) * data , dtype=np.int16))\r\n else:\r\n wavwrite(path0, sr, np.array( amplitude * data , dtype=np.int16))\r\n except:\r\n print ('error: wavwrite ', path0)\r\n sys.exit()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n #\r\n parser = argparse.ArgumentParser(description='overlap-add convolve with impulse response waveform')\r\n parser.add_argument('--wav_file', '-w', default='test.wav', help='wav file name(16bit)')\r\n parser.add_argument('--wav_32_file', '-i', default='impulse_1sec_100_1sec_44100-TwoTube-output-rtwdf.wav', help='impulse response wav file name (mono 32bit)')\r\n args = parser.parse_args()\r\n \r\n \r\n path0= args.wav_file\r\n # overwrite path0\r\n # path0='test_882.wav'\r\n yg,sr= load_wav(path0)\r\n \r\n path2= args.wav_32_file\r\n # overwrite path2\r\n # path2='impulse_1sec_10_1sec_88200-output-rtwdf.wav\r\n yg2,sr2= load_wav32(path2, 0.150, yg)\r\n \r\n # overlap-add convolve with impulse response waveform\r\n out1= signal.oaconvolve( yg, yg2, axes=0) # need scipy > 1.4.1\r\n # set output file name\r\n path_out0= os.path.splitext(os.path.basename(path0))[0] + '_overlapadd_out.wav'\r\n save_wav( path_out0, out1, sr, normalize=True)\r\n"
] | [
[
"scipy.signal.oaconvolve",
"numpy.abs",
"numpy.iinfo",
"numpy.hstack",
"scipy.io.wavfile.read",
"numpy.array",
"numpy.average"
]
] |
erikolofsson/scrypted | [
"39016a617464003cac13719a426eefcc2421e51a"
] | [
"plugins/opencv/src/opencv/__init__.py"
] | [
"from __future__ import annotations\nfrom time import sleep\nfrom detect import DetectionSession, DetectPlugin\nfrom typing import Any, List\nimport numpy as np\nimport cv2\nimport imutils\nfrom gi.repository import GLib, Gst\nfrom scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected\n\nclass OpenCVDetectionSession(DetectionSession):\n cap: cv2.VideoCapture\n previous_frame: Any\n\n def __init__(self) -> None:\n super().__init__()\n self.previous_frame = None\n self.cap = None\n\ndefaultThreshold = 25\ndefaultArea = 2000\ndefaultInterval = 250\n\nclass OpenCVPlugin(DetectPlugin):\n def __init__(self, nativeId: str | None = None):\n super().__init__(nativeId=nativeId)\n self.color2Gray = None\n self.pixelFormat = \"I420\"\n self.pixelFormatChannelCount = 1\n\n if True:\n self.retainAspectRatio = False\n self.color2Gray = None\n self.pixelFormat = \"I420\"\n self.pixelFormatChannelCount = 1\n else:\n self.retainAspectRatio = True\n self.color2Gray = cv2.COLOR_BGRA2GRAY\n self.pixelFormat = \"BGRA\"\n self.pixelFormatChannelCount = 4\n\n async def getDetectionModel(self) -> ObjectDetectionModel:\n d: ObjectDetectionModel = {\n 'name': '@scrypted/opencv',\n 'classes': ['motion'],\n }\n settings = [\n {\n 'title': \"Motion Area\",\n 'description': \"The area size required to trigger motion. Higher values (larger areas) are less sensitive. Setting this to 0 will output all matches into the console.\",\n 'value': defaultArea,\n 'key': 'area',\n 'placeholder': defaultArea,\n 'type': 'number',\n },\n {\n 'title': \"Motion Threshold\",\n 'description': \"The threshold required to consider a pixel changed. Higher values (larger changes) are less sensitive.\",\n 'value': defaultThreshold,\n 'key': 'threshold',\n 'placeholder': defaultThreshold,\n 'type': 'number',\n },\n {\n 'title': \"Frame Analysis Interval\",\n 'description': \"The number of milliseconds to wait between motion analysis.\",\n 'value': defaultInterval,\n 'key': 'interval',\n 'placeholder': defaultInterval,\n 'type': 'number',\n },\n ]\n d['settings'] = settings\n return d\n\n def get_pixel_format(self):\n return self.pixelFormat\n\n def parse_settings(self, settings: Any):\n area = defaultArea\n threshold = defaultThreshold\n interval = defaultInterval\n if settings:\n area = float(settings.get('area', area))\n threshold = int(settings.get('threshold', threshold))\n interval = float(settings.get('interval', interval))\n return area, threshold, interval\n\n def detect(self, detection_session: OpenCVDetectionSession, frame, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:\n area, threshold, interval = self.parse_settings(settings)\n\n # see get_detection_input_size on undocumented size requirements for GRAY8\n if self.color2Gray != None:\n gray = cv2.cvtColor(frame, self.color2Gray)\n else:\n gray = frame\n curFrame = cv2.GaussianBlur(gray, (21,21), 0)\n\n if detection_session.previous_frame is None:\n detection_session.previous_frame = curFrame\n return\n\n frameDelta = cv2.absdiff(detection_session.previous_frame, curFrame)\n detection_session.previous_frame = curFrame\n\n _, thresh = cv2.threshold(frameDelta, threshold, 255, cv2.THRESH_BINARY)\n dilated = cv2.dilate(thresh, None, iterations=2)\n fcontours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(fcontours)\n\n detections: List[ObjectDetectionResult] = []\n detection_result: ObjectsDetected = {}\n detection_result['detections'] = detections\n 
detection_result['inputDimensions'] = src_size\n \n for c in contours:\n x, y, w, h = cv2.boundingRect(c)\n # if w * h != contour_area:\n # print(\"mismatch w/h\", contour_area - w * h)\n\n x2, y2, _ = convert_to_src_size((x + w, y + h))\n x, y, _ = convert_to_src_size((x, y))\n w = x2 - x + 1\n h = y2 - y + 1\n\n contour_area = w * h\n\n if not area or contour_area > area:\n detection: ObjectDetectionResult = {}\n detection['boundingBox'] = (x, y, w, h)\n detection['className'] = 'motion'\n detection['score'] = 1 if area else contour_area\n detections.append(detection)\n\n return detection_result \n\n def run_detection_jpeg(self, detection_session: DetectionSession, image_bytes: bytes, min_score: float) -> ObjectsDetected:\n raise Exception('can not run motion detection on jpeg')\n\n def get_detection_input_size(self, src_size):\n # The initial implementation of this plugin used BGRA\n # because it seemed impossible to pull the Y frame out of I420 without corruption.\n # This is because while 318x174 is aspect ratio correct,\n # it seems to cause strange issues with stride and the image is skewed.\n # By using 300x300, this seems to avoid some undocumented minimum size\n # reqiurement in gst-videoscale or opencv. Unclear which.\n\n # This is the same input size as tensorflow-lite. Allows for better pipelining.\n if not self.retainAspectRatio:\n return (300, 300)\n\n width, height = src_size\n if (width > height):\n if (width > 318):\n height = height / width * 318\n width = 318\n else:\n if (height > 318):\n width = width / height * 318\n height = 318\n\n width = int(np.floor(width / 6) * 6)\n height = int(np.floor(height / 6) * 6)\n\n return width, height\n\n def end_session(self, detection_session: OpenCVDetectionSession):\n if detection_session and detection_session.cap:\n detection_session.cap.release()\n detection_session.cap = None\n return super().end_session(detection_session)\n\n def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size)-> ObjectsDetected:\n buf = gst_sample.get_buffer()\n caps = gst_sample.get_caps()\n # can't trust the width value, compute the stride\n height = caps.get_structure(0).get_value('height')\n width = caps.get_structure(0).get_value('width')\n result, info = buf.map(Gst.MapFlags.READ)\n if not result:\n return\n try:\n mat = np.ndarray(\n (height,\n width,\n self.pixelFormatChannelCount),\n buffer=info.data,\n dtype= np.uint8)\n return self.detect(detection_session, mat, settings, src_size, convert_to_src_size)\n finally:\n buf.unmap(info)\n\n def create_detection_session(self):\n return OpenCVDetectionSession()\n\n def detection_event_notified(self, settings: Any):\n area, threshold, interval = self.parse_settings(settings)\n # it is safe to block here because gstreamer creates a queue thread\n sleep(interval / 1000)\n return super().detection_event_notified(settings)\n"
] | [
[
"numpy.ndarray",
"numpy.floor"
]
] |
tbhuwan14/ga-learner-dsb-repo | [
"1d2271037214e6203a0ff92bae75aff32964263e"
] | [
"Banking-Inference/code.py"
] | [
"# --------------\nimport pandas as pd\r\nimport scipy.stats as stats\r\nimport math\r\nimport numpy as np\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n#Sample_Size\r\nsample_size=2000\r\n\r\n#Z_Critical Score\r\nz_critical = stats.norm.ppf(q = 0.95) \r\n\r\n\r\n# path [File location variable]\r\ndata=pd.read_csv(path)\r\n\r\n#Code starts here\r\ndata_sample=data.sample(n=sample_size,random_state=0)\r\n\r\nsample_mean=data_sample['installment'].mean()\r\nprint(sample_mean)\r\n\r\nsample_std=data_sample['installment'].std()\r\nprint(sample_std)\r\n\r\nmargin_of_error=z_critical*sample_std/math.sqrt(sample_size)\r\nprint(margin_of_error)\r\n\r\nconfidence_interval=(sample_mean-margin_of_error,sample_mean+margin_of_error)\r\nprint(confidence_interval)\r\n\r\ntrue_mean=data['installment'].mean()\r\nprint(true_mean)\n\n\n# --------------\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n#Different sample sizes to take\r\nsample_size=np.array([20,50,100])\r\n\r\n#Code starts here\r\nfig ,axes=plt.subplots(1,3,figsize=(20,10))\r\nfor i in range(len(sample_size)):\r\n m=[]\r\n for j in range(1000):\r\n m.append(data['installment'].sample(n=sample_size[i]).mean())\r\n mean_series=pd.Series(m)\r\n axes[i].hist(mean_series)\r\nplt.show()\n\n\n# --------------\n#Importing header files\r\n\r\nfrom statsmodels.stats.weightstats import ztest\r\n\r\n#Code starts here\r\ndata['int.rate'] = (data['int.rate'].str.replace('%', '')).astype(float)/100\r\n\r\nz_statistic , p_value=ztest(data[data['purpose']=='small_business']['int.rate'],value=data['int.rate'].mean(),alternative='larger')\r\nprint(z_statistic,p_value)\r\nif p_value<0.05:\r\n a='reject'\r\nelse:\r\n a='accept'\r\n\r\nprint('We',a,'The Null Hypothesis')\r\n\n\n\n# --------------\n#Importing header files\r\nfrom statsmodels.stats.weightstats import ztest\r\n\r\n#Code starts here\r\nz_statistic, p_value=ztest(x1=data[data['paid.back.loan']=='No']['installment'],x2=data[data['paid.back.loan']=='Yes']['installment'])\r\nprint(z_statistic,p_value)\r\n\r\nif p_value<0.05:\r\n a='Reject'\r\nelse:\r\n a='Accept'\r\n\r\nprint('We',a,'The Null Hypothesis')\n\n\n# --------------\n#Importing header files\r\nfrom scipy.stats import chi2_contingency\r\n\r\n#Critical value \r\ncritical_value = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*\r\n df = 6) # Df = number of variable categories(in purpose) - 1\r\n\r\n#Code starts here\r\nyes=data[data['paid.back.loan']=='Yes']['purpose'].value_counts()\r\nno=data[data['paid.back.loan']=='No']['purpose'].value_counts()\r\nobserved=pd.concat((yes.transpose(), no.transpose()),axis=1, keys=['Yes', 'No'])\r\nchi2, p, dof, ex=chi2_contingency(observed)\r\n\r\nif chi2>critical_value:\r\n a='Reject'\r\nelse:\r\n a='Accept'\r\nprint('We',a,'The Null Hypothesis')\r\n\n\n\n"
] | [
[
"pandas.Series",
"scipy.stats.chi2.ppf",
"scipy.stats.norm.ppf",
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.array",
"scipy.stats.chi2_contingency"
]
] |
And1210/SRGAN | [
"200731d6249c674d0ed556ba287ad7a7698f88b5"
] | [
"datasets/Pedestron_dataset.py"
] | [
"import os\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom torchvision.transforms import transforms\nfrom torch.utils.data import Dataset\nfrom datasets.base_dataset import BaseDataset\nfrom utils.augmenters.augment import seg\nimport xml.etree.ElementTree as ET\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport random\n\nclass CARLADataset(BaseDataset):\n \"\"\"\n Input params:\n stage: The stage of training.\n configuration: Configuration dictionary.\n \"\"\"\n def __init__(self, configuration):\n super().__init__(configuration)\n\n self._stage = configuration[\"stage\"]\n\n self._image_size = tuple(configuration[\"input_size\"])\n self._downsample_size = tuple(configuration[\"downsample_size\"])\n\n self.dataset_path = os.path.join(configuration[\"dataset_path\"])#, \"{}\".format(self._stage))\n\n #-----------------------------------------------------------------------\n #Here is where you can do things like preload data and labels or do image preprocessing\n\n self.sim_img_paths = []\n for i in os.listdir(os.path.join(self.dataset_path, configuration[\"sim_data_folder\"])):\n for j in os.listdir(os.path.join(self.dataset_path, configuration[\"sim_data_folder\"], i)):\n self.sim_img_paths.append(os.path.join(self.dataset_path, configuration[\"sim_data_folder\"], i, j))\n\n #-----------------------------------------------------------------------\n\n\n self._transform = transforms.Compose(\n [\n transforms.ToPILImage(),\n transforms.ToTensor(),\n ]\n )\n\n #This function returns an data, label pair. All data processing and modification should be done by the end of this function\n def __getitem__(self, index):\n sim_filename = self.sim_img_paths[index]\n\n #Image loading assuming the images are in the 'images' folder in the dataset root path\n sim_image = Image.open(sim_filename)\n sim_image = np.asarray(sim_image)\n sim_image = sim_image.astype(np.uint8)\n\n #Image resizing\n sim_image = cv2.resize(sim_image, self._image_size)\n downsample_image = cv2.resize(sim_image, self._downsample_size)\n\n #Image formatting\n sim_image = np.dstack([sim_image] * 1)\n downsample_image = np.dstack([downsample_image] * 1)\n\n #Some image augmentation\n # image = seg(image=image)\n\n #Apply defined transforms to image from constructor (will convert to tensor)\n sim_image = self._transform(sim_image)\n downsample_image = self._transform(downsample_image)\n\n #image should be the image data, target should be the label\n return sim_image, downsample_image\n\n def __len__(self):\n # return the size of the dataset, replace with len of labels array\n return len(self.sim_img_paths)\n"
] | [
[
"numpy.dstack",
"numpy.asarray"
]
] |
ORNL-BSEC/morph-net | [
"eb1a493ca07ba4992af1f91ab3b73a6c4fb9cca8"
] | [
"morph_net/network_regularizers/cost_calculator.py"
] | [
"\"\"\"CostCalculator that computes network cost or regularization loss.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\nCONV2D_OPS = ('Conv2D', 'Conv2DBackpropInput', 'DepthwiseConv2dNative')\nFLOP_OPS = CONV2D_OPS + ('MatMul',)\nSUPPORTED_OPS = FLOP_OPS + (\n 'Add', 'AddN', 'ConcatV2', 'FusedBatchNorm', 'Mul', 'Relu', 'Relu6', 'Sum')\n\n\nclass CostCalculator(object):\n \"\"\"CostCalculator that calculates resource cost/loss for a network.\"\"\"\n\n def __init__(self, op_regularizer_manager, resource_function):\n \"\"\"Creates an instance.\n\n Args:\n op_regularizer_manager: OpRegularizerManager that contains the\n OpRegularizer for each op in the network.\n resource_function: Callable that returns the resource (e.g. FLOP) cost or\n loss for an op. The function signature is:\n op; A tf.Operation.\n is_regularization; Boolean indicating whether to calculate\n regularization loss. If False, calculate cost instead.\n num_alive_inputs; Scalar Tensor indicating how many input channels are\n considered alive.\n num_alive_outputs; Scalar Tensor indicating how many output channels\n are considered alive.\n reg_inputs; Scalar Tensor which is the sum over the input\n regularization vector.\n reg_outputs; Scalar Tensor which is the sum over the output\n regularization vector.\n batch_size; Integer batch size to calculate cost/loss for.\n \"\"\"\n self._manager = op_regularizer_manager\n self._resource_function = resource_function\n\n def _get_cost_or_regularization_term(self, is_regularization, ops=None):\n \"\"\"Returns cost or regularization term for ops.\n\n Args:\n is_regularization: Boolean indicating whether to calculate regularization\n loss. If False, calculate cost instead.\n ops: List of tf.Operation. If None, calculates cost/regularization for\n all ops found by OpRegularizerManager.\n\n Returns:\n Cost or regularization term for ops as a tensor or float.\n \"\"\"\n total = 0.0\n if not ops:\n ops = self._manager.ops\n for op in ops:\n if op.type not in SUPPORTED_OPS:\n continue\n\n # Get regularization and alive terms for input and output.\n input_tensor = _get_input(op)\n if op.type == 'ConcatV2':\n # For concat, the input and output regularization are identical but the\n # input is composed of multiple concatenated regularizers. Thus, just\n # use the output regularizer as the input regularizer for simpler cost\n # calculation.\n input_tensor = op.outputs[0]\n input_op_reg = self._manager.get_regularizer(input_tensor.op)\n output_op_reg = self._manager.get_regularizer(op)\n num_alive_inputs = _count_alive(input_tensor, input_op_reg)\n num_alive_outputs = _count_alive(op.outputs[0], output_op_reg)\n reg_inputs = _sum_of_reg_vector(input_op_reg)\n reg_outputs = _sum_of_reg_vector(output_op_reg)\n\n total += self._resource_function(\n op, is_regularization, num_alive_inputs, num_alive_outputs,\n reg_inputs, reg_outputs)\n\n # If at least one supported op is present, type would be tensor, not float.\n if isinstance(total, float):\n # Tests rely on this function not raising exception in this case.\n tf.logging.warning('No supported ops found.')\n return total\n\n def get_cost(self, ops=None):\n \"\"\"Returns cost for ops.\n\n Args:\n ops: List of tf.Operation. 
If None, calculates cost/regularization for\n all ops found by OpRegularizerManager.\n\n Returns:\n Cost of ops as a tensor for float.\n \"\"\"\n\n return self._get_cost_or_regularization_term(False, ops)\n\n def get_regularization_term(self, ops=None):\n \"\"\"Returns regularization for ops.\n\n Args:\n ops: List of tf.Operation. If None, calculates cost/regularization for\n all ops found by OpRegularizerManager.\n\n Returns:\n Regularization term of ops as a tensor or float.\n \"\"\"\n return self._get_cost_or_regularization_term(True, ops)\n\n\ndef _get_input(op):\n \"\"\"Returns the input to that op that represents the activations.\n\n (as opposed to e.g. weights.)\n\n Args:\n op: A tf.Operation object with type in SUPPORTED_OPS.\n\n Returns:\n A tf.Tensor representing the input activations.\n\n Raises:\n ValueError: MatMul is used with transposition (unsupported).\n \"\"\"\n assert op.type in SUPPORTED_OPS, 'Op type %s is not supported.' % op.type\n if op.type == 'Conv2D' or op.type == 'DepthwiseConv2dNative':\n return op.inputs[0]\n if op.type == 'Conv2DBackpropInput':\n return op.inputs[2]\n if op.type == 'MatMul':\n if op.get_attr('transpose_a') or op.get_attr('transpose_b'):\n raise ValueError('MatMul with transposition is not yet supported.')\n return op.inputs[0]\n return op.inputs[0]\n\n\ndef _count_alive(tensor, opreg):\n if opreg:\n return tf.reduce_sum(tf.cast(opreg.alive_vector, tf.float32))\n shape = tensor.shape.as_list()\n if shape:\n num_outputs = tensor.shape.as_list()[-1]\n if num_outputs is not None:\n return tf.constant(num_outputs, tf.float32)\n tf.logging.info('Unknown channel count in tensor %s', tensor)\n return tf.constant(0, tf.float32)\n\n\ndef _sum_of_reg_vector(opreg):\n if opreg:\n return tf.reduce_sum(opreg.regularization_vector)\n else:\n return tf.constant(0.0, tf.float32)\n"
] | [
[
"tensorflow.logging.info",
"tensorflow.cast",
"tensorflow.logging.warning",
"tensorflow.constant",
"tensorflow.reduce_sum"
]
] |
timwillhack/dm-haikuBah2 | [
"b76a3db3a39b82c8a1ae5a81a8a0173c23c252e5"
] | [
"haiku/_src/layer_norm_test.py"
] | [
"# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for haiku._src.layer_norm.\"\"\"\n\nimport itertools\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom haiku._src import initializers\nfrom haiku._src import layer_norm\nfrom haiku._src import test_utils\nfrom haiku._src import transform\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\n\nclass LayerNormTest(parameterized.TestCase):\n\n @test_utils.transform_and_run\n def test_connection(self):\n data = jnp.zeros([2, 3, 4, 5])\n norms = []\n for axis in range(4):\n norms.append(layer_norm.LayerNorm(axis=axis, create_scale=True,\n create_offset=True)(data))\n\n norms.append(layer_norm.LayerNorm(axis=slice(1, None), create_scale=True,\n create_offset=True)(data))\n norms.append(layer_norm.LayerNorm(axis=slice(2, None), create_scale=True,\n create_offset=True)(data))\n norms.append(layer_norm.LayerNorm(axis=slice(1, -1), create_scale=True,\n create_offset=True)(data))\n\n return norms\n\n @parameterized.parameters(itertools.product([True, False], repeat=3))\n def test_bf16(self, create_scale, create_offset, use_fast_variance):\n \"\"\"For all configurations, ensure bf16 outputs from bf16 inputs.\"\"\"\n def f(x):\n ln = layer_norm.LayerNorm(\n axis=-1, create_scale=create_scale, create_offset=create_offset,\n use_fast_variance=use_fast_variance)\n return ln(x)\n\n fwd = transform.transform(f)\n data = jnp.zeros([2, 3, 4, 5], dtype=jnp.bfloat16)\n params = fwd.init(jax.random.PRNGKey(428), data)\n bf16_params = jax.tree_map(lambda t: t.astype(jnp.bfloat16), params)\n self.assertEqual(fwd.apply(bf16_params, None, data).dtype, jnp.bfloat16)\n\n @parameterized.parameters(True, False)\n @test_utils.transform_and_run\n def test_simple_case(self, use_fast_variance):\n layer = layer_norm.LayerNorm([1, 2],\n create_scale=False,\n create_offset=False,\n use_fast_variance=use_fast_variance)\n inputs = np.ones([2, 3, 3, 5])\n\n outputs = layer(inputs)\n for x in np.nditer(outputs):\n self.assertEqual(x, 0.0)\n\n @parameterized.parameters(True, False)\n @test_utils.transform_and_run\n def test_simple_case_var(self, use_fast_variance):\n layer = layer_norm.LayerNorm([1, 2],\n create_scale=True,\n create_offset=True,\n scale_init=initializers.Constant(0.5),\n offset_init=initializers.Constant(2.0),\n use_fast_variance=use_fast_variance)\n\n inputs = np.ones([2, 3, 3, 5])\n\n outputs = layer(inputs)\n for x in np.nditer(outputs):\n self.assertEqual(x, 2.0)\n\n @test_utils.transform_and_run\n def test_simple_case_tensor(self):\n layer = layer_norm.LayerNorm([1, 2],\n create_scale=False,\n create_offset=False)\n\n inputs = np.ones([2, 3, 3, 5])\n scale = np.full((5,), 0.5)\n offset = np.full((5,), 2.0)\n\n outputs = layer(inputs, scale, offset)\n for x in np.nditer(outputs):\n self.assertEqual(x, 2.0)\n\n @parameterized.named_parameters((\"String\", \"foo\"), 
(\"ListString\", [\"foo\"]))\n @test_utils.transform_and_run\n def test_invalid_axis(self, axis):\n with self.assertRaisesRegex(\n ValueError, \"`axis` should be an int, slice or iterable of ints.\"):\n layer_norm.LayerNorm(axis, create_scale=False, create_offset=False)\n\n @test_utils.transform_and_run\n def test_no_scale_and_init_provided(self):\n with self.assertRaisesRegex(\n ValueError, \"Cannot set `scale_init` if `create_scale=False`.\"):\n layer_norm.LayerNorm(\n 3, create_scale=False, create_offset=True, scale_init=np.ones)\n\n @test_utils.transform_and_run\n def test_no_offset_beta_init_provided(self):\n with self.assertRaisesRegex(\n ValueError, \"Cannot set `offset_init` if `create_offset=False`.\"):\n layer_norm.LayerNorm(\n 3, create_scale=True, create_offset=False, offset_init=np.zeros)\n\n @test_utils.transform_and_run\n def test_create_scale_and_scale_provided(self):\n layer = layer_norm.LayerNorm([2], create_scale=True, create_offset=False)\n\n with self.assertRaisesRegex(\n ValueError, \"Cannot pass `scale` at call time if `create_scale=True`.\"):\n layer(np.ones([2, 3, 4]), scale=np.ones([4]))\n\n @test_utils.transform_and_run\n def test_create_offset_and_offset_provided(self):\n layer = layer_norm.LayerNorm([2], create_offset=True, create_scale=False)\n\n with self.assertRaisesRegex(\n ValueError,\n \"Cannot pass `offset` at call time if `create_offset=True`.\"):\n layer(np.ones([2, 3, 4]), offset=np.ones([4]))\n\n @parameterized.parameters(True, False)\n @test_utils.transform_and_run\n def test_slice_axis(self, use_fast_variance):\n slice_layer = layer_norm.LayerNorm(\n slice(1, -1),\n create_scale=False,\n create_offset=False,\n use_fast_variance=use_fast_variance)\n axis_layer = layer_norm.LayerNorm((1, 2),\n create_scale=False,\n create_offset=False,\n use_fast_variance=use_fast_variance)\n\n inputs = np.random.uniform(size=[3, 4, 4, 5], low=0, high=10)\n scale = np.random.normal(size=(5,), loc=1.0)\n offset = np.random.normal(size=(5,))\n\n slice_outputs = slice_layer(inputs, scale, offset)\n axis_outputs = axis_layer(inputs, scale, offset)\n\n np.testing.assert_array_equal(slice_outputs, axis_outputs)\n\n @test_utils.transform_and_run\n def test_connection_instance_norm(self):\n layer = layer_norm.InstanceNorm(create_scale=True, create_offset=True)\n\n inputs = np.ones([3, 4, 5, 6])\n result = layer(inputs)\n\n self.assertEqual(result.shape, (3, 4, 5, 6))\n\n\nif __name__ == \"__main__\":\n absltest.main()\n"
] | [
[
"numpy.random.uniform",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.nditer",
"numpy.random.normal",
"numpy.full"
]
] |
unbun/snake.ai | [
"0c017357608dc7c06af0ca3ca57d870641461207",
"0c017357608dc7c06af0ca3ca57d870641461207"
] | [
"venv/Lib/site-packages/scipy/fftpack/tests/test_basic.py",
"venv/Lib/site-packages/numpy/core/tests/test_indexing.py"
] | [
"# Created by Pearu Peterson, September 2002\n\nfrom __future__ import division, print_function, absolute_import\n\n__usage__ = \"\"\"\nBuild fftpack:\n python setup_fftpack.py build\nRun tests if scipy is installed:\n python -c 'import scipy;scipy.fftpack.test()'\nRun tests if fftpack is not installed:\n python tests/test_basic.py\n\"\"\"\n\nfrom numpy.testing import (assert_, assert_equal, assert_array_almost_equal,\n assert_array_almost_equal_nulp, assert_array_less)\nimport pytest\nfrom pytest import raises as assert_raises\nfrom scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2\nfrom scipy.fftpack import _fftpack as fftpack\nfrom scipy.fftpack.basic import _is_safe_size\n\nfrom numpy import (arange, add, array, asarray, zeros, dot, exp, pi,\n swapaxes, double, cdouble)\nimport numpy as np\nimport numpy.fft\nfrom numpy.random import rand\n\n# \"large\" composite numbers supported by FFTPACK\nLARGE_COMPOSITE_SIZES = [\n 2**13,\n 2**5 * 3**5,\n 2**3 * 3**3 * 5**2,\n]\nSMALL_COMPOSITE_SIZES = [\n 2,\n 2*3*5,\n 2*2*3*3,\n]\n# prime\nLARGE_PRIME_SIZES = [\n 2011\n]\nSMALL_PRIME_SIZES = [\n 29\n]\n\n\ndef _assert_close_in_norm(x, y, rtol, size, rdt):\n # helper function for testing\n err_msg = \"size: %s rdt: %s\" % (size, rdt)\n assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)\n\n\ndef random(size):\n return rand(*size)\n\n\ndef get_mat(n):\n data = arange(n)\n data = add.outer(data, data)\n return data\n\n\ndef direct_dft(x):\n x = asarray(x)\n n = len(x)\n y = zeros(n, dtype=cdouble)\n w = -arange(n)*(2j*pi/n)\n for i in range(n):\n y[i] = dot(exp(i*w), x)\n return y\n\n\ndef direct_idft(x):\n x = asarray(x)\n n = len(x)\n y = zeros(n, dtype=cdouble)\n w = arange(n)*(2j*pi/n)\n for i in range(n):\n y[i] = dot(exp(i*w), x)/n\n return y\n\n\ndef direct_dftn(x):\n x = asarray(x)\n for axis in range(len(x.shape)):\n x = fft(x, axis=axis)\n return x\n\n\ndef direct_idftn(x):\n x = asarray(x)\n for axis in range(len(x.shape)):\n x = ifft(x, axis=axis)\n return x\n\n\ndef direct_rdft(x):\n x = asarray(x)\n n = len(x)\n w = -arange(n)*(2j*pi/n)\n r = zeros(n, dtype=double)\n for i in range(n//2+1):\n y = dot(exp(i*w), x)\n if i:\n r[2*i-1] = y.real\n if 2*i < n:\n r[2*i] = y.imag\n else:\n r[0] = y.real\n return r\n\n\ndef direct_irdft(x):\n x = asarray(x)\n n = len(x)\n x1 = zeros(n, dtype=cdouble)\n for i in range(n//2+1):\n if i:\n if 2*i < n:\n x1[i] = x[2*i-1] + 1j*x[2*i]\n x1[n-i] = x[2*i-1] - 1j*x[2*i]\n else:\n x1[i] = x[2*i-1]\n else:\n x1[0] = x[0]\n return direct_idft(x1).real\n\n\nclass _TestFFTBase(object):\n def setup_method(self):\n self.cdt = None\n self.rdt = None\n np.random.seed(1234)\n\n def test_definition(self):\n x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)\n y = fft(x)\n assert_equal(y.dtype, self.cdt)\n y1 = direct_dft(x)\n assert_array_almost_equal(y,y1)\n x = np.array([1,2,3,4+0j,5], dtype=self.cdt)\n assert_array_almost_equal(fft(x),direct_dft(x))\n\n def test_n_argument_real(self):\n x1 = np.array([1,2,3,4], dtype=self.rdt)\n x2 = np.array([1,2,3,4], dtype=self.rdt)\n y = fft([x1,x2],n=4)\n assert_equal(y.dtype, self.cdt)\n assert_equal(y.shape,(2,4))\n assert_array_almost_equal(y[0],direct_dft(x1))\n assert_array_almost_equal(y[1],direct_dft(x2))\n\n def _test_n_argument_complex(self):\n x1 = np.array([1,2,3,4+1j], dtype=self.cdt)\n x2 = np.array([1,2,3,4+1j], dtype=self.cdt)\n y = fft([x1,x2],n=4)\n assert_equal(y.dtype, self.cdt)\n assert_equal(y.shape,(2,4))\n assert_array_almost_equal(y[0],direct_dft(x1))\n 
assert_array_almost_equal(y[1],direct_dft(x2))\n\n def test_djbfft(self):\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n y = fftpack.zfft(x)\n y2 = numpy.fft.fft(x)\n assert_array_almost_equal(y,y2)\n y = fftpack.zrfft(x)\n assert_array_almost_equal(y,y2)\n\n def test_invalid_sizes(self):\n assert_raises(ValueError, fft, [])\n assert_raises(ValueError, fft, [[1,1],[2,2]], -5)\n\n def test__is_safe_size(self):\n vals = [(0, True), (1, True), (2, True), (3, True), (4, True), (5, True), (6, True), (7, False),\n (15, True), (16, True), (17, False), (18, True), (21, False), (25, True), (50, True),\n (120, True), (210, False)]\n for n, is_safe in vals:\n assert_equal(_is_safe_size(n), is_safe)\n\n\nclass TestDoubleFFT(_TestFFTBase):\n def setup_method(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n\n\nclass TestSingleFFT(_TestFFTBase):\n def setup_method(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n\n @pytest.mark.xfail(run=False, reason=\"single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved\")\n def test_notice(self):\n pass\n\n\nclass TestFloat16FFT(object):\n\n def test_1_argument_real(self):\n x1 = np.array([1, 2, 3, 4], dtype=np.float16)\n y = fft(x1, n=4)\n assert_equal(y.dtype, np.complex64)\n assert_equal(y.shape, (4, ))\n assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))\n\n def test_n_argument_real(self):\n x1 = np.array([1, 2, 3, 4], dtype=np.float16)\n x2 = np.array([1, 2, 3, 4], dtype=np.float16)\n y = fft([x1, x2], n=4)\n assert_equal(y.dtype, np.complex64)\n assert_equal(y.shape, (2, 4))\n assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))\n assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))\n\n\nclass _TestIFFTBase(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)\n y = ifft(x)\n y1 = direct_idft(x)\n assert_equal(y.dtype, self.cdt)\n assert_array_almost_equal(y,y1)\n\n x = np.array([1,2,3,4+0j,5], self.cdt)\n assert_array_almost_equal(ifft(x),direct_idft(x))\n\n def test_definition_real(self):\n x = np.array([1,2,3,4,1,2,3,4], self.rdt)\n y = ifft(x)\n assert_equal(y.dtype, self.cdt)\n y1 = direct_idft(x)\n assert_array_almost_equal(y,y1)\n\n x = np.array([1,2,3,4,5], dtype=self.rdt)\n assert_equal(y.dtype, self.cdt)\n assert_array_almost_equal(ifft(x),direct_idft(x))\n\n def test_djbfft(self):\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n y = fftpack.zfft(x,direction=-1)\n y2 = numpy.fft.ifft(x)\n assert_array_almost_equal(y,y2)\n y = fftpack.zrfft(x,direction=-1)\n assert_array_almost_equal(y,y2)\n\n def test_random_complex(self):\n for size in [1,51,111,100,200,64,128,256,1024]:\n x = random([size]).astype(self.cdt)\n x = random([size]).astype(self.cdt) + 1j*x\n y1 = ifft(fft(x))\n y2 = fft(ifft(x))\n assert_equal(y1.dtype, self.cdt)\n assert_equal(y2.dtype, self.cdt)\n assert_array_almost_equal(y1, x)\n assert_array_almost_equal(y2, x)\n\n def test_random_real(self):\n for size in [1,51,111,100,200,64,128,256,1024]:\n x = random([size]).astype(self.rdt)\n y1 = ifft(fft(x))\n y2 = fft(ifft(x))\n assert_equal(y1.dtype, self.cdt)\n assert_equal(y2.dtype, self.cdt)\n assert_array_almost_equal(y1, x)\n assert_array_almost_equal(y2, x)\n\n def test_size_accuracy(self):\n # Sanity check for the accuracy for prime and non-prime sized inputs\n if self.rdt == np.float32:\n rtol = 1e-5\n elif self.rdt == np.float64:\n rtol = 1e-10\n\n for size in 
LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:\n np.random.seed(1234)\n x = np.random.rand(size).astype(self.rdt)\n y = ifft(fft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n y = fft(ifft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n\n x = (x + 1j*np.random.rand(size)).astype(self.cdt)\n y = ifft(fft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n y = fft(ifft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n\n def test_invalid_sizes(self):\n assert_raises(ValueError, ifft, [])\n assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)\n\n\nclass TestDoubleIFFT(_TestIFFTBase):\n def setup_method(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n\n\nclass TestSingleIFFT(_TestIFFTBase):\n def setup_method(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n\n\nclass _TestRFFTBase(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_definition(self):\n for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:\n x = np.array(t, dtype=self.rdt)\n y = rfft(x)\n y1 = direct_rdft(x)\n assert_array_almost_equal(y,y1)\n assert_equal(y.dtype, self.rdt)\n\n def test_djbfft(self):\n from numpy.fft import fft as numpy_fft\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n y2 = numpy_fft(x)\n y1 = zeros((n,),dtype=double)\n y1[0] = y2[0].real\n y1[-1] = y2[n//2].real\n for k in range(1, n//2):\n y1[2*k-1] = y2[k].real\n y1[2*k] = y2[k].imag\n y = fftpack.drfft(x)\n assert_array_almost_equal(y,y1)\n\n def test_invalid_sizes(self):\n assert_raises(ValueError, rfft, [])\n assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)\n\n # See gh-5790\n class MockSeries(object):\n def __init__(self, data):\n self.data = np.asarray(data)\n\n def __getattr__(self, item):\n try:\n return getattr(self.data, item)\n except AttributeError:\n raise AttributeError((\"'MockSeries' object \"\n \"has no attribute '{attr}'\".\n format(attr=item)))\n\n def test_non_ndarray_with_dtype(self):\n x = np.array([1., 2., 3., 4., 5.])\n xs = _TestRFFTBase.MockSeries(x)\n\n expected = [1, 2, 3, 4, 5]\n out = rfft(xs)\n\n # Data should not have been overwritten\n assert_equal(x, expected)\n assert_equal(xs.data, expected)\n\nclass TestRFFTDouble(_TestRFFTBase):\n def setup_method(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n\n\nclass TestRFFTSingle(_TestRFFTBase):\n def setup_method(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n\n\nclass _TestIRFFTBase(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x1 = [1,2,3,4,1,2,3,4]\n x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]\n x2 = [1,2,3,4,1,2,3,4,5]\n x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]\n\n def _test(x, xr):\n y = irfft(np.array(x, dtype=self.rdt))\n y1 = direct_irdft(x)\n assert_equal(y.dtype, self.rdt)\n assert_array_almost_equal(y,y1, decimal=self.ndec)\n assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)\n\n _test(x1, x1_1)\n _test(x2, x2_1)\n\n def test_djbfft(self):\n from numpy.fft import ifft as numpy_ifft\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n x1 = zeros((n,),dtype=cdouble)\n x1[0] = x[0]\n for k in range(1, n//2):\n x1[k] = x[2*k-1]+1j*x[2*k]\n x1[n-k] = x[2*k-1]-1j*x[2*k]\n x1[n//2] = x[-1]\n y1 = numpy_ifft(x1)\n y = fftpack.drfft(x,direction=-1)\n assert_array_almost_equal(y,y1)\n\n def test_random_real(self):\n for size in [1,51,111,100,200,64,128,256,1024]:\n x = random([size]).astype(self.rdt)\n y1 = irfft(rfft(x))\n y2 = rfft(irfft(x))\n assert_equal(y1.dtype, self.rdt)\n assert_equal(y2.dtype, 
self.rdt)\n assert_array_almost_equal(y1, x, decimal=self.ndec,\n err_msg=\"size=%d\" % size)\n assert_array_almost_equal(y2, x, decimal=self.ndec,\n err_msg=\"size=%d\" % size)\n\n def test_size_accuracy(self):\n # Sanity check for the accuracy for prime and non-prime sized inputs\n if self.rdt == np.float32:\n rtol = 1e-5\n elif self.rdt == np.float64:\n rtol = 1e-10\n\n for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:\n np.random.seed(1234)\n x = np.random.rand(size).astype(self.rdt)\n y = irfft(rfft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n y = rfft(irfft(x))\n _assert_close_in_norm(x, y, rtol, size, self.rdt)\n\n def test_invalid_sizes(self):\n assert_raises(ValueError, irfft, [])\n assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)\n\n\n# self.ndec is bogus; we should have a assert_array_approx_equal for number of\n# significant digits\n\nclass TestIRFFTDouble(_TestIRFFTBase):\n def setup_method(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n self.ndec = 14\n\n\nclass TestIRFFTSingle(_TestIRFFTBase):\n def setup_method(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n self.ndec = 5\n\n\nclass Testfft2(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_regression_244(self):\n \"\"\"FFT returns wrong result with axes parameter.\"\"\"\n # fftn (and hence fft2) used to break when both axes and shape were\n # used\n x = numpy.ones((4, 4, 2))\n y = fft2(x, shape=(8, 8), axes=(-3, -2))\n y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))\n assert_array_almost_equal(y, y_r)\n\n def test_invalid_sizes(self):\n assert_raises(ValueError, fft2, [[]])\n assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))\n\n\nclass TestFftnSingle(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n y = fftn(np.array(x, np.float32))\n assert_(y.dtype == np.complex64,\n msg=\"double precision output with single precision\")\n\n y_r = np.array(fftn(x), np.complex64)\n assert_array_almost_equal_nulp(y, y_r)\n\n @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)\n def test_size_accuracy_small(self, size):\n x = np.random.rand(size, size) + 1j*np.random.rand(size, size)\n y1 = fftn(x.real.astype(np.float32))\n y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)\n\n assert_equal(y1.dtype, np.complex64)\n assert_array_almost_equal_nulp(y1, y2, 2000)\n\n @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)\n def test_size_accuracy_large(self, size):\n x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)\n y1 = fftn(x.real.astype(np.float32))\n y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)\n\n assert_equal(y1.dtype, np.complex64)\n assert_array_almost_equal_nulp(y1, y2, 2000)\n\n def test_definition_float16(self):\n x = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n y = fftn(np.array(x, np.float16))\n assert_equal(y.dtype, np.complex64)\n y_r = np.array(fftn(x), np.complex64)\n assert_array_almost_equal_nulp(y, y_r)\n\n @pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)\n def test_float16_input_small(self, size):\n x = np.random.rand(size, size) + 1j*np.random.rand(size, size)\n y1 = fftn(x.real.astype(np.float16))\n y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)\n\n assert_equal(y1.dtype, np.complex64)\n assert_array_almost_equal_nulp(y1, y2, 5e5)\n\n @pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)\n def test_float16_input_large(self, size):\n x = 
np.random.rand(size, 3) + 1j*np.random.rand(size, 3)\n y1 = fftn(x.real.astype(np.float16))\n y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)\n\n assert_equal(y1.dtype, np.complex64)\n assert_array_almost_equal_nulp(y1, y2, 2e6)\n\n\nclass TestFftn(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n y = fftn(x)\n assert_array_almost_equal(y, direct_dftn(x))\n\n x = random((20, 26))\n assert_array_almost_equal(fftn(x), direct_dftn(x))\n\n x = random((5, 4, 3, 20))\n assert_array_almost_equal(fftn(x), direct_dftn(x))\n\n def test_axes_argument(self):\n # plane == ji_plane, x== kji_space\n plane1 = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n plane2 = [[10, 11, 12],\n [13, 14, 15],\n [16, 17, 18]]\n plane3 = [[19, 20, 21],\n [22, 23, 24],\n [25, 26, 27]]\n ki_plane1 = [[1, 2, 3],\n [10, 11, 12],\n [19, 20, 21]]\n ki_plane2 = [[4, 5, 6],\n [13, 14, 15],\n [22, 23, 24]]\n ki_plane3 = [[7, 8, 9],\n [16, 17, 18],\n [25, 26, 27]]\n jk_plane1 = [[1, 10, 19],\n [4, 13, 22],\n [7, 16, 25]]\n jk_plane2 = [[2, 11, 20],\n [5, 14, 23],\n [8, 17, 26]]\n jk_plane3 = [[3, 12, 21],\n [6, 15, 24],\n [9, 18, 27]]\n kj_plane1 = [[1, 4, 7],\n [10, 13, 16], [19, 22, 25]]\n kj_plane2 = [[2, 5, 8],\n [11, 14, 17], [20, 23, 26]]\n kj_plane3 = [[3, 6, 9],\n [12, 15, 18], [21, 24, 27]]\n ij_plane1 = [[1, 4, 7],\n [2, 5, 8],\n [3, 6, 9]]\n ij_plane2 = [[10, 13, 16],\n [11, 14, 17],\n [12, 15, 18]]\n ij_plane3 = [[19, 22, 25],\n [20, 23, 26],\n [21, 24, 27]]\n ik_plane1 = [[1, 10, 19],\n [2, 11, 20],\n [3, 12, 21]]\n ik_plane2 = [[4, 13, 22],\n [5, 14, 23],\n [6, 15, 24]]\n ik_plane3 = [[7, 16, 25],\n [8, 17, 26],\n [9, 18, 27]]\n ijk_space = [jk_plane1, jk_plane2, jk_plane3]\n ikj_space = [kj_plane1, kj_plane2, kj_plane3]\n jik_space = [ik_plane1, ik_plane2, ik_plane3]\n jki_space = [ki_plane1, ki_plane2, ki_plane3]\n kij_space = [ij_plane1, ij_plane2, ij_plane3]\n x = array([plane1, plane2, plane3])\n\n assert_array_almost_equal(fftn(x),\n fftn(x, axes=(-3, -2, -1))) # kji_space\n assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))\n assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))\n y = fftn(x, axes=(2, 1, 0)) # ijk_space\n assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))\n y = fftn(x, axes=(2, 0, 1)) # ikj_space\n assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),\n fftn(ikj_space))\n y = fftn(x, axes=(1, 2, 0)) # jik_space\n assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),\n fftn(jik_space))\n y = fftn(x, axes=(1, 0, 2)) # jki_space\n assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))\n y = fftn(x, axes=(0, 2, 1)) # kij_space\n assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))\n\n y = fftn(x, axes=(-2, -1)) # ji_plane\n assert_array_almost_equal(fftn(plane1), y[0])\n assert_array_almost_equal(fftn(plane2), y[1])\n assert_array_almost_equal(fftn(plane3), y[2])\n\n y = fftn(x, axes=(1, 2)) # ji_plane\n assert_array_almost_equal(fftn(plane1), y[0])\n assert_array_almost_equal(fftn(plane2), y[1])\n assert_array_almost_equal(fftn(plane3), y[2])\n\n y = fftn(x, axes=(-3, -2)) # kj_plane\n assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])\n assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])\n assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])\n\n y = fftn(x, axes=(-3, -1)) # ki_plane\n assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])\n assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])\n 
assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])\n\n y = fftn(x, axes=(-1, -2)) # ij_plane\n assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))\n assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))\n assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))\n\n y = fftn(x, axes=(-1, -3)) # ik_plane\n assert_array_almost_equal(fftn(ik_plane1),\n swapaxes(y[:, 0, :], -1, -2))\n assert_array_almost_equal(fftn(ik_plane2),\n swapaxes(y[:, 1, :], -1, -2))\n assert_array_almost_equal(fftn(ik_plane3),\n swapaxes(y[:, 2, :], -1, -2))\n\n y = fftn(x, axes=(-2, -3)) # jk_plane\n assert_array_almost_equal(fftn(jk_plane1),\n swapaxes(y[:, :, 0], -1, -2))\n assert_array_almost_equal(fftn(jk_plane2),\n swapaxes(y[:, :, 1], -1, -2))\n assert_array_almost_equal(fftn(jk_plane3),\n swapaxes(y[:, :, 2], -1, -2))\n\n y = fftn(x, axes=(-1,)) # i_line\n for i in range(3):\n for j in range(3):\n assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])\n y = fftn(x, axes=(-2,)) # j_line\n for i in range(3):\n for j in range(3):\n assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])\n y = fftn(x, axes=(0,)) # k_line\n for i in range(3):\n for j in range(3):\n assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])\n\n y = fftn(x, axes=()) # point\n assert_array_almost_equal(y, x)\n\n def test_shape_argument(self):\n small_x = [[1, 2, 3],\n [4, 5, 6]]\n large_x1 = [[1, 2, 3, 0],\n [4, 5, 6, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]]\n\n y = fftn(small_x, shape=(4, 4))\n assert_array_almost_equal(y, fftn(large_x1))\n\n y = fftn(small_x, shape=(3, 4))\n assert_array_almost_equal(y, fftn(large_x1[:-1]))\n\n def test_shape_axes_argument(self):\n small_x = [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n large_x1 = array([[1, 2, 3, 0],\n [4, 5, 6, 0],\n [7, 8, 9, 0],\n [0, 0, 0, 0]])\n y = fftn(small_x, shape=(4, 4), axes=(-2, -1))\n assert_array_almost_equal(y, fftn(large_x1))\n y = fftn(small_x, shape=(4, 4), axes=(-1, -2))\n\n assert_array_almost_equal(y, swapaxes(\n fftn(swapaxes(large_x1, -1, -2)), -1, -2))\n\n def test_shape_axes_argument2(self):\n # Change shape of the last axis\n x = numpy.random.random((10, 5, 3, 7))\n y = fftn(x, axes=(-1,), shape=(8,))\n assert_array_almost_equal(y, fft(x, axis=-1, n=8))\n\n # Change shape of an arbitrary axis which is not the last one\n x = numpy.random.random((10, 5, 3, 7))\n y = fftn(x, axes=(-2,), shape=(8,))\n assert_array_almost_equal(y, fft(x, axis=-2, n=8))\n\n # Change shape of axes: cf #244, where shape and axes were mixed up\n x = numpy.random.random((4, 4, 2))\n y = fftn(x, axes=(-3, -2), shape=(8, 8))\n assert_array_almost_equal(y,\n numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))\n\n def test_shape_argument_more(self):\n x = zeros((4, 4, 2))\n with assert_raises(ValueError,\n match=\"when given, axes and shape arguments\"\n \" have to be of the same length\"):\n fftn(x, shape=(8, 8, 2, 1))\n\n def test_invalid_sizes(self):\n with assert_raises(ValueError,\n match=\"invalid number of data points\"\n r\" \\(\\[1 0\\]\\) specified\"):\n fftn([[]])\n\n with assert_raises(ValueError,\n match=\"invalid number of data points\"\n r\" \\(\\[ 4 -3\\]\\) specified\"):\n fftn([[1, 1], [2, 2]], (4, -3))\n\n\nclass TestIfftn(object):\n dtype = None\n cdtype = None\n\n def setup_method(self):\n np.random.seed(1234)\n\n @pytest.mark.parametrize('dtype,cdtype,maxnlp',\n [(np.float64, np.complex128, 2000),\n (np.float32, np.complex64, 3500)])\n def test_definition(self, dtype, cdtype, maxnlp):\n x = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]], 
dtype=dtype)\n y = ifftn(x)\n assert_equal(y.dtype, cdtype)\n assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)\n\n x = random((20, 26))\n assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)\n\n x = random((5, 4, 3, 20))\n assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)\n\n @pytest.mark.parametrize('maxnlp', [2000, 3500])\n @pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])\n def test_random_complex(self, maxnlp, size):\n x = random([size, size]) + 1j*random([size, size])\n assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)\n assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)\n\n def test_invalid_sizes(self):\n with assert_raises(ValueError,\n match=\"invalid number of data points\"\n r\" \\(\\[1 0\\]\\) specified\"):\n ifftn([[]])\n\n with assert_raises(ValueError,\n match=\"invalid number of data points\"\n r\" \\(\\[ 4 -3\\]\\) specified\"):\n ifftn([[1, 1], [2, 2]], (4, -3))\n\n\nclass TestLongDoubleFailure(object):\n def setup_method(self):\n np.random.seed(1234)\n\n def test_complex(self):\n if np.dtype(np.longcomplex).itemsize == np.dtype(complex).itemsize:\n # longdouble == double; so fft is supported\n return\n\n x = np.random.randn(10).astype(np.longdouble) + \\\n 1j * np.random.randn(10).astype(np.longdouble)\n\n for f in [fft, ifft]:\n try:\n f(x)\n raise AssertionError(\"Type {0} not supported but does not fail\" %\n np.longcomplex)\n except ValueError:\n pass\n\n def test_real(self):\n if np.dtype(np.longdouble).itemsize == np.dtype(np.double).itemsize:\n # longdouble == double; so fft is supported\n return\n\n x = np.random.randn(10).astype(np.longcomplex)\n\n for f in [fft, ifft]:\n try:\n f(x)\n raise AssertionError(\"Type %r not supported but does not fail\" %\n np.longcomplex)\n except ValueError:\n pass\n\n\nclass FakeArray(object):\n def __init__(self, data):\n self._data = data\n self.__array_interface__ = data.__array_interface__\n\n\nclass FakeArray2(object):\n def __init__(self, data):\n self._data = data\n\n def __array__(self):\n return self._data\n\n\nclass TestOverwrite(object):\n \"\"\"Check input overwrite behavior of the FFT functions.\"\"\"\n\n real_dtypes = [np.float32, np.float64]\n dtypes = real_dtypes + [np.complex64, np.complex128]\n fftsizes = [8, 16, 32]\n\n def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):\n x2 = x.copy()\n for fake in [lambda x: x, FakeArray, FakeArray2]:\n routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)\n\n sig = \"%s(%s%r, %r, axis=%r, overwrite_x=%r)\" % (\n routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)\n if not should_overwrite:\n assert_equal(x2, x, err_msg=\"spurious overwrite in %s\" % sig)\n\n def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,\n fftsize, overwrite_x):\n np.random.seed(1234)\n if np.issubdtype(dtype, np.complexfloating):\n data = np.random.randn(*shape) + 1j*np.random.randn(*shape)\n else:\n data = np.random.randn(*shape)\n data = data.astype(dtype)\n\n should_overwrite = (overwrite_x\n and dtype in overwritable_dtypes\n and fftsize <= shape[axis]\n and (len(shape) == 1 or\n (axis % len(shape) == len(shape)-1\n and fftsize == shape[axis])))\n self._check(data, routine, fftsize, axis,\n overwrite_x=overwrite_x,\n should_overwrite=should_overwrite)\n\n @pytest.mark.parametrize('dtype', dtypes)\n @pytest.mark.parametrize('fftsize', fftsizes)\n @pytest.mark.parametrize('overwrite_x', [True, False])\n @pytest.mark.parametrize('shape,axes', [((16,), -1),\n ((16, 2), 0),\n ((2, 16), 
1)])\n def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):\n overwritable = (np.complex128, np.complex64)\n self._check_1d(fft, dtype, shape, axes, overwritable,\n fftsize, overwrite_x)\n self._check_1d(ifft, dtype, shape, axes, overwritable,\n fftsize, overwrite_x)\n\n @pytest.mark.parametrize('dtype', real_dtypes)\n @pytest.mark.parametrize('fftsize', fftsizes)\n @pytest.mark.parametrize('overwrite_x', [True, False])\n @pytest.mark.parametrize('shape,axes', [((16,), -1),\n ((16, 2), 0),\n ((2, 16), 1)])\n def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):\n overwritable = self.real_dtypes\n self._check_1d(irfft, dtype, shape, axes, overwritable,\n fftsize, overwrite_x)\n self._check_1d(rfft, dtype, shape, axes, overwritable,\n fftsize, overwrite_x)\n\n def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,\n overwrite_x):\n np.random.seed(1234)\n if np.issubdtype(dtype, np.complexfloating):\n data = np.random.randn(*shape) + 1j*np.random.randn(*shape)\n else:\n data = np.random.randn(*shape)\n data = data.astype(dtype)\n\n def fftshape_iter(shp):\n if len(shp) <= 0:\n yield ()\n else:\n for j in (shp[0]//2, shp[0], shp[0]*2):\n for rest in fftshape_iter(shp[1:]):\n yield (j,) + rest\n\n if axes is None:\n part_shape = shape\n else:\n part_shape = tuple(np.take(shape, axes))\n\n for fftshape in fftshape_iter(part_shape):\n should_overwrite = (overwrite_x\n and data.ndim == 1\n and np.all([x < y for x, y in zip(fftshape,\n part_shape)])\n and dtype in overwritable_dtypes)\n self._check(data, routine, fftshape, axes,\n overwrite_x=overwrite_x,\n should_overwrite=should_overwrite)\n if data.ndim > 1:\n # check fortran order: it never overwrites\n self._check(data.T, routine, fftshape, axes,\n overwrite_x=overwrite_x,\n should_overwrite=False)\n\n @pytest.mark.parametrize('dtype', dtypes)\n @pytest.mark.parametrize('overwrite_x', [True, False])\n @pytest.mark.parametrize('shape,axes', [((16,), None),\n ((16,), (0,)),\n ((16, 2), (0,)),\n ((2, 16), (1,)),\n ((8, 16), None),\n ((8, 16), (0, 1)),\n ((8, 16, 2), (0, 1)),\n ((8, 16, 2), (1, 2)),\n ((8, 16, 2), (0,)),\n ((8, 16, 2), (1,)),\n ((8, 16, 2), (2,)),\n ((8, 16, 2), None),\n ((8, 16, 2), (0, 1, 2))])\n def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):\n overwritable = (np.complex128, np.complex64)\n self._check_nd_one(fftn, dtype, shape, axes, overwritable,\n overwrite_x)\n self._check_nd_one(ifftn, dtype, shape, axes, overwritable,\n overwrite_x)\n",
"from __future__ import division, absolute_import, print_function\n\nimport sys\nimport warnings\nimport functools\nimport operator\nimport pytest\n\nimport numpy as np\nfrom numpy.core._multiarray_tests import array_indexing\nfrom itertools import product\nfrom numpy.testing import (\n assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,\n HAS_REFCOUNT, suppress_warnings,\n )\n\n\nclass TestIndexing(object):\n def test_index_no_floats(self):\n a = np.array([[[5]]])\n\n assert_raises(IndexError, lambda: a[0.0])\n assert_raises(IndexError, lambda: a[0, 0.0])\n assert_raises(IndexError, lambda: a[0.0, 0])\n assert_raises(IndexError, lambda: a[0.0,:])\n assert_raises(IndexError, lambda: a[:, 0.0])\n assert_raises(IndexError, lambda: a[:, 0.0,:])\n assert_raises(IndexError, lambda: a[0.0,:,:])\n assert_raises(IndexError, lambda: a[0, 0, 0.0])\n assert_raises(IndexError, lambda: a[0.0, 0, 0])\n assert_raises(IndexError, lambda: a[0, 0.0, 0])\n assert_raises(IndexError, lambda: a[-1.4])\n assert_raises(IndexError, lambda: a[0, -1.4])\n assert_raises(IndexError, lambda: a[-1.4, 0])\n assert_raises(IndexError, lambda: a[-1.4,:])\n assert_raises(IndexError, lambda: a[:, -1.4])\n assert_raises(IndexError, lambda: a[:, -1.4,:])\n assert_raises(IndexError, lambda: a[-1.4,:,:])\n assert_raises(IndexError, lambda: a[0, 0, -1.4])\n assert_raises(IndexError, lambda: a[-1.4, 0, 0])\n assert_raises(IndexError, lambda: a[0, -1.4, 0])\n assert_raises(IndexError, lambda: a[0.0:, 0.0])\n assert_raises(IndexError, lambda: a[0.0:, 0.0,:])\n\n def test_slicing_no_floats(self):\n a = np.array([[5]])\n\n # start as float.\n assert_raises(TypeError, lambda: a[0.0:])\n assert_raises(TypeError, lambda: a[0:, 0.0:2])\n assert_raises(TypeError, lambda: a[0.0::2, :0])\n assert_raises(TypeError, lambda: a[0.0:1:2,:])\n assert_raises(TypeError, lambda: a[:, 0.0:])\n # stop as float.\n assert_raises(TypeError, lambda: a[:0.0])\n assert_raises(TypeError, lambda: a[:0, 1:2.0])\n assert_raises(TypeError, lambda: a[:0.0:2, :0])\n assert_raises(TypeError, lambda: a[:0.0,:])\n assert_raises(TypeError, lambda: a[:, 0:4.0:2])\n # step as float.\n assert_raises(TypeError, lambda: a[::1.0])\n assert_raises(TypeError, lambda: a[0:, :2:2.0])\n assert_raises(TypeError, lambda: a[1::4.0, :0])\n assert_raises(TypeError, lambda: a[::5.0,:])\n assert_raises(TypeError, lambda: a[:, 0:4:2.0])\n # mixed.\n assert_raises(TypeError, lambda: a[1.0:2:2.0])\n assert_raises(TypeError, lambda: a[1.0::2.0])\n assert_raises(TypeError, lambda: a[0:, :2.0:2.0])\n assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])\n assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])\n assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])\n # should still get the DeprecationWarning if step = 0.\n assert_raises(TypeError, lambda: a[::0.0])\n\n def test_index_no_array_to_index(self):\n # No non-scalar arrays.\n a = np.array([[[1]]])\n\n assert_raises(TypeError, lambda: a[a:a:a])\n\n def test_none_index(self):\n # `None` index adds newaxis\n a = np.array([1, 2, 3])\n assert_equal(a[None], a[np.newaxis])\n assert_equal(a[None].ndim, a.ndim + 1)\n\n def test_empty_tuple_index(self):\n # Empty tuple index creates a view\n a = np.array([1, 2, 3])\n assert_equal(a[()], a)\n assert_(a[()].base is a)\n a = np.array(0)\n assert_(isinstance(a[()], np.int_))\n\n def test_void_scalar_empty_tuple(self):\n s = np.zeros((), dtype='V4')\n assert_equal(s[()].dtype, s.dtype)\n assert_equal(s[()], s)\n assert_equal(type(s[...]), np.ndarray)\n\n def 
test_same_kind_index_casting(self):\n # Indexes should be cast with same-kind and not safe, even if that\n # is somewhat unsafe. So test various different code paths.\n index = np.arange(5)\n u_index = index.astype(np.uintp)\n arr = np.arange(10)\n\n assert_array_equal(arr[index], arr[u_index])\n arr[u_index] = np.arange(5)\n assert_array_equal(arr, np.arange(10))\n\n arr = np.arange(10).reshape(5, 2)\n assert_array_equal(arr[index], arr[u_index])\n\n arr[u_index] = np.arange(5)[:,None]\n assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))\n\n arr = np.arange(25).reshape(5, 5)\n assert_array_equal(arr[u_index, u_index], arr[index, index])\n\n def test_empty_fancy_index(self):\n # Empty list index creates an empty array\n # with the same dtype (but with weird shape)\n a = np.array([1, 2, 3])\n assert_equal(a[[]], [])\n assert_equal(a[[]].dtype, a.dtype)\n\n b = np.array([], dtype=np.intp)\n assert_equal(a[[]], [])\n assert_equal(a[[]].dtype, a.dtype)\n\n b = np.array([])\n assert_raises(IndexError, a.__getitem__, b)\n\n def test_ellipsis_index(self):\n a = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n assert_(a[...] is not a)\n assert_equal(a[...], a)\n # `a[...]` was `a` in numpy <1.9.\n assert_(a[...].base is a)\n\n # Slicing with ellipsis can skip an\n # arbitrary number of dimensions\n assert_equal(a[0, ...], a[0])\n assert_equal(a[0, ...], a[0,:])\n assert_equal(a[..., 0], a[:, 0])\n\n # Slicing with ellipsis always results\n # in an array, not a scalar\n assert_equal(a[0, ..., 1], np.array(2))\n\n # Assignment with `(Ellipsis,)` on 0-d arrays\n b = np.array(1)\n b[(Ellipsis,)] = 2\n assert_equal(b, 2)\n\n def test_single_int_index(self):\n # Single integer index selects one row\n a = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n\n assert_equal(a[0], [1, 2, 3])\n assert_equal(a[-1], [7, 8, 9])\n\n # Index out of bounds produces IndexError\n assert_raises(IndexError, a.__getitem__, 1 << 30)\n # Index overflow produces IndexError\n assert_raises(IndexError, a.__getitem__, 1 << 64)\n\n def test_single_bool_index(self):\n # Single boolean index\n a = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n\n assert_equal(a[np.array(True)], a[None])\n assert_equal(a[np.array(False)], a[None][0:0])\n\n def test_boolean_shape_mismatch(self):\n arr = np.ones((5, 4, 3))\n\n index = np.array([True])\n assert_raises(IndexError, arr.__getitem__, index)\n\n index = np.array([False] * 6)\n assert_raises(IndexError, arr.__getitem__, index)\n\n index = np.zeros((4, 4), dtype=bool)\n assert_raises(IndexError, arr.__getitem__, index)\n\n assert_raises(IndexError, arr.__getitem__, (slice(None), index))\n\n def test_boolean_indexing_onedim(self):\n # Indexing a 2-dimensional array with\n # boolean array of length one\n a = np.array([[ 0., 0., 0.]])\n b = np.array([ True], dtype=bool)\n assert_equal(a[b], a)\n # boolean assignment\n a[b] = 1.\n assert_equal(a, [[1., 1., 1.]])\n\n def test_boolean_assignment_value_mismatch(self):\n # A boolean assignment should fail when the shape of the values\n # cannot be broadcast to the subscription. 
(see also gh-3458)\n a = np.arange(4)\n\n def f(a, v):\n a[a > -1] = v\n\n assert_raises(ValueError, f, a, [])\n assert_raises(ValueError, f, a, [1, 2, 3])\n assert_raises(ValueError, f, a[:1], [1, 2, 3])\n\n def test_boolean_assignment_needs_api(self):\n # See also gh-7666\n # This caused a segfault on Python 2 due to the GIL not being\n # held when the iterator does not need it, but the transfer function\n # does\n arr = np.zeros(1000)\n indx = np.zeros(1000, dtype=bool)\n indx[:100] = True\n arr[indx] = np.ones(100, dtype=object)\n\n expected = np.zeros(1000)\n expected[:100] = 1\n assert_array_equal(arr, expected)\n\n def test_boolean_indexing_twodim(self):\n # Indexing a 2-dimensional array with\n # 2-dimensional boolean array\n a = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n b = np.array([[ True, False, True],\n [False, True, False],\n [ True, False, True]])\n assert_equal(a[b], [1, 3, 5, 7, 9])\n assert_equal(a[b[1]], [[4, 5, 6]])\n assert_equal(a[b[0]], a[b[2]])\n\n # boolean assignment\n a[b] = 0\n assert_equal(a, [[0, 2, 0],\n [4, 0, 6],\n [0, 8, 0]])\n\n def test_boolean_indexing_list(self):\n # Regression test for #13715. It's a use-after-free bug which the\n # test won't directly catch, but it will show up in valgrind.\n a = np.array([1, 2, 3])\n b = [True, False, True]\n # Two variants of the test because the first takes a fast path\n assert_equal(a[b], [1, 3])\n assert_equal(a[None, b], [[1, 3]])\n\n def test_reverse_strides_and_subspace_bufferinit(self):\n # This tests that the strides are not reversed for simple and\n # subspace fancy indexing.\n a = np.ones(5)\n b = np.zeros(5, dtype=np.intp)[::-1]\n c = np.arange(5)[::-1]\n\n a[b] = c\n # If the strides are not reversed, the 0 in the arange comes last.\n assert_equal(a[0], 0)\n\n # This also tests that the subspace buffer is initialized:\n a = np.ones((5, 2))\n c = np.arange(10).reshape(5, 2)[::-1]\n a[b, :] = c\n assert_equal(a[0], [0, 1])\n\n def test_reversed_strides_result_allocation(self):\n # Test a bug when calculating the output strides for a result array\n # when the subspace size was 1 (and test other cases as well)\n a = np.arange(10)[:, None]\n i = np.arange(10)[::-1]\n assert_array_equal(a[i], a[i.copy('C')])\n\n a = np.arange(20).reshape(-1, 2)\n\n def test_uncontiguous_subspace_assignment(self):\n # During development there was a bug activating a skip logic\n # based on ndim instead of size.\n a = np.full((3, 4, 2), -1)\n b = np.full((3, 4, 2), -1)\n\n a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T\n b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()\n\n assert_equal(a, b)\n\n def test_too_many_fancy_indices_special_case(self):\n # Just documents behaviour, this is a small limitation.\n a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS\n assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)\n\n def test_scalar_array_bool(self):\n # NumPy bools can be used as boolean index (python ones as of yet not)\n a = np.array(1)\n assert_equal(a[np.bool_(True)], a[np.array(True)])\n assert_equal(a[np.bool_(False)], a[np.array(False)])\n\n # After deprecating bools as integers:\n #a = np.array([0,1,2])\n #assert_equal(a[True, :], a[None, :])\n #assert_equal(a[:, True], a[:, None])\n #\n #assert_(not np.may_share_memory(a, a[True, :]))\n\n def test_everything_returns_views(self):\n # Before `...` would return a itself.\n a = np.arange(5)\n\n assert_(a is not a[()])\n assert_(a is not a[...])\n assert_(a is not a[:])\n\n def test_broaderrors_indexing(self):\n a = np.zeros((5, 5))\n 
assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))\n assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)\n\n def test_trivial_fancy_out_of_bounds(self):\n a = np.zeros(5)\n ind = np.ones(20, dtype=np.intp)\n ind[-1] = 10\n assert_raises(IndexError, a.__getitem__, ind)\n assert_raises(IndexError, a.__setitem__, ind, 0)\n ind = np.ones(20, dtype=np.intp)\n ind[0] = 11\n assert_raises(IndexError, a.__getitem__, ind)\n assert_raises(IndexError, a.__setitem__, ind, 0)\n\n def test_trivial_fancy_not_possible(self):\n # Test that the fast path for trivial assignment is not incorrectly\n # used when the index is not contiguous or 1D, see also gh-11467.\n a = np.arange(6)\n idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]\n assert_array_equal(a[idx], idx)\n\n # this case must not go into the fast path, note that idx is\n # a non-contiuguous none 1D array here.\n a[idx] = -1\n res = np.arange(6)\n res[0] = -1\n res[3] = -1\n assert_array_equal(a, res)\n\n def test_nonbaseclass_values(self):\n class SubClass(np.ndarray):\n def __array_finalize__(self, old):\n # Have array finalize do funny things\n self.fill(99)\n\n a = np.zeros((5, 5))\n s = a.copy().view(type=SubClass)\n s.fill(1)\n\n a[[0, 1, 2, 3, 4], :] = s\n assert_((a == 1).all())\n\n # Subspace is last, so transposing might want to finalize\n a[:, [0, 1, 2, 3, 4]] = s\n assert_((a == 1).all())\n\n a.fill(0)\n a[...] = s\n assert_((a == 1).all())\n\n def test_subclass_writeable(self):\n d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],\n dtype=[('target', 'S20'), ('V_mag', '>f4')])\n ind = np.array([False, True, True], dtype=bool)\n assert_(d[ind].flags.writeable)\n ind = np.array([0, 1])\n assert_(d[ind].flags.writeable)\n assert_(d[...].flags.writeable)\n assert_(d[0].flags.writeable)\n\n def test_memory_order(self):\n # This is not necessary to preserve. 
Memory layouts for\n # more complex indices are not as simple.\n a = np.arange(10)\n b = np.arange(10).reshape(5,2).T\n assert_(a[b].flags.f_contiguous)\n\n # Takes a different implementation branch:\n a = a.reshape(-1, 1)\n assert_(a[b, 0].flags.f_contiguous)\n\n def test_scalar_return_type(self):\n # Full scalar indices should return scalars and object\n # arrays should not call PyArray_Return on their items\n class Zero(object):\n # The most basic valid indexing\n def __index__(self):\n return 0\n\n z = Zero()\n\n class ArrayLike(object):\n # Simple array, should behave like the array\n def __array__(self):\n return np.array(0)\n\n a = np.zeros(())\n assert_(isinstance(a[()], np.float_))\n a = np.zeros(1)\n assert_(isinstance(a[z], np.float_))\n a = np.zeros((1, 1))\n assert_(isinstance(a[z, np.array(0)], np.float_))\n assert_(isinstance(a[z, ArrayLike()], np.float_))\n\n # And object arrays do not call it too often:\n b = np.array(0)\n a = np.array(0, dtype=object)\n a[()] = b\n assert_(isinstance(a[()], np.ndarray))\n a = np.array([b, None])\n assert_(isinstance(a[z], np.ndarray))\n a = np.array([[b, None]])\n assert_(isinstance(a[z, np.array(0)], np.ndarray))\n assert_(isinstance(a[z, ArrayLike()], np.ndarray))\n\n def test_small_regressions(self):\n # Reference count of intp for index checks\n a = np.array([0])\n if HAS_REFCOUNT:\n refcount = sys.getrefcount(np.dtype(np.intp))\n # item setting always checks indices in separate function:\n a[np.array([0], dtype=np.intp)] = 1\n a[np.array([0], dtype=np.uint8)] = 1\n assert_raises(IndexError, a.__setitem__,\n np.array([1], dtype=np.intp), 1)\n assert_raises(IndexError, a.__setitem__,\n np.array([1], dtype=np.uint8), 1)\n\n if HAS_REFCOUNT:\n assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)\n\n def test_unaligned(self):\n v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]\n d = v.view(np.dtype(\"S8\"))\n # unaligned source\n x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]\n x = x.view(np.dtype(\"S8\"))\n x[...] = np.array(\"b\" * 8, dtype=\"S\")\n b = np.arange(d.size)\n #trivial\n assert_equal(d[b], d)\n d[b] = x\n # nontrivial\n # unaligned index array\n b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]\n b = b.view(np.intp)[:d.size]\n b[...] 
= np.arange(d.size)\n assert_equal(d[b.astype(np.int16)], d)\n d[b.astype(np.int16)] = x\n # boolean\n d[b % 2 == 0]\n d[b % 2 == 0] = x[::2]\n\n def test_tuple_subclass(self):\n arr = np.ones((5, 5))\n\n # A tuple subclass should also be an nd-index\n class TupleSubclass(tuple):\n pass\n index = ([1], [1])\n index = TupleSubclass(index)\n assert_(arr[index].shape == (1,))\n # Unlike the non nd-index:\n assert_(arr[index,].shape != (1,))\n\n def test_broken_sequence_not_nd_index(self):\n # See gh-5063:\n # If we have an object which claims to be a sequence, but fails\n # on item getting, this should not be converted to an nd-index (tuple)\n # If this object happens to be a valid index otherwise, it should work\n # This object here is very dubious and probably bad though:\n class SequenceLike(object):\n def __index__(self):\n return 0\n\n def __len__(self):\n return 1\n\n def __getitem__(self, item):\n raise IndexError('Not possible')\n\n arr = np.arange(10)\n assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])\n\n # also test that field indexing does not segfault\n # for a similar reason, by indexing a structured array\n arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])\n assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])\n\n def test_indexing_array_weird_strides(self):\n # See also gh-6221\n # the shapes used here come from the issue and create the correct\n # size for the iterator buffering size.\n x = np.ones(10)\n x2 = np.ones((10, 2))\n ind = np.arange(10)[:, None, None, None]\n ind = np.broadcast_to(ind, (10, 55, 4, 4))\n\n # single advanced index case\n assert_array_equal(x[ind], x[ind.copy()])\n # higher dimensional advanced index\n zind = np.zeros(4, dtype=np.intp)\n assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])\n\n def test_indexing_array_negative_strides(self):\n # From gh-8264,\n # core dumps if negative strides are used in iteration\n arro = np.zeros((4, 4))\n arr = arro[::-1, ::-1]\n\n slices = (slice(None), [0, 1, 2, 3])\n arr[slices] = 10\n assert_array_equal(arr, 10.)\n\nclass TestFieldIndexing(object):\n def test_scalar_return_type(self):\n # Field access on an array should return an array, even if it\n # is 0-d.\n a = np.zeros((), [('a','f8')])\n assert_(isinstance(a['a'], np.ndarray))\n assert_(isinstance(a[['a']], np.ndarray))\n\n\nclass TestBroadcastedAssignments(object):\n def assign(self, a, ind, val):\n a[ind] = val\n return a\n\n def test_prepending_ones(self):\n a = np.zeros((3, 2))\n\n a[...] 
= np.ones((1, 3, 2))\n # Fancy with subspace with and without transpose\n a[[0, 1, 2], :] = np.ones((1, 3, 2))\n a[:, [0, 1]] = np.ones((1, 3, 2))\n # Fancy without subspace (with broadcasting)\n a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))\n\n def test_prepend_not_one(self):\n assign = self.assign\n s_ = np.s_\n a = np.zeros(5)\n\n # Too large and not only ones.\n assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))\n assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))\n assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))\n\n def test_simple_broadcasting_errors(self):\n assign = self.assign\n s_ = np.s_\n a = np.zeros((5, 1))\n\n assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))\n assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))\n assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))\n assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))\n assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))\n\n def test_index_is_larger(self):\n # Simple case of fancy index broadcasting of the index.\n a = np.zeros((5, 5))\n a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]\n\n assert_((a[:3, :3] == [2, 3, 4]).all())\n\n def test_broadcast_subspace(self):\n a = np.zeros((100, 100))\n v = np.arange(100)[:,None]\n b = np.arange(100)[::-1]\n a[b] = v\n assert_((a[::-1] == v).all())\n\n\nclass TestSubclasses(object):\n def test_basic(self):\n # Test that indexing in various ways produces SubClass instances,\n # and that the base is set up correctly: the original subclass\n # instance for views, and a new ndarray for advanced/boolean indexing\n # where a copy was made (latter a regression test for gh-11983).\n class SubClass(np.ndarray):\n pass\n\n a = np.arange(5)\n s = a.view(SubClass)\n s_slice = s[:3]\n assert_(type(s_slice) is SubClass)\n assert_(s_slice.base is s)\n assert_array_equal(s_slice, a[:3])\n\n s_fancy = s[[0, 1, 2]]\n assert_(type(s_fancy) is SubClass)\n assert_(s_fancy.base is not s)\n assert_(type(s_fancy.base) is np.ndarray)\n assert_array_equal(s_fancy, a[[0, 1, 2]])\n assert_array_equal(s_fancy.base, a[[0, 1, 2]])\n\n s_bool = s[s > 0]\n assert_(type(s_bool) is SubClass)\n assert_(s_bool.base is not s)\n assert_(type(s_bool.base) is np.ndarray)\n assert_array_equal(s_bool, a[a > 0])\n assert_array_equal(s_bool.base, a[a > 0])\n\n def test_finalize_gets_full_info(self):\n # Array finalize should be called on the filled array.\n class SubClass(np.ndarray):\n def __array_finalize__(self, old):\n self.finalize_status = np.array(self)\n self.old = old\n\n s = np.arange(10).view(SubClass)\n new_s = s[:3]\n assert_array_equal(new_s.finalize_status, new_s)\n assert_array_equal(new_s.old, s)\n\n new_s = s[[0,1,2,3]]\n assert_array_equal(new_s.finalize_status, new_s)\n assert_array_equal(new_s.old, s)\n\n new_s = s[s > 0]\n assert_array_equal(new_s.finalize_status, new_s)\n assert_array_equal(new_s.old, s)\n\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\n def test_slice_decref_getsetslice(self):\n # See gh-10066, a temporary slice object should be discarted.\n # This test is only really interesting on Python 2 since\n # it goes through `__set/getslice__` here and can probably be\n # removed. 
Use 0:7 to make sure it is never None:7.\n class KeepIndexObject(np.ndarray):\n def __getitem__(self, indx):\n self.indx = indx\n if indx == slice(0, 7):\n raise ValueError\n\n def __setitem__(self, indx, val):\n self.indx = indx\n if indx == slice(0, 4):\n raise ValueError\n\n k = np.array([1]).view(KeepIndexObject)\n k[0:5]\n assert_equal(k.indx, slice(0, 5))\n assert_equal(sys.getrefcount(k.indx), 2)\n try:\n k[0:7]\n raise AssertionError\n except ValueError:\n # The exception holds a reference to the slice so clear on Py2\n if hasattr(sys, 'exc_clear'):\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning)\n sys.exc_clear()\n assert_equal(k.indx, slice(0, 7))\n assert_equal(sys.getrefcount(k.indx), 2)\n\n k[0:3] = 6\n assert_equal(k.indx, slice(0, 3))\n assert_equal(sys.getrefcount(k.indx), 2)\n try:\n k[0:4] = 2\n raise AssertionError\n except ValueError:\n # The exception holds a reference to the slice so clear on Py2\n if hasattr(sys, 'exc_clear'):\n with suppress_warnings() as sup:\n sup.filter(DeprecationWarning)\n sys.exc_clear()\n assert_equal(k.indx, slice(0, 4))\n assert_equal(sys.getrefcount(k.indx), 2)\n\n\nclass TestFancyIndexingCast(object):\n def test_boolean_index_cast_assign(self):\n # Setup the boolean index and float arrays.\n shape = (8, 63)\n bool_index = np.zeros(shape).astype(bool)\n bool_index[0, 1] = True\n zero_array = np.zeros(shape)\n\n # Assigning float is fine.\n zero_array[bool_index] = np.array([1])\n assert_equal(zero_array[0, 1], 1)\n\n # Fancy indexing works, although we get a cast warning.\n assert_warns(np.ComplexWarning,\n zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))\n assert_equal(zero_array[0, 1], 2) # No complex part\n\n # Cast complex to float, throwing away the imaginary portion.\n assert_warns(np.ComplexWarning,\n zero_array.__setitem__, bool_index, np.array([1j]))\n assert_equal(zero_array[0, 1], 0)\n\nclass TestFancyIndexingEquivalence(object):\n def test_object_assign(self):\n # Check that the field and object special case using copyto is active.\n # The right hand side cannot be converted to an array here.\n a = np.arange(5, dtype=object)\n b = a.copy()\n a[:3] = [1, (1,2), 3]\n b[[0, 1, 2]] = [1, (1,2), 3]\n assert_array_equal(a, b)\n\n # test same for subspace fancy indexing\n b = np.arange(5, dtype=object)[None, :]\n b[[0], :3] = [[1, (1,2), 3]]\n assert_array_equal(a, b[0])\n\n # Check that swapping of axes works.\n # There was a bug that made the later assignment throw a ValueError\n # do to an incorrectly transposed temporary right hand side (gh-5714)\n b = b.T\n b[:3, [0]] = [[1], [(1,2)], [3]]\n assert_array_equal(a, b[:, 0])\n\n # Another test for the memory order of the subspace\n arr = np.ones((3, 4, 5), dtype=object)\n # Equivalent slicing assignment for comparison\n cmp_arr = arr.copy()\n cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]\n arr[[0], ...] = [[[1], [2], [3], [4]]]\n assert_array_equal(arr, cmp_arr)\n arr = arr.copy('F')\n arr[[0], ...] 
= [[[1], [2], [3], [4]]]\n assert_array_equal(arr, cmp_arr)\n\n def test_cast_equivalence(self):\n # Yes, normal slicing uses unsafe casting.\n a = np.arange(5)\n b = a.copy()\n\n a[:3] = np.array(['2', '-3', '-1'])\n b[[0, 2, 1]] = np.array(['2', '-1', '-3'])\n assert_array_equal(a, b)\n\n # test the same for subspace fancy indexing\n b = np.arange(5)[None, :]\n b[[0], :3] = np.array([['2', '-3', '-1']])\n assert_array_equal(a, b[0])\n\n\nclass TestMultiIndexingAutomated(object):\n \"\"\"\n These tests use code to mimic the C-Code indexing for selection.\n\n NOTE:\n\n * This still lacks tests for complex item setting.\n * If you change behavior of indexing, you might want to modify\n these tests to try more combinations.\n * Behavior was written to match numpy version 1.8. (though a\n first version matched 1.7.)\n * Only tuple indices are supported by the mimicking code.\n (and tested as of writing this)\n * Error types should match most of the time as long as there\n is only one error. For multiple errors, what gets raised\n will usually not be the same one. They are *not* tested.\n\n Update 2016-11-30: It is probably not worth maintaining this test\n indefinitely and it can be dropped if maintenance becomes a burden.\n\n \"\"\"\n\n def setup(self):\n self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)\n self.b = np.empty((3, 0, 5, 6))\n self.complex_indices = ['skip', Ellipsis,\n 0,\n # Boolean indices, up to 3-d for some special cases of eating up\n # dimensions, also need to test all False\n np.array([True, False, False]),\n np.array([[True, False], [False, True]]),\n np.array([[[False, False], [False, False]]]),\n # Some slices:\n slice(-5, 5, 2),\n slice(1, 1, 100),\n slice(4, -1, -2),\n slice(None, None, -3),\n # Some Fancy indexes:\n np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast\n np.array([0, 1, -2]),\n np.array([[2], [0], [1]]),\n np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),\n np.array([2, -1], dtype=np.int8),\n np.zeros([1]*31, dtype=int), # trigger too large array.\n np.array([0., 1.])] # invalid datatype\n # Some simpler indices that still cover a bit more\n self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),\n 'skip']\n # Very simple ones to fill the rest:\n self.fill_indices = [slice(None, None), 0]\n\n def _get_multi_index(self, arr, indices):\n \"\"\"Mimic multi dimensional indexing.\n\n Parameters\n ----------\n arr : ndarray\n Array to be indexed.\n indices : tuple of index objects\n\n Returns\n -------\n out : ndarray\n An array equivalent to the indexing operation (but always a copy).\n `arr[indices]` should be identical.\n no_copy : bool\n Whether the indexing operation requires a copy. 
If this is `True`,\n `np.may_share_memory(arr, arr[indices])` should be `True` (with\n some exceptions for scalars and possibly 0-d arrays).\n\n Notes\n -----\n While the function may mostly match the errors of normal indexing this\n is generally not the case.\n \"\"\"\n in_indices = list(indices)\n indices = []\n # if False, this is a fancy or boolean index\n no_copy = True\n # number of fancy/scalar indexes that are not consecutive\n num_fancy = 0\n # number of dimensions indexed by a \"fancy\" index\n fancy_dim = 0\n # NOTE: This is a funny twist (and probably OK to change).\n # The boolean array has illegal indexes, but this is\n # allowed if the broadcast fancy-indices are 0-sized.\n # This variable is to catch that case.\n error_unless_broadcast_to_empty = False\n\n # We need to handle Ellipsis and make arrays from indices, also\n # check if this is fancy indexing (set no_copy).\n ndim = 0\n ellipsis_pos = None # define here mostly to replace all but first.\n for i, indx in enumerate(in_indices):\n if indx is None:\n continue\n if isinstance(indx, np.ndarray) and indx.dtype == bool:\n no_copy = False\n if indx.ndim == 0:\n raise IndexError\n # boolean indices can have higher dimensions\n ndim += indx.ndim\n fancy_dim += indx.ndim\n continue\n if indx is Ellipsis:\n if ellipsis_pos is None:\n ellipsis_pos = i\n continue # do not increment ndim counter\n raise IndexError\n if isinstance(indx, slice):\n ndim += 1\n continue\n if not isinstance(indx, np.ndarray):\n # This could be open for changes in numpy.\n # numpy should maybe raise an error if casting to intp\n # is not safe. It rejects np.array([1., 2.]) but not\n # [1., 2.] as index (same for ie. np.take).\n # (Note the importance of empty lists if changing this here)\n try:\n indx = np.array(indx, dtype=np.intp)\n except ValueError:\n raise IndexError\n in_indices[i] = indx\n elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':\n raise IndexError('arrays used as indices must be of '\n 'integer (or boolean) type')\n if indx.ndim != 0:\n no_copy = False\n ndim += 1\n fancy_dim += 1\n\n if arr.ndim - ndim < 0:\n # we can't take more dimensions then we have, not even for 0-d\n # arrays. since a[()] makes sense, but not a[(),]. We will\n # raise an error later on, unless a broadcasting error occurs\n # first.\n raise IndexError\n\n if ndim == 0 and None not in in_indices:\n # Well we have no indexes or one Ellipsis. 
This is legal.\n return arr.copy(), no_copy\n\n if ellipsis_pos is not None:\n in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *\n (arr.ndim - ndim))\n\n for ax, indx in enumerate(in_indices):\n if isinstance(indx, slice):\n # convert to an index array\n indx = np.arange(*indx.indices(arr.shape[ax]))\n indices.append(['s', indx])\n continue\n elif indx is None:\n # this is like taking a slice with one element from a new axis:\n indices.append(['n', np.array([0], dtype=np.intp)])\n arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))\n continue\n if isinstance(indx, np.ndarray) and indx.dtype == bool:\n if indx.shape != arr.shape[ax:ax+indx.ndim]:\n raise IndexError\n\n try:\n flat_indx = np.ravel_multi_index(np.nonzero(indx),\n arr.shape[ax:ax+indx.ndim], mode='raise')\n except Exception:\n error_unless_broadcast_to_empty = True\n # fill with 0s instead, and raise error later\n flat_indx = np.array([0]*indx.sum(), dtype=np.intp)\n # concatenate axis into a single one:\n if indx.ndim != 0:\n arr = arr.reshape((arr.shape[:ax]\n + (np.prod(arr.shape[ax:ax+indx.ndim]),)\n + arr.shape[ax+indx.ndim:]))\n indx = flat_indx\n else:\n # This could be changed, a 0-d boolean index can\n # make sense (even outside the 0-d indexed array case)\n # Note that originally this is could be interpreted as\n # integer in the full integer special case.\n raise IndexError\n else:\n # If the index is a singleton, the bounds check is done\n # before the broadcasting. This used to be different in <1.9\n if indx.ndim == 0:\n if indx >= arr.shape[ax] or indx < -arr.shape[ax]:\n raise IndexError\n if indx.ndim == 0:\n # The index is a scalar. This used to be two fold, but if\n # fancy indexing was active, the check was done later,\n # possibly after broadcasting it away (1.7. or earlier).\n # Now it is always done.\n if indx >= arr.shape[ax] or indx < - arr.shape[ax]:\n raise IndexError\n if (len(indices) > 0 and\n indices[-1][0] == 'f' and\n ax != ellipsis_pos):\n # NOTE: There could still have been a 0-sized Ellipsis\n # between them. Checked that with ellipsis_pos.\n indices[-1].append(indx)\n else:\n # We have a fancy index that is not after an existing one.\n # NOTE: A 0-d array triggers this as well, while one may\n # expect it to not trigger it, since a scalar would not be\n # considered fancy indexing.\n num_fancy += 1\n indices.append(['f', indx])\n\n if num_fancy > 1 and not no_copy:\n # We have to flush the fancy indexes left\n new_indices = indices[:]\n axes = list(range(arr.ndim))\n fancy_axes = []\n new_indices.insert(0, ['f'])\n ni = 0\n ai = 0\n for indx in indices:\n ni += 1\n if indx[0] == 'f':\n new_indices[0].extend(indx[1:])\n del new_indices[ni]\n ni -= 1\n for ax in range(ai, ai + len(indx[1:])):\n fancy_axes.append(ax)\n axes.remove(ax)\n ai += len(indx) - 1 # axis we are at\n indices = new_indices\n # and now we need to transpose arr:\n arr = arr.transpose(*(fancy_axes + axes))\n\n # We only have one 'f' index now and arr is transposed accordingly.\n # Now handle newaxis by reshaping...\n ax = 0\n for indx in indices:\n if indx[0] == 'f':\n if len(indx) == 1:\n continue\n # First of all, reshape arr to combine fancy axes into one:\n orig_shape = arr.shape\n orig_slice = orig_shape[ax:ax + len(indx[1:])]\n arr = arr.reshape((arr.shape[:ax]\n + (np.prod(orig_slice).astype(int),)\n + arr.shape[ax + len(indx[1:]):]))\n\n # Check if broadcasting works\n res = np.broadcast(*indx[1:])\n # unfortunately the indices might be out of bounds. 
So check\n # that first, and use mode='wrap' then. However only if\n # there are any indices...\n if res.size != 0:\n if error_unless_broadcast_to_empty:\n raise IndexError\n for _indx, _size in zip(indx[1:], orig_slice):\n if _indx.size == 0:\n continue\n if np.any(_indx >= _size) or np.any(_indx < -_size):\n raise IndexError\n if len(indx[1:]) == len(orig_slice):\n if np.product(orig_slice) == 0:\n # Work around for a crash or IndexError with 'wrap'\n # in some 0-sized cases.\n try:\n mi = np.ravel_multi_index(indx[1:], orig_slice,\n mode='raise')\n except Exception:\n # This happens with 0-sized orig_slice (sometimes?)\n # here it is a ValueError, but indexing gives a:\n raise IndexError('invalid index into 0-sized')\n else:\n mi = np.ravel_multi_index(indx[1:], orig_slice,\n mode='wrap')\n else:\n # Maybe never happens...\n raise ValueError\n arr = arr.take(mi.ravel(), axis=ax)\n try:\n arr = arr.reshape((arr.shape[:ax]\n + mi.shape\n + arr.shape[ax+1:]))\n except ValueError:\n # too many dimensions, probably\n raise IndexError\n ax += mi.ndim\n continue\n\n # If we are here, we have a 1D array for take:\n arr = arr.take(indx[1], axis=ax)\n ax += 1\n\n return arr, no_copy\n\n def _check_multi_index(self, arr, index):\n \"\"\"Check a multi index item getting and simple setting.\n\n Parameters\n ----------\n arr : ndarray\n Array to be indexed, must be a reshaped arange.\n index : tuple of indexing objects\n Index being tested.\n \"\"\"\n # Test item getting\n try:\n mimic_get, no_copy = self._get_multi_index(arr, index)\n except Exception as e:\n if HAS_REFCOUNT:\n prev_refcount = sys.getrefcount(arr)\n assert_raises(type(e), arr.__getitem__, index)\n assert_raises(type(e), arr.__setitem__, index, 0)\n if HAS_REFCOUNT:\n assert_equal(prev_refcount, sys.getrefcount(arr))\n return\n\n self._compare_index_result(arr, index, mimic_get, no_copy)\n\n def _check_single_index(self, arr, index):\n \"\"\"Check a single index item getting and simple setting.\n\n Parameters\n ----------\n arr : ndarray\n Array to be indexed, must be an arange.\n index : indexing object\n Index being tested. 
Must be a single index and not a tuple\n of indexing objects (see also `_check_multi_index`).\n \"\"\"\n try:\n mimic_get, no_copy = self._get_multi_index(arr, (index,))\n except Exception as e:\n if HAS_REFCOUNT:\n prev_refcount = sys.getrefcount(arr)\n assert_raises(type(e), arr.__getitem__, index)\n assert_raises(type(e), arr.__setitem__, index, 0)\n if HAS_REFCOUNT:\n assert_equal(prev_refcount, sys.getrefcount(arr))\n return\n\n self._compare_index_result(arr, index, mimic_get, no_copy)\n\n def _compare_index_result(self, arr, index, mimic_get, no_copy):\n \"\"\"Compare mimicked result to indexing result.\n \"\"\"\n arr = arr.copy()\n indexed_arr = arr[index]\n assert_array_equal(indexed_arr, mimic_get)\n # Check if we got a view, unless its a 0-sized or 0-d array.\n # (then its not a view, and that does not matter)\n if indexed_arr.size != 0 and indexed_arr.ndim != 0:\n assert_(np.may_share_memory(indexed_arr, arr) == no_copy)\n # Check reference count of the original array\n if HAS_REFCOUNT:\n if no_copy:\n # refcount increases by one:\n assert_equal(sys.getrefcount(arr), 3)\n else:\n assert_equal(sys.getrefcount(arr), 2)\n\n # Test non-broadcast setitem:\n b = arr.copy()\n b[index] = mimic_get + 1000\n if b.size == 0:\n return # nothing to compare here...\n if no_copy and indexed_arr.ndim != 0:\n # change indexed_arr in-place to manipulate original:\n indexed_arr += 1000\n assert_array_equal(arr, b)\n return\n # Use the fact that the array is originally an arange:\n arr.flat[indexed_arr.ravel()] += 1000\n assert_array_equal(arr, b)\n\n def test_boolean(self):\n a = np.array(5)\n assert_equal(a[np.array(True)], 5)\n a[np.array(True)] = 1\n assert_equal(a, 1)\n # NOTE: This is different from normal broadcasting, as\n # arr[boolean_array] works like in a multi index. Which means\n # it is aligned to the left. 
This is probably correct for\n # consistency with arr[boolean_array,] also no broadcasting\n # is done at all\n self._check_multi_index(\n self.a, (np.zeros_like(self.a, dtype=bool),))\n self._check_multi_index(\n self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))\n self._check_multi_index(\n self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))\n\n def test_multidim(self):\n # Automatically test combinations with complex indexes on 2nd (or 1st)\n # spot and the simple ones in one other spot.\n with warnings.catch_warnings():\n # This is so that np.array(True) is not accepted in a full integer\n # index, when running the file separately.\n warnings.filterwarnings('error', '', DeprecationWarning)\n warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)\n\n def isskip(idx):\n return isinstance(idx, str) and idx == \"skip\"\n\n for simple_pos in [0, 2, 3]:\n tocheck = [self.fill_indices, self.complex_indices,\n self.fill_indices, self.fill_indices]\n tocheck[simple_pos] = self.simple_indices\n for index in product(*tocheck):\n index = tuple(i for i in index if not isskip(i))\n self._check_multi_index(self.a, index)\n self._check_multi_index(self.b, index)\n\n # Check very simple item getting:\n self._check_multi_index(self.a, (0, 0, 0, 0))\n self._check_multi_index(self.b, (0, 0, 0, 0))\n # Also check (simple cases of) too many indices:\n assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))\n assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)\n assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))\n assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)\n\n def test_1d(self):\n a = np.arange(10)\n for index in self.complex_indices:\n self._check_single_index(a, index)\n\nclass TestFloatNonIntegerArgument(object):\n \"\"\"\n These test that ``TypeError`` is raised when you try to use\n non-integers as arguments to for indexing and slicing e.g. 
``a[0.0:5]``\n and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.\n\n \"\"\"\n def test_valid_indexing(self):\n # These should raise no errors.\n a = np.array([[[5]]])\n\n a[np.array([0])]\n a[[0, 0]]\n a[:, [0, 0]]\n a[:, 0,:]\n a[:,:,:]\n\n def test_valid_slicing(self):\n # These should raise no errors.\n a = np.array([[[5]]])\n\n a[::]\n a[0:]\n a[:2]\n a[0:2]\n a[::2]\n a[1::2]\n a[:2:2]\n a[1:2:2]\n\n def test_non_integer_argument_errors(self):\n a = np.array([[5]])\n\n assert_raises(TypeError, np.reshape, a, (1., 1., -1))\n assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))\n assert_raises(TypeError, np.take, a, [0], 1.)\n assert_raises(TypeError, np.take, a, [0], np.float64(1.))\n\n def test_non_integer_sequence_multiplication(self):\n # NumPy scalar sequence multiply should not work with non-integers\n def mult(a, b):\n return a * b\n\n assert_raises(TypeError, mult, [1], np.float_(3))\n # following should be OK\n mult([1], np.int_(3))\n\n def test_reduce_axis_float_index(self):\n d = np.zeros((3,3,3))\n assert_raises(TypeError, np.min, d, 0.5)\n assert_raises(TypeError, np.min, d, (0.5, 1))\n assert_raises(TypeError, np.min, d, (1, 2.2))\n assert_raises(TypeError, np.min, d, (.2, 1.2))\n\n\nclass TestBooleanIndexing(object):\n # Using a boolean as integer argument/indexing is an error.\n def test_bool_as_int_argument_errors(self):\n a = np.array([[[1]]])\n\n assert_raises(TypeError, np.reshape, a, (True, -1))\n assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))\n # Note that operator.index(np.array(True)) does not work, a boolean\n # array is thus also deprecated, but not with the same message:\n assert_raises(TypeError, operator.index, np.array(True))\n assert_warns(DeprecationWarning, operator.index, np.True_)\n assert_raises(TypeError, np.take, args=(a, [0], False))\n\n def test_boolean_indexing_weirdness(self):\n # Weird boolean indexing things\n a = np.ones((2, 3, 4))\n a[False, True, ...].shape == (0, 2, 3, 4)\n a[True, [0, 1], True, True, [1], [[2]]] == (1, 2)\n assert_raises(IndexError, lambda: a[False, [0, 1], ...])\n\n\nclass TestArrayToIndexDeprecation(object):\n \"\"\"Creating an an index from array not 0-D is an error.\n\n \"\"\"\n def test_array_to_index_error(self):\n # so no exception is expected. 
The raising is effectively tested above.\n a = np.array([[[1]]])\n\n assert_raises(TypeError, operator.index, np.array([1]))\n assert_raises(TypeError, np.reshape, a, (a, -1))\n assert_raises(TypeError, np.take, a, [0], a)\n\n\nclass TestNonIntegerArrayLike(object):\n \"\"\"Tests that array_likes only valid if can safely cast to integer.\n\n For instance, lists give IndexError when they cannot be safely cast to\n an integer.\n\n \"\"\"\n def test_basic(self):\n a = np.arange(10)\n\n assert_raises(IndexError, a.__getitem__, [0.5, 1.5])\n assert_raises(IndexError, a.__getitem__, (['1', '2'],))\n\n # The following is valid\n a.__getitem__([])\n\n\nclass TestMultipleEllipsisError(object):\n \"\"\"An index can only have a single ellipsis.\n\n \"\"\"\n def test_basic(self):\n a = np.arange(10)\n assert_raises(IndexError, lambda: a[..., ...])\n assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))\n assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))\n\n\nclass TestCApiAccess(object):\n def test_getitem(self):\n subscript = functools.partial(array_indexing, 0)\n\n # 0-d arrays don't work:\n assert_raises(IndexError, subscript, np.ones(()), 0)\n # Out of bound values:\n assert_raises(IndexError, subscript, np.ones(10), 11)\n assert_raises(IndexError, subscript, np.ones(10), -11)\n assert_raises(IndexError, subscript, np.ones((10, 10)), 11)\n assert_raises(IndexError, subscript, np.ones((10, 10)), -11)\n\n a = np.arange(10)\n assert_array_equal(a[4], subscript(a, 4))\n a = a.reshape(5, 2)\n assert_array_equal(a[-4], subscript(a, -4))\n\n def test_setitem(self):\n assign = functools.partial(array_indexing, 1)\n\n # Deletion is impossible:\n assert_raises(ValueError, assign, np.ones(10), 0)\n # 0-d arrays don't work:\n assert_raises(IndexError, assign, np.ones(()), 0, 0)\n # Out of bound values:\n assert_raises(IndexError, assign, np.ones(10), 11, 0)\n assert_raises(IndexError, assign, np.ones(10), -11, 0)\n assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)\n assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)\n\n a = np.arange(10)\n assign(a, 4, 10)\n assert_(a[4] == 10)\n\n a = a.reshape(5, 2)\n assign(a, 4, 10)\n assert_array_equal(a[-1], [10, 10])\n"
] | [
[
"numpy.take",
"numpy.testing.assert_equal",
"numpy.dtype",
"scipy.fftpack.basic._is_safe_size",
"numpy.issubdtype",
"numpy.random.seed",
"numpy.asarray",
"numpy.add.outer",
"scipy.fftpack.ifftn",
"scipy.fftpack._fftpack.drfft",
"scipy.fftpack.fft2",
"numpy.random.rand",
"scipy.fftpack.rfft",
"scipy.fftpack.ifft",
"numpy.zeros",
"scipy.fftpack.fftn",
"scipy.fftpack.irfft",
"numpy.arange",
"numpy.testing.assert_array_almost_equal_nulp",
"numpy.testing.assert_array_almost_equal",
"numpy.linalg.norm",
"numpy.fft.fft",
"scipy.fftpack._fftpack.zfft",
"scipy.fftpack.fft",
"scipy.fftpack._fftpack.zrfft",
"numpy.swapaxes",
"numpy.random.randn",
"numpy.exp",
"numpy.fft.ifft",
"numpy.array",
"numpy.testing.assert_"
],
[
"numpy.ones",
"numpy.ravel_multi_index",
"numpy.testing.assert_equal",
"numpy.dtype",
"numpy.any",
"numpy.int_",
"numpy.testing.assert_warns",
"numpy.float64",
"numpy.testing.assert_array_equal",
"numpy.bool_",
"numpy.broadcast",
"numpy.nonzero",
"numpy.float_",
"numpy.zeros",
"numpy.testing.suppress_warnings",
"numpy.arange",
"numpy.may_share_memory",
"numpy.prod",
"numpy.broadcast_to",
"numpy.intp",
"numpy.testing.assert_raises",
"numpy.zeros_like",
"numpy.empty",
"numpy.product",
"numpy.array",
"numpy.full",
"numpy.rec.array",
"numpy.testing.assert_"
]
] |
snowcoding/justice40-tool | [
"b6a6813bb5d617abf400cafc97da891618541558"
] | [
"data/data-pipeline/data_pipeline/etl/sources/michigan_ejscreen/etl.py"
] | [
"import pandas as pd\n\nfrom data_pipeline.etl.base import ExtractTransformLoad\nfrom data_pipeline.utils import get_module_logger\nfrom data_pipeline.score import field_names\nfrom data_pipeline.config import settings\n\nlogger = get_module_logger(__name__)\n\n\nclass MichiganEnviroScreenETL(ExtractTransformLoad):\n \"\"\"Michigan EJ Screen class that ingests dataset represented\n here: https://www.arcgis.com/apps/webappviewer/index.html?id=dc4f0647dda34959963488d3f519fd24\n This class ingests the data presented in \"Assessing the State of Environmental\n Justice in Michigan.\" Please see the README in this module for further details.\n \"\"\"\n\n def __init__(self):\n self.MICHIGAN_EJSCREEN_S3_URL = (\n settings.AWS_JUSTICE40_DATASOURCES_URL\n + \"/michigan_ejscore_12212021.csv\"\n )\n\n self.CSV_PATH = self.DATA_PATH / \"dataset\" / \"michigan_ejscreen\"\n self.MICHIGAN_EJSCREEN_PRIORITY_COMMUNITY_THRESHOLD: float = 0.75\n\n self.COLUMNS_TO_KEEP = [\n self.GEOID_TRACT_FIELD_NAME,\n field_names.MICHIGAN_EJSCREEN_SCORE_FIELD,\n field_names.MICHIGAN_EJSCREEN_PERCENTILE_FIELD,\n field_names.MICHIGAN_EJSCREEN_PRIORITY_COMMUNITY_FIELD,\n ]\n\n self.df: pd.DataFrame\n\n def extract(self) -> None:\n logger.info(\"Downloading Michigan EJSCREEN Data\")\n self.df = pd.read_csv(\n filepath_or_buffer=self.MICHIGAN_EJSCREEN_S3_URL,\n dtype={\"GEO_ID\": \"string\"},\n low_memory=False,\n )\n\n def transform(self) -> None:\n logger.info(\"Transforming Michigan EJSCREEN Data\")\n\n self.df.rename(\n columns={\n \"GEO_ID\": self.GEOID_TRACT_FIELD_NAME,\n \"EJ_Score_Cal_Min\": field_names.MICHIGAN_EJSCREEN_SCORE_FIELD,\n \"Pct_CalMin\": field_names.MICHIGAN_EJSCREEN_PERCENTILE_FIELD,\n },\n inplace=True,\n )\n # Calculate the top quartile of prioritized communities\n # Please see pg. 104 - 109 from source:\n # pg. https://deepblue.lib.umich.edu/bitstream/handle/2027.42/149105/AssessingtheStateofEnvironmentalJusticeinMichigan_344.pdf\n self.df[field_names.MICHIGAN_EJSCREEN_PRIORITY_COMMUNITY_FIELD] = (\n self.df[field_names.MICHIGAN_EJSCREEN_PERCENTILE_FIELD]\n >= self.MICHIGAN_EJSCREEN_PRIORITY_COMMUNITY_THRESHOLD\n )\n\n def load(self) -> None:\n logger.info(\"Saving Michigan Environmental Screening Tool to CSV\")\n # write nationwide csv\n self.CSV_PATH.mkdir(parents=True, exist_ok=True)\n self.df[self.COLUMNS_TO_KEEP].to_csv(\n self.CSV_PATH / \"michigan_ejscreen.csv\", index=False\n )\n"
] | [
[
"pandas.read_csv"
]
] |
jiwoncpark/lens-classification | [
"c1faf4dbbd4a16f2df74a34fd593ec7128750252"
] | [
"magnificat/drw_dataset.py"
] | [
"import os\nimport os.path as osp\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nfrom magnificat import drw_utils\nfrom magnificat.cadence import LSSTCadence\n\n\nclass DRWDataset(Dataset):\n\n bp_to_int = dict(zip(list('ugrizy'), range(6)))\n int_to_bp = dict(zip(range(6), list('ugrizy')))\n\n def __init__(self,\n params_sampler,\n out_dir,\n num_samples,\n is_training,\n transform_x_func=lambda x: x,\n transform_y_func=lambda x: x,\n prestored_bandpasses=list('ugrizy'),\n seed=123,\n obs_kwargs={}):\n \"\"\"Dataset of DRW light curves\n\n Parameters\n ----------\n params_sampler : flexible\n Any sampler that has a `sample()` method returning a dict\n of `self.param_names` (see below) and has an attribute\n `bandpasses` which is a list of strings indicating which\n LSST bands, and `idx` which is list of indices if sampler\n is associated with a catalog\n out_dir : str\n Output directory for this dataset\n num_samples : int\n Number of AGNs in this dataset\n is_training : bool\n whether this is the training set\n transform_x_func : callable, optional\n Transform function for the times x, useful if the ML model is\n sensitive to the absolute scale of time. Default: identity function\n prestored_bandpasses : TYPE, optional\n Description\n seed : int, optional\n Random seed relevant for generating DRW light curves\n obs_kwargs: dict\n Parameters defining pointings. Includes as keys 'n_pointings_init'\n (number of pointings to request), 'obs_dir' (directory\n containing observation conditions), 'seed' (random seed for\n sampling observation conditions for each light curve, defaults to\n `seed`), 'bandpasses' (list of bandpasses to include in trimming)\n \"\"\"\n self.params_sampler = params_sampler\n # Figure out which bandpasses are sampled\n bandpasses = self.params_sampler.bandpasses\n self.bandpasses_int = [self.bp_to_int[bp] for bp in bandpasses]\n self.bandpasses_int.sort()\n self.bandpasses = [self.int_to_bp[bp_i] for bp_i in self.bandpasses_int]\n # Compile list of parameters, both bp-dependent and otherwise\n # Determined at data generation time\n param_names = ['BH_mass', 'M_i']\n param_names += [f'log_sf_inf_{bp}' for bp in prestored_bandpasses]\n param_names += [f'{bp}' for bp in prestored_bandpasses]\n param_names += ['redshift']\n param_names += [f'log_rf_tau_{bp}' for bp in prestored_bandpasses]\n self.param_names = param_names\n # Create output directory for this dataset\n self.out_dir = out_dir\n os.makedirs(self.out_dir, exist_ok=True)\n self.num_samples = num_samples\n self.obs_kwargs = obs_kwargs\n self.is_training = is_training\n self.seed = seed\n self.transform_x_func = transform_x_func\n self.transform_y_func = transform_y_func\n self.delta_x = 1.0 # 1-day interval\n self.max_x = 3650.0 # LSST 10-year\n # Preview of untrimmed times\n self.x_grid = np.arange(0, self.max_x, self.delta_x)\n self.x_grid = self.transform_x_func(self.x_grid)\n self.n_points = len(self.x_grid)\n # For standardizing params\n self.mean_params = None\n self.std_params = None\n self.log_params = None\n self.slice_params = None\n # Load observation strategy\n self.load_obs_strat()\n # Generate and prestore light curves\n self._generate_x_y_params()\n np.savetxt(os.path.join(out_dir, 'cat_idx.txt'),\n self.params_sampler.idx, fmt='%i')\n self._fully_obs = False # init property\n self._add_noise = True # init property\n\n def get_sliced_params(self):\n return 
np.array(self.param_names)[np.array(self.slice_params)]\n\n def load_obs_strat(self):\n \"\"\"Load observation strategies\n\n \"\"\"\n self.cadence_obj = LSSTCadence(self.obs_kwargs['obs_dir'])\n ra, dec = self.cadence_obj.get_pointings(self.obs_kwargs['n_pointings_init'])\n self.cadence_obj.get_obs_info(ra, dec, skip_ddf=True,\n min_visits=50)\n self.cadence_obj.bin_by_day(bandpasses=self.obs_kwargs['bandpasses'])\n obs_mask = self.cadence_obj.get_observed_mask() # [3650,]\n self.trimmed_T = sum(obs_mask)\n self.obs_mask = torch.from_numpy(obs_mask).to(torch.bool)\n self.rng = np.random.default_rng(self.obs_kwargs.get('seed', self.seed)) # for sampling pointings\n\n def get_t_obs(self):\n \"\"\"Get full 10-year times in observed frame\n\n \"\"\"\n return torch.arange(0, self.max_x, self.delta_x)\n\n def _generate_x_y_params(self):\n \"\"\"Generate and store fully observed DRW light curves and params\n\n \"\"\"\n # Save times first, since it's the same for all AGNs in dataset\n x = self.get_t_obs() # [3651]\n torch.save(self.obs_mask, osp.join(self.out_dir, 'obs_mask.pt'))\n torch.save(x, osp.join(self.out_dir, 'x.pt'))\n for index in tqdm(range(self.num_samples), desc=\"y, params\"):\n if osp.exists(osp.join(self.out_dir, f'drw_{index}.pt')):\n continue\n # Sample params\n params_dict = self.params_sampler.sample()\n z = params_dict['redshift']\n y_concat = torch.ones([self.n_points, 6])*(-99) # [3650, 6]\n # Render LC for each filter\n for bp in self.bandpasses:\n bp_int = self.bp_to_int[bp]\n log_rf_tau = params_dict[f'log_rf_tau_{bp}']\n log_sf_inf = params_dict[f'log_sf_inf_{bp}']\n mean_mag = params_dict[f'{bp}']\n y = self._generate_light_curve(index, log_rf_tau, log_sf_inf,\n mean_mag, z) # [3650,]\n y_concat[:, bp_int] = y\n # Sort params in predetermined ordering\n params = torch.tensor([params_dict[n] for n in self.param_names]) # [n_params]\n # Concat along filter dimension in predetermined filter ordering\n # y_concat = y_concat[self.obs_mask, :] # [trimmed_T, N_filters]\n # Save y_concat without obs_mask\n # y_concat ~ [3651, N_filters]\n torch.save((y_concat, params),\n osp.join(self.out_dir, f'drw_{index}.pt'))\n\n def _generate_light_curve(self, index, log_rf_tau, log_sf_inf, mean, z):\n \"\"\"Generate a single light curve in a given filter.\n Rendering is done in the rest frame, with the input params\n assumed to be in the rest frame.\n\n Parameters\n ----------\n index : int\n index within the dataset\n log_rf_tau : float\n log10 of rest-frame timescale in days\n log_sf_inf : float\n log10 of rest-frame asymptotic amplitude in mag\n mean : float\n mean static magnitude\n z : float\n redshift\n\n Returns\n -------\n tuple\n single-filter light curve of shape [n_points, 1]\n \"\"\"\n torch.manual_seed(int(str(self.seed) + str(index)))\n # Shifted rest-frame times\n t_rest = self.get_t_obs()/(1.0 + z)\n # DRW flux\n tau = 10**log_rf_tau\n sf_inf = 10**log_sf_inf\n y = drw_utils.get_drw_torch(t_rest, tau, z, sf_inf,\n xmean=mean) # [T,]\n return y\n\n @property\n def fully_obs(self):\n return self._fully_obs\n\n @fully_obs.setter\n def fully_obs(self, val):\n self._fully_obs = val\n\n @property\n def add_noise(self):\n return self._add_noise\n\n @add_noise.setter\n def add_noise(self, val):\n self._add_noise = val\n\n def __getitem__(self, index):\n # Load fully observed light curve at fully obs times\n y, params = torch.load(osp.join(self.out_dir,\n f'drw_{index}.pt')) # [T=3650, 6]\n if self.fully_obs:\n obs_mask = slice(None)\n else:\n obs_mask = self.obs_mask\n # Trim 
the times\n x = torch.load(osp.join(self.out_dir, 'x.pt'))[obs_mask] # [trimmed_T,]\n y = y[obs_mask, :]\n # Slice relevant bandpasses\n y = y[:, self.bandpasses_int]\n # Rescale x for numerical stability of ML model\n x = self.transform_x_func(x)\n # Add noise and rescale flux to [-1, 1]\n y = self.transform_y_func(y)\n # y = (y - torch.min(y))/(torch.max(y) - torch.min(y))*2.0 - 1.0\n if self.slice_params is not None:\n params = params[self.slice_params]\n if self.log_params is not None:\n params[self.log_params] = torch.log10(params[self.log_params])\n if self.mean_params is not None:\n params -= self.mean_params\n params /= self.std_params\n # Sample observation mask\n if self.is_training:\n # Randomly drawn pointing index\n p = self.rng.integers(low=0, high=self.cadence_obj.n_pointings)\n else:\n # Do not shuffle pointing for validation set\n p = 0\n trimmed_mask = self.cadence_obj.get_trimmed_mask(p,\n as_tensor=True)\n # trimmed_mask = trimmed_mask[:, self.bandpasses_int]\n\n data = dict(x=x,\n y=y,\n params=params,\n trimmed_mask=trimmed_mask\n )\n return data\n\n def get_normalizing_metadata(self, set_metadata=True):\n loader = DataLoader(self,\n batch_size=100,\n shuffle=False,\n drop_last=False)\n mean_params = 0.0\n var_params = 0.0\n print(\"Computing normalizing metadata...\")\n # Compute mean, std\n for i, data in enumerate(loader):\n params = data['params']\n new_mean = params.mean(dim=0)\n new_var = params.var(dim=0, unbiased=False)\n var_params += (new_var - var_params)/(i+1)\n var_params += (i/(i+1)**2.0)*(mean_params - new_mean)**2.0\n mean_params += (new_mean - mean_params)/(i+1)\n std_params = var_params**0.5\n if set_metadata:\n self.mean_params = mean_params\n self.std_params = std_params\n return mean_params, std_params\n\n def __len__(self):\n return self.num_samples\n\n\nif __name__ == '__main__':\n import random\n\n class Sampler:\n def __init__(self, seed, bandpasses):\n random.seed(seed)\n np.random.seed(seed)\n self.bandpasses = bandpasses\n\n def sample(self):\n sample_dict = dict()\n for bp in self.bandpasses:\n log_sf_inf = np.maximum(np.random.randn()*0.05 + 0.2, 0.2)\n # log_sf_inf = 10**(np.random.randn(N)*(0.25) + -0.8)\n # log_sf_inf = np.ones(N)*0.15\n # tau = 10.0**np.maximum(np.random.randn(N)*0.5 + 2.0, 0.1)\n tau = np.maximum(np.random.randn()*50.0 + 200.0, 10.0)\n # mag = np.maximum(np.random.randn(N) + 19.0, 17.5)\n mag = 0.0\n # z = np.maximum(np.random.randn(N) + 2.0, 0.5)\n sample_dict[f'log_rf_tau_{bp}'] = tau\n sample_dict[f'log_sf_inf_{bp}'] = log_sf_inf\n sample_dict[f'{bp}'] = mag\n sample_dict['redshift'] = 2.0\n sample_dict['M_i'] = -16.0\n sample_dict['BH_mass'] = 10.0\n return sample_dict\n\n train_seed = 123\n sampler = Sampler(train_seed, bandpasses=['i'])\n\n train_dataset = DRWDataset(sampler, 'train_drw_s82',\n num_samples=3,\n seed=train_seed,\n shift_x=-3650*0.5,\n rescale_x=1.0/(3650*0.5)*4.0,\n delta_x=1.0,\n max_x=3650.0,\n err_y=0.01)\n train_dataset.slice_params = [train_dataset.param_names.index(n) for n in ['log_rf_taui', 'log_sf_inf_i', 'M_i']]\n train_dataset.log_params = [True, True, False]\n train_dataset.get_normalizing_metadata()\n print(train_dataset.mean_params, train_dataset.std_params)\n x, y, params = train_dataset[0]\n print(x.shape, y.shape, params.shape)\n\n\n\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.ones",
"torch.tensor",
"numpy.random.seed",
"numpy.random.randn",
"numpy.arange",
"torch.arange",
"torch.from_numpy",
"torch.log10",
"numpy.array"
]
] |
jld23/sasoptpy | [
"f96911f04d6c0c01fce902f1f995935583df69a8"
] | [
"examples/client_side/decentralization.py"
] | [
"import sasoptpy as so\nimport pandas as pd\n\n\ndef test(cas_conn):\n\n m = so.Model(name='decentralization', session=cas_conn)\n\n DEPTS = ['A', 'B', 'C', 'D', 'E']\n CITIES = ['Bristol', 'Brighton', 'London']\n\n benefit_data = pd.DataFrame([\n ['Bristol', 10, 15, 10, 20, 5],\n ['Brighton', 10, 20, 15, 15, 15]],\n columns=['city'] + DEPTS).set_index('city')\n\n comm_data = pd.DataFrame([\n ['A', 'B', 0.0],\n ['A', 'C', 1.0],\n ['A', 'D', 1.5],\n ['A', 'E', 0.0],\n ['B', 'C', 1.4],\n ['B', 'D', 1.2],\n ['B', 'E', 0.0],\n ['C', 'D', 0.0],\n ['C', 'E', 2.0],\n ['D', 'E', 0.7]], columns=['i', 'j', 'comm']).set_index(['i', 'j'])\n\n cost_data = pd.DataFrame([\n ['Bristol', 'Bristol', 5],\n ['Bristol', 'Brighton', 14],\n ['Bristol', 'London', 13],\n ['Brighton', 'Brighton', 5],\n ['Brighton', 'London', 9],\n ['London', 'London', 10]], columns=['i', 'j', 'cost']).set_index(\n ['i', 'j'])\n\n max_num_depts = 3\n\n benefit = {}\n for city in CITIES:\n for dept in DEPTS:\n try:\n benefit[dept, city] = benefit_data.loc[city, dept]\n except:\n benefit[dept, city] = 0\n\n comm = {}\n for row in comm_data.iterrows():\n (i, j) = row[0]\n comm[i, j] = row[1]['comm']\n comm[j, i] = comm[i, j]\n\n cost = {}\n for row in cost_data.iterrows():\n (i, j) = row[0]\n cost[i, j] = row[1]['cost']\n cost[j, i] = cost[i, j]\n\n assign = m.add_variables(DEPTS, CITIES, vartype=so.BIN, name='assign')\n IJKL = [(i, j, k, l)\n for i in DEPTS for j in CITIES for k in DEPTS for l in CITIES\n if i < k]\n product = m.add_variables(IJKL, vartype=so.BIN, name='product')\n\n totalBenefit = so.expr_sum(benefit[i, j] * assign[i, j]\n for i in DEPTS for j in CITIES)\n\n totalCost = so.expr_sum(comm[i, k] * cost[j, l] * product[i, j, k, l]\n for (i, j, k, l) in IJKL)\n\n m.set_objective(totalBenefit-totalCost, name='netBenefit', sense=so.MAX)\n\n m.add_constraints((so.expr_sum(assign[dept, city] for city in CITIES)\n == 1 for dept in DEPTS), name='assign_dept')\n\n m.add_constraints((so.expr_sum(assign[dept, city] for dept in DEPTS)\n <= max_num_depts for city in CITIES), name='cardinality')\n\n product_def1 = m.add_constraints((assign[i, j] + assign[k, l] - 1\n <= product[i, j, k, l]\n for (i, j, k, l) in IJKL),\n name='pd1')\n\n product_def2 = m.add_constraints((product[i, j, k, l] <= assign[i, j]\n for (i, j, k, l) in IJKL),\n name='pd2')\n\n product_def3 = m.add_constraints((product[i, j, k, l] <= assign[k, l]\n for (i, j, k, l) in IJKL),\n name='pd3')\n\n m.solve()\n print(m.get_problem_summary())\n\n m.drop_constraints(product_def1)\n m.drop_constraints(product_def2)\n m.drop_constraints(product_def3)\n\n m.add_constraints((\n so.expr_sum(product[i, j, k, l]\n for j in CITIES if (i, j, k, l) in IJKL) == assign[k, l]\n for i in DEPTS for k in DEPTS for l in CITIES if i < k),\n name='pd4')\n\n m.add_constraints((\n so.expr_sum(product[i, j, k, l]\n for l in CITIES if (i, j, k, l) in IJKL) == assign[i, j]\n for k in DEPTS for i in DEPTS for j in CITIES if i < k),\n name='pd5')\n\n m.solve()\n print(m.get_problem_summary())\n totalBenefit.set_name('totalBenefit')\n totalCost.set_name('totalCost')\n print(so.get_solution_table(totalBenefit, totalCost))\n print(so.get_solution_table(assign).unstack(level=-1))\n\n return m.get_objective_value()\n"
] | [
[
"pandas.DataFrame"
]
] |
kingsj0405/Explorable-Super-Resolution | [
"6582477ec1e2b0c6f4bd781552ac880fabdb4496"
] | [
"codes/models/modules/architecture.py"
] | [
"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torchvision\nfrom . import block as B\nfrom . import spectral_norm as SN\nimport functools\nimport numpy as np\nimport os\nimport models.modules.archs_util as arch_util\nimport torch.nn.functional as F\nimport re\n\n####################\n# Generator\n####################\nclass MSRResNet(nn.Module):\n ''' modified SRResNet'''\n\n def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):\n super(MSRResNet, self).__init__()\n self.upscale = upscale\n\n self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n basic_block = functools.partial(arch_util.ResidualBlock_noBN, nf=nf)\n self.recon_trunk = arch_util.make_layer(basic_block, nb)\n\n # upsampling\n if self.upscale == 2:\n self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n self.pixel_shuffle = nn.PixelShuffle(2)\n elif self.upscale == 3:\n self.upconv1 = nn.Conv2d(nf, nf * 9, 3, 1, 1, bias=True)\n self.pixel_shuffle = nn.PixelShuffle(3)\n elif self.upscale == 4:\n self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)\n self.pixel_shuffle = nn.PixelShuffle(2)\n\n self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)\n self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n # initialization\n arch_util.initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last],\n 0.1)\n if self.upscale == 4:\n arch_util.initialize_weights(self.upconv2, 0.1)\n\n def forward(self, x):\n fea = self.lrelu(self.conv_first(x))\n out = self.recon_trunk(fea)\n\n if self.upscale == 4:\n out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n elif self.upscale == 3 or self.upscale == 2:\n out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n\n out = self.conv_last(self.lrelu(self.HRconv(out)))\n base = F.interpolate(x, scale_factor=self.upscale, mode='bilinear', align_corners=False)\n out += base\n return out\n\n\nclass SRResNet(nn.Module):\n def __init__(self, in_nc, out_nc, nf, nb, upscale=4, norm_type='batch', act_type='relu', \\\n mode='NAC', res_scale=1, upsample_mode='upconv',range_correction=False):\n super(SRResNet, self).__init__()\n n_upscale = int(math.log(upscale, 2))\n if upscale == 3:\n n_upscale = 1\n\n fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)\n resnet_blocks = [B.ResNetBlock(nf, nf, nf, norm_type=norm_type, act_type=act_type,\\\n mode=mode, res_scale=res_scale) for _ in range(nb)]\n LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)\n\n if upsample_mode == 'upconv':\n upsample_block = B.upconv_blcok\n elif upsample_mode == 'pixelshuffle':\n upsample_block = B.pixelshuffle_block\n else:\n raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))\n if upscale == 3:\n upsampler = upsample_block(nf, nf, 3, act_type=act_type)\n else:\n upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]\n HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)\n HR_conv1 = B.conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)\n\n self.model = B.sequential(fea_conv, B.ShortcutBlock(B.sequential(*resnet_blocks, LR_conv)),\\\n *upsampler, HR_conv0, HR_conv1)\n self.range_correction = bool(range_correction)\n\n def forward(self, x):\n x = self.model(x)\n if 
self.range_correction:\n x = x/8e-6*0.3+0.45\n return x\n\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\nclass DnCNN(nn.Module):\n def __init__(self, n_channels, depth, kernel_size = 3, in_nc=64, out_nc=64, norm_type='batch', act_type='leakyrelu',\n latent_input=None,num_latent_channels=None,discriminator=False,expected_input_size=None,chroma_generator=False,spectral_norm=False,pooling_no_FC=False):\n super(DnCNN, self).__init__()\n # assert in_nc in [64,128] and out_nc==64,'Currently only supporting 64 DCT channels'\n assert act_type=='leakyrelu'\n assert norm_type in ['batch','instance','layer',None]\n # self.average_err_collection_counter = 0\n # self.average_abs_err_estimates = np.zeros([8,8])\n self.discriminator_net = discriminator\n if discriminator:\n # Ideally I should not use padding for the discriminator model. I do use padding in the first layers if the input size is too small,\n # so that the dimension of the fully connected layer's input would be at least MIN_DCT_DIMS_4_D x MIN_DCT_DIMS_4_D\n MIN_DCT_DIMS_4_D = 5\n num_padded_layers = max(0,depth-int(np.floor((expected_input_size-MIN_DCT_DIMS_4_D)/(kernel_size-1))))\n layer_num = 0\n self.pooling_no_FC = pooling_no_FC\n else:\n spectral_norm = False\n self.chroma_generator = chroma_generator\n if chroma_generator:\n self.block_size = np.sqrt(out_nc/2)\n assert self.block_size==np.round(self.block_size)\n self.block_size = int(self.block_size)\n padding = kernel_size//2\n self.latent_input = latent_input\n self.num_latent_channels = num_latent_channels\n # if latent_input is None or 'all_layers' not in latent_input or num_latent_channels is None:\n if latent_input not in ['all_layers','first_layer'] or num_latent_channels is None:\n self.num_latent_channels = 0\n\n layers = []\n if self.discriminator_net and layer_num>=num_padded_layers:\n expected_input_size -= (kernel_size - 1)\n padding = 0\n layers.append(nn.Conv2d(in_channels=in_nc+self.num_latent_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding,bias=True))\n if spectral_norm:\n layers[-1] = SN.spectral_norm(layers[-1])\n layers.append(nn.ReLU(inplace=True))\n for layer_num in range(1,depth - 2+1):\n if self.discriminator_net and layer_num>=num_padded_layers:\n expected_input_size -= (kernel_size-1)\n padding = 0\n layers.append(nn.Conv2d(in_channels=n_channels+self.num_latent_channels*(self.latent_input=='all_layers'), out_channels=n_channels, kernel_size=kernel_size, padding=padding,bias=False))\n if spectral_norm:\n layers[-1] = SN.spectral_norm(layers[-1])\n if norm_type=='batch':\n layers.append(nn.BatchNorm2d(n_channels, eps=0.0001, momentum=0.95))\n elif norm_type=='layer':\n layers.append(nn.LayerNorm(normalized_shape=[n_channels,expected_input_size,expected_input_size],elementwise_affine=False))\n elif norm_type=='instance':\n layers.append(nn.InstanceNorm2d(n_channels))\n layers.append(nn.LeakyReLU(inplace=True))\n layer_num += 1\n if self.discriminator_net and layer_num >= num_padded_layers:\n expected_input_size -= (kernel_size - 1)\n padding = 0\n layers.append(nn.Conv2d(in_channels=n_channels+self.num_latent_channels*(self.latent_input=='all_layers'),\n out_channels=1 if (self.discriminator_net and self.pooling_no_FC) else out_nc, kernel_size=kernel_size, padding=padding,\n bias=self.discriminator_net and self.pooling_no_FC)) #When using a fully convolutional D (when pooling_no_FC), allowing bias in the final layer.\n if spectral_norm:\n layers[-1] = SN.spectral_norm(layers[-1])\n 
if self.discriminator_net:\n layers.append(Flatten())\n if not self.pooling_no_FC:\n layers.append(nn.Linear(in_features=out_nc*(expected_input_size**2),out_features=1))\n if spectral_norm:\n layers[-1] = SN.spectral_norm(layers[-1])\n # layers.append(nn.Linear(in_features=64, out_features=1))\n else:\n layers.append(nn.Sigmoid())\n if False and self.discriminator_net:\n self.dncnn = nn.Sequential(*layers)\n else:\n self.dncnn = nn.ModuleList(layers)\n\n def forward(self, x):\n if False and self.discriminator_net:\n return self.dncnn(x)\n else:\n latent_input, quantized_coeffs = torch.split(x, split_size_or_sections=[self.num_latent_channels,x.size(1)-self.num_latent_channels], dim=1)\n x = 1*quantized_coeffs\n for i, module in enumerate(self.dncnn):\n if self.num_latent_channels>0 and (self.latent_input=='all_layers' or (self.latent_input=='first_layer' and i==0)) and isinstance(module,nn.Conv2d):\n # if self.num_latent_channels>0 and self.latent_input is not None and 'all_layers' in self.latent_input and isinstance(module,nn.Conv2d):\n if self.discriminator_net and latent_input.size(2)!=x.size(2):\n x = torch.cat([torch.nn.functional.interpolate(input=latent_input,size=x.size()[2:],mode='bilinear',align_corners=False),x],dim=1)\n else:\n x = torch.cat([latent_input,x],dim=1)\n x = module(x)\n if self.discriminator_net:\n return torch.mean(x,dim=1,keepdim=True) # Averaging for the case of pooling instead of having a final FC layer. Otherwise it doesn't matter because x.shape[1]=1 anyway.\n quantization_err_estimation = x-0.5\n # quantization_err_estimation = self.dncnn(x)-0.5\n # if not next(self.modules()).training:\n # self.average_err_collection_counter += 1\n # self.average_abs_err_estimates = ((self.average_err_collection_counter-1)*self.average_abs_err_estimates+\n # quantization_err_estimation.abs().mean(-1).mean(-1).mean(0).view(8,8).data.cpu().numpy())/self.average_err_collection_counter\n if self.chroma_generator:\n quantization_err_estimation = quantization_err_estimation.view(quantization_err_estimation.size(0),2,self.block_size//8,8,self.block_size//8,8,\n quantization_err_estimation.size(2),quantization_err_estimation.size(3))\n quantized_coeffs = quantized_coeffs[:,self.block_size**2:,:,:].view(quantized_coeffs.size(0),2,8,8,quantized_coeffs.size(2),quantized_coeffs.size(3))\n quantization_err_estimation[:,:,0,:,0,...] 
= quantization_err_estimation[:,:,0,:,0,...]+quantized_coeffs\n return quantization_err_estimation.view(quantization_err_estimation.size(0),-1,quantization_err_estimation.size(6),quantization_err_estimation.size(7))\n else:\n return quantized_coeffs+quantization_err_estimation\n\n def return_collected_err_avg(self):\n self.average_err_collection_counter = 0\n natrix_2_return = 1*self.average_abs_err_estimates\n self.average_abs_err_estimates = np.zeros([8,8])\n return natrix_2_return\n\n def save_estimated_errors_fig(self,quantization_err_batch):\n import matplotlib.pyplot as plt\n plt.clf()\n plt.imshow(quantization_err_batch.abs().mean(-1).mean(-1).mean(0).view(8,8).data.cpu().numpy())\n plt.colorbar()\n plt.savefig('Est_quantization_errors_0iters_95Kiters.png')\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.orthogonal_(m.weight)\n print('init weight')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n\n\nclass RRDBNet(nn.Module):\n def __init__(self, in_nc, out_nc, nf, nb, gc=32, upscale=4, norm_type=None, \\\n act_type='leakyrelu', mode='CNA', upsample_mode='upconv',latent_input=None,num_latent_channels=None):\n super(RRDBNet, self).__init__()\n self.latent_input = latent_input\n if num_latent_channels is not None and num_latent_channels>0:\n num_latent_channels_HR = 1 * num_latent_channels\n if 'HR_rearranged' in latent_input:\n num_latent_channels *= upscale**2\n self.num_latent_channels = 1*num_latent_channels\n self.upscale = upscale\n n_upscale = int(math.log(upscale, 2))\n if upscale == 3:\n n_upscale = 1\n if latent_input is not None:\n in_nc += num_latent_channels\n if latent_input is None or 'all_layers' not in latent_input:\n num_latent_channels,num_latent_channels_HR = 0,0\n\n USE_MODULE_LISTS = True\n fea_conv = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None,return_module_list=USE_MODULE_LISTS)\n rb_blocks = [B.RRDB(nf, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', \\\n norm_type=norm_type, act_type=act_type, mode='CNA',latent_input_channels=num_latent_channels) for _ in range(nb)]\n LR_conv = B.conv_block(nf+num_latent_channels, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode,return_module_list=USE_MODULE_LISTS)\n\n if upsample_mode == 'upconv':\n upsample_block = B.upconv_blcok\n elif upsample_mode == 'pixelshuffle':\n upsample_block = B.pixelshuffle_block\n else:\n raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))\n if upscale == 3:\n upsampler = upsample_block(nf, nf, 3, act_type=act_type)\n else:\n upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]\n if latent_input is not None and 'all_layers' in latent_input:\n if 'LR' in latent_input:\n self.latent_upsampler = nn.Upsample(scale_factor=upscale if upscale==3 else 2)\n HR_conv0 = B.conv_block(nf+num_latent_channels_HR, nf, kernel_size=3, norm_type=None, act_type=act_type,return_module_list=USE_MODULE_LISTS)\n HR_conv1 = B.conv_block(nf+num_latent_channels_HR, out_nc, kernel_size=3, norm_type=None, act_type=None,return_module_list=USE_MODULE_LISTS)\n\n if USE_MODULE_LISTS:\n self.model = nn.ModuleList(fea_conv+\\\n [B.ShortcutBlock(B.sequential(*(rb_blocks+LR_conv),return_module_list=USE_MODULE_LISTS),latent_input_channels=num_latent_channels,\n use_module_list=True)]+upsampler+HR_conv0+HR_conv1)\n else:\n self.model = B.sequential(fea_conv, 
B.ShortcutBlock(B.sequential(*rb_blocks, LR_conv)),\\\n *upsampler, HR_conv0, HR_conv1)\n\n def forward(self, x):\n if self.latent_input is not None:\n if 'HR_downscaled' in self.latent_input:\n # latent_input_HR = 1*self.Z\n latent_input_HR,x = torch.split(x,split_size_or_sections=[x.size(1)-3,3],dim=1)\n latent_input_HR = latent_input_HR.view([latent_input_HR.size(0)]+[-1]+[self.upscale*val for val in list(latent_input_HR.size()[2:])])\n latent_input = torch.nn.functional.interpolate(input=latent_input_HR,scale_factor=1/self.upscale,mode='bilinear',align_corners=False)\n else:\n latent_input = 1*self.Z\n x = torch.cat([latent_input, x], dim=1)\n for i,module in enumerate(self.model):\n module_children = [str(type(m)) for m in module.children()]\n if i>0 and self.latent_input is not None and 'all_layers' in self.latent_input:\n if len(module_children)>0 and 'Upsample' in module_children[0]:\n if 'LR' in self.latent_input:\n latent_input = self.latent_upsampler(latent_input)\n elif 'HR_rearranged' in self.latent_input:\n raise Exception('Unsupported yet')\n latent_input = latent_input.view()\n elif 'HR_downscaled' in self.latent_input:\n latent_input = 1*latent_input_HR\n elif 'ReLU' not in str(type(module)):\n x = torch.cat([latent_input,x],1)\n x = module(x)\n return x\n\n\n####################\n# Discriminator\n####################\n\nclass PatchGAN_Discriminator(nn.Module):\n DEFAULT_N_LAYERS = 3\n\n def __init__(self, input_nc, opt_net,ndf=64, n_layers=DEFAULT_N_LAYERS, norm_layer=nn.BatchNorm2d):\n \"\"\"Construct a PatchGAN discriminator\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n n_layers (int) -- the number of conv layers in the discriminator\n norm_layer -- normalization layer\n \"\"\"\n super(PatchGAN_Discriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n self.decomposed_input = bool(opt_net['decomposed_input'])\n self.pre_clipping = bool(opt_net['pre_clipping'])\n projected_component_sequences = []\n in_ch_addition = input_nc if self.decomposed_input else 0\n kw = 4\n padw = 1\n max_out_channels = 512\n sequences = [nn.Sequential(*[nn.Conv2d(input_nc+in_ch_addition, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)])]\n # if self.decomposed_input:\n # projected_component_sequences = [nn.Conv2d(input_nc, input_nc, kernel_size=kw, stride=2, padding=padw)]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n # nf_mult_prev = nf_mult\n # nf_mult = min(2 ** max(0,n-n_layers+self.DEFAULT_N_LAYERS), 8)\n nf_mult_prev = min(max_out_channels, ndf * nf_mult) // ndf\n nf_mult = min(2 ** n, 8)\n sequences.append(nn.Sequential(*[\n nn.Conv2d(ndf * nf_mult_prev+in_ch_addition, min(max_out_channels, ndf * nf_mult), kernel_size=kw,\n stride=2 if n > n_layers - self.DEFAULT_N_LAYERS else 1,\n padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True)]))\n # if self.decomposed_input:\n # projected_component_sequences.append(\n # nn.Conv2d(input_nc,input_nc, kernel_size=kw,\n # stride=2 if n > n_layers - self.DEFAULT_N_LAYERS else 1,\n # padding=padw, bias=use_bias))\n\n # nf_mult_prev = nf_mult\n nf_mult_prev = min(max_out_channels, ndf * nf_mult) // ndf\n nf_mult = min(2 ** n_layers, 8)\n sequences.append(nn.Sequential(*[\n 
nn.Conv2d(ndf * nf_mult_prev+in_ch_addition, min(max_out_channels, ndf * nf_mult), kernel_size=kw, stride=1,\n padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)]))\n # if self.decomposed_input:\n # projected_component_sequences.append(\n # nn.Conv2d(input_nc,input_nc, kernel_size=kw, stride=1,\n # padding=padw, bias=use_bias))\n sequences.append(nn.Sequential(*[\n nn.Conv2d(min(max_out_channels, ndf * nf_mult)+in_ch_addition, 1, kernel_size=kw, stride=1,\n padding=padw)])) # output 1 channel prediction map\n self.num_modules = len(sequences)\n if self.decomposed_input:\n for seq in sequences:\n conv_stride = [child.stride[0] for child in seq.children() if 'Conv2d' in str(child.__class__)]\n assert len(conv_stride)<=1,'More than one conv layer in seq?'\n if len(conv_stride)>0:\n projected_component_sequences.append(nn.Conv2d(input_nc,input_nc, kernel_size=kw, stride=conv_stride[0],\n padding=padw, bias=use_bias))\n self.model = nn.ModuleList(sequences+projected_component_sequences)\n\n def forward(self, input):\n # pre-clipping:\n # 1.Making D oblivious to pixel values range, by clipping values to be within valid range\n # 2.Making D oblivious to quantization issues, by quantizing its inputs to 256 possible values\n if self.decomposed_input:\n projected_component = input[0]\n input = input[1]\n if self.pre_clipping:\n input = torch.max(input=torch.min(input=input,other=1-projected_component),other=-projected_component)\n # input = (255*(input+projected_component)).round()/255-projected_component\n elif self.pre_clipping:\n input = torch.clamp(input=input,min=0,max=1)\n # input = (255*input).round()/255\n for i,seq in enumerate(self.model[:self.num_modules]):\n if self.decomposed_input:\n if i > 0:\n projected_component = self.model[self.num_modules + i - 1](projected_component)\n input = seq(torch.cat([projected_component,input],dim=1))\n else:\n input = seq(input)\n return input\n\nclass Discriminator_VGG_128_nonModified(nn.Module):\n def __init__(self, in_nc, nf):\n super(Discriminator_VGG_128_nonModified, self).__init__()\n # [64, 128, 128]\n self.conv0_0 = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)\n self.conv0_1 = nn.Conv2d(nf, nf, 4, 2, 1, bias=False)\n self.bn0_1 = nn.BatchNorm2d(nf, affine=True)\n # [64, 64, 64]\n self.conv1_0 = nn.Conv2d(nf, nf * 2, 3, 1, 1, bias=False)\n self.bn1_0 = nn.BatchNorm2d(nf * 2, affine=True)\n self.conv1_1 = nn.Conv2d(nf * 2, nf * 2, 4, 2, 1, bias=False)\n self.bn1_1 = nn.BatchNorm2d(nf * 2, affine=True)\n # [128, 32, 32]\n self.conv2_0 = nn.Conv2d(nf * 2, nf * 4, 3, 1, 1, bias=False)\n self.bn2_0 = nn.BatchNorm2d(nf * 4, affine=True)\n self.conv2_1 = nn.Conv2d(nf * 4, nf * 4, 4, 2, 1, bias=False)\n self.bn2_1 = nn.BatchNorm2d(nf * 4, affine=True)\n # [256, 16, 16]\n self.conv3_0 = nn.Conv2d(nf * 4, nf * 8, 3, 1, 1, bias=False)\n self.bn3_0 = nn.BatchNorm2d(nf * 8, affine=True)\n self.conv3_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n self.bn3_1 = nn.BatchNorm2d(nf * 8, affine=True)\n # [512, 8, 8]\n self.conv4_0 = nn.Conv2d(nf * 8, nf * 8, 3, 1, 1, bias=False)\n self.bn4_0 = nn.BatchNorm2d(nf * 8, affine=True)\n self.conv4_1 = nn.Conv2d(nf * 8, nf * 8, 4, 2, 1, bias=False)\n self.bn4_1 = nn.BatchNorm2d(nf * 8, affine=True)\n\n self.linear1 = nn.Linear(512 * 4 * 4, 100)\n self.linear2 = nn.Linear(100, 1)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n\n def forward(self, x):\n fea = self.lrelu(self.conv0_0(x))\n fea = self.lrelu(self.bn0_1(self.conv0_1(fea)))\n\n fea = 
self.lrelu(self.bn1_0(self.conv1_0(fea)))\n fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))\n\n fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))\n fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))\n\n fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))\n fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))\n\n fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))\n fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))\n\n fea = fea.view(fea.size(0), -1)\n fea = self.lrelu(self.linear1(fea))\n out = self.linear2(fea)\n return out\n\n# VGG style Discriminator with input size 128*128\nclass Discriminator_VGG_128(nn.Module):\n def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA',input_patch_size=128,num_2_strides=5,nb=10):\n super(Discriminator_VGG_128, self).__init__()\n assert num_2_strides<=5,'Can be modified by adding more stridable layers, if needed.'\n self.num_2_strides = 1*num_2_strides\n # features\n # hxw, c\n # 128, 64\n FC_end_patch_size = 1*input_patch_size\n conv0 = B.conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type,mode=mode)\n conv1 = B.conv_block(base_nf, base_nf, kernel_size=4, stride=2 if num_2_strides>0 else 1, norm_type=norm_type,act_type=act_type, mode=mode)\n FC_end_patch_size = np.ceil((FC_end_patch_size-1)/(2 if num_2_strides>0 else 1))\n num_2_strides -= 1\n # 64, 64\n conv2 = B.conv_block(base_nf, base_nf*2, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv3 = B.conv_block(base_nf*2, base_nf*2, kernel_size=4, stride=2 if num_2_strides>0 else 1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n FC_end_patch_size = np.ceil((FC_end_patch_size-1)/(2 if num_2_strides>0 else 1))\n num_2_strides -= 1\n # 32, 128\n conv4 = B.conv_block(base_nf*2, base_nf*4, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv5 = B.conv_block(base_nf*4, base_nf*4, kernel_size=4, stride=2 if num_2_strides>0 else 1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n FC_end_patch_size = np.ceil((FC_end_patch_size-1)/(2 if num_2_strides>0 else 1))\n num_2_strides -= 1\n # 16, 256\n conv6 = B.conv_block(base_nf*4, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv7 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2 if num_2_strides>0 else 1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n FC_end_patch_size = np.ceil((FC_end_patch_size-1)/(2 if num_2_strides>0 else 1))\n num_2_strides -= 1\n # 8, 512\n conv8 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv9 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2 if num_2_strides>0 else 1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n FC_end_patch_size = np.ceil((FC_end_patch_size-1)/(2 if num_2_strides>0 else 1))\n num_2_strides -= 1\n # 4, 512\n self.features = B.sequential(*([conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,conv9][:nb]))\n\n self.last_FC_layers = self.num_2_strides==5 #Replacing the FC layers with convolutions, which means using a patch discriminator:\n self.last_FC_layers = False\n # classifier\n # FC_end_patch_size = input_patch_size//(2**self.num_2_strides)\n if self.last_FC_layers:\n self.classifier = nn.Sequential(nn.Linear(base_nf*8 * int(FC_end_patch_size)**2, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))\n else:\n # num_feature_channels = base_nf*8\n num_feature_channels = [l for l in 
self.features.children()][-2].num_features\n pseudo_FC_conv0 = B.conv_block(num_feature_channels,min(100,num_feature_channels),kernel_size=8,stride=1,norm_type=norm_type,act_type=act_type, mode=mode,pad_type=None)\n pseudo_FC_conv1 = B.conv_block(min(100,num_feature_channels),1,kernel_size=1,stride=1,norm_type=norm_type,act_type=act_type, mode=mode)\n self.classifier = nn.Sequential(pseudo_FC_conv0, nn.LeakyReLU(0.2, False),pseudo_FC_conv1) # Changed the LeakyRelu inplace arg to False here, because it caused a bug for some reason.\n\n def forward(self, x):\n x = self.features(x)\n if self.last_FC_layers:\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n\n# VGG style Discriminator with input size 128*128, Spectral Normalization\nclass Discriminator_VGG_128_SN(nn.Module):\n def __init__(self):\n super(Discriminator_VGG_128_SN, self).__init__()\n # features\n # hxw, c\n # 128, 64\n self.lrelu = nn.LeakyReLU(0.2, True)\n\n self.conv0 = SN.spectral_norm(nn.Conv2d(3, 64, 3, 1, 1))\n self.conv1 = SN.spectral_norm(nn.Conv2d(64, 64, 4, 2, 1))\n # 64, 64\n self.conv2 = SN.spectral_norm(nn.Conv2d(64, 128, 3, 1, 1))\n self.conv3 = SN.spectral_norm(nn.Conv2d(128, 128, 4, 2, 1))\n # 32, 128\n self.conv4 = SN.spectral_norm(nn.Conv2d(128, 256, 3, 1, 1))\n self.conv5 = SN.spectral_norm(nn.Conv2d(256, 256, 4, 2, 1))\n # 16, 256\n self.conv6 = SN.spectral_norm(nn.Conv2d(256, 512, 3, 1, 1))\n self.conv7 = SN.spectral_norm(nn.Conv2d(512, 512, 4, 2, 1))\n # 8, 512\n self.conv8 = SN.spectral_norm(nn.Conv2d(512, 512, 3, 1, 1))\n self.conv9 = SN.spectral_norm(nn.Conv2d(512, 512, 4, 2, 1))\n # 4, 512\n\n # classifier\n self.linear0 = SN.spectral_norm(nn.Linear(512 * 4 * 4, 100))\n self.linear1 = SN.spectral_norm(nn.Linear(100, 1))\n\n def forward(self, x):\n x = self.lrelu(self.conv0(x))\n x = self.lrelu(self.conv1(x))\n x = self.lrelu(self.conv2(x))\n x = self.lrelu(self.conv3(x))\n x = self.lrelu(self.conv4(x))\n x = self.lrelu(self.conv5(x))\n x = self.lrelu(self.conv6(x))\n x = self.lrelu(self.conv7(x))\n x = self.lrelu(self.conv8(x))\n x = self.lrelu(self.conv9(x))\n x = x.view(x.size(0), -1)\n x = self.lrelu(self.linear0(x))\n x = self.linear1(x)\n return x\n\n\nclass Discriminator_VGG_96(nn.Module):\n def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA'):\n super(Discriminator_VGG_96, self).__init__()\n # features\n # hxw, c\n # 96, 64\n conv0 = B.conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, \\\n mode=mode)\n conv1 = B.conv_block(base_nf, base_nf, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 48, 64\n conv2 = B.conv_block(base_nf, base_nf*2, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv3 = B.conv_block(base_nf*2, base_nf*2, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 24, 128\n conv4 = B.conv_block(base_nf*2, base_nf*4, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv5 = B.conv_block(base_nf*4, base_nf*4, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 12, 256\n conv6 = B.conv_block(base_nf*4, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv7 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 6, 512\n conv8 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, 
mode=mode)\n conv9 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 3, 512\n self.features = B.sequential(conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\\\n conv9)\n\n # classifier\n self.classifier = nn.Sequential(\n nn.Linear(512 * 3 * 3, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n\nclass Discriminator_VGG_192(nn.Module):\n def __init__(self, in_nc, base_nf, norm_type='batch', act_type='leakyrelu', mode='CNA'):\n super(Discriminator_VGG_192, self).__init__()\n # features\n # hxw, c\n # 192, 64\n conv0 = B.conv_block(in_nc, base_nf, kernel_size=3, norm_type=None, act_type=act_type, \\\n mode=mode)\n conv1 = B.conv_block(base_nf, base_nf, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 96, 64\n conv2 = B.conv_block(base_nf, base_nf*2, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv3 = B.conv_block(base_nf*2, base_nf*2, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 48, 128\n conv4 = B.conv_block(base_nf*2, base_nf*4, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv5 = B.conv_block(base_nf*4, base_nf*4, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 24, 256\n conv6 = B.conv_block(base_nf*4, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv7 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 12, 512\n conv8 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv9 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 6, 512\n conv10 = B.conv_block(base_nf*8, base_nf*8, kernel_size=3, stride=1, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n conv11 = B.conv_block(base_nf*8, base_nf*8, kernel_size=4, stride=2, norm_type=norm_type, \\\n act_type=act_type, mode=mode)\n # 3, 512\n self.features = B.sequential(conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8,\\\n conv9, conv10, conv11)\n\n # classifier\n self.classifier = nn.Sequential(\n nn.Linear(512 * 3 * 3, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n\n####################\n# Perceptual Network\n####################\nRETRAINING_OBLIGING_MODIFICATIONS = ['num_channel_factor_\\d(\\.\\d)?$','patches_init_first']\n\n# Assume input range is [0, 1]\nclass VGGFeatureExtractor(nn.Module):\n def __init__(self,feature_layer=34,use_bn=False,use_input_norm=True,\n device=torch.device('cpu'),state_dict=None,arch='vgg19',arch_config='',**kwargs):\n super(VGGFeatureExtractor, self).__init__()\n if arch_config!='':\n assert all([re.search(pattern,arch_config) is None for pattern in RETRAINING_OBLIGING_MODIFICATIONS]) or 'untrained_' in arch_config\n # assert (re.search('patches_init_(first|all)',arch_config) is None) or 'untrained' not in arch_config,'Relying on trained weights statistics when setting model weights'\n if arch=='SegNetAE':\n from models.modules import SegNet\n model = nn.DataParallel(SegNet.SegNet(3,encode_only=True,batch_norm_DS=False,num_layers=4))\n 
loaded_state_dict = torch.load('/home/tiras/ybahat/Autoencoder/models/BEST_checkpoint.tar')['model']\n modified_state_dict = {}\n for key in model.state_dict().keys():\n modified_state_dict[key] = loaded_state_dict[key.replace('.features.0','.down1').replace('.features.1','.down2').replace('.features.2','.down3').replace('.features.3','.down4').replace('.features.4','.down5')]\n model.load_state_dict(modified_state_dict)\n model = model.module\n use_input_norm = False # SegNet model expects non-normalized images\n elif use_bn:\n model = torchvision.models.__dict__[arch+'_bn'](pretrained='untrained' not in arch_config)\n else:\n model = torchvision.models.__dict__[arch](pretrained='untrained' not in arch_config)\n # I now remove all unnecessary layers before changing the model configuration, because this change may make alter the number of layers, thus necessitating changing the feature_layer parameter.\n if state_dict is not None:\n state_dict = dict(zip([key.replace('module.','') for key in state_dict.keys()],[value for value in state_dict.values()]))\n model.load_state_dict(state_dict,strict=False)\n model.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])\n arch_config = arch_config.replace('untrained_','').replace('untrained','')\n if arch_config!='':\n import sys\n sys.path.append(os.path.abspath('../../RandomPooling'))\n from model_modification import Modify_Model\n saved_config_params = kwargs['saved_config_params'] if 'saved_config_params' in kwargs.keys() else None\n saving_path = kwargs['saving_path'] if 'saving_path' in kwargs.keys() else None\n model = Modify_Model(model,arch_config,classification_mode=False,saved_config_params=saved_config_params,saving_path=saving_path)\n self.use_input_norm = use_input_norm\n if self.use_input_norm:\n mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)\n # [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]\n std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)\n # [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]\n self.register_buffer('mean', mean)\n self.register_buffer('std', std)\n # Moved the next line to appear earlier, before altering the number of layers in the model\n # self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])\n self.features = model.features\n # No need to BP to variable\n for k, v in self.features.named_parameters():\n v.requires_grad = False\n\n def _initialize_weights(self):#This function was copied from the torchvision.models.vgg code:\n for m in self.features.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n if self.use_input_norm:\n x = (x - self.mean) / self.std\n output = self.features(x)\n return output\n\n\n# Assume input range is [0, 1]\nclass ResNet101FeatureExtractor(nn.Module):\n def __init__(self, use_input_norm=True, device=torch.device('cpu')):\n super(ResNet101FeatureExtractor, self).__init__()\n model = torchvision.models.resnet101(pretrained=True)\n self.use_input_norm = use_input_norm\n if self.use_input_norm:\n mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)\n # [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]\n std = 
torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)\n # [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]\n self.register_buffer('mean', mean)\n self.register_buffer('std', std)\n self.features = nn.Sequential(*list(model.children())[:8])\n # No need to BP to variable\n for k, v in self.features.named_parameters():\n v.requires_grad = False\n\n def forward(self, x):\n if self.use_input_norm:\n x = (x - self.mean) / self.std\n output = self.features(x)\n return output\n\n\nclass MINCNet(nn.Module):\n def __init__(self):\n super(MINCNet, self).__init__()\n self.ReLU = nn.ReLU(True)\n self.conv11 = nn.Conv2d(3, 64, 3, 1, 1)\n self.conv12 = nn.Conv2d(64, 64, 3, 1, 1)\n self.maxpool1 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)\n self.conv21 = nn.Conv2d(64, 128, 3, 1, 1)\n self.conv22 = nn.Conv2d(128, 128, 3, 1, 1)\n self.maxpool2 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)\n self.conv31 = nn.Conv2d(128, 256, 3, 1, 1)\n self.conv32 = nn.Conv2d(256, 256, 3, 1, 1)\n self.conv33 = nn.Conv2d(256, 256, 3, 1, 1)\n self.maxpool3 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)\n self.conv41 = nn.Conv2d(256, 512, 3, 1, 1)\n self.conv42 = nn.Conv2d(512, 512, 3, 1, 1)\n self.conv43 = nn.Conv2d(512, 512, 3, 1, 1)\n self.maxpool4 = nn.MaxPool2d(2, stride=2, padding=0, ceil_mode=True)\n self.conv51 = nn.Conv2d(512, 512, 3, 1, 1)\n self.conv52 = nn.Conv2d(512, 512, 3, 1, 1)\n self.conv53 = nn.Conv2d(512, 512, 3, 1, 1)\n\n def forward(self, x):\n out = self.ReLU(self.conv11(x))\n out = self.ReLU(self.conv12(out))\n out = self.maxpool1(out)\n out = self.ReLU(self.conv21(out))\n out = self.ReLU(self.conv22(out))\n out = self.maxpool2(out)\n out = self.ReLU(self.conv31(out))\n out = self.ReLU(self.conv32(out))\n out = self.ReLU(self.conv33(out))\n out = self.maxpool3(out)\n out = self.ReLU(self.conv41(out))\n out = self.ReLU(self.conv42(out))\n out = self.ReLU(self.conv43(out))\n out = self.maxpool4(out)\n out = self.ReLU(self.conv51(out))\n out = self.ReLU(self.conv52(out))\n out = self.conv53(out)\n return out\n\n# Encoder:\ndef conv3x3(in_planes, out_planes):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,padding=1, bias=True)\n\ndef meanpoolConv(inplanes, outplanes):\n sequence = []\n sequence += [nn.AvgPool2d(kernel_size=2, stride=2)]\n sequence += [nn.Conv2d(inplanes, outplanes,\n kernel_size=1, stride=1, padding=0, bias=True)]\n return nn.Sequential(*sequence)\n\n\ndef convMeanpool(inplanes, outplanes):\n sequence = []\n sequence += [conv3x3(inplanes, outplanes)]\n sequence += [nn.AvgPool2d(kernel_size=2, stride=2)]\n return nn.Sequential(*sequence)\n\nclass BasicBlock(nn.Module):\n def __init__(self, inplanes, outplanes, norm_layer=None, nl_layer=None):\n super(BasicBlock, self).__init__()\n layers = []\n if norm_layer is not None:\n layers += [norm_layer(inplanes)]\n layers += [nl_layer()]\n layers += [conv3x3(inplanes, inplanes)]\n if norm_layer is not None:\n layers += [norm_layer(inplanes)]\n layers += [nl_layer()]\n layers += [convMeanpool(inplanes, outplanes)]\n self.conv = nn.Sequential(*layers)\n self.shortcut = meanpoolConv(inplanes, outplanes)\n\n def forward(self, x):\n out = self.conv(x) + self.shortcut(x)\n return out\n\nclass E_ResNet(nn.Module):\n def __init__(self, input_nc=3, output_nc=1, ndf=64, n_blocks=4,\n norm_layer=None, nl_layer=None, vaeLike=False):\n super(E_ResNet, self).__init__()\n self.vaeLike = vaeLike\n max_ndf = 4\n conv_layers = [\n nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1, bias=True)]\n for 
n in range(1, n_blocks):\n input_ndf = ndf * min(max_ndf, n)\n output_ndf = ndf * min(max_ndf, n + 1)\n conv_layers += [BasicBlock(input_ndf,\n output_ndf, norm_layer, nl_layer)]\n conv_layers += [nl_layer(), nn.AvgPool2d(8)]\n if vaeLike:\n self.fc = nn.Sequential(*[nn.Linear(output_ndf, output_nc)])\n self.fcVar = nn.Sequential(*[nn.Linear(output_ndf, output_nc)])\n else:\n self.fc = nn.Sequential(*[nn.Linear(output_ndf, output_nc)])\n self.conv = nn.Sequential(*conv_layers)\n\n def forward(self, x):\n x_conv = self.conv(x)\n conv_flat = x_conv.view(x.size(0), -1)\n output = self.fc(conv_flat)\n if self.vaeLike:\n outputVar = self.fcVar(conv_flat)\n return output, outputVar\n else:\n return output\n return output\n\n# Assume input range is [0, 1]\nclass MINCFeatureExtractor(nn.Module):\n def __init__(self, feature_layer=34, use_bn=False, use_input_norm=True, \\\n device=torch.device('cpu')):\n super(MINCFeatureExtractor, self).__init__()\n\n self.features = MINCNet()\n self.features.load_state_dict(\n torch.load('../experiments/pretrained_models/VGG16minc_53.pth'), strict=True)\n self.features.eval()\n # No need to BP to variable\n for k, v in self.features.named_parameters():\n v.requires_grad = False\n\n def forward(self, x):\n output = self.features(x)\n return output\n"
] | [
[
"torch.min",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.ModuleList",
"torch.nn.InstanceNorm2d",
"torch.nn.Sigmoid",
"torch.cat",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"matplotlib.pyplot.savefig",
"torch.nn.init.normal_",
"torch.nn.LayerNorm",
"torch.nn.AvgPool2d",
"torch.device",
"numpy.round",
"torch.mean",
"torch.Tensor",
"torch.nn.PixelShuffle",
"torch.nn.MaxPool2d",
"numpy.ceil",
"numpy.zeros",
"torch.load",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.colorbar",
"torch.clamp",
"torch.nn.Linear",
"torch.nn.init.constant_",
"numpy.floor",
"torch.nn.Sequential",
"numpy.sqrt",
"torch.nn.ReLU",
"torch.nn.init.orthogonal_",
"torch.nn.functional.interpolate",
"torch.nn.LeakyReLU"
]
] |
L-Net-1992/TensorRT | [
"34b664d404001bd724cb56b52a6e0e05e1fd97f2"
] | [
"samples/python/network_api_pytorch_mnist/model.py"
] | [
"#\n# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# This file contains functions for training a PyTorch MNIST Model\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\nimport numpy as np\nimport os\n\nfrom random import randint\n\n# Network\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, kernel_size=5)\n self.conv2 = nn.Conv2d(20, 50, kernel_size=5)\n self.fc1 = nn.Linear(800, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.max_pool2d(self.conv1(x), kernel_size=2, stride=2)\n x = F.max_pool2d(self.conv2(x), kernel_size=2, stride=2)\n x = x.view(-1, 800)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\nclass MnistModel(object):\n def __init__(self):\n self.batch_size = 64\n self.test_batch_size = 100\n self.learning_rate = 0.0025\n self.sgd_momentum = 0.9\n self.log_interval = 100\n # Fetch MNIST data set.\n self.train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('/tmp/mnist/data', train=True, download=True, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=1,\n timeout=600)\n self.test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('/tmp/mnist/data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=self.test_batch_size,\n shuffle=True,\n num_workers=1,\n timeout=600)\n self.network = Net()\n\n # Train the network for one or more epochs, validating after each epoch.\n def learn(self, num_epochs=2):\n # Train the network for a single epoch\n def train(epoch):\n self.network.train()\n optimizer = optim.SGD(self.network.parameters(), lr=self.learning_rate, momentum=self.sgd_momentum)\n for batch, (data, target) in enumerate(self.train_loader):\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = self.network(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch % self.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(epoch, batch * len(data), len(self.train_loader.dataset), 100. 
* batch / len(self.train_loader), loss.data.item()))\n\n # Test the network\n def test(epoch):\n self.network.eval()\n test_loss = 0\n correct = 0\n for data, target in self.test_loader:\n with torch.no_grad():\n data, target = Variable(data), Variable(target)\n output = self.network(data)\n test_loss += F.nll_loss(output, target).data.item()\n pred = output.data.max(1)[1]\n correct += pred.eq(target.data).cpu().sum()\n test_loss /= len(self.test_loader)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(test_loss, correct, len(self.test_loader.dataset), 100. * correct / len(self.test_loader.dataset)))\n\n for e in range(num_epochs):\n train(e + 1)\n test(e + 1)\n\n def get_weights(self):\n return self.network.state_dict()\n\n def get_random_testcase(self):\n data, target = next(iter(self.test_loader))\n case_num = randint(0, len(data) - 1)\n test_case = data.numpy()[case_num].ravel().astype(np.float32)\n test_name = target.numpy()[case_num]\n return test_case, test_name\n"
] | [
[
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.functional.nll_loss",
"torch.autograd.Variable",
"torch.no_grad",
"torch.nn.Conv2d"
]
] |
mlweilert/bpnet | [
"dcc9e8d805f9de774ae9dcc62c20504915be614f"
] | [
"bpnet/samplers.py"
] | [
"\"\"\"\nModule implementing different samplers for the chipnexus data\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom kipoi_utils.external.torch.sampler import Sampler\nfrom kipoi_utils.data_utils import iterable_cycle\nimport warnings\nimport gin\n\n\ndef get_batch_sizes(p_vec, batch_size, verbose=True):\n \"\"\"Compute the individual batch sizes for different probabilities\n\n Args:\n p_vec: list of probabilities for each class\n batch_size: batch size\n\n Returns:\n rounded list p_vec[i]*batch_size\n \"\"\"\n p_vec = np.array(p_vec) / sum(p_vec)\n\n batch_sizes = np.round(p_vec * batch_size).astype(int)\n difference = batch_size - batch_sizes.sum()\n # Increase the largest one\n batch_sizes[batch_sizes.argmax()] += difference\n if verbose:\n print(\"Using batch sizes:\")\n print(batch_sizes)\n assert batch_sizes.sum() == batch_size\n return batch_sizes\n\n\[email protected]\nclass StratifiedRandomBatchSampler(Sampler):\n\n def __init__(self, class_vector, p_vec, batch_size, verbose=False):\n \"\"\"Stratified Sampling\n\n Args:\n class_vector (np.array): a vector of class labels\n p_vec (list[float]): list of probabilities for each class\n batch_size (int): batch_size\n verbose\n \"\"\"\n self.n_splits = int(class_vector.shape[0] / batch_size)\n self.class_vector = class_vector\n self.p_vec = p_vec\n self.batch_size = batch_size\n\n self.batch_sizes = get_batch_sizes(self.p_vec, self.batch_size, verbose=verbose)\n\n # check that the individual batch size will always be > 0\n for i, batch_size in enumerate(self.batch_sizes):\n if batch_size == 0:\n warnings.warn(\"Batch size for class {} is 0.\".format(i))\n\n self.classes = np.arange(len(p_vec))\n assert np.all(np.sort(pd.Series(self.class_vector).unique()) == self.classes)\n\n idx_all = np.arange(len(self.class_vector))\n self.class_idx_iterators = [iterable_cycle(np.random.permutation(idx_all[self.class_vector == cls]))\n for cls in self.classes]\n\n def __iter__(self):\n for i in range(len(self)):\n yield [next(self.class_idx_iterators[i])\n for i, batch_size in enumerate(self.batch_sizes)\n for j in range(batch_size)]\n\n def __len__(self):\n return len(self.class_vector) // self.batch_size\n\n\n# # OLD\n# # convenience samplers for ChIP-nexus data\n\n\n# def random(arr, n=10):\n# \"\"\"\n# Randomly sample the values\n# arr: numpy array\n# n = number of samples to draw\n# \"\"\"\n\n# return list(pd.Series(np.arange(len(arr))).sample(n).index)\n\n\n# def top_max_count(arr, end=10, start=0, keep=None):\n# \"\"\"\n# Return indices where arr has the highest max(pos) + max(neg)\n\n# Args:\n# arr: can be an array or a list of arrays\n# start: Where to start returning the values\n# end: where to stop\n# \"\"\"\n# if keep is None:\n# keep = np.arange(len(arr))\n# assert end > start\n# # Top maxcount indicies\n# return pd.Series(arr.max(1).sum(1))[keep].sort_values(ascending=False).index[start:end]\n\n\n# def top_sum_count(arr, end=10, start=0, keep=None):\n# \"\"\"\n# Return indices where arr has the highest number of counts\n\n# Args:\n# arr: can be an array or a list of arrays\n# start: Where to start returning the values\n# end: where to stop\n# \"\"\"\n# if keep is None:\n# keep = np.arange(len(arr))\n# assert end > start\n# return pd.Series(arr.sum(1).sum(1))[keep].sort_values(ascending=False).index[start:end]\n\n\n# def random_larger(arr, n=10, percentile=50):\n# \"\"\"Randomly sample the values larger than a certain quantile\n\n# arr: numpy array\n# n = number of samples to draw\n# \"\"\"\n# values = arr.sum(1).sum(1)\n# 
return list(pd.Series(np.arange(len(arr))[values > np.percentile(values, percentile)]).sample(n).index)\n"
] | [
[
"numpy.array",
"pandas.Series",
"numpy.round",
"numpy.random.permutation"
]
] |
ankitdipto/sumo-rl | [
"70d75d463fa09d0ecfc10589b66955c22c8df41b"
] | [
"sumo_rl/environment/env.py"
] | [
"import os\nimport sys\nfrom pathlib import Path\nfrom typing import Optional, Union, Tuple\nimport sumo_rl\nif 'SUMO_HOME' in os.environ:\n tools = os.path.join(os.environ['SUMO_HOME'], 'tools')\n sys.path.append(tools)\nelse:\n sys.exit(\"Please declare the environment variable 'SUMO_HOME'\")\nimport traci\nimport sumolib\nimport gym\nfrom gym.envs.registration import EnvSpec\nimport numpy as np\nimport pandas as pd\n\nfrom .traffic_signal import TrafficSignal\n\nfrom gym.utils import EzPickle, seeding\nfrom pettingzoo import AECEnv\nfrom pettingzoo.utils.agent_selector import agent_selector\nfrom pettingzoo import AECEnv\nfrom pettingzoo.utils import agent_selector, wrappers\nfrom pettingzoo.utils.conversions import parallel_wrapper_fn\n\nLIBSUMO = 'LIBSUMO_AS_TRACI' in os.environ\n\n\ndef env(**kwargs):\n env = SumoEnvironmentPZ(**kwargs)\n env = wrappers.AssertOutOfBoundsWrapper(env)\n env = wrappers.OrderEnforcingWrapper(env)\n return env\n\nparallel_env = parallel_wrapper_fn(env)\n\n\nclass SumoEnvironment(gym.Env):\n \"\"\"\n SUMO Environment for Traffic Signal Control\n\n :param net_file: (str) SUMO .net.xml file\n :param route_file: (str) SUMO .rou.xml file\n :param out_csv_name: (Optional[str]) name of the .csv output with simulation results. If None no output is generated\n :param use_gui: (bool) Wheter to run SUMO simulation with GUI visualisation\n :param virtual_display: (Optional[Tuple[int,int]]) Resolution of a virtual display for rendering\n :param begin_time: (int) The time step (in seconds) the simulation starts\n :param num_seconds: (int) Number of simulated seconds on SUMO. The time in seconds the simulation must end.\n :param max_depart_delay: (int) Vehicles are discarded if they could not be inserted after max_depart_delay seconds\n :param delta_time: (int) Simulation seconds between actions\n :param min_green: (int) Minimum green time in a phase\n :param max_green: (int) Max green time in a phase\n :single_agent: (bool) If true, it behaves like a regular gym.Env. Else, it behaves like a MultiagentEnv (https://github.com/ray-project/ray/blob/master/python/ray/rllib/env/multi_agent_env.py)\n :sumo_seed: (int/string) Random seed for sumo. 
If 'random' it uses a randomly chosen seed.\n :fixed_ts: (bool) If true, it will follow the phase configuration in the route_file and ignore the actions.\n :sumo_warnings: (bool) If False, remove SUMO warnings in the terminal\n \"\"\"\n CONNECTION_LABEL = 0 # For traci multi-client support\n\n def __init__(\n self, \n net_file: str, \n route_file: str, \n out_csv_name: Optional[str] = None, \n use_gui: bool = False, \n virtual_display: Optional[Tuple[int,int]] = None,\n begin_time: int = 0, \n num_seconds: int = 20000, \n max_depart_delay: int = 100000,\n time_to_teleport: int = -1, \n delta_time: int = 5, \n yellow_time: int = 2, \n min_green: int = 5, \n max_green: int = 50, \n single_agent: bool = False, \n sumo_seed: Union[str,int] = 'random', \n fixed_ts: bool = False,\n sumo_warnings: bool = True,\n ):\n self._net = net_file\n self._route = route_file\n self.use_gui = use_gui\n if self.use_gui:\n self._sumo_binary = sumolib.checkBinary('sumo-gui')\n else:\n self._sumo_binary = sumolib.checkBinary('sumo')\n\n self.virtual_display = virtual_display\n\n assert delta_time > yellow_time, \"Time between actions must be at least greater than yellow time.\"\n\n self.begin_time = begin_time\n self.sim_max_time = num_seconds\n self.delta_time = delta_time # seconds on sumo at each step\n self.max_depart_delay = max_depart_delay # Max wait time to insert a vehicle\n self.time_to_teleport = time_to_teleport\n self.min_green = min_green\n self.max_green = max_green\n self.yellow_time = yellow_time\n self.single_agent = single_agent\n self.sumo_seed = sumo_seed\n self.fixed_ts = fixed_ts\n self.sumo_warnings = sumo_warnings\n self.label = str(SumoEnvironment.CONNECTION_LABEL)\n SumoEnvironment.CONNECTION_LABEL += 1\n self.sumo = None\n\n if LIBSUMO:\n traci.start([sumolib.checkBinary('sumo'), '-n', self._net]) # Start only to retrieve traffic light information\n conn = traci\n else:\n traci.start([sumolib.checkBinary('sumo'), '-n', self._net], label='init_connection'+self.label)\n conn = traci.getConnection('init_connection'+self.label)\n self.ts_ids = list(conn.trafficlight.getIDList())\n self.traffic_signals = {ts: TrafficSignal(self, \n ts, \n self.delta_time, \n self.yellow_time, \n self.min_green, \n self.max_green, \n self.begin_time,\n conn) for ts in self.ts_ids}\n conn.close()\n\n self.vehicles = dict()\n self.reward_range = (-float('inf'), float('inf'))\n self.metadata = {}\n self.spec = EnvSpec('SUMORL-v0')\n self.run = 0\n self.metrics = []\n self.out_csv_name = out_csv_name\n self.observations = {ts: None for ts in self.ts_ids}\n self.rewards = {ts: None for ts in self.ts_ids}\n \n def _start_simulation(self):\n sumo_cmd = [self._sumo_binary,\n '-n', self._net,\n '-r', self._route,\n '--max-depart-delay', str(self.max_depart_delay), \n '--waiting-time-memory', '10000',\n '--time-to-teleport', str(self.time_to_teleport)]\n if self.begin_time > 0:\n sumo_cmd.append('-b {}'.format(self.begin_time))\n if self.sumo_seed == 'random':\n sumo_cmd.append('--random')\n else:\n sumo_cmd.extend(['--seed', str(self.sumo_seed)])\n if not self.sumo_warnings:\n sumo_cmd.append('--no-warnings')\n if self.use_gui:\n sumo_cmd.extend(['--start', '--quit-on-end'])\n if self.virtual_display is not None:\n sumo_cmd.extend(['--window-size', f'{self.virtual_display[0]},{self.virtual_display[1]}'])\n from pyvirtualdisplay.smartdisplay import SmartDisplay\n print(\"Creating a virtual display.\")\n self.disp = SmartDisplay(size=self.virtual_display)\n self.disp.start()\n print(\"Virtual display started.\")\n\n 
if LIBSUMO:\n traci.start(sumo_cmd)\n self.sumo = traci\n else:\n traci.start(sumo_cmd, label=self.label)\n self.sumo = traci.getConnection(self.label)\n \n if self.use_gui:\n self.sumo.gui.setSchema(traci.gui.DEFAULT_VIEW, \"real world\") \n\n def reset(self):\n if self.run != 0:\n self.close()\n self.save_csv(self.out_csv_name, self.run)\n self.run += 1\n self.metrics = []\n\n self._start_simulation()\n\n self.traffic_signals = {ts: TrafficSignal(self, \n ts, \n self.delta_time, \n self.yellow_time, \n self.min_green, \n self.max_green, \n self.begin_time,\n self.sumo) for ts in self.ts_ids}\n self.vehicles = dict()\n\n if self.single_agent:\n return self._compute_observations()[self.ts_ids[0]]\n else:\n return self._compute_observations()\n\n @property\n def sim_step(self):\n \"\"\"\n Return current simulation second on SUMO\n \"\"\"\n return self.sumo.simulation.getTime()\n\n def step(self, action):\n # No action, follow fixed TL defined in self.phases\n if action is None or action == {}:\n for _ in range(self.delta_time):\n self._sumo_step()\n else:\n self._apply_actions(action)\n self._run_steps()\n\n observations = self._compute_observations()\n rewards = self._compute_rewards()\n dones = self._compute_dones()\n self._compute_info()\n\n if self.single_agent:\n return observations[self.ts_ids[0]], rewards[self.ts_ids[0]], dones['__all__'], {}\n else:\n return observations, rewards, dones, {}\n\n def _run_steps(self):\n time_to_act = False\n while not time_to_act:\n self._sumo_step()\n for ts in self.ts_ids:\n self.traffic_signals[ts].update()\n if self.traffic_signals[ts].time_to_act:\n time_to_act = True\n\n def _apply_actions(self, actions):\n \"\"\"\n Set the next green phase for the traffic signals\n :param actions: If single-agent, actions is an int between 0 and self.num_green_phases (next green phase)\n If multiagent, actions is a dict {ts_id : greenPhase}\n \"\"\" \n if self.single_agent:\n if self.traffic_signals[self.ts_ids[0]].time_to_act:\n self.traffic_signals[self.ts_ids[0]].set_next_phase(actions)\n else:\n for ts, action in actions.items():\n if self.traffic_signals[ts].time_to_act:\n self.traffic_signals[ts].set_next_phase(action)\n\n def _compute_dones(self):\n dones = {ts_id: False for ts_id in self.ts_ids}\n dones['__all__'] = self.sim_step > self.sim_max_time\n return dones\n \n def _compute_info(self):\n info = self._compute_step_info()\n self.metrics.append(info)\n\n def _compute_observations(self):\n self.observations.update({ts: self.traffic_signals[ts].compute_observation() for ts in self.ts_ids if self.traffic_signals[ts].time_to_act})\n return {ts: self.observations[ts].copy() for ts in self.observations.keys() if self.traffic_signals[ts].time_to_act}\n\n def _compute_rewards(self):\n self.rewards.update({ts: self.traffic_signals[ts].compute_reward() for ts in self.ts_ids if self.traffic_signals[ts].time_to_act})\n return {ts: self.rewards[ts] for ts in self.rewards.keys() if self.traffic_signals[ts].time_to_act}\n\n @property\n def observation_space(self):\n return self.traffic_signals[self.ts_ids[0]].observation_space\n \n @property\n def action_space(self):\n return self.traffic_signals[self.ts_ids[0]].action_space\n \n def observation_spaces(self, ts_id):\n return self.traffic_signals[ts_id].observation_space\n \n def action_spaces(self, ts_id):\n return self.traffic_signals[ts_id].action_space\n\n def _sumo_step(self):\n self.sumo.simulationStep()\n\n def _compute_step_info(self):\n return {\n 'step_time': self.sim_step,\n 'reward': 
self.traffic_signals[self.ts_ids[0]].last_reward,\n 'total_stopped': sum(self.traffic_signals[ts].get_total_queued() for ts in self.ts_ids),\n 'total_wait_time': sum(sum(self.traffic_signals[ts].get_waiting_time_per_lane()) for ts in self.ts_ids)\n }\n\n def close(self):\n if self.sumo is None:\n return\n if not LIBSUMO:\n traci.switch(self.label)\n traci.close()\n try:\n self.disp.stop()\n except AttributeError:\n pass\n self.sumo = None\n \n def __del__(self):\n self.close()\n \n def render(self, mode='human'):\n if self.virtual_display:\n #img = self.sumo.gui.screenshot(traci.gui.DEFAULT_VIEW,\n # f\"temp/img{self.sim_step}.jpg\", \n # width=self.virtual_display[0],\n # height=self.virtual_display[1])\n img = self.disp.grab()\n if mode == 'rgb_array':\n return np.array(img)\n return img \n \n def save_csv(self, out_csv_name, run):\n if out_csv_name is not None:\n df = pd.DataFrame(self.metrics)\n Path(Path(out_csv_name).parent).mkdir(parents=True, exist_ok=True)\n df.to_csv(out_csv_name + '_conn{}_run{}'.format(self.label, run) + '.csv', index=False)\n\n # Below functions are for discrete state space\n\n def encode(self, state, ts_id):\n phase = int(np.where(state[:self.traffic_signals[ts_id].num_green_phases] == 1)[0])\n min_green = state[self.traffic_signals[ts_id].num_green_phases]\n density_queue = [self._discretize_density(d) for d in state[self.traffic_signals[ts_id].num_green_phases + 1:]]\n # tuples are hashable and can be used as key in python dictionary\n return tuple([phase, min_green] + density_queue)\n\n def _discretize_density(self, density):\n return min(int(density*10), 9)\n\n\nclass SumoEnvironmentPZ(AECEnv, EzPickle):\n metadata = {'render.modes': ['human', 'rgb_array'], 'name': \"sumo_rl_v0\"}\n\n def __init__(self, **kwargs):\n EzPickle.__init__(self, **kwargs)\n self._kwargs = kwargs\n\n self.seed()\n self.env = SumoEnvironment(**self._kwargs)\n\n self.agents = self.env.ts_ids\n self.possible_agents = self.env.ts_ids\n self._agent_selector = agent_selector(self.agents)\n self.agent_selection = self._agent_selector.reset()\n # spaces\n self.action_spaces = {a: self.env.action_spaces(a) for a in self.agents}\n self.observation_spaces = {a: self.env.observation_spaces(a) for a in self.agents}\n\n # dicts\n self.rewards = {a: 0 for a in self.agents}\n self.dones = {a: False for a in self.agents}\n self.infos = {a: {} for a in self.agents}\n\n def seed(self, seed=None):\n self.randomizer, seed = seeding.np_random(seed)\n\n def reset(self):\n self.env.reset()\n self.agents = self.possible_agents[:]\n self.agent_selection = self._agent_selector.reset()\n self.rewards = {agent: 0 for agent in self.agents}\n self._cumulative_rewards = {agent: 0 for agent in self.agents}\n self.dones = {agent: False for agent in self.agents}\n self.infos = {agent: {} for agent in self.agents}\n \n def observation_space(self, agent):\n return self.observation_spaces[agent]\n\n def action_space(self, agent):\n return self.action_spaces[agent]\n\n def observe(self, agent):\n obs = self.env.observations[agent].copy()\n return obs\n\n def state(self):\n raise NotImplementedError('Method state() currently not implemented.')\n\n def close(self):\n self.env.close()\n\n def render(self, mode='human'):\n return self.env.render(mode)\n \n def save_csv(self, out_csv_name, run):\n self.env.save_csv(out_csv_name, run)\n\n def step(self, action):\n if self.dones[self.agent_selection]:\n return self._was_done_step(action)\n agent = self.agent_selection\n if not self.action_spaces[agent].contains(action):\n 
raise Exception('Action for agent {} must be in Discrete({}).'\n 'It is currently {}'.format(agent, self.action_spaces[agent].n, action))\n\n self.env._apply_actions({agent: action})\n\n if self._agent_selector.is_last():\n self.env._run_steps()\n self.env._compute_observations()\n self.rewards = self.env._compute_rewards()\n self.env._compute_info()\n else:\n self._clear_rewards()\n \n done = self.env._compute_dones()['__all__']\n self.dones = {a : done for a in self.agents}\n\n self.agent_selection = self._agent_selector.next()\n self._cumulative_rewards[agent] = 0\n self._accumulate_rewards()\n"
] | [
[
"numpy.array",
"numpy.where",
"pandas.DataFrame"
]
] |
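The `SumoEnvironment` record above documents its constructor parameters in the class docstring. Below is a minimal single-agent usage sketch, not taken from the source: it assumes `SUMO_HOME` is set, that `SumoEnvironment` is importable from the `sumo_rl` package root as in the published sumo-rl package, and the network/route paths and episode length are placeholders.

```python
# Minimal single-agent sketch (assumes SUMO_HOME is set and sumo-rl is installed).
# The file paths and num_seconds below are placeholders, not values from the record.
from sumo_rl import SumoEnvironment

env = SumoEnvironment(
    net_file="nets/intersection.net.xml",     # placeholder SUMO network
    route_file="nets/intersection.rou.xml",   # placeholder route file
    out_csv_name="outputs/intersection",      # optional CSV prefix for metrics
    use_gui=False,
    num_seconds=3600,
    single_agent=True,                        # plain gym.Env interface
)

obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()        # random policy, for illustration only
    obs, reward, done, info = env.step(action)
env.close()
```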
nick-parker/trimesh | [
"a7bc1e0489ec98e3a3516088a7e64c8beca8b41a"
] | [
"trimesh/remesh.py"
] | [
"\"\"\"\nremesh.py\n-------------\n\nDeal with re- triangulation of existing meshes.\n\"\"\"\n\nimport numpy as np\n\nimport collections\n\nfrom . import util\nfrom . import grouping\n\n\ndef subdivide(vertices, faces, face_index=None):\n \"\"\"\n Subdivide a mesh into smaller triangles.\n\n Parameters\n ----------\n vertices: (n,3) float, verticies\n faces: (n,3) int, indexes of vertices which make up triangular faces\n face_index: faces to subdivide.\n if None: all faces of mesh will be subdivided\n if (n,) int array of indices: only specified faces will be\n subdivided. Note that in this case the mesh will generally\n no longer be manifold, as the additional vertex on the midpoint\n will not be used by the adjacent faces to the faces specified,\n and an additional postprocessing step will be required to\n make resulting mesh watertight\n\n Returns\n ----------\n new_vertices: (n,3) float, vertices\n new_faces: (n,3) int, remeshed faces\n \"\"\"\n if face_index is None:\n face_index = np.arange(len(faces))\n else:\n face_index = np.asanyarray(face_index)\n\n # the (c,3) int set of vertex indices\n faces = faces[face_index]\n # the (c, 3, 3) float set of points in the triangles\n triangles = vertices[faces]\n # the 3 midpoints of each triangle edge vstacked to a (3*c, 3) float\n mid = np.vstack([triangles[:, g, :].mean(axis=1) for g in [[0, 1],\n [1, 2],\n [2, 0]]])\n mid_idx = (np.arange(len(face_index) * 3)).reshape((3, -1)).T\n # for adjacent faces we are going to be generating the same midpoint\n # twice, so we handle it here by finding the unique vertices\n unique, inverse = grouping.unique_rows(mid)\n\n mid = mid[unique]\n mid_idx = inverse[mid_idx] + len(vertices)\n # the new faces, with correct winding\n f = np.column_stack([faces[:, 0], mid_idx[:, 0], mid_idx[:, 2],\n mid_idx[:, 0], faces[:, 1], mid_idx[:, 1],\n mid_idx[:, 2], mid_idx[:, 1], faces[:, 2],\n mid_idx[:, 0], mid_idx[:, 1], mid_idx[:, 2], ]).reshape((-1, 3))\n # add the 3 new faces per old face\n new_faces = np.vstack((faces, f[len(face_index):]))\n # replace the old face with a smaller face\n new_faces[face_index] = f[:len(face_index)]\n\n new_vertices = np.vstack((vertices, mid))\n\n return new_vertices, new_faces\n\n\ndef subdivide_to_size(vertices, faces, max_edge, max_iter=10):\n \"\"\"\n Subdivide a mesh until every edge is shorter than a specified length.\n\n Will return a triangle soup, not a nicely structured mesh.\n\n Parameters\n ------------\n vertices: (n,3) float, vertices in space\n faces: (m,3) int, indices of vertices which make up triangles\n max_edge: float, maximum length of any edge in the result\n max_iter: int, the maximum number of times to run subdivisions\n\n Returns\n ------------\n vertices: (j,3) float, vertices in space\n faces: (q,3) int, indices of vertices\n \"\"\"\n done_face = collections.deque()\n done_vert = collections.deque()\n\n current_faces = faces\n current_vertices = vertices\n\n for i in range(max_iter + 1):\n triangles = current_vertices[current_faces]\n\n # compute the length of every triangle edge\n edge_lengths = (\n np.diff(triangles[:, [0, 1, 2, 0]], axis=1)**2).sum(axis=2) ** .5\n\n too_long = (edge_lengths > max_edge).any(axis=1)\n\n # clean up the faces a little bit so we don't carry a ton of unused\n # vertices\n unique, inverse = np.unique(current_faces[np.logical_not(too_long)],\n return_inverse=True)\n\n done_vert.append(current_vertices[unique])\n done_face.append(inverse.reshape((-1, 3)))\n\n if not too_long.any():\n break\n\n (current_vertices,\n 
current_faces) = subdivide(current_vertices,\n current_faces[too_long])\n\n vertices, faces = util.append_faces(done_vert, done_face)\n return vertices, faces\n"
] | [
[
"numpy.vstack",
"numpy.diff",
"numpy.column_stack",
"numpy.asanyarray",
"numpy.logical_not"
]
] |
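A short sketch of the two subdivision helpers from `remesh.py` above, applied to a single made-up triangle; it assumes the module is importable as `trimesh.remesh`.

```python
import numpy as np
from trimesh import remesh  # assumes the module layout shown above

# A single made-up triangle (not from the source).
vertices = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2]])

# One pass: every input face is replaced by four smaller faces.
v1, f1 = remesh.subdivide(vertices, faces)
assert len(f1) == 4 * len(faces)

# Repeated passes until every edge is shorter than max_edge (triangle-soup output).
v2, f2 = remesh.subdivide_to_size(vertices, faces, max_edge=0.25)
```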
Murat-Karadag/nlu | [
"6a2b5995ea543e63c40baaca1bf9ad8a9db36757"
] | [
"nlu/pipe/viz/streamlit_viz/viz_building_blocks/word_similarity.py"
] | [
"import nlu\nfrom nlu.discovery import Discoverer\nfrom nlu.pipe.utils.storage_ref_utils import StorageRefUtils\nfrom typing import List, Tuple, Optional, Dict, Union\nimport streamlit as st\nfrom nlu.utils.modelhub.modelhub_utils import ModelHubUtils\n\nimport numpy as np\nimport pandas as pd\nfrom nlu.pipe.viz.streamlit_viz.streamlit_utils_OS import StreamlitUtilsOS\nfrom nlu.pipe.viz.streamlit_viz.gen_streamlit_code import get_code_for_viz\nfrom nlu.pipe.viz.streamlit_viz.styles import _set_block_container_style\nimport random\nfrom nlu.pipe.viz.streamlit_viz.streamlit_viz_tracker import StreamlitVizTracker\n\n\nclass WordSimilarityStreamlitBlock():\n @staticmethod\n def display_word_similarity(\n pipe, # nlu pipe\n default_texts: Tuple[str, str] = (\"Donald Trump likes to party!\", \"Angela Merkel likes to party!\"),\n threshold: float = 0.5,\n title: Optional[str] = \"Embeddings Similarity Matrix & Visualizations \",\n sub_tile: Optional[\n str] = \"Visualize `word-wise similarity matrix` and calculate `similarity scores` for `2 texts` and every `word embedding` loaded\",\n write_raw_pandas: bool = False,\n display_embed_information: bool = True,\n similarity_matrix=True,\n show_algo_select: bool = True,\n dist_metrics: List[str] = ('cosine'),\n set_wide_layout_CSS: bool = True,\n generate_code_sample: bool = False,\n key: str = \"NLU_streamlit\",\n num_cols: int = 2,\n display_scalar_similarities: bool = False,\n display_similarity_summary: bool = False,\n model_select_position: str = 'side', # main or side\n show_infos: bool = True,\n show_logo: bool = True,\n ):\n\n \"\"\"We visualize the following cases :\n 1. Simmilarity between 2 words - > sim (word_emb1, word_emb2)\n 2. Simmilarity between 2 sentences -> let weTW stand word word_emb of token T and sentence S\n 2.1. Raw token level with merged embeddings -> sim([we11,we21,weT1], [we12,we22,weT2])\n 2.2 Autogenerate sentemb, basically does 2.1 in the Spark NLP backend\n 2.3 Already using sentence_embedder model -> sim(se1,se2)\n 3. Simmilarity between token and sentence -> sim([we11,w21,wT1], se2)\n 4. Mirrored 3\n \"\"\"\n # https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics.pairwise\n StreamlitVizTracker.footer_displayed = False\n try:\n import plotly.express as px\n from sklearn.metrics.pairwise import distance_metrics\n except:\n st.error(\n \"You need the sklearn and plotly package in your Python environment installed for similarity visualizations. 
Run <pip install sklearn plotly>\")\n if set_wide_layout_CSS: _set_block_container_style()\n if title: st.header(title)\n if show_logo: StreamlitVizTracker.show_logo()\n if sub_tile: st.subheader(sub_tile)\n\n StreamlitVizTracker.loaded_word_embeding_pipes = []\n dist_metric_algos = distance_metrics()\n dist_algos = list(dist_metric_algos.keys())\n if 'haversine' in dist_algos: dist_algos.remove('haversine') # not applicable in >2D\n if 'precomputed' in dist_algos: dist_algos.remove('precomputed') # Not a dist\n cols = st.beta_columns(2)\n text1 = cols[0].text_input(\"Text or word1\", default_texts[0], key=key+'field_1')\n text2 = cols[1].text_input(\"Text or word2\", default_texts[1], key=key+'field_2') if len(default_texts) > 1 else cols[\n 1].text_input(\"Text or word2\", 'Please enter second string', key=key)\n # exp = st.sidebar.beta_expander(\"Select additional Embedding Models and distance metric to compare \")\n e_coms = StreamlitUtilsOS.find_all_embed_components(pipe)\n embed_algos_to_load = []\n embed_pipes = [pipe]\n dist_algo_selection = dist_metrics\n if show_algo_select:\n # emb_components_usable = Discoverer.get_components('embed')\n emb_components_usable = [e for e in Discoverer.get_components('embed', True, include_aliases=True) if\n 'chunk' not in e and 'sentence' not in e]\n loaded_embed_nlu_refs = []\n loaded_storage_refs = []\n loaded_embed_nlu_refs = list(set(loaded_embed_nlu_refs))\n\n for c in e_coms:\n if not hasattr(c.info, 'nlu_ref'): continue\n r = c.info.nlu_ref\n if 'en.' not in r and 'embed.' not in r and 'ner' not in r:\n loaded_embed_nlu_refs.append('en.embed.' + r)\n elif 'en.' in r and 'embed.' not in r and 'ner' not in r:\n r = r.split('en.')[0]\n loaded_embed_nlu_refs.append('en.embed.' + r)\n else:\n loaded_embed_nlu_refs.append(StorageRefUtils.extract_storage_ref(c))\n loaded_storage_refs.append(StorageRefUtils.extract_storage_ref(c))\n for p in StreamlitVizTracker.loaded_word_embeding_pipes:\n if p != pipe: loaded_embed_nlu_refs.append(p.nlu_ref)\n for l in loaded_embed_nlu_refs:\n if l not in emb_components_usable: emb_components_usable.append(l)\n # embed_algo_selection = exp.multiselect(\"Click to pick additional Embedding Algorithm\",options=emb_components_usable,default=loaded_embed_nlu_refs,key = key)\n # dist_algo_selection = exp.multiselect(\"Click to pick additional Distance Metric\", options=dist_algos, default=dist_metrics, key = key)\n emb_components_usable.sort()\n loaded_embed_nlu_refs.sort()\n dist_algos.sort()\n if model_select_position == 'side':\n embed_algo_selection = st.sidebar.multiselect(\n \"Pick additional Word Embeddings for the Similarity Matrix\", options=emb_components_usable,\n default=loaded_embed_nlu_refs, key=key)\n dist_algo_selection = st.sidebar.multiselect(\"Pick additional Similarity Metrics \", options=dist_algos,\n default=dist_metrics, key=key)\n else:\n exp = st.beta_expander(\"Pick additional Word Embeddings and Similarity Metrics\")\n embed_algo_selection = exp.multiselect(\"Pick additional Word Embeddings for the Similarity Matrix\",\n options=emb_components_usable, default=loaded_embed_nlu_refs,\n key=key)\n dist_algo_selection = exp.multiselect(\"Pick additional Similarity Metrics \", options=dist_algos,\n default=dist_metrics, key=key)\n embed_algos_to_load = list(set(embed_algo_selection) - set(loaded_embed_nlu_refs))\n\n for embedder in embed_algos_to_load: embed_pipes.append(nlu.load(embedder))\n\n if generate_code_sample: st.code(\n get_code_for_viz('SIMILARITY', [StreamlitUtilsOS.extract_name(p) 
for p in embed_pipes], default_texts))\n\n StreamlitVizTracker.loaded_word_embeding_pipes += embed_pipes\n similarity_metrics = {}\n embed_vector_info = {}\n cols_full = True\n col_index = 0\n # for p in embed_pipes :\n for p in StreamlitVizTracker.loaded_word_embeding_pipes:\n data1 = p.predict(text1, output_level='token', get_embeddings=True).dropna()\n data2 = p.predict(text2, output_level='token', get_embeddings=True).dropna()\n e_coms = StreamlitUtilsOS.find_all_embed_components(p)\n modelhub_links = [ModelHubUtils.get_url_by_nlu_refrence(c.info.nlu_ref) if hasattr(c.info,\n 'nlu_ref') else ModelHubUtils.get_url_by_nlu_refrence(\n '') for c in e_coms]\n e_cols = StreamlitUtilsOS.get_embed_cols(p)\n for num_emb, e_col in enumerate(e_cols):\n if col_index == num_cols - 1: cols_full = True\n if cols_full:\n cols = st.beta_columns(num_cols)\n col_index = 0\n cols_full = False\n else:\n col_index += 1\n tok1 = data1['token']\n tok2 = data2['token']\n emb1 = data1[e_col]\n emb2 = data2[e_col]\n\n def normalize_matrix(m):\n return np.nan_to_num(m / np.linalg.norm(m, axis=1, keepdims=True))\n\n embed_mat1 = normalize_matrix(np.array([x for x in emb1]))\n embed_mat2 = normalize_matrix(np.array([x for x in emb2]))\n # e_name = e_col.split('word_embedding_')[-1]\n e_name = e_coms[num_emb].info.nlu_ref if hasattr(e_coms[num_emb].info, 'nlu_ref') else \\\n e_col.split('word_embedding_')[-1] if 'en.' in e_col else e_col\n e_name = e_name.split('embed.')[-1] if 'en.' in e_name else e_name\n if 'ner' in e_name: e_name = loaded_storage_refs[num_emb]\n\n embed_vector_info[e_name] = {\"Vector Dimension \": embed_mat1.shape[1],\n \"Num Vectors\": embed_mat1.shape[0] + embed_mat1.shape[0],\n \"NLU_reference\": e_coms[num_emb].info.nlu_ref if hasattr(\n e_coms[num_emb].info, 'nlu_ref') else ' ',\n \"Spark_NLP_reference\": ModelHubUtils.NLU_ref_to_NLP_ref(\n e_coms[num_emb].info.nlu_ref if hasattr(e_coms[num_emb].info,\n 'nlu_ref') else ' '),\n \"Storage Reference\": loaded_storage_refs[num_emb],\n 'Modelhub info': modelhub_links[num_emb]}\n for dist_algo in dist_algo_selection:\n # scalar_similarities[e_col][dist_algo]={}\n sim_score = ((dist_metric_algos[dist_algo](embed_mat1, embed_mat2) - 1) * -1)\n\n sim_score = pd.DataFrame(sim_score)\n sim_score.index = tok1.values\n sim_score.columns = tok2.values\n sim_score.columns = StreamlitVizTracker.pad_duplicate_tokens(list(sim_score.columns))\n sim_score.index = StreamlitVizTracker.pad_duplicate_tokens(list(sim_score.index))\n if write_raw_pandas: st.write(sim_score, key=key)\n if sim_score.shape == (1, 1):\n sim_score = sim_score.iloc[0][0]\n sim_score = round(sim_score, 2)\n if sim_score > threshold:\n st.success(sim_score)\n st.success(f'Scalar Similarity={sim_score} for distance metric={dist_algo}')\n st.error(\n 'No similarity matrix for only 2 tokens. 
Try entering at least 1 sentences in a field')\n else:\n st.error(f'Scalar Similarity={sim_score} for distance metric={dist_algo}')\n else:\n ploty_avaiable = True\n # for tok emb, sum rows and norm by rows, then sum cols and norm by cols to generate a scalar from matrix\n scalar_sim_score = np.sum((np.sum(sim_score, axis=0) / sim_score.shape[0])) / sim_score.shape[1]\n scalar_sim_score = round(scalar_sim_score, 2)\n\n if display_scalar_similarities:\n if scalar_sim_score > threshold:\n st.success(f'Scalar Similarity :{scalar_sim_score} for distance metric={dist_algo}')\n else:\n st.error(\n f'Scalar Similarity :{scalar_sim_score} for embedder={e_col} distance metric={dist_algo}')\n if similarity_matrix:\n if ploty_avaiable:\n fig = px.imshow(sim_score, labels=dict(\n color=\"similarity\")) # , title=f'Simmilarity Matrix for embedding_model={e_name} distance metric={dist_algo}')\n # st.write(fig,key =key)\n similarity_metrics[f'{e_name}_{dist_algo}_similarity'] = {\n 'scalar_similarity': scalar_sim_score,\n 'dist_metric': dist_algo,\n 'embedding_model': e_name,\n 'modelhub_info': modelhub_links[num_emb],\n }\n subh = f\"\"\"Embedding-Model=`{e_name}`, Similarity-Score=`{scalar_sim_score}`, distance metric=`{dist_algo}`\"\"\"\n cols[col_index].markdown(subh)\n cols[col_index].write(fig, key=key)\n else:\n pass # todo fallback plots\n\n if display_similarity_summary:\n exp = st.beta_expander(\"Similarity summary\")\n exp.write(similarity_metrics)\n if display_embed_information:\n exp = st.beta_expander(\"Embedding vector information\")\n exp.write(embed_vector_info)\n if show_infos:\n # VizUtilsStreamlitOS.display_infos()\n StreamlitVizTracker.display_model_info(pipe.nlu_ref, pipes=[pipe])\n StreamlitVizTracker.display_footer()\n"
] | [
[
"numpy.sum",
"sklearn.metrics.pairwise.distance_metrics",
"pandas.DataFrame",
"numpy.array",
"numpy.linalg.norm"
]
] |
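`display_word_similarity` above is meant to be called from inside a Streamlit app with an NLU pipeline that exposes word embeddings. A hedged sketch follows: `'en.embed.bert'` is an example model reference, not one taken from the file, and the script must be launched with `streamlit run`.

```python
# Hypothetical Streamlit app body using the block above (run with `streamlit run app.py`).
# The NLU reference 'en.embed.bert' is an example; any word-embedding pipe should work.
import nlu
from nlu.pipe.viz.streamlit_viz.viz_building_blocks.word_similarity import (
    WordSimilarityStreamlitBlock,
)

pipe = nlu.load("en.embed.bert")
WordSimilarityStreamlitBlock.display_word_similarity(
    pipe,
    default_texts=("Donald Trump likes to party!", "Angela Merkel likes to party!"),
    threshold=0.5,
    model_select_position="side",
)
```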
joannetruong/habitat-api | [
"aad2fd7b8545dce44daefd4b7b3941672eb96ee3"
] | [
"evaluation/evaluate_simulation_coda_gan.py"
] | [
"import matplotlib.pyplot as plt\nimport argparse\nimport os\nfrom collections import defaultdict\n\nimport habitat\nimport numpy as np\nimport quaternion\nimport torch\nfrom evaluate_reality import load_model\nfrom gym.spaces.dict_space import Dict as SpaceDict\nfrom habitat.tasks.utils import cartesian_to_polar\nfrom habitat.utils.geometry_utils import quaternion_rotate_vector\nfrom habitat.utils.visualizations.utils import (images_to_video,\n observations_to_image)\nfrom habitat_baselines.common.baseline_registry import baseline_registry\nfrom habitat_baselines.common.env_utils import construct_envs\nfrom habitat_baselines.common.environments import get_env_class\nfrom habitat_baselines.common.utils import batch_obs, generate_video\nfrom habitat_baselines.config.default import get_config\nfrom habitat_sim import geo\nfrom habitat_sim.utils.common import quat_from_two_vectors, quat_rotate_vector\nfrom PIL import Image\nfrom predictor import Predictor\n\n\ndef quat_to_rad(rotation):\n heading_vector = quaternion_rotate_vector(\n rotation.inverse(), np.array([0, 0, -1])\n )\n\n phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]\n return phi\n\ndef create_state(position, rotation):\n rotation_mp3d_habitat = quat_from_two_vectors(geo.GRAVITY, np.array([0, 0, -1]))\n pt_mp3d = quat_rotate_vector(rotation_mp3d_habitat, position) # That point in the mp3d scene mesh coordinate frame.\n state_xyt = [pt_mp3d[0], pt_mp3d[1]]\n theta = quat_to_rad(rotation)\n state_xyt.append(theta)\n return state_xyt\n\ndef create_traj_labels(input_arr):\n r, c = input_arr.shape\n # labels: d_x, d_y, cos_d_t, sin_d_t\n diff = np.diff(input_arr, axis=0)\n labels_arr = np.zeros((r-1, 4))\n labels_arr[:, :2] = diff[:, :2]\n labels_arr[:, 2] = np.cos(diff[:, 2])\n labels_arr[:, 3] = np.sin(diff[:, 2])\n return labels_arr\n\ndef convert_embedding(input_arr_embed):\n # SIMULATOR_REALITY_ACTIONS = {\"stop\": 0, \"forward\": 1 , \"left\": 2 , \"right\": 3}\n ONE_HOT_ACTIONS = {\"0\": [0, 0, 0], \"1\": [0, 0, 1] , \"2\": [0, 1, 0] , \"3\": [1, 0, 0]}\n r, c = input_arr_embed.shape\n input_arr_oneHot = np.zeros((r, c+2))\n input_arr_oneHot[:, :4] = input_arr_embed[:, :4]\n for row in range(r):\n input_arr_oneHot[row, 4:] = ONE_HOT_ACTIONS[str(int(input_arr_embed[row, 4]))]\n ## if logging collisions\n # input_arr_oneHot[row, 4:7] = ONE_HOT_ACTIONS[str(int(input_arr_embed[row, 4]))]\n # input_arr_embed[:, -1] = input_arr_embed[:, 5]\n\n return input_arr_oneHot\n\ndef save_trajectory(data, datasplit, traj_dir, traj_ctr, datatype, embed_type=\"\"):\n pathend = datasplit + '_' + '%03d'%traj_ctr\n if embed_type != \"\":\n embed_type += \"_\"\n filename = os.path.join(traj_dir, datatype + '_LRF_' + embed_type + pathend)\n print('saving: ', filename)\n np.save(filename, data[:, :]) \n np.savetxt(filename + '.csv', data[:, :], delimiter=\",\")\n\ndef create_labels_trajectory(labels_arr):\n r, c = labels_arr.shape\n # input embed: x, y, cost, sint, a\n final_labels_arr = np.zeros((r, c+1))\n ## if logging collisions\n # input_arr_embed = np.zeros((r, c+2))\n final_labels_arr[:, :2] = labels_arr[:, :2]\n final_labels_arr[:, 2] = np.cos(labels_arr[:, 2])\n final_labels_arr[:, 3] = np.sin(labels_arr[:, 2])\n return final_labels_arr\n\ndef create_input_trajectory(final_input_arr):\n r, c = final_input_arr.shape\n # input embed: x, y, cost, sint, a\n input_arr_embed = np.zeros((r, c+1))\n ## if logging collisions\n # input_arr_embed = np.zeros((r, c+2))\n input_arr_embed[:, :2] = final_input_arr[:, :2]\n input_arr_embed[:, 
2] = np.cos(final_input_arr[:, 2])\n input_arr_embed[:, 3] = np.sin(final_input_arr[:, 2])\n input_arr_embed[:, 4] = final_input_arr[:, 3]\n ## if logging collisions\n # input_arr_embed[:, 5] = final_input_arr[:, 4]\n\n # input oneHot: x, y, cost, sint, a1, a2, a3\n input_arr_oneHot = convert_embedding(input_arr_embed)\n \n return input_arr_embed, input_arr_oneHot\n\ndef create_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return\n\ndef get_last_idx(dir_path):\n f = sorted(os.listdir(dir_path))\n if not f:\n ctr = 0\n else:\n ctr = int(f[-1].split('.')[0].split('_')[-1]) +1\n return ctr\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model-path\", type=str, required=True)\n# parser.add_argument(\"--noisy\", action=\"store_true\")\n parser.add_argument(\"--noise\", type=str, required=True)\n parser.add_argument(\"--save-imgs\", action=\"store_true\")\n parser.add_argument(\"--save-traj\", action=\"store_true\")\n parser.add_argument(\"--data-split\", type=str, required=True)\n parser.add_argument(\"--sensors\", type=str, required=True)\n parser.add_argument(\"--hidden-size\", type=int, required=True)\n parser.add_argument(\n \"--normalize-visual-inputs\", type=int, required=True, choices=[0, 1]\n )\n parser.add_argument(\"--depth-only\", action=\"store_true\")\n parser.add_argument(\"--use-gan\", action=\"store_true\")\n parser.add_argument(\"--gan-weights\", type=str, required=False)\n parser.add_argument(\"--noise-type\", type=str, required=True)\n parser.add_argument(\n \"--backbone\",\n type=str,\n required=True,\n choices=[\"resnet50\", \"se_resneXt50\"],\n )\n parser.add_argument(\"--num-recurrent-layers\", type=int, required=True)\n parser.add_argument(\n \"opts\",\n default=None,\n nargs=argparse.REMAINDER,\n help=\"Modify config options from command line\",\n )\n args = parser.parse_args()\n\n # Check torch version\n# vtorch = \"1.2.0\"\n#x assert torch.__version__ == vtorch, \"Please use torch {}\".format(vtorch)\n if args.noise_type == 'poisson_ilqr':\n if args.noise == 'all':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml\"\n elif args.noise == 'actuation':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_ilqr.yaml\"\n elif args.noise == 'sensors':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_poisson.yaml\"\n elif args.noise == 'no_noise':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml\"\n else:\n print('no noise specified. using all noise')\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml\"\n elif args.noise_type == 'speckle_mb':\n if args.noise == 'all':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_speckle_mb.yaml\"\n elif args.noise == 'actuation':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_mb.yaml\"\n elif args.noise == 'sensors':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_speckle.yaml\"\n elif args.noise == 'no_noise':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml\"\n else:\n print('no noise specified. 
using all noise')\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_poisson_ilqr.yaml\"\n elif args.noise_type == 'gaussian_proportional':\n if args.noise == 'all':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_gaussian_proportional.yaml\"\n elif args.noise == 'actuation':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_actuation_proportional.yaml\"\n elif args.noise == 'sensors':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_sensors_gaussian.yaml\"\n elif args.noise == 'no_noise':\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_no_noise.yaml\"\n else:\n print('no noise specified. using all noise')\n cfg_file = \"habitat_baselines/config/pointnav/ddppo_pointnav_coda_noisy_gaussian_proportional.yaml\"\n config = get_config(\n cfg_file, args.opts\n )\n if args.save_traj:\n datasplit = args.data_split.split('_')[1]\n split = 'train'\n if datasplit == 'med':\n split = 'test'\n if args.save_imgs:\n if args.noise!=\"no_noise\":\n depth_save_path = 'depth_' + config.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.NOISE_MODEL + '_' + split\n rgb_save_path = 'rgb_' + config.TASK_CONFIG.SIMULATOR.RGB_SENSOR.NOISE_MODEL + '_' + str(config.TASK_CONFIG.SIMULATOR.RGB_SENSOR.NOISE_MODEL_KWARGS.intensity_constant) + '_' + split\n else:\n depth_save_path = 'depth_no_noise_' + split\n rgb_save_path = 'rgb_no_noise_' + split\n if args.save_traj:\n if args.noise!=\"no_noise\":\n traj_save_path = 'traj_' + config.TASK_CONFIG.SIMULATOR.NOISE_MODEL.CONTROLLER + '_' + str(config.TASK_CONFIG.SIMULATOR.NOISE_MODEL.NOISE_MULTIPLIER) + '_' + split\n else:\n traj_save_path = 'traj_no_noise_' + split\n\n config.defrost()\n config.TASK_CONFIG.TASK.BASE_STATE = habitat.Config()\n config.TASK_CONFIG.TASK.BASE_STATE.TYPE = \"BaseState\"\n # Add the measure to the list of measures in use\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"BASE_STATE\")\n\n if args.sensors == \"\":\n config.SENSORS = []\n else:\n config.SENSORS = args.sensors.split(\",\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"COLLISIONS\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"SOFT_SPL\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"TOP_DOWN_MAP\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"EPISODE_DISTANCE\")\n config.freeze()\n\n envs = construct_envs(config, get_env_class(config.ENV_NAME))\n sensors_obs = envs.observation_spaces[0]\n\n if args.depth_only:\n config.defrost()\n config.SENSORS=[\"DEPTH_SENSOR\"]\n config.freeze()\n envs2 = construct_envs(config, get_env_class(config.ENV_NAME))\n sensors_obs = envs2.observation_spaces[0]\n\n device = (\n torch.device(\"cuda:{}\".format(config.TORCH_GPU_ID))\n if torch.cuda.is_available()\n else torch.device(\"cpu\")\n )\n model = load_model(\n path=args.model_path,\n observation_space=sensors_obs,\n # observation_space=envs.observation_spaces[0],\n action_space=envs.action_spaces[0],\n hidden_size=args.hidden_size,\n normalize_visual_inputs=bool(args.normalize_visual_inputs),\n backbone=args.backbone,\n num_recurrent_layers=args.num_recurrent_layers,\n device=device,\n )\n model.eval()\n\n if args.use_gan:\n predictor = Predictor(args.gan_weights)\n print('METRICS: ', config.TASK_CONFIG.TASK.MEASUREMENTS)\n\n metric_name = \"SPL\"\n metric_cfg = getattr(config.TASK_CONFIG.TASK, metric_name)\n measure_type = baseline_registry.get_measure(metric_cfg.TYPE)\n assert measure_type is not None, \"invalid measurement type {}\".format(\n metric_cfg.TYPE\n )\n metric_uuid = 
measure_type(None, None)._get_uuid()\n\n print('METRIC UUID: ', metric_uuid)\n observations = envs.reset()\n print('IMAGE TYPE: ' , observations[0][\"rgb\"].dtype, observations[0][\"depth\"].dtype)\n# print(observations[0][\"rgb\"], observations[0][\"depth\"])\n rgbd_img = np.dstack((observations[0][\"rgb\"], (observations[0][\"depth\"]*255).astype(np.uint8)))\n gan_observations = predictor(rgbd_img)\n observations[0][\"depth\"] = np.expand_dims((gan_observations[:,:,-1]/255).astype(np.float32), axis=2)\n# print('IMAGE TYPE: ' , observations[0][\"rgb\"].dtype, observations[0][\"depth\"].dtype)\n# print(observations[0][\"rgb\"], observations[0][\"depth\"])\n #observations[0][\"rgb\"] = gan_observations[:,:,:3][...,::-1]\n if args.depth_only:\n del observations[0][\"rgb\"]\n else:\n# print('GAN TYPE: ', gan_observations[:,:,:3][...,::-1].dtype)\n observations[0][\"rgb\"] = gan_observations[:,:,:3][...,::-1]\n batch = batch_obs(observations, device)\n\n current_episode_reward = torch.zeros(envs.num_envs, 1, device=device)\n\n test_recurrent_hidden_states = torch.zeros(\n model.net.num_recurrent_layers,\n config.NUM_PROCESSES,\n args.hidden_size,\n device=device,\n )\n prev_actions = torch.zeros(\n config.NUM_PROCESSES, 1, device=device, dtype=torch.long\n )\n not_done_masks = torch.zeros(config.NUM_PROCESSES, 1, device=device)\n\n stats_episodes = dict() # dict of dicts that stores stats per episode\n\n stats_actions = defaultdict(int)\n\n rgb_frames = [\n [] for _ in range(config.NUM_PROCESSES)\n ] # type: List[List[np.ndarray]]\n if len(config.VIDEO_OPTION) > 0:\n os.makedirs(config.VIDEO_DIR, exist_ok=True)\n\n sensor_path = 'sim_sensor_imgs'\n traj_path = 'sim_traj'\n if args.save_imgs:\n depth_dir = os.path.join(sensor_path, depth_save_path)\n rgb_dir = os.path.join(sensor_path, rgb_save_path)\n create_dir(depth_dir)\n create_dir(rgb_dir)\n img_ctr = get_last_idx(depth_dir)\n if args.save_traj:\n traj_dir = os.path.join(traj_path, traj_save_path)\n create_dir(traj_dir)\n traj_ctr = get_last_idx(traj_dir)\n\n ## not logging collisions\n final_input_arr = np.array([0, 0, 0, 0])\n ## if logging collisions\n # input_arr = np.array([0, 0, 0, 0, 0])\n # final_input_arr = np.array([0, 0, 0, 0, 0])\n tmp_labels_arr = np.array([0, 0, 0])\n prev_base_state = [0, 0, 0]\n num_actions = 0\n# datasplit = args.data_split.split('_')[1]\n print_once = True\n called_stop = False\n\n while (\n len(stats_episodes) < config.TEST_EPISODE_COUNT and envs.num_envs > 0\n ):\n current_episodes = envs.current_episodes()\n if print_once:\n print(\"Ep_id: \", current_episodes[0].episode_id, \"Start_pos: \", current_episodes[0].start_position, current_episodes[0].start_rotation, \"Goal_pos: \", current_episodes[0].goals[0].position)\n print_once = False\n\n with torch.no_grad():\n _, actions, _, test_recurrent_hidden_states = model.act(\n batch,\n test_recurrent_hidden_states,\n prev_actions,\n not_done_masks,\n deterministic=False,\n )\n\n prev_actions.copy_(actions)\n\n outputs = envs.step([a[0].item() for a in actions])\n num_actions +=1\n for a in actions:\n stats_actions[a[0].item()] += 1\n\n observations, rewards, dones, infos = [list(x) for x in zip(*outputs)]\n if args.save_imgs:\n depth_obs = observations[0][\"depth\"] \n depth_obs = np.squeeze(depth_obs)\n depth_img = Image.fromarray((depth_obs * 255).astype(np.uint8), mode=\"L\")\n depth_img.save(os.path.join(depth_dir, \"real_depth_\" + \"%05d\"%img_ctr + \".jpg\"), \"JPEG\")\n\n rgb_obs = observations[0][\"rgb\"]\n rgb_img = Image.fromarray(rgb_obs, 
mode=\"RGB\")\n rgb_img.save(os.path.join(rgb_dir, \"real_rgb_\" + \"%05d\"%img_ctr + \".jpg\"), \"JPEG\")\n rgbd_img = np.dstack((observations[0][\"rgb\"], (observations[0][\"depth\"]*255).astype(np.uint8)))\n gan_observations = predictor(rgbd_img)\n observations[0][\"rgb\"] = gan_observations[:,:,:3][...,::-1]\n observations[0][\"depth\"] = np.expand_dims((gan_observations[:,:,-1]/255).astype(np.float32), axis=2)\n if args.save_imgs:\n depth_obs = observations[0][\"depth\"] \n depth_obs = np.squeeze(depth_obs)\n depth_img = Image.fromarray((depth_obs * 255).astype(np.uint8), mode=\"L\")\n depth_img.save(os.path.join(depth_dir, \"sim_depth_\" + \"%05d\"%img_ctr + \".jpg\"), \"JPEG\")\n\n rgb_obs = observations[0][\"rgb\"]\n rgb_img = Image.fromarray(rgb_obs, mode=\"RGB\")\n rgb_img.save(os.path.join(rgb_dir, \"sim_rgb_\" + \"%05d\"%img_ctr + \".jpg\"), \"JPEG\")\n img_ctr +=1\n if args.depth_only:\n del observations[0][\"rgb\"]\n batch = batch_obs(observations, device)\n not_done_masks = torch.tensor(\n [[0.0] if done else [1.0] for done in dones],\n dtype=torch.float,\n device=device,\n )\n\n rewards = torch.tensor(\n rewards, dtype=torch.float, device=device\n ).unsqueeze(1)\n current_episode_reward += rewards\n next_episodes = envs.current_episodes()\n envs_to_pause = []\n n_envs = envs.num_envs\n for i in range(n_envs):\n if (\n next_episodes[i].scene_id,\n next_episodes[i].episode_id,\n ) in stats_episodes:\n envs_to_pause.append(i)\n # x, y, t, a\n input_row = prev_base_state + [actions[i][0].cpu().detach().tolist()]\n #input_row = prev_base_state + [actions[i][0].cpu().detach().tolist()] + [int(infos[i][\"collisions\"][\"is_collision\"])]\n curr_state = create_state(infos[i][\"base_state\"]['position'], infos[i][\"base_state\"]['rotation'])\n delta_row = np.subtract(curr_state, prev_base_state)\n prev_base_state = curr_state\n\n print(input_row + [int(infos[i][\"collisions\"][\"is_collision\"])])\n if int(infos[i][\"collisions\"][\"is_collision\"]) == 0:\n final_input_arr = np.vstack((final_input_arr, input_row))\n tmp_labels_arr = np.vstack((tmp_labels_arr, delta_row))\n\n# plt.ioff()\n# _ = plt.hist(observations[i][\"depth\"].flatten(), bins='auto')\n# plt.savefig('hist.jpg')\n # TODO: save only good trajectories\n\n # episode ended\n if not_done_masks[i].item() == 0:\n episode_stats = dict()\n episode_stats[metric_uuid] = infos[i][metric_uuid]\n episode_stats[\"success\"] = int(infos[i][metric_uuid] > 0)\n episode_stats[\"reward\"] = current_episode_reward[i].item()\n if actions[i][0].cpu().detach().tolist() == 0:\n called_stop = True\n\n # if infos[i][\"collisions\"] == 0:\n # final_input_arr = np.vstack((final_input_arr, input_arr[2:-1, :]))\n # final_labels_arr = np.vstack((final_labels_arr, labels_arr[2:-1,:]))\n # final_input_arr = np.vstack((final_input_arr, input_arr[2:-1, :]))\n # final_labels_arr = np.vstack((final_labels_arr, create_traj_labels(input_arr[2:, :])))\n\n print(final_input_arr.ndim)\n if final_input_arr.ndim > 1:\n print(\"Final Shape: {}\".format(final_input_arr[2:-1, :].shape))\n input_arr_embed, input_arr_oneHot = create_input_trajectory(final_input_arr[2:-1, :])\n final_labels_arr = create_labels_trajectory(tmp_labels_arr[2:-1, :])\n if args.save_traj:\n save_trajectory(input_arr_embed, datasplit, traj_dir, traj_ctr, 'input', embed_type=\"embed\")\n save_trajectory(input_arr_oneHot, datasplit, traj_dir, traj_ctr, 'input', embed_type=\"oneHot\")\n save_trajectory(final_labels_arr, datasplit, traj_dir, traj_ctr, 'labels', embed_type=\"\")\n traj_ctr 
+=1\n\n print(\"# Actions: {}\".format(num_actions))\n print(\"# Collisions: {}\".format(infos[i][\"collisions\"][\"count\"]))\n print(\"Success: {}\".format(episode_stats[\"success\"]))\n print(\"Agent Episode Distance: {}\".format(infos[i]['episode_distance']['agent_episode_distance'])) #TODO\n print(\"Final Distance to Goal: {}\".format(infos[i]['episode_distance']['goal_distance'])) #TODO\n print(\"SPL: {}\".format(episode_stats[metric_uuid]))\n print(\"Soft SPL: {}\".format(infos[i][\"softspl\"]))\n print(\"Called Stop: {}\".format(called_stop))\n\n current_episode_reward[i] = 0\n ## not logging collisions\n final_input_arr = np.array([0, 0, 0, 0])\n ## if logging collisions\n # input_arr = np.array([0, 0, 0, 0, 0])\n # final_input_arr = np.array([0, 0, 0, 0, 0])\n tmp_labels_arr = np.array([0, 0, 0])\n prev_base_state = [0, 0, 0]\n num_actions = 0\n print_once = True\n called_stop = False\n\n # use scene_id + episode_id as unique id for storing stats\n stats_episodes[\n (\n current_episodes[i].scene_id,\n current_episodes[i].episode_id,\n )\n ] = episode_stats\n\n if len(config.VIDEO_OPTION) > 0:\n metric_value = episode_stats[metric_uuid]\n video_name = (\n f\"episode_{current_episodes[i].episode_id}\"\n f\"_{metric_name}_{metric_value:.2f}\"\n )\n images_to_video(\n rgb_frames[i], config.VIDEO_DIR, video_name\n )\n\n rgb_frames[i] = []\n\n print(\"Episodes finished: {}\".format(len(stats_episodes)))\n\n # episode continues\n elif len(config.VIDEO_OPTION) > 0:\n frame = observations_to_image(observations[i], infos[i])\n rgb_frames[i].append(frame)\n\n # pausing self.envs with no new episode\n if len(envs_to_pause) > 0:\n state_index = list(range(envs.num_envs))\n for idx in reversed(envs_to_pause):\n state_index.pop(idx)\n envs.pause_at(idx)\n\n # indexing along the batch dimensions\n test_recurrent_hidden_states = test_recurrent_hidden_states[\n :, state_index\n ]\n not_done_masks = not_done_masks[state_index]\n current_episode_reward = current_episode_reward[state_index]\n prev_actions = prev_actions[state_index]\n\n for k, v in batch.items():\n batch[k] = v[state_index]\n\n if len(config.VIDEO_OPTION) > 0:\n rgb_frames = [rgb_frames[i] for i in state_index]\n\n aggregated_stats = dict()\n for stat_key in next(iter(stats_episodes.values())).keys():\n aggregated_stats[stat_key] = sum(\n [v[stat_key] for v in stats_episodes.values()]\n )\n num_episodes = len(stats_episodes)\n\n episode_reward_mean = aggregated_stats[\"reward\"] / num_episodes\n episode_metric_mean = aggregated_stats[metric_uuid] / num_episodes\n episode_success_mean = aggregated_stats[\"success\"] / num_episodes\n\n print(f\"Number of episodes: {num_episodes}\")\n print(f\"Average episode reward: {episode_reward_mean:.6f}\")\n print(f\"Average episode success: {episode_success_mean:.6f}\")\n print(f\"Average episode {metric_uuid}: {episode_metric_mean:.6f}\")\n\n print(\"Stats actions:\", stats_actions)\n\n envs.close()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.vstack",
"numpy.save",
"numpy.zeros",
"numpy.diff",
"numpy.squeeze",
"numpy.savetxt",
"numpy.subtract",
"torch.no_grad",
"torch.tensor",
"numpy.cos",
"torch.device",
"torch.cuda.is_available",
"torch.zeros",
"numpy.sin",
"numpy.array"
]
] |
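The evaluation script above logs trajectories as rows of `[x, y, cos θ, sin θ, action]` and expands the discrete action into the 3-bit one-hot code used by `convert_embedding`. The standalone snippet below reproduces that encoding on toy data; it is an illustration written against the logic shown above, not code imported from the script.

```python
# Standalone illustration of the trajectory encoding logged by the script above.
import numpy as np

ONE_HOT_ACTIONS = {"0": [0, 0, 0], "1": [0, 0, 1], "2": [0, 1, 0], "3": [1, 0, 0]}

def embed_rows(states_xyt, actions):
    """states_xyt: (n, 3) array of [x, y, theta]; actions: (n,) ints in {0..3}."""
    embed = np.zeros((len(actions), 5))
    embed[:, :2] = states_xyt[:, :2]          # x, y
    embed[:, 2] = np.cos(states_xyt[:, 2])    # cos(theta)
    embed[:, 3] = np.sin(states_xyt[:, 2])    # sin(theta)
    embed[:, 4] = actions                     # raw discrete action id

    one_hot = np.zeros((len(actions), 7))
    one_hot[:, :4] = embed[:, :4]
    for i, a in enumerate(actions):
        one_hot[i, 4:] = ONE_HOT_ACTIONS[str(int(a))]
    return embed, one_hot

# Toy data, for illustration only.
states = np.array([[0.0, 0.0, 0.0], [0.2, 0.0, np.pi / 2]])
acts = np.array([1, 2])
embed, one_hot = embed_rows(states, acts)
```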
casperg92/MaSIF_colab | [
"f030061276cc21b812bb3be652124b75dcdf7e5b"
] | [
"data.py"
] | [
"import torch\nfrom torch_geometric.data import InMemoryDataset, Data, DataLoader\nfrom torch_geometric.transforms import Compose\nimport numpy as np\nfrom scipy.spatial.transform import Rotation\nimport math\nimport urllib.request\nimport tarfile\nfrom pathlib import Path\nimport requests\nfrom data_preprocessing.convert_pdb2npy import convert_pdbs\nfrom data_preprocessing.convert_ply2npy import convert_plys\n\ntensor = torch.FloatTensor\ninttensor = torch.LongTensor\n\n\ndef numpy(x):\n return x.detach().cpu().numpy()\n\n\ndef iface_valid_filter(protein_pair):\n labels1 = protein_pair.y_p1.reshape(-1)\n labels2 = protein_pair.y_p2.reshape(-1)\n valid1 = (\n (torch.sum(labels1) < 0.75 * len(labels1))\n and (torch.sum(labels1) > 30)\n and (torch.sum(labels1) > 0.01 * labels2.shape[0])\n )\n valid2 = (\n (torch.sum(labels2) < 0.75 * len(labels2))\n and (torch.sum(labels2) > 30)\n and (torch.sum(labels2) > 0.01 * labels1.shape[0])\n )\n\n return valid1 and valid2\n\n\nclass RandomRotationPairAtoms(object):\n r\"\"\"Randomly rotate a protein\"\"\"\n\n def __call__(self, data):\n R1 = tensor(Rotation.random().as_matrix())\n R2 = tensor(Rotation.random().as_matrix())\n\n data.atom_coords_p1 = torch.matmul(R1, data.atom_coords_p1.T).T\n data.xyz_p1 = torch.matmul(R1, data.xyz_p1.T).T\n data.normals_p1 = torch.matmul(R1, data.normals_p1.T).T\n\n data.atom_coords_p2 = torch.matmul(R2, data.atom_coords_p2.T).T\n data.xyz_p2 = torch.matmul(R2, data.xyz_p2.T).T\n data.normals_p2 = torch.matmul(R2, data.normals_p2.T).T\n\n data.rand_rot1 = R1\n data.rand_rot2 = R2\n return data\n\n def __repr__(self):\n return \"{}()\".format(self.__class__.__name__)\n\n\nclass CenterPairAtoms(object):\n r\"\"\"Centers a protein\"\"\"\n\n def __call__(self, data):\n atom_center1 = data.atom_coords_p1.mean(dim=-2, keepdim=True)\n atom_center2 = data.atom_coords_p2.mean(dim=-2, keepdim=True)\n\n data.atom_coords_p1 = data.atom_coords_p1 - atom_center1\n data.atom_coords_p2 = data.atom_coords_p2 - atom_center2\n\n data.xyz_p1 = data.xyz_p1 - atom_center1\n data.xyz_p2 = data.xyz_p2 - atom_center2\n\n data.atom_center1 = atom_center1\n data.atom_center2 = atom_center2\n return data\n\n def __repr__(self):\n return \"{}()\".format(self.__class__.__name__)\n\n\nclass NormalizeChemFeatures(object):\n r\"\"\"Centers a protein\"\"\"\n\n def __call__(self, data):\n pb_upper = 3.0\n pb_lower = -3.0\n\n chem_p1 = data.chemical_features_p1\n chem_p2 = data.chemical_features_p2\n\n pb_p1 = chem_p1[:, 0]\n pb_p2 = chem_p2[:, 0]\n hb_p1 = chem_p1[:, 1]\n hb_p2 = chem_p2[:, 1]\n hp_p1 = chem_p1[:, 2]\n hp_p2 = chem_p2[:, 2]\n\n # Normalize PB\n pb_p1 = torch.clamp(pb_p1, pb_lower, pb_upper)\n pb_p1 = (pb_p1 - pb_lower) / (pb_upper - pb_lower)\n pb_p1 = 2 * pb_p1 - 1\n\n pb_p2 = torch.clamp(pb_p2, pb_lower, pb_upper)\n pb_p2 = (pb_p2 - pb_lower) / (pb_upper - pb_lower)\n pb_p2 = 2 * pb_p2 - 1\n\n # Normalize HP\n hp_p1 = hp_p1 / 4.5\n hp_p2 = hp_p2 / 4.5\n\n data.chemical_features_p1 = torch.stack([pb_p1, hb_p1, hp_p1]).T\n data.chemical_features_p2 = torch.stack([pb_p2, hb_p2, hp_p2]).T\n\n return data\n\n def __repr__(self):\n return \"{}()\".format(self.__class__.__name__)\n\n\ndef load_protein_npy(pdb_id, data_dir, center=False, single_pdb=False):\n \"\"\"Loads a protein surface mesh and its features\"\"\"\n\n # Load the data, and read the connectivity information:\n triangles = (\n None\n if single_pdb\n else inttensor(np.load(data_dir / (pdb_id + \"_triangles.npy\"))).T\n )\n # Normalize the point cloud, as specified by the 
user:\n points = None if single_pdb else tensor(np.load(data_dir / (pdb_id + \"_xyz.npy\")))\n center_location = None if single_pdb else torch.mean(points, axis=0, keepdims=True)\n\n atom_coords = tensor(np.load(data_dir / (pdb_id + \"_atomxyz.npy\")))\n atom_types = tensor(np.load(data_dir / (pdb_id + \"_atomtypes.npy\")))\n\n if center:\n points = points - center_location\n atom_coords = atom_coords - center_location\n\n # Interface labels\n iface_labels = (\n None\n if single_pdb\n else tensor(np.load(data_dir / (pdb_id + \"_iface_labels.npy\")).reshape((-1, 1)))\n )\n\n # Features\n chemical_features = (\n None if single_pdb else tensor(np.load(data_dir / (pdb_id + \"_features.npy\")))\n )\n\n # Normals\n normals = (\n None if single_pdb else tensor(np.load(data_dir / (pdb_id + \"_normals.npy\")))\n )\n\n protein_data = Data(\n xyz=points,\n face=triangles,\n chemical_features=chemical_features,\n y=iface_labels,\n normals=normals,\n center_location=center_location,\n num_nodes=None if single_pdb else points.shape[0],\n atom_coords=atom_coords,\n atom_types=atom_types,\n )\n return protein_data\n\n\nclass PairData(Data):\n def __init__(\n self,\n xyz_p1=None,\n xyz_p2=None,\n face_p1=None,\n face_p2=None,\n chemical_features_p1=None,\n chemical_features_p2=None,\n y_p1=None,\n y_p2=None,\n normals_p1=None,\n normals_p2=None,\n center_location_p1=None,\n center_location_p2=None,\n atom_coords_p1=None,\n atom_coords_p2=None,\n atom_types_p1=None,\n atom_types_p2=None,\n atom_center1=None,\n atom_center2=None,\n rand_rot1=None,\n rand_rot2=None,\n ):\n super().__init__()\n self.xyz_p1 = xyz_p1\n self.xyz_p2 = xyz_p2\n self.face_p1 = face_p1\n self.face_p2 = face_p2\n\n self.chemical_features_p1 = chemical_features_p1\n self.chemical_features_p2 = chemical_features_p2\n self.y_p1 = y_p1\n self.y_p2 = y_p2\n self.normals_p1 = normals_p1\n self.normals_p2 = normals_p2\n self.center_location_p1 = center_location_p1\n self.center_location_p2 = center_location_p2\n self.atom_coords_p1 = atom_coords_p1\n self.atom_coords_p2 = atom_coords_p2\n self.atom_types_p1 = atom_types_p1\n self.atom_types_p2 = atom_types_p2\n self.atom_center1 = atom_center1\n self.atom_center2 = atom_center2\n self.rand_rot1 = rand_rot1\n self.rand_rot2 = rand_rot2\n\n def __inc__(self, key, value):\n if key == \"face_p1\":\n return self.xyz_p1.size(0)\n if key == \"face_p2\":\n return self.xyz_p2.size(0)\n else:\n return super(PairData, self).__inc__(key, value)\n\n def __cat_dim__(self, key, value):\n if (\"index\" in key) or (\"face\" in key):\n return 1\n else:\n return 0\n\n\ndef load_protein_pair(pdb_id, data_dir,single_pdb=False):\n \"\"\"Loads a protein surface mesh and its features\"\"\"\n pspl = pdb_id.split(\"_\")\n p1_id = pspl[0] + \"_\" + pspl[1]\n p2_id = pspl[0] + \"_\" + pspl[2]\n\n p1 = load_protein_npy(p1_id, data_dir, center=False,single_pdb=single_pdb)\n p2 = load_protein_npy(p2_id, data_dir, center=False,single_pdb=single_pdb)\n # pdist = ((p1['xyz'][:,None,:]-p2['xyz'][None,:,:])**2).sum(-1).sqrt()\n # pdist = pdist<2.0\n # y_p1 = (pdist.sum(1)>0).to(torch.float).reshape(-1,1)\n # y_p2 = (pdist.sum(0)>0).to(torch.float).reshape(-1,1)\n y_p1 = p1[\"y\"]\n y_p2 = p2[\"y\"]\n\n protein_pair_data = PairData(\n xyz_p1=p1[\"xyz\"],\n xyz_p2=p2[\"xyz\"],\n face_p1=p1[\"face\"],\n face_p2=p2[\"face\"],\n chemical_features_p1=p1[\"chemical_features\"],\n chemical_features_p2=p2[\"chemical_features\"],\n y_p1=y_p1,\n y_p2=y_p2,\n normals_p1=p1[\"normals\"],\n normals_p2=p2[\"normals\"],\n 
center_location_p1=p1[\"center_location\"],\n center_location_p2=p2[\"center_location\"],\n atom_coords_p1=p1[\"atom_coords\"],\n atom_coords_p2=p2[\"atom_coords\"],\n atom_types_p1=p1[\"atom_types\"],\n atom_types_p2=p2[\"atom_types\"],\n )\n return protein_pair_data\n\n\nclass ProteinPairsSurfaces(InMemoryDataset):\n url = \"\"\n\n def __init__(self, root, ppi=False, train=True, transform=None, pre_transform=None):\n self.ppi = ppi\n super(ProteinPairsSurfaces, self).__init__(root, transform, pre_transform)\n path = self.processed_paths[0] if train else self.processed_paths[1]\n self.data, self.slices = torch.load(path)\n\n @property\n def raw_file_names(self):\n return \"masif_site_masif_search_pdbs_and_ply_files.tar.gz\"\n\n @property\n def processed_file_names(self):\n if not self.ppi:\n file_names = [\n \"training_pairs_data.pt\",\n \"testing_pairs_data.pt\",\n \"training_pairs_data_ids.npy\",\n \"testing_pairs_data_ids.npy\",\n ]\n else:\n file_names = [\n \"training_pairs_data_ppi.pt\",\n \"testing_pairs_data_ppi.pt\",\n \"training_pairs_data_ids_ppi.npy\",\n \"testing_pairs_data_ids_ppi.npy\",\n ]\n return file_names\n\n def download(self):\n url = 'https://zenodo.org/record/2625420/files/masif_site_masif_search_pdbs_and_ply_files.tar.gz'\n target_path = self.raw_paths[0]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(target_path, 'wb') as f:\n f.write(response.raw.read())\n \n #raise RuntimeError(\n # \"Dataset not found. Please download {} from {} and move it to {}\".format(\n # self.raw_file_names, self.url, self.raw_dir\n # )\n #)\n\n def process(self):\n pdb_dir = Path(self.root) / \"raw\" / \"01-benchmark_pdbs\"\n surf_dir = Path(self.root) / \"raw\" / \"01-benchmark_surfaces\"\n protein_dir = Path(self.root) / \"raw\" / \"01-benchmark_surfaces_npy\"\n lists_dir = Path('./lists')\n\n # Untar surface files\n if not (pdb_dir.exists() and surf_dir.exists()):\n tar = tarfile.open(self.raw_paths[0])\n tar.extractall(self.raw_dir)\n tar.close()\n\n if not protein_dir.exists():\n protein_dir.mkdir(parents=False, exist_ok=False)\n convert_plys(surf_dir,protein_dir)\n convert_pdbs(pdb_dir,protein_dir)\n\n with open(lists_dir / \"training.txt\") as f_tr, open(\n lists_dir / \"testing.txt\"\n ) as f_ts:\n training_list = sorted(f_tr.read().splitlines())\n testing_list = sorted(f_ts.read().splitlines())\n\n with open(lists_dir / \"training_ppi.txt\") as f_tr, open(\n lists_dir / \"testing_ppi.txt\"\n ) as f_ts:\n training_pairs_list = sorted(f_tr.read().splitlines())\n testing_pairs_list = sorted(f_ts.read().splitlines())\n pairs_list = sorted(training_pairs_list + testing_pairs_list)\n\n if not self.ppi:\n training_pairs_list = []\n for p in pairs_list:\n pspl = p.split(\"_\")\n p1 = pspl[0] + \"_\" + pspl[1]\n p2 = pspl[0] + \"_\" + pspl[2]\n\n if p1 in training_list:\n training_pairs_list.append(p)\n if p2 in training_list:\n training_pairs_list.append(pspl[0] + \"_\" + pspl[2] + \"_\" + pspl[1])\n\n testing_pairs_list = []\n for p in pairs_list:\n pspl = p.split(\"_\")\n p1 = pspl[0] + \"_\" + pspl[1]\n p2 = pspl[0] + \"_\" + pspl[2]\n if p1 in testing_list:\n testing_pairs_list.append(p)\n if p2 in testing_list:\n testing_pairs_list.append(pspl[0] + \"_\" + pspl[2] + \"_\" + pspl[1])\n\n # # Read data into huge `Data` list.\n training_pairs_data = []\n training_pairs_data_ids = []\n for p in training_pairs_list:\n try:\n protein_pair = load_protein_pair(p, protein_dir)\n except FileNotFoundError:\n continue\n 
training_pairs_data.append(protein_pair)\n training_pairs_data_ids.append(p)\n\n testing_pairs_data = []\n testing_pairs_data_ids = []\n for p in testing_pairs_list:\n try:\n protein_pair = load_protein_pair(p, protein_dir)\n except FileNotFoundError:\n continue\n testing_pairs_data.append(protein_pair)\n testing_pairs_data_ids.append(p)\n\n if self.pre_filter is not None:\n training_pairs_data = [\n data for data in training_pairs_data if self.pre_filter(data)\n ]\n testing_pairs_data = [\n data for data in testing_pairs_data if self.pre_filter(data)\n ]\n\n if self.pre_transform is not None:\n training_pairs_data = [\n self.pre_transform(data) for data in training_pairs_data\n ]\n testing_pairs_data = [\n self.pre_transform(data) for data in testing_pairs_data\n ]\n\n training_pairs_data, training_pairs_slices = self.collate(training_pairs_data)\n torch.save(\n (training_pairs_data, training_pairs_slices), self.processed_paths[0]\n )\n np.save(self.processed_paths[2], training_pairs_data_ids)\n testing_pairs_data, testing_pairs_slices = self.collate(testing_pairs_data)\n torch.save((testing_pairs_data, testing_pairs_slices), self.processed_paths[1])\n np.save(self.processed_paths[3], testing_pairs_data_ids)\n"
] | [
[
"torch.sum",
"numpy.load",
"numpy.save",
"torch.stack",
"scipy.spatial.transform.Rotation.random",
"torch.load",
"torch.clamp",
"torch.save",
"torch.matmul",
"torch.mean"
]
] |
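A hedged sketch of wiring together the dataset and transforms defined in `data.py` above. The root directory is a placeholder, and the first instantiation will attempt to download and preprocess the Zenodo archive referenced in `download()`, which is slow.

```python
# Sketch only: assembling ProteinPairsSurfaces with the transforms above.
# "surface_data" is a placeholder root directory, not a path from the record.
from torch_geometric.data import DataLoader
from torch_geometric.transforms import Compose
from data import (ProteinPairsSurfaces, CenterPairAtoms, RandomRotationPairAtoms,
                  NormalizeChemFeatures, iface_valid_filter)

transforms = Compose([CenterPairAtoms(),
                      RandomRotationPairAtoms(),
                      NormalizeChemFeatures()])

train_set = ProteinPairsSurfaces("surface_data", ppi=False, train=True,
                                 transform=transforms)
# Drop pairs whose interface labels fail the validity check defined above.
train_set = [pair for pair in train_set if iface_valid_filter(pair)]

loader = DataLoader(train_set, batch_size=1,
                    follow_batch=["xyz_p1", "xyz_p2"])  # batch the two proteins separately
```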
DaoiestFire/self-supervised-learning-of-object-movement | [
"4db59bf352efd946661feffc7afc4630c6731852"
] | [
"data/datasets.py"
] | [
"import os\r\nimport glob\r\nimport random\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom imageio import mimread\r\nfrom skimage.color import gray2rgb\r\nfrom skimage import io, img_as_float32\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom torch.utils.data import Dataset\r\nfrom data.augmentation import AllAugmentationTransform\r\n\r\n\r\ndef read_video(name, frame_shape):\r\n \"\"\"\r\n Read video which can be:\r\n - an image of concatenated frames\r\n - '.mp4' and'.gif'\r\n - folder with videos\r\n \"\"\"\r\n\r\n if os.path.isdir(name):\r\n frames = sorted(os.listdir(name))\r\n num_frames = len(frames)\r\n video_array = np.array(\r\n [img_as_float32(io.imread(os.path.join(name, frames[idx]))) for idx in range(num_frames)])\r\n elif name.lower().endswith('.png') or name.lower().endswith('.jpg'):\r\n image = io.imread(name)\r\n\r\n if len(image.shape) == 2 or image.shape[2] == 1:\r\n image = gray2rgb(image)\r\n\r\n if image.shape[2] == 4:\r\n image = image[..., :3]\r\n\r\n image = img_as_float32(image)\r\n\r\n video_array = np.moveaxis(image, 1, 0)\r\n\r\n video_array = video_array.reshape((-1,) + frame_shape)\r\n video_array = np.moveaxis(video_array, 1, 2)\r\n elif name.lower().endswith('.gif') or name.lower().endswith('.mp4') or name.lower().endswith('.mov'):\r\n video = np.array(mimread(name))\r\n if len(video.shape) == 3:\r\n video = np.array([gray2rgb(frame) for frame in video])\r\n if video.shape[-1] == 4:\r\n video = video[..., :3]\r\n video_array = img_as_float32(video)\r\n else:\r\n raise Exception(\"Unknown file extensions %s\" % name)\r\n\r\n return video_array\r\n\r\n\r\nclass FramesDataset(Dataset):\r\n \"\"\"\r\n Dataset of videos, each video can be represented as:\r\n - an image of concatenated frames\r\n - '.mp4' or '.gif'\r\n - folder with all frames\r\n \"\"\"\r\n\r\n def __init__(self, root_dir, frame_shape=(256, 256, 3), id_sampling=False, is_train=True,\r\n random_seed=0, pairs_list=None, augmentation_params=None):\r\n self.root_dir = root_dir\r\n self.videos = os.listdir(root_dir)\r\n self.frame_shape = tuple(frame_shape)\r\n self.pairs_list = pairs_list\r\n self.id_sampling = id_sampling\r\n if os.path.exists(os.path.join(root_dir, 'train')):\r\n assert os.path.exists(os.path.join(root_dir, 'test'))\r\n print(\"Use predefined train-test split.\")\r\n if id_sampling:\r\n train_videos = {os.path.basename(video).split('#')[0] for video in\r\n os.listdir(os.path.join(root_dir, 'train'))}\r\n train_videos = list(train_videos)\r\n else:\r\n train_videos = os.listdir(os.path.join(root_dir, 'train'))\r\n test_videos = os.listdir(os.path.join(root_dir, 'test'))\r\n self.root_dir = os.path.join(self.root_dir, 'train' if is_train else 'test')\r\n else:\r\n print(\"Use random train-test split.\")\r\n train_videos, test_videos = train_test_split(self.videos, random_state=random_seed, test_size=0.2)\r\n\r\n if is_train:\r\n self.videos = train_videos\r\n else:\r\n self.videos = test_videos\r\n\r\n self.is_train = is_train\r\n\r\n if self.is_train:\r\n self.transform = AllAugmentationTransform(**augmentation_params)\r\n else:\r\n self.transform = None\r\n\r\n def __len__(self):\r\n return len(self.videos)\r\n\r\n def __getitem__(self, idx):\r\n if self.is_train and self.id_sampling:\r\n name = self.videos[idx]\r\n path = np.random.choice(glob.glob(os.path.join(self.root_dir, name + '*.mp4')))\r\n else:\r\n name = self.videos[idx]\r\n path = os.path.join(self.root_dir, name)\r\n\r\n video_name = os.path.basename(path)\r\n\r\n if self.is_train and 
os.path.isdir(path):\r\n frames = os.listdir(path)\r\n num_frames = len(frames)\r\n frame_idx = np.sort(np.random.choice(num_frames, replace=True, size=2))\r\n video_array = [img_as_float32(io.imread(os.path.join(path, frames[idx]))) for idx in frame_idx]\r\n else:\r\n video_array = read_video(path, frame_shape=self.frame_shape)\r\n num_frames = len(video_array)\r\n frame_idx = np.sort(np.random.choice(num_frames, replace=True, size=2)) if self.is_train else range(\r\n num_frames)\r\n video_array = list(video_array[frame_idx])\r\n\r\n if self.transform is not None:\r\n video_array = self.transform(video_array)\r\n\r\n out = dict()\r\n if self.is_train:\r\n source = np.array(video_array[0], dtype='float32')\r\n driving = np.array(video_array[1], dtype='float32')\r\n\r\n out['source'] = source.transpose((2, 0, 1))\r\n out['driving'] = driving.transpose((2, 0, 1))\r\n else:\r\n video = np.array(video_array, dtype='float32')\r\n out['video'] = video.transpose((3, 0, 1, 2))\r\n\r\n out['name'] = video_name\r\n\r\n return out\r\n\r\n\r\nclass PairedDataset(Dataset):\r\n \"\"\"Dataset of pairs for animation.\"\"\"\r\n\r\n def __init__(self, initial_dataset, number_of_pairs, seed=0):\r\n self.initial_dataset = initial_dataset\r\n pairs_list = self.initial_dataset.pairs_list\r\n\r\n np.random.seed(seed)\r\n\r\n if pairs_list is None:\r\n max_idx = min(number_of_pairs, len(initial_dataset))\r\n nx, ny = max_idx, max_idx\r\n xy = np.mgrid[:nx, :ny].reshape(2, -1).T\r\n number_of_pairs = min(xy.shape[0], number_of_pairs)\r\n self.pairs = xy.take(np.random.choice(xy.shape[0], number_of_pairs, replace=False), axis=0)\r\n else:\r\n videos = self.initial_dataset.videos\r\n name_to_index = {name: index for index, name in enumerate(videos)}\r\n pairs = pd.read_csv(pairs_list)\r\n pairs = pairs[np.logical_and(pairs['source'].isin(videos), pairs['driving'].isin(videos))]\r\n\r\n number_of_pairs = min(pairs.shape[0], number_of_pairs)\r\n self.pairs = []\r\n self.start_frames = []\r\n for ind in range(number_of_pairs):\r\n self.pairs.append(\r\n (name_to_index[pairs['driving'].iloc[ind]], name_to_index[pairs['source'].iloc[ind]]))\r\n\r\n def __len__(self):\r\n return len(self.pairs)\r\n\r\n def __getitem__(self, idx):\r\n pair = self.pairs[idx]\r\n first = self.initial_dataset[pair[0]]\r\n second = self.initial_dataset[pair[1]]\r\n first = {'driving_' + key: value for key, value in first.items()}\r\n second = {'source_' + key: value for key, value in second.items()}\r\n\r\n return {**first, **second}\r\n"
] | [
[
"pandas.read_csv",
"numpy.random.seed",
"numpy.moveaxis",
"numpy.random.choice",
"numpy.array",
"sklearn.model_selection.train_test_split"
]
] |
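The video-frames dataset code in the row above samples training pairs by drawing two frame indices with replacement and sorting them. A minimal sketch of that sampling idea, assuming a hypothetical `frame_dir` of RGB frames and using only numpy/skimage calls listed in the row's API column; this is an illustrative reconstruction, not part of the dataset entry:

```python
import os
import numpy as np
from skimage import io, img_as_float32

def sample_training_pair(frame_dir, rng=np.random):
    """Pick two frame indices with replacement, sort them, and load float32 frames."""
    frames = sorted(os.listdir(frame_dir))
    idx = np.sort(rng.choice(len(frames), replace=True, size=2))
    pair = [img_as_float32(io.imread(os.path.join(frame_dir, frames[i]))) for i in idx]
    # Channels-last (H, W, C) -> channels-first (C, H, W), matching the
    # 'source'/'driving' tensors built in FramesDataset.__getitem__ above.
    # Assumes 3-channel frames; grayscale inputs would need gray2rgb first.
    return [frame.transpose((2, 0, 1)) for frame in pair]
```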
liuhanyao98/nums_gpu_draft | [
"48df59afe605f02ea2bd609c5f9e0006fbc27a5d"
] | [
"nums/core/array/application.py"
] | [
"# coding=utf-8\n# Copyright (C) 2020 NumS Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import List\n\nimport numpy as np\n\nfrom nums.core.array.blockarray import BlockArray, Block\nfrom nums.core.array import utils as array_utils\nfrom nums.core.storage.storage import ArrayGrid, StoredArray, StoredArrayS3\n# TODO(hme): Remove dependence on specific system and scheduler implementations.\nfrom nums.core.systems.systems import System, RaySystem, SerialSystem\nfrom nums.core.systems.gpu_systems import CupyParallelSystem\nfrom nums.core.systems.schedulers import BlockCyclicScheduler\nfrom nums.core.systems import utils as systems_utils\nfrom nums.core.systems.filesystem import FileSystem\nfrom nums.core.array.random import NumsRandomState\n\n# pylint: disable = too-many-lines\n\n\nclass ArrayApplication(object):\n\n def __init__(self, system: System, filesystem: FileSystem):\n self.system: System = system\n self._filesystem: FileSystem = filesystem\n self._array_grids: (str, ArrayGrid) = {}\n self.random = self.random_state()\n\n self.one_half = self.scalar(.5)\n self.two = self.scalar(2.0)\n self.one = self.scalar(1.0)\n self.zero = self.scalar(0.0)\n self._block_shape_map = {}\n\n def num_cores_total(self):\n if isinstance(self.system, RaySystem):\n system: RaySystem = self.system\n nodes = system.nodes()\n num_cores = sum(map(lambda n: n[\"Resources\"][\"CPU\"], nodes))\n elif isinstance(self.system, CupyParallelSystem):\n system: CupyParallelSystem = self.system\n num_cores = system.num_gpus\n else:\n assert isinstance(self.system, SerialSystem)\n num_cores = systems_utils.get_num_cores()\n return int(num_cores)\n\n def compute_block_shape(self,\n shape: tuple,\n dtype: np.dtype,\n cluster_shape=None,\n num_cores=None):\n # TODO (hme): Add support for downstream optimizer to decide block shape.\n if dtype in (np.float32, np.float64, float):\n dtype = np.finfo(dtype).dtype\n elif dtype in (np.int32, np.int64, int):\n dtype = np.iinfo(dtype).dtype\n elif dtype in (bool, np.bool_):\n dtype = np.dtype(np.bool_)\n else:\n raise ValueError(\"dtype %s not supported\" % str(dtype))\n\n nbytes = dtype.alignment\n size = np.product(shape) * nbytes\n # If the object is less than 100 megabytes, there's not much value in constructing\n # a block tensor.\n if size < 10 ** 8:\n block_shape = shape\n return block_shape\n\n if num_cores is not None:\n pass\n else:\n num_cores = self.num_cores_total()\n\n if cluster_shape is not None:\n pass\n elif isinstance(self.system, RaySystem) \\\n and isinstance(self.system.scheduler, BlockCyclicScheduler):\n # This configuration is the default.\n cluster_shape = self.system.scheduler.cluster_shape\n elif isinstance(self.system, CupyParallelSystem):\n cluster_shape = self.system.cluster_shape\n else:\n assert isinstance(self.system, SerialSystem)\n cluster_shape = (1, 1)\n\n if len(shape) < len(cluster_shape):\n cluster_shape = cluster_shape[:len(shape)]\n elif len(shape) > len(cluster_shape):\n cluster_shape = 
list(cluster_shape)\n for axis in range(len(shape)):\n if axis >= len(cluster_shape):\n cluster_shape.append(1)\n cluster_shape = tuple(cluster_shape)\n\n shape_np = np.array(shape, dtype=np.int)\n # Softmax on cluster shape gives strong preference to larger dimensions.\n cluster_weights = np.exp(np.array(cluster_shape)) / np.sum(np.exp(cluster_shape))\n shape_fracs = np.array(shape) / np.sum(shape)\n # cluster_weights weight the proportion of cores available along each axis,\n # and shape_fracs is the proportion of data along each axis.\n weighted_shape_fracs = cluster_weights * shape_fracs\n weighted_shape_fracs = weighted_shape_fracs / np.sum(weighted_shape_fracs)\n\n # Compute dimensions of grid shape\n # so that the number of blocks are close to the number of cores.\n grid_shape_frac = num_cores ** weighted_shape_fracs\n grid_shape = np.floor(grid_shape_frac)\n # Put remainder on largest axis.\n remaining = np.sum(grid_shape_frac - grid_shape)\n grid_shape[np.argmax(shape)] += remaining\n grid_shape = np.ceil(grid_shape).astype(np.int)\n\n # We use ceiling of floating block shape\n # so that resulting grid shape is <= to what we compute above.\n block_shape = tuple((shape_np + grid_shape - 1) // grid_shape)\n return block_shape\n\n def get_block_shape(self, shape, dtype: np.dtype):\n # Simple way to ensure shape compatibility for basic linear algebra operations.\n block_shape = self.compute_block_shape(shape, dtype)\n final_block_shape = []\n for axis in range(len(shape)):\n shape_dim = shape[axis]\n block_shape_dim = block_shape[axis]\n if shape_dim not in self._block_shape_map:\n self._block_shape_map[shape_dim] = block_shape_dim\n final_block_shape.append(self._block_shape_map[shape_dim])\n return tuple(final_block_shape)\n\n def _get_array_grid(self, filename: str, stored_array_cls) -> ArrayGrid:\n if filename not in self._array_grids:\n store_inst: StoredArray = stored_array_cls(filename)\n self._array_grids[filename] = store_inst.get_grid()\n return self._array_grids[filename]\n\n ######################################\n # Filesystem API\n ######################################\n\n def write_fs(self, ba: BlockArray, filename: str):\n res = self._write(ba, filename, self._filesystem.write_block_fs)\n self._filesystem.write_meta_fs(ba, filename)\n return res\n\n def read_fs(self, filename: str):\n meta = self._filesystem.read_meta_fs(filename)\n addresses = meta[\"addresses\"]\n grid_meta = meta[\"grid_meta\"]\n grid = ArrayGrid.from_meta(grid_meta)\n ba: BlockArray = BlockArray(grid, self.system)\n for grid_entry in addresses:\n node_address = addresses[grid_entry]\n options = {\"resources\": {node_address: 1.0 / 10 ** 4}}\n ba.blocks[grid_entry].oid = self._filesystem.read_block_fs(filename,\n grid_entry,\n grid_meta,\n options=options)\n return ba\n\n def delete_fs(self, filename: str):\n meta = self._filesystem.read_meta_fs(filename)\n addresses = meta[\"addresses\"]\n grid_meta = meta[\"grid_meta\"]\n grid = ArrayGrid.from_meta(grid_meta)\n result_grid = ArrayGrid(grid.grid_shape,\n tuple(np.ones_like(grid.shape, dtype=np.int)),\n dtype=dict.__name__)\n rarr = BlockArray(result_grid, self.system)\n for grid_entry in addresses:\n node_address = addresses[grid_entry]\n options = {\"resources\": {node_address: 1.0 / 10 ** 4}}\n rarr.blocks[grid_entry].oid = self._filesystem.delete_block_fs(filename,\n grid_entry,\n grid_meta,\n options=options)\n self._filesystem.delete_meta_fs(filename)\n return rarr\n\n def write_s3(self, ba: BlockArray, filename: str):\n grid_entry = 
tuple(np.zeros_like(ba.shape, dtype=np.int))\n result = self._filesystem.write_meta_s3(filename,\n grid_meta=ba.grid.to_meta(),\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": ba.grid.grid_shape\n })\n assert \"ETag\" in self.system.get(result).item(), \"Metadata write failed.\"\n return self._write(ba, filename, self._filesystem.write_block_s3)\n\n def _write(self, ba: BlockArray, filename, remote_func):\n grid = ba.grid\n result_grid = ArrayGrid(grid.grid_shape,\n tuple(np.ones_like(grid.shape, dtype=np.int)),\n dtype=dict.__name__)\n rarr = BlockArray(result_grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n rarr.blocks[grid_entry].oid = remote_func(ba.blocks[grid_entry].oid,\n filename,\n grid_entry,\n grid.to_meta(),\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def read_s3(self, filename: str):\n store_cls, remote_func = StoredArrayS3, self._filesystem.read_block_s3\n grid = self._get_array_grid(filename, store_cls)\n grid_meta = grid.to_meta()\n grid_entry_iterator = grid.get_entry_iterator()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid_entry_iterator:\n rarr.blocks[grid_entry].oid = remote_func(filename, grid_entry, grid_meta,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def delete_s3(self, filename: str):\n grid = self._get_array_grid(filename, StoredArrayS3)\n grid_entry = tuple(np.zeros_like(grid.shape, dtype=np.int))\n result = self._filesystem.delete_meta_s3(filename,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n deleted_key = self.system.get(result).item()[\"Deleted\"][0][\"Key\"]\n assert deleted_key == StoredArrayS3(filename, grid).get_meta_key()\n results: BlockArray = self._delete(filename,\n StoredArrayS3,\n self._filesystem.delete_block_s3)\n return results\n\n def _delete(self, filename, store_cls, remote_func):\n grid = self._get_array_grid(filename, store_cls)\n result_grid = ArrayGrid(grid.grid_shape,\n tuple(np.ones_like(grid.shape, dtype=np.int)),\n dtype=dict.__name__)\n rarr = BlockArray(result_grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n rarr.blocks[grid_entry].oid = remote_func(filename, grid_entry, grid.to_meta(),\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def read_csv(self, filename, dtype=np.float, delimiter=',', has_header=False, num_workers=None):\n if num_workers is None:\n num_workers = self.num_cores_total()\n arrays: list = self._filesystem.read_csv(filename, dtype, delimiter, has_header,\n num_workers)\n shape = np.zeros(len(arrays[0].shape), dtype=int)\n for array in arrays:\n shape += np.array(array.shape, dtype=int)\n shape = tuple(shape)\n block_shape = self.get_block_shape(shape, dtype)\n result = self.concatenate(arrays, axis=0, axis_block_size=block_shape[0])\n # Release references immediately, in case we need to do another reshape.\n del arrays\n if result.block_shape[1] != block_shape[1]:\n result = result.reshape(block_shape=block_shape)\n return result\n\n def loadtxt(self, fname, dtype=float, comments='# ', delimiter=' ',\n converters=None, skiprows=0, usecols=None, unpack=False,\n ndmin=0, encoding='bytes', max_rows=None, num_workers=None) -> BlockArray:\n if num_workers is None:\n num_workers = self.num_cores_total()\n return self._filesystem.loadtxt(\n fname, dtype=dtype, comments=comments, delimiter=delimiter,\n converters=converters, skiprows=skiprows,\n 
usecols=usecols, unpack=unpack, ndmin=ndmin,\n encoding=encoding, max_rows=max_rows, num_workers=num_workers)\n\n ######################################\n # Array Operations API\n ######################################\n\n def scalar(self, value):\n return BlockArray.from_scalar(value, self.system)\n\n def array(self, array: np.ndarray, block_shape: tuple = None):\n assert len(array.shape) == len(block_shape)\n return BlockArray.from_np(array,\n block_shape=block_shape,\n copy=False,\n system=self.system)\n\n def zeros(self, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n return self._new_array(\"zeros\", shape, block_shape, dtype)\n\n def ones(self, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n return self._new_array(\"ones\", shape, block_shape, dtype)\n\n def empty(self, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n return self._new_array(\"empty\", shape, block_shape, dtype)\n\n def _new_array(self, op_name: str, shape: tuple, block_shape: tuple, dtype: np.dtype = None):\n assert len(shape) == len(block_shape)\n if dtype is None:\n dtype = np.float64\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n grid_meta = grid.to_meta()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n rarr.blocks[grid_entry].oid = self.system.new_block(op_name,\n grid_entry,\n grid_meta,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid.grid_shape\n })\n return rarr\n\n def concatenate(self, arrays: List, axis: int, axis_block_size: int = None):\n num_arrs = len(arrays)\n assert num_arrs > 1\n first_arr: BlockArray = arrays[0]\n num_axes = len(first_arr.shape)\n # Check assumptions and define result shapes and block shapes.\n for i in range(num_arrs):\n curr_ba: BlockArray = arrays[i]\n assert num_axes == len(curr_ba.shape), \"Unequal num axes.\"\n assert curr_ba.dtype == first_arr.dtype, \"Incompatible dtypes \" \\\n \"%s, %s\" % (curr_ba.dtype, first_arr.dtype)\n for curr_axis in range(num_axes):\n first_block_size = first_arr.block_shape[curr_axis]\n block_size = curr_ba.block_shape[curr_axis]\n if first_block_size == block_size:\n continue\n elif axis == curr_axis:\n assert axis_block_size is not None, \"block axis size is required \" \\\n \"when block shapes are neq.\"\n else:\n raise ValueError(\"Other axis shapes and block shapes must be equal.\")\n\n # Compute result shapes.\n result_shape = []\n result_block_shape = []\n for curr_axis in range(num_axes):\n if curr_axis == axis:\n if axis_block_size is None:\n # They are all equal.\n axis_block_size = first_arr.block_shape[curr_axis]\n result_block_size = axis_block_size\n result_size = 0\n for i in range(num_arrs):\n curr_ba: BlockArray = arrays[i]\n size = curr_ba.shape[curr_axis]\n result_size += size\n else:\n result_size = first_arr.shape[curr_axis]\n result_block_size = first_arr.block_shape[curr_axis]\n result_shape.append(result_size)\n result_block_shape.append(result_block_size)\n result_shape, result_block_shape = tuple(result_shape), tuple(result_block_shape)\n result_ba = self.empty(result_shape, result_block_shape, first_arr.dtype)\n\n # Write result blocks.\n # TODO (hme): This can be optimized by updating blocks directly.\n pos = 0\n for arr in arrays:\n delta = arr.shape[axis]\n axis_slice = slice(pos, pos+delta)\n result_selector = tuple([slice(None, None) for _ in range(axis)] + [axis_slice, ...])\n result_ba[result_selector] = arr\n pos += delta\n return result_ba\n\n def eye(self, shape: tuple, block_shape: tuple, dtype: np.dtype 
= None):\n assert len(shape) == len(block_shape) == 2\n if dtype is None:\n dtype = np.float64\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n grid_meta = grid.to_meta()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n syskwargs = {\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n if np.all(np.diff(grid_entry) == 0):\n # This is a diagonal block.\n rarr.blocks[grid_entry].oid = self.system.new_block(\"eye\",\n grid_entry,\n grid_meta,\n syskwargs=syskwargs)\n else:\n rarr.blocks[grid_entry].oid = self.system.new_block(\"zeros\",\n grid_entry,\n grid_meta,\n syskwargs=syskwargs)\n return rarr\n\n def diag(self, X: BlockArray) -> BlockArray:\n if len(X.shape) == 1:\n shape = X.shape[0], X.shape[0]\n block_shape = X.block_shape[0], X.block_shape[0]\n grid = ArrayGrid(shape, block_shape, X.dtype.__name__)\n grid_meta = grid.to_meta()\n rarr = BlockArray(grid, self.system)\n for grid_entry in grid.get_entry_iterator():\n syskwargs = {\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n if np.all(np.diff(grid_entry) == 0):\n # This is a diagonal block.\n rarr.blocks[grid_entry].oid = self.system.diag(X.blocks[grid_entry[0]].oid,\n syskwargs=syskwargs)\n else:\n rarr.blocks[grid_entry].oid = self.system.new_block(\"zeros\",\n grid_entry,\n grid_meta,\n syskwargs=syskwargs)\n elif len(X.shape) == 2:\n assert X.shape[0] == X.shape[1]\n assert X.block_shape[0] == X.block_shape[1]\n shape = X.shape[0],\n block_shape = X.block_shape[0],\n grid = ArrayGrid(shape, block_shape, X.dtype.__name__)\n rarr = BlockArray(grid, self.system)\n for grid_entry in X.grid.get_entry_iterator():\n out_grid_entry = grid_entry[:1]\n out_grid_shape = grid.grid_shape[:1]\n syskwargs = {\"grid_entry\": out_grid_entry, \"grid_shape\": out_grid_shape}\n if np.all(np.diff(grid_entry) == 0):\n # This is a diagonal block.\n rarr.blocks[out_grid_entry].oid = self.system.diag(X.blocks[grid_entry].oid,\n syskwargs=syskwargs)\n else:\n raise ValueError(\"X must have 1 or 2 axes.\")\n return rarr\n\n def arange(self, shape, block_shape, step=1, dtype=np.int64) -> BlockArray:\n assert step == 1\n # Generate ranges per block.\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n rarr = BlockArray(grid, self.system)\n for _, grid_entry in enumerate(grid.get_entry_iterator()):\n syskwargs = {\"grid_entry\": grid_entry, \"grid_shape\": grid.grid_shape}\n start = block_shape[0] * grid_entry[0]\n entry_shape = grid.get_block_shape(grid_entry)\n stop = start + entry_shape[0]\n rarr.blocks[grid_entry].oid = self.system.arange(start,\n stop,\n step,\n dtype,\n syskwargs=syskwargs)\n return rarr\n\n def linspace(self, start, stop, shape, block_shape, endpoint, retstep, dtype, axis):\n assert axis == 0\n assert endpoint is True\n assert retstep is False\n step_size = (stop - start) / (shape[0]-1)\n result = self.arange(shape, block_shape)\n result = start + result * step_size\n if dtype is not None and dtype != result.dtype:\n result = result.astype(dtype)\n return result\n\n def log(self, X: BlockArray):\n return X.ufunc(\"log\")\n\n def exp(self, X: BlockArray):\n return X.ufunc(\"exp\")\n\n def abs(self, X: BlockArray):\n return X.ufunc(\"abs\")\n\n def min(self, X: BlockArray, axis=None, keepdims=False):\n return self.reduce(\"min\", X, axis, keepdims)\n\n def max(self, X: BlockArray, axis=None, keepdims=False):\n return self.reduce(\"max\", X, axis, keepdims)\n\n def argmin(self, X: BlockArray, axis=None):\n pass\n\n def sum(self, X: BlockArray, axis=None, 
keepdims=False, dtype=None):\n return self.reduce(\"sum\", X, axis, keepdims, dtype)\n\n def reduce(self, op_name: str, X: BlockArray, axis=None, keepdims=False, dtype=None):\n res = X.reduce_axis(op_name, axis, keepdims=keepdims)\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def mean(self, X: BlockArray, axis=None, keepdims=False, dtype=None):\n if X.dtype not in (float, np.float32, np.float64):\n X = X.astype(np.float64)\n num_summed = np.product(X.shape) if axis is None else X.shape[axis]\n res = self.sum(X, axis=axis, keepdims=keepdims) / num_summed\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def var(self, X: BlockArray, axis=None, ddof=0, keepdims=False, dtype=None):\n mean = self.mean(X, axis=axis, keepdims=True)\n ss = self.sum((X - mean)**self.two, axis=axis, keepdims=keepdims)\n num_summed = (np.product(X.shape) if axis is None else X.shape[axis]) - ddof\n res = ss / num_summed\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def std(self, X: BlockArray, axis=None, ddof=0, keepdims=False, dtype=None):\n res = self.sqrt(self.var(X, axis, ddof, keepdims))\n if dtype is not None:\n res = res.astype(dtype)\n return res\n\n def argop(self, op_name: str, arr: BlockArray, axis=None):\n if len(arr.shape) > 1:\n raise NotImplementedError(\"%s currently supports one-dimensional arrays.\" % op_name)\n if axis is None:\n axis = 0\n assert axis == 0\n grid = ArrayGrid(shape=(), block_shape=(), dtype=np.int64.__name__)\n result = BlockArray(grid, self.system)\n reduction_result = None, None\n for grid_entry in arr.grid.get_entry_iterator():\n block_slice: slice = arr.grid.get_slice(grid_entry)[0]\n block: Block = arr.blocks[grid_entry]\n syskwargs = {\n \"grid_entry\": grid_entry,\n \"grid_shape\": arr.grid.grid_shape,\n \"options\": {\"num_returns\": 2},\n }\n reduction_result = self.system.arg_op(op_name,\n block.oid,\n block_slice,\n *reduction_result,\n syskwargs=syskwargs)\n argoptima, _ = reduction_result\n result.blocks[()].oid = argoptima\n return result\n\n def sqrt(self, X):\n if X.dtype not in (float, np.float32, np.float64):\n X = X.astype(np.float64)\n return X.ufunc(\"sqrt\")\n\n def norm(self, X):\n return self.sqrt(X.T @ X)\n\n def xlogy(self, x: BlockArray, y: BlockArray) -> BlockArray:\n if x.dtype not in (float, np.float32, np.float64):\n x = x.astype(np.float64)\n if x.dtype not in (float, np.float32, np.float64):\n y = y.astype(np.float64)\n return self.map_bop(\"xlogy\", x, y)\n\n def where(self, condition: BlockArray, x=None, y=None):\n result_oids = []\n shape_oids = []\n num_axes = max(1, len(condition.shape))\n # Stronger constraint than necessary, but no reason for anything stronger.\n if x is not None or y is not None:\n assert x is not None and y is not None\n assert condition.shape == x.shape == y.shape\n assert condition.block_shape == x.block_shape == y.block_shape\n for grid_entry in condition.grid.get_entry_iterator():\n block: Block = condition.blocks[grid_entry]\n block_slice_tuples = condition.grid.get_slice_tuples(grid_entry)\n roids = self.system.where(block.oid, x, y,\n block_slice_tuples,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": condition.grid.grid_shape,\n \"options\": {\"num_returns\": num_axes+1}\n })\n block_oids, shape_oid = roids[:-1], roids[-1]\n shape_oids.append(shape_oid)\n result_oids.append(block_oids)\n shapes = self.system.get(shape_oids)\n result_shape = (np.sum(shapes),)\n if result_shape == (0,):\n return (self.array(np.array([], dtype=np.int64), 
block_shape=(0,)),)\n # Remove empty shapes.\n result_shape_pair = []\n for i, shape in enumerate(shapes):\n if np.sum(shape) > 0:\n result_shape_pair.append((result_oids[i], shape))\n result_block_shape = self.compute_block_shape(result_shape, np.int64)\n result_arrays = []\n for axis in range(num_axes):\n block_arrays = []\n for i in range(len(result_oids)):\n if shapes[i] == (0,):\n continue\n block_arrays.append(BlockArray.from_oid(result_oids[i][axis],\n shapes[i],\n np.int64,\n self.system))\n if len(block_arrays) == 1:\n axis_result = block_arrays[0]\n else:\n axis_result = self.concatenate(block_arrays, 0, result_block_shape[0])\n result_arrays.append(axis_result)\n return tuple(result_arrays)\n\n def map_uop(self,\n op_name: str,\n arr: BlockArray,\n out: BlockArray = None,\n where=True,\n args=None,\n kwargs=None) -> BlockArray:\n \"\"\"\n A map, for unary operators, that applies to every entry of an array.\n :param op_name: An element-wise unary operator.\n :param arr: A BlockArray.\n :param out: A BlockArray to which the result is written.\n :param where: An indicator specifying the indices to which op is applied.\n :param args: Args provided to op.\n :param kwargs: Keyword args provided to op.\n :return: A BlockArray.\n \"\"\"\n if where is not True:\n raise NotImplementedError(\"'where' argument is not yet supported.\")\n args = () if args is None else args\n kwargs = {} if kwargs is None else kwargs\n shape = arr.shape\n block_shape = arr.block_shape\n dtype = array_utils.get_uop_output_type(op_name, arr.dtype)\n assert len(shape) == len(block_shape)\n if out is None:\n grid = ArrayGrid(shape, block_shape, dtype.__name__)\n rarr = BlockArray(grid, self.system)\n else:\n rarr = out\n grid = rarr.grid\n assert rarr.shape == arr.shape and rarr.block_shape == arr.block_shape\n for grid_entry in grid.get_entry_iterator():\n # TODO(hme): Faster to create ndarray first,\n # and instantiate block array on return\n # to avoid instantiating blocks on BlockArray initialization.\n rarr.blocks[grid_entry] = arr.blocks[grid_entry].uop_map(op_name,\n args=args,\n kwargs=kwargs)\n return rarr\n\n def matmul(self,\n arr_1: BlockArray,\n arr_2: BlockArray) -> BlockArray:\n return arr_1 @ arr_2\n\n def tensordot(self,\n arr_1: BlockArray,\n arr_2: BlockArray,\n axes: int = 2) -> BlockArray:\n return arr_1.tensordot(arr_2, axes)\n\n def map_bop(self,\n op_name: str,\n arr_1: BlockArray,\n arr_2: BlockArray,\n out: BlockArray = None,\n where=True,\n args=None,\n kwargs=None) -> BlockArray:\n # TODO (hme): Move this into BlockArray, and invoke on operator implementations.\n \"\"\"\n A map, for binary operators, that applies element-wise to every entry of the input arrays.\n :param op_name: An element-wise binary operator.\n :param arr_1: A BlockArray.\n :param arr_2: A BlockArray.\n :param out: A BlockArray to which the result is written.\n :param where: An indicator specifying the indices to which op is applied.\n :param args: Args provided to op.\n :param kwargs: Keyword args provided to op.\n :return: A BlockArray.\n \"\"\"\n if where is not True:\n raise NotImplementedError(\"'where' argument is not yet supported.\")\n if args is not None:\n raise NotImplementedError(\"'args' is not yet supported.\")\n if not (kwargs is None or len(kwargs) == 0):\n raise NotImplementedError(\"'kwargs' is not yet supported.\")\n\n try:\n ufunc = np.__getattribute__(op_name)\n if (op_name.endswith(\"max\") or op_name == \"maximum\"\n or op_name.endswith(\"min\") or op_name == \"minimum\"\n or 
op_name.startswith(\"logical\")):\n rarr = self._broadcast_bop(op_name, arr_1, arr_2)\n else:\n result_blocks: np.ndarray = ufunc(arr_1.blocks, arr_2.blocks)\n rarr = BlockArray.from_blocks(result_blocks,\n result_shape=None,\n system=self.system)\n except Exception as _:\n rarr = self._broadcast_bop(op_name, arr_1, arr_2)\n if out is not None:\n assert out.grid.grid_shape == rarr.grid.grid_shape\n assert out.shape == rarr.shape\n assert out.block_shape == rarr.block_shape\n out.blocks[:] = rarr.blocks[:]\n rarr = out\n return rarr\n\n def _broadcast_bop(self, op_name, arr_1, arr_2) -> BlockArray:\n \"\"\"\n We want to avoid invoking this op whenever possible; NumPy's imp is faster.\n :param op_name: Name of binary operation.\n :param arr_1: A BlockArray.\n :param arr_2: A BlockArray.\n :return: A BlockArray.\n \"\"\"\n if arr_1.shape != arr_2.shape:\n output_grid_shape = array_utils.broadcast_shape(arr_1.grid.grid_shape,\n arr_2.grid.grid_shape)\n arr_1 = arr_1.broadcast_to(output_grid_shape)\n arr_2 = arr_2.broadcast_to(output_grid_shape)\n dtype = array_utils.get_bop_output_type(op_name,\n arr_1.dtype,\n arr_2.dtype)\n grid = ArrayGrid(arr_1.shape, arr_1.block_shape, dtype.__name__)\n rarr = BlockArray(grid, self.system)\n for grid_entry in rarr.grid.get_entry_iterator():\n block_1: Block = arr_1.blocks[grid_entry]\n block_2: Block = arr_2.blocks[grid_entry]\n rarr.blocks[grid_entry] = block_1.bop(op_name, block_2, {})\n return rarr\n\n def get(self, *arrs):\n if len(arrs) == 1:\n if isinstance(arrs[0], BlockArray):\n return arrs[0].get()\n else:\n return arrs[0]\n else:\n r = []\n for item in arrs:\n if isinstance(item, BlockArray):\n r.append(item.get())\n else:\n r.append(item)\n return r\n\n def allclose(self, a: BlockArray, b: BlockArray, rtol=1.e-5, atol=1.e-8):\n assert a.shape == b.shape and a.block_shape == b.block_shape\n bool_list = []\n grid_shape = a.grid.grid_shape\n for grid_entry in a.grid.get_entry_iterator():\n a_block, b_block = a.blocks[grid_entry].oid, b.blocks[grid_entry].oid\n bool_list.append(self.system.allclose(a_block, b_block, rtol, atol,\n syskwargs={\n \"grid_entry\": grid_entry,\n \"grid_shape\": grid_shape\n }))\n oid = self.system.logical_and(*bool_list,\n syskwargs={\"grid_entry\": (0, 0), \"grid_shape\": (1, 1)})\n return BlockArray.from_oid(oid, (), np.bool, self.system)\n\n def qr(self, X: BlockArray):\n return self.indirect_tsqr(X)\n\n def indirect_tsr(self, X: BlockArray, reshape_output=True):\n assert len(X.shape) == 2\n # TODO (hme): This assertion is temporary and ensures returned\n # shape of qr of block is correct.\n assert X.block_shape[0] >= X.shape[1]\n # Compute R for each block.\n grid = X.grid\n grid_shape = grid.grid_shape\n shape = X.shape\n block_shape = X.block_shape\n R_oids = []\n # Assume no blocking along second dim.\n for i in range(grid_shape[0]):\n # Select a row according to block_shape.\n row = []\n for j in range(grid_shape[1]):\n row.append(X.blocks[i, j].oid)\n R_oids.append(self.system.qr(*row,\n mode=\"r\",\n axis=1,\n syskwargs={\n \"grid_entry\": (i, 0),\n \"grid_shape\": (grid_shape[0], 1),\n \"options\": {\"num_returns\": 1}\n })\n )\n\n # Construct R by summing over R blocks.\n # TODO (hme): Communication may be inefficient due to redundancy of data.\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n tsR = BlockArray(ArrayGrid(shape=R_shape,\n block_shape=R_shape,\n dtype=X.dtype.__name__),\n self.system)\n tsR.blocks[0, 0].oid = self.system.qr(*R_oids,\n mode=\"r\",\n axis=0,\n 
syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1),\n \"options\": {\"num_returns\": 1}\n })\n # If blocking is \"tall-skinny,\" then we're done.\n if R_shape != R_block_shape:\n if reshape_output:\n R = tsR.reshape(shape=R_shape, block_shape=R_block_shape)\n else:\n R = tsR\n else:\n R = tsR\n return R\n\n def indirect_tsqr(self, X: BlockArray, reshape_output=True):\n shape = X.shape\n block_shape = X.block_shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n tsR = self.indirect_tsr(X, reshape_output=False)\n\n # Compute inverse of R.\n tsR_inverse = self.inv(tsR)\n # If blocking is \"tall-skinny,\" then we're done.\n if R_shape != R_block_shape:\n R_inverse = tsR_inverse.reshape(shape=R_shape, block_shape=R_block_shape)\n if reshape_output:\n R = tsR.reshape(shape=R_shape, block_shape=R_block_shape)\n else:\n R = tsR\n else:\n R_inverse = tsR_inverse\n R = tsR\n\n Q = X @ R_inverse\n return Q, R\n\n def direct_tsqr(self, X, reshape_output=True):\n assert len(X.shape) == 2\n\n # Compute R for each block.\n shape = X.shape\n grid = X.grid\n grid_shape = grid.grid_shape\n block_shape = X.block_shape\n Q_oids = []\n R_oids = []\n QR_dims = []\n Q2_shape = [0, shape[1]]\n for i in range(grid_shape[0]):\n # Select a row according to block_shape.\n row = []\n for j in range(grid_shape[1]):\n row.append(X.blocks[i, j].oid)\n # We invoke \"reduced\", so q, r is returned with dimensions (M, K), (K, N), K = min(M, N)\n M = grid.get_block_shape((i, 0))[0]\n N = shape[1]\n K = min(M, N)\n QR_dims.append(((M, K), (K, N)))\n Q2_shape[0] += K\n # Run each row on separate nodes along first axis.\n # This maintains some data locality.\n Q_oid, R_oid = self.system.qr(*row,\n mode=\"reduced\",\n axis=1,\n syskwargs={\n \"grid_entry\": (i, 0),\n \"grid_shape\": (grid_shape[0], 1),\n \"options\": {\"num_returns\": 2}\n })\n R_oids.append(R_oid)\n Q_oids.append(Q_oid)\n\n # TODO (hme): This pulls several order N^2 R matrices on a single node.\n # A solution is the recursive extension to direct TSQR.\n Q2_oid, R2_oid = self.system.qr(*R_oids,\n mode=\"reduced\",\n axis=0,\n syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1),\n \"options\": {\"num_returns\": 2}\n })\n\n Q2_shape = tuple(Q2_shape)\n Q2_block_shape = (QR_dims[0][1][0], shape[1])\n Q2 = self._vec_from_oids([Q2_oid],\n shape=Q2_shape,\n block_shape=Q2_block_shape,\n dtype=X.dtype)\n # The resulting Q's from this operation are N^2 (same size as above R's).\n Q2_oids = list(map(lambda block: block.oid, Q2.blocks.flatten()))\n\n # Construct Q.\n Q = self.zeros(shape=shape,\n block_shape=(block_shape[0], shape[1]),\n dtype=X.dtype)\n for i, grid_entry in enumerate(Q.grid.get_entry_iterator()):\n Q_dims, R_dims = QR_dims[i]\n Q1_block_shape = Q_dims\n Q2_block_shape = R_dims\n Q.blocks[grid_entry].oid = self.system.bop(\"tensordot\", Q_oids[i], Q2_oids[i],\n a1_shape=Q1_block_shape,\n a2_shape=Q2_block_shape,\n a1_T=False, a2_T=False, axes=1,\n syskwargs={\"grid_entry\": grid_entry,\n \"grid_shape\": Q.grid.grid_shape})\n\n # Construct R.\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n tsR = self._vec_from_oids([R2_oid], shape=R_shape, block_shape=R_shape, dtype=X.dtype)\n # If blocking is \"tall-skinny,\" then we're done.\n if R_shape == R_block_shape or not reshape_output:\n R = tsR\n else:\n R = tsR.reshape(shape=R_shape, block_shape=R_block_shape)\n\n if Q.shape != block_shape or not reshape_output:\n Q = Q.reshape(shape=shape, 
block_shape=block_shape)\n\n return Q, R\n\n def svd(self, X):\n # TODO(hme): Optimize by merging with direct qr to compute U directly,\n # to avoid wasting space storing intermediate Q.\n # This may not really help until we have operator fusion.\n assert len(X.shape) == 2\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n Q, R = self.direct_tsqr(X, reshape_output=False)\n assert R.shape == R.block_shape\n R_U, S, VT = self.system.svd(R.blocks[(0, 0)].oid,\n syskwargs={\"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1)})\n R_U: BlockArray = self._vec_from_oids([R_U], R_shape, R_block_shape, X.dtype)\n S: BlockArray = self._vec_from_oids([S], R_shape[:1], R_block_shape[:1], X.dtype)\n VT = self._vec_from_oids([VT], R_shape, R_block_shape, X.dtype)\n U = Q @ R_U\n\n return U, S, VT\n\n def inv(self, X: BlockArray):\n return self._inv(self.system.inv, {}, X)\n\n def _inv(self, remote_func, kwargs, X: BlockArray):\n # TODO (hme): Implement scalable version.\n block_shape = X.block_shape\n assert len(X.shape) == 2\n assert X.shape[0] == X.shape[1]\n single_block = X.shape[0] == X.block_shape[0] and X.shape[1] == X.block_shape[1]\n if single_block:\n result = X.copy()\n else:\n result = X.reshape(block_shape=X.shape)\n result.blocks[0, 0].oid = remote_func(result.blocks[0, 0].oid,\n **kwargs,\n syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1)\n })\n if not single_block:\n result = result.reshape(block_shape=block_shape)\n return result\n\n def cholesky(self, X: BlockArray):\n # TODO (hme): Implement scalable version.\n # Note:\n # A = Q, R\n # A.T @ A = R.T @ R\n # A.T @ A = L @ L.T\n # => R == L.T\n block_shape = X.block_shape\n assert len(X.shape) == 2\n assert X.shape[0] == X.shape[1]\n single_block = X.shape[0] == X.block_shape[0] and X.shape[1] == X.block_shape[1]\n if single_block:\n result = X.copy()\n else:\n result = X.reshape(block_shape=X.shape)\n result.blocks[0, 0].oid = self.system.cholesky(result.blocks[0, 0].oid,\n syskwargs={\n \"grid_entry\": (0, 0),\n \"grid_shape\": (1, 1)\n })\n if not single_block:\n result = result.reshape(block_shape=block_shape)\n return result\n\n def fast_linear_regression(self, X: BlockArray, y: BlockArray):\n assert len(X.shape) == 2\n assert len(y.shape) == 1\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n Q, R = self.indirect_tsqr(X, reshape_output=False)\n R_inv = self.inv(R)\n if R_shape != R_block_shape:\n R_inv = R_inv.reshape(shape=R_shape, block_shape=R_block_shape)\n theta = R_inv @ (Q.T @ y)\n return theta\n\n def linear_regression(self, X: BlockArray, y: BlockArray):\n assert len(X.shape) == 2\n assert len(y.shape) == 1\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n Q, R = self.direct_tsqr(X, reshape_output=False)\n # Invert R.\n R_inv = self.inv(R)\n if R_shape != R_block_shape:\n R_inv = R_inv.reshape(shape=R_shape, block_shape=R_block_shape)\n theta = R_inv @ (Q.T @ y)\n return theta\n\n def ridge_regression(self, X: BlockArray, y: BlockArray, lamb: float):\n assert len(X.shape) == 2\n assert len(y.shape) == 1\n assert lamb >= 0\n block_shape = X.block_shape\n shape = X.shape\n R_shape = (shape[1], shape[1])\n R_block_shape = (block_shape[1], block_shape[1])\n R = self.indirect_tsr(X)\n lamb_vec = self.array(lamb*np.eye(R_shape[0]), block_shape=R_block_shape)\n # TODO 
(hme): A better solution exists, which inverts R by augmenting X and y.\n # See Murphy 7.5.2.\n theta = self.inv(lamb_vec + R.T @ R) @ (X.T @ y)\n return theta\n\n def _vec_from_oids(self, oids, shape, block_shape, dtype):\n arr = BlockArray(ArrayGrid(shape=shape,\n block_shape=shape,\n dtype=dtype.__name__),\n self.system)\n # Make sure resulting grid shape is a vector (1 dimensional).\n assert np.sum(arr.grid.grid_shape) == (max(arr.grid.grid_shape)\n + len(arr.grid.grid_shape) - 1)\n for i, grid_entry in enumerate(arr.grid.get_entry_iterator()):\n arr.blocks[grid_entry].oid = oids[i]\n if block_shape != shape:\n return arr.reshape(block_shape=block_shape)\n return arr\n\n def random_state(self, seed=None):\n return NumsRandomState(self.system, seed)\n"
] | [
[
"numpy.sum",
"numpy.zeros_like",
"numpy.eye",
"numpy.ceil",
"numpy.diff",
"numpy.dtype",
"numpy.floor",
"numpy.ones_like",
"numpy.exp",
"numpy.argmax",
"numpy.iinfo",
"numpy.product",
"numpy.__getattribute__",
"numpy.array",
"numpy.finfo"
]
] |
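The nums row above includes `compute_block_shape`, which picks block sizes by softmax-weighting the cluster shape against the data shape and spreading the core count across axes. A standalone NumPy sketch of that heuristic, with an illustrative function name and the assumption that `cluster_shape` has already been padded or trimmed to the length of `shape`:

```python
import numpy as np

def block_shape_heuristic(shape, cluster_shape, num_cores):
    # Softmax over cluster_shape gives strong preference to larger cluster axes.
    cluster_weights = np.exp(np.array(cluster_shape)) / np.sum(np.exp(cluster_shape))
    shape_fracs = np.array(shape) / np.sum(shape)
    weighted = cluster_weights * shape_fracs
    weighted = weighted / np.sum(weighted)
    # Spread num_cores across axes so the block count stays close to the core count.
    grid_shape_frac = num_cores ** weighted
    grid_shape = np.floor(grid_shape_frac)
    grid_shape[np.argmax(shape)] += np.sum(grid_shape_frac - grid_shape)  # remainder on largest axis
    grid_shape = np.ceil(grid_shape).astype(np.int64)
    shape_np = np.array(shape, dtype=np.int64)
    return tuple((shape_np + grid_shape - 1) // grid_shape)  # ceiling division per axis

# Example: block_shape_heuristic((10**5, 10**3), cluster_shape=(4, 1), num_cores=16)
# gives about (6250, 1000), i.e. roughly 16 blocks along the long axis.
```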
caglasozen/wilds | [
"db2ff095304891244962509459ee48e2fc5fd5e6"
] | [
"examples/pretraining/swav/src/logger.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport os\nimport logging\nimport time\nfrom datetime import timedelta\nimport pandas as pd\n\n\nclass LogFormatter:\n def __init__(self):\n self.start_time = time.time()\n\n def format(self, record):\n elapsed_seconds = round(record.created - self.start_time)\n\n prefix = \"%s - %s - %s\" % (\n record.levelname,\n time.strftime(\"%x %X\"),\n timedelta(seconds=elapsed_seconds),\n )\n message = record.getMessage()\n message = message.replace(\"\\n\", \"\\n\" + \" \" * (len(prefix) + 3))\n return \"%s - %s\" % (prefix, message) if message else \"\"\n\n\ndef create_logger(filepath, rank):\n \"\"\"\n Create a logger.\n Use a different log file for each process.\n \"\"\"\n # create log formatter\n log_formatter = LogFormatter()\n\n # create file handler and set level to debug\n if filepath is not None:\n if rank > 0:\n filepath = \"%s-%i\" % (filepath, rank)\n file_handler = logging.FileHandler(filepath, \"a\")\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(log_formatter)\n\n # create console handler and set level to info\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(log_formatter)\n\n # create logger and set level to debug\n logger = logging.getLogger()\n logger.handlers = []\n logger.setLevel(logging.DEBUG)\n logger.propagate = False\n if filepath is not None:\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n # reset logger elapsed time\n def reset_time():\n log_formatter.start_time = time.time()\n\n logger.reset_time = reset_time\n\n return logger\n\n\nclass PD_Stats(object):\n \"\"\"\n Log stuff with pandas library\n \"\"\"\n\n def __init__(self, path, columns):\n self.path = path\n\n # reload path stats\n if os.path.isfile(self.path):\n self.stats = pd.read_pickle(self.path)\n\n # check that columns are the same\n assert list(self.stats.columns) == list(columns)\n\n else:\n self.stats = pd.DataFrame(columns=columns)\n\n def update(self, row, save=True):\n self.stats.loc[len(self.stats.index)] = row\n\n # save the statistics\n if save:\n self.stats.to_pickle(self.path)\n"
] | [
[
"pandas.read_pickle",
"pandas.DataFrame"
]
] |
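The SwAV logger row above defines `PD_Stats`, a small pandas-backed stats log that reloads an existing pickle and appends one row per update. A minimal usage sketch of the same pattern, with an illustrative pickle path and column set:

```python
import os
import pandas as pd

path, columns = "stats.pkl", ["epoch", "loss"]   # illustrative names only
stats = pd.read_pickle(path) if os.path.isfile(path) else pd.DataFrame(columns=columns)
stats.loc[len(stats.index)] = [0, 1.25]          # append one row, as PD_Stats.update does
stats.to_pickle(path)                            # persist so a restarted run can reload it
```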
yugangzhang/pyFAI | [
"e0453b279dac1f165f637e2a2ed1d4ddf57d31ba"
] | [
"pyFAI/opencl/OCLFullSplit.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n#\n# Copyright (C) 2014-2018 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jérôme Kieffer ([email protected])\n# Giannis Ashiotis\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\n__authors__ = [\"Jérôme Kieffer\", \"Giannis Ashiotis\"]\n__license__ = \"MIT\"\n__date__ = \"18/02/2020\"\n__copyright__ = \"2014, ESRF, Grenoble\"\n__contact__ = \"[email protected]\"\n\nimport os\nimport logging\nimport threading\nimport numpy\nfrom . import ocl, pyopencl\nfrom ..ext.splitBBoxLUT import HistoBBox1d\n\nif pyopencl:\n mf = pyopencl.mem_flags\nelse:\n raise ImportError(\"pyopencl is not installed\")\nfrom ..utils import crc32, get_cl_file\nlogger = logging.getLogger(__name__)\n\n\nclass OCLFullSplit1d(object):\n def __init__(self,\n pos,\n bins=100,\n pos0Range=None,\n pos1Range=None,\n mask=None,\n mask_checksum=None,\n allow_pos0_neg=False,\n unit=\"undefined\",\n workgroup_size=256,\n devicetype=\"all\",\n platformid=None,\n deviceid=None,\n profile=False):\n\n self.bins = bins\n self.lut_size = 0\n self.allow_pos0_neg = allow_pos0_neg\n\n if len(pos.shape) == 3:\n assert pos.shape[1] == 4\n assert pos.shape[2] == 2\n elif len(pos.shape) == 4:\n assert pos.shape[2] == 4\n assert pos.shape[3] == 2\n else:\n raise ValueError(\"Pos array dimentions are wrong\")\n self.pos_size = pos.size\n self.size = self.pos_size / 8\n self.pos = numpy.ascontiguousarray(pos.ravel(), dtype=numpy.float32)\n self.pos0Range = numpy.empty(2, dtype=numpy.float32)\n self.pos1Range = numpy.empty(2, dtype=numpy.float32)\n\n if (pos0Range is not None) and (len(pos0Range) == 2):\n self.pos0Range[0] = min(pos0Range) # do it on GPU?\n self.pos0Range[1] = max(pos0Range)\n if (not self.allow_pos0_neg) and (self.pos0Range[0] < 0):\n self.pos0Range[0] = 0.0\n if self.pos0Range[1] < 0:\n print(\"Warning: Invalid 0-dim range! 
Using the data derived range instead\")\n self.pos0Range[1] = 0.0\n # self.pos0Range[0] = pos0Range[0]\n # self.pos0Range[1] = pos0Range[1]\n else:\n self.pos0Range[0] = 0.0\n self.pos0Range[1] = 0.0\n if (pos1Range is not None) and (len(pos1Range) == 2):\n self.pos1Range[0] = min(pos1Range) # do it on GPU?\n self.pos1Range[1] = max(pos1Range)\n # self.pos1Range[0] = pos1Range[0]\n # self.pos1Range[1] = pos1Range[1]\n else:\n self.pos1Range[0] = 0.0\n self.pos1Range[1] = 0.0\n\n if mask is not None:\n assert mask.size == self.size\n self.check_mask = True\n self.cmask = numpy.ascontiguousarray(mask.ravel(), dtype=numpy.int8)\n if mask_checksum:\n self.mask_checksum = mask_checksum\n else:\n self.mask_checksum = crc32(mask)\n else:\n self.check_mask = False\n self.mask_checksum = None\n\n self._sem = threading.Semaphore()\n self.profile = profile\n self._cl_kernel_args = {}\n self._cl_mem = {}\n self.events = []\n self.workgroup_size = workgroup_size\n if self.size < self.workgroup_size:\n raise RuntimeError(\"Fatal error in workgroup size selection. Size (%d) must be >= workgroup size (%d)\\n\", self.size, self.workgroup_size)\n if (platformid is None) and (deviceid is None):\n platformid, deviceid = ocl.select_device(devicetype)\n elif platformid is None:\n platformid = 0\n elif deviceid is None:\n deviceid = 0\n self.platform = ocl.platforms[platformid]\n self.device = self.platform.devices[deviceid]\n self.device_type = self.device.type\n\n if (self.device_type == \"CPU\") and (self.platform.vendor == \"Apple\"):\n logger.warning(\"This is a workaround for Apple's OpenCL on CPU: enforce BLOCK_SIZE=1\")\n self.workgroup_size = 1\n try:\n self._ctx = pyopencl.Context(devices=[pyopencl.get_platforms()[platformid].get_devices()[deviceid]])\n if self.profile:\n self._queue = pyopencl.CommandQueue(self._ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE)\n else:\n self._queue = pyopencl.CommandQueue(self._ctx)\n self._compile_kernels()\n self._calc_boundaries()\n self._calc_LUT()\n except pyopencl.MemoryError as error:\n raise MemoryError(error)\n\n def _compile_kernels(self, kernel_file=None):\n \"\"\"\n Call the OpenCL compiler\n :param kernel_file: path tothe\n \"\"\"\n kernel_name = \"ocl_lut.cl\"\n if kernel_file is None:\n if os.path.isfile(kernel_name):\n kernel_file = os.path.abspath(kernel_name)\n else:\n kernel_file = get_cl_file(\"pyfai:openCL/\" + kernel_name)\n else:\n kernel_file = str(kernel_file)\n kernel_src = open(kernel_file).read()\n compile_options = \"-D BINS=%i -D POS_SIZE=%i -D SIZE=%i -D WORKGROUP_SIZE=%i -D EPS=%e\" % \\\n (self.bins, self.pos_size, self.size, self.workgroup_size, numpy.finfo(numpy.float32).eps)\n logger.info(\"Compiling file %s with options %s\", kernel_file, compile_options)\n try:\n self._program = pyopencl.Program(self._ctx, kernel_src).build(options=compile_options)\n except pyopencl.MemoryError as error:\n raise MemoryError(error)\n\n def _calc_boundaries(self):\n \"\"\"\n comments\n \"\"\"\n # # # # # # # # Check for memory# # # # # # # #\n size_of_float = numpy.dtype(numpy.float32).itemsize\n\n ualloc = (self.pos_size * size_of_float)\n ualloc += (self.workgroup_size * 4 * size_of_float)\n ualloc += (4 * size_of_float)\n memory = self.device.memory\n if ualloc >= memory:\n raise MemoryError(\"Fatal error in _allocate_buffers. 
Not enough device memory for buffers (%lu requested, %lu available)\" % (ualloc, memory))\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # allocate memory # # # # # # # #\n try:\n # No returned event for profiling\n # self._cl_mem[\"pos\"] = pyopencl.array.to_device(self._queue, self.pos)\n # self._cl_mem[\"preresult\"] = pyopencl.array.empty(self._queue, (4*self.workgroup_size,), dtype=numpy.float32)\n # self._cl_mem[\"minmax\"] = pyopencl.array.empty(self._queue, (4,), dtype=numpy.float32)\n self._cl_mem[\"pos\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size_of_float * self.pos_size)\n self._cl_mem[\"preresult\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * 4 * self.workgroup_size)\n self._cl_mem[\"minmax\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * 4)\n except pyopencl.MemoryError as error:\n self._free_device_memory()\n raise MemoryError(error)\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # # move data # # # # # # # # # #\n with self._sem:\n copy_pos = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"pos\"], self.pos)\n self.events += [(\"copy pos\", copy_pos)]\n # # # # # # # # set arguments # # # # # # # # #\n self._cl_kernel_args[\"reduce_minmax_1\"] = [self._cl_mem[\"pos\"], self._cl_mem[\"preresult\"]]\n self._cl_kernel_args[\"reduce_minmax_2\"] = [self._cl_mem[\"preresult\"], self._cl_mem[\"minmax\"]]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # do the minmax reduction # # # # # #\n with self._sem:\n reduce_minmax_1 = self._program.reduce_minmax_1(self._queue, (self.workgroup_size * self.workgroup_size,), (self.workgroup_size,), *self._cl_kernel_args[\"reduce_minmax_1\"])\n self.events += [(\"reduce_minmax_1\", reduce_minmax_1)]\n reduce_minmax_2 = self._program.reduce_minmax_2(self._queue, (self.workgroup_size,), (self.workgroup_size,), *self._cl_kernel_args[\"reduce_minmax_2\"])\n self.events += [(\"reduce_minmax_2\", reduce_minmax_2)]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # release the redundant data # # # # #\n self._cl_mem[\"preresult\"].release()\n self._cl_mem.pop(\"preresult\")\n # # # # # # # # # # # # # # # # # # # # # # # #\n\n # check memory of d_pos + d_preresult + d_minmax\n # load d_pos\n # allocate d_preresult\n # allocate d_minmax\n # run reduce1\n # run reduce2\n # save reference to d_minMax\n # free d_preresult\n\n def _calc_LUT(self):\n \"\"\"\n first need to call lut_1 and lut_2 to find the size of the LUT and the lut_3 to create it\n \"\"\"\n # # # # # # # # Check for memory# # # # # # # #\n size_of_float = numpy.dtype(numpy.float32).itemsize\n size_of_int = numpy.dtype(numpy.int32).itemsize\n\n ualloc = (self.pos_size * size_of_float) # pos\n ualloc += (4 * size_of_float) # minmax\n ualloc += (2 * size_of_float) * 2 # pos0Range, pos1Range\n ualloc += (self.bins * size_of_int) # outMax\n ualloc += (1 * size_of_int) # lutsize\n ualloc += ((self.bins + 1) * size_of_int) # idx_ptr\n memory = self.device.memory\n if ualloc >= memory:\n raise MemoryError(\"Fatal error in _allocate_buffers. 
Not enough device memory for buffers (%lu requested, %lu available)\" % (ualloc, memory))\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # allocate memory # # # # # # # #\n try:\n # self._cl_mem[\"pos0Range\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size_of_float * 2)\n # self._cl_mem[\"pos1Range\"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size_of_float * 2)\n self._cl_mem[\"outMax\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * self.bins)\n self._cl_mem[\"lutsize\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * 1)\n self._cl_mem[\"idx_ptr\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * (self.bins + 1))\n except pyopencl.MemoryError as error:\n self._free_device_memory()\n raise MemoryError(error)\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # # move data # # # # # # # # # #\n # with self._sem:\n # copy_pos0Range = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"pos0Range\"], self.pos0Range)\n # self.events += [(\"copy pos0Range\", copy_pos0Range)]\n # copy_pos1Range = pyopencl.enqueue_copy(self._queue, self._cl_mem[\"pos1Range\"], self.pos1Range)\n # self.events += [(\"copy pos1Range\", copy_pos1Range)]\n # # # # # # # # set arguments # # # # # # # # #\n self._cl_kernel_args[\"memset_outMax\"] = [self._cl_mem[\"outMax\"]]\n self._cl_kernel_args[\"lut_1\"] = [self._cl_mem[\"pos\"], self._cl_mem[\"minmax\"], self.pos0Range.data, self.pos1Range.data, self._cl_mem[\"outMax\"]]\n self._cl_kernel_args[\"lut_2\"] = [self._cl_mem[\"outMax\"], self._cl_mem[\"idx_ptr\"], self._cl_mem[\"lutsize\"]]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # start the LUT creation # # # # # #\n memset_size = (self.bins + self.workgroup_size - 1) & ~(self.workgroup_size - 1),\n global_size = (self.size + self.workgroup_size - 1) & ~(self.workgroup_size - 1),\n with self._sem:\n memset_outMax = self._program.memset_outMax(self._queue, memset_size, (self.workgroup_size,), *self._cl_kernel_args[\"memset_outMax\"])\n self.events += [(\"memset_outMax\", memset_outMax)]\n lut_1 = self._program.lut_1(self._queue, global_size, (self.workgroup_size,), *self._cl_kernel_args[\"lut_1\"])\n self.events += [(\"lut_1\", lut_1)]\n lut_2 = self._program.lut_2(self._queue, (1,), (1,), *self._cl_kernel_args[\"lut_2\"])\n self.events += [(\"lut_2\", lut_2)]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # get the lutsize # # # # # # # #\n self.lutsize = numpy.ndarray(1, dtype=numpy.int32)\n get_lutsize = pyopencl.enqueue_copy(self._queue, self.lutsize, self._cl_mem[\"lutsize\"])\n self.events += [(\"get_lutsize\", get_lutsize)]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # check memory # # # # # # # #\n ualloc += (self.lutsize * size_of_int) # indices\n ualloc += (self.lutsize * size_of_float) # data\n if ualloc >= memory:\n raise MemoryError(\"Fatal error in _allocate_buffers. 
Not enough device memory for buffers (%lu requested, %lu available)\" % (ualloc, memory))\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # allocate memory # # # # # # # #\n try:\n self._cl_mem[\"indices\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_int * self.lutsize[0])\n self._cl_mem[\"data\"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * self.lutsize[0])\n except pyopencl.MemoryError as error:\n self._free_device_memory()\n raise MemoryError(error)\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # # # # set arguments # # # # # # # # #\n self._cl_kernel_args[\"lut_3\"] = [self._cl_mem[\"pos\"], self._cl_mem[\"minmax\"], self.pos0Range.data, self.pos1Range.data, self._cl_mem[\"outMax\"], self._cl_mem[\"idx_ptr\"], self._cl_mem[\"indices\"], self._cl_mem[\"data\"]]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # finish the LUT creation # # # # #\n with self._sem:\n memset_outMax = self._program.memset_outMax(self._queue, memset_size, (self.workgroup_size,), *self._cl_kernel_args[\"memset_outMax\"])\n self.events += [(\"memset_outMax\", memset_outMax)]\n lut_3 = self._program.lut_3(self._queue, global_size, (self.workgroup_size,), *self._cl_kernel_args[\"lut_3\"])\n self.events += [(\"lut_3\", lut_3)]\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # # # # release the redundant data # # # # #\n self._cl_mem[\"pos\"].release()\n self._cl_mem.pop(\"pos\")\n self._cl_mem[\"minmax\"].release()\n self._cl_mem.pop(\"minmax\")\n # self._cl_mem[\"pos0Range\"].release()\n # self._cl_mem.pop(\"pos0Range\")\n # self._cl_mem[\"pos1Range\"].release()\n # self._cl_mem.pop(\"pos1Range\")\n self._cl_mem[\"outMax\"].release()\n self._cl_mem.pop(\"outMax\")\n self._cl_mem[\"lutsize\"].release()\n self._cl_mem.pop(\"lutsize\")\n # # # # # # # # # # # # # # # # # # # # # # # #\n\n # check memory of d_pos + d_minmax + d_outMax + d_lutsize\n # allocate d_outMax\n # allocate d_lutsize\n # memset d_outMax\n # run lut1\n # run lut2\n # save d_lutsize\n # memset d_outMax\n # allocate d_data\n # allocate d_indices\n # run lut3\n # free d_pos\n # free d_minMax\n # free d_lutsize\n # run lut4\n # free d_outMax\n\n def _free_device_memory(self):\n \"\"\"\n free all memory allocated on the device\n \"\"\"\n for buffer_name in list(self._cl_mem.keys())[:]:\n buf = self._cl_mem.pop[buffer_name]\n if buf is not None:\n try:\n buf.release()\n except pyopencl.LogicError:\n logger.error(\"Error while freeing buffer %s\", buffer_name)\n\n def get_platform(self):\n pass\n\n def get_queue(self):\n pass\n"
] | [
[
"numpy.ndarray",
"numpy.dtype",
"numpy.finfo",
"numpy.empty"
]
] |
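The kernel launch sizes in the OpenCL LUT code above are rounded up to a multiple of the workgroup size with a bitwise mask, which is only valid when the workgroup size is a power of two. A minimal sketch of that rounding, plus a divisor-agnostic alternative (the function names are illustrative, not from the original file):

    def round_up_pow2(n, wg):
        # Valid only when wg is a power of two: adding wg - 1 and masking
        # clears the low bits, i.e. rounds n up to the next multiple of wg.
        return (n + wg - 1) & ~(wg - 1)

    def round_up_any(n, wg):
        # Divisor-agnostic equivalent using integer division.
        return ((n + wg - 1) // wg) * wg

    assert round_up_pow2(1000, 64) == round_up_any(1000, 64) == 1024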
seib2/PypeIt | [
"4c68b38cb907345a480d7afee58200a05ecd4556"
] | [
"pypeit/tests/test_save.py"
] | [
"\"\"\"\nModule to run tests on arsave\n\"\"\"\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom astropy import units\nfrom astropy.io import fits\n\nfrom pypeit import specobjs\nfrom pypeit.core import save\n\nfrom pypeit.tests.tstutils import dummy_fitstbl\nfrom pypeit.spectrographs import util\n\ndef data_path(filename):\n data_dir = os.path.join(os.path.dirname(__file__), 'files')\n return os.path.join(data_dir, filename)\n\n\ndef mk_specobj(flux=5, objid=500):\n # specobj\n npix = 100\n specobj = specobjs.SpecObj((100,100), 0, (0.4,0.6), objtype='science',\n spat_pixpos=300)\n specobj.boxcar = dict(wave=np.arange(npix)*units.AA, counts=np.ones(npix)*flux)\n specobj.optimal = dict(wave=np.arange(npix)*units.AA, counts=np.ones(npix)*flux-0.5)\n specobj.objid = objid\n specobj.trace_spat = np.arange(npix) / npix\n specobj.fwhmfit = np.arange(npix) / npix\n # Return\n return specobj\n\n\ndef test_save2d_fits():\n #settings.dummy_settings()\n #fitsdict = arutils.dummy_fitsdict(nfile=1, spectrograph='none', directory=data_path(''))\n fitstbl = dummy_fitstbl(directory=data_path(''))\n # Kludge\n fitstbl.table.remove_column('filename')\n fitstbl['filename'] = 'b1.fits.gz'\n # Settings\n #settings.argflag['run']['directory']['science'] = data_path('')\n spectrograph = 'shane_kast_blue'\n # Fill with dummy images\n dum = np.ones((100,100))\n sci_dict = {}\n sci_dict[0] = {}\n sci_dict[0]['sciframe'] = dum\n sci_dict[0]['finalvar'] = dum * 2\n sci_dict[0]['finalsky'] = dum + 0.1\n\n sci_dict['meta'] = {}\n sci_dict['meta']['vel_corr'] = 0.\n sci_dict['meta']['ir_redux'] = False\n\n basename = 'test'\n scidx = 5\n path = fitstbl['directory'][scidx]\n ifile = fitstbl['filename'][scidx]\n rawfile = os.path.join(path, ifile)\n master_dir = data_path('MF')+'_'+spectrograph\n outfile = data_path('') + 'spec2d_{:s}.fits'.format(basename)\n # Create a dummy master_key_dict\n master_key_dict = dict(frame='', bpm='bpmkey',bias='',arc='',trace='',flat='')\n raw_hdr = fits.open(rawfile)[0].header\n save.save_2d_images(sci_dict, raw_hdr, spectrograph, master_key_dict, master_dir, outfile)\n # Read and test\n head0 = fits.getheader(data_path('spec2d_test.fits'))\n assert head0['PYPMFDIR'] == master_dir\n assert head0['BPMMKEY'] == 'bpm' # See save_2d_images; removes last 3 characters\n assert 'PYPEIT' in head0['PIPELINE']\n\n\ndef test_save1d_fits():\n \"\"\" save1d to FITS and HDF5\n \"\"\"\n # Init\n fitstbl = dummy_fitstbl(spectro_name='shane_kast_blue', directory=data_path(''))\n sobj = mk_specobj()\n specObjs = specobjs.SpecObjs([sobj])\n spectrograph = util.load_spectrograph('shane_kast_blue')\n # Write to FITS\n basename = 'test'\n outfile = data_path('') + 'spec1d_{:s}.fits'.format(basename)\n save.save_1d_spectra_fits(specObjs, fitstbl[5], spectrograph, outfile)\n\n\n# NEEDS REFACTORING\n#def test_save1d_hdf5():\n# \"\"\" save1d to FITS and HDF5\n# \"\"\"\n# # Dummy self\n# fitstbl = arsort.dummy_fitstbl(spectrograph='shane_kast_blue', directory=data_path(''))\n# slf = arsciexp.dummy_self(fitstbl=fitstbl)\n# # specobj\n# slf._specobjs = []\n# slf._specobjs.append([])\n# slf._specobjs[0].append([mk_specobj(objid=455), mk_specobj(flux=3., objid=555)])\n# # Write to HDF5\n# arsave.save_1d_spectra_hdf5(slf, fitstbl)\n\n"
] | [
[
"numpy.arange",
"numpy.ones"
]
] |
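The PypeIt test above asserts on header cards written by save_2d_images. A self-contained sketch of the same write-then-getheader pattern using only astropy and a temporary file (the card name is reused purely for illustration; nothing below comes from PypeIt itself):

    import os
    import tempfile
    import numpy as np
    from astropy.io import fits

    tmp = os.path.join(tempfile.mkdtemp(), "spec2d_demo.fits")
    hdu = fits.PrimaryHDU(np.zeros((10, 10)))
    hdu.header["PYPMFDIR"] = "MF_shane_kast_blue"   # illustrative card value
    hdu.writeto(tmp, overwrite=True)

    head0 = fits.getheader(tmp)
    assert head0["PYPMFDIR"] == "MF_shane_kast_blue"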
gskdhiman/zomato-recommendation | [
"76d050d654f5ae4db4801eadb065db324baacf5e"
] | [
"backend_code.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 2 00:14:39 2020\n\n@author: Gursewak\n\"\"\"\n\nimport pandas as pd\nimport re\nimport string\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.neighbors import NearestNeighbors\nfrom datetime import datetime\ndata_path = 'data.csv'\ndf = pd.read_csv(data_path)\nN = 3 # maximum recommendations\n\ncost_for_two = 'approx_cost(for two people)'\nlocation = 'listed_in(city)'\nlisting_type = 'listed_in(type)'\nlisting_city = 'listed_in(city)'\nonline_order = 'online_order'\n\n# making cost of two as float\ndf[cost_for_two]=df[cost_for_two].str.replace(\",\",'').astype(float)\n\ndef create_knn():\n STOPWORDS = set(stopwords.words('english'))\n url_pattern = re.compile(r'https?://\\S+|www\\.\\S+')\n def clean_data(text):\n text = text.translate(str.maketrans('', '', string.punctuation))\n text = \" \".join([word for word in str(text).split() if word not in STOPWORDS])\n return url_pattern.sub(r'', text)\n \n df[\"reviews_list\"] = df[\"reviews_list\"].apply(lambda x: clean_data(x))\n \n tfidf = TfidfVectorizer(analyzer='word', ngram_range=(1, 2), min_df=0, stop_words='english')\n \n corpus = df['reviews_list'].tolist()\n tfidf_matrix = tfidf.fit_transform(corpus )\n \n knn_recomm = NearestNeighbors(metric = 'cosine', algorithm = 'brute',n_neighbors=30)\n knn_recomm.fit(tfidf_matrix)\n return knn_recomm,tfidf\n\nknn_recomm,tfidf = create_knn()\n\ndef restaurant_recommend(user_input_text,budget,location,cuisine_type):\n start_time = datetime.now() \n user_inp_mat = tfidf.transform([user_input_text]) \n # user_inp_mat.shape\n score,idx = knn_recomm.kneighbors(user_inp_mat.reshape(1, -1))\n score_idx = dict(zip(idx[0],score[0]))\n df_user = df.iloc[idx[0]]\n \n df_loc = df_user\n if location is not None:\n df_loc = df_user[df_user['location'].str.lower().str.contains(location.lower())]\n \n df_budget = df_loc\n if budget is not None:\n df_budget = df_loc[df_loc[cost_for_two] <= budget]\n \n df_cuisine = df_budget\n if cuisine_type is not None:\n df_cuisine = df_budget[df_budget['cuisines'].str.lower().str.contains(cuisine_type.lower())]\n \n final_recommend = {}\n for idx,row in df_cuisine.iterrows():\n rest_name = row['name']\n score = score_idx[idx]\n score = str(round(score, 2)*100)+\" %\"\n final_recommend[rest_name] = score \n \n final_recommend = sorted(final_recommend.items(), key=lambda x: x[1], reverse=True)\n final_recommend = final_recommend[:N]\n recomendation_time = (datetime.now() -start_time).seconds\n return final_recommend,recomendation_time \n \n\n\n# restaurant_recommend(user_input_text = 'Lassi and paratha',\n# budget = 1000,\n# location = 'Koramangala',\n# cuisine_type= 'north indian')\n\n\n# restaurant_recommend(user_input_text = 'good ambiance restaurants, serving fish',\n# budget = None,\n# location = 'Koramangala',\n# cuisine_type= None)\n\n# restaurant_recommend(user_input_text = 'must visit restaurants',\n# budget = 1000,\n# location = None,\n# cuisine_type= 'north indian')\n\n\n# restaurant_recommend(user_input_text = 'best cakes',\n# budget = 1000,\n# location = 'Koramangala',\n# cuisine_type= None)\n\n\n# restaurant_recommend(user_input_text = 'authentic chicken biryani',\n# budget = 800,\n# location = 'BTM',\n# cuisine_type= None)\n"
] | [
[
"pandas.read_csv",
"sklearn.neighbors.NearestNeighbors",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
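The recommender above fits a TF-IDF matrix over review text and queries it with a cosine NearestNeighbors index. A small self-contained sketch of that retrieval step on a toy corpus (the corpus and query are made up for illustration):

    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.neighbors import NearestNeighbors

    corpus = [
        "great north indian food, amazing lassi and paratha",
        "authentic chicken biryani, friendly staff",
        "good ambiance, fresh fish dishes",
    ]
    tfidf = TfidfVectorizer(ngram_range=(1, 2), stop_words="english")
    matrix = tfidf.fit_transform(corpus)

    knn = NearestNeighbors(metric="cosine", algorithm="brute", n_neighbors=2)
    knn.fit(matrix)

    query = tfidf.transform(["lassi and paratha"])
    distances, indices = knn.kneighbors(query)
    print(indices[0], distances[0])  # nearest corpus rows and their cosine distances

Note that kneighbors returns cosine distances, so smaller values mean closer matches; any score derived from them should be ranked accordingly.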
Weilory/python-matplotlib-graphs | [
"4578c184daba587417becc6df1ad4566e881343a"
] | [
"graph/hist_bin.py"
] | [
"import pandas as pd\r\nfrom matplotlib import pyplot as plt\r\n\r\nplt.style.use(\"fivethirtyeight\")\r\n\r\npath = input(\"please input the age.csv file path here: \")\r\ndata = pd.read_csv(path)\r\nids = data[\"Responder_id\"]\r\nages = data[\"Age\"]\r\n\r\nbins = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\r\n\r\nplt.hist(ages, bins=bins, edgecolor=\"black\", log=True)\r\n\r\nmedian_age = 29\r\nred = \"#fc4f30\"\r\n\r\nplt.axvline(median_age, color=red, linewidth=5, label=\"Age Median\")\r\n\r\nplt.legend()\r\nplt.title(\"Ages of Respondents\")\r\nplt.xlabel(\"Ages\")\r\nplt.ylabel(\"Total Respondents\")\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n"
] | [
[
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xlabel"
]
] |
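The histogram script above hardcodes median_age = 29. A short sketch of the same plot with the median computed from the data itself, shown on synthetic ages since the age.csv file is not part of this entry:

    import numpy as np
    import pandas as pd
    from matplotlib import pyplot as plt

    ages = pd.Series(np.random.default_rng(0).integers(18, 80, size=500))
    bins = list(range(10, 101, 10))

    plt.hist(ages, bins=bins, edgecolor="black", log=True)
    plt.axvline(ages.median(), color="#fc4f30", linewidth=5, label="Age Median")
    plt.legend()
    plt.tight_layout()
    plt.show()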
hellomoto-ai/splatoon2-ml | [
"4bd24eed527d6b56ce4369b70d24f20058962383"
] | [
"spml/trainer/vae_gan.py"
] | [
"\"\"\"Training mechanism for VAE-GAN\"\"\"\nimport os\nimport time\nimport logging\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom spml import (\n image_util,\n loss_utils,\n)\nfrom . import (\n misc_utils,\n saved_model_manager,\n)\n\n_LG = logging.getLogger(__name__)\n\n\ndef _save_images(images, src_path, step, output_dir):\n src_name = os.path.splitext(os.path.basename(src_path))[0]\n save_path = os.path.join(\n output_dir, 'images', src_name, 'step_%d.png' % step)\n misc_utils.ensure_dir(save_path)\n\n images = [img.detach().cpu().numpy() for img in images]\n images = np.concatenate(images, axis=1)\n image_util.save_image(images, save_path)\n\n\ndef _log_header():\n fields = ' '.join(['%10s'] * 9) % (\n 'KLD', 'BETA', 'F_RECON',\n 'G_RECON', 'G_FAKE', 'D_REAL', 'D_RECON', 'D_FAKE', '[PIXEL]',\n )\n _LG.info('%5s %5s: %s', '', 'PHASE', fields)\n\n\n_LOGGED = {'last': 0}\n\n\ndef _log_loss(loss, phase, progress=None):\n if _LOGGED['last'] % 30 == 0:\n _log_header()\n _LOGGED['last'] += 1\n\n header = '' if progress is None else '%3d %%' % progress\n fields = ' '.join(['%10.2e'] * 9) % (\n loss['kld'], loss['beta'], loss['feats_recon'],\n loss['gen_recon'], loss['gen_fake'],\n loss['disc_orig'], loss['disc_recon'], loss['disc_fake'],\n loss['pixel'],\n )\n _LG.info('%5s %5s: %s', header, phase, fields)\n\n\ndef _get_latent_stats(z, z_std):\n z = z.detach().cpu().numpy()\n z_std = z_std.detach().cpu().numpy()\n return {\n 'z_mean': np.mean(z),\n 'z_min': np.min(z),\n 'z_max': np.max(z),\n 'z_var': np.var(z),\n 'z_std_mean': np.mean(z_std),\n 'z_std_min': np.min(z_std),\n 'z_std_max': np.max(z_std),\n 'z_std_var': np.var(z_std),\n }\n\n\nclass Trainer:\n def __init__(\n self, model, optimizers,\n train_loader, test_loader,\n device, output_dir,\n initial_beta=10.0,\n beta_step=0.1,\n target_kld=0.1,\n beta_momentum=0.1,\n samples=None,\n ):\n self.model = model.float().to(device)\n self.train_loader = train_loader\n self.test_loader = test_loader\n self.optimizers = optimizers\n self.device = device\n self.output_dir = output_dir\n\n self.beta = initial_beta\n self.beta_step = beta_step\n self.target_kld = target_kld\n self.beta_momentum = beta_momentum\n\n self.samples = samples\n\n self.saved_model_manager = saved_model_manager.SavedModelManager()\n\n fields = [\n 'PHASE', 'TIME', 'STEP', 'EPOCH', 'KLD', 'BETA', 'F_RECON',\n 'G_RECON', 'G_FAKE', 'D_REAL', 'D_RECON', 'D_FAKE', 'PIXEL',\n 'Z_MEAN', 'Z_MIN', 'Z_MAX', 'Z_VAR',\n 'Z_STD_MEAN', 'Z_STD_MIN', 'Z_STD_MAX', 'Z_STD_VAR',\n ]\n logfile = open(os.path.join(output_dir, 'result.csv'), 'w')\n self.writer = misc_utils.CSVWriter(fields, logfile)\n\n self.step = 0\n self.epoch = 0\n\n self.latent_stats = loss_utils.MovingStats(beta_momentum)\n\n def _write(self, phase, loss, stats):\n self.writer.write(\n PHASE=phase, STEP=self.step, EPOCH=self.epoch, TIME=time.time(),\n KLD=loss['kld'], BETA=loss['beta'],\n F_RECON=loss['feats_recon'],\n G_RECON=loss['gen_recon'], G_FAKE=loss['gen_fake'],\n D_REAL=loss['disc_orig'],\n D_RECON=loss['disc_recon'], D_FAKE=loss['disc_fake'],\n PIXEL=loss['pixel'],\n Z_MEAN=stats['z_mean'], Z_VAR=stats['z_var'],\n Z_MIN=stats['z_min'], Z_MAX=stats['z_max'],\n Z_STD_MEAN=stats['z_std_mean'], Z_STD_VAR=stats['z_std_var'],\n Z_STD_MIN=stats['z_std_min'], Z_STD_MAX=stats['z_std_max'],\n )\n\n def save(self):\n filename = 'epoch_%s_step_%s.pt' % (self.epoch, self.step)\n output = os.path.join(self.output_dir, 'checkpoints', filename)\n\n _LG.info('Saving checkpoint at %s', output)\n 
misc_utils.ensure_dir(output)\n torch.save({\n 'model': self.model.state_dict(),\n 'optimizers': {\n key: opt.state_dict()\n for key, opt in self.optimizers.items()\n },\n 'epoch': self.epoch,\n 'step': self.step,\n }, output)\n return output\n\n def manage_saved(self, path, loss):\n path = self.saved_model_manager.update(path, loss)\n if path:\n os.remove(path)\n\n def load(self, checkpoint):\n _LG.info('Loading checkpoint from %s', checkpoint)\n data = torch.load(checkpoint, map_location=self.device)\n self.model.load_state_dict(data['model'])\n for key, opt in data['optimizers'].items():\n self.optimizers[key].load_state_dict(opt)\n self.epoch = data['epoch']\n self.step = data['step']\n\n def _forward_gan(self, orig, update=False):\n # Update discriminator with original image\n preds_orig, _ = self.model.discriminator(orig)\n disc_loss_orig = loss_utils.bce(preds_orig, 1)\n if update:\n self.model.zero_grad()\n disc_loss_orig.backward()\n self.optimizers['discriminator'].step()\n\n # Update discriminator with reconstructed image\n recon, latent = self.model.vae(orig)\n preds_recon, _ = self.model.discriminator(recon.detach())\n disc_loss_recon = loss_utils.bce(preds_recon, 0)\n if update:\n self.model.zero_grad()\n disc_loss_recon.backward()\n self.optimizers['discriminator'].step()\n\n # Update generator with reconstructed image\n preds_recon, _ = self.model.discriminator(recon)\n gen_loss_recon = loss_utils.bce(preds_recon, 1)\n if update:\n self.model.zero_grad()\n gen_loss_recon.backward()\n self.optimizers['decoder'].step()\n\n # Update discriminator with fake image\n sample = torch.randn_like(latent[0], requires_grad=True)\n fake = self.model.vae.decoder(sample)\n preds_fake, _ = self.model.discriminator(fake.detach())\n disc_loss_fake = loss_utils.bce(preds_fake, 0)\n if update:\n self.model.zero_grad()\n disc_loss_fake.backward()\n self.optimizers['discriminator'].step()\n\n # Update generator with fake image\n preds_fake, _ = self.model.discriminator(fake)\n gen_loss_fake = loss_utils.bce(preds_fake, 1)\n if update:\n self.model.zero_grad()\n gen_loss_fake.backward()\n self.optimizers['decoder'].step()\n\n return {\n 'disc_orig': disc_loss_orig.item(),\n 'disc_recon': disc_loss_recon.item(),\n 'disc_fake': disc_loss_fake.item(),\n 'gen_recon': gen_loss_recon.item(),\n 'gen_fake': gen_loss_fake.item(),\n }\n\n def _forward_vae(self, orig, update=False):\n # Update feature\n recon, _ = self.model.vae(orig)\n _, feats_orig = self.model.discriminator(orig)\n _, feats_recon = self.model.discriminator(recon)\n feats_loss = F.mse_loss(input=feats_recon, target=feats_orig)\n if update:\n self.model.zero_grad()\n feats_loss.backward()\n self.optimizers['encoder'].step()\n self.optimizers['decoder'].step()\n\n # KLD\n sample, latent = self.model.vae.encoder(orig)\n latent_stats = self.latent_stats(sample, update)\n kld = torch.mean(loss_utils.kld_loss(*latent_stats))\n if update:\n beta_latent_loss = self.beta * kld\n self.model.zero_grad()\n beta_latent_loss.backward()\n self.optimizers['encoder'].step()\n\n # Adjust beta\n if update:\n kld_error = kld.item() - self.target_kld\n self.beta += self.beta_step * kld_error\n self.beta = max(1e-3, self.beta)\n\n loss = {\n 'kld': kld.item(),\n 'beta': self.beta,\n 'feats_recon': feats_loss.item(),\n }\n stats = _get_latent_stats(*latent)\n return recon, loss, stats\n\n def _get_pixel_loss(self, orig):\n recon, _ = self.model.vae(orig)\n return F.mse_loss(orig, recon)\n\n def _forward(self, orig, update=False):\n loss_gan = 
self._forward_gan(orig, update=update)\n recon, loss_vae, stats = self._forward_vae(orig, update=update)\n with torch.no_grad():\n pixel_loss = self._get_pixel_loss(orig)\n\n loss = {'pixel': pixel_loss.item()}\n loss.update(loss_vae)\n loss.update(loss_gan)\n return recon, loss, stats\n\n def train_batch(self, batch):\n self.model.train()\n orig = batch['image'].float().to(self.device)\n _, loss, stats = self._forward(orig, update=True)\n self._write('train', loss, stats)\n return loss\n\n def test(self):\n with torch.no_grad():\n return self._test()\n\n def _test(self):\n self.model.eval()\n loss_tracker = misc_utils.StatsTracker()\n stats_tracker = misc_utils.StatsTracker()\n for i, batch in enumerate(self.test_loader):\n orig, path = batch['image'].float().to(self.device), batch['path']\n recon, loss, stats = self._forward(orig, update=False)\n loss_tracker.update(loss)\n stats_tracker.update(stats)\n if i % 10 == 0:\n _save_images(\n (orig[0], recon[0]), path[0],\n self.step, self.output_dir)\n self._write('test', loss_tracker, stats_tracker)\n _log_loss(loss_tracker, phase='Test')\n return loss_tracker\n\n def generate(self, samples=None):\n samples = self.samples if samples is None else samples\n with torch.no_grad():\n self._generate(samples)\n\n def _generate(self, samples):\n self.model.eval()\n recons = self.model.vae.decoder(samples)\n for i, recon in enumerate(recons):\n path = 'sample_%d.png' % i\n _save_images([recon], path, self.step, self.output_dir)\n\n def train_one_epoch(self, report_every=180, test_interval=1000):\n last_report = 0\n for i, batch in enumerate(self.train_loader):\n loss = self.train_batch(batch)\n self.step += 1\n if time.time() - last_report > report_every:\n progress = 100. * i / len(self.train_loader)\n _log_loss(loss, 'Train', progress)\n last_report = time.time()\n if self.step % test_interval == 0:\n self.generate()\n loss = self.test()\n path = self.save()\n self.manage_saved(path, loss['pixel'])\n self.epoch += 1\n\n def __repr__(self):\n opt = '\\n'.join([\n '%s: %s' % (key, val) for key, val in self.optimizers.items()\n ])\n beta = '\\n'.join([\n 'Beta: %s' % self.beta,\n 'Beta Step: %s' % self.beta_step,\n 'Target KLD: %s' % self.target_kld,\n 'Beta Momuntum: %s' % self.beta_momentum,\n ])\n return 'Epoch: %d\\nStep: %d\\nModel: %s\\nOptimizers: %s\\n%s\\n' % (\n self.epoch, self.step, self.model, opt, beta\n )\n"
] | [
[
"torch.nn.functional.mse_loss",
"torch.randn_like",
"torch.load",
"numpy.var",
"torch.no_grad",
"numpy.max",
"numpy.min",
"numpy.concatenate",
"numpy.mean"
]
] |
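The VAE-GAN trainer above adapts the KL weight with a proportional rule: beta grows when the measured KLD exceeds target_kld and shrinks otherwise, clamped from below. A few lines isolating that controller (default values copied from the constructor; the KLD inputs in the loop are made up):

    def update_beta(beta, kld, target_kld=0.1, beta_step=0.1, beta_min=1e-3):
        # Mirrors the beta adjustment in _forward_vae: move beta in proportion
        # to the KLD error and keep it strictly positive.
        beta += beta_step * (kld - target_kld)
        return max(beta_min, beta)

    beta = 10.0
    for kld in (0.5, 0.3, 0.12, 0.08):
        beta = update_beta(beta, kld)
    print(beta)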
BOSS-Danuphan/coralinedb | [
"23458c82528ac7ceb78c17e23163d542ad96b79a"
] | [
"coralinedb/coralinedb.py"
] | [
"\"\"\"\n Coraline DB Manager - This will take care of reading and saving tables to SQL database\n\"\"\"\n\n# import python packages\nimport pandas as pd\nimport time\n\n\nclass BaseDB:\n \"\"\"\n Base class for all DB\n These functions must be inherited by sub-class\n - create_connection\n - show_databases\n - show_tables\n \"\"\"\n def __init__(self, host, username, passwd):\n \"\"\"\n Initial object by specify host username and password for database connection\n :param host: host name of the database (str)\n :param username: username of the database (str)\n :param passwd: password of the database (str)\n \"\"\"\n self.host = host\n self.username = username\n self.passwd = passwd\n self.engines = {}\n\n def __del__(self):\n \"\"\"\n On delete object\n :return:\n \"\"\"\n for en_key in self.engines:\n engine = self.engines[en_key]\n try:\n engine.dispose()\n except :\n # engine cannot be dispose #TODO fix it!!\n pass\n\n def get_engine(self, db_name):\n \"\"\"\n Get engine for db name\n :return:\n \"\"\"\n pass\n\n def create_connection(self, db_name=None):\n \"\"\"\n Create Connection and engine for database\n :param: db_name : name of connecting database (str)\n :return: engine and connection\n \"\"\"\n connected = False\n max_tries = 10\n\n # if db_name is not defined, let it be empty string\n if db_name is None:\n db_name = \"\"\n\n # Reconnect until max_tries exceeded\n while not connected and max_tries > 0:\n try:\n # create engine from db settings\n engine = self.get_engine(db_name)\n\n # Create connection for query\n connection = engine.connect()\n\n connected = True\n\n return engine, connection\n except Exception as e:\n print(\"Database Connection Error: {}\".format(e))\n print(\"Network is unreachable. Retrying to connect to database in 10 seconds...\")\n time.sleep(10)\n max_tries -= 1\n\n def try_decoration(self, func):\n \"\"\"\n Decoration for looping tries\n :return:\n \"\"\"\n while True:\n try:\n func()\n break\n except:\n print(\"\")\n\n def load_table(self, db_name, table_name):\n \"\"\"\n Load a table from database\n *The whole table will be download, please make sure you have enough memory*\n :param db_name: name of database (str)\n :param table_name: table name to be read (str)\n :return: pandas dataframe if table exists. Otherwise, None\n \"\"\"\n\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Check if table exists and read\n if engine.dialect.has_table(engine, table_name):\n sql = 'SELECT * FROM %s' % table_name\n result = pd.read_sql(sql, connection, coerce_float=True)\n else:\n print(table_name, \"does not exist\")\n result = None\n\n # Close connection\n connection.close()\n\n return result\n\n def load_tables(self, db_name, table_names):\n \"\"\"\n Load all tables from database\n *The whole table will be download, please make sure you have enough memory*\n :param db_name: name of database (str)\n :param table_names: list of table names (list of strings)\n :return: list of pandas dataframes if the corresponding table exists. 
Otherwise, None\n \"\"\"\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n dfs = []\n\n # Load each table\n for tbn in table_names:\n if engine.dialect.has_table(engine, tbn):\n df = pd.read_sql('SELECT * FROM %s' % tbn, connection, coerce_float=True)\n else:\n print(tbn, \"does not exist\")\n df = None\n dfs.append(df)\n\n # Close connection\n connection.close()\n\n return dfs\n\n def save_table(self, df, db_name, table_name, **kwargs):\n \"\"\"\n Save pandas dataframe to database\n :param df: dataframe to be save (pandas dataframe)\n :param db_name: name of database (str)\n :param table_name: name of table (str)\n :param kwargs: pandas to_sql arguments e.g. if_exists, dtype, ...\n :return:\n \"\"\"\n\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Set default if_exists to replace\n if 'if_exists' not in kwargs:\n kwargs['if_exists'] = 'replace'\n\n # Write df to database\n df.to_sql(name=table_name, con=engine, index=False, **kwargs)\n\n # Close connection\n connection.close()\n\n def get_databases(self):\n \"\"\"\n list of all accessable databases on this host\n :return: list of database names\n \"\"\"\n pass\n\n def get_tables(self, db_name):\n \"\"\"\n List all tables in database\n :param db_name: database name (str)\n :return: list of table names\n \"\"\"\n pass\n\n def query(self, sql_statement, db_name=None):\n \"\"\"\n Run SQL query\n :param sql_statement: SQL statement (str)\n :param db_name: database name\n :return:\n \"\"\"\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n result = pd.read_sql(sql_statement, connection, coerce_float=True)\n\n # Close connection\n connection.close()\n\n return result\n\n def get_count(self, db_name, table_name):\n \"\"\"\n Get number of rows of a table\n :param db_name: database name (str)\n :param table_name: table name (str)\n :return:\n \"\"\"\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Check if table exists\n if engine.dialect.has_table(engine, table_name):\n sql = 'select count(*) from %s;' % table_name\n result = pd.read_sql(sql, connection, coerce_float=True).iloc[:, 0].values[0]\n else:\n print(table_name, \"does not exist\")\n result = None\n\n # Close connection\n connection.close()\n\n return result\n\n def execute(self, sql_statement, db_name=None):\n \"\"\"\n Execute SQL Statement to database\n :param sql_statement: sql statement (str)\n :param db_name: database name (str)\n :return:\n \"\"\"\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Execute SQL\n connection.execute(sql_statement)\n\n # Close connection\n connection.close()\n\n\ndef print_help():\n \"\"\"\n print help\n :return:\n \"\"\"\n print(\"Please go to https://pypi.org/project/coralinedb/ to see how to use the package\")\n\n"
] | [
[
"pandas.read_sql"
]
] |
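BaseDB above wraps pandas.read_sql over a SQLAlchemy connection. A self-contained stand-in using an in-memory sqlite3 database, so the load/count pattern can be tried without a database server (table and column names are invented):

    import sqlite3
    import pandas as pd

    conn = sqlite3.connect(":memory:")
    pd.DataFrame({"id": [1, 2, 3], "name": ["a", "b", "c"]}).to_sql(
        "users", conn, index=False)

    df = pd.read_sql("SELECT * FROM users", conn)             # load_table analogue
    count = pd.read_sql("SELECT COUNT(*) AS n FROM users",
                        conn)["n"].iloc[0]                    # get_count analogue
    print(len(df), count)
    conn.close()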
yonesuke/prax | [
"6957776b11c297d4463fba6d15cd06671dfbd45f"
] | [
"examples/hodgkinhuxley.py"
] | [
"import jax.numpy as jnp\nfrom prax import Oscillator\nfrom jax.config import config; config.update(\"jax_enable_x64\", True)\n\nimport matplotlib.pyplot as plt\n\nclass HodgkinHuxley(Oscillator):\n def __init__(self, input_current, C=1.0, G_Na=120.0, G_K=36.0, G_L=0.3, E_Na=50.0, E_K=-77.0, E_L=-54.4, dt=0.01, eps=10**-5):\n super().__init__(n_dim=4, dt=dt, eps=eps)\n self.input_current = input_current\n self.C = C\n self.G_Na = G_Na\n self.G_K = G_K\n self.G_L = G_L\n self.E_Na = E_Na\n self.E_K = E_K\n self.E_L = E_L\n\n def alpha_m(self, V):\n return 0.1*(V+40.0)/(1.0 - jnp.exp(-(V+40.0) / 10.0))\n \n def beta_m(self, V):\n return 4.0*jnp.exp(-(V+65.0) / 18.0)\n \n def alpha_h(self, V):\n return 0.07*jnp.exp(-(V+65.0) / 20.0)\n \n def beta_h(self, V):\n return 1.0/(1.0 + jnp.exp(-(V+35.0) / 10.0))\n \n def alpha_n(self, V):\n return 0.01*(V+55.0)/(1.0 - jnp.exp(-(V+55.0) / 10.0))\n \n def beta_n(self, V):\n return 0.125*jnp.exp(-(V+65) / 80.0)\n\n def forward(self, state):\n V, m, h, n = state\n dVdt = self.G_Na * (m ** 3) * h * (self.E_Na - V) + self.G_K * (n ** 4) * (self.E_K - V) + self.G_L * (self.E_L - V) + self.input_current\n dVdt /= self.C\n dmdt = self.alpha_m(V) * (1.0 - m) - self.beta_m(V) * m\n dhdt = self.alpha_h(V) * (1.0 - h) - self.beta_h(V) * h\n dndt = self.alpha_n(V) * (1.0 - n) - self.beta_n(V) * n\n return jnp.array([dVdt, dmdt, dhdt, dndt])\n\nmodel = HodgkinHuxley(input_current=30.0)\ninit_val = jnp.array([-75, 0.6, 0.05, 0.32])\nmodel.find_periodic_orbit(init_val)\nmodel.calc_phase_response()\n\nplt.figure(figsize=[12,8])\n\nplt.subplot(2,2,1)\nplt.title(\"periodic orbit\")\nplt.xlabel(\"t\")\nplt.ylabel(\"V\")\nplt.plot(model.ts, model.periodic_orbit[:, 0])\n\nplt.subplot(2,2,2)\nplt.title(\"phase response curve\")\nplt.plot(model.ts, model.phase_response_curve[:,0])\nplt.legend(labels=[\"$Z_V$\"])\nplt.xlabel(\"t\")\nplt.ylabel(\"$Z_V$\")\n\nplt.subplot(2,2,3)\nplt.xlabel(\"t\")\nplt.ylabel(\"m,h,n\")\nplt.plot(model.ts, model.periodic_orbit[:, 1:])\n\nplt.subplot(2,2,4)\nplt.plot(model.ts, model.phase_response_curve[:,1:])\nplt.legend(labels=[\"$Z_m$\",\"$Z_h$\",\"$Z_n$\"])\nplt.xlabel(\"t\")\nplt.ylabel(\"$Z_m,Z_h,Z_n$\")\n\nplt.savefig(\"hodgkinhuxley.svg\")"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
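The Hodgkin-Huxley example above relies on the prax Oscillator class to find the periodic orbit and phase response. The vector field itself can be integrated with plain forward Euler and NumPy, independent of that API; the constants, time step, and initial state below are copied from the example:

    import numpy as np

    def hh_rhs(state, I=30.0, C=1.0, G_Na=120.0, G_K=36.0, G_L=0.3,
               E_Na=50.0, E_K=-77.0, E_L=-54.4):
        V, m, h, n = state
        a_m = 0.1 * (V + 40.0) / (1.0 - np.exp(-(V + 40.0) / 10.0))
        b_m = 4.0 * np.exp(-(V + 65.0) / 18.0)
        a_h = 0.07 * np.exp(-(V + 65.0) / 20.0)
        b_h = 1.0 / (1.0 + np.exp(-(V + 35.0) / 10.0))
        a_n = 0.01 * (V + 55.0) / (1.0 - np.exp(-(V + 55.0) / 10.0))
        b_n = 0.125 * np.exp(-(V + 65.0) / 80.0)
        dV = (G_Na * m**3 * h * (E_Na - V) + G_K * n**4 * (E_K - V)
              + G_L * (E_L - V) + I) / C
        return np.array([dV,
                         a_m * (1.0 - m) - b_m * m,
                         a_h * (1.0 - h) - b_h * h,
                         a_n * (1.0 - n) - b_n * n])

    state, dt = np.array([-75.0, 0.6, 0.05, 0.32]), 0.01
    for _ in range(10000):          # 100 ms of forward Euler
        state = state + dt * hh_rhs(state)
    print(state[0])                 # membrane potential after integration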
LukiBa/zybo_face | [
"5f229818727b65ffa82efee2f63522234364fbe2"
] | [
"PC/application_pc.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 1 18:09:27 2021\n\n@author: lukas\n\"\"\"\n\nimport cv2\nimport dlib\nimport numpy as np\nimport timeit\nimport utils\nimport queue\nimport multiprocessing\nimport pathlib\nimport argparse\nimport time\n\n\ndef _create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--descriptor_file', type=str,\n default='./saved_descriptors', help='path to descriptor file')\n parser.add_argument('--threshold', type=float, default=0.6,\n help='Threshold of euclidean distance to distinguish persons.')\n parser.add_argument('--max_angle', type=float, default=4.0, help='maximum rotation angle of the face.')\n parser.add_argument('--max_fps', type=float, default=5.0, help='maximum frame rate of the application.')\n parser.add_argument(\n '--cam_url', type=str,\n default=\"http://10.0.0.241/zm/cgi-bin/nph-zms?mode=jpeg&monitor=2&maxfps=5&scale=100&user=admin&pass=admin\",\n help=\"IP camera url including username and password\")\n parser.add_argument('--landmarkPredictor', type=str,\n default=\"../dlib_models/shape_predictor_68_face_landmarks.dat\",\n help=\"Path to dlib 68 face landmark predictor: shape_predictor_68_face_landmarks.dat\")\n parser.add_argument('--faceDescriptor', type=str,\n default=\"../dlib_models/dlib_face_recognition_resnet_model_v1.dat\",\n help=\"Path to dlibs face recognition model: dlib_face_recognition_resnet_model_v1.dat\")\n return parser.parse_args()\n\n\nclass StateMachine():\n def __init__(self, url, predictorPath, facerecPath, descriptorFilePath,\n threshold=0.6, maxFps: float = 5.0, imgSize: int = 384,\n maxAngle: float = 4.0, MaxMovement=50.0, showLandmarks: bool = False) -> None:\n\n predictor = dlib.shape_predictor(predictorPath)\n detector = dlib.get_frontal_face_detector()\n\n self.__decriptorHandler = utils.Descriptor_FileHandler(descriptorFilePath, threshold)\n\n self.__minLatency = 1000.0/maxFps\n self.__imgSize = imgSize\n self.__maxAngle = maxAngle\n self.__MaxMovement = MaxMovement\n self.__showLandmarks = showLandmarks\n\n self.__state = self.___waitForFace\n self.__imgPos = np.zeros((4), dtype=np.int32)\n self.__name = \"processing..\"\n self.__faceDetected = False\n self.__score = 0.0\n self.__maxMissDetection = 2\n self.__missDetections = 0\n self.__ReqHeadRot = 8.0\n\n self.__imgQueue = queue.Queue(maxsize=3)\n self.__detectQueue = queue.Queue(maxsize=3)\n self.__faceRecQueueIn = multiprocessing.Queue(maxsize=3)\n self.__faceRecQueueOut = multiprocessing.Queue(maxsize=3)\n\n self.__ImageWorker = utils.Image_loader(self.__imgQueue, url, imgSize,\n maxFps)\n self.__DetectionWorker = utils.Detector(self.__imgQueue, self.__detectQueue,\n detector, predictor)\n\n self.__FaceRecWorker = utils.FaceDecriptorProcess(self.__faceRecQueueIn, self.__faceRecQueueOut, \n facerecPath)\n\n self.__ImageWorker()\n self.__DetectionWorker()\n self.__FaceRecWorker()\n\n def __del__(self) -> None:\n return self.__FaceRecWorker.kill()\n\n def __call__(self, key) -> np.ndarray:\n # execute current state\n return self.__state(key)\n\n def __discardCurrentDescriptor(self) -> None:\n # If Output Queue is empty -> Face descriptor computations are not done yet --> kill the process and restart it\n self.__name == \"processing..\"\n self.__faceDetected = False\n self.__score = 0.0\n \n if self.__faceRecQueueOut.empty():\n self.__FaceRecWorker.kill()\n self.__FaceRecWorker()\n return\n # If Output Queue is not empty -> Face descriptor computations are done --> discard the face descriptor in the Queue\n _ = 
self.__faceRecQueueOut.get()\n return\n\n def ___waitForFace(self, key) -> np.ndarray:\n rects, shapes, img = self.__detectQueue.get()\n\n # Multiple persons\n if len(rects) > 1:\n outtext = \"Error: Multiple faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n return img\n\n # No Person\n if len(rects) < 1:\n outtext = \"Error: No faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n return img\n\n shape_np = utils.shape_to_np(shapes[0])\n\n # Check face alignment\n rot_angle = utils.get_angle(shape_np[27, :]-shape_np[30, :],\n shape_np[27, :]-shape_np[33, :])\n tilt_angle = utils.get_angle(shape_np[45, :]-shape_np[36, :],\n np.array([1, 0]))\n\n if np.abs(rot_angle) > self.__maxAngle or np.abs(tilt_angle) > self.__maxAngle:\n outtext = \"Look straight into the camera. Current rot angle: \" + \\\n str(rot_angle) + \" tilt angle: \" + str(tilt_angle)\n\n utils.write_text_bottom(img, outtext, (0, 127, 255))\n return img\n\n # start Computation of face descriptor\n self.__faceRecQueueIn.put((shapes, img))\n\n # draw rectangle\n rect_np = utils.rect_to_np(rects[0], dtpye=np.int32) # convert dlib rectangle to numpy\n img = utils.drawBoxAndName(img, rect_np, self.__name, self.__score)\n\n # draw landmarks\n if self.__showLandmarks:\n for (px, py) in shape_np:\n cv2.circle(img, (int(px), int(py)), 1, (255, 0, 0), -1)\n\n # store face position for tracking\n self.__imgPos = rect_np\n\n # next state --> Do life check look left\n self.__state = self.__lifeCheckLookLeft\n return img\n\n def __lifeCheckLookLeft(self, key) -> np.ndarray:\n return self.__lifeCheckLookLeftRight(False, self.__lifeCheckLookRight)\n\n def __lifeCheckLookRight(self, key) -> np.ndarray:\n return self.__lifeCheckLookLeftRight(True, self.__tracking)\n\n def __lifeCheckLookLeftRight(self, nLeftRight, nextState) -> np.ndarray:\n rects, shapes, img = self.__detectQueue.get()\n # Multiple persons\n if len(rects) > 1:\n outtext = \"Error: Multiple faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n self.__missDetections += 1\n if self.__missDetections > self.__maxMissDetection:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n # No Person\n if len(rects) < 1:\n outtext = \"Error: No faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n self.__missDetections += 1\n if self.__missDetections > self.__maxMissDetection:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n # Check for feasible movement -> If face jumps around most properly it is no real person\n rect_np = utils.rect_to_np(rects[0], dtpye=np.int32) # convert dlib rectangle to numpy\n movement = np.linalg.norm(rect_np-self.__imgPos)\n if movement > self.__MaxMovement:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n shape_np = utils.shape_to_np(shapes[0])\n\n # Check face alignment\n rot_angle = utils.get_angle(shape_np[27, :]-shape_np[30, :],\n shape_np[27, :]-shape_np[33, :])\n\n if not nLeftRight and (rot_angle < self.__ReqHeadRot):\n outtext = \"Rotate your head LEFT. Current rot angle: \" + str(rot_angle)\n utils.write_text_bottom(img, outtext, (100, 255, 255))\n cv2.arrowedLine(img, (30, int(self.__imgSize/2)), (5, int(self.__imgSize/2)),\n (100, 255, 255), 3)\n elif nLeftRight and (rot_angle > (-self.__ReqHeadRot)):\n outtext = \"Rotate your head RIGHT. 
Current rot angle: \" + str(rot_angle)\n utils.write_text_bottom(img, outtext, (255, 255, 100))\n cv2.arrowedLine(img, (self.__imgSize-30, int(self.__imgSize/2)-5),\n (self.__imgSize, int(self.__imgSize/2)),\n (255, 255, 100), 3)\n else:\n outtext = \"Good\"\n utils.write_text_bottom(img, outtext, (0, 255, 0))\n # next state --> Do life check look left\n self.__state = nextState\n\n img = utils.drawBoxAndName(img, rect_np, self.__name, self.__score)\n if self.__showLandmarks:\n for (px, py) in shape_np:\n cv2.circle(img, (int(px), int(py)), 1, (255, 0, 0), -1)\n self.__imgPos = rect_np\n return img\n\n def __tracking(self, key):\n rects, shapes, img = self.__detectQueue.get()\n # Multiple persons\n if len(rects) > 1:\n outtext = \"Error: Multiple faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n self.__missDetections += 1\n if self.__missDetections > self.__maxMissDetection:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n # No Person\n if len(rects) < 1:\n outtext = \"Error: No faces detected.\"\n utils.write_text_bottom(img, outtext, (0, 0, 255))\n self.__missDetections += 1\n if self.__missDetections > self.__maxMissDetection:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n # Check for feasible movement -> If face jumps around most properly it is no real person\n rect_np = utils.rect_to_np(rects[0], dtpye=np.int32) # convert dlib rectangle to numpy\n movement = np.linalg.norm(rect_np-self.__imgPos)\n if movement > self.__MaxMovement:\n self.__discardCurrentDescriptor()\n self.__state = self.___waitForFace\n return img\n\n shape_np = utils.shape_to_np(shapes[0])\n\n # Check face alignment\n rot_angle = utils.get_angle(shape_np[27, :]-shape_np[30, :],\n shape_np[27, :]-shape_np[33, :])\n tilt_angle = utils.get_angle(shape_np[45, :]-shape_np[36, :],\n np.array([1, 0]))\n\n if self.__faceDetected:\n outtext = self.__name + \" detected with {}\\% confidence.\".format(self.__score)\n utils.write_text_bottom(img, outtext, (0, 255, 0))\n else:\n if self.__faceRecQueueOut.empty():\n outtext = self.__name\n utils.write_text_bottom(img, outtext, (255, 0, 0))\n else:\n faceDescriptor = self.__faceRecQueueOut.get()\n self.__faceDetected, self.__name = self.__decriptorHandler.exists(faceDescriptor)\n self.__score = 99.38 # dlib face recognition accuracy\n\n img = utils.drawBoxAndName(img, rect_np, self.__name, self.__score)\n if self.__showLandmarks:\n for (px, py) in shape_np:\n cv2.circle(img, (int(px), int(py)), 1, (255, 0, 0), -1)\n self.__imgPos = rect_np\n return img\n\n\ndef main(opt):\n\n stm = StateMachine(opt.cam_url, opt.landmarkPredictor, opt.faceDescriptor,\n opt.descriptor_file, opt.threshold,\n maxFps=opt.max_fps, imgSize=384, showLandmarks=True)\n\n key = 0\n while(True):\n img = stm(key)\n cv2.imshow(\"Face Detector\", img)\n\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n break\n\n # Destroy all the windows\n cv2.destroyAllWindows()\n print(\"Leave Face Detection\")\n\n\nif __name__ == '__main__':\n opt = _create_parser()\n print(opt)\n main(opt)\n"
] | [
[
"numpy.array",
"numpy.abs",
"numpy.linalg.norm",
"numpy.zeros"
]
] |
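The face application above compares dlib 128-D descriptors against stored ones with a distance threshold (0.6 by default, the usual dlib convention). The matching step itself reduces to a nearest-neighbour check, sketched here on random vectors since no real descriptors are available (the helper name is made up):

    import numpy as np

    def match_descriptor(query, known, names, threshold=0.6):
        # Euclidean distance to every stored descriptor; accept the closest
        # one only if it falls under the threshold.
        dists = np.linalg.norm(known - query, axis=1)
        best = int(np.argmin(dists))
        return (dists[best] < threshold), names[best], float(dists[best])

    rng = np.random.default_rng(0)
    known = rng.normal(size=(3, 128))
    names = ["alice", "bob", "carol"]
    print(match_descriptor(known[1] + 0.01 * rng.normal(size=128), known, names))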
tasx0823/BBN | [
"7992e908842f5934f0d1ee3f430d796621e81975"
] | [
"lib/utils/utils.py"
] | [
"import logging\r\nimport time\r\nimport os\r\n\r\nimport torch\r\nfrom utils.lr_scheduler import WarmupMultiStepLR\r\nfrom net import Network\r\n\r\n\r\ndef create_logger(cfg):\r\n dataset = cfg.DATASET.DATASET\r\n net_type = cfg.BACKBONE.TYPE\r\n module_type = cfg.MODULE.TYPE\r\n log_dir = os.path.join(cfg.OUTPUT_DIR, cfg.NAME, \"logs\")\r\n if not os.path.exists(log_dir):\r\n os.makedirs(log_dir)\r\n time_str = time.strftime(\"%Y-%m-%d-%H-%M\")\r\n log_name = \"{}_{}_{}_{}.log\".format(dataset, net_type, module_type, time_str)\r\n log_file = os.path.join(log_dir, log_name)\r\n # set up logger\r\n print(\"=> creating log {}\".format(log_file))\r\n head = \"%(asctime)-15s %(message)s\"\r\n logging.basicConfig(filename=str(log_file), format=head)\r\n logger = logging.getLogger()\r\n logger.setLevel(logging.INFO)\r\n console = logging.StreamHandler()\r\n logging.getLogger(\"\").addHandler(console)\r\n\r\n logger.info(\"---------------------Cfg is set as follow--------------------\")\r\n logger.info(cfg)\r\n logger.info(\"-------------------------------------------------------------\")\r\n return logger, log_file\r\n\r\n\r\ndef get_optimizer(cfg, model):\r\n base_lr = cfg.TRAIN.OPTIMIZER.BASE_LR\r\n params = []\r\n\r\n for name, p in model.named_parameters():\r\n if p.requires_grad:\r\n params.append({\"params\": p})\r\n\r\n if cfg.TRAIN.OPTIMIZER.TYPE == \"SGD\":\r\n optimizer = torch.optim.SGD(\r\n params,\r\n lr=base_lr,\r\n momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM,\r\n weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,\r\n nesterov=True,\r\n )\r\n elif cfg.TRAIN.OPTIMIZER.TYPE == \"ADAM\":\r\n optimizer = torch.optim.Adam(\r\n params,\r\n lr=base_lr,\r\n betas=(0.9, 0.999),\r\n weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,\r\n )\r\n return optimizer\r\n\r\n\r\ndef get_scheduler(cfg, optimizer):\r\n if cfg.TRAIN.LR_SCHEDULER.TYPE == \"multistep\":\r\n scheduler = torch.optim.lr_scheduler.MultiStepLR(\r\n optimizer,\r\n cfg.TRAIN.LR_SCHEDULER.LR_STEP,\r\n gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,\r\n )\r\n elif cfg.TRAIN.LR_SCHEDULER.TYPE == \"cosine\":\r\n if cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END > 0:\r\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\r\n optimizer, T_max=cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END, eta_min=1e-4\r\n )\r\n else:\r\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\r\n optimizer, T_max=cfg.TRAIN.MAX_EPOCH, eta_min=1e-4\r\n )\r\n elif cfg.TRAIN.LR_SCHEDULER.TYPE == \"warmup\":\r\n scheduler = WarmupMultiStepLR(\r\n optimizer,\r\n cfg.TRAIN.LR_SCHEDULER.LR_STEP,\r\n gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,\r\n warmup_epochs=cfg.TRAIN.LR_SCHEDULER.WARM_EPOCH,\r\n )\r\n else:\r\n raise NotImplementedError(\"Unsupported LR Scheduler: {}\".format(cfg.TRAIN.LR_SCHEDULER.TYPE))\r\n\r\n return scheduler\r\n\r\n\r\ndef get_model(cfg, num_classes, device, logger):\r\n model = Network(cfg, mode=\"train\", num_classes=num_classes)\r\n\r\n if cfg.BACKBONE.FREEZE == True:\r\n model.freeze_backbone()\r\n logger.info(\"Backbone has been freezed\")\r\n\r\n if cfg.CPU_MODE:\r\n model = model.to(device)\r\n else:\r\n model = torch.nn.DataParallel(model).cuda()\r\n\r\n return model\r\n\r\ndef get_category_list(annotations, num_classes, cfg):\r\n num_list = [0] * num_classes\r\n cat_list = []\r\n print(\"Weight List has been produced\")\r\n for anno in annotations:\r\n category_id = anno[\"category_id\"]\r\n num_list[category_id] += 1\r\n cat_list.append(category_id)\r\n return num_list, cat_list"
] | [
[
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.optim.SGD",
"torch.optim.Adam",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.nn.DataParallel"
]
] |
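get_optimizer and get_scheduler above pair an SGD optimizer with a MultiStepLR schedule driven by config values. A tiny end-to-end sketch with literal hyperparameters standing in for the cfg object:

    import torch

    model = torch.nn.Linear(4, 2)
    params = [{"params": p} for p in model.parameters() if p.requires_grad]

    optimizer = torch.optim.SGD(params, lr=0.1, momentum=0.9,
                                weight_decay=1e-4, nesterov=True)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [30, 60], gamma=0.1)

    for epoch in range(90):
        optimizer.step()        # the actual training step would go here
        scheduler.step()
    print(optimizer.param_groups[0]["lr"])   # 0.1 -> 0.01 after epoch 30 -> 0.001 after 60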
dwhu/pandas | [
"283fa07e723fac091685366ba83727624748fddb"
] | [
"pandas/core/internals/blocks.py"
] | [
"from datetime import datetime, timedelta\nimport functools\nimport inspect\nimport re\nfrom typing import Any, List\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import NaT, algos as libalgos, lib, tslib, writers\nfrom pandas._libs.index import convert_scalar\nimport pandas._libs.internals as libinternals\nfrom pandas._libs.tslibs import Timedelta, conversion\nfrom pandas._libs.tslibs.timezones import tz_compare\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.cast import (\n astype_nansafe,\n find_common_type,\n infer_dtype_from,\n infer_dtype_from_scalar,\n maybe_downcast_numeric,\n maybe_downcast_to_dtype,\n maybe_infer_dtype_type,\n maybe_promote,\n maybe_upcast,\n soft_convert_objects,\n)\nfrom pandas.core.dtypes.common import (\n _NS_DTYPE,\n _TD_DTYPE,\n ensure_platform_int,\n is_bool_dtype,\n is_categorical,\n is_categorical_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_interval_dtype,\n is_list_like,\n is_object_dtype,\n is_period_dtype,\n is_re,\n is_re_compilable,\n is_sparse,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.concat import concat_categorical, concat_datetime\nfrom pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCExtensionArray,\n ABCPandasArray,\n ABCSeries,\n)\nfrom pandas.core.dtypes.missing import (\n _isna_compat,\n array_equivalent,\n is_valid_nat_for_dtype,\n isna,\n)\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import Categorical, DatetimeArray, PandasDtype, TimedeltaArray\nfrom pandas.core.base import PandasObject\nimport pandas.core.common as com\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexers import (\n check_setitem_lengths,\n is_empty_indexer,\n is_scalar_indexer,\n)\nimport pandas.core.missing as missing\nfrom pandas.core.nanops import nanpercentile\n\nfrom pandas.io.formats.printing import pprint_thing\n\n\nclass Block(PandasObject):\n \"\"\"\n Canonical n-dimensional unit of homogeneous dtype contained in a pandas\n data structure\n\n Index-ignorant; let the container take care of that\n \"\"\"\n\n __slots__ = [\"_mgr_locs\", \"values\", \"ndim\"]\n is_numeric = False\n is_float = False\n is_integer = False\n is_complex = False\n is_datetime = False\n is_datetimetz = False\n is_timedelta = False\n is_bool = False\n is_object = False\n is_categorical = False\n is_extension = False\n _can_hold_na = False\n _can_consolidate = True\n _verify_integrity = True\n _validate_ndim = True\n _ftype = \"dense\"\n _concatenator = staticmethod(np.concatenate)\n\n def __init__(self, values, placement, ndim=None):\n self.ndim = self._check_ndim(values, ndim)\n self.mgr_locs = placement\n self.values = values\n\n if self._validate_ndim and self.ndim and len(self.mgr_locs) != len(self.values):\n raise ValueError(\n f\"Wrong number of items passed {len(self.values)}, \"\n f\"placement implies {len(self.mgr_locs)}\"\n )\n\n def _check_ndim(self, values, ndim):\n \"\"\"\n ndim inference and validation.\n\n Infers ndim from 'values' if not provided to __init__.\n Validates that values.ndim and ndim are consistent if and only if\n the class variable '_validate_ndim' is True.\n\n Parameters\n ----------\n values : array-like\n ndim : int or None\n\n Returns\n -------\n ndim : int\n\n Raises\n ------\n ValueError : the number of dimensions do not match\n \"\"\"\n if 
ndim is None:\n ndim = values.ndim\n\n if self._validate_ndim and values.ndim != ndim:\n raise ValueError(\n \"Wrong number of dimensions. \"\n f\"values.ndim != ndim [{values.ndim} != {ndim}]\"\n )\n return ndim\n\n @property\n def _holder(self):\n \"\"\"The array-like that can hold the underlying values.\n\n None for 'Block', overridden by subclasses that don't\n use an ndarray.\n \"\"\"\n return None\n\n @property\n def _consolidate_key(self):\n return (self._can_consolidate, self.dtype.name)\n\n @property\n def _is_single_block(self):\n return self.ndim == 1\n\n @property\n def is_view(self):\n \"\"\" return a boolean if I am possibly a view \"\"\"\n return self.values.base is not None\n\n @property\n def is_datelike(self):\n \"\"\" return True if I am a non-datelike \"\"\"\n return self.is_datetime or self.is_timedelta\n\n def is_categorical_astype(self, dtype):\n \"\"\"\n validate that we have a astypeable to categorical,\n returns a boolean if we are a categorical\n \"\"\"\n if dtype is Categorical or dtype is CategoricalDtype:\n # this is a pd.Categorical, but is not\n # a valid type for astypeing\n raise TypeError(f\"invalid type {dtype} for astype\")\n\n elif is_categorical_dtype(dtype):\n return True\n\n return False\n\n def external_values(self, dtype=None):\n \"\"\" return an outside world format, currently just the ndarray \"\"\"\n return self.values\n\n def internal_values(self, dtype=None):\n \"\"\" return an internal format, currently just the ndarray\n this should be the pure internal API format\n \"\"\"\n return self.values\n\n def get_values(self, dtype=None):\n \"\"\"\n return an internal format, currently just the ndarray\n this is often overridden to handle to_dense like operations\n \"\"\"\n if is_object_dtype(dtype):\n return self.values.astype(object)\n return self.values\n\n def get_block_values(self, dtype=None):\n \"\"\"\n This is used in the JSON C code\n \"\"\"\n return self.get_values(dtype=dtype)\n\n def to_dense(self):\n return self.values.view()\n\n @property\n def fill_value(self):\n return np.nan\n\n @property\n def mgr_locs(self):\n return self._mgr_locs\n\n @mgr_locs.setter\n def mgr_locs(self, new_mgr_locs):\n if not isinstance(new_mgr_locs, libinternals.BlockPlacement):\n new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs)\n\n self._mgr_locs = new_mgr_locs\n\n @property\n def array_dtype(self):\n \"\"\" the dtype to return if I want to construct this block as an\n array\n \"\"\"\n return self.dtype\n\n def make_block(self, values, placement=None) -> \"Block\":\n \"\"\"\n Create a new block, with type inference propagate any values that are\n not specified\n \"\"\"\n if placement is None:\n placement = self.mgr_locs\n\n return make_block(values, placement=placement, ndim=self.ndim)\n\n def make_block_same_class(self, values, placement=None, ndim=None):\n \"\"\" Wrap given values in a block of same type as self. 
\"\"\"\n if placement is None:\n placement = self.mgr_locs\n if ndim is None:\n ndim = self.ndim\n return make_block(values, placement=placement, ndim=ndim, klass=type(self))\n\n def __repr__(self) -> str:\n # don't want to print out all of the items here\n name = type(self).__name__\n if self._is_single_block:\n\n result = f\"{name}: {len(self)} dtype: {self.dtype}\"\n\n else:\n\n shape = \" x \".join(pprint_thing(s) for s in self.shape)\n result = (\n f\"{name}: {pprint_thing(self.mgr_locs.indexer)}, \"\n f\"{shape}, dtype: {self.dtype}\"\n )\n\n return result\n\n def __len__(self) -> int:\n return len(self.values)\n\n def __getstate__(self):\n return self.mgr_locs.indexer, self.values\n\n def __setstate__(self, state):\n self.mgr_locs = libinternals.BlockPlacement(state[0])\n self.values = state[1]\n self.ndim = self.values.ndim\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n return self.values[slicer]\n\n def getitem_block(self, slicer, new_mgr_locs=None):\n \"\"\"\n Perform __getitem__-like, return result as block.\n\n As of now, only supports slices that preserve dimensionality.\n \"\"\"\n if new_mgr_locs is None:\n if isinstance(slicer, tuple):\n axis0_slicer = slicer[0]\n else:\n axis0_slicer = slicer\n new_mgr_locs = self.mgr_locs[axis0_slicer]\n\n new_values = self._slice(slicer)\n\n if self._validate_ndim and new_values.ndim != self.ndim:\n raise ValueError(\"Only same dim slicing is allowed\")\n\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n @property\n def shape(self):\n return self.values.shape\n\n @property\n def dtype(self):\n return self.values.dtype\n\n @property\n def ftype(self):\n if getattr(self.values, \"_pandas_ftype\", False):\n dtype = self.dtype.subtype\n else:\n dtype = self.dtype\n return f\"{dtype}:{self._ftype}\"\n\n def merge(self, other):\n return _merge_blocks([self, other])\n\n def concat_same_type(self, to_concat, placement=None):\n \"\"\"\n Concatenate list of single blocks of the same type.\n \"\"\"\n values = self._concatenator(\n [blk.values for blk in to_concat], axis=self.ndim - 1\n )\n return self.make_block_same_class(\n values, placement=placement or slice(0, len(values), 1)\n )\n\n def iget(self, i):\n return self.values[i]\n\n def set(self, locs, values):\n \"\"\"\n Modify Block in-place with new item value\n\n Returns\n -------\n None\n \"\"\"\n self.values[locs] = values\n\n def delete(self, loc):\n \"\"\"\n Delete given loc(-s) from block in-place.\n \"\"\"\n self.values = np.delete(self.values, loc, 0)\n self.mgr_locs = self.mgr_locs.delete(loc)\n\n def apply(self, func, **kwargs):\n \"\"\" apply the function to my values; return a block if we are not\n one\n \"\"\"\n with np.errstate(all=\"ignore\"):\n result = func(self.values, **kwargs)\n\n if is_extension_array_dtype(result) and result.ndim > 1:\n # if we get a 2D ExtensionArray, we need to split it into 1D pieces\n nbs = []\n for i, loc in enumerate(self.mgr_locs):\n vals = result[i]\n nv = _block_shape(vals, ndim=self.ndim)\n block = self.make_block(values=nv, placement=[loc])\n nbs.append(block)\n return nbs\n\n if not isinstance(result, Block):\n result = self.make_block(values=_block_shape(result, ndim=self.ndim))\n\n return result\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n \"\"\" fillna on the block with the value. 
If we fail, then convert to\n ObjectBlock and try again\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n mask = isna(self.values)\n if limit is not None:\n limit = libalgos._validate_limit(None, limit=limit)\n mask[mask.cumsum(self.ndim - 1) > limit] = False\n\n if not self._can_hold_na:\n if inplace:\n return self\n else:\n return self.copy()\n\n if self._can_hold_element(value):\n # equivalent: _try_coerce_args(value) would not raise\n blocks = self.putmask(mask, value, inplace=inplace)\n return self._maybe_downcast(blocks, downcast)\n\n # we can't process the value, but nothing to do\n if not mask.any():\n return self if inplace else self.copy()\n\n # operate column-by-column\n def f(mask, val, idx):\n block = self.coerce_to_target_dtype(value)\n\n # slice out our block\n if idx is not None:\n # i.e. self.ndim == 2\n block = block.getitem_block(slice(idx, idx + 1))\n return block.fillna(value, limit=limit, inplace=inplace, downcast=None)\n\n return self.split_and_operate(None, f, inplace)\n\n def split_and_operate(self, mask, f, inplace: bool):\n \"\"\"\n split the block per-column, and apply the callable f\n per-column, return a new block for each. Handle\n masking which will not change a block unless needed.\n\n Parameters\n ----------\n mask : 2-d boolean mask\n f : callable accepting (1d-mask, 1d values, indexer)\n inplace : boolean\n\n Returns\n -------\n list of blocks\n \"\"\"\n\n if mask is None:\n mask = np.broadcast_to(True, shape=self.shape)\n\n new_values = self.values\n\n def make_a_block(nv, ref_loc):\n if isinstance(nv, list):\n assert len(nv) == 1, nv\n assert isinstance(nv[0], Block)\n block = nv[0]\n else:\n # Put back the dimension that was taken from it and make\n # a block out of the result.\n nv = _block_shape(nv, ndim=self.ndim)\n block = self.make_block(values=nv, placement=ref_loc)\n return block\n\n # ndim == 1\n if self.ndim == 1:\n if mask.any():\n nv = f(mask, new_values, None)\n else:\n nv = new_values if inplace else new_values.copy()\n block = make_a_block(nv, self.mgr_locs)\n return [block]\n\n # ndim > 1\n new_blocks = []\n for i, ref_loc in enumerate(self.mgr_locs):\n m = mask[i]\n v = new_values[i]\n\n # need a new block\n if m.any():\n nv = f(m, v, i)\n else:\n nv = v if inplace else v.copy()\n\n block = make_a_block(nv, [ref_loc])\n new_blocks.append(block)\n\n return new_blocks\n\n def _maybe_downcast(self, blocks: List[\"Block\"], downcast=None) -> List[\"Block\"]:\n\n # no need to downcast our float\n # unless indicated\n if downcast is None and (\n self.is_float or self.is_timedelta or self.is_datetime\n ):\n return blocks\n\n return _extend_blocks([b.downcast(downcast) for b in blocks])\n\n def downcast(self, dtypes=None):\n \"\"\" try to downcast each item to the dict of dtypes if present \"\"\"\n\n # turn it off completely\n if dtypes is False:\n return self\n\n values = self.values\n\n # single block handling\n if self._is_single_block:\n\n # try to cast all non-floats here\n if dtypes is None:\n dtypes = \"infer\"\n\n nv = maybe_downcast_to_dtype(values, dtypes)\n return self.make_block(nv)\n\n # ndim > 1\n if dtypes is None:\n return self\n\n if not (dtypes == \"infer\" or isinstance(dtypes, dict)):\n raise ValueError(\n \"downcast must have a dictionary or 'infer' as its argument\"\n )\n elif dtypes != \"infer\":\n raise AssertionError(\"dtypes as dict is not supported yet\")\n\n # operate column-by-column\n # this is expensive as it splits the blocks items-by-item\n def f(mask, val, idx):\n val = 
maybe_downcast_to_dtype(val, dtype=\"infer\")\n return val\n\n return self.split_and_operate(None, f, False)\n\n def astype(self, dtype, copy: bool = False, errors: str = \"raise\"):\n \"\"\"\n Coerce to the new dtype.\n\n Parameters\n ----------\n dtype : str, dtype convertible\n copy : bool, default False\n copy if indicated\n errors : str, {'raise', 'ignore'}, default 'ignore'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n Returns\n -------\n Block\n \"\"\"\n errors_legal_values = (\"raise\", \"ignore\")\n\n if errors not in errors_legal_values:\n invalid_arg = (\n \"Expected value of kwarg 'errors' to be one of \"\n f\"{list(errors_legal_values)}. Supplied value is '{errors}'\"\n )\n raise ValueError(invalid_arg)\n\n if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype):\n msg = (\n f\"Expected an instance of {dtype.__name__}, \"\n \"but got the class instead. Try instantiating 'dtype'.\"\n )\n raise TypeError(msg)\n\n # may need to convert to categorical\n if self.is_categorical_astype(dtype):\n\n if is_categorical_dtype(self.values):\n # GH 10696/18593: update an existing categorical efficiently\n return self.make_block(self.values.astype(dtype, copy=copy))\n\n return self.make_block(Categorical(self.values, dtype=dtype))\n\n dtype = pandas_dtype(dtype)\n\n # astype processing\n if is_dtype_equal(self.dtype, dtype):\n if copy:\n return self.copy()\n return self\n\n # force the copy here\n if self.is_extension:\n # TODO: Should we try/except this astype?\n values = self.values.astype(dtype)\n else:\n if issubclass(dtype.type, str):\n\n # use native type formatting for datetime/tz/timedelta\n if self.is_datelike:\n values = self.to_native_types()\n\n # astype formatting\n else:\n values = self.get_values()\n\n else:\n values = self.get_values(dtype=dtype)\n\n # _astype_nansafe works fine with 1-d only\n vals1d = values.ravel()\n try:\n values = astype_nansafe(vals1d, dtype, copy=True)\n except (ValueError, TypeError):\n # e.g. 
astype_nansafe can fail on object-dtype of strings\n # trying to convert to float\n if errors == \"raise\":\n raise\n newb = self.copy() if copy else self\n return newb\n\n # TODO(extension)\n # should we make this attribute?\n if isinstance(values, np.ndarray):\n values = values.reshape(self.shape)\n\n newb = make_block(values, placement=self.mgr_locs, ndim=self.ndim)\n\n if newb.is_numeric and self.is_numeric:\n if newb.shape != self.shape:\n raise TypeError(\n f\"cannot set astype for copy = [{copy}] for dtype \"\n f\"({self.dtype.name} [{self.shape}]) to different shape \"\n f\"({newb.dtype.name} [{newb.shape}])\"\n )\n return newb\n\n def convert(\n self,\n copy: bool = True,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n coerce: bool = False,\n ):\n \"\"\" attempt to coerce any object types to better types return a copy\n of the block (if copy = True) by definition we are not an ObjectBlock\n here!\n \"\"\"\n\n return self.copy() if copy else self\n\n def _can_hold_element(self, element: Any) -> bool:\n \"\"\" require the same dtype as ourselves \"\"\"\n dtype = self.values.dtype.type\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, dtype)\n return isinstance(element, dtype)\n\n def to_native_types(self, slicer=None, na_rep=\"nan\", quoting=None, **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n values = self.get_values()\n\n if slicer is not None:\n values = values[:, slicer]\n mask = isna(values)\n itemsize = writers.word_len(na_rep)\n\n if not self.is_object and not quoting and itemsize:\n values = values.astype(f\"<U{itemsize}\")\n else:\n values = np.array(values, dtype=\"object\")\n\n values[mask] = na_rep\n return values\n\n # block actions #\n def copy(self, deep=True):\n \"\"\" copy constructor \"\"\"\n values = self.values\n if deep:\n values = values.copy()\n return self.make_block_same_class(values, ndim=self.ndim)\n\n def replace(\n self, to_replace, value, inplace=False, filter=None, regex=False, convert=True\n ):\n \"\"\"replace the to_replace value with value, possible to create new\n blocks here this is just a call to putmask. regex is not used here.\n It is used in ObjectBlocks. It is here for API compatibility.\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n original_to_replace = to_replace\n\n # If we cannot replace with own dtype, convert to ObjectBlock and\n # retry\n if not self._can_hold_element(to_replace):\n if not isinstance(to_replace, list):\n if inplace:\n return [self]\n return [self.copy()]\n\n to_replace = [x for x in to_replace if self._can_hold_element(x)]\n if not len(to_replace):\n # GH#28084 avoid costly checks since we can infer\n # that there is nothing to replace in this block\n if inplace:\n return [self]\n return [self.copy()]\n\n if len(to_replace) == 1:\n # _can_hold_element checks have reduced this back to the\n # scalar case and we can avoid a costly object cast\n return self.replace(\n to_replace[0],\n value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n\n # GH 22083, TypeError or ValueError occurred within error handling\n # causes infinite loop. 
Cast and retry only if not objectblock.\n if is_object_dtype(self):\n raise AssertionError\n\n # try again with a compatible block\n block = self.astype(object)\n return block.replace(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n\n values = self.values\n if lib.is_scalar(to_replace) and isinstance(values, np.ndarray):\n # The only non-DatetimeLike class that also has a non-trivial\n # try_coerce_args is ObjectBlock, but that overrides replace,\n # so does not get here.\n to_replace = convert_scalar(values, to_replace)\n\n mask = missing.mask_missing(values, to_replace)\n if filter is not None:\n filtered_out = ~self.mgr_locs.isin(filter)\n mask[filtered_out.nonzero()[0]] = False\n\n if not mask.any():\n if inplace:\n return [self]\n return [self.copy()]\n\n try:\n blocks = self.putmask(mask, value, inplace=inplace)\n # Note: it is _not_ the case that self._can_hold_element(value)\n # is always true at this point. In particular, that can fail\n # for:\n # \"2u\" with bool-dtype, float-dtype\n # 0.5 with int64-dtype\n # np.nan with int64-dtype\n except (TypeError, ValueError):\n # GH 22083, TypeError or ValueError occurred within error handling\n # causes infinite loop. Cast and retry only if not objectblock.\n if is_object_dtype(self):\n raise\n\n assert not self._can_hold_element(value), value\n\n # try again with a compatible block\n block = self.astype(object)\n return block.replace(\n to_replace=original_to_replace,\n value=value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n if convert:\n blocks = [b.convert(numeric=False, copy=not inplace) for b in blocks]\n return blocks\n\n def _replace_single(self, *args, **kwargs):\n \"\"\" no-op on a non-ObjectBlock \"\"\"\n return self if kwargs[\"inplace\"] else self.copy()\n\n def setitem(self, indexer, value):\n \"\"\"\n Set the value inplace, returning a maybe different typed block.\n\n Parameters\n ----------\n indexer : tuple, list-like, array-like, slice\n The subset of self.values to set\n value : object\n The value being set\n\n Returns\n -------\n Block\n\n Notes\n -----\n `indexer` is a direct slice/positional indexer. 
`value` must\n be a compatible shape.\n \"\"\"\n transpose = self.ndim == 2\n\n # coerce None values, if appropriate\n if value is None:\n if self.is_numeric:\n value = np.nan\n\n # coerce if block dtype can store value\n values = self.values\n if self._can_hold_element(value):\n # We only get here for non-Extension Blocks, so _try_coerce_args\n # is only relevant for DatetimeBlock and TimedeltaBlock\n if lib.is_scalar(value):\n value = convert_scalar(values, value)\n\n else:\n # current dtype cannot store value, coerce to common dtype\n find_dtype = False\n\n if hasattr(value, \"dtype\"):\n dtype = value.dtype\n find_dtype = True\n\n elif lib.is_scalar(value) and not isna(value):\n dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)\n find_dtype = True\n\n if find_dtype:\n dtype = find_common_type([values.dtype, dtype])\n if not is_dtype_equal(self.dtype, dtype):\n b = self.astype(dtype)\n return b.setitem(indexer, value)\n\n # value must be storeable at this moment\n if is_extension_array_dtype(getattr(value, \"dtype\", None)):\n # We need to be careful not to allow through strings that\n # can be parsed to EADtypes\n arr_value = value\n else:\n arr_value = np.array(value)\n\n # cast the values to a type that can hold nan (if necessary)\n if not self._can_hold_element(value):\n dtype, _ = maybe_promote(arr_value.dtype)\n values = values.astype(dtype)\n\n if transpose:\n values = values.T\n\n # length checking\n check_setitem_lengths(indexer, value, values)\n\n if is_empty_indexer(indexer, arr_value):\n # GH#8669 empty indexers\n pass\n\n elif is_scalar_indexer(indexer, arr_value):\n # setting a single element for each dim and with a rhs that could\n # be e.g. a list; see GH#6043\n values[indexer] = value\n\n # if we are an exact match (ex-broadcasting),\n # then use the resultant dtype\n elif (\n len(arr_value.shape)\n and arr_value.shape[0] == values.shape[0]\n and arr_value.size == values.size\n ):\n values[indexer] = value\n try:\n values = values.astype(arr_value.dtype)\n except ValueError:\n pass\n\n # set\n else:\n values[indexer] = value\n\n if transpose:\n values = values.T\n block = self.make_block(values)\n return block\n\n def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):\n \"\"\" putmask the data to the block; it is possible that we may create a\n new dtype of block\n\n return the resulting block(s)\n\n Parameters\n ----------\n mask : the condition to respect\n new : a ndarray/object\n align : boolean, perform alignment on other/cond, default is True\n inplace : perform inplace modification, default is False\n axis : int\n transpose : boolean\n Set to True if self is stored with axes reversed\n\n Returns\n -------\n a list of new blocks, the result of the putmask\n \"\"\"\n\n new_values = self.values if inplace else self.values.copy()\n\n new = getattr(new, \"values\", new)\n mask = getattr(mask, \"values\", mask)\n\n # if we are passed a scalar None, convert it here\n if not is_list_like(new) and isna(new) and not self.is_object:\n # FIXME: make sure we have compatible NA\n new = self.fill_value\n\n if self._can_hold_element(new):\n # We only get here for non-Extension Blocks, so _try_coerce_args\n # is only relevant for DatetimeBlock and TimedeltaBlock\n if lib.is_scalar(new):\n new = convert_scalar(new_values, new)\n\n if transpose:\n new_values = new_values.T\n\n # If the default repeat behavior in np.putmask would go in the\n # wrong direction, then explicitly repeat and reshape new instead\n if getattr(new, \"ndim\", 0) >= 1:\n 
if self.ndim - 1 == new.ndim and axis == 1:\n new = np.repeat(new, new_values.shape[-1]).reshape(self.shape)\n new = new.astype(new_values.dtype)\n\n # we require exact matches between the len of the\n # values we are setting (or is compat). np.putmask\n # doesn't check this and will simply truncate / pad\n # the output, but we want sane error messages\n #\n # TODO: this prob needs some better checking\n # for 2D cases\n if (\n is_list_like(new)\n and np.any(mask[mask])\n and getattr(new, \"ndim\", 1) == 1\n ):\n if mask[mask].shape[-1] == len(new):\n # GH 30567\n # If length of ``new`` is less than the length of ``new_values``,\n # `np.putmask` would first repeat the ``new`` array and then\n # assign the masked values hence produces incorrect result.\n # `np.place` on the other hand uses the ``new`` values at it is\n # to place in the masked locations of ``new_values``\n np.place(new_values, mask, new)\n elif mask.shape[-1] == len(new) or len(new) == 1:\n np.putmask(new_values, mask, new)\n else:\n raise ValueError(\"cannot assign mismatch length to masked array\")\n else:\n np.putmask(new_values, mask, new)\n\n # maybe upcast me\n elif mask.any():\n if transpose:\n mask = mask.T\n if isinstance(new, np.ndarray):\n new = new.T\n axis = new_values.ndim - axis - 1\n\n # Pseudo-broadcast\n if getattr(new, \"ndim\", 0) >= 1:\n if self.ndim - 1 == new.ndim:\n new_shape = list(new.shape)\n new_shape.insert(axis, 1)\n new = new.reshape(tuple(new_shape))\n\n # operate column-by-column\n def f(mask, val, idx):\n\n if idx is None:\n # ndim==1 case.\n n = new\n else:\n\n if isinstance(new, np.ndarray):\n n = np.squeeze(new[idx % new.shape[0]])\n else:\n n = np.array(new)\n\n # type of the new block\n dtype, _ = maybe_promote(n.dtype)\n\n # we need to explicitly astype here to make a copy\n n = n.astype(dtype)\n\n nv = _putmask_smart(val, mask, n)\n return nv\n\n new_blocks = self.split_and_operate(mask, f, inplace)\n return new_blocks\n\n if inplace:\n return [self]\n\n if transpose:\n new_values = new_values.T\n\n return [self.make_block(new_values)]\n\n def coerce_to_target_dtype(self, other):\n \"\"\"\n coerce the current block to a dtype compat for other\n we will return a block, possibly object, and not raise\n\n we can also safely try to coerce to the same dtype\n and will receive the same block\n \"\"\"\n\n # if we cannot then coerce to object\n dtype, _ = infer_dtype_from(other, pandas_dtype=True)\n\n if is_dtype_equal(self.dtype, dtype):\n return self\n\n if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):\n # we don't upcast to bool\n return self.astype(object)\n\n elif (self.is_float or self.is_complex) and (\n is_integer_dtype(dtype) or is_float_dtype(dtype)\n ):\n # don't coerce float/complex to int\n return self\n\n elif (\n self.is_datetime\n or is_datetime64_dtype(dtype)\n or is_datetime64tz_dtype(dtype)\n ):\n\n # not a datetime\n if not (\n (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype))\n and self.is_datetime\n ):\n return self.astype(object)\n\n # don't upcast timezone with different timezone or no timezone\n mytz = getattr(self.dtype, \"tz\", None)\n othertz = getattr(dtype, \"tz\", None)\n\n if not tz_compare(mytz, othertz):\n return self.astype(object)\n\n raise AssertionError(\n f\"possible recursion in coerce_to_target_dtype: {self} {other}\"\n )\n\n elif self.is_timedelta or is_timedelta64_dtype(dtype):\n\n # not a timedelta\n if not (is_timedelta64_dtype(dtype) and self.is_timedelta):\n return self.astype(object)\n\n raise AssertionError(\n 
f\"possible recursion in coerce_to_target_dtype: {self} {other}\"\n )\n\n try:\n return self.astype(dtype)\n except (ValueError, TypeError, OverflowError):\n return self.astype(object)\n\n def interpolate(\n self,\n method=\"pad\",\n axis=0,\n index=None,\n values=None,\n inplace=False,\n limit=None,\n limit_direction=\"forward\",\n limit_area=None,\n fill_value=None,\n coerce=False,\n downcast=None,\n **kwargs,\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n def check_int_bool(self, inplace):\n # Only FloatBlocks will contain NaNs.\n # timedelta subclasses IntBlock\n if (self.is_bool or self.is_integer) and not self.is_timedelta:\n if inplace:\n return self\n else:\n return self.copy()\n\n # a fill na type method\n try:\n m = missing.clean_fill_method(method)\n except ValueError:\n m = None\n\n if m is not None:\n r = check_int_bool(self, inplace)\n if r is not None:\n return r\n return self._interpolate_with_fill(\n method=m,\n axis=axis,\n inplace=inplace,\n limit=limit,\n fill_value=fill_value,\n coerce=coerce,\n downcast=downcast,\n )\n # validate the interp method\n m = missing.clean_interp_method(method, **kwargs)\n\n r = check_int_bool(self, inplace)\n if r is not None:\n return r\n return self._interpolate(\n method=m,\n index=index,\n values=values,\n axis=axis,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n fill_value=fill_value,\n inplace=inplace,\n downcast=downcast,\n **kwargs,\n )\n\n def _interpolate_with_fill(\n self,\n method=\"pad\",\n axis=0,\n inplace=False,\n limit=None,\n fill_value=None,\n coerce=False,\n downcast=None,\n ):\n \"\"\" fillna but using the interpolate machinery \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # if we are coercing, then don't force the conversion\n # if the block can't hold the type\n if coerce:\n if not self._can_hold_na:\n if inplace:\n return [self]\n else:\n return [self.copy()]\n\n values = self.values if inplace else self.values.copy()\n\n # We only get here for non-ExtensionBlock\n fill_value = convert_scalar(self.values, fill_value)\n\n values = missing.interpolate_2d(\n values,\n method=method,\n axis=axis,\n limit=limit,\n fill_value=fill_value,\n dtype=self.dtype,\n )\n\n blocks = [self.make_block_same_class(values, ndim=self.ndim)]\n return self._maybe_downcast(blocks, downcast)\n\n def _interpolate(\n self,\n method=None,\n index=None,\n values=None,\n fill_value=None,\n axis=0,\n limit=None,\n limit_direction=\"forward\",\n limit_area=None,\n inplace=False,\n downcast=None,\n **kwargs,\n ):\n \"\"\" interpolate using scipy wrappers \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n data = self.values if inplace else self.values.copy()\n\n # only deal with floats\n if not self.is_float:\n if not self.is_integer:\n return self\n data = data.astype(np.float64)\n\n if fill_value is None:\n fill_value = self.fill_value\n\n if method in (\"krogh\", \"piecewise_polynomial\", \"pchip\"):\n if not index.is_monotonic:\n raise ValueError(\n f\"{method} interpolation requires that the index be monotonic.\"\n )\n # process 1-d slices in the axis direction\n\n def func(x):\n\n # process a 1-d slice, returning it\n # should the axis argument be handled below in apply_along_axis?\n # i.e. 
not an arg to missing.interpolate_1d\n return missing.interpolate_1d(\n index,\n x,\n method=method,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n fill_value=fill_value,\n bounds_error=False,\n **kwargs,\n )\n\n # interp each column independently\n interp_values = np.apply_along_axis(func, axis, data)\n\n blocks = [self.make_block_same_class(interp_values)]\n return self._maybe_downcast(blocks, downcast)\n\n def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):\n \"\"\"\n Take values according to indexer and return them as a block.\n\n \"\"\"\n\n # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock\n # so need to preserve types\n # sparse is treated like an ndarray, but needs .get_values() shaping\n\n values = self.values\n\n if fill_tuple is None:\n fill_value = self.fill_value\n allow_fill = False\n else:\n fill_value = fill_tuple[0]\n allow_fill = True\n\n new_values = algos.take_nd(\n values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value\n )\n\n # Called from three places in managers, all of which satisfy\n # this assertion\n assert not (axis == 0 and new_mgr_locs is None)\n if new_mgr_locs is None:\n new_mgr_locs = self.mgr_locs\n\n if not is_dtype_equal(new_values.dtype, self.dtype):\n return self.make_block(new_values, new_mgr_locs)\n else:\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n def diff(self, n: int, axis: int = 1) -> List[\"Block\"]:\n \"\"\" return block for the diff of the values \"\"\"\n new_values = algos.diff(self.values, n, axis=axis)\n return [self.make_block(values=new_values)]\n\n def shift(self, periods, axis=0, fill_value=None):\n \"\"\" shift the block by periods, possibly upcast \"\"\"\n\n # convert integer to float if necessary. need to do a lot more than\n # that, handle boolean etc also\n new_values, fill_value = maybe_upcast(self.values, fill_value)\n\n # make sure array sent to np.roll is c_contiguous\n f_ordered = new_values.flags.f_contiguous\n if f_ordered:\n new_values = new_values.T\n axis = new_values.ndim - axis - 1\n\n if np.prod(new_values.shape):\n new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis)\n\n axis_indexer = [slice(None)] * self.ndim\n if periods > 0:\n axis_indexer[axis] = slice(None, periods)\n else:\n axis_indexer[axis] = slice(periods, None)\n new_values[tuple(axis_indexer)] = fill_value\n\n # restore original order\n if f_ordered:\n new_values = new_values.T\n\n return [self.make_block(new_values)]\n\n def where(\n self,\n other,\n cond,\n align=True,\n errors=\"raise\",\n try_cast: bool = False,\n axis: int = 0,\n ) -> List[\"Block\"]:\n \"\"\"\n evaluate the block; return result block(s) from the result\n\n Parameters\n ----------\n other : a ndarray/object\n cond : the condition to respect\n align : boolean, perform alignment on other/cond\n errors : str, {'raise', 'ignore'}, default 'raise'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. 
On error return original object\n axis : int\n\n Returns\n -------\n a new block(s), the result of the func\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n assert errors in [\"raise\", \"ignore\"]\n transpose = self.ndim == 2\n\n values = self.values\n orig_other = other\n if transpose:\n values = values.T\n\n other = getattr(other, \"_values\", getattr(other, \"values\", other))\n cond = getattr(cond, \"values\", cond)\n\n # If the default broadcasting would go in the wrong direction, then\n # explicitly reshape other instead\n if getattr(other, \"ndim\", 0) >= 1:\n if values.ndim - 1 == other.ndim and axis == 1:\n other = other.reshape(tuple(other.shape + (1,)))\n elif transpose and values.ndim == self.ndim - 1:\n cond = cond.T\n\n if not hasattr(cond, \"shape\"):\n raise ValueError(\"where must have a condition that is ndarray like\")\n\n # our where function\n def func(cond, values, other):\n\n if not (\n (self.is_integer or self.is_bool)\n and lib.is_float(other)\n and np.isnan(other)\n ):\n # np.where will cast integer array to floats in this case\n if not self._can_hold_element(other):\n raise TypeError\n if lib.is_scalar(other) and isinstance(values, np.ndarray):\n other = convert_scalar(values, other)\n\n fastres = expressions.where(cond, values, other)\n return fastres\n\n if cond.ravel().all():\n result = values\n else:\n # see if we can operate on the entire block, or need item-by-item\n # or if we are a single block (ndim == 1)\n try:\n result = func(cond, values, other)\n except TypeError:\n\n # we cannot coerce, return a compat dtype\n # we are explicitly ignoring errors\n block = self.coerce_to_target_dtype(other)\n blocks = block.where(\n orig_other,\n cond,\n align=align,\n errors=errors,\n try_cast=try_cast,\n axis=axis,\n )\n return self._maybe_downcast(blocks, \"infer\")\n\n if self._can_hold_na or self.ndim == 1:\n\n if transpose:\n result = result.T\n\n return [self.make_block(result)]\n\n # might need to separate out blocks\n axis = cond.ndim - 1\n cond = cond.swapaxes(axis, 0)\n mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool)\n\n result_blocks = []\n for m in [mask, ~mask]:\n if m.any():\n taken = result.take(m.nonzero()[0], axis=axis)\n r = maybe_downcast_numeric(taken, self.dtype)\n nb = self.make_block(r.T, placement=self.mgr_locs[m])\n result_blocks.append(nb)\n\n return result_blocks\n\n def equals(self, other) -> bool:\n if self.dtype != other.dtype or self.shape != other.shape:\n return False\n return array_equivalent(self.values, other.values)\n\n def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):\n \"\"\"Return a list of unstacked blocks of self\n\n Parameters\n ----------\n unstacker_func : callable\n Partially applied unstacker.\n new_columns : Index\n All columns of the unstacked BlockManager.\n n_rows : int\n Only used in ExtensionBlock._unstack\n fill_value : int\n Only used in ExtensionBlock._unstack\n\n Returns\n -------\n blocks : list of Block\n New blocks of unstacked values.\n mask : array_like of bool\n The mask of columns of `blocks` we should keep.\n \"\"\"\n unstacker = unstacker_func(self.values.T)\n new_items = unstacker.get_new_columns()\n new_placement = new_columns.get_indexer(new_items)\n new_values, mask = unstacker.get_new_values()\n\n mask = mask.any(0)\n new_values = new_values.T[mask]\n new_placement = new_placement[mask]\n\n blocks = [make_block(new_values, placement=new_placement)]\n return blocks, mask\n\n def quantile(self, qs, interpolation=\"linear\", 
axis=0):\n \"\"\"\n compute the quantiles of the\n\n Parameters\n ----------\n qs: a scalar or list of the quantiles to be computed\n interpolation: type of interpolation, default 'linear'\n axis: axis to compute, default 0\n\n Returns\n -------\n Block\n \"\"\"\n # We should always have ndim == 2 because Series dispatches to DataFrame\n assert self.ndim == 2\n\n values = self.get_values()\n\n is_empty = values.shape[axis] == 0\n orig_scalar = not is_list_like(qs)\n if orig_scalar:\n # make list-like, unpack later\n qs = [qs]\n\n if is_empty:\n # create the array of na_values\n # 2d len(values) * len(qs)\n result = np.repeat(\n np.array([self.fill_value] * len(qs)), len(values)\n ).reshape(len(values), len(qs))\n else:\n # asarray needed for Sparse, see GH#24600\n mask = np.asarray(isna(values))\n result = nanpercentile(\n values,\n np.array(qs) * 100,\n axis=axis,\n na_value=self.fill_value,\n mask=mask,\n ndim=values.ndim,\n interpolation=interpolation,\n )\n\n result = np.array(result, copy=False)\n result = result.T\n\n if orig_scalar and not lib.is_scalar(result):\n # result could be scalar in case with is_empty and self.ndim == 1\n assert result.shape[-1] == 1, result.shape\n result = result[..., 0]\n result = lib.item_from_zerodim(result)\n\n ndim = np.ndim(result)\n return make_block(result, placement=np.arange(len(result)), ndim=ndim)\n\n def _replace_coerce(\n self, to_replace, value, inplace=True, regex=False, convert=False, mask=None\n ):\n \"\"\"\n Replace value corresponding to the given boolean array with another\n value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n regex : bool, default False\n If true, perform regular expression substitution.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n A new block if there is anything to replace or the original block.\n \"\"\"\n\n if mask.any():\n if not regex:\n self = self.coerce_to_target_dtype(value)\n return self.putmask(mask, value, inplace=inplace)\n else:\n return self._replace_single(\n to_replace,\n value,\n inplace=inplace,\n regex=regex,\n convert=convert,\n mask=mask,\n )\n return self\n\n\nclass NonConsolidatableMixIn:\n \"\"\" hold methods for the nonconsolidatable blocks \"\"\"\n\n _can_consolidate = False\n _verify_integrity = False\n _validate_ndim = False\n\n def __init__(self, values, placement, ndim=None):\n \"\"\"Initialize a non-consolidatable block.\n\n 'ndim' may be inferred from 'placement'.\n\n This will call continue to call __init__ for the other base\n classes mixed in with this Mixin.\n \"\"\"\n # Placement must be converted to BlockPlacement so that we can check\n # its length\n if not isinstance(placement, libinternals.BlockPlacement):\n placement = libinternals.BlockPlacement(placement)\n\n # Maybe infer ndim from placement\n if ndim is None:\n if len(placement) != 1:\n ndim = 1\n else:\n ndim = 2\n super().__init__(values, placement, ndim=ndim)\n\n @property\n def shape(self):\n if self.ndim == 1:\n return ((len(self.values)),)\n return (len(self.mgr_locs), len(self.values))\n\n def iget(self, col):\n\n if self.ndim == 2 and isinstance(col, tuple):\n col, loc = col\n if not com.is_null_slice(col) and col != 0:\n raise IndexError(f\"{self} only contains one item\")\n elif 
isinstance(col, slice):\n if col != slice(None):\n raise NotImplementedError(col)\n return self.values[[loc]]\n return self.values[loc]\n else:\n if col != 0:\n raise IndexError(f\"{self} only contains one item\")\n return self.values\n\n def should_store(self, value):\n return isinstance(value, self._holder)\n\n def set(self, locs, values, check=False):\n assert locs.tolist() == [0]\n self.values = values\n\n def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False):\n \"\"\"\n putmask the data to the block; we must be a single block and not\n generate other blocks\n\n return the resulting block\n\n Parameters\n ----------\n mask : the condition to respect\n new : a ndarray/object\n align : boolean, perform alignment on other/cond, default is True\n inplace : perform inplace modification, default is False\n\n Returns\n -------\n a new block, the result of the putmask\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # use block's copy logic.\n # .values may be an Index which does shallow copy by default\n new_values = self.values if inplace else self.copy().values\n\n if isinstance(new, np.ndarray) and len(new) == len(mask):\n new = new[mask]\n\n mask = _safe_reshape(mask, new_values.shape)\n\n new_values[mask] = new\n return [self.make_block(values=new_values)]\n\n def _get_unstack_items(self, unstacker, new_columns):\n \"\"\"\n Get the placement, values, and mask for a Block unstack.\n\n This is shared between ObjectBlock and ExtensionBlock. They\n differ in that ObjectBlock passes the values, while ExtensionBlock\n passes the dummy ndarray of positions to be used by a take\n later.\n\n Parameters\n ----------\n unstacker : pandas.core.reshape.reshape._Unstacker\n new_columns : Index\n All columns of the unstacked BlockManager.\n\n Returns\n -------\n new_placement : ndarray[int]\n The placement of the new columns in `new_columns`.\n new_values : Union[ndarray, ExtensionArray]\n The first return value from _Unstacker.get_new_values.\n mask : ndarray[bool]\n The second return value from _Unstacker.get_new_values.\n \"\"\"\n # shared with ExtensionBlock\n new_items = unstacker.get_new_columns()\n new_placement = new_columns.get_indexer(new_items)\n new_values, mask = unstacker.get_new_values()\n\n mask = mask.any(0)\n return new_placement, new_values, mask\n\n\nclass ExtensionBlock(NonConsolidatableMixIn, Block):\n \"\"\"Block for holding extension types.\n\n Notes\n -----\n This holds all 3rd-party extension array types. It's also the immediate\n parent class for our internal extension types' blocks, CategoricalBlock.\n\n ExtensionArrays are limited to 1-D.\n \"\"\"\n\n is_extension = True\n\n def __init__(self, values, placement, ndim=None):\n values = self._maybe_coerce_values(values)\n super().__init__(values, placement, ndim)\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Unbox to an extension array.\n\n This will unbox an ExtensionArray stored in an Index or Series.\n ExtensionArrays pass through. 
No dtype coercion is done.\n\n Parameters\n ----------\n values : Index, Series, ExtensionArray\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n return extract_array(values)\n\n @property\n def _holder(self):\n # For extension blocks, the holder is values-dependent.\n return type(self.values)\n\n @property\n def fill_value(self):\n # Used in reindex_indexer\n return self.values.dtype.na_value\n\n @property\n def _can_hold_na(self):\n # The default ExtensionArray._can_hold_na is True\n return self._holder._can_hold_na\n\n @property\n def is_view(self):\n \"\"\"Extension arrays are never treated as views.\"\"\"\n return False\n\n @property\n def is_numeric(self):\n return self.values.dtype._is_numeric\n\n def setitem(self, indexer, value):\n \"\"\"Set the value inplace, returning a same-typed block.\n\n This differs from Block.setitem by not allowing setitem to change\n the dtype of the Block.\n\n Parameters\n ----------\n indexer : tuple, list-like, array-like, slice\n The subset of self.values to set\n value : object\n The value being set\n\n Returns\n -------\n Block\n\n Notes\n -----\n `indexer` is a direct slice/positional indexer. `value` must\n be a compatible shape.\n \"\"\"\n if isinstance(indexer, tuple):\n # we are always 1-D\n indexer = indexer[0]\n\n check_setitem_lengths(indexer, value, self.values)\n self.values[indexer] = value\n return self\n\n def get_values(self, dtype=None):\n # ExtensionArrays must be iterable, so this works.\n values = np.asarray(self.values)\n if values.ndim == self.ndim - 1:\n values = values.reshape((1,) + values.shape)\n return values\n\n def to_dense(self):\n return np.asarray(self.values)\n\n def to_native_types(self, slicer=None, na_rep=\"nan\", quoting=None, **kwargs):\n \"\"\"override to use ExtensionArray astype for the conversion\"\"\"\n values = self.values\n if slicer is not None:\n values = values[slicer]\n mask = isna(values)\n\n values = np.asarray(values.astype(object))\n values[mask] = na_rep\n\n # we are expected to return a 2-d ndarray\n return values.reshape(1, len(values))\n\n def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):\n \"\"\"\n Take values according to indexer and return them as a block.\n \"\"\"\n if fill_tuple is None:\n fill_value = None\n else:\n fill_value = fill_tuple[0]\n\n # axis doesn't matter; we are really a single-dim object\n # but are passed the axis depending on the calling routing\n # if its REALLY axis 0, then this will be a reindex and not a take\n new_values = self.values.take(indexer, fill_value=fill_value, allow_fill=True)\n\n # Called from three places in managers, all of which satisfy\n # this assertion\n assert not (self.ndim == 1 and new_mgr_locs is None)\n if new_mgr_locs is None:\n new_mgr_locs = self.mgr_locs\n\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n def _can_hold_element(self, element: Any) -> bool:\n # XXX: We may need to think about pushing this onto the array.\n # We're doing the same as CategoricalBlock here.\n return True\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n\n # slice the category\n # return same dims as we currently have\n\n if isinstance(slicer, tuple) and len(slicer) == 2:\n if not com.is_null_slice(slicer[0]):\n raise AssertionError(\"invalid slicing for a 1-ndim categorical\")\n slicer = slicer[1]\n\n return self.values[slicer]\n\n def concat_same_type(self, to_concat, placement=None):\n \"\"\"\n Concatenate list of single blocks of the same type.\n \"\"\"\n values = 
self._holder._concat_same_type([blk.values for blk in to_concat])\n placement = placement or slice(0, len(values), 1)\n return self.make_block_same_class(values, ndim=self.ndim, placement=placement)\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n values = self.values if inplace else self.values.copy()\n values = values.fillna(value=value, limit=limit)\n return [\n self.make_block_same_class(\n values=values, placement=self.mgr_locs, ndim=self.ndim\n )\n ]\n\n def interpolate(\n self, method=\"pad\", axis=0, inplace=False, limit=None, fill_value=None, **kwargs\n ):\n\n values = self.values if inplace else self.values.copy()\n return self.make_block_same_class(\n values=values.fillna(value=fill_value, method=method, limit=limit),\n placement=self.mgr_locs,\n )\n\n def shift(\n self,\n periods: int,\n axis: libinternals.BlockPlacement = 0,\n fill_value: Any = None,\n ) -> List[\"ExtensionBlock\"]:\n \"\"\"\n Shift the block by `periods`.\n\n Dispatches to underlying ExtensionArray and re-boxes in an\n ExtensionBlock.\n \"\"\"\n return [\n self.make_block_same_class(\n self.values.shift(periods=periods, fill_value=fill_value),\n placement=self.mgr_locs,\n ndim=self.ndim,\n )\n ]\n\n def where(\n self,\n other,\n cond,\n align=True,\n errors=\"raise\",\n try_cast: bool = False,\n axis: int = 0,\n ) -> List[\"Block\"]:\n if isinstance(other, ABCDataFrame):\n # ExtensionArrays are 1-D, so if we get here then\n # `other` should be a DataFrame with a single column.\n assert other.shape[1] == 1\n other = other.iloc[:, 0]\n\n other = extract_array(other, extract_numpy=True)\n\n if isinstance(cond, ABCDataFrame):\n assert cond.shape[1] == 1\n cond = cond.iloc[:, 0]\n\n cond = extract_array(cond, extract_numpy=True)\n\n if lib.is_scalar(other) and isna(other):\n # The default `other` for Series / Frame is np.nan\n # we want to replace that with the correct NA value\n # for the type\n other = self.dtype.na_value\n\n if is_sparse(self.values):\n # TODO(SparseArray.__setitem__): remove this if condition\n # We need to re-infer the type of the data after doing the\n # where, for cases where the subtypes don't match\n dtype = None\n else:\n dtype = self.dtype\n\n result = self.values.copy()\n icond = ~cond\n if lib.is_scalar(other):\n set_other = other\n else:\n set_other = other[icond]\n try:\n result[icond] = set_other\n except (NotImplementedError, TypeError):\n # NotImplementedError for class not implementing `__setitem__`\n # TypeError for SparseArray, which implements just to raise\n # a TypeError\n result = self._holder._from_sequence(\n np.where(cond, self.values, other), dtype=dtype\n )\n\n return [self.make_block_same_class(result, placement=self.mgr_locs)]\n\n @property\n def _ftype(self):\n return getattr(self.values, \"_pandas_ftype\", Block._ftype)\n\n def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):\n # ExtensionArray-safe unstack.\n # We override ObjectBlock._unstack, which unstacks directly on the\n # values of the array. 
For EA-backed blocks, this would require\n # converting to a 2-D ndarray of objects.\n # Instead, we unstack an ndarray of integer positions, followed by\n # a `take` on the actual values.\n dummy_arr = np.arange(n_rows)\n dummy_unstacker = functools.partial(unstacker_func, fill_value=-1)\n unstacker = dummy_unstacker(dummy_arr)\n\n new_placement, new_values, mask = self._get_unstack_items(\n unstacker, new_columns\n )\n\n blocks = [\n self.make_block_same_class(\n self.values.take(indices, allow_fill=True, fill_value=fill_value),\n [place],\n )\n for indices, place in zip(new_values.T, new_placement)\n ]\n return blocks, mask\n\n\nclass ObjectValuesExtensionBlock(ExtensionBlock):\n \"\"\"\n Block providing backwards-compatibility for `.values`.\n\n Used by PeriodArray and IntervalArray to ensure that\n Series[T].values is an ndarray of objects.\n \"\"\"\n\n def external_values(self, dtype=None):\n return self.values.astype(object)\n\n\nclass NumericBlock(Block):\n __slots__ = ()\n is_numeric = True\n _can_hold_na = True\n\n\nclass FloatOrComplexBlock(NumericBlock):\n __slots__ = ()\n\n def equals(self, other) -> bool:\n if self.dtype != other.dtype or self.shape != other.shape:\n return False\n left, right = self.values, other.values\n return ((left == right) | (np.isnan(left) & np.isnan(right))).all()\n\n\nclass FloatBlock(FloatOrComplexBlock):\n __slots__ = ()\n is_float = True\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(\n tipo.type, (np.datetime64, np.timedelta64)\n )\n return isinstance(\n element, (float, int, np.floating, np.int_)\n ) and not isinstance(\n element,\n (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64),\n )\n\n def to_native_types(\n self,\n slicer=None,\n na_rep=\"\",\n float_format=None,\n decimal=\".\",\n quoting=None,\n **kwargs,\n ):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n if slicer is not None:\n values = values[:, slicer]\n\n # see gh-13418: no special formatting is desired at the\n # output (important for appropriate 'quoting' behaviour),\n # so do not pass it through the FloatArrayFormatter\n if float_format is None and decimal == \".\":\n mask = isna(values)\n\n if not quoting:\n values = values.astype(str)\n else:\n values = np.array(values, dtype=\"object\")\n\n values[mask] = na_rep\n return values\n\n from pandas.io.formats.format import FloatArrayFormatter\n\n formatter = FloatArrayFormatter(\n values,\n na_rep=na_rep,\n float_format=float_format,\n decimal=decimal,\n quoting=quoting,\n fixed_width=False,\n )\n return formatter.get_result_as_array()\n\n def should_store(self, value):\n # when inserting a column should not coerce integers to floats\n # unnecessarily\n return issubclass(value.dtype.type, np.floating) and value.dtype == self.dtype\n\n\nclass ComplexBlock(FloatOrComplexBlock):\n __slots__ = ()\n is_complex = True\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating))\n return isinstance(\n element, (float, int, complex, np.float_, np.int_)\n ) and not isinstance(element, (bool, np.bool_))\n\n def should_store(self, value):\n return issubclass(value.dtype.type, np.complexfloating)\n\n\nclass IntBlock(NumericBlock):\n __slots__ = ()\n is_integer = True\n _can_hold_na = False\n\n 
def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return (\n issubclass(tipo.type, np.integer)\n and not issubclass(tipo.type, (np.datetime64, np.timedelta64))\n and self.dtype.itemsize >= tipo.itemsize\n )\n return is_integer(element)\n\n def should_store(self, value):\n return is_integer_dtype(value) and value.dtype == self.dtype\n\n\nclass DatetimeLikeBlockMixin:\n \"\"\"Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.\"\"\"\n\n @property\n def _holder(self):\n return DatetimeArray\n\n @property\n def fill_value(self):\n return np.datetime64(\"NaT\", \"ns\")\n\n def get_values(self, dtype=None):\n \"\"\"\n return object dtype as boxed values, such as Timestamps/Timedelta\n \"\"\"\n if is_object_dtype(dtype):\n values = self.values.ravel()\n result = self._holder(values).astype(object)\n return result.reshape(self.values.shape)\n return self.values\n\n\nclass DatetimeBlock(DatetimeLikeBlockMixin, Block):\n __slots__ = ()\n is_datetime = True\n\n def __init__(self, values, placement, ndim=None):\n values = self._maybe_coerce_values(values)\n super().__init__(values, placement=placement, ndim=ndim)\n\n @property\n def _can_hold_na(self):\n return True\n\n def _maybe_coerce_values(self, values):\n \"\"\"\n Input validation for values passed to __init__. Ensure that\n we have datetime64ns, coercing if necessary.\n\n Parameters\n ----------\n values : array-like\n Must be convertible to datetime64\n\n Returns\n -------\n values : ndarray[datetime64ns]\n\n Overridden by DatetimeTZBlock.\n \"\"\"\n if values.dtype != _NS_DTYPE:\n values = conversion.ensure_datetime64ns(values)\n\n if isinstance(values, DatetimeArray):\n values = values._data\n\n assert isinstance(values, np.ndarray), type(values)\n return values\n\n def astype(self, dtype, copy: bool = False, errors: str = \"raise\"):\n \"\"\"\n these automatically copy, so copy=True has no effect\n raise on an except if raise == True\n \"\"\"\n dtype = pandas_dtype(dtype)\n\n # if we are passed a datetime64[ns, tz]\n if is_datetime64tz_dtype(dtype):\n values = self.values\n if getattr(values, \"tz\", None) is None:\n values = DatetimeArray(values).tz_localize(\"UTC\")\n values = values.tz_convert(dtype.tz)\n return self.make_block(values)\n\n # delegate\n return super().astype(dtype=dtype, copy=copy, errors=errors)\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n if self.is_datetimetz:\n # require exact match, since non-nano does not exist\n return is_dtype_equal(tipo, self.dtype) or is_valid_nat_for_dtype(\n element, self.dtype\n )\n\n # GH#27419 if we get a non-nano datetime64 object\n return is_datetime64_dtype(tipo)\n elif element is NaT:\n return True\n elif isinstance(element, datetime):\n if self.is_datetimetz:\n return tz_compare(element.tzinfo, self.dtype.tz)\n return element.tzinfo is None\n\n return is_valid_nat_for_dtype(element, self.dtype)\n\n def to_native_types(\n self, slicer=None, na_rep=None, date_format=None, quoting=None, **kwargs\n ):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n i8values = self.values.view(\"i8\")\n\n if slicer is not None:\n values = values[..., slicer]\n i8values = i8values[..., slicer]\n\n from pandas.io.formats.format import _get_format_datetime64_from_values\n\n fmt = _get_format_datetime64_from_values(values, date_format)\n\n result = tslib.format_array_from_datetime(\n i8values.ravel(),\n 
tz=getattr(self.values, \"tz\", None),\n format=fmt,\n na_rep=na_rep,\n ).reshape(i8values.shape)\n return np.atleast_2d(result)\n\n def should_store(self, value):\n return (\n issubclass(value.dtype.type, np.datetime64)\n and not is_datetime64tz_dtype(value)\n and not is_extension_array_dtype(value)\n )\n\n def set(self, locs, values):\n \"\"\"\n Modify Block in-place with new item value\n\n Returns\n -------\n None\n \"\"\"\n values = conversion.ensure_datetime64ns(values, copy=False)\n\n self.values[locs] = values\n\n def external_values(self):\n return np.asarray(self.values.astype(\"datetime64[ns]\", copy=False))\n\n\nclass DatetimeTZBlock(ExtensionBlock, DatetimeBlock):\n \"\"\" implement a datetime64 block with a tz attribute \"\"\"\n\n __slots__ = ()\n is_datetimetz = True\n is_extension = True\n\n _can_hold_element = DatetimeBlock._can_hold_element\n to_native_types = DatetimeBlock.to_native_types\n fill_value = np.datetime64(\"NaT\", \"ns\")\n\n @property\n def _holder(self):\n return DatetimeArray\n\n def _maybe_coerce_values(self, values):\n \"\"\"Input validation for values passed to __init__. Ensure that\n we have datetime64TZ, coercing if necessary.\n\n Parameters\n ----------\n values : array-like\n Must be convertible to datetime64\n\n Returns\n -------\n values : DatetimeArray\n \"\"\"\n if not isinstance(values, self._holder):\n values = self._holder(values)\n\n if values.tz is None:\n raise ValueError(\"cannot create a DatetimeTZBlock without a tz\")\n\n return values\n\n @property\n def is_view(self):\n \"\"\" return a boolean if I am possibly a view \"\"\"\n # check the ndarray values of the DatetimeIndex values\n return self.values._data.base is not None\n\n def get_values(self, dtype=None):\n \"\"\"\n Returns an ndarray of values.\n\n Parameters\n ----------\n dtype : np.dtype\n Only `object`-like dtypes are respected here (not sure\n why).\n\n Returns\n -------\n values : ndarray\n When ``dtype=object``, then and object-dtype ndarray of\n boxed values is returned. Otherwise, an M8[ns] ndarray\n is returned.\n\n DatetimeArray is always 1-d. ``get_values`` will reshape\n the return value to be the same dimensionality as the\n block.\n \"\"\"\n values = self.values\n if is_object_dtype(dtype):\n values = values.astype(object)\n\n values = np.asarray(values)\n\n if self.ndim == 2:\n # Ensure that our shape is correct for DataFrame.\n # ExtensionArrays are always 1-D, even in a DataFrame when\n # the analogous NumPy-backed column would be a 2-D ndarray.\n values = values.reshape(1, -1)\n return values\n\n def to_dense(self):\n # we request M8[ns] dtype here, even though it discards tzinfo,\n # as lots of code (e.g. 
anything using values_from_object)\n # expects that behavior.\n return np.asarray(self.values, dtype=_NS_DTYPE)\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n if isinstance(slicer, tuple):\n col, loc = slicer\n if not com.is_null_slice(col) and col != 0:\n raise IndexError(f\"{self} only contains one item\")\n return self.values[loc]\n return self.values[slicer]\n\n def diff(self, n: int, axis: int = 0) -> List[\"Block\"]:\n \"\"\"\n 1st discrete difference.\n\n Parameters\n ----------\n n : int\n Number of periods to diff.\n axis : int, default 0\n Axis to diff upon.\n\n Returns\n -------\n A list with a new TimeDeltaBlock.\n\n Notes\n -----\n The arguments here are mimicking shift so they are called correctly\n by apply.\n \"\"\"\n if axis == 0:\n # Cannot currently calculate diff across multiple blocks since this\n # function is invoked via apply\n raise NotImplementedError\n new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8\n\n # Reshape the new_values like how algos.diff does for timedelta data\n new_values = new_values.reshape(1, len(new_values))\n new_values = new_values.astype(\"timedelta64[ns]\")\n return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]\n\n def concat_same_type(self, to_concat, placement=None):\n # need to handle concat([tz1, tz2]) here, since DatetimeArray\n # only handles cases where all the tzs are the same.\n # Instead of placing the condition here, it could also go into the\n # is_uniform_join_units check, but I'm not sure what is better.\n if len({x.dtype for x in to_concat}) > 1:\n values = concat_datetime([x.values for x in to_concat])\n placement = placement or slice(0, len(values), 1)\n\n if self.ndim > 1:\n values = np.atleast_2d(values)\n return ObjectBlock(values, ndim=self.ndim, placement=placement)\n return super().concat_same_type(to_concat, placement)\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n # We support filling a DatetimeTZ with a `value` whose timezone\n # is different by coercing to object.\n if self._can_hold_element(value):\n return super().fillna(value, limit, inplace, downcast)\n\n # different timezones, or a non-tz\n return self.astype(object).fillna(\n value, limit=limit, inplace=inplace, downcast=downcast\n )\n\n def setitem(self, indexer, value):\n # https://github.com/pandas-dev/pandas/issues/24020\n # Need a dedicated setitem until #24020 (type promotion in setitem\n # for extension arrays) is designed and implemented.\n if self._can_hold_element(value) or (\n isinstance(indexer, np.ndarray) and indexer.size == 0\n ):\n return super().setitem(indexer, value)\n\n obj_vals = self.values.astype(object)\n newb = make_block(\n obj_vals, placement=self.mgr_locs, klass=ObjectBlock, ndim=self.ndim\n )\n return newb.setitem(indexer, value)\n\n def equals(self, other) -> bool:\n # override for significant performance improvement\n if self.dtype != other.dtype or self.shape != other.shape:\n return False\n return (self.values.view(\"i8\") == other.values.view(\"i8\")).all()\n\n def quantile(self, qs, interpolation=\"linear\", axis=0):\n naive = self.values.view(\"M8[ns]\")\n\n # kludge for 2D block with 1D values\n naive = naive.reshape(self.shape)\n\n blk = self.make_block(naive)\n res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis)\n\n # ravel is kludge for 2D block with 1D values, assumes column-like\n aware = self._holder(res_blk.values.ravel(), dtype=self.dtype)\n return self.make_block_same_class(aware, ndim=res_blk.ndim)\n\n\nclass 
TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):\n __slots__ = ()\n is_timedelta = True\n _can_hold_na = True\n is_numeric = False\n fill_value = np.timedelta64(\"NaT\", \"ns\")\n\n def __init__(self, values, placement, ndim=None):\n if values.dtype != _TD_DTYPE:\n values = conversion.ensure_timedelta64ns(values)\n if isinstance(values, TimedeltaArray):\n values = values._data\n assert isinstance(values, np.ndarray), type(values)\n super().__init__(values, placement=placement, ndim=ndim)\n\n @property\n def _holder(self):\n return TimedeltaArray\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, np.timedelta64)\n elif element is NaT:\n return True\n elif isinstance(element, (timedelta, np.timedelta64)):\n return True\n return is_valid_nat_for_dtype(element, self.dtype)\n\n def fillna(self, value, **kwargs):\n\n # allow filling with integers to be\n # interpreted as nanoseconds\n if is_integer(value):\n # Deprecation GH#24694, GH#19233\n raise TypeError(\n \"Passing integers to fillna for timedelta64[ns] dtype is no \"\n \"longer supported. To obtain the old behavior, pass \"\n \"`pd.Timedelta(seconds=n)` instead.\"\n )\n return super().fillna(value, **kwargs)\n\n def should_store(self, value):\n return issubclass(\n value.dtype.type, np.timedelta64\n ) and not is_extension_array_dtype(value)\n\n def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n if slicer is not None:\n values = values[:, slicer]\n mask = isna(values)\n\n rvalues = np.empty(values.shape, dtype=object)\n if na_rep is None:\n na_rep = \"NaT\"\n rvalues[mask] = na_rep\n imask = (~mask).ravel()\n\n # FIXME:\n # should use the formats.format.Timedelta64Formatter here\n # to figure what format to pass to the Timedelta\n # e.g. 
to not show the decimals say\n rvalues.flat[imask] = np.array(\n [Timedelta(val)._repr_base(format=\"all\") for val in values.ravel()[imask]],\n dtype=object,\n )\n return rvalues\n\n def external_values(self, dtype=None):\n return np.asarray(self.values.astype(\"timedelta64[ns]\", copy=False))\n\n\nclass BoolBlock(NumericBlock):\n __slots__ = ()\n is_bool = True\n _can_hold_na = False\n\n def _can_hold_element(self, element: Any) -> bool:\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, np.bool_)\n return isinstance(element, (bool, np.bool_))\n\n def should_store(self, value):\n return issubclass(value.dtype.type, np.bool_) and not is_extension_array_dtype(\n value\n )\n\n def replace(\n self, to_replace, value, inplace=False, filter=None, regex=False, convert=True\n ):\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n to_replace_values = np.atleast_1d(to_replace)\n if not np.can_cast(to_replace_values, bool):\n return self\n return super().replace(\n to_replace,\n value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n\n\nclass ObjectBlock(Block):\n __slots__ = ()\n is_object = True\n _can_hold_na = True\n\n def __init__(self, values, placement=None, ndim=2):\n if issubclass(values.dtype.type, str):\n values = np.array(values, dtype=object)\n\n super().__init__(values, ndim=ndim, placement=placement)\n\n @property\n def is_bool(self):\n \"\"\" we can be a bool if we have only bool values but are of type\n object\n \"\"\"\n return lib.is_bool_array(self.values.ravel())\n\n def convert(\n self,\n copy: bool = True,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n coerce: bool = False,\n ):\n \"\"\" attempt to coerce any object types to better types return a copy of\n the block (if copy = True) by definition we ARE an ObjectBlock!!!!!\n\n can return multiple blocks!\n \"\"\"\n\n # operate column-by-column\n def f(mask, val, idx):\n shape = val.shape\n values = soft_convert_objects(\n val.ravel(),\n datetime=datetime,\n numeric=numeric,\n timedelta=timedelta,\n coerce=coerce,\n copy=copy,\n )\n if isinstance(values, np.ndarray):\n # TODO: allow EA once reshape is supported\n values = values.reshape(shape)\n\n values = _block_shape(values, ndim=self.ndim)\n return values\n\n if self.ndim == 2:\n blocks = self.split_and_operate(None, f, False)\n else:\n values = f(None, self.values.ravel(), None)\n blocks = [make_block(values, ndim=self.ndim, placement=self.mgr_locs)]\n\n return blocks\n\n def _maybe_downcast(self, blocks: List[\"Block\"], downcast=None) -> List[\"Block\"]:\n\n if downcast is not None:\n return blocks\n\n # split and convert the blocks\n return _extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks])\n\n def _can_hold_element(self, element: Any) -> bool:\n return True\n\n def should_store(self, value):\n return not (\n issubclass(\n value.dtype.type,\n (np.integer, np.floating, np.complexfloating, np.datetime64, np.bool_),\n )\n or is_extension_array_dtype(value)\n )\n\n def replace(\n self, to_replace, value, inplace=False, filter=None, regex=False, convert=True\n ):\n to_rep_is_list = is_list_like(to_replace)\n value_is_list = is_list_like(value)\n both_lists = to_rep_is_list and value_is_list\n either_list = to_rep_is_list or value_is_list\n\n result_blocks = []\n blocks = [self]\n\n if not either_list and is_re(to_replace):\n return self._replace_single(\n to_replace,\n value,\n inplace=inplace,\n filter=filter,\n regex=True,\n convert=convert,\n )\n 
elif not (either_list or regex):\n return super().replace(\n to_replace,\n value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n elif both_lists:\n for to_rep, v in zip(to_replace, value):\n result_blocks = []\n for b in blocks:\n result = b._replace_single(\n to_rep,\n v,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n result_blocks = _extend_blocks(result, result_blocks)\n blocks = result_blocks\n return result_blocks\n\n elif to_rep_is_list and regex:\n for to_rep in to_replace:\n result_blocks = []\n for b in blocks:\n result = b._replace_single(\n to_rep,\n value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert,\n )\n result_blocks = _extend_blocks(result, result_blocks)\n blocks = result_blocks\n return result_blocks\n\n return self._replace_single(\n to_replace,\n value,\n inplace=inplace,\n filter=filter,\n convert=convert,\n regex=regex,\n )\n\n def _replace_single(\n self,\n to_replace,\n value,\n inplace=False,\n filter=None,\n regex=False,\n convert=True,\n mask=None,\n ):\n \"\"\"\n Replace elements by the given value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n filter : list, optional\n regex : bool, default False\n If true, perform regular expression substitution.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n a new block, the result after replacing\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # to_replace is regex compilable\n to_rep_re = regex and is_re_compilable(to_replace)\n\n # regex is regex compilable\n regex_re = is_re_compilable(regex)\n\n # only one will survive\n if to_rep_re and regex_re:\n raise AssertionError(\n \"only one of to_replace and regex can be regex compilable\"\n )\n\n # if regex was passed as something that can be a regex (rather than a\n # boolean)\n if regex_re:\n to_replace = regex\n\n regex = regex_re or to_rep_re\n\n # try to get the pattern attribute (compiled re) or it's a string\n if is_re(to_replace):\n pattern = to_replace.pattern\n else:\n pattern = to_replace\n\n # if the pattern is not empty and to_replace is either a string or a\n # regex\n if regex and pattern:\n rx = re.compile(to_replace)\n else:\n # if the thing to replace is not a string or compiled regex call\n # the superclass method -> to_replace is some kind of object\n return super().replace(\n to_replace, value, inplace=inplace, filter=filter, regex=regex\n )\n\n new_values = self.values if inplace else self.values.copy()\n\n # deal with replacing values with objects (strings) that match but\n # whose replacement is not a string (numeric, nan, object)\n if isna(value) or not isinstance(value, str):\n\n def re_replacer(s):\n if is_re(rx) and isinstance(s, str):\n return value if rx.search(s) is not None else s\n else:\n return s\n\n else:\n # value is guaranteed to be a string here, s can be either a string\n # or null if it's null it gets returned\n def re_replacer(s):\n if is_re(rx) and isinstance(s, str):\n return rx.sub(value, s)\n else:\n return s\n\n f = np.vectorize(re_replacer, otypes=[self.dtype])\n\n if filter is None:\n filt = slice(None)\n else:\n filt = self.mgr_locs.isin(filter).nonzero()[0]\n\n if mask is None:\n new_values[filt] = 
f(new_values[filt])\n else:\n new_values[filt][mask] = f(new_values[filt][mask])\n\n # convert\n block = self.make_block(new_values)\n if convert:\n block = block.convert(numeric=False)\n return block\n\n def _replace_coerce(\n self, to_replace, value, inplace=True, regex=False, convert=False, mask=None\n ):\n \"\"\"\n Replace value corresponding to the given boolean array with another\n value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n regex : bool, default False\n If true, perform regular expression substitution.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n A new block if there is anything to replace or the original block.\n \"\"\"\n if mask.any():\n block = super()._replace_coerce(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n regex=regex,\n convert=convert,\n mask=mask,\n )\n if convert:\n block = [b.convert(numeric=False, copy=True) for b in block]\n return block\n if convert:\n return [self.convert(numeric=False, copy=True)]\n return self\n\n\nclass CategoricalBlock(ExtensionBlock):\n __slots__ = ()\n is_categorical = True\n _verify_integrity = True\n _can_hold_na = True\n _concatenator = staticmethod(concat_categorical)\n\n def __init__(self, values, placement, ndim=None):\n # coerce to categorical if we can\n values = extract_array(values)\n assert isinstance(values, Categorical), type(values)\n super().__init__(values, placement=placement, ndim=ndim)\n\n @property\n def _holder(self):\n return Categorical\n\n @property\n def array_dtype(self):\n \"\"\" the dtype to return if I want to construct this block as an\n array\n \"\"\"\n return np.object_\n\n def to_dense(self):\n # Categorical.get_values returns a DatetimeIndex for datetime\n # categories, so we can't simply use `np.asarray(self.values)` like\n # other types.\n return self.values._internal_get_values()\n\n def to_native_types(self, slicer=None, na_rep=\"\", quoting=None, **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n if slicer is not None:\n # Categorical is always one dimension\n values = values[slicer]\n mask = isna(values)\n values = np.array(values, dtype=\"object\")\n values[mask] = na_rep\n\n # we are expected to return a 2-d ndarray\n return values.reshape(1, len(values))\n\n def concat_same_type(self, to_concat, placement=None):\n \"\"\"\n Concatenate list of single blocks of the same type.\n\n Note that this CategoricalBlock._concat_same_type *may* not\n return a CategoricalBlock. When the categories in `to_concat`\n differ, this will return an object ndarray.\n\n If / when we decide we don't like that behavior:\n\n 1. Change Categorical._concat_same_type to use union_categoricals\n 2. 
Delete this method.\n \"\"\"\n values = self._concatenator(\n [blk.values for blk in to_concat], axis=self.ndim - 1\n )\n # not using self.make_block_same_class as values can be object dtype\n return make_block(\n values, placement=placement or slice(0, len(values), 1), ndim=self.ndim\n )\n\n def replace(\n self,\n to_replace,\n value,\n inplace: bool = False,\n filter=None,\n regex: bool = False,\n convert: bool = True,\n ):\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n result = self if inplace else self.copy()\n if filter is None: # replace was called on a series\n result.values.replace(to_replace, value, inplace=True)\n if convert:\n return result.convert(numeric=False, copy=not inplace)\n else:\n return result\n else: # replace was called on a DataFrame\n if not isna(value):\n result.values.add_categories(value, inplace=True)\n return super(CategoricalBlock, result).replace(\n to_replace, value, inplace, filter, regex, convert\n )\n\n\n# -----------------------------------------------------------------\n# Constructor Helpers\n\n\ndef get_block_type(values, dtype=None):\n \"\"\"\n Find the appropriate Block subclass to use for the given values and dtype.\n\n Parameters\n ----------\n values : ndarray-like\n dtype : numpy or pandas dtype\n\n Returns\n -------\n cls : class, subclass of Block\n \"\"\"\n dtype = dtype or values.dtype\n vtype = dtype.type\n\n if is_sparse(dtype):\n # Need this first(ish) so that Sparse[datetime] is sparse\n cls = ExtensionBlock\n elif is_categorical(values):\n cls = CategoricalBlock\n elif issubclass(vtype, np.datetime64):\n assert not is_datetime64tz_dtype(values)\n cls = DatetimeBlock\n elif is_datetime64tz_dtype(values):\n cls = DatetimeTZBlock\n elif is_interval_dtype(dtype) or is_period_dtype(dtype):\n cls = ObjectValuesExtensionBlock\n elif is_extension_array_dtype(values):\n cls = ExtensionBlock\n elif issubclass(vtype, np.floating):\n cls = FloatBlock\n elif issubclass(vtype, np.timedelta64):\n assert issubclass(vtype, np.integer)\n cls = TimeDeltaBlock\n elif issubclass(vtype, np.complexfloating):\n cls = ComplexBlock\n elif issubclass(vtype, np.integer):\n cls = IntBlock\n elif dtype == np.bool_:\n cls = BoolBlock\n else:\n cls = ObjectBlock\n return cls\n\n\ndef make_block(values, placement, klass=None, ndim=None, dtype=None):\n # Ensure that we don't allow PandasArray / PandasDtype in internals.\n # For now, blocks should be backed by ndarrays when possible.\n if isinstance(values, ABCPandasArray):\n values = values.to_numpy()\n if ndim and ndim > 1:\n values = np.atleast_2d(values)\n\n if isinstance(dtype, PandasDtype):\n dtype = dtype.numpy_dtype\n\n if klass is None:\n dtype = dtype or values.dtype\n klass = get_block_type(values, dtype)\n\n elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values):\n # TODO: This is no longer hit internally; does it need to be retained\n # for e.g. 
pyarrow?\n values = DatetimeArray._simple_new(values, dtype=dtype)\n\n return klass(values, ndim=ndim, placement=placement)\n\n\n# -----------------------------------------------------------------\n\n\ndef _extend_blocks(result, blocks=None):\n \"\"\" return a new extended blocks, given the result \"\"\"\n from pandas.core.internals import BlockManager\n\n if blocks is None:\n blocks = []\n if isinstance(result, list):\n for r in result:\n if isinstance(r, list):\n blocks.extend(r)\n else:\n blocks.append(r)\n elif isinstance(result, BlockManager):\n blocks.extend(result.blocks)\n else:\n blocks.append(result)\n return blocks\n\n\ndef _block_shape(values, ndim=1, shape=None):\n \"\"\" guarantee the shape of the values to be at least 1 d \"\"\"\n if values.ndim < ndim:\n if shape is None:\n shape = values.shape\n if not is_extension_array_dtype(values):\n # TODO: https://github.com/pandas-dev/pandas/issues/23023\n # block.shape is incorrect for \"2D\" ExtensionArrays\n # We can't, and don't need to, reshape.\n values = values.reshape(tuple((1,) + shape))\n return values\n\n\ndef _merge_blocks(blocks, dtype=None, _can_consolidate=True):\n\n if len(blocks) == 1:\n return blocks[0]\n\n if _can_consolidate:\n\n if dtype is None:\n if len({b.dtype for b in blocks}) != 1:\n raise AssertionError(\"_merge_blocks are invalid!\")\n\n # FIXME: optimization potential in case all mgrs contain slices and\n # combination of those slices is a slice, too.\n new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])\n new_values = np.vstack([b.values for b in blocks])\n\n argsort = np.argsort(new_mgr_locs)\n new_values = new_values[argsort]\n new_mgr_locs = new_mgr_locs[argsort]\n\n return make_block(new_values, placement=new_mgr_locs)\n\n # no merge\n return blocks\n\n\ndef _safe_reshape(arr, new_shape):\n \"\"\"\n If possible, reshape `arr` to have shape `new_shape`,\n with a couple of exceptions (see gh-13012):\n\n 1) If `arr` is a ExtensionArray or Index, `arr` will be\n returned as is.\n 2) If `arr` is a Series, the `_values` attribute will\n be reshaped and returned.\n\n Parameters\n ----------\n arr : array-like, object to be reshaped\n new_shape : int or tuple of ints, the new shape\n \"\"\"\n if isinstance(arr, ABCSeries):\n arr = arr._values\n if not isinstance(arr, ABCExtensionArray):\n arr = arr.reshape(new_shape)\n return arr\n\n\ndef _putmask_smart(v, mask, n):\n \"\"\"\n Return a new ndarray, try to preserve dtype if possible.\n\n Parameters\n ----------\n v : `values`, updated in-place (array like)\n mask : np.ndarray\n Applies to both sides (array like).\n n : `new values` either scalar or an array like aligned with `values`\n\n Returns\n -------\n values : ndarray with updated values\n this *may* be a copy of the original\n\n See Also\n --------\n ndarray.putmask\n \"\"\"\n\n # we cannot use np.asarray() here as we cannot have conversions\n # that numpy does when numeric are mixed with strings\n\n # n should be the length of the mask or a scalar here\n if not is_list_like(n):\n n = np.repeat(n, len(mask))\n\n # see if we are only masking values that if putted\n # will work in the current dtype\n try:\n nn = n[mask]\n except TypeError:\n # TypeError: only integer scalar arrays can be converted to a scalar index\n pass\n else:\n # make sure that we have a nullable type\n # if we have nulls\n if not _isna_compat(v, nn[0]):\n pass\n elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):\n # only compare integers/floats\n pass\n elif not (is_float_dtype(v.dtype) or 
is_integer_dtype(v.dtype)):\n # only compare integers/floats\n pass\n else:\n\n # we ignore ComplexWarning here\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", np.ComplexWarning)\n nn_at = nn.astype(v.dtype)\n\n comp = nn == nn_at\n if is_list_like(comp) and comp.all():\n nv = v.copy()\n nv[mask] = nn_at\n return nv\n\n n = np.asarray(n)\n\n def _putmask_preserve(nv, n):\n try:\n nv[mask] = n[mask]\n except (IndexError, ValueError):\n nv[mask] = n\n return nv\n\n # preserves dtype if possible\n if v.dtype.kind == n.dtype.kind:\n return _putmask_preserve(v, n)\n\n # change the dtype if needed\n dtype, _ = maybe_promote(n.dtype)\n\n if is_extension_array_dtype(v.dtype) and is_object_dtype(dtype):\n v = v._internal_get_values(dtype)\n else:\n v = v.astype(dtype)\n\n return _putmask_preserve(v, n)\n"
] | [
[
"numpy.any",
"numpy.asarray",
"pandas._libs.lib.is_float",
"pandas.core.dtypes.common.is_categorical",
"pandas._libs.tslibs.Timedelta",
"numpy.datetime64",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"pandas._libs.index.convert_scalar",
"pandas.io.formats.format.FloatArrayFormatter",
"pandas.core.computation.expressions.where",
"numpy.timedelta64",
"pandas.core.dtypes.cast.maybe_downcast_numeric",
"numpy.atleast_2d",
"pandas.core.dtypes.common.is_period_dtype",
"pandas._libs.tslibs.timezones.tz_compare",
"pandas.core.missing.interpolate_2d",
"numpy.repeat",
"numpy.putmask",
"pandas.core.dtypes.cast.find_common_type",
"pandas._libs.algos._validate_limit",
"pandas.core.indexers.is_empty_indexer",
"pandas.core.missing.interpolate_1d",
"pandas.core.algorithms.diff",
"numpy.errstate",
"numpy.array",
"pandas.core.dtypes.missing.array_equivalent",
"pandas.core.missing.clean_interp_method",
"pandas._libs.lib.is_scalar",
"numpy.vectorize",
"pandas.core.arrays.DatetimeArray",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.missing._isna_compat",
"numpy.vstack",
"pandas._libs.internals.BlockPlacement",
"pandas.io.formats.printing.pprint_thing",
"pandas._libs.tslibs.conversion.ensure_timedelta64ns",
"numpy.delete",
"pandas.core.indexers.is_scalar_indexer",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.dtypes.missing.isna",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.cast.maybe_upcast",
"numpy.concatenate",
"pandas.core.dtypes.common.is_integer_dtype",
"numpy.argsort",
"pandas.core.arrays.Categorical",
"numpy.apply_along_axis",
"pandas.core.construction.extract_array",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.missing.clean_fill_method",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.common.is_re_compilable",
"numpy.isnan",
"pandas._libs.writers.word_len",
"numpy.can_cast",
"pandas.core.dtypes.cast.maybe_infer_dtype_type",
"numpy.arange",
"numpy.ndim",
"numpy.prod",
"pandas.io.formats.format._get_format_datetime64_from_values",
"pandas.core.missing.mask_missing",
"numpy.atleast_1d",
"pandas.core.dtypes.common.is_sparse",
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.dtypes.common.is_re",
"pandas.core.indexers.check_setitem_lengths",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.common.is_null_slice",
"pandas.core.dtypes.concat.concat_datetime",
"pandas.core.dtypes.missing.is_valid_nat_for_dtype",
"pandas.core.dtypes.cast.astype_nansafe",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"numpy.where",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.algorithms.take_nd",
"pandas.core.dtypes.common.is_interval_dtype",
"numpy.broadcast_to",
"pandas.core.dtypes.common.is_extension_array_dtype",
"numpy.empty",
"pandas.core.dtypes.cast.maybe_promote",
"numpy.squeeze",
"pandas.core.dtypes.cast.infer_dtype_from",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.arrays.DatetimeArray._simple_new",
"numpy.place",
"pandas._libs.tslibs.conversion.ensure_datetime64ns"
]
] |
zhonglihanzhu/tensorflow-objectDetection | [
"aa3d1b754d5c78b8401ce86d4c20f45741fc2b77"
] | [
"builders/losses_builder_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for losses_builder.\"\"\"\n\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom builders import losses_builder\nfrom core import losses\nfrom protos import losses_pb2\n\n\nclass LocalizationLossBuilderTest(tf.test.TestCase):\n\n def test_build_weighted_l2_localization_loss(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, localization_loss, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(localization_loss,\n losses.WeightedL2LocalizationLoss))\n\n def test_build_weighted_smooth_l1_localization_loss(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_smooth_l1 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, localization_loss, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(localization_loss,\n losses.WeightedSmoothL1LocalizationLoss))\n\n def test_build_weighted_iou_localization_loss(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_iou {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, localization_loss, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(localization_loss,\n losses.WeightedIOULocalizationLoss))\n\n def test_anchorwise_output(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_smooth_l1 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, localization_loss, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(localization_loss,\n losses.WeightedSmoothL1LocalizationLoss))\n predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])\n targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]])\n weights = tf.constant([[1.0, 1.0]])\n loss = localization_loss(predictions, targets, weights=weights)\n self.assertEqual(loss.shape, [1, 2])\n\n def test_raise_error_on_empty_localization_config(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n with self.assertRaises(ValueError):\n losses_builder._build_localization_loss(losses_proto)\n\n\nclass ClassificationLossBuilderTest(tf.test.TestCase):\n\n def test_build_weighted_sigmoid_classification_loss(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_sigmoid {\n }\n 
}\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSigmoidClassificationLoss))\n\n def test_build_weighted_sigmoid_focal_classification_loss(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_sigmoid_focal {\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.SigmoidFocalClassificationLoss))\n self.assertAlmostEqual(classification_loss._alpha, None)\n self.assertAlmostEqual(classification_loss._gamma, 2.0)\n\n def test_build_weighted_sigmoid_focal_loss_non_default(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_sigmoid_focal {\n alpha: 0.25\n gamma: 3.0\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.SigmoidFocalClassificationLoss))\n self.assertAlmostEqual(classification_loss._alpha, 0.25)\n self.assertAlmostEqual(classification_loss._gamma, 3.0)\n\n def test_build_weighted_softmax_classification_loss(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_softmax {\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSoftmaxClassificationLoss))\n\n def test_build_weighted_softmax_classification_loss_with_logit_scale(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_softmax {\n logit_scale: 2.0\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSoftmaxClassificationLoss))\n\n def test_build_bootstrapped_sigmoid_classification_loss(self):\n losses_text_proto = \"\"\"\n classification_loss {\n bootstrapped_sigmoid {\n alpha: 0.5\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.BootstrappedSigmoidClassificationLoss))\n\n def test_anchorwise_output(self):\n losses_text_proto = \"\"\"\n classification_loss {\n weighted_sigmoid {\n anchorwise_output: true\n }\n }\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss, _, _, _, _ = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSigmoidClassificationLoss))\n predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]])\n targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])\n weights = tf.constant([[1.0, 1.0]])\n loss = 
classification_loss(predictions, targets, weights=weights)\n self.assertEqual(loss.shape, [1, 2, 3])\n\n def test_raise_error_on_empty_config(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n with self.assertRaises(ValueError):\n losses_builder.build(losses_proto)\n\n\nclass HardExampleMinerBuilderTest(tf.test.TestCase):\n\n def test_do_not_build_hard_example_miner_by_default(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)\n self.assertEqual(hard_example_miner, None)\n\n def test_build_hard_example_miner_for_classification_loss(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n hard_example_miner {\n loss_type: CLASSIFICATION\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))\n self.assertEqual(hard_example_miner._loss_type, 'cls')\n\n def test_build_hard_example_miner_for_localization_loss(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n hard_example_miner {\n loss_type: LOCALIZATION\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))\n self.assertEqual(hard_example_miner._loss_type, 'loc')\n\n def test_build_hard_example_miner_with_non_default_values(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n hard_example_miner {\n num_hard_examples: 32\n iou_threshold: 0.5\n loss_type: LOCALIZATION\n max_negatives_per_positive: 10\n min_negatives_per_image: 3\n }\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n _, _, _, _, hard_example_miner = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))\n self.assertEqual(hard_example_miner._num_hard_examples, 32)\n self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5)\n self.assertEqual(hard_example_miner._max_negatives_per_positive, 10)\n self.assertEqual(hard_example_miner._min_negatives_per_image, 3)\n\n\nclass LossBuilderTest(tf.test.TestCase):\n\n def test_build_all_loss_parameters(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_softmax {\n }\n }\n hard_example_miner {\n }\n classification_weight: 0.8\n localization_weight: 0.2\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n (classification_loss, localization_loss,\n classification_weight, localization_weight,\n hard_example_miner) = losses_builder.build(losses_proto)\n self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner))\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSoftmaxClassificationLoss))\n 
self.assertTrue(isinstance(localization_loss,\n losses.WeightedL2LocalizationLoss))\n self.assertAlmostEqual(classification_weight, 0.8)\n self.assertAlmostEqual(localization_weight, 0.2)\n\n def test_raise_error_when_both_focal_loss_and_hard_example_miner(self):\n losses_text_proto = \"\"\"\n localization_loss {\n weighted_l2 {\n }\n }\n classification_loss {\n weighted_sigmoid_focal {\n }\n }\n hard_example_miner {\n }\n classification_weight: 0.8\n localization_weight: 0.2\n \"\"\"\n losses_proto = losses_pb2.Loss()\n text_format.Merge(losses_text_proto, losses_proto)\n with self.assertRaises(ValueError):\n losses_builder.build(losses_proto)\n\n\nclass FasterRcnnClassificationLossBuilderTest(tf.test.TestCase):\n\n def test_build_sigmoid_loss(self):\n losses_text_proto = \"\"\"\n weighted_sigmoid {\n }\n \"\"\"\n losses_proto = losses_pb2.ClassificationLoss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss = losses_builder.build_faster_rcnn_classification_loss(\n losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSigmoidClassificationLoss))\n\n def test_build_softmax_loss(self):\n losses_text_proto = \"\"\"\n weighted_softmax {\n }\n \"\"\"\n losses_proto = losses_pb2.ClassificationLoss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss = losses_builder.build_faster_rcnn_classification_loss(\n losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSoftmaxClassificationLoss))\n\n def test_build_softmax_loss_by_default(self):\n losses_text_proto = \"\"\"\n \"\"\"\n losses_proto = losses_pb2.ClassificationLoss()\n text_format.Merge(losses_text_proto, losses_proto)\n classification_loss = losses_builder.build_faster_rcnn_classification_loss(\n losses_proto)\n self.assertTrue(isinstance(classification_loss,\n losses.WeightedSoftmaxClassificationLoss))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.constant",
"tensorflow.test.main"
]
] |
nicola144/auxiliary-particle-filters | [
"61d72e9163abb73007c0fbd30f68d4cc6d7ab4e9"
] | [
"src/utils.py"
] | [
"import re\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import rcParams\nfrom scipy.integrate import simps\nfrom scipy.special import logsumexp\nfrom scipy.optimize import minimize\n# from sklearn.cluster import DBSCAN\n# from sklearn.preprocessing import StandardScaler\nimport time\nimport random\nfrom tqdm import tqdm\n\n# random_seed = 5\n\ndef compute_cluster_centers(all_points):\n\tall_points = StandardScaler().fit_transform(all_points)\n\tdb = DBSCAN(eps=0.3, min_samples=10).fit(all_points)\n\tlabels = db.labels_\n\tprint(labels)\n\tsys.exit()\n\tlabels_unique = np.unique(db.labels_)\n\n\tprint(labels_unique.shape)\n\tsys.exit()\n\n\tcentroids = []\n\tfor i in range(len(labels_unique)):\n\t\tcentroids.append(np.mean(all_points[labels_unique==i,:], axis=0))\n\n\tcentroids = np.asarray(centroids)\n\tprint(centroids.shape)\n\tsys.exit()\n\n# Implements the IHS update. \"right\" means the term on the right of the difference\n# in the update rule (same for \"left\")\ndef ihs_step(lamb, A, b, S, old_lambda, K):\n\tright = (A.T.dot(b - A.dot(old_lambda))).T.dot(lamb)\n\tnorm_term = S.dot(A.dot(lamb - old_lambda))\n\tleft = (1. / (2. * K)) * norm_term.T.dot(norm_term)\n\tres = left - right\n\treturn res\n\ndef randomized_nnls(A, b, n_particle):\n\t\"\"\"\n\tAim is to solve NNLS problem, using Iterative Hessian Sketching:\n\tfind NONNEGATIVE lambda = arg min || A . lambda - b ||_{2}^{2}\n\n\tAlso, there should be at least one non zero lambda.\n\t\"\"\"\n\tK = int(n_particle / 2)\n\t# Initialized at 0 for iteration 0 as in Pilanci & Wainwright 2016\n\tlambdas = np.zeros(b.shape)\n\n\t# Result lambdas should NOT be ALL zeros.\n\t# cons = ({'type': 'ineq', 'fun': lambda x: x.dot(x) - 0.01})\n\n\tfor i in range(5): # 5 iterations\n\t\tinit_lambdas = np.random.multivariate_normal(mean=np.zeros(b.shape), cov=np.eye(b.shape[0]))\n\n\t\t# Sketching matrix. Using Gaussian Sketch.\n\t\tS = np.random.normal(0, 1, (K, b.shape[0]))\n\t\tS /= np.sqrt(K)\n\n\t\t# Minimize the IHS objective, subject to a positive result, with the added constraint as above\n\t\tres = minimize(ihs_step, init_lambdas, (A, b, S, lambdas, K), bounds=[(0., None)] * b.shape[0])\n\t\tlambdas = res['x']\n\n\treturn lambdas\n\n\ndef scale_reduced_system(smaller_A, smaller_b):\n\tsmallest_exp_A = np.min(smaller_A)\n\tsmallest_exp_b = np.min(smaller_b)\n\tsmallest = np.min([smallest_exp_A, smallest_exp_b])\n\tsmallest = np.format_float_scientific(smallest)\n\tmin_exp = int(re.findall(r'\\d+', smallest)[-1])\n\tscaled_smaller_A = smaller_A * (10 ** min_exp)\n\tscaled_smaller_b = smaller_b * (10 ** min_exp)\n\n\treturn scaled_smaller_A, scaled_smaller_b\n\n\n\ndef safe_invert(matrix):\n\treturn np.linalg.lstsq(matrix, np.eye(matrix.shape[0]))[0]\n\t# return np.linalg.inv(matrix)\n\ndef reduce_system(n_particle, A, b):\n\n\t# K = int(n_particle / 50)\n\tK = 5\n\tindices_tokeep = b.argsort()[-K:][::-1]\n\t# indices_tokeep = np.round(np.linspace(0, b.shape[0] - 1, K)).astype(int)\n\n\tsmaller_b = b[indices_tokeep]\n\ttemp = A[:, indices_tokeep]\n\tsmaller_A = temp[indices_tokeep, :]\n\n\treturn smaller_A, smaller_b, indices_tokeep\n\n\ndef sanity_checks(unnormalized):\n\tif np.all(unnormalized == 0.):\n\t\tprint('ALL zeros ... 
\\n ')\n\t\tprint(unnormalized)\n\t\tsys.exit()\n\n\tif np.isnan(np.log(unnormalized)).any():\n\t\tprint(unnormalized)\n\t\tprint('some log nan')\n\t\tsys.exit()\n\n\n# def set_plotting():\n# \t# Set plotting\n# \tparams = {\n# \t\t'axes.labelsize': 25,\n# \t\t'font.size': 20,\n# \t\t'legend.fontsize': 30,\n# \t\t'xtick.labelsize': 25,\n# \t\t'ytick.labelsize': 25,\n# \t\t'text.usetex': False,\n# \t\t'figure.figsize': [20, 12],\n# \t\t'axes.labelpad': 10,\n# \t\t'lines.linewidth': 10,\n# \t\t'legend.loc': 'upper right'\n# \t}\n# \trcParams['agg.path.chunksize'] = 10000\n# \trcParams.update(params)\n# \tplt.style.use('bmh')\ndef set_plotting():\n\t# Set plotting\n\tparams = {\n\t\t'axes.labelsize': 28,\n\t\t'font.size': 20,\n\t\t'legend.fontsize': 28,\n\t\t'xtick.labelsize': 28,\n\t\t'ytick.labelsize': 28,\n\t\t'text.usetex': False,\n\t\t'figure.figsize': [20, 12],\n\t\t'axes.labelpad': 10,\n\t\t'lines.linewidth': 10,\n\t\t'legend.loc': 'upper right'\n\t}\n\trcParams['agg.path.chunksize'] = 10000\n\trcParams.update(params)\n\tplt.style.use('bmh')\n\n\n\ndef is_pos_def(x):\n\treturn np.all(np.linalg.eigvals(x) > 0)\n\n\ndef chi_square(target, proposal, x):\n\treturn simps((target - proposal) ** 2 / (proposal), dx=x[1] - x[0])\n\n\n# def mse(x, y):\n# \treturn np.average((x - y) ** 2, axis=0)\n\n# Normalized ?\ndef mse(x, y):\n\treturn np.average((x - y) ** 2, axis=0) / np.average(np.sum(y**2,axis=-1))\n\n\ndef sparsity(x):\n\treturn 100. - ((float(np.count_nonzero(x)) / float(x.size)) * 100)\n\n\ndef normalize(unnormalized):\n\treturn unnormalized / np.sum(unnormalized)\n\n\ndef check_symmetric(a, rtol=1e-05, atol=1e-08):\n\treturn np.allclose(a, a.T, rtol=rtol, atol=atol)\n\n\ndef normalize_log(l):\n\treturn np.exp(l - logsumexp(l)).flatten()\n\ndef log_normalize_log(unnormalized):\n\treturn unnormalized - logsumexp(unnormalized)\n\ndef get_ess(logw_norm):\n\treturn np.exp(-logsumexp(2*logw_norm))\n\n\ndef logmatmulexp(log_A, log_B):\n\t\"\"\"Given matrix log_A of shape ϴ×R and matrix log_B of shape R×I, calculates\n\t(log_A.exp() @ log_B.exp()).log() in a numerically stable way.\n\tHas O(ϴRI) time complexity and space complexity.\"\"\"\n\n\tif len(log_B.shape) == 1:\n\t\tlog_B = log_B.reshape(-1, 1)\n\n\tϴ, R = log_A.shape\n\tI = log_B.shape[1]\n\tassert log_B.shape == (R, I)\n\tlog_A_expanded = np.broadcast_to(np.expand_dims(log_A, 2), (ϴ, R, I))\n\tlog_B_expanded = np.broadcast_to(np.expand_dims(log_B, 0), (ϴ, R, I))\n\tlog_pairwise_products = log_A_expanded + log_B_expanded # shape: (ϴ, R, I)\n\n\tif log_B.shape[1] == 1:\n\t\treturn logsumexp(log_pairwise_products, axis=1).flatten()\n\n\treturn logsumexp(log_pairwise_products, axis=1)\n\n# works , but useless \n# def cost(log_params,logA,logb):\n\n# with precision(300):\n\n# # print(log_params)\n\n# left = np.logaddexp( logmatmulexp(logA, log_params) , - logb).reshape(1,-1) \n\n# # print(left)\n\n# right = np.logaddexp( logmatmulexp(logA, log_params), - logb ) \n\n# # print(right)\n\n# res = logmatmulexp( left, right )\n\n# # print(np.exp(res))\n\n# return res\n"
] | [
[
"numpy.sum",
"numpy.asarray",
"numpy.linalg.eigvals",
"numpy.log",
"numpy.allclose",
"matplotlib.pyplot.style.use",
"numpy.expand_dims",
"matplotlib.rcParams.update",
"numpy.average",
"numpy.unique",
"numpy.mean",
"numpy.eye",
"numpy.format_float_scientific",
"numpy.zeros",
"scipy.optimize.minimize",
"numpy.random.normal",
"numpy.count_nonzero",
"numpy.all",
"scipy.special.logsumexp",
"numpy.min",
"scipy.integrate.simps",
"numpy.sqrt"
]
] |
fredshentu/public_model_based_controller | [
"9301699bc56aa49ba5c699f7d5be299046a8aa0c"
] | [
"railrl/predictors/state_action_network.py"
] | [
"import abc\nimport tensorflow as tf\n\nfrom railrl.core.neuralnet import NeuralNetwork\nfrom rllab.misc.overrides import overrides\n\n\nclass StateActionNetwork(NeuralNetwork, metaclass=abc.ABCMeta):\n \"\"\"\n A map from (state, action) to a vector\n \"\"\"\n\n def __init__(\n self,\n name_or_scope,\n output_dim,\n env_spec=None,\n action_dim=None,\n observation_dim=None,\n action_input=None,\n observation_input=None,\n **kwargs\n ):\n \"\"\"\n Create a state-action network.\n\n :param name_or_scope: a string or VariableScope\n :param output_dim: int, output dimension of this network\n :param env_spec: env spec for an Environment\n :param action_dim: int, action dimension\n :param observation_input: tf.Tensor, observation input. If None,\n a placeholder of shape [None, observation dim] will be made\n :param action_input: tf.Tensor, observation input. If None,\n a placeholder of shape [None, action dim] will be made\n :param kwargs: kwargs to be passed to super\n \"\"\"\n self.setup_serialization(locals())\n super(StateActionNetwork, self).__init__(name_or_scope, **kwargs)\n self.output_dim = output_dim\n\n assert env_spec or (action_dim and observation_dim)\n if action_dim is None:\n self.action_dim = env_spec.action_space.flat_dim\n else:\n self.action_dim = action_dim\n\n if observation_dim is None:\n self.observation_dim = env_spec.observation_space.flat_dim\n else:\n self.observation_dim = observation_dim\n\n with tf.variable_scope(self.scope_name):\n if action_input is None:\n action_input = tf.placeholder(\n tf.float32,\n [None, self.action_dim],\n \"_actions\")\n if observation_input is None:\n if hasattr(self.observation_dim, '__len__'):\n observation_input = tf.placeholder(\n tf.float32,\n [None] + list(self.observation_dim),\n \"_observation\")\n else:\n observation_input = tf.placeholder(\n tf.float32,\n [None ,self.observation_dim],\n \"_observation\")\n\n self.action_input = action_input\n self.observation_input = observation_input\n self._create_network(observation_input=observation_input,\n action_input=action_input)\n\n @property\n @overrides\n def _input_name_to_values(self):\n return dict(\n observation_input=self.observation_input,\n action_input=self.action_input,\n )\n\n # TODO(vpong): make it so that the inputs get automatically processed\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.variable_scope"
]
] |
topolphukhanh/xam | [
"3fa958ba8b0c8e8e266cac9997b7a7d0c309f55c"
] | [
"xam/preprocessing/binning/mdlp.py"
] | [
"\"\"\"\nMinimum Description Length Principle (MDLP) binning\n\n- Original paper: http://sci2s.ugr.es/keel/pdf/algorithm/congreso/fayyad1993.pdf\n- Implementation inspiration: https://www.ibm.com/support/knowledgecenter/it/SSLVMB_21.0.0/com.ibm.spss.statistics.help/alg_optimal-binning.htm\n\"\"\"\n\nimport collections\nimport math\n\nimport numpy as np\nfrom scipy import stats\nfrom sklearn.utils import check_X_y\n\nfrom .base import BaseSupervisedBinner\n\n\nclass MDLPBinner(BaseSupervisedBinner):\n\n def fit(self, X, y, **fit_params):\n \"\"\"Determine which are the best cut points for each column in X based on y.\"\"\"\n\n X, y = check_X_y(X, y, y_numeric=True)\n\n self.cut_points_ = [mdlp_cut(x, y, []) for x in X.T]\n return self\n\n @property\n def cut_points(self):\n return self.cut_points_\n\n\ndef calc_class_entropy(y):\n class_counts = np.unique(y, return_counts=True)[1]\n return stats.entropy(class_counts, base=2)\n\n\ndef calc_class_information_entropy(x, y, cut_point):\n partition = x <= cut_point\n\n y_1 = y[partition]\n y_2 = y[~partition]\n\n ent_1 = calc_class_entropy(y_1)\n ent_2 = calc_class_entropy(y_2)\n\n return (y_1.size * ent_1 + y_2.size * ent_2) / (y_1.size + y_2.size)\n\n\ndef mdlp_cut(x, y, cut_points):\n\n # No cut is necessary if there is only one class\n if len(np.unique(y)) == 1:\n return\n\n # Calculate the current entropy\n y_ent = calc_class_entropy(y)\n\n # Sort x and y according to x\n sorted_indexes = x.argsort()\n x = x[sorted_indexes]\n y = y[sorted_indexes]\n\n # Find the potential cut points\n potential_cut_points = []\n for i in range(x.size - 1):\n potential_cut_points.append((x[i] + x[i+1]) / 2)\n\n # Ignore the cut points that appear more than once\n potential_cut_points = list(set(potential_cut_points))\n\n # Find the cut point with gives the lowest class information entropy\n cut_point = min(\n potential_cut_points,\n key=lambda cut_point: calc_class_information_entropy(x, y, cut_point)\n )\n\n # Calculate the information gain obtained with the obtained cut point\n new_ent = calc_class_information_entropy(x, y, cut_point)\n gain = y_ent - new_ent\n\n # Partition the data\n partition = x <= cut_point\n x_1 = x[partition]\n y_1 = y[partition]\n x_2 = x[~partition]\n y_2 = y[~partition]\n\n # Get the number of unique classes in each group\n k = len(np.unique(y))\n k_1 = len(np.unique(y_1))\n k_2 = len(np.unique(y_2))\n\n # Calculate the entropy of each group\n y_1_ent = calc_class_entropy(y_1)\n y_2_ent = calc_class_entropy(y_2)\n\n # Calculate the acceptance criterion\n delta = math.log2(3 ** k) - k * y_ent + k_1 * y_1_ent + k_2 * y_2_ent\n n = y.size\n acceptance_criterion = (math.log2(n - 1) + delta) / n\n\n # Add the cut point if the gain is higher than the acceptance criterion\n if gain > acceptance_criterion:\n cut_points.append(cut_point)\n # Recursively check if further cuts are possible\n mdlp_cut(x_1, y_1, cut_points)\n mdlp_cut(x_2, y_2, cut_points)\n\n return sorted(cut_points)\n"
] | [
[
"sklearn.utils.check_X_y",
"scipy.stats.entropy",
"numpy.unique"
]
] |
ezeddin/random_forest | [
"07a23af1764fbf7a54a27e79d5ac68c69a64f0b1"
] | [
"bin/AdaBoost.py"
] | [
"from sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.datasets import make_gaussian_quantiles\nfrom sklearn.ensemble import RandomForestClassifier\n\ndef AdaBoost(X_train, y_train, X_test, DEPTH, N_ESTIMATORS):\n\t# Create and fit an AdaBoosted decision tree\n\tbdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=DEPTH),\n\t\t\t algorithm=\"SAMME\",\n\t\t\t n_estimators=N_ESTIMATORS)\n\n\tbdt.fit(X_train, y_train)\n\n\t# Test classifier on test data\n\ty_out = bdt.predict(X_test)\n\treturn y_out\n\ndef ForestIB(X_train, y_train, X_test, DEPTH, N_ESTIMATORS):\n\n\t# Create the random forest object which will include all the parameters\n\t# for the fit\n\tforest = RandomForestClassifier(n_estimators = N_ESTIMATORS, max_depth=DEPTH, max_features=DEPTH, criterion='gini', n_jobs=-1)\n\n\t# Fit the training data to the Survived labels and create the decision trees\n\tforest = forest.fit(X_train, y_train)\n\n\t# Take the same decision trees and run it on the test data\n\ty_out = forest.predict(X_test)\n\treturn y_out\n"
] | [
[
"sklearn.tree.DecisionTreeClassifier",
"sklearn.ensemble.RandomForestClassifier"
]
] |
BioSystemsUM/biotmpy | [
"f981d58cf7f53a2aa09708e13d6561533c164e1f"
] | [
"pipelines/cv_biobert_lstm_ft.py"
] | [
"model_name= 'cv_biobert_lstm_ft'\r\n\r\nimport sys \r\nsys.path.append('../')\r\nimport os\r\nimport tensorflow \r\nimport numpy as np\r\nimport random\r\n\r\n\r\nseed_value = 123123\r\n#seed_value = None\r\n\r\nenvironment_name = sys.executable.split('/')[-3]\r\nprint('Environment:', environment_name)\r\nos.environ[environment_name] = str(seed_value)\r\n\r\nnp.random.seed(seed_value)\r\nrandom.seed(seed_value)\r\ntensorflow.random.set_seed(seed_value)\r\n\r\nfrom tensorflow.compat.v1 import ConfigProto\r\nfrom tensorflow.compat.v1 import InteractiveSession\r\nimport tensorflow.compat.v1.keras.backend as K\r\nconfig = ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\nsession = InteractiveSession(config=config)\r\nK.set_session(session)\r\n\r\nmultiple_gpus = [0,1,2,3]\r\n#multiple_gpus = None\r\n\r\nimport os\r\nimport tensorflow as tf\r\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\r\n\r\nif multiple_gpus:\r\n devices = []\r\n for gpu in multiple_gpus:\r\n devices.append('/gpu:' + str(gpu)) \r\n strategy = tensorflow.distribute.MirroredStrategy(devices=devices)\r\n\r\nelse:\r\n # Get the GPU device name.\r\n device_name = tensorflow.test.gpu_device_name()\r\n # The device name should look like the following:\r\n if device_name == '/device:GPU:0':\r\n print('Using GPU: {}'.format(device_name))\r\n else:\r\n raise SystemError('GPU device not found')\r\n\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = device_name\r\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n\r\nfrom wrappers.bioc_wrapper import bioc_to_docs, bioc_to_relevances\r\nfrom wrappers.pandas_wrapper import relevances_to_pandas, docs_to_pandasdocs\r\nfrom mlearning.dl import DL_preprocessing\r\nfrom mlearning.dl_models import Bert_LSTM_opt\r\nfrom mlearning.dl_models import DeepDTA\r\nfrom mlearning.embeddings import compute_embedding_matrix, glove_embeddings_2\r\nimport numpy as np\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import recall_score\r\nfrom sklearn.metrics import f1_score, matthews_corrcoef\r\nfrom sklearn.metrics import cohen_kappa_score\r\nfrom sklearn.metrics import roc_auc_score, auc, roc_curve, precision_recall_curve\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom tensorflow.keras.models import load_model\r\nfrom mlearning.dl import plot_training_history, Bert_preprocessing\r\nfrom mlearning.dl_config import DLConfig\r\nfrom mlearning.dl import average_precision\r\nfrom tensorflow.keras.preprocessing import text\r\nfrom mlearning.dl import plot_roc_n_pr_curves\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom keras.wrappers.scikit_learn import KerasClassifier\r\nfrom sklearn.model_selection import StratifiedKFold\r\nimport nltk\r\nnltk.download('stopwords')\r\nnltk.download('wordnet')\r\nnltk.download('punkt')\r\nfrom nltk.corpus import stopwords\r\nimport seaborn as sns\r\nimport pandas as pd\r\nimport os\r\nfrom keras import backend as K\r\nimport pickle\r\nfrom transformers import BertTokenizer\r\n\r\ntrain_dataset_path = '../datasets/PMtask_Triage_TrainingSet.xml'\r\ntest_dataset_path = '../datasets/PMtask_Triage_TestSet.xml'\r\n\r\n\r\n\r\ndl_config = DLConfig(model_name=model_name, seed_value=seed_value)\r\n#dl_config.stop_words = set(stopwords.words('english')) \r\ndl_config.stop_words = None\r\ndl_config.lower = False \r\ndl_config.remove_punctuation = 
False\r\ndl_config.split_by_hyphen = False\r\ndl_config.lemmatization = False \r\ndl_config.stems = False \r\n\r\n\r\ndocs_train = bioc_to_docs(train_dataset_path, dl_config=dl_config)\r\nrelevances_train = bioc_to_relevances(train_dataset_path, 'protein-protein')\r\n\r\n\r\nx_train_df = docs_to_pandasdocs(docs_train)\r\ny_train_df = relevances_to_pandas(x_train_df, relevances_train)\r\n\r\n#Parameters\r\ndl_config.padding = 'post' #'pre' -> default; 'post' -> alternative\r\ndl_config.truncating = 'post' #'pre' -> default; 'post' -> alternative #####\r\n\r\ndl_config.max_sent_len = 512 #sentences will have a maximum of \"max_sent_len\" words\r\ndl_config.nmr_sentences = 1 #[1 or 2]\r\n\r\n\r\ndl_config.learning_rate = 3e-5\r\ndl_config.epochs = 2\r\n\r\ndl_config.batch_size=16\r\n\r\n\r\n\r\ndl_config.k_fold=10\r\nkfold = StratifiedKFold(n_splits=dl_config.k_fold, shuffle=True, random_state=dl_config.seed_value)\r\n\r\ncv_avp_scores = []\r\ncv_acc_scores=[]\r\ncv_prec_scores = []\r\ncv_rec_scores = []\r\ncv_f1_scores = []\r\n\r\nfor train_index, test_index in kfold.split(x_train_df.to_numpy(), y_train_df.to_numpy()):\r\n print(len(train_index))\r\n print(len(test_index))\r\n K.clear_session()\r\n\r\n\r\n environment_name = sys.executable.split('/')[-3]\r\n print('Environment:', environment_name)\r\n os.environ[environment_name] = str(dl_config.seed_value)\r\n\r\n np.random.seed(dl_config.seed_value)\r\n random.seed(dl_config.seed_value)\r\n tensorflow.random.set_seed(dl_config.seed_value)\r\n\r\n dl_config.tokenizer = BertTokenizer.from_pretrained('biobert_v1.1_pubmed', do_lower_case=False)\r\n\r\n x_train, y_train = Bert_preprocessing(x_train_df.iloc[train_index,], y_train_df.iloc[train_index,],\r\n dl_config=dl_config, \r\n validation_percentage=0,\r\n seed_value=dl_config.seed_value)\r\n\r\n\r\n biobert_path = './biobert_v1.1_pubmed'\r\n\r\n if multiple_gpus:\r\n with strategy.scope():\r\n model = Bert_LSTM_opt(dl_config, learning_rate=dl_config.learning_rate,static_bert=False, bert_name_or_path=biobert_path, bert_config=True)\r\n else:\r\n model = Bert_LSTM_opt(dl_config, learning_rate=dl_config.learning_rate,static_bert=False, bert_name_or_path=biobert_path, bert_config=True)\r\n\r\n history = model.fit(x_train, y_train,\r\n epochs=dl_config.epochs,\r\n batch_size=dl_config.batch_size)\r\n\r\n x_test, y_test = Bert_preprocessing(x_train_df.iloc[test_index,], y_train_df.iloc[test_index,], dl_config=dl_config)\r\n\r\n yhat_probs = model.predict(x_test, verbose=0)\r\n yhat_probs = yhat_probs[:, 0]\r\n\r\n yhat_classes = np.where(yhat_probs > 0.5, 1, yhat_probs)\r\n yhat_classes = np.where(yhat_classes < 0.5, 0, yhat_classes).astype(np.int64)\r\n \r\n test_avp = average_precision(y_train_df.iloc[test_index,], yhat_probs)\r\n test_acc = accuracy_score(y_test, yhat_classes)\r\n test_prec = precision_score(y_test, yhat_classes)\r\n test_rec = recall_score(y_test, yhat_classes)\r\n test_f1 = f1_score(y_test, yhat_classes)\r\n cv_avp_scores.append(test_avp)\r\n cv_acc_scores.append(test_acc)\r\n cv_prec_scores.append(test_prec)\r\n cv_rec_scores.append(test_rec)\r\n cv_f1_scores.append(test_f1)\r\n\r\n K.clear_session()\r\n del model\r\n tf.compat.v1.reset_default_graph()\r\n\r\ndl_config.cv_avp = cv_avp_scores\r\ndl_config.cv_acc = cv_acc_scores\r\ndl_config.cv_prec = cv_prec_scores\r\ndl_config.cv_rec = cv_rec_scores\r\ndl_config.cv_f1 = cv_f1_scores\r\n\r\n\r\n\r\ndl_config.save()\r\n\r\n\r\ndl_config.write_report()\r\n"
] | [
[
"tensorflow.compat.v1.reset_default_graph",
"sklearn.model_selection.StratifiedKFold",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.test.gpu_device_name",
"tensorflow.compat.v1.InteractiveSession",
"numpy.random.seed",
"sklearn.metrics.f1_score",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.precision_score",
"tensorflow.distribute.MirroredStrategy",
"sklearn.metrics.recall_score",
"numpy.where",
"tensorflow.random.set_seed"
]
] |
johnfmaddox/kilojoule | [
"b4c146ded82e3ef51a0252ff48b1066a076e9aeb"
] | [
"kilojoule/humidair.py"
] | [
"from .units import Quantity, units\nfrom .common import (\n invert_dict,\n CP_symbUpper_to_units,\n preferred_units_from_type,\n preferred_units_from_symbol,\n)\nfrom .realfluid import Properties as rfprop\nfrom .plotting import PropertyPlot, plt\nimport CoolProp\nfrom CoolProp.CoolProp import HAPropsSI,set_reference_state\nimport numpy as np\nimport re\nfrom numpy import floor,ceil,log10\nimport functools\n\n# Default CoolProps units for symbols\nCP_HA_units_to_symb = {\n 'K':['T','B','Twb','T_wb','WetBulb','D','Tdp','DewPoint','T_dp','Tdb','T_db'],\n 'Pa':['P','P_w'],\n 'J/kg_dry_air/K':['C','cp','CV','S','Sda','Entropy'],\n 'J/kg_dry_air/K':['Cw','cpw','CV','S','Sda','Entropy'],\n 'J/kg_humid_air/K':['Cha','cp_ha','CVha','cv_ha','Sha'],\n 'J/kg_dry_air':['H','Hda','Enthalpy'],\n 'J/kg_humid_air':['Hha'],\n 'J/lb_water':['Hw'],\n 'W/m/degK':['K','k','Conductivity'],\n 'Pa*s':['M','Visc','mu'],\n 'mol_water/mol_humid_air':['psi_w','Y'],\n 'm^3/kg_dry_air':['V','Vda'],\n 'm^3/kg_humid_air':['Vha'],\n 'kg_water/kg_dry_air':['W','Omega','HumRat'],\n ' ':['R','RH','RelHum','phi']\n }\nCP_HA_symb_to_units = invert_dict(CP_HA_units_to_symb)\n\nCP_HA_trans_inv = {\n 'Twb':['B','Twb','T_wb','WetBulb'],\n 'Tdb':['Tdb','T_db','DryBulb','T'],\n 'Tdp':['Tdp','D','DewPoint','T_dp'],\n 'C':['C','cp','Cp','C_p','c_p'],\n 'Cha':['Cha','C_ha','cha','c_ha'],\n 'Cv':['Cv','Cv','cv','c_v'],\n 'Cvha':['Cvha','Cv_ha','cvha','c_v_ha'],\n 'H':['H','Hda','Enthalpy','h','hda','h_da'],\n 'Hha':['Hha','h_ha','hha','Enthalpy_Humid_Air'],\n 'K':['K','k','conductivity','Conductivity'],\n 'M':['M','Visc','mu','viscosity'],\n 'Y':['Y','psi_w','mole_fraction','y'],\n 'P':['P','p','pressure'],\n 'P_w':['P_w','p_w','partial_pressure_water'],\n 'R':['R','RelHum','RH','rel_hum','phi'],\n 'S':['S','s','sda','Sda','s_da','Entropy'],\n 'Sha':['Sha','s_ha','sha'],\n 'V':['V','v','v_da','vda'],\n 'Vha':['Vha','v_ha','vha'],\n 'W':['W','w','Omega','HumRat','spec_hum','specific_humidity','omega','humidity','absolute_humidity'],\n 'Z':['Z','compressibility_factor'],\n }\nCP_HA_trans = invert_dict(CP_HA_trans_inv)\n\nCP_HA_symb_to_local = {\n 'Twb':'T_wb',\n 'Tdb':'T_db',\n 'Tdp':'T_dp',\n 'C':'Cp',\n 'Cha':'Cp_ha',\n 'Cv':'Cv',\n 'Cvha':'Cv_ha',\n 'H':'h',\n 'Hha':'h_ha',\n 'K':'conductivity',\n 'M':'viscosity',\n 'Y':'psi_w',\n 'P':'p',\n 'P_w':'p_w',\n 'R':'rel_hum',\n 'S':'s',\n 'Sha':'s_ha',\n 'V':'v',\n 'Vha':'v_ha',\n 'W':'spec_hum',\n 'Z':'Z'\n }\n\nCP_HA_type_to_symb = {\n 'temperature':['B','Twb','T_wb','WetBulb','Tdb','T_db','DryBulb','T','Tdp','D','DewPoint','T_dp'],\n 'pressure':['P','p','pressure','P_w','p_w','partial_pressure_water'],\n 'density':['D','d','rho'],\n 'dry air specific volume':['V','v','v_da','vda'],\n 'humid air specific volume':['Vha','v_ha','vha'],\n 'dry air specific energy':['H','Hda','Enthalpy','h','hda','h_da'],\n 'humid air specific energy':['Hha','h_ha','hha','Enthalpy_Humid_Air'],\n 'dry air specific heat':['C','cp','Cp','C_p','c_p','Cv','Cv','cv','c_v'],\n 'dry air specific entropy':['S','s','sda','Sda','s_da','Entropy'],\n 'humid air specific heat':['Cha','C_ha','cha','c_ha','Cvha','Cv_ha','cvha','c_v_ha'],\n 'humid air specific entropy':['Sha','s_ha','sha'],\n 'conductivity':['K','k','conductivity','Conductivity'],\n 'viscosity':['M','Visc','mu','viscosity'],\n 'water mole fraction':['Y','psi_w','y'],\n 'humidity ratio':['W','Omega','HumRat','spec_hum','specific_humidity','omega','humidity','absolute_humidity'],\n 'dimensionless':['R','RelHum','RH','rel_hum','phi','Z']\n}\nCP_HA_symb_to_type 
= invert_dict(CP_HA_type_to_symb)\n\n\n\ndef PropertyLookup(\n desired,\n unit_system=None,\n verbose=False,\n **kwargs,\n):\n \"\"\"\n Each of the follow properties/parameters is expected to be a quantity with units\n\n :param desired: Dependent from two of the following independent properties\n :param T: dry-bulb Temperature (Default value = None)\n :param T_wb: wet-bulb Temperature (Default value = None)\n :param T_dp: dew-point Temperature (Default value = None)\n :param p: pressure (Default value = None)\n :param p_w: partial pressure of water vapor (Default value = None)\n :param w: humidity ratio (Default value = None)\n :param v: mixture volume per unit dry air (Default value = None)\n :param v_ha: mixture volume per unit humid air (Default value = None)\n :param h: mixture enthalpy per unit dry air (Default value = None)\n :param h_ha: mixture enthalpy per unit humid air (Default value = None)\n :param s: mixture entropy per unit dry air (Default value = None)\n :param rel_hum: relative humidity (Default value = None)\n :param y: water mole fraction (Default value = None)\n :param unit_system: unit system for return value - one of 'SI_C', 'SI_K', 'English_F', 'English_R' (Default value = )\n :param verbose: show debug information (Default value = False)\n :param **kwargs:\n\n \"\"\"\n desired = CP_HA_trans[desired]\n PropsSI_args =[desired] # add the desired parameter as the first argument to pass to CoolProp.PropsSI\n\n def process_indep_arg(arg, CPSymb):\n \"\"\"\n Add a property symbol and its value to the CoolProp.PropSI argument string\n\n :param arg: value of independent parameter\n :param CPSymb: CoolProp symbol\n :param exponent: exponent used to invert the value (Default value = 1)\n :param AltSymb: symbol to use for inverted values (Default value = None)\n\n \"\"\"\n if arg is not None:\n # if AltSymb: PropsSI_args.append(AltSymb)\n # else:\n PropsSI_args.append(CPSymb) # Add independent parameter symbol to argument list\n if CP_HA_symb_to_units[CPSymb] is not None:\n value = float(arg.to(CP_HA_symb_to_units[CPSymb]).magnitude) # Add independent parameter value to argument list with appropriate magnitude and units stripped\n elif isinstance(arg,Quantity):\n value = float(arg.magnitude)\n else:\n value = float(arg) # Add independent paramter value directly to argument list if it has no units that need to be adjusted\n PropsSI_args.append(value)\n for k,v in kwargs.items():\n if k in CP_HA_trans.keys():\n process_indep_arg(v,CP_HA_trans[k])\n\n def humidity_search(PropsSI_args):\n desired = PropsSI_args[0]\n for i,v in enumerate(PropsSI_args):\n if v == 'P':\n P = PropsSI_args[i+1]\n elif v == 'R':\n R_target = PropsSI_args[i+1]\n elif v == 'W':\n W = PropsSI_args[i+1]\n T = 273.15 # starting guess\n T_guess = T\n n_steps = 100\n search_steps = [5,-5,1,-1,0.1,-0.1,0.01,-0.01]\n for step in search_steps:\n cont = True\n n_step = 0\n while cont:\n if n_step > 0:\n T_guess += step\n try:\n R = HAPropsSI('R','T',T_guess,'W',W,'P',P)\n error = abs(R_target-R)\n if step>0:\n T = T_guess\n if R<R_target:\n cont=False\n elif step<0 and R<R_target:\n T = T_guess\n else:\n cont=False\n except ValueError:\n if step<0: cont=False\n n_step += 1\n if n_step > n_steps: cont=False\n \n if desired == 'Tdb':\n return T\n else:\n return HAPropsSI(desired,'P',P,'W',W,'Tdb',T)\n \n if verbose:\n print('Calling: CoolProp.CoolProp.HAPropsSI({})'.format(','.join([str(i) for i in PropsSI_args])))\n print(PropsSI_args)\n\n if \"R\" in PropsSI_args[1:] and \"W\" in PropsSI_args[1:]:\n result = 
humidity_search(PropsSI_args)\n else:\n result = HAPropsSI(*PropsSI_args)\n \n # Determine the units of the value as returned from CoolProp\n CP_return_units = CP_HA_symb_to_units[desired]\n CP_return_type = CP_HA_symb_to_type[desired]\n # Determine the preferred units for the value\n if unit_system is None:\n result_units = preferred_units_from_type(CP_return_type, units.preferred_units)\n else:\n result_units = preferred_units_from_type(CP_return_type, unit_system)\n # Convert the returned value to the preferred units\n if result_units is not None:\n result = Quantity(result,CP_return_units).to(result_units)\n return result\n\n\nclass Properties:\n \"\"\"\n A class to return thermodynamic properties for a real fluid\n\n :param p: pressure (Default value = 1 atm)\n :param unit_system: units for return values - one of 'SI_C','SI_K','English_F','English_R' (Default = 'SI_C')\n :returns: an object with methods to evaluate real fluid properties\n \"\"\"\n\n def __init__(self, p=None, unit_system=\"kSI_C\"):\n self.fluid='humidair'\n if p is None:\n self.__p = Quantity(1.0,'atm')\n else:\n self.__p = p\n self.unit_system = unit_system\n # legacy definitions/aliases\n self.relhum = self.phi = self.rel_hum\n self.omega = self.hum_rat = self.humrat = self.w\n self.Cp = self.cp\n self.Cv = self.cv\n self.mu = self.viscosity\n self.nu = self.kinematic_viscosity\n self.water = rfprop('Water',unit_system=unit_system)\n\n def _lookup(self, desired, **kwargs):\n \"\"\"\n Call PropertyLookup to evaluate the desired property for the indepent properties specified\n as keyword arguments\n\n :param desired: desired property\n :param **kwargs: any three dimensional quantities of T,T_wb,T_dp,p,p_w,w,v,v_ha,h,h_ha,s,s_ha,rel_hum,mole_fraction,\n \"\"\"\n unit_system = kwargs.pop('unit_system',self.unit_system)\n return PropertyLookup(\n desired, unit_system=self.unit_system, **kwargs\n )\n\n \n def _update_kwargs(self, args, kwargs, water=False):\n \"\"\"use argument unit to identify appropriate keyword\"\"\"\n for arg in args:\n if isinstance(arg, Quantity):\n try:\n arg_symb = arg.property_symbol\n arg_dict = {arg_symb:arg}\n kwargs = dict(**arg_dict, **kwargs)\n except:\n try:\n arg.to('K') # Temperature\n kwargs = dict(T=arg, **kwargs)\n except:\n try:\n arg.to('kPa') # pressure\n kwargs = dict(p=arg, **kwargs)\n except:\n try:\n arg.to('m^3/kg') # specific volume\n kwargs = dict(v=arg, **kwargs)\n except:\n try:\n arg.to('kJ/kg/K') # entropy\n kwargs = dict(s=arg, **kwargs)\n except:\n try:\n arg.to('J/kg_dry_air') # enthalpy\n kwargs = dict(h=arg, **kwargs)\n except:\n try:\n arg.to('J/kg_humid_air') # enthalpy humid air\n kwargs = dict(h_ha=arg, **kwargs)\n except:\n try:\n arg.to('kg_water/kg_dry_air') # molar density\n kwargs = dict(w=arg, **kwargs)\n except:\n try:\n if arg.dimensionless and (0<= arg <= 1): # relative humidity\n kwargs = dict(rel_hum=arg, **kwargs)\n except:\n print(f'Unable to determine property type for {f} based on units')\n elif 0<= arg <= 1: # quality\n kwargs = dict(rel_hum=arg, **kwargs)\n if not water and \"p\" not in kwargs.keys():\n kwargs = dict(p=self.__p, **kwargs)\n return kwargs\n\n @property\n def p(self):\n \"\"\"\n set or retrieve pressure for humid air\n\n example:\n >> humair.p = Quantity(1,'atm')\n >> humair.p\n '1 atm'\n\n :param pressure: pressure as a dimensional quantity\n :returns: pressure as a dimensional quantity\n \"\"\"\n return self.__p\n\n @p.setter\n def p(self, pressure):\n self.__p = pressure\n\n def T(self, *args, **kwargs):\n \"\"\"\n Dry-bulb 
Temperature from two independent intensive properties\n\n example:\n >> humair.T(rel_hum=rel_hum_2, h=h_1)\n\n :param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: Dry-bulb Temperature as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"T\", **kwargs)\n \n def T_wb(self, *args, **kwargs):\n \"\"\"\n Wet-bulb Temperature from two independent intensive properties\n\n example:\n >> humair.T_wb(rel_hum=rel_hum_2, h=h_1)\n\n :param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: Wet-bulb Temperature as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"T_wb\", **kwargs)\n\n def T_dp(self, *args, **kwargs):\n \"\"\"\n Dew-point Temperature from two independent intensive properties\n\n example:\n >> humair.T_dp(rel_hum=rel_hum_2, h=h_1)\n\n :param **kwargs: any two dimensional quantities of p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: Dew-point Temperature as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"T_dp\", **kwargs)\n \n def w(self, *args, **kwargs):\n \"\"\"\n humidity ratio from two independent intensive properties\n\n example:\n >> fluid.v(T=T_1, h=h_2)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: humidity ratio as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"w\", **kwargs)\n\n def v(self, *args, **kwargs):\n \"\"\"\n mixture volume per unit of dry air from two independent intensive properties\n\n example:\n >> fluid.v(T=T_1, h=p_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: specific volume per unit dry air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"v\", **kwargs)\n\n def v_ha(self, *args, **kwargs):\n \"\"\"\n mixture volume per unit of humid air from two independent intensive properties\n\n example:\n >> fluid.v_ha(T=T_1, h=p_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: specific volume per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"v_ha\", **kwargs)\n\n def v_w(self, *args, **kwargs):\n \"\"\"\n specific volume of water per unit of humid water from two independent intensive properties\n\n example:\n >> fluid.v_w(T=T_1, x=x_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: specific volume per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs,water=True)\n return Quantity(self.water.v(**kwargs).to('m^3/kg').magnitude, 'm^3/kg_water')\n \n def h(self, *args, **kwargs):\n \"\"\"\n enthalpy per unit dry air from two independent intensive properties\n\n example:\n >> fluid.h(T=T_1, rel_hum=re1_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: specific enthalpy per unit dry air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"h\", **kwargs)\n\n def h_ha(self, *args, **kwargs):\n \"\"\"\n enthalpy per unit humid air from two independent intensive properties\n\n example:\n >> fluid.h_ha(T=T_1, 
rel_hum=re1_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: specific enthalpy per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"h_ha\", **kwargs)\n\n def h_w(self, *args, **kwargs):\n \"\"\"\n specific enthalpy of water per unit of humid water from two independent intensive properties\n\n example:\n >> fluid.h_w(T=T_1, x=x_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: specific volume per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs,water=True)\n return Quantity(self.water.h(**kwargs).to('kJ/kg').magnitude, 'kJ/kg_water') \n \n def s(self, *args, **kwargs):\n \"\"\"\n entropy per unit dry air from two independent intensive properties\n\n example:\n >> fluid.s(T=T_1, h=h_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: specific entropy per unit dry air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"s\", **kwargs)\n\n def s_ha(self, *args, **kwargs):\n \"\"\"\n entropy per unit humid air from two independent intensive properties\n\n example:\n >> fluid.s_ha(T=T_1, h=h_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: specific entropy per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"s_ha\", **kwargs)\n\n def s_w(self, *args, **kwargs):\n \"\"\"\n specific entropy of water per unit of humid water from two independent intensive properties\n\n example:\n >> fluid.s_w(T=T_1, x=x_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,u_molar,h_molar,s_molar,d_molar\n :returns: specific volume per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs,water=True)\n return Quantity(self.water.s(**kwargs).to('kJ/kg/K').magnitude, 'kJ/kg_water/K') \n \n def rel_hum(self, *args, **kwargs):\n \"\"\"\n relative humidity from two independent intensive properties\n\n example:\n >> fluid.rel_hum(T=T_1, h=h_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: relative humidity as a dimensionless quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"rel_hum\", **kwargs)\n\n def y(self, *args, **kwargs):\n \"\"\"\n water mole fraction from two independent intensive properties\n\n example:\n >> fluid.y(T=T_1, h=h_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar\n :returns: water mole fraction as a dimensionless quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"Y\", **kwargs)\n\n def cp(self, *args, **kwargs):\n \"\"\"\n specific heat per unit dry air from two independent intensive properties\n\n example:\n >> fluid.cp(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: specific heat per unit dry air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"cp\", **kwargs)\n\n def cp_ha(self, *args, **kwargs):\n \"\"\"\n specific heat per unit humid air from two independent intensive properties\n\n example:\n >> fluid.cp_ha(T=T_1, 
rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: specific heat per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"cp_ha\", **kwargs)\n \n def cv(self, *args, **kwargs):\n \"\"\"\n constant volume specific heat per unit dry air from two independent intensive properties\n\n example:\n >> fluid.cv(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: constant volume specific heat per unit dry air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"cv\", **kwargs)\n\n def cv_ha(self, *args, **kwargs):\n \"\"\"\n constant volume specific heat per unit humid air from two independent intensive properties\n\n example:\n >> fluid.cv_ha(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: constant volume specific heat per unit humid air as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"cv_ha\", **kwargs)\n\n def conductivity(self, *args, **kwargs):\n \"\"\"\n thermal conductivity from two independent intensive properties\n\n example:\n >> fluid.conductivity(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: thermal conductivity as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"k\", **kwargs)\n\n def viscosity(self, *args, **kwargs):\n \"\"\"\n dynamic viscosity from two independent intensive properties\n\n example:\n >> fluid.viscosity(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: dynamic viscosity as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"mu\", **kwargs)\n\n def kinematic_viscosity(self, *args, **kwargs):\n \"\"\"\n dynamic viscosity from two independent intensive properties\n\n example:\n >> fluid.kinematic_viscosity(T=T1, p=p1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,u_molar,h_molar,s_molar,d_molar\n :returns: kinematic viscosity as a dimensional quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"viscosity\", **kwargs)/self._lookup(\"v\", **kwargs)\n \n def Z(self, *args, **kwargs):\n \"\"\"\n Compressibility factor\n\n example:\n >> fluid.Pr(T=T_1, rel_hum=rel_hum_1)\n\n :param **kwargs: any two dimensional quantities of T,p,v,u,h,s,x,d,rho,u_molar,h_molar,s_molar,d_molar\n :returns: Compressibility factor as a dimensionless quantity\n \"\"\"\n kwargs = self._update_kwargs(args,kwargs)\n return self._lookup(\"Z\", **kwargs)\n\n def property_diagram(\n self,\n x=None,\n y=None,\n x_units=None,\n y_units=None,\n saturation=False,\n unit_system=None,\n **kwargs,\n ):\n unit_system = unit_system or self.unit_system\n return PropertyPlot(\n x=x,\n y=y,\n x_units=x_units,\n y_units=y_units,\n property_table=self,\n saturation=saturation,\n unit_system=unit_system,\n **kwargs,\n )\n\n def format_units(self,units,displaystyle=True):\n units = re.sub('_water','_w',units)\n units = re.sub('_dry_air','_a',units)\n units = re.sub('deg',r'^\\\\circ{}\\!',units)\n match = re.match('(.*)/(.*)',units)\n if match and 
displaystyle:\n units = f'\\\\frac{{{match.group(1)}}}{{{match.group(2)}}}'\n return units\n\n def rounded_array(self,val1,val2,n=20,spacing=None):\n if spacing is not None:\n spacing_mag = floor(log10(spacing))\n start = spacing*10**spacing_mag*round(val1/(spacing*10**spacing_mag))\n ret_array = np.arange(start, val2+spacing, spacing)\n else:\n dir = 1 if val2>val1 else -1\n delta = abs(val2-val1)\n mag_delta = floor(log10(delta))\n spacing = round(delta/n,-int(floor(log10(delta/n))))\n spacing_mag = floor(log10(spacing))\n spacings={}\n lists={}\n lengths={}\n for i in [1,2,2.5,5,10]:\n spacings[i] = dir*i*10**spacing_mag*round(spacing/(i*10**spacing_mag))\n spacings[i] = dir*i*10**spacing_mag\n start = i*10**spacing_mag*round(val1/(i*10**spacing_mag))\n if spacings[i] == 0: spacings[i] = i*10**spacing_mag\n lists[i] = np.arange(start,val2+spacings[i],spacings[i])\n if lists[i][0] == -0: lists[i][0]=0\n lengths[i] = len(lists[i])\n kys= list(lengths.keys()) \n lst = list(lengths.values())\n L = lst[min(range(len(lst)), key = lambda i: abs(lst[i]-n))]\n K = kys[lst.index(L)]\n ret_array = lists[K]\n if ret_array[0] == -0: ret_array[0]=0\n if ret_array[-1]>val2 or ret_array[-1]<val1: ret_array = ret_array[:-1]\n if ret_array[0]<val1 or ret_array[-1]>val2: ret_array = ret_array[1:]\n return ret_array\n\n def psychrometric_chart(\n self,\n Tmin=None,\n Tmax=None,\n wmin=None,\n wmax=None,\n main_labels_color=None,\n major_grid_style=None,\n minor_grid_style=None,\n n_h = 15,\n n_v = 20,\n h_isoline_style=None,\n v_isoline_style=None,\n rel_hum_isoline_style=None,\n Twb_isoline_style=None,\n unit_system=None,\n redraw=False,\n cache=True,\n **kwargs\n ):\n if self.cached_psychrometric_chart.cache_info().currsize>0:\n show_psych = True\n else:\n show_psych = False\n\n if redraw or not cache:\n self.cached_psychrometric_chart.cache_clear()\n \n psych = self.cached_psychrometric_chart(\n Tmin,\n Tmax,\n wmin,\n wmax,\n main_labels_color,\n major_grid_style,\n minor_grid_style,\n n_h,\n n_v,\n h_isoline_style,\n v_isoline_style,\n rel_hum_isoline_style,\n Twb_isoline_style,\n unit_system,\n **kwargs\n )\n\n if show_psych: psych.show()\n return psych\n\n @functools.lru_cache()\n def cached_psychrometric_chart(\n self,\n Tmin=None,\n Tmax=None,\n wmin=None,\n wmax=None,\n main_labels_color=None,\n major_grid_style=None,\n minor_grid_style=None,\n n_h = 15,\n n_v = 20,\n h_isoline_style=None,\n v_isoline_style=None,\n rel_hum_isoline_style=None,\n Twb_isoline_style=None,\n unit_system=None,\n **kwargs\n ):\n unit_system = unit_system or self.unit_system\n psych = self.property_diagram(x=\"T\", y=\"omega\", saturation=False, unit_system=unit_system, p=self.__p, **kwargs)\n\n # Line Styles\n main_labels_color = main_labels_color or 'black'\n major_grid_style = major_grid_style or dict(\n linestyle='-',\n linewidth=0.5,\n color=[0.4,0.4,0.4,0.4]\n )\n minor_grid_style = minor_grid_style or dict(\n linestyle='-',\n linewidth=0.25,\n color=[0.4,0.4,0.4,0.4]\n )\n h_isoline_style = h_isoline_style or dict(\n linestyle='-',\n linewidth=0.5,\n color=[0.4,0.4,0.4,0.4],\n pos=0,\n labelprops=dict(\n ha='right',\n va='center',\n pos=0.0\n )\n )\n v_isoline_style = v_isoline_style or dict(\n linestyle='-',\n linewidth=0.5,\n color=[0.4,0.4,0.4,0.4],\n labelprops=dict(color='grey',offset=2))\n rel_hum_isoline_style = rel_hum_isoline_style or dict(\n linestyle='-',\n linewidth=0.5,\n color=[0.4,0.4,0.4,0.4],\n labelprops=dict(\n ha='right',\n color='grey',\n offset=2\n )\n )\n Twb_isoline_style = 
Twb_isoline_style or dict(\n linestyle=(0,(5,10)),\n linewidth=0.5,\n color=[0.4,0.4,0.4,0.4],\n pos=0.2,\n labelprops=dict(\n ha='left',\n color='grey',\n offset=2\n )\n )\n \n # Set Axis limits\n if Tmin is None: Tmin = Quantity(30.0,'degF')\n Tmin = Tmin.to(psych.x_units)\n if Tmax is None: Tmax = Quantity(50.0,'degC')\n Tmax = Tmax.to(psych.x_units)\n if wmin is None: wmin = Quantity(0.0,'kg_water/kg_dry_air')\n wmin = wmin.to(psych.y_units)\n if wmax is None: wmax = Quantity(0.03,'kg_water/kg_dry_air')\n wmax = wmax.to(psych.y_units)\n psych.Tmin,psych.Tmax,psych.wmin,psych.wmax = Tmin,Tmax,wmin,wmax\n psych.ax.set_xlim(left=Tmin.magnitude,right=Tmax.magnitude)\n psych.ax.set_ylim(bottom=wmin.magnitude,top=wmax.magnitude)\n \n # Set axis labels\n x_units_str = f\"{self.format_units(f'{psych.x_units}')}\"\n y_units_str = f\"{self.format_units(f'{psych.y_units}')}\"\n psych.ax.set_xlabel(f\"Dry-Bulb Temperature, $T_{{\\\\mathrm{{db}}}}\\\\ [\\\\mathrm{{{x_units_str}}}]$\")\n psych.ax.set_ylabel(f\"Humidity Ratio, $\\\\omega\\\\ \\\\left[\\mathrm{{{y_units_str}}}\\\\right]$\")\n \n # Set axis style\n psych.ax.yaxis.tick_right()\n psych.ax.yaxis.set_label_position(\"right\")\n psych.ax.spines[\"right\"].set_visible(True)\n psych.ax.spines[\"left\"].set_visible(False)\n \n # Add Plot Title\n try:\n pressure_str = f'{psych.props.p}'\n except:\n pressure_str = f'{psych.props.p:~L}'\n title = f'Psychrometric Chart\\nPressure: $\\mathrm{{{pressure_str}}}$'\n psych.text((0.05*(Tmax-Tmin)+Tmin).magnitude, (0.9*(wmax-wmin)+wmin).magnitude, title, fontsize=12)\n \n # Draw grid\n # Dry-bulb grid\n tickscale=1\n x_major_ticks = self.rounded_array(Tmin.magnitude,Tmax.magnitude,spacing=5)\n x_minor_ticks = self.rounded_array(Tmin.magnitude,Tmax.magnitude,spacing=1)\n plt.xticks(x_major_ticks)\n ymin = wmin\n for i in x_major_ticks:\n ymax = min(psych.props.w(T_db=Quantity(i,psych.x_units),rel_hum=1),wmax)\n psych.ax.plot([i,i],[ymin.magnitude,ymax.magnitude],**major_grid_style)\n for i in x_minor_ticks:\n ymax = min(psych.props.w(T_db=Quantity(i,psych.x_units),rel_hum=1),wmax)\n psych.ax.plot([i,i],[ymin.magnitude,ymax.magnitude],**minor_grid_style) \n \n # Humidity ratio grid\n y_minor_ticks = self.rounded_array(wmin.magnitude,wmax.magnitude,spacing=0.001)\n y_major_ticks = self.rounded_array(wmin.magnitude,wmax.magnitude,spacing=0.005)\n plt.yticks(y_major_ticks)\n xmax = Tmax\n for i in y_major_ticks:\n xmin=Tmin\n try:\n phi_left_lim = psych.props.rel_hum(T_db=Tmin,w=Quantity(i,psych.y_units))\n except:\n xmin = psych.props.T(w=Quantity(i,psych.y_units),rel_hum=1).to(psych.x_units)\n psych.ax.plot([xmin.magnitude,xmax.magnitude],[i,i],**major_grid_style)\n for i in y_minor_ticks:\n xmin=Tmin\n try:\n phi_left_lim = psych.props.rel_hum(T_db=Tmin,w=Quantity(i,psych.y_units))\n except:\n xmin = psych.props.T(w=Quantity(i,psych.y_units),rel_hum=1).to(psych.x_units)\n psych.ax.plot([xmin.magnitude,xmax.magnitude],[i,i],**minor_grid_style) \n\n # Saturated line\n psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=1,label=False,linestyle='-',color='black')\n # Relative humidity lines\n for i in [0.1]:\n lstyle = dict(**rel_hum_isoline_style)\n lstyle['labelprops'] = dict(**rel_hum_isoline_style['labelprops'])\n lstyle['labelprops']['color'] = main_labels_color\n psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=i,label=f'$\\phi=10\\%$',xcoor=(Tmin+0.95*(Tmax-Tmin)).magnitude,**lstyle)\n for i in [0.02,0.04,0.06,0.08,0.15,0.2,0.25,0.3,0.4,0.5,0.6,0.7,0.8,0.9]:\n rel_hum = i\n xmin,xmax = Tmin,Tmax\n 
if psych.props.w(rel_hum=rel_hum,T=Tmax) > wmax: \n xmax = psych.props.T(w=wmax,rel_hum=rel_hum)\n psych.plot_iso_line(iso_symb='rel_hum',iso_value=rel_hum,x_range=[xmin,xmax],label=f'{int(i*100)}%',ycoor=(wmin+0.95*(wmax-wmin)).magnitude,**rel_hum_isoline_style)\n else:\n psych.plot_iso_line(iso_symb='rel_hum',iso_value=rel_hum,x_range=[xmin,xmax],label=f'{int(i*100)}%',xcoor=(Tmin+0.95*(Tmax-Tmin)).magnitude,**rel_hum_isoline_style)\n \n # Enthalpy lines\n hmin = psych.props.h(T=Tmin,w=wmin)\n hmax = psych.props.h(T=Tmax,w=wmax)\n h_units = hmin.units\n h_units_str = f\"{self.format_units(f'{h_units}')}\"\n for i in self.rounded_array(hmin.magnitude,hmax.magnitude,15):\n h = Quantity(i,h_units)\n xmin = max(psych.props.T(h=h,rel_hum=1),Tmin,psych.props.T(h=h,w=wmax))\n xmax = min(psych.props.T(h=h,w=wmin),Tmax,psych.props.T(h=h,w=wmin))\n try:\n psych.plot_iso_line(iso_symb='h',iso_value=h,x_range=[xmin,xmax],label=f'{int(i) if i.is_integer() else i}',**h_isoline_style)\n except:\n pass\n # Enthalpy axis label\n psych._plot_iso_wrapper(iso_symb='rel_hum',iso_value=1,label=f'Enthalpy, $h$ $\\\\left[\\\\mathrm{{{h_units_str}}}\\\\right]$',linewidth=0,pos=0.5,labelprops=dict(offset=25)) \n \n # Specific volume lines\n vmin = psych.props.v(T=Tmin,omega=wmin)\n vmax = psych.props.v(T=Tmax,omega=wmax)\n v_units = vmin.units\n v_units_str = f\"{self.format_units(f'{v_units}',displaystyle=False)}\"\n v_list = self.rounded_array(vmin.magnitude,vmax.magnitude,20)\n v_main_label_index = int(len(v_list)*0.6)\n for i,val in enumerate(v_list):\n v = Quantity(val,v_units)\n ymax = min(psych.props.w(v=v,rel_hum=1),wmax)\n try:\n ymin = max(psych.props.w(T=Tmax,v=v),wmin)\n except ValueError:\n ymin = wmin\n v_string = int(val) if val.is_integer() else f'{val:.5}'.rstrip() \n if i == v_main_label_index:\n lstyle = dict(**v_isoline_style)\n lstyle['labelprops'] = dict(**v_isoline_style['labelprops'])\n lstyle['labelprops']['color'] = main_labels_color\n psych.plot_iso_line(iso_symb='v',iso_value=v,y_range=[ymax,ymin],n_points=10,label=f'$v={v_string}\\ \\mathrm{{{v_units_str}}}$',pos=0.7,**lstyle)\n else:\n try:\n psych.plot_iso_line(iso_symb='v',iso_value=v,y_range=[ymax,ymin],label=v_string,n_points=10,pos=0.7,**v_isoline_style)\n except:\n pass\n \n # Wet-bulb Temperature lines\n T_units = Tmin.units\n T_units_str = f\"{self.format_units(f'{T_units}',displaystyle=False)}\"\n Twb_main_label_index = int(len(x_major_ticks)*0.5)\n for i,T in enumerate(x_major_ticks[:-1]):\n Twb = Quantity(T,psych.x_units)\n ymax = min(psych.props.w(T=Twb,rel_hum=1),wmax)\n try:\n ymin = max(psych.props.w(T=Tmax,T_wb=Twb),wmin)\n except ValueError:\n ymin = wmin\n if ymin<wmax:\n if i == Twb_main_label_index:\n lstyle = dict(**Twb_isoline_style)\n lstyle['labelprops'] = dict(**Twb_isoline_style['labelprops'])\n lstyle['labelprops']['color'] = main_labels_color\n psych.plot_iso_line(iso_symb='T_wb',iso_value=Twb,y_range=[ymax,ymin],n_points=10,label=f'$T_\\mathrm{{wb}}={int(T)}\\mathrm{{{T_units_str}}}$',**lstyle)\n else:\n psych.plot_iso_line(iso_symb='T_wb',iso_value=Twb,y_range=[ymax,ymin],n_points=10,label=f'${int(T)}\\mathrm{{{T_units_str}}}$',**Twb_isoline_style)\n\n return psych\n\n def Ts_diagram(self, unit_system=None, saturation=False, **kwargs):\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"s\", y=\"T\", unit_system=unit_system, saturation=saturation, **kwargs\n )\n\n def pv_diagram(self, unit_system=None, saturation=None, log_x=None, log_y=None, **kwargs):\n if self.fluid 
== 'Air':\n saturation = saturation or False\n log_x = log_x or False\n log_y = log_y or False\n else:\n saturation = True\n log_x = log_x or True\n log_y = log_y or True\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"v\", y=\"p\", unit_system=unit_system, saturation=saturation, log_x=log_x, log_y=log_y, **kwargs\n )\n\n def Tv_diagram(self, unit_system=None, saturation=None, **kwargs):\n if self.fluid == 'Air': saturation = saturation or False\n else: saturation = saturation or True\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"v\", y=\"T\", unit_system=unit_system, saturation=saturation, **kwargs\n )\n\n def hs_diagram(self, unit_system=None, saturation=None, **kwargs):\n if self.fluid == 'Air': saturation = saturation or False\n else: saturation = saturation or True\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"s\", y=\"h\", unit_system=unit_system, saturation=saturation, **kwargs\n )\n\n def ph_diagram(self, unit_system=None, saturation=None, **kwargs):\n if self.fluid == 'Air': saturation = saturation or False\n else: saturation = saturation or True\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"h\", y=\"p\", unit_system=unit_system, saturation=saturation, **kwargs\n )\n\n def pT_diagram(self, unit_system=None, saturation=None, **kwargs):\n if self.fluid == 'Air': saturation = saturation or False\n else: saturation = saturation or True\n unit_system = unit_system or self.unit_system\n return self.property_diagram(\n x=\"T\", y=\"p\", unit_system=unit_system, saturation=saturation, **kwargs\n )\n\n\ndef LegacyPropertyPlot(\n x=None,\n y=None,\n x_units=None,\n y_units=None,\n plot_type=None,\n fluid=None,\n saturation=False,\n unit_system=\"SI_C\",\n **kwargs,\n):\n props = Properties(fluid=fluid, unit_system=unit_system, **kwargs)\n return PropertyPlot(\n x=x,\n y=y,\n x_units=x_units,\n y_units=y_units,\n property_table=props,\n saturation=saturation,\n unit_system=unit_system,\n **kwargs,\n )\n"
] | [
[
"numpy.arange",
"numpy.log10"
]
] |
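The `numpy.arange` and `numpy.log10` calls listed for this row come from the `rounded_array` tick helper inside the psychrometric-chart code above. A minimal standalone sketch of that snapping idea (the function name and the simplifications are illustrative, not taken from the repo) could look like:

```python
import numpy as np

def rounded_ticks(val1, val2, spacing=None, n=20):
    """Ticks from val1 to val2; if no spacing is given, pick a round one
    from the order of magnitude of the span (simplified from rounded_array above)."""
    if spacing is None:
        raw = (val2 - val1) / n                        # target spacing for roughly n ticks
        spacing = 10 ** np.floor(np.log10(raw))        # round it down to a power of ten
    start = spacing * np.round(val1 / spacing)         # snap the first tick to a multiple of the spacing
    ticks = np.arange(start, val2 + spacing, spacing)  # step through the range
    return ticks[(ticks >= val1) & (ticks <= val2)]    # trim anything outside the axis limits

# rounded_ticks(0.0, 50.0, spacing=5.0) -> array([ 0.,  5., 10., ..., 45., 50.])
```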
Dongzhou-1996/tf_learning | [
"fe764e78cc1a934707ae01d0847f901cb6fbb8b9"
] | [
"tf_mnist.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\nimport tensorflow as tf\nimport os\nimport numpy as np\nimport argparse\nimport shutil\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nparser = argparse.ArgumentParser('MNIST Softmax')\nparser.add_argument('--data_dir', type=str, default='/tmp/mnist-data', \n help='the directory of MNIST dataset')\nparser.add_argument('--lr', type=float, default=0.01, help='learning rate')\nparser.add_argument('--batch_size', type=int, default=32, help='batch size')\nparser.add_argument('--max_train_step', type=int, default=50000, help='the maximum training step')\nparser.add_argument('--model_path', type=str, default='', help='the path of checkpoint file')\nargs = parser.parse_args()\n\ndef model():\n x = tf.placeholder(tf.float32, [None, 784], name='x')\n gt = tf.placeholder(tf.float32, [None, 10], name='groundtruth')\n with tf.variable_scope('layer1'):\n w1 = tf.get_variable('weight1', [784, 1024], initializer=tf.random_normal_initializer())\n b1 = tf.get_variable('bias1', [1024], initializer=tf.constant_initializer(0.0))\n h1 = tf.nn.relu(tf.matmul(x, w1) + b1)\n with tf.variable_scope('layer2'):\n w2 = tf.get_variable('weight2', [1024, 1024], initializer=tf.random_normal_initializer())\n b2 = tf.get_variable('bias2', [1024], initializer=tf.constant_initializer(0.0))\n h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)\n with tf.variable_scope('layer3'):\n w3 = tf.get_variable('weight3', [1024, 10], initializer=tf.random_normal_initializer())\n b3 = tf.get_variable('bias3', [10], initializer=tf.constant_initializer(0.0))\n y = tf.matmul(h2, w3) + b3\n # losses\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=gt, logits=y))\n # optimizer\n optimizer = tf.train.GradientDescentOptimizer(args.lr)\n # define one-step train ops\n train_op = optimizer.minimize(cross_entropy)\n return x, y, gt, train_op \n \nif __name__ == \"__main__\":\n max_train_step = args.max_train_step\n batch_size = args.batch_size\n mnist = input_data.read_data_sets(args.data_dir, one_hot=True)\n x, y, gt, train_op = model()\n \n # create saver\n saver = tf.train.Saver()\n if os.path.exists('./mnist'):\n print('=> directory is existed!')\n else:\n print('=> creating temporary directory ...')\n os.makedirs('./mnist')\n\n with tf.Session() as sess:\n if args.model_path == '':\n tf.global_variables_initializer().run()\n else:\n saver.restore(sess, args.model_path)\n\n for i in range(max_train_step):\n batch_x, batch_gt = mnist.train.next_batch(batch_size)\n sess.run(train_op, feed_dict={x: batch_x, gt: batch_gt})\n\n if i % 100 == 0:\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(gt, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print('=> accuracy: {}'.format(sess.run(accuracy, feed_dict={x: mnist.test.images, gt: mnist.test.labels})))\n saver.save(sess, 'mnist/mnist_{:02d}.ckpt'.format(int(i / 100) + 1))\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.constant_initializer",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.global_variables_initializer",
"tensorflow.variable_scope",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.matmul",
"tensorflow.cast",
"tensorflow.random_normal_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.train.Saver",
"tensorflow.Session",
"tensorflow.argmax"
]
] |
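One thing worth noting about the tf_mnist.py row above: the `tf.equal`/`tf.argmax`/`tf.cast` accuracy ops are re-created inside the training loop every 100 steps, which keeps adding nodes to the TF1 graph. A common pattern is to build them once next to the model; a sketch assuming the same names (`x`, `y`, `gt`, `sess`, `mnist`, `train_op`) as the file above:

```python
import tensorflow as tf

# Sketch only: reuses x, y, gt, sess, mnist, train_op, batch_size, max_train_step
# from tf_mnist.py above. Build the evaluation ops once, outside the loop.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(gt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

for i in range(max_train_step):
    batch_x, batch_gt = mnist.train.next_batch(batch_size)
    sess.run(train_op, feed_dict={x: batch_x, gt: batch_gt})
    if i % 100 == 0:
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, gt: mnist.test.labels})
        print('=> accuracy: {:.4f}'.format(acc))
```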
MalayAgr/DeepNeuralNetworksFromScratch | [
"ded75b148d9bb497014c016bfd2d7d0280c007ab"
] | [
"dnn/loss.py"
] | [
"from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\nfrom numba import njit\n\n\n@njit(cache=True)\ndef _clip(a: np.ndarray, epsilon: float) -> np.ndarray:\n if ((a == 1) | (a <= 0)).any():\n a = np.maximum(a, epsilon).astype(np.float32)\n a = np.minimum(1 - epsilon, a).astype(np.float32)\n return a\n\n\n@njit(cache=True)\ndef _binary_crossentropy(\n labels: np.ndarray, preds: np.ndarray, epsilon: float\n) -> float:\n preds = _clip(preds, epsilon)\n loss = labels * np.log(preds)\n loss += (1 - labels) * np.log(1 - preds)\n loss = np.sum(-loss)\n loss /= labels.shape[-1]\n return loss\n\n\n@njit(cache=True)\ndef _binary_crossentropy_derivative(\n labels: np.ndarray, preds: np.ndarray, epsilon: float\n) -> np.ndarray:\n preds = _clip(preds, epsilon)\n grad = 1 - labels\n grad /= 1 - preds\n grad -= labels / preds\n grad /= labels.shape[-1]\n return grad\n\n\n@njit(cache=True)\ndef _categorial_crossentropy(labels: np.ndarray, preds: np.ndarray) -> float:\n prod = labels * np.log(preds)\n bs = labels.shape[-1]\n loss = 0.0\n for idx in np.arange(bs):\n loss += -prod[..., idx].sum()\n loss /= bs\n return loss\n\n\n@njit(cache=True)\ndef _categorial_crossentropy_derivative(\n labels: np.ndarray, preds: np.ndarray\n) -> np.ndarray:\n grad = -labels\n grad /= preds\n grad /= labels.shape[-1]\n return grad\n\n\nclass Loss(ABC):\n names: list[str] = None\n REGISTRY: dict[str, type[Loss]] = {}\n ndim: int = None\n\n def __init_subclass__(cls, **kwargs) -> None:\n if (names := cls.names) is not None:\n Loss.REGISTRY.update({name: cls for name in names})\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}()\"\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def validate_input(self, labels: np.ndarray, preds: np.ndarray) -> None:\n if labels.shape != preds.shape:\n raise AttributeError(\n \"The labels and the predictions should have the same shape\"\n )\n\n if labels.ndim < self.ndim:\n raise AttributeError(\n f\"{self.__class__.__name__} expects at least {self.ndim}-dimensional inputs\"\n )\n\n def should_reshape(self, shape: tuple[int, ...]) -> bool:\n \"\"\"Method to determine if the labels and predictions should be reshaped.\"\"\"\n return False\n\n @staticmethod\n def reshape_labels_and_preds(\n labels: np.ndarray, preds: np.ndarray\n ) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"Method to reshape the labels and predictions if they should be reshaped.\"\"\"\n return labels, preds\n\n @abstractmethod\n def loss_func(self, labels: np.ndarray, preds: np.ndarray) -> float:\n \"\"\"\n The formula used to calculate the loss.\n Subclasses classes must implement this.\n\n If the loss is J with inputs preds and Y,\n this should return J(preds, Y).\n\n Arguments:\n preds: Numpy-array, the predictions to be used for calculating the loss.\n\n Returns:\n A float representing the loss.\n \"\"\"\n\n @abstractmethod\n def loss_derivative(self, labels: np.ndarray, preds: np.ndarray) -> np.ndarray:\n \"\"\"\n The formula used to calculate the derivative of the loss function\n With respect to preds.\n Subclasses classes must implement this.\n\n If the loss is J with inputs preds and Y,\n this should return J'(preds, Y).\n\n Arguments:\n preds: Numpy-array, the predictions to be used for calculating the derivatives.\n\n Returns:\n A Numpy-array with the calculated derivatives.\n \"\"\"\n\n def compute_loss(self, labels: np.ndarray, preds: np.ndarray) -> float:\n self.validate_input(labels, preds)\n\n if self.should_reshape(labels.shape):\n labels, 
preds = self.reshape_labels_and_preds(labels, preds)\n\n return self.loss_func(labels, preds)\n\n def compute_derivatives(self, labels: np.ndarray, preds: np.ndarray) -> np.ndarray:\n self.validate_input(labels, preds)\n\n old_shape = None\n\n if self.should_reshape(labels.shape):\n old_shape = labels.shape\n labels, preds = self.reshape_labels_and_preds(labels, preds)\n\n grad = self.loss_derivative(labels, preds).astype(np.float32)\n\n if old_shape is not None:\n grad.shape = old_shape\n\n return grad\n\n\nclass BinaryCrossEntropy(Loss):\n names = [\"binary_crossentropy\", \"bce\"]\n ndim = 2\n epsilon = 1e-15\n\n def should_reshape(self, shape: tuple[int, ...]) -> bool:\n return len(shape) > self.ndim or shape[0] != 1\n\n @staticmethod\n def reshape_labels_and_preds(\n labels: np.ndarray, preds: np.ndarray\n ) -> tuple[np.ndarray, np.ndarray]:\n return labels.reshape(1, -1), preds.reshape(1, -1)\n\n def loss_func(self, labels: np.ndarray, preds: np.ndarray) -> float:\n return _binary_crossentropy(labels=labels, preds=preds, epsilon=self.epsilon)\n\n def loss_derivative(self, labels: np.ndarray, preds: np.ndarray) -> np.ndarray:\n return _binary_crossentropy_derivative(\n labels=labels,\n preds=preds,\n epsilon=self.epsilon,\n )\n\n\nclass MeanSquaredError(Loss):\n names = [\"mean_squared_error\", \"mse\"]\n ndim = 2\n\n def should_reshape(self, shape: tuple) -> bool:\n return len(shape) > self.ndim or shape[0] != 1\n\n @staticmethod\n def reshape_labels_and_preds(\n labels: np.ndarray, preds: np.ndarray\n ) -> tuple[np.ndarray, np.ndarray]:\n return labels.reshape(1, -1), preds.reshape(1, -1)\n\n def loss_func(self, labels: np.ndarray, preds: np.ndarray) -> float:\n loss = preds - labels\n loss **= 2\n loss = np.sum(loss / labels.shape[-1])\n\n return np.squeeze(loss)\n\n def loss_derivative(self, labels: np.ndarray, preds: np.ndarray) -> np.ndarray:\n grad = preds - labels\n grad *= 2\n grad /= labels.shape[-1]\n\n return grad\n\n\nclass CategoricalCrossEntropy(Loss):\n names = [\"categorial_crossentropy\", \"cce\"]\n ndim = 2\n\n def should_reshape(self, shape: tuple) -> bool:\n return len(shape) > self.ndim\n\n @staticmethod\n def reshape_labels_and_preds(\n labels: np.ndarray, preds: np.ndarray\n ) -> tuple[np.ndarray, np.ndarray]:\n classes = labels.shape[0]\n return labels.reshape(classes, -1), preds.reshape(classes, -1)\n\n def loss_func(self, labels: np.ndarray, preds: np.ndarray) -> float:\n return _categorial_crossentropy(labels=labels, preds=preds)\n\n def loss_derivative(self, labels: np.ndarray, preds: np.ndarray) -> np.ndarray:\n return _categorial_crossentropy_derivative(labels=labels, preds=preds)\n"
] | [
[
"numpy.sum",
"numpy.squeeze",
"numpy.arange",
"numpy.log",
"numpy.maximum",
"numpy.minimum"
]
] |
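As a hypothetical usage sketch (not taken from the repo, and assuming `dnn/loss.py` is importable as `dnn.loss`), labels and predictions are passed with shape `(1, batch)`, which is what the reshape helpers above expect:

```python
import numpy as np
from dnn.loss import BinaryCrossEntropy   # hypothetical import path for the module above

labels = np.array([[1.0, 0.0, 1.0]], dtype=np.float32)   # shape (1, batch)
preds = np.array([[0.9, 0.2, 0.7]], dtype=np.float32)

bce = BinaryCrossEntropy()
loss = bce.compute_loss(labels, preds)          # scalar cross-entropy
grad = bce.compute_derivatives(labels, preds)   # d(loss)/d(preds), same shape as preds

# Losses can also be looked up by name via the registry filled in __init_subclass__:
mse_cls = BinaryCrossEntropy.REGISTRY["mse"]    # -> MeanSquaredError
```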
dmuraco3/CompuTradePython | [
"5c2bc4d1d3baabd68677b8c3d78b8caeed52ba28"
] | [
"build/lib/Quantuiti/_ema.py"
] | [
"import numpy as np\ndef ema(self, N):\n \"\"\"\n Simple Moving Average = (N - PeriodSum) / N\n N = number of days in a given period\n PeriodSum = sum of stock closing prices in that period\n \"\"\"\n name = 'ema_' + str(N)\n dependent = 'sma_' + str(N)\n try:\n return self.data[name][self.index]\n \n except Exception as error:\n self.sma(N)\n print(self.data.head())\n temp=[]\n for index, row in self.data.iterrows():\n # print(type(row[dependent]))\n print(np.isnan(row[dependent]))\n if np.isnan(row[dependent]):\n temp.append(row[dependent])\n # print(row[dependent])\n else:\n if np.isnan(temp[-1]):\n ema = (self.data['Close'][index] - self.data[dependent][index]) * (2 / (N + 1)) + self.data[dependent][index]\n else:\n ema = (self.data['Close'][index] - temp[-1]) * (2 / (N + 1)) + temp[-1]\n \n temp.append(ema)\n \n self.data[name] = temp\n return self.data[name][self.index]\n \n \n # setattr(self, name, [sma])"
] | [
[
"numpy.isnan"
]
] |
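The `ema()` method above is easier to follow once the DataFrame plumbing and leftover debug prints are stripped away: it seeds the EMA with the simple moving average as soon as one is available, then applies the usual recurrence EMA_t = (Close_t - EMA_{t-1}) * 2/(N+1) + EMA_{t-1}. A self-contained sketch of that logic (the function name is illustrative):

```python
import numpy as np

def ema_from_sma(close, sma, N):
    """Exponential moving average seeded from the SMA, mirroring ema() above."""
    k = 2.0 / (N + 1)                       # smoothing factor
    out = np.full(len(close), np.nan)       # NaN during the warm-up period
    prev = np.nan
    for t in range(len(close)):
        if np.isnan(sma[t]):                # not enough history for an SMA yet
            continue
        if np.isnan(prev):                  # first usable point: seed with the SMA
            prev = sma[t]
        else:                               # EMA_t = (C_t - EMA_{t-1}) * k + EMA_{t-1}
            prev = (close[t] - prev) * k + prev
        out[t] = prev
    return out
```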
debowin/pytext | [
"91126bb34bd689f3513f25ca0d356ad374e004ab"
] | [
"pytext/metrics/__init__.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport itertools\nfrom collections import defaultdict\nfrom json import dumps as json_dumps\nfrom typing import (\n Any,\n DefaultDict,\n Dict,\n List,\n NamedTuple,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nimport numpy as np\nfrom pytext.utils import cuda\nfrom pytext.utils.ascii_table import ascii_table, ascii_table_from_dict\n\n\nRECALL_AT_PRECISION_THRESHOLDS = [0.2, 0.4, 0.6, 0.8, 0.9]\n\n\"\"\"\nBasic metric classes and functions for single-label prediction problems.\nExtending to multi-label support\n\"\"\"\n\n\nclass LabelPrediction(NamedTuple):\n \"\"\"\n Label predictions of an example.\n\n Attributes:\n label_scores: Confidence scores that each label receives.\n predicted_label: Index of the predicted label. This is usually the label with\n the highest confidence score in label_scores.\n expected_label: Index of the true label.\n \"\"\"\n\n label_scores: List[float]\n predicted_label: int\n expected_label: int\n\n\nclass LabelListPrediction(NamedTuple):\n \"\"\"\n Label list predictions of an example.\n\n Attributes:\n label_scores: Confidence scores that each label receives.\n predicted_label: List of indices of the predicted label.\n expected_label: List of indices of the true label.\n \"\"\"\n\n label_scores: List[float]\n predicted_label: List[int]\n expected_label: List[int]\n\n\nclass PRF1Scores(NamedTuple):\n \"\"\"\n Precision/recall/F1 scores for a collection of predictions.\n\n Attributes:\n true_positives: Number of true positives.\n false_positives: Number of false positives.\n false_negatives: Number of false negatives.\n precision: TP / (TP + FP).\n recall: TP / (TP + FN).\n f1: 2 * TP / (2 * TP + FP + FN).\n \"\"\"\n\n true_positives: int\n false_positives: int\n false_negatives: int\n precision: float\n recall: float\n f1: float\n\n\nclass SoftClassificationMetrics(NamedTuple):\n \"\"\"\n Classification scores that are independent of thresholds.\n \"\"\"\n\n average_precision: float\n recall_at_precision: Dict[float, float]\n decision_thresh_at_precision: Dict[float, float]\n roc_auc: Optional[float]\n\n\nclass MacroPRF1Scores(NamedTuple):\n \"\"\"\n Macro precision/recall/F1 scores (averages across each label).\n\n Attributes:\n num_label: Number of distinct labels.\n precision: Equally weighted average of precisions for each label.\n recall: Equally weighted average of recalls for each label.\n f1: Equally weighted average of F1 scores for each label.\n \"\"\"\n\n num_labels: int\n precision: float\n recall: float\n f1: float\n\n\nclass MacroPRF1Metrics(NamedTuple):\n \"\"\"\n Aggregated metric class for macro precision/recall/F1 scores.\n\n Attributes:\n per_label_scores: Mapping from label string to the corresponding\n precision/recall/F1 scores.\n macro_scores: Macro precision/recall/F1 scores across the labels in\n `per_label_scores`.\n \"\"\"\n\n per_label_scores: Dict[str, PRF1Scores]\n macro_scores: MacroPRF1Scores\n\n def print_metrics(self, indentation=\"\") -> None:\n print(\n ascii_table(\n [\n {\n \"label\": label,\n \"precision\": f\"{metrics.precision:.2f}\",\n \"recall\": f\"{metrics.recall:.2f}\",\n \"f1\": f\"{metrics.f1:.2f}\",\n \"support\": metrics.true_positives + metrics.false_negatives,\n }\n for label, metrics in sorted(self.per_label_scores.items())\n ],\n human_column_names={\n \"label\": \"Label\",\n \"precision\": \"Precision\",\n \"recall\": \"Recall\",\n \"f1\": \"F1\",\n \"support\": \"Support\",\n },\n footer={\n \"label\": 
\"Overall macro scores\",\n \"precision\": f\"{self.macro_scores.precision:.2f}\",\n \"recall\": f\"{self.macro_scores.recall:.2f}\",\n \"f1\": f\"{self.macro_scores.f1:.2f}\",\n },\n alignments={\"label\": \"<\"},\n indentation=indentation,\n )\n )\n\n\nclass PRF1Metrics(NamedTuple):\n \"\"\"\n Metric class for all types of precision/recall/F1 scores.\n\n Attributes:\n per_label_scores: Map from label string to the corresponding precision/recall/F1\n scores.\n macro_scores: Macro precision/recall/F1 scores across the labels in\n `per_label_scores`.\n micro_scores: Micro (regular) precision/recall/F1 scores for the same\n collection of predictions.\n \"\"\"\n\n per_label_scores: Dict[str, PRF1Scores]\n macro_scores: MacroPRF1Scores\n micro_scores: PRF1Scores\n\n def print_metrics(self) -> None:\n res = (\n f\"\\t{'Per label scores':<40}\"\n f\"\\t{'Precision':<10}\"\n f\"\\t{'Recall':<10}\"\n f\"\\t{'F1':<10}\"\n f\"\\t{'Support':<10}\\n\\n\"\n )\n for label, label_metrics in self.per_label_scores.items():\n support = label_metrics.true_positives + label_metrics.false_negatives\n res += (\n f\"\\t{label:<40}\"\n f\"\\t{label_metrics.precision * 100:<10.3f}\"\n f\"\\t{label_metrics.recall * 100:<10.3f}\"\n f\"\\t{label_metrics.f1 * 100:<10.3f}\"\n f\"\\t{support:<10}\\n\"\n )\n support = self.micro_scores.true_positives + self.micro_scores.false_negatives\n res += (\n f\"\\n\\t{'Overall micro scores':<40}\"\n f\"\\t{self.micro_scores.precision * 100:<10.3f}\"\n f\"\\t{self.micro_scores.recall * 100:<10.3f}\"\n f\"\\t{self.micro_scores.f1 * 100:<10.3f}\"\n f\"\\t{support:<10}\\n\"\n )\n res += (\n f\"\\t{'Overall macro scores':<40}\"\n f\"\\t{self.macro_scores.precision * 100:<10.3f}\"\n f\"\\t{self.macro_scores.recall * 100:<10.3f}\"\n f\"\\t{self.macro_scores.f1 * 100:<10.3f}\\n\"\n )\n print(res)\n\n\nclass ClassificationMetrics(NamedTuple):\n \"\"\"\n Metric class for various classification metrics.\n\n Attributes:\n accuracy: Overall accuracy of predictions.\n macro_prf1_metrics: Macro precision/recall/F1 scores.\n per_label_soft_scores: Per label soft metrics.\n mcc: Matthews correlation coefficient.\n roc_auc: Area under the Receiver Operating Characteristic curve.\n loss: Training loss (only used for selecting best model, no need to print).\n \"\"\"\n\n accuracy: float\n macro_prf1_metrics: MacroPRF1Metrics\n per_label_soft_scores: Optional[Dict[str, SoftClassificationMetrics]]\n mcc: Optional[float]\n roc_auc: Optional[float]\n loss: float\n\n def print_metrics(self, report_pep=False) -> None:\n print(f\"Accuracy: {self.accuracy * 100:.2f}\")\n print(\"\\nSoft Metrics:\")\n if self.per_label_soft_scores:\n soft_scores = [\n {\n \"label\": label,\n \"avg_pr\": f\"{metrics.average_precision:.3f}\",\n \"roc_auc\": f\"{(metrics.roc_auc or 0.0):.3f}\",\n }\n for label, metrics in sorted(self.per_label_soft_scores.items())\n ]\n columns = {\n \"label\": \"Label\",\n \"avg_pr\": \"Average precision\",\n \"roc_auc\": \"ROC AUC\",\n }\n print(ascii_table(soft_scores, columns))\n all_thresholds = set(\n itertools.chain.from_iterable(\n metrics.recall_at_precision\n for metrics in self.per_label_soft_scores.values()\n )\n )\n print(\"\\nRecall at Precision\")\n print(\n ascii_table(\n (\n dict(\n {\"label\": label},\n **{\n str(p): f\"{r:.3f}\"\n for p, r in metrics.recall_at_precision.items()\n },\n )\n for label, metrics in sorted(self.per_label_soft_scores.items())\n ),\n dict(\n {\"label\": \"Label\"},\n **{str(t): f\"R@P {t}\" for t in all_thresholds},\n ),\n alignments={\"label\": 
\"<\"},\n )\n )\n if self.mcc:\n print(f\"\\nMatthews correlation coefficient: {self.mcc :.3f}\")\n if self.roc_auc:\n print(f\"\\nROC AUC: {self.roc_auc:.3f}\")\n if report_pep:\n self.print_pep()\n\n def print_pep(self):\n metrics = {\"Accuracy\": f\"{self.accuracy * 100:.2f}\"}\n if self.roc_auc:\n metrics[\"ROC AUC\"] = f\"{self.roc_auc :.3f}\"\n for key, value in metrics.items():\n info = {\"type\": \"NET\", \"metric\": key, \"unit\": \"None\", \"value\": value}\n print(\"PyTorchObserver \" + json_dumps(info))\n\n\nclass Confusions:\n \"\"\"\n Confusion information for a collection of predictions.\n\n Attributes:\n TP: Number of true positives.\n FP: Number of false positives.\n FN: Number of false negatives.\n \"\"\"\n\n __slots__ = \"TP\", \"FP\", \"FN\"\n\n def __init__(self, TP: int = 0, FP: int = 0, FN: int = 0) -> None:\n self.TP: int = TP\n self.FP: int = FP\n self.FN: int = FN\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, Confusions):\n return NotImplemented\n return self.TP == other.TP and self.FP == other.FP and self.FN == other.FN\n\n def __add__(self, other: \"Confusions\") -> \"Confusions\":\n return Confusions(\n TP=self.TP + other.TP, FP=self.FP + other.FP, FN=self.FN + other.FN\n )\n\n def __iadd__(self, other: \"Confusions\") -> \"Confusions\":\n self.TP += other.TP\n self.FP += other.FP\n self.FN += other.FN\n return self\n\n def _asdict(self) -> Dict:\n return {\"TP\": self.TP, \"FP\": self.FP, \"FN\": self.FN}\n\n def compute_metrics(self) -> PRF1Scores:\n precision, recall, f1 = compute_prf1(self.TP, self.FP, self.FN)\n return PRF1Scores(\n true_positives=self.TP,\n false_positives=self.FP,\n false_negatives=self.FN,\n precision=precision,\n recall=recall,\n f1=f1,\n )\n\n\nclass PerLabelConfusions:\n \"\"\"\n Per label confusion information.\n\n Attributes:\n label_confusions_map: Map from label string to the corresponding confusion\n counts.\n \"\"\"\n\n __slots__ = \"label_confusions_map\"\n\n def __init__(self) -> None:\n self.label_confusions_map: DefaultDict[str, Confusions] = defaultdict(\n Confusions\n )\n\n def update(self, label: str, item: str, count: int) -> None:\n \"\"\"\n Increase one of TP, FP or FN count for a label by certain amount.\n\n Args:\n label: Label to be modified.\n item: Type of count to be modified, should be one of \"TP\", \"FP\" or \"FN\".\n count: Amount to be added to the count.\n\n Returns:\n None\n \"\"\"\n confusions = self.label_confusions_map[label]\n setattr(confusions, item, getattr(confusions, item) + count)\n\n def compute_metrics(self) -> MacroPRF1Metrics:\n per_label_scores: Dict[str, PRF1Scores] = {}\n precision_sum, recall_sum, f1_sum = 0.0, 0.0, 0.0\n for label, confusions in sorted(self.label_confusions_map.items()):\n scores = confusions.compute_metrics()\n per_label_scores[label] = scores\n if confusions.TP + confusions.FN > 0:\n precision_sum += scores.precision\n recall_sum += scores.recall\n f1_sum += scores.f1\n num_labels = len(self.label_confusions_map)\n return MacroPRF1Metrics(\n per_label_scores=per_label_scores,\n macro_scores=MacroPRF1Scores(\n num_labels=num_labels,\n precision=safe_division(precision_sum, num_labels),\n recall=safe_division(recall_sum, num_labels),\n f1=safe_division(f1_sum, num_labels),\n ),\n )\n\n\nclass AllConfusions:\n \"\"\"\n Aggregated class for per label confusions.\n\n Attributes:\n per_label_confusions: Per label confusion information.\n confusions: Overall TP, FP and FN counts across the labels in\n `per_label_confusions`.\n \"\"\"\n\n __slots__ = 
\"per_label_confusions\", \"confusions\"\n\n def __init__(self) -> None:\n self.per_label_confusions = PerLabelConfusions()\n self.confusions = Confusions()\n\n def compute_metrics(self) -> PRF1Metrics:\n per_label_metrics = self.per_label_confusions.compute_metrics()\n return PRF1Metrics(\n per_label_scores=per_label_metrics.per_label_scores,\n macro_scores=per_label_metrics.macro_scores,\n micro_scores=self.confusions.compute_metrics(),\n )\n\n\nclass PairwiseRankingMetrics(NamedTuple):\n \"\"\"\n Metric class for pairwise ranking\n\n Attributes:\n num_examples (int): number of samples\n accuracy (float): how many times did we rank in the correct order\n average_score_difference (float): average score(higherRank) - score(lowerRank)\n\n \"\"\"\n\n num_examples: int\n accuracy: float\n average_score_difference: float\n\n def print_metrics(self) -> None:\n print(f\"RankingAccuracy: {self.accuracy * 100:.2f}\")\n print(f\"AvgScoreDiff: {self.average_score_difference}\")\n print(f\"NumExamples: {self.num_examples}\")\n\n\nclass RegressionMetrics(NamedTuple):\n \"\"\"\n Metrics for regression tasks.\n\n Attributes:\n num_examples (int): number of examples\n pearson_correlation (float): correlation between predictions and labels\n mse (float): mean-squared error between predictions and labels\n \"\"\"\n\n num_examples: int\n pearson_correlation: float\n mse: float\n\n def print_metrics(self):\n print(f\"Num examples: {self.num_examples}\")\n print(f\"Pearson correlation: {self.pearson_correlation:.3f}\")\n print(f\"Mean squared error: {self.mse:.3f}\")\n\n\nclass RealtimeMetrics(NamedTuple):\n \"\"\"\n Realtime Metrics for tracking training progress and performance.\n\n Attributes:\n samples (int): number of samples\n tps (float): tokens per second\n ups (float): updates per second\n \"\"\"\n\n samples: int\n tps: float\n ups: float\n\n def _format(self, key, value):\n if key in (\"tps\", \"ups\"):\n return round(value)\n return value\n\n def __str__(self):\n metrics = {\"num_gpus\": cuda.DISTRIBUTED_WORLD_SIZE}\n for key, value in self._asdict().items():\n if not value:\n continue\n metrics[key] = self._format(key, value)\n return str(metrics)\n\n\ndef safe_division(n: Union[int, float], d: int) -> float:\n return float(n) / d if d else 0.0\n\n\ndef compute_prf1(tp: int, fp: int, fn: int) -> Tuple[float, float, float]:\n precision = safe_division(tp, tp + fp)\n recall = safe_division(tp, tp + fn)\n f1 = safe_division(2 * tp, 2 * tp + fp + fn)\n return (precision, recall, f1)\n\n\ndef average_precision_score(\n y_true_sorted: np.ndarray, y_score_sorted: np.ndarray\n) -> float:\n \"\"\"\n Computes average precision, which summarizes the precision-recall curve as the\n precisions achieved at each threshold weighted by the increase in recall since the\n previous threshold.\n\n Args:\n y_true_sorted: Numpy array sorted according to decreasing confidence scores\n indicating whether each prediction is correct.\n y_score_sorted Numpy array of confidence scores for the predictions in\n decreasing order.\n\n Returns:\n Average precision score.\n\n TODO: This is too slow, improve the performance\n \"\"\"\n ap = 0.0\n tp = 0\n threshold = y_score_sorted[0]\n y_score_sorted = np.append(y_score_sorted[1:], np.NAN)\n total_positive = np.sum(y_true_sorted)\n added_positives = 0\n\n for k, (label, score) in enumerate(zip(y_true_sorted, y_score_sorted)):\n if label:\n added_positives += 1\n if score != threshold:\n threshold = score\n recall_diff = added_positives / total_positive\n tp += added_positives\n 
added_positives = 0\n p_at_tresh = tp / (k + 1)\n ap += p_at_tresh * recall_diff\n return float(ap)\n\n\ndef sort_by_score(y_true_list: Sequence[bool], y_score_list: Sequence[float]):\n y_true = np.array(y_true_list)\n y_score = np.array(y_score_list)\n sort_indices = np.argsort(y_score, kind=\"mergesort\")[::-1]\n y_true = y_true[sort_indices]\n y_score = y_score[sort_indices]\n return y_true, y_score\n\n\ndef recall_at_precision(\n y_true_sorted: np.ndarray, y_score_sorted: np.ndarray, thresholds: Sequence[float]\n) -> Dict[float, float]:\n \"\"\"\n Computes recall at various precision levels\n\n Args:\n y_true_sorted: Numpy array sorted according to decreasing confidence scores\n indicating whether each prediction is correct.\n y_score_sorted: Numpy array of confidence scores for the predictions in\n decreasing order.\n thresholds: Sequence of floats indicating the requested precision thresholds\n\n Returns:\n Dictionary of maximum recall at requested precision thresholds.\n \"\"\"\n y_score_shift = np.append(y_score_sorted[1:], np.nan)\n score_change = (y_score_sorted - y_score_shift) != 0\n cum_sum = np.cumsum(y_true_sorted)\n recall_at_precision_dict = {t: 0.0 for t in thresholds}\n decision_thresh_at_precision_dict = {t: 0.0 for t in thresholds}\n sum_y_true = y_true_sorted.sum()\n if sum_y_true == 0:\n return recall_at_precision_dict, decision_thresh_at_precision_dict\n recall = cum_sum / sum_y_true\n precision = cum_sum / np.array(range(1, len(y_true_sorted) + 1))\n\n for threshold in thresholds:\n meets_requirements = np.logical_and(precision >= threshold, score_change)\n if not np.any(meets_requirements):\n continue\n\n recall_at_precision_dict[threshold] = float(\n max(np.extract(meets_requirements, recall))\n )\n decision_thresh_at_precision_dict[threshold] = float(\n min(np.extract(meets_requirements, y_score_sorted))\n )\n\n return recall_at_precision_dict, decision_thresh_at_precision_dict\n\n\ndef compute_soft_metrics(\n predictions: Sequence[LabelPrediction],\n label_names: Sequence[str],\n recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,\n) -> Dict[str, SoftClassificationMetrics]:\n \"\"\"\n Computes soft classification metrics (for now, average precision) given a list of\n label predictions.\n\n Args:\n predictions: Label predictions, including the confidence score for each label.\n label_names: Indexed label names.\n recall_at_precision_thresholds: precision thresholds at which to calculate\n recall\n\n\n Returns:\n Dict from label strings to their corresponding soft metrics.\n \"\"\"\n soft_metrics = {}\n for i, label_name in enumerate(label_names):\n y_true = []\n y_score = []\n for label_scores, _, expected in predictions:\n y_true.append(expected == i)\n y_score.append(label_scores[i])\n y_true_sorted, y_score_sorted = sort_by_score(y_true, y_score)\n ap = average_precision_score(y_true_sorted, y_score_sorted)\n recall_at_precision_dict, decision_thresh_at_precision = recall_at_precision(\n y_true_sorted, y_score_sorted, recall_at_precision_thresholds\n )\n roc_auc = compute_roc_auc(predictions, target_class=i)\n soft_metrics[label_name] = SoftClassificationMetrics(\n average_precision=ap,\n recall_at_precision=recall_at_precision_dict,\n decision_thresh_at_precision=decision_thresh_at_precision,\n roc_auc=roc_auc,\n )\n return soft_metrics\n\n\ndef compute_multi_label_soft_metrics(\n predictions: Sequence[LabelListPrediction],\n label_names: Sequence[str],\n recall_at_precision_thresholds: Sequence[float] = 
RECALL_AT_PRECISION_THRESHOLDS,\n) -> Dict[str, SoftClassificationMetrics]:\n \"\"\"\n Computes multi-label soft classification metrics\n (for now, average precision)\n\n Args:\n predictions: multi-label predictions,\n including the confidence score for each label.\n label_names: Indexed label names.\n recall_at_precision_thresholds: precision thresholds at which to calculate\n recall\n\n\n Returns:\n Dict from label strings to their corresponding soft metrics.\n \"\"\"\n soft_metrics = {}\n for i, label_name in enumerate(label_names):\n y_true = []\n y_score = []\n for label_scores, _, expected in predictions:\n y_true.append(i in expected)\n y_score.append(label_scores[i])\n y_true_sorted, y_score_sorted = sort_by_score(y_true, y_score)\n ap = average_precision_score(y_true_sorted, y_score_sorted)\n recall_at_precision_dict, decision_thresh_at_precision = recall_at_precision(\n y_true_sorted, y_score_sorted, recall_at_precision_thresholds\n )\n roc_auc = compute_roc_auc(predictions, target_class=i)\n soft_metrics[label_name] = SoftClassificationMetrics(\n average_precision=ap,\n recall_at_precision=recall_at_precision_dict,\n decision_thresh_at_precision=decision_thresh_at_precision,\n roc_auc=roc_auc,\n )\n return soft_metrics\n\n\ndef compute_matthews_correlation_coefficients(\n TP: int, FP: int, FN: int, TN: int\n) -> float:\n \"\"\"\n Computes Matthews correlation coefficient, a way to summarize all four counts (TP,\n FP, FN, TN) in the confusion matrix of binary classification.\n\n Args:\n TP: Number of true positives.\n FP: Number of false positives.\n FN: Number of false negatives.\n TN: Number of true negatives.\n\n Returns:\n Matthews correlation coefficient, which is `sqrt((TP + FP) * (TP + FN) *\n (TN + FP) * (TN + FN))`.\n \"\"\"\n mcc = safe_division(\n (TP * TN) - (FP * FN),\n np.sqrt(float((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))),\n )\n return mcc\n\n\ndef compute_roc_auc(\n predictions: Sequence[LabelPrediction], target_class: int = 0\n) -> Optional[float]:\n \"\"\"\n Computes area under the Receiver Operating Characteristic curve, for binary\n classification. 
Implementation based off of (and explained at)\n https://www.ibm.com/developerworks/community/blogs/jfp/entry/Fast_Computation_of_AUC_ROC_score?lang=en.\n \"\"\"\n # Collect scores\n y_true = [expected == target_class for _, _, expected in predictions]\n y_score = [label_scores[target_class] for label_scores, _, _ in predictions]\n y_true_sorted, _ = sort_by_score(y_true, y_score)\n\n # Compute auc as probability that a positive example is scored higher than\n # a negative example.\n n_false = 0\n n_correct_pair_order = 0\n\n for y in reversed(y_true_sorted): # want low predicted to high predicted\n if y:\n n_correct_pair_order += n_false\n else:\n n_false += 1\n\n n_true = len(y_true) - n_false\n if n_true == 0 or n_false == 0:\n return None\n\n return float(n_correct_pair_order / (n_true * n_false))\n\n\ndef compute_classification_metrics(\n predictions: Sequence[LabelPrediction],\n label_names: Sequence[str],\n loss: float,\n average_precisions: bool = True,\n recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,\n) -> ClassificationMetrics:\n \"\"\"\n A general function that computes classification metrics given a list of label\n predictions.\n\n Args:\n predictions: Label predictions, including the confidence score for each label.\n label_names: Indexed label names.\n average_precisions: Whether to compute average precisions for labels or not.\n Defaults to True.\n recall_at_precision_thresholds: precision thresholds at which to calculate recall\n\n\n Returns:\n ClassificationMetrics which contains various classification metrics.\n \"\"\"\n num_correct = 0\n per_label_confusions = PerLabelConfusions()\n for _, predicted, expected in predictions:\n predicted_label = label_names[predicted]\n expected_label = label_names[expected]\n if predicted_label == expected_label:\n num_correct += 1\n per_label_confusions.update(expected_label, \"TP\", 1)\n else:\n per_label_confusions.update(expected_label, \"FN\", 1)\n per_label_confusions.update(predicted_label, \"FP\", 1)\n accuracy = safe_division(num_correct, len(predictions))\n macro_prf1_metrics = per_label_confusions.compute_metrics()\n\n soft_metrics = (\n compute_soft_metrics(predictions, label_names, recall_at_precision_thresholds)\n if average_precisions\n else None\n )\n\n if len(label_names) == 2:\n confusion_dict = per_label_confusions.label_confusions_map\n # Since MCC is symmetric, it doesn't matter which label is 0 and which is 1\n TP = confusion_dict[label_names[0]].TP\n FP = confusion_dict[label_names[0]].FP\n FN = confusion_dict[label_names[0]].FN\n TN = confusion_dict[label_names[1]].TP\n mcc: Optional[float] = compute_matthews_correlation_coefficients(TP, FP, FN, TN)\n roc_auc: Optional[float] = compute_roc_auc(predictions)\n else:\n mcc = None\n roc_auc = None\n\n return ClassificationMetrics(\n accuracy=accuracy,\n macro_prf1_metrics=macro_prf1_metrics,\n per_label_soft_scores=soft_metrics,\n mcc=mcc,\n roc_auc=roc_auc,\n loss=loss,\n )\n\n\ndef compute_multi_label_classification_metrics(\n predictions: Sequence[LabelListPrediction],\n label_names: Sequence[str],\n loss: float,\n average_precisions: bool = True,\n recall_at_precision_thresholds: Sequence[float] = RECALL_AT_PRECISION_THRESHOLDS,\n) -> ClassificationMetrics:\n \"\"\"\n A general function that computes classification metrics given a list of multi-label\n predictions.\n\n Args:\n predictions: multi-label predictions,\n including the confidence score for each label.\n label_names: Indexed label names.\n average_precisions: Whether to 
compute average precisions for labels or not.\n Defaults to True.\n recall_at_precision_thresholds: precision thresholds at which\n to calculate recall\n\n\n Returns:\n ClassificationMetrics which contains various classification metrics.\n \"\"\"\n\n num_correct = 0\n num_expected_labels = 0\n per_label_confusions = PerLabelConfusions()\n for _, predicted, expected in predictions:\n # \"predicted\" is in the format of n_hot_encoding\n # Calculate TP & FN\n for true_label_idx in expected:\n if true_label_idx < 0:\n # padded label \"-1\"\n break\n num_expected_labels += 1\n expected_label = label_names[true_label_idx]\n if predicted[true_label_idx] == 1:\n num_correct += 1\n per_label_confusions.update(expected_label, \"TP\", 1)\n else:\n per_label_confusions.update(expected_label, \"FN\", 1)\n # Calculate FP\n for idx, pred in enumerate(predicted):\n if pred == 1 and idx not in expected:\n predicted_label = label_names[idx]\n per_label_confusions.update(predicted_label, \"FP\", 1)\n\n accuracy = safe_division(num_correct, num_expected_labels)\n macro_prf1_metrics = per_label_confusions.compute_metrics()\n\n soft_metrics = (\n compute_multi_label_soft_metrics(\n predictions, label_names, recall_at_precision_thresholds\n )\n if average_precisions\n else None\n )\n\n if len(label_names) == 2:\n confusion_dict = per_label_confusions.label_confusions_map\n # Since MCC is symmetric, it doesn't matter which label is 0 and which is 1\n TP = confusion_dict[label_names[0]].TP\n FP = confusion_dict[label_names[0]].FP\n FN = confusion_dict[label_names[0]].FN\n TN = confusion_dict[label_names[1]].TP\n mcc: Optional[float] = compute_matthews_correlation_coefficients(TP, FP, FN, TN)\n roc_auc: Optional[float] = compute_roc_auc(predictions)\n else:\n mcc = None\n roc_auc = None\n\n return ClassificationMetrics(\n accuracy=accuracy,\n macro_prf1_metrics=macro_prf1_metrics,\n per_label_soft_scores=soft_metrics,\n mcc=mcc,\n roc_auc=roc_auc,\n loss=loss,\n )\n\n\ndef compute_pairwise_ranking_metrics(\n predictions: Sequence[int], scores: Sequence[float]\n) -> PairwiseRankingMetrics:\n \"\"\"\n Computes metrics for pairwise ranking given sequences of predictions and scores\n\n Args:\n predictions : 1 if ranking was correct, 0 if ranking was incorrect\n scores : score(higher-ranked-sample) - score(lower-ranked-sample)\n\n Returns:\n PairwiseRankingMetrics object\n \"\"\"\n return PairwiseRankingMetrics(\n num_examples=len(predictions),\n accuracy=safe_division(sum(predictions), len(predictions)),\n average_score_difference=safe_division(sum(scores), len(predictions)),\n )\n\n\ndef compute_regression_metrics(\n predictions: Sequence[float], targets: Sequence[float]\n) -> RegressionMetrics:\n \"\"\"\n Computes metrics for regression tasks.abs\n\n Args:\n predictions: 1-D sequence of float predictions\n targets: 1-D sequence of float labels\n\n Returns:\n RegressionMetrics object\n \"\"\"\n preds, targs = np.array(predictions), np.array(targets)\n pred_mean, targ_mean = preds.mean(), targs.mean()\n covariance = (preds - pred_mean).dot(targs - targ_mean) / preds.size\n corr = covariance / preds.std() / targs.std()\n\n mse = np.square(preds - targs).mean()\n return RegressionMetrics(num_examples=len(preds), pearson_correlation=corr, mse=mse)\n"
] | [
[
"numpy.sum",
"numpy.extract",
"numpy.cumsum",
"numpy.append",
"numpy.logical_and",
"numpy.any",
"numpy.argsort",
"numpy.array",
"numpy.square"
]
] |
Seb-Park/tensorflow-for-poets-2 | [
"1ef4112553b25f1c687b40b6872d4a97b9d44762"
] | [
"scripts/label_image_flask.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nfrom flask import Flask, escape, request, jsonify\n\nimport argparse\nimport sys\nimport time\n\nimport numpy as np\nimport requests\nimport tensorflow as tf\n\napp = Flask(__name__)\n\[email protected]('/')\ndef mainServer():\n file_name = \"tf_files/flower_photos/daisy/3475870145_685a19116d.jpg\"\n model_file = \"../tf_files/retrained_graph.pb\"#add ../ at beginning if running from scripts folder. If running from classiefier folder\n #e.g.python -m scripts.label_image_flask --graph=tf_files/retrained_graph.pb --image=caterCard.png\n\n label_file = \"tf_files/retrained_labels.txt\"\n input_height = 224\n input_width = 224\n input_mean = 128\n input_std = 128\n input_layer = \"input\"\n output_layer = \"final_result\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image\", help=\"image to be processed\")\n # parser.add_argument(request.args.get('image'))\n parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n # testString = request.args.get('str')\n # graph = \"../tf_files/retrained_graph.pb\"\n\n args = parser.parse_args()\n\n if args.graph:\n model_file = args.graph\n # if args.image:\n # file_name = args.image\n file_name = request.args.get('img')\n if args.labels:\n label_file = args.labels\n if args.input_height:\n input_height = args.input_height\n if args.input_width:\n input_width = args.input_width\n if args.input_mean:\n input_mean = args.input_mean\n if args.input_std:\n input_std = args.input_std\n if args.input_layer:\n input_layer = args.input_layer\n if args.output_layer:\n output_layer = args.output_layer\n\n graph = load_graph(model_file)\n t = read_tensor_from_image_url(file_name,\n input_height=input_height,\n input_width=input_width,\n input_mean=input_mean,\n input_std=input_std)\n\n input_name = \"import/\" + input_layer\n output_name = \"import/\" + output_layer\n input_operation = graph.get_operation_by_name(input_name);\n output_operation = graph.get_operation_by_name(output_name);\n\n with tf.Session(graph=graph) as sess:\n start = time.time()\n results = sess.run(output_operation.outputs[0],\n {input_operation.outputs[0]: t})\n end=time.time()\n results = np.squeeze(results)\n\n top_k = 
results.argsort()[-5:][::-1]\n labels = load_labels(label_file)\n\n print('\\nEvaluation time (1-image): {:.3f}s\\n'.format(end-start))\n template = '\"name\":\"{}\", \"score\":\"{:0.5f}\"'\n stringToReturn = '{\"possible_pokemon\": ['\n listOfPossiblePokes = []\n for i in top_k:\n listOfPossiblePokes.append({'name':labels[i],'score':str(results[i])})\n # print(template.format(labels[i], results[i]))\n stringToReturn += \"{\" + template.format(labels[i], results[i])+\"},\"\n stringToReturn = stringToReturn[:-1]#THIS REMOVES THE LAST COMMA\n stringToReturn += ']}'\n # print(jsonify(listOfPossiblePokes))\n return jsonify({'possible_pokemon':listOfPossiblePokes})\n # return (stringToReturn)\n # return \"<h1>Label Image Server!</h1>\" + \"\\n<h2>enter</h2>\"\n\n\ndef load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.GraphDef()\n\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n\n return graph\n\ndef read_tensor_from_image_file(file_name, input_height=299, input_width=299,\n\t\t\t\tinput_mean=0, input_std=255):\n input_name = \"file_reader\"\n output_name = \"normalized\"\n file_reader = tf.read_file(file_name, input_name)\n if file_name.endswith(\".png\"):\n image_reader = tf.image.decode_png(file_reader, channels = 3,\n name='png_reader')\n elif file_name.endswith(\".gif\"):\n image_reader = tf.squeeze(tf.image.decode_gif(file_reader,\n name='gif_reader'))\n elif file_name.endswith(\".bmp\"):\n image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')\n else:\n image_reader = tf.image.decode_jpeg(file_reader, channels = 3,\n name='jpeg_reader')\n # image_reader = tf.image.decode_jpeg(\n # requests.get(file_name).content, channels=3, name=\"jpeg_reader\")\n float_caster = tf.cast(image_reader, tf.float32)\n dims_expander = tf.expand_dims(float_caster, 0);\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n # sess = tf.Session()\n result = sess.run(normalized)\n\n return result\n\ndef read_tensor_from_image_url(url, input_height=299, input_width=299,\n\t\t\t\tinput_mean=0, input_std=255):\n input_name = \"file_reader\"\n output_name = \"normalized\"\n file_reader = tf.read_file(url, input_name)\n if url.endswith(\".png\"):\n image_reader = tf.image.decode_png(requests.get(url).content, channels = 3,\n name='png_reader')\n elif url.endswith(\".gif\"):\n image_reader = tf.squeeze(tf.image.decode_gif(requests.get(url).content,\n name='gif_reader'))\n elif url.endswith(\".bmp\"):\n image_reader = tf.image.decode_bmp(requests.get(url).content, name='bmp_reader')\n else:\n image_reader = tf.image.decode_jpeg(\n requests.get(url).content, channels=3, name=\"jpeg_reader\")\n float_caster = tf.cast(image_reader, tf.float32)\n dims_expander = tf.expand_dims(float_caster, 0);\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n # sess = tf.Session()\n result = sess.run(normalized)\n\n return result\n\ndef load_labels(label_file):\n label = []\n proto_as_ascii_lines = tf.io.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return 
label\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=3008, host='0.0.0.0')\n # file_name = \"tf_files/flower_photos/daisy/3475870145_685a19116d.jpg\"\n # model_file = \"tf_files/retrained_graph.pb\"\n # label_file = \"tf_files/retrained_labels.txt\"\n # input_height = 224\n # input_width = 224\n # input_mean = 128\n # input_std = 128\n # input_layer = \"input\"\n # output_layer = \"final_result\"\n #\n # parser = argparse.ArgumentParser()\n # parser.add_argument(\"--image\", help=\"image to be processed\")\n # parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n # parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n # parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n # parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n # parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n # parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n # parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n # parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n # mainServer(parser)\n"
] | [
[
"tensorflow.image.decode_bmp",
"numpy.squeeze",
"tensorflow.subtract",
"tensorflow.io.gfile.GFile",
"tensorflow.expand_dims",
"tensorflow.read_file",
"tensorflow.Graph",
"tensorflow.cast",
"tensorflow.image.resize_bilinear",
"tensorflow.Session",
"tensorflow.import_graph_def",
"tensorflow.image.decode_jpeg",
"tensorflow.image.decode_png",
"tensorflow.image.decode_gif",
"tensorflow.ConfigProto",
"tensorflow.GraphDef"
]
] |
sanweiliti/HMP | [
"abb37a553c9ebeccf746225331bd90ccc0e33df9"
] | [
"utils/Quaternions.py"
] | [
"import numpy as np\r\n\r\nclass Quaternions:\r\n \"\"\"\r\n Quaternions is a wrapper around a numpy ndarray\r\n that allows it to act as if it were an narray of\r\n a quaternion data type.\r\n \r\n Therefore addition, subtraction, multiplication,\r\n division, negation, absolute, are all defined\r\n in terms of quaternion operations such as quaternion\r\n multiplication.\r\n \r\n This allows for much neater code and many routines\r\n which conceptually do the same thing to be written\r\n in the same way for point data and for rotation data.\r\n \r\n The Quaternions class has been desgined such that it\r\n should support broadcasting and slicing in all of the\r\n usual ways.\r\n \"\"\"\r\n \r\n def __init__(self, qs):\r\n if isinstance(qs, np.ndarray):\r\n \r\n if len(qs.shape) == 1: qs = np.array([qs])\r\n self.qs = qs\r\n return\r\n \r\n if isinstance(qs, Quaternions):\r\n self.qs = qs.qs\r\n return\r\n \r\n raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))\r\n \r\n def __str__(self): return \"Quaternions(\"+ str(self.qs) + \")\"\r\n def __repr__(self): return \"Quaternions(\"+ repr(self.qs) + \")\"\r\n \r\n \"\"\" Helper Methods for Broadcasting and Data extraction \"\"\"\r\n \r\n @classmethod\r\n def _broadcast(cls, sqs, oqs, scalar=False):\r\n \r\n if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])\r\n \r\n ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])\r\n os = np.array(oqs.shape)\r\n \r\n if len(ss) != len(os):\r\n raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))\r\n \r\n if np.all(ss == os): return sqs, oqs\r\n \r\n if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):\r\n raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))\r\n \r\n sqsn, oqsn = sqs.copy(), oqs.copy()\r\n \r\n for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)\r\n for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)\r\n \r\n return sqsn, oqsn\r\n \r\n \"\"\" Adding Quaterions is just Defined as Multiplication \"\"\"\r\n \r\n def __add__(self, other): return self * other\r\n def __sub__(self, other): return self / other\r\n \r\n \"\"\" Quaterion Multiplication \"\"\"\r\n \r\n def __mul__(self, other):\r\n \"\"\"\r\n Quaternion multiplication has three main methods.\r\n \r\n When multiplying a Quaternions array by Quaternions\r\n normal quaternion multiplication is performed.\r\n \r\n When multiplying a Quaternions array by a vector\r\n array of the same shape, where the last axis is 3,\r\n it is assumed to be a Quaternion by 3D-Vector \r\n multiplication and the 3D-Vectors are rotated\r\n in space by the Quaternions.\r\n \r\n When multipplying a Quaternions array by a scalar\r\n or vector of different shape it is assumed to be\r\n a Quaternions by Scalars multiplication and the\r\n Quaternions are scaled using Slerp and the identity\r\n quaternions.\r\n \"\"\"\r\n \r\n \"\"\" If Quaternions type do Quaternions * Quaternions \"\"\"\r\n if isinstance(other, Quaternions):\r\n \r\n sqs, oqs = Quaternions._broadcast(self.qs, other.qs)\r\n \r\n q0 = sqs[...,0]; q1 = sqs[...,1]; \r\n q2 = sqs[...,2]; q3 = sqs[...,3]; \r\n r0 = oqs[...,0]; r1 = oqs[...,1]; \r\n r2 = oqs[...,2]; r3 = oqs[...,3]; \r\n \r\n qs = np.empty(sqs.shape)\r\n qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3\r\n qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2\r\n qs[...,2] = r0 * q2 + r1 * q3 + r2 * 
q0 - r3 * q1\r\n qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0\r\n \r\n return Quaternions(qs)\r\n \r\n \"\"\" If array type do Quaternions * Vectors \"\"\"\r\n if isinstance(other, np.ndarray) and other.shape[-1] == 3:\r\n vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))\r\n return (self * (vs * -self)).imaginaries\r\n \r\n \"\"\" If float do Quaternions * Scalars \"\"\"\r\n if isinstance(other, np.ndarray) or isinstance(other, float):\r\n return Quaternions.slerp(Quaternions.id_like(self), self, other)\r\n \r\n raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))\r\n \r\n def __div__(self, other):\r\n \"\"\"\r\n When a Quaternion type is supplied, division is defined\r\n as multiplication by the inverse of that Quaternion.\r\n \r\n When a scalar or vector is supplied it is defined\r\n as multiplicaion of one over the supplied value.\r\n Essentially a scaling.\r\n \"\"\"\r\n \r\n if isinstance(other, Quaternions): return self * (-other)\r\n if isinstance(other, np.ndarray): return self * (1.0 / other)\r\n if isinstance(other, float): return self * (1.0 / other)\r\n raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))\r\n \r\n def __eq__(self, other): return self.qs == other.qs\r\n def __ne__(self, other): return self.qs != other.qs\r\n \r\n def __neg__(self):\r\n \"\"\" Invert Quaternions \"\"\"\r\n return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))\r\n \r\n def __abs__(self):\r\n \"\"\" Unify Quaternions To Single Pole \"\"\"\r\n qabs = self.normalized().copy()\r\n top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)\r\n bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)\r\n qabs.qs[top < bot] = -qabs.qs[top < bot]\r\n return qabs\r\n \r\n def __iter__(self): return iter(self.qs)\r\n def __len__(self): return len(self.qs)\r\n \r\n def __getitem__(self, k): return Quaternions(self.qs[k]) \r\n def __setitem__(self, k, v): self.qs[k] = v.qs\r\n \r\n @property\r\n def lengths(self):\r\n return np.sum(self.qs**2.0, axis=-1)**0.5\r\n \r\n @property\r\n def reals(self):\r\n return self.qs[...,0]\r\n \r\n @property\r\n def imaginaries(self):\r\n return self.qs[...,1:4]\r\n \r\n @property\r\n def shape(self): return self.qs.shape[:-1]\r\n \r\n def repeat(self, n, **kwargs):\r\n return Quaternions(self.qs.repeat(n, **kwargs))\r\n \r\n def normalized(self):\r\n return Quaternions(self.qs / self.lengths[...,np.newaxis])\r\n \r\n def log(self):\r\n norm = abs(self.normalized())\r\n imgs = norm.imaginaries\r\n lens = np.sqrt(np.sum(imgs**2, axis=-1))\r\n lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)\r\n return imgs * lens[...,np.newaxis]\r\n \r\n def constrained(self, axis):\r\n \r\n rl = self.reals\r\n im = np.sum(axis * self.imaginaries, axis=-1)\r\n \r\n t1 = -2 * np.arctan2(rl, im) + np.pi\r\n t2 = -2 * np.arctan2(rl, im) - np.pi\r\n \r\n top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))\r\n bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))\r\n img = self.dot(top) > self.dot(bot)\r\n \r\n ret = top.copy()\r\n ret[ img] = top[ img]\r\n ret[~img] = bot[~img]\r\n return ret\r\n \r\n def constrained_x(self): return self.constrained(np.array([1,0,0]))\r\n def constrained_y(self): return self.constrained(np.array([0,1,0]))\r\n def constrained_z(self): return self.constrained(np.array([0,0,1]))\r\n \r\n def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)\r\n \r\n def copy(self): return Quaternions(np.copy(self.qs))\r\n \r\n def reshape(self, 
s):\r\n self.qs.reshape(s)\r\n return self\r\n \r\n def interpolate(self, ws):\r\n return Quaternions.exp(np.average(abs(self).log, axis=0, weights=ws))\r\n \r\n def euler(self, order='xyz'):\r\n \r\n q = self.normalized().qs\r\n q0 = q[...,0]\r\n q1 = q[...,1]\r\n q2 = q[...,2]\r\n q3 = q[...,3]\r\n es = np.zeros(self.shape + (3,))\r\n \r\n if order == 'xyz':\r\n es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))\r\n es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))\r\n es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))\r\n elif order == 'yzx':\r\n es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)\r\n es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)\r\n es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))\r\n else:\r\n raise NotImplementedError('Cannot convert from ordering %s' % order)\r\n \r\n \"\"\"\r\n \r\n # These conversion don't appear to work correctly for Maya.\r\n # http://bediyap.com/programming/convert-quaternion-to-euler-rotations/\r\n \r\n if order == 'xyz':\r\n es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\r\n elif order == 'yzx':\r\n es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\r\n elif order == 'zxy':\r\n es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) \r\n elif order == 'xzy':\r\n es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)\r\n elif order == 'yxz':\r\n es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\r\n elif order == 'zyx':\r\n es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\r\n es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))\r\n es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\r\n else:\r\n raise KeyError('Unknown ordering %s' % order)\r\n \r\n \"\"\"\r\n \r\n # https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp\r\n # Use this class and convert from matrix\r\n \r\n return es\r\n \r\n \r\n def average(self):\r\n \r\n if len(self.shape) == 1:\r\n \r\n import numpy.core.umath_tests as ut\r\n system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0)\r\n w, v = np.linalg.eigh(system)\r\n qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)\r\n return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))]) \r\n \r\n else:\r\n \r\n raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')\r\n\r\n def angle_axis(self):\r\n 
\r\n norm = self.normalized() \r\n s = np.sqrt(1 - (norm.reals**2.0))\r\n s[s == 0] = 0.001\r\n \r\n angles = 2.0 * np.arccos(norm.reals)\r\n axis = norm.imaginaries / s[...,np.newaxis]\r\n \r\n return angles, axis\r\n \r\n \r\n def transforms(self):\r\n \r\n qw = self.qs[...,0]\r\n qx = self.qs[...,1]\r\n qy = self.qs[...,2]\r\n qz = self.qs[...,3]\r\n \r\n x2 = qx + qx; y2 = qy + qy; z2 = qz + qz;\r\n xx = qx * x2; yy = qy * y2; wx = qw * x2;\r\n xy = qx * y2; yz = qy * z2; wy = qw * y2;\r\n xz = qx * z2; zz = qz * z2; wz = qw * z2;\r\n \r\n m = np.empty(self.shape + (3,3))\r\n m[...,0,0] = 1.0 - (yy + zz)\r\n m[...,0,1] = xy - wz\r\n m[...,0,2] = xz + wy \r\n m[...,1,0] = xy + wz\r\n m[...,1,1] = 1.0 - (xx + zz)\r\n m[...,1,2] = yz - wx \r\n m[...,2,0] = xz - wy\r\n m[...,2,1] = yz + wx\r\n m[...,2,2] = 1.0 - (xx + yy)\r\n \r\n return m\r\n \r\n def ravel(self):\r\n return self.qs.ravel()\r\n \r\n @classmethod\r\n def id(cls, n):\r\n \r\n if isinstance(n, tuple):\r\n qs = np.zeros(n + (4,))\r\n qs[...,0] = 1.0\r\n return Quaternions(qs)\r\n \r\n if isinstance(n, int) or isinstance(n, long):\r\n qs = np.zeros((n,4))\r\n qs[:,0] = 1.0\r\n return Quaternions(qs)\r\n \r\n raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))\r\n\r\n @classmethod\r\n def id_like(cls, a):\r\n qs = np.zeros(a.shape + (4,))\r\n qs[...,0] = 1.0\r\n return Quaternions(qs)\r\n \r\n @classmethod\r\n def exp(cls, ws):\r\n \r\n ts = np.sum(ws**2.0, axis=-1)**0.5\r\n ts[ts == 0] = 0.001\r\n ls = np.sin(ts) / ts\r\n \r\n qs = np.empty(ws.shape[:-1] + (4,))\r\n qs[...,0] = np.cos(ts)\r\n qs[...,1] = ws[...,0] * ls\r\n qs[...,2] = ws[...,1] * ls\r\n qs[...,3] = ws[...,2] * ls\r\n \r\n return Quaternions(qs).normalized()\r\n \r\n @classmethod\r\n def slerp(cls, q0s, q1s, a):\r\n \r\n fst, snd = cls._broadcast(q0s.qs, q1s.qs)\r\n fst, a = cls._broadcast(fst, a, scalar=True)\r\n snd, a = cls._broadcast(snd, a, scalar=True)\r\n \r\n len = np.sum(fst * snd, axis=-1)\r\n \r\n neg = len < 0.0\r\n len[neg] = -len[neg]\r\n snd[neg] = -snd[neg]\r\n \r\n amount0 = np.zeros(a.shape)\r\n amount1 = np.zeros(a.shape)\r\n\r\n linear = (1.0 - len) < 0.01\r\n omegas = np.arccos(len[~linear])\r\n sinoms = np.sin(omegas)\r\n \r\n amount0[ linear] = 1.0 - a[linear]\r\n amount1[ linear] = a[linear]\r\n amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms\r\n amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms\r\n \r\n return Quaternions(\r\n amount0[...,np.newaxis] * fst + \r\n amount1[...,np.newaxis] * snd)\r\n \r\n @classmethod\r\n def between(cls, v0s, v1s):\r\n a = np.cross(v0s, v1s)\r\n w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)\r\n return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()\r\n \r\n @classmethod\r\n def from_angle_axis(cls, angles, axis):\r\n axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis]\r\n sines = np.sin(angles / 2.0)[...,np.newaxis]\r\n cosines = np.cos(angles / 2.0)[...,np.newaxis]\r\n return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))\r\n \r\n @classmethod\r\n def from_euler(cls, es, order='xyz', world=False):\r\n \r\n axis = {\r\n 'x' : np.array([1,0,0]),\r\n 'y' : np.array([0,1,0]),\r\n 'z' : np.array([0,0,1]),\r\n }\r\n \r\n q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]])\r\n q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]])\r\n q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]])\r\n \r\n return (q2s * (q1s * q0s)) if world else (q0s * 
(q1s * q2s))\r\n \r\n @classmethod\r\n def from_transforms(cls, ts):\r\n \r\n d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]\r\n \r\n q0 = ( d0 + d1 + d2 + 1.0) / 4.0\r\n q1 = ( d0 - d1 - d2 + 1.0) / 4.0\r\n q2 = (-d0 + d1 - d2 + 1.0) / 4.0\r\n q3 = (-d0 - d1 + d2 + 1.0) / 4.0\r\n \r\n q0 = np.sqrt(q0.clip(0,None))\r\n q1 = np.sqrt(q1.clip(0,None))\r\n q2 = np.sqrt(q2.clip(0,None))\r\n q3 = np.sqrt(q3.clip(0,None))\r\n \r\n c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)\r\n c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)\r\n c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)\r\n c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)\r\n \r\n q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])\r\n q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])\r\n q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])\r\n \r\n q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])\r\n q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])\r\n q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0]) \r\n \r\n q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])\r\n q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])\r\n q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2]) \r\n \r\n q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])\r\n q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])\r\n q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2]) \r\n \r\n qs = np.empty(ts.shape[:-2] + (4,))\r\n qs[...,0] = q0\r\n qs[...,1] = q1\r\n qs[...,2] = q2\r\n qs[...,3] = q3\r\n \r\n return cls(qs)\r\n \r\n \r\n "
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.arctan2",
"numpy.ones",
"numpy.sign",
"numpy.empty",
"numpy.zeros",
"numpy.concatenate",
"numpy.linalg.eigh",
"numpy.arccos",
"numpy.cos",
"numpy.cross",
"numpy.copy",
"numpy.all",
"numpy.core.umath_tests.matrix_multiply",
"numpy.array",
"numpy.sin",
"numpy.where"
]
] |
RingoIngo/gluon-ts | [
"62fb20c36025fc969653accaffaa783671709564",
"62fb20c36025fc969653accaffaa783671709564"
] | [
"src/gluonts/nursery/tsbench/src/tsbench/surrogate/deepset.py",
"src/gluonts/nursery/SCott/test/evaluation/test_evaluator.py"
] | [
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import cast, List, Literal, Optional\nimport numpy as np\nimport numpy.typing as npt\nimport pytorch_lightning as pl\nimport torch\nfrom lightkit.data import DataLoader\nfrom torch import nn\nfrom torch.utils.data import TensorDataset\nfrom tsbench.config import Config, EnsembleConfig\nfrom tsbench.evaluations.tracking import EnsembleTracker\nfrom tsbench.surrogate.torch.deepset import DeepSetModel\nfrom ._base import OutputNormalization, Surrogate\nfrom ._factory import register_ensemble_surrogate\nfrom .torch import DeepSetLightningModule, ListMLELoss\nfrom .transformers import EnsembleConfigTransformer\n\n\n@register_ensemble_surrogate(\"deepset\")\nclass DeepSetSurrogate(Surrogate[EnsembleConfig]):\n \"\"\"\n The DeepSet surrogate is similar to the MLP surrogate but makes predictions for ensembles\n rather than single models. Currently, it does not support the use of dataset features.\n \"\"\"\n\n trainer_: pl.Trainer\n models_: List[nn.Module]\n\n def __init__(\n self,\n tracker: EnsembleTracker,\n objective: Literal[\"regression\", \"ranking\"] = \"ranking\",\n discount: Optional[\n Literal[\"logarithmic\", \"linear\", \"quadratic\"]\n ] = \"linear\",\n hidden_layer_sizes: Optional[List[int]] = None,\n weight_decay: float = 0.01,\n dropout: float = 0.0,\n predict: Optional[List[str]] = None,\n output_normalization: OutputNormalization = None,\n impute_simulatable: bool = False,\n ):\n \"\"\"\n Args:\n tracker: A tracker that can be used to impute latency and number of model parameters\n into model performances. Also, it is required for some input features.\n objective: The optimization objective for the XGBoost estimators.\n discount: The discount to apply for the ranking loss. If provided, it focuses on\n correctly predicting the top values.\n hidden_layer_sizes: The dimensions of the hidden layers. Defaults to two hidden layers\n of size 32.\n weight_decay: The weight decay to apply during optimization.\n dropout: The dropout probability of dropout layers applied after every activation\n function.\n predict: The metrics to predict. All if not provided.\n output_normalization: The type of normalization to apply to the features of each\n dataset independently. 
`None` applies no normalization, \"quantile\" applies quantile\n normalization, and \"standard\" transforms data to have zero mean and unit variance.\n impute_simulatable: Whether the tracker should impute latency and number of model\n parameters into the returned performance object.\n \"\"\"\n\n super().__init__(\n tracker, predict, output_normalization, impute_simulatable\n )\n\n self.use_ranking = objective == \"ranking\"\n self.hidden_layer_sizes = hidden_layer_sizes or [32, 32]\n self.weight_decay = weight_decay\n self.dropout = dropout\n\n self.config_transformer = EnsembleConfigTransformer()\n\n if objective == \"regression\":\n self.loss = nn.MSELoss()\n elif objective == \"ranking\":\n self.loss = ListMLELoss(discount=discount)\n\n @property\n def required_cpus(self) -> int:\n return 4\n\n def _fit(\n self, X: List[Config[EnsembleConfig]], y: npt.NDArray[np.float32]\n ) -> None:\n # Fit transformers to infer dimensionality\n X_numpy_list = self.config_transformer.fit_transform(X)\n X_numpy = np.concatenate(X_numpy_list)\n X_lengths_numpy = np.array([x.shape[0] for x in X_numpy_list])\n\n input_dim = len(self.config_transformer.feature_names_)\n output_dim = y.shape[1]\n\n # For initializing data, we prepare group IDs for the datasets\n mapping = {d: i for i, d in enumerate({x.dataset for x in X})}\n\n # For each output variable, we need to train a separate model\n self.models_ = []\n for i in range(output_dim):\n model = self._init_model(input_dim)\n module = DeepSetLightningModule(\n model, self.loss, self.weight_decay\n )\n\n # Train on output variable i\n dataset = TensorDataset(\n torch.from_numpy(X_numpy).float(),\n torch.from_numpy(X_lengths_numpy).long(),\n torch.from_numpy(y[:, i : i + 1]).float(),\n torch.as_tensor(\n [mapping[x.dataset] for x in X], dtype=torch.long\n ),\n )\n train_loader = DataLoader(dataset, batch_size=len(dataset))\n self._trainer.fit(module, train_dataloaders=train_loader)\n\n # Add to models\n self.models_.append(model)\n\n def _predict(\n self, X: List[Config[EnsembleConfig]]\n ) -> npt.NDArray[np.float32]:\n # Get data\n X_numpy_list = self.config_transformer.transform(X)\n X_numpy = np.concatenate(X_numpy_list)\n X_lengths_numpy = np.array([x.shape[0] for x in X_numpy_list])\n\n dataset = TensorDataset(\n torch.from_numpy(X_numpy).float(),\n torch.from_numpy(X_lengths_numpy).long(),\n )\n test_loader = DataLoader(dataset, batch_size=len(dataset))\n\n # Run prediction\n predictions = []\n for model in self.models_:\n module = DeepSetLightningModule(model, self.loss)\n out = cast(\n List[torch.Tensor], self._trainer.predict(module, test_loader)\n )\n predictions.append(out[0].numpy())\n\n return np.concatenate(predictions, axis=-1)\n\n @property\n def _trainer(self) -> pl.Trainer:\n return pl.Trainer(\n max_epochs=1000,\n logger=False,\n enable_checkpointing=False,\n enable_model_summary=False,\n enable_progress_bar=False,\n gpus=int(torch.cuda.is_available()),\n )\n\n def _init_model(self, input_dim: int) -> nn.Module:\n return DeepSetModel(\n input_dim,\n self.hidden_layer_sizes[-1],\n 1,\n self.hidden_layer_sizes,\n self.hidden_layer_sizes,\n self.dropout,\n )\n",
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Third-party imports\nimport numpy as np\nimport pandas as pd\nimport pytest\n\n# First-party imports\nfrom pts.evaluation import (\n Evaluator,\n MultivariateEvaluator,\n)\nfrom pts.feature import get_seasonality\nfrom pts.model.forecast import QuantileForecast, SampleForecast\n\nQUANTILES = [str(q / 10.0) for q in range(1, 10)]\n\n\ndef data_iterator(ts):\n \"\"\"\n :param ts: list of pd.Series or pd.DataFrame\n :return:\n \"\"\"\n for i in range(len(ts)):\n yield ts[i]\n\n\ndef fcst_iterator(fcst, start_dates, freq):\n \"\"\"\n :param fcst: list of numpy arrays with the sample paths\n :return:\n \"\"\"\n for i in range(len(fcst)):\n yield SampleForecast(\n samples=fcst[i], start_date=start_dates[i], freq=freq\n )\n\n\ndef iterator(it):\n \"\"\"\n Convenience function to toggle whether to consume dataset and forecasts as iterators or iterables.\n :param it:\n :return: it (as iterator)\n \"\"\"\n return iter(it)\n\n\ndef iterable(it):\n \"\"\"\n Convenience function to toggle whether to consume dataset and forecasts as iterators or iterables.\n :param it:\n :return: it (as iterable)\n \"\"\"\n return list(it)\n\n\ndef naive_forecaster(ts, prediction_length, num_samples=100, target_dim=0):\n \"\"\"\n :param ts: pandas.Series\n :param prediction_length:\n :param num_samples: number of sample paths\n :param target_dim: number of axes of target (0: scalar, 1: array, ...)\n :return: np.array with dimension (num_samples, prediction_length)\n \"\"\"\n\n # naive prediction: last observed value\n naive_pred = ts.values[-prediction_length - 1]\n assert len(naive_pred.shape) == target_dim\n return np.tile(\n naive_pred,\n (num_samples, prediction_length) + tuple(1 for _ in range(target_dim)),\n )\n\n\ndef naive_multivariate_forecaster(ts, prediction_length, num_samples=100):\n return naive_forecaster(ts, prediction_length, num_samples, target_dim=1)\n\n\ndef calculate_metrics(\n timeseries,\n evaluator,\n ts_datastructure,\n has_nans=False,\n forecaster=naive_forecaster,\n input_type=iterator,\n):\n num_timeseries = timeseries.shape[0]\n num_timestamps = timeseries.shape[1]\n\n if has_nans:\n timeseries[0, 1] = np.nan\n timeseries[0, 7] = np.nan\n\n num_samples = 100\n prediction_length = 3\n freq = \"1D\"\n\n ts_start_dates = (\n []\n ) # starting date of each time series - can be different in general\n pd_timeseries = [] # list of pandas.DataFrame\n samples = [] # list of forecast samples\n start_dates = [] # start date of the prediction range\n for i in range(num_timeseries):\n ts_start_dates.append(pd.Timestamp(year=2018, month=1, day=1, hour=1))\n index = pd.date_range(\n ts_start_dates[i], periods=num_timestamps, freq=freq\n )\n\n pd_timeseries.append(ts_datastructure(timeseries[i], index=index))\n samples.append(\n forecaster(pd_timeseries[i], prediction_length, num_samples)\n )\n start_dates.append(\n pd.date_range(\n ts_start_dates[i], periods=num_timestamps, freq=freq\n )[-prediction_length]\n )\n\n # data 
iterator\n data_iter = input_type(data_iterator(pd_timeseries))\n fcst_iter = input_type(fcst_iterator(samples, start_dates, freq))\n\n # evaluate\n agg_df, item_df = evaluator(data_iter, fcst_iter)\n return agg_df, item_df\n\n\nTIMESERIES_M4 = [\n np.array(\n [\n [\n 2.943_013,\n 2.822_251,\n 4.196_222,\n 1.328_664,\n 4.947_390,\n 3.333_131,\n 1.479_800,\n 2.265_094,\n 3.413_493,\n 3.497_607,\n ],\n [\n -0.126_781_2,\n 3.057_412_2,\n 1.901_594_4,\n 2.772_549_5,\n 3.312_853_1,\n 4.411_818_0,\n 3.709_025_2,\n 4.322_028,\n 2.565_359,\n 3.074_308,\n ],\n [\n 2.542_998,\n 2.336_757,\n 1.417_916,\n 1.335_139,\n 2.523_035,\n 3.645_589,\n 3.382_819,\n 2.075_960,\n 2.643_869,\n 2.772_456,\n ],\n [\n 0.315_685_6,\n 1.892_312_1,\n 2.476_861_2,\n 3.511_628_6,\n 4.384_346_5,\n 2.960_685_6,\n 4.897_572_5,\n 3.280_125,\n 4.768_556,\n 4.958_616,\n ],\n [\n 2.205_877_3,\n 0.782_759_4,\n 2.401_420_8,\n 2.385_643_4,\n 4.845_818_2,\n 3.102_322_9,\n 3.567_723_7,\n 4.878_143,\n 3.735_245,\n 2.218_113,\n ],\n ]\n ),\n np.array(\n [\n [\n 13.11301,\n 13.16225,\n 14.70622,\n 12.00866,\n 15.79739,\n 14.35313,\n 12.66980,\n 13.62509,\n 14.94349,\n 15.19761,\n ],\n [\n 10.04322,\n 13.39741,\n 12.41159,\n 13.45255,\n 14.16285,\n 15.43182,\n 14.89903,\n 15.68203,\n 14.09536,\n 14.77431,\n ],\n [\n 12.71300,\n 12.67676,\n 11.92792,\n 12.01514,\n 13.37303,\n 14.66559,\n 14.57282,\n 13.43596,\n 14.17387,\n 14.47246,\n ],\n [\n 10.48569,\n 12.23231,\n 12.98686,\n 14.19163,\n 15.23435,\n 13.98069,\n 16.08757,\n 14.64012,\n 16.29856,\n 16.65862,\n ],\n [\n 12.37588,\n 11.12276,\n 12.91142,\n 13.06564,\n 15.69582,\n 14.12232,\n 14.75772,\n 16.23814,\n 15.26524,\n 13.91811,\n ],\n ]\n ),\n]\n\nRES_M4 = [\n {\n \"MASE\": 0.816_837_618,\n \"MAPE\": 0.324_517_430_685_928_1,\n \"sMAPE\": 0.326_973_268_4,\n \"seasonal_error\": np.array(\n [1.908_101, 1.258_838, 0.63018, 1.238_201, 1.287_771]\n ),\n },\n {\n \"MASE\": 0.723_948_2,\n \"MAPE\": 0.063_634_129_851_747_6,\n \"sMAPE\": 0.065_310_85,\n \"seasonal_error\": np.array(\n [1.867_847, 1.315_505, 0.602_587_4, 1.351_535, 1.339_179]\n ),\n },\n]\n\n\[email protected](\"timeseries, res\", zip(TIMESERIES_M4, RES_M4))\ndef test_MASE_sMAPE_M4(timeseries, res):\n ts_datastructure = pd.Series\n evaluator = Evaluator(quantiles=QUANTILES)\n agg_df, item_df = calculate_metrics(\n timeseries, evaluator, ts_datastructure\n )\n\n assert abs((agg_df[\"MASE\"] - res[\"MASE\"]) / res[\"MASE\"]) < 0.001, (\n \"Scores for the metric MASE do not match: \"\n \"\\nexpected: {} \\nobtained: {}\".format(res[\"MASE\"], agg_df[\"MASE\"])\n )\n assert abs((agg_df[\"MAPE\"] - res[\"MAPE\"]) / res[\"MAPE\"]) < 0.001, (\n \"Scores for the metric MAPE do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(res[\"MAPE\"], agg_df[\"MAPE\"])\n )\n assert abs((agg_df[\"sMAPE\"] - res[\"sMAPE\"]) / res[\"sMAPE\"]) < 0.001, (\n \"Scores for the metric sMAPE do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(res[\"sMAPE\"], agg_df[\"sMAPE\"])\n )\n assert (\n sum(abs(item_df[\"seasonal_error\"].values - res[\"seasonal_error\"]))\n < 0.001\n ), (\n \"Scores for the metric seasonal_error do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(\n res[\"seasonal_error\"], item_df[\"seasonal_error\"].values\n )\n )\n\n\nTIMESERIES = [\n np.ones((5, 10), dtype=np.float64),\n np.ones((5, 10), dtype=np.float64),\n np.arange(0, 50, dtype=np.float64).reshape(5, 10),\n np.arange(0, 50, dtype=np.float64).reshape(5, 10),\n np.array([[np.nan] * 10, [1.0] * 10]),\n]\n\nRES = [\n {\n \"MSE\": 0.0,\n 
\"abs_error\": 0.0,\n \"abs_target_sum\": 15.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"MAPE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n },\n {\n \"MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 14.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"MAPE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n },\n {\n \"MSE\": 4.666_666_666_666,\n \"abs_error\": 30.0,\n \"abs_target_sum\": 420.0,\n \"abs_target_mean\": 28.0,\n \"seasonal_error\": 1.0,\n \"MASE\": 2.0,\n \"MAPE\": 0.103_112_211_532_524_85,\n \"sMAPE\": 0.113_254_049_3,\n \"MSIS\": 80.0,\n \"RMSE\": 2.160_246_899_469_286_9,\n \"NRMSE\": 0.077_151_674_981_045_956,\n \"ND\": 0.071_428_571_428_571_42,\n \"MAE_Coverage\": 0.5,\n },\n {\n \"MSE\": 5.033_333_333_333_3,\n \"abs_error\": 29.0,\n \"abs_target_sum\": 413.0,\n \"abs_target_mean\": 28.1,\n \"seasonal_error\": 1.0,\n \"MASE\": 2.1,\n \"MAPE\": 0.113_032_846_453_159_77,\n \"sMAPE\": 0.125_854_781_903_299_57,\n \"MSIS\": 84.0,\n \"RMSE\": 2.243_509_156_061_845_6,\n \"NRMSE\": 0.079_840_183_489_745_39,\n \"ND\": 0.070_217_917_675_544_79,\n \"MAE_Coverage\": 0.5,\n },\n {\n \"MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 3.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"MAPE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n },\n]\n\nHAS_NANS = [False, True, False, True, True]\n\n\nINPUT_TYPE = [iterable, iterable, iterator, iterator, iterable]\n\n\[email protected](\n \"timeseries, res, has_nans, input_type\",\n zip(TIMESERIES, RES, HAS_NANS, INPUT_TYPE),\n)\ndef test_metrics(timeseries, res, has_nans, input_type):\n ts_datastructure = pd.Series\n evaluator = Evaluator(quantiles=QUANTILES, num_workers=0)\n agg_metrics, item_metrics = calculate_metrics(\n timeseries,\n evaluator,\n ts_datastructure,\n has_nans=has_nans,\n input_type=input_type,\n )\n\n for metric, score in agg_metrics.items():\n if metric in res.keys():\n assert abs(score - res[metric]) < 0.001, (\n \"Scores for the metric {} do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(metric, res[metric], score)\n )\n\n\[email protected](\n \"timeseries, res, has_nans, input_type\",\n zip(TIMESERIES, RES, HAS_NANS, INPUT_TYPE),\n)\ndef test_metrics_mp(timeseries, res, has_nans, input_type):\n ts_datastructure = pd.Series\n # Default will be multiprocessing evaluator\n evaluator = Evaluator(quantiles=QUANTILES, num_workers=4)\n agg_metrics, item_metrics = calculate_metrics(\n timeseries,\n evaluator,\n ts_datastructure,\n has_nans=has_nans,\n input_type=input_type,\n )\n\n for metric, score in agg_metrics.items():\n if metric in res.keys():\n assert abs(score - res[metric]) < 0.001, (\n \"Scores for the metric {} do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(metric, res[metric], score)\n )\n\n\nTIMESERIES_MULTIVARIATE = [\n np.ones((5, 10, 2), dtype=np.float64),\n np.ones((5, 10, 2), dtype=np.float64),\n np.ones((5, 10, 2), dtype=np.float64),\n np.stack(\n (\n np.arange(0, 50, dtype=np.float64).reshape(5, 10),\n np.arange(50, 100, dtype=np.float64).reshape(5, 10),\n ),\n axis=2,\n ),\n np.stack(\n (\n np.arange(0, 50, dtype=np.float64).reshape(5, 10),\n np.arange(50, 100, dtype=np.float64).reshape(5, 10),\n ),\n axis=2,\n ),\n np.stack(\n (\n np.arange(0, 50, 
dtype=np.float64).reshape(5, 10),\n np.arange(50, 100, dtype=np.float64).reshape(5, 10),\n ),\n axis=2,\n ),\n]\n\nRES_MULTIVARIATE = [\n {\n \"MSE\": 0.0,\n \"0_MSE\": 0.0,\n \"1_MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 15.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 0.0,\n },\n {\n \"MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 15.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 0.0,\n },\n {\n \"MSE\": 0.0,\n \"abs_error\": 0.0,\n \"abs_target_sum\": 30.0,\n \"abs_target_mean\": 1.0,\n \"seasonal_error\": 0.0,\n \"MASE\": 0.0,\n \"sMAPE\": 0.0,\n \"MSIS\": 0.0,\n \"RMSE\": 0.0,\n \"NRMSE\": 0.0,\n \"ND\": 0.0,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 0.0,\n },\n {\n \"MSE\": 4.666_666_666_666,\n \"abs_error\": 30.0,\n \"abs_target_sum\": 420.0,\n \"abs_target_mean\": 28.0,\n \"seasonal_error\": 1.0,\n \"MASE\": 2.0,\n \"sMAPE\": 0.113_254_049_3,\n \"MSIS\": 80.0,\n \"RMSE\": 2.160_246_899_469_286_9,\n \"NRMSE\": 0.077_151_674_981_045_956,\n \"ND\": 0.071_428_571_428_571_42,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 18.666_666_666_666,\n },\n {\n \"MSE\": 4.666_666_666_666,\n \"abs_error\": 30.0,\n \"abs_target_sum\": 1170.0,\n \"abs_target_mean\": 78.0,\n \"seasonal_error\": 1.0,\n \"MASE\": 2.0,\n \"sMAPE\": 0.026_842_301_756_499_45,\n \"MSIS\": 80.0,\n \"RMSE\": 2.160_246_899_469_286_9,\n \"NRMSE\": 0.027_695_473_070_119_065,\n \"ND\": 0.025_641_025_641_025_64,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 18.666_666_666_666,\n },\n {\n \"MSE\": 4.666_666_666_666,\n \"abs_error\": 60.0,\n \"abs_target_sum\": 1590.0,\n \"abs_target_mean\": 53.0,\n \"seasonal_error\": 1.0,\n \"MASE\": 2.0,\n \"sMAPE\": 0.070_048_175_528_249_73,\n \"MSIS\": 80.0,\n \"RMSE\": 2.160_246_899_469_286_9,\n \"NRMSE\": 0.040_759_375_461_684_65,\n \"ND\": 0.037_735_849_056_603_77,\n \"MAE_Coverage\": 0.5,\n \"m_sum_MSE\": 18.666_666_666_666,\n },\n]\n\nHAS_NANS_MULTIVARIATE = [False, False, False, False, False, False]\n\nEVAL_DIMS = [[0], [1], [0, 1], [0], [1], None]\n\nINPUT_TYPE = [iterable, iterable, iterator, iterator, iterable, iterator]\n\n\[email protected](\n \"timeseries, res, has_nans, eval_dims, input_type\",\n zip(\n TIMESERIES_MULTIVARIATE,\n RES_MULTIVARIATE,\n HAS_NANS_MULTIVARIATE,\n EVAL_DIMS,\n INPUT_TYPE,\n ),\n)\ndef test_metrics_multivariate(\n timeseries, res, has_nans, eval_dims, input_type\n):\n ts_datastructure = pd.DataFrame\n evaluator = MultivariateEvaluator(\n quantiles=QUANTILES,\n eval_dims=eval_dims,\n target_agg_funcs={\"sum\": np.sum},\n )\n\n agg_metrics, item_metrics = calculate_metrics(\n timeseries,\n evaluator,\n ts_datastructure,\n has_nans=has_nans,\n forecaster=naive_multivariate_forecaster,\n input_type=input_type,\n )\n\n for metric, score in agg_metrics.items():\n if metric in res.keys():\n assert abs(score - res[metric]) < 0.001, (\n \"Scores for the metric {} do not match: \\nexpected: {} \"\n \"\\nobtained: {}\".format(metric, res[metric], score)\n )\n\n\ndef test_evaluation_with_QuantileForecast():\n start = \"2012-01-11\"\n target = [2.4, 1.0, 3.0, 4.4, 5.5, 4.9] * 11\n index = pd.date_range(start=start, freq=\"1D\", periods=len(target))\n ts = pd.Series(index=index, data=target)\n\n ev = Evaluator(quantiles=(\"0.1\", \"0.2\", 
\"0.5\"))\n\n fcst = [\n QuantileForecast(\n start_date=pd.Timestamp(\"2012-01-11\"),\n freq=\"D\",\n forecast_arrays=np.array([[2.4, 9.0, 3.0, 2.4, 5.5, 4.9] * 10]),\n forecast_keys=[\"0.5\"],\n )\n ]\n\n agg_metric, _ = ev(iter([ts]), iter(fcst))\n\n assert np.isfinite(agg_metric[\"wQuantileLoss[0.5]\"])\n\n\[email protected](\n \"freq, expected_seasonality\",\n [\n (\"1H\", 24),\n (\"H\", 24),\n (\"2H\", 12),\n (\"3H\", 8),\n (\"4H\", 6),\n (\"15H\", 1),\n (\"5B\", 1),\n (\"1B\", 5),\n (\"2W\", 1),\n (\"3M\", 4),\n (\"1D\", 1),\n (\"7D\", 1),\n (\"8D\", 1),\n ],\n)\ndef test_get_seasonality(freq, expected_seasonality):\n assert get_seasonality(freq) == expected_seasonality\n"
] | [
[
"torch.nn.MSELoss",
"torch.as_tensor",
"torch.from_numpy",
"torch.cuda.is_available",
"numpy.array",
"numpy.concatenate"
],
[
"numpy.ones",
"pandas.Series",
"pandas.date_range",
"numpy.arange",
"numpy.array",
"pandas.Timestamp",
"numpy.isfinite"
]
] |
Wayne-Mai/DynSLAM | [
"7b62e13d2a33ff58ca888a346433a4891a228a20"
] | [
"preprocessing/MaskRCNN/MaskRCNN_TUM.py"
] | [
"#!/usr/bin/env python3\n#\n# This file is part of https://github.com/martinruenz/maskfusion\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>\n#\n\n# To use this script, add the MaskRCNN directoy to your PYTHON_PATH\nimport sys\nimport os\n\nmask_rcnn_path = os.path.abspath(\"../Mask_RCNN\")\nsys.path.insert(0, mask_rcnn_path)\n\nimport random\nimport math\nimport numpy as np\nimport scipy.misc\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport argparse\nfrom samples.coco import coco\nfrom mrcnn import utils\nfrom mrcnn import model as modellib\nfrom mrcnn import visualize\nfrom PIL import Image\nfrom helpers import *\nimport time\nimport pytoml as toml\nimport scipy.misc\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", required=True, help=\"Input directory (all files are being processed)\")\nparser.add_argument(\"-c\", required=False, help=\"Optional config file, otherwise MsCoco is assumed\")\nparser.add_argument(\"-o\", required=True, help=\"Output directory\")\nparser.add_argument(\"--filter\", nargs='+', required=False,\n help=\"Specify which labels you would like to use (empty means all), example: --filter teddy_bear pizza baseball_bat\")\nargs = parser.parse_args()\n\n# FURTHER PARAMETERS\nEXTENSIONS = ['jpg', 'png']\nFILTER_IMAGE_NAME = \"\" # only use images, whose name contains this string (eg \"Color\")\nscore_threshold = 0.85\nSPECIAL_ASSIGNMENTS = {} #{'person': 255}\nSINGLE_INSTANCES = False\nOUTPUT_FRAMES = True\nSTORE_CLASS_IDS = True\nSTART_INDEX = 0\n\nIMAGE_DIR = args.i\nOUTPUT_DIR = args.o\nDATA_DIR = os.path.join(mask_rcnn_path, \"data\")\nMODEL_DIR = os.path.join(DATA_DIR, \"logs\")\nmodel_path = os.path.join(DATA_DIR, \"mask_rcnn_coco.h5\")\n\nfilter_classes = []\nif args.filter:\n filter_classes = args.filter\n filter_classes = [f.replace(\"_\", \" \") for f in filter_classes]\nclass_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush']\n\nif args.c:\n with open(args.c, 'rb') as toml_file:\n toml_config = toml.load(toml_file)\n class_names = toml_config[\"MaskRCNN\"][\"class_names\"]\n model_path = 
toml_config[\"MaskRCNN\"][\"model_path\"]\n filter_classes = toml_config[\"MaskRCNN\"][\"filter_classes\"]\n score_threshold = toml_config[\"MaskRCNN\"][\"score_threshold\"]\n\nfilter_classes = [class_names.index(x) for x in filter_classes]\nSPECIAL_ASSIGNMENTS = {class_names.index(x): SPECIAL_ASSIGNMENTS[x] for x in SPECIAL_ASSIGNMENTS}\n\nclass InferenceConfig(coco.CocoConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n NUM_CLASSES = len(class_names)\n\nconfig = InferenceConfig()\nconfig.display()\n\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(model_path, by_name=True)\n\nfile_names = [fn for fn in os.listdir(IMAGE_DIR) if any(fn.endswith(ext) for ext in EXTENSIONS)]\nfile_names.sort()\nif FILTER_IMAGE_NAME and FILTER_IMAGE_NAME != \"\":\n file_names = [fn for fn in file_names if FILTER_IMAGE_NAME in fn]\n\n# ALL TOGETHER:\n# print(\"Loading images...\")\n# loaded_images = [scipy.misc.imread(os.path.join(IMAGE_DIR, f)) for f in file_names]\n# print(\"Starting evaluation...\")\n# start_time = time.time()\n# results = model.detect(loaded_images, verbose=0)\n# duration = time.time() - start_time\n# print(\"Evaluation took {} seconds.\".format(duration))\n# for idx, result in enumerate(results):\n# out_path = os.path.join(\"/tmp/test\", \"{}.png\".format(idx))\n# output_mask_ids(result, out_path)\n\n\n# SEPARATELY\nfig = plt.figure()\nax = fig.add_subplot(111)\n# plt.show(block=False)\nplt.ion()\n#_, ax = plt.subplots(1, figsize=(16, 16))\nfor idx, file_name in enumerate(file_names):\n\n if idx < START_INDEX:\n continue\n\n base_name = str(idx).zfill(4)\n\n if os.path.isfile(os.path.join(OUTPUT_DIR, base_name + \".png\")):\n continue\n\n print(\"Starting to work on frame\", base_name)\n\n image = scipy.misc.imread(os.path.join(IMAGE_DIR, file_name))\n h, w = image.shape[:2]\n\n results = model.detect([image], verbose=0)\n r = results[0]\n\n if len(r['class_ids']) == 0:\n r['masks'] = np.empty(shape=[h, w, 0])\n r['scores'] = []\n r['class_ids'] = []\n r['rois'] = np.empty(shape=[0, 4])\n\n if SINGLE_INSTANCES:\n merge_instances(r)\n\n #out_path = os.path.join(OUTPUT_DIR, \"{}.png\".format(idx))\n id_image, exported_class_ids, exported_rois = generate_id_image(r, score_threshold, filter_classes, SPECIAL_ASSIGNMENTS)\n save_id_image(id_image, OUTPUT_DIR, base_name, exported_class_ids, STORE_CLASS_IDS, exported_rois)\n\n\n # Visualise\n ax.clear()\n filter_result(r, filter_classes)\n #visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n # class_names, r['scores'], score_threshold, ax=ax) # requires patched version\n visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'], ax=ax)\n fig.canvas.draw()\n if OUTPUT_FRAMES:\n plt.savefig(os.path.join(OUTPUT_DIR, base_name+\".jpg\"))\n"
] | [
[
"matplotlib.pyplot.ion",
"matplotlib.pyplot.figure",
"numpy.empty"
]
] |
AropJoe/milvus | [
"35612881e33ce19a7407628769f6b51a7518bfe9"
] | [
"tests/benchmark/milvus_benchmark/runners/utils.py"
] | [
"import os\nimport logging\nimport numpy as np\nimport sklearn.preprocessing\nimport h5py\nimport random\nfrom itertools import product\n\nfrom pymilvus import DataType\nfrom milvus_benchmark import config\n\nlogger = logging.getLogger(\"milvus_benchmark.runners.utils\")\n\nDELETE_INTERVAL_TIME = 2\n\nVECTORS_PER_FILE = 1000000\nSIFT_VECTORS_PER_FILE = 100000\nBINARY_VECTORS_PER_FILE = 2000000\n\nMAX_NQ = 10001\nFILE_PREFIX = \"binary_\"\n\nWARM_TOP_K = 1\nWARM_NQ = 1\nDEFAULT_DIM = 512\nDEFAULT_METRIC_TYPE = \"L2\"\n\nRANDOM_SRC_DATA_DIR = config.RAW_DATA_DIR + 'random/'\nSIFT_SRC_DATA_DIR = config.RAW_DATA_DIR + 'sift1b/'\nDEEP_SRC_DATA_DIR = config.RAW_DATA_DIR + 'deep1b/'\nJACCARD_SRC_DATA_DIR = config.RAW_DATA_DIR + 'jaccard/'\nHAMMING_SRC_DATA_DIR = config.RAW_DATA_DIR + 'hamming/'\nSTRUCTURE_SRC_DATA_DIR = config.RAW_DATA_DIR + 'structure/'\nBINARY_SRC_DATA_DIR = config.RAW_DATA_DIR + 'binary/'\nSIFT_SRC_GROUNDTRUTH_DATA_DIR = SIFT_SRC_DATA_DIR + 'gnd'\n\nDEFAULT_F_FIELD_NAME = 'float_vector'\nDEFAULT_B_FIELD_NAME = 'binary_vector'\nDEFAULT_INT_FIELD_NAME = 'int64'\nDEFAULT_FLOAT_FIELD_NAME = 'float'\nDEFAULT_DOUBLE_FIELD_NAME = \"double\"\n\nGROUNDTRUTH_MAP = {\n \"1000000\": \"idx_1M.ivecs\",\n \"2000000\": \"idx_2M.ivecs\",\n \"5000000\": \"idx_5M.ivecs\",\n \"10000000\": \"idx_10M.ivecs\",\n \"20000000\": \"idx_20M.ivecs\",\n \"50000000\": \"idx_50M.ivecs\",\n \"100000000\": \"idx_100M.ivecs\",\n \"200000000\": \"idx_200M.ivecs\",\n \"500000000\": \"idx_500M.ivecs\",\n \"1000000000\": \"idx_1000M.ivecs\",\n}\n\nMETRIC_MAP = {\n \"l2\": \"L2\",\n \"ip\": \"IP\",\n \"jaccard\": \"JACCARD\",\n \"hamming\": \"HAMMING\",\n \"sub\": \"SUBSTRUCTURE\",\n \"super\": \"SUPERSTRUCTURE\"\n}\n\n\ndef get_len_vectors_per_file(data_type, dimension):\n if data_type == \"random\":\n if dimension == 512:\n vectors_per_file = VECTORS_PER_FILE\n elif dimension == 4096:\n vectors_per_file = 100000\n elif dimension == 16384:\n vectors_per_file = 10000\n else:\n raise Exception(\"dimension: %s not supported\" % str(dimension))\n elif data_type == \"sift\":\n vectors_per_file = SIFT_VECTORS_PER_FILE\n elif data_type in [\"binary\"]:\n vectors_per_file = BINARY_VECTORS_PER_FILE\n elif data_type == \"local\":\n vectors_per_file = SIFT_VECTORS_PER_FILE\n else:\n raise Exception(\"data_type: %s not supported\" % data_type)\n return vectors_per_file\n\n\ndef get_vectors_from_binary(nq, dimension, data_type):\n # use the first file, nq should be less than VECTORS_PER_FILE 10001\n if nq > MAX_NQ:\n raise Exception(\"Over size nq\")\n if data_type == \"local\":\n return generate_vectors(nq, dimension)\n elif data_type == \"random\":\n file_name = RANDOM_SRC_DATA_DIR + 'query_%d.npy' % dimension\n elif data_type == \"sift\":\n file_name = SIFT_SRC_DATA_DIR + 'query.npy'\n elif data_type == \"deep\":\n file_name = DEEP_SRC_DATA_DIR + 'query.npy'\n elif data_type == \"binary\":\n file_name = BINARY_SRC_DATA_DIR + 'query.npy'\n else:\n raise Exception(\"There is no corresponding file for this data type %s.\" % str(data_type))\n data = np.load(file_name)\n vectors = data[0:nq].tolist()\n return vectors\n\n\ndef generate_vectors(nb, dim):\n return [[random.random() for _ in range(dim)] for _ in range(nb)]\n\n\ndef generate_values(data_type, vectors, ids):\n values = None\n if data_type in [DataType.INT32, DataType.INT64]:\n values = ids\n elif data_type in [DataType.FLOAT, DataType.DOUBLE]:\n values = [(i + 0.0) for i in ids]\n elif data_type in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]:\n values = vectors\n 
return values\n\n\ndef generate_entities(info, vectors, ids=None):\n entities = []\n for field in info[\"fields\"]:\n # if field[\"name\"] == \"_id\":\n # continue\n field_type = field[\"type\"]\n entities.append(\n {\"name\": field[\"name\"], \"type\": field_type, \"values\": generate_values(field_type, vectors, ids)})\n return entities\n\n\ndef metric_type_trans(metric_type):\n if metric_type in METRIC_MAP.keys():\n return METRIC_MAP[metric_type]\n else:\n raise Exception(\"metric_type: %s not in METRIC_MAP\" % metric_type)\n\n\ndef get_dataset(hdf5_file_path):\n \"\"\" Determine whether hdf5 file exists, and return the content of hdf5 file \"\"\"\n if not os.path.exists(hdf5_file_path):\n raise Exception(\"%s not existed\" % hdf5_file_path)\n dataset = h5py.File(hdf5_file_path)\n return dataset\n\n\ndef get_default_field_name(data_type=DataType.FLOAT_VECTOR):\n \"\"\" Return field name according to data type \"\"\"\n if data_type == DataType.FLOAT_VECTOR:\n field_name = DEFAULT_F_FIELD_NAME\n elif data_type == DataType.BINARY_VECTOR:\n field_name = DEFAULT_B_FIELD_NAME\n elif data_type == DataType.INT64:\n field_name = DEFAULT_INT_FIELD_NAME\n elif data_type == DataType.FLOAT:\n field_name = DEFAULT_FLOAT_FIELD_NAME\n else:\n logger.error(data_type)\n raise Exception(\"Not supported data type\")\n return field_name\n\n\ndef get_vector_type(data_type):\n \"\"\" Return vector type according to data type \"\"\"\n vector_type = ''\n if data_type in [\"random\", \"sift\", \"deep\", \"glove\", \"local\"]:\n vector_type = DataType.FLOAT_VECTOR\n elif data_type in [\"binary\"]:\n vector_type = DataType.BINARY_VECTOR\n else:\n raise Exception(\"Data type: %s not defined\" % data_type)\n return vector_type\n\n\ndef get_vector_type_from_metric(metric_type):\n if metric_type in [\"hamming\", \"jaccard\"]:\n vector_type = DataType.BINARY_VECTOR\n else:\n vector_type = DataType.FLOAT_VECTOR\n return vector_type\n\n\ndef normalize(metric_type, X):\n if metric_type == \"ip\":\n logger.info(\"Set normalize for metric_type: %s\" % metric_type)\n X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')\n X = X.astype(np.float32)\n elif metric_type == \"l2\":\n X = X.astype(np.float32)\n elif metric_type in [\"jaccard\", \"hamming\", \"sub\", \"super\"]:\n tmp = []\n for item in X:\n new_vector = bytes(np.packbits(item, axis=-1).tolist())\n tmp.append(new_vector)\n X = tmp\n return X\n\n\ndef generate_combinations(args):\n if isinstance(args, list):\n args = [el if isinstance(el, list) else [el] for el in args]\n return [list(x) for x in product(*args)]\n elif isinstance(args, dict):\n flat = []\n for k, v in args.items():\n if isinstance(v, list):\n flat.append([(k, el) for el in v])\n else:\n flat.append([(k, v)])\n return [dict(x) for x in product(*flat)]\n else:\n raise TypeError(\"No args handling exists for %s\" % type(args).__name__)\n\n\ndef gen_file_name(idx, dimension, data_type):\n s = \"%05d\" % idx\n fname = FILE_PREFIX + str(dimension) + \"d_\" + s + \".npy\"\n if data_type == \"random\":\n fname = RANDOM_SRC_DATA_DIR + fname\n elif data_type == \"sift\":\n fname = SIFT_SRC_DATA_DIR + fname\n elif data_type == \"deep\":\n fname = DEEP_SRC_DATA_DIR + fname\n elif data_type == \"jaccard\":\n fname = JACCARD_SRC_DATA_DIR + fname\n elif data_type == \"hamming\":\n fname = HAMMING_SRC_DATA_DIR + fname\n elif data_type == \"sub\" or data_type == \"super\":\n fname = STRUCTURE_SRC_DATA_DIR + fname\n return fname\n\n\ndef get_recall_value(true_ids, result_ids):\n \"\"\"\n Use the intersection 
length\n true_ids: neighbors taken from the dataset\n result_ids: ids returned by query\n \"\"\"\n sum_radio = 0.0\n for index, item in enumerate(result_ids):\n # tmp = set(item).intersection(set(flat_id_list[index]))\n\n # Get the value of true_ids and the returned value to do the intersection\n tmp = set(true_ids[index]).intersection(set(item))\n\n # Add up each ratio\n sum_radio = sum_radio + len(tmp) / len(item)\n # logger.debug(sum_radio)\n\n # Calculate the average ratio and take three digits after the decimal point\n return round(sum_radio / len(result_ids), 3)\n\n\ndef get_ground_truth_ids(collection_size):\n fname = GROUNDTRUTH_MAP[str(collection_size)]\n fname = SIFT_SRC_GROUNDTRUTH_DATA_DIR + \"/\" + fname\n a = np.fromfile(fname, dtype='int32')\n d = a[0]\n true_ids = a.reshape(-1, d + 1)[:, 1:].copy()\n return true_ids\n\n\ndef normalize(metric_type, X):\n if metric_type == \"ip\":\n logger.info(\"Set normalize for metric_type: %s\" % metric_type)\n X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')\n X = X.astype(np.float32)\n elif metric_type == \"l2\":\n X = X.astype(np.float32)\n elif metric_type in [\"jaccard\", \"hamming\", \"sub\", \"super\"]:\n tmp = []\n for item in X:\n new_vector = bytes(np.packbits(item, axis=-1).tolist())\n tmp.append(new_vector)\n X = tmp\n return X"
] | [
[
"numpy.load",
"numpy.fromfile",
"numpy.packbits"
]
] |
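The benchmark utilities quoted in the row above compute recall by intersecting the ground-truth ids with the ids returned by a query (see get_recall_value). As a quick standalone illustration, not taken from the repository, the same average-intersection calculation can be reproduced with plain Python sets:

```python
# Illustrative sketch of the recall computation performed by get_recall_value()
# in the row above; the helper name and the toy ids below are made up.
def recall(true_ids, result_ids):
    # Average over all queries of |ground truth ∩ returned| / |returned|,
    # rounded to three decimals as in the benchmark code.
    total = 0.0
    for truth, returned in zip(true_ids, result_ids):
        total += len(set(truth) & set(returned)) / len(returned)
    return round(total / len(result_ids), 3)

true_ids = [[1, 2, 3], [4, 5, 6]]      # neighbors taken from the dataset
result_ids = [[1, 2, 9], [4, 7, 8]]    # ids returned by the query
print(recall(true_ids, result_ids))    # 0.5  ->  (2/3 + 1/3) / 2
```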
gonzrubio/ML_Papers | [
"562f85c81b0afb8771708ff31063f722d838b9d2"
] | [
"GANs/WGANGP_Gulrajani_et_al_2017/driver.py"
] | [
"\"\"\"Improved Training of Wasserstein GANs.\n\nPapers:\n https://arxiv.org/abs/1701.07875\n https://arxiv.org/abs/1704.00028\n\nCreated on Tue Oct 26 15:17:08 2021\n\n@author: gonzo\n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nfrom torch.utils.data import DataLoader, ConcatDataset\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import transforms, datasets\nfrom torchvision.utils import make_grid\n\n\nfrom model import Generator, Critic\n\n\ndevice = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n\n\ndef gradient_penalty(critic, real, fake, device=device):\n BATCH_SIZE, C, H, W = real.shape\n alpha = torch.rand((BATCH_SIZE, 1, 1, 1)).repeat(1, C, H, W).to(device)\n interpolated_images = real * alpha + fake * (1 - alpha)\n\n # Calculate critic scores\n mixed_scores = critic(interpolated_images)\n\n # Take the gradient of the scores with respect to the images\n gradient = torch.autograd.grad(\n inputs=interpolated_images,\n outputs=mixed_scores,\n grad_outputs=torch.ones_like(mixed_scores),\n create_graph=True,\n retain_graph=True,\n )[0]\n gradient = gradient.view(gradient.shape[0], -1)\n gradient_norm = gradient.norm(2, dim=1)\n gradient_penalty = torch.mean((gradient_norm - 1) ** 2)\n\n return gradient_penalty\n\n\ndef main():\n\n # Data\n train_dataset = datasets.CIFAR10(\n root='./data/train',\n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[-0.0163, -0.0347, -0.1056],\n std=[0.4045, 0.3987, 0.4020]),\n ])\n )\n\n test_dataset = datasets.CIFAR10(\n root='./data/test/',\n train=False,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[-0.0163, -0.0347, -0.1056],\n std=[0.4045, 0.3987, 0.4020]),\n ])\n )\n\n dataset = ConcatDataset([train_dataset, test_dataset])\n\n # Hyperparameters\n epochs = 200\n critic_iterations = 5\n lambda_gp = 10\n z_dim = 100\n batch_size = 2 ** 9\n fixed_noise = torch.randn((batch_size, z_dim, 1, 1), device=device)\n\n generator = Generator().to(device)\n critic = Critic().to(device)\n\n total_params = sum(p.numel() for p in generator.parameters())\n total_params += sum(p.numel() for p in critic.parameters())\n print(f'Number of parameters: {total_params:,}')\n\n lr_G = 5e-4\n lr_D = 4e-6\n betas = (0.0, 0.9)\n\n optim_G = optim.Adam(generator.parameters(), lr=lr_G, betas=betas)\n optim_C = optim.Adam(critic.parameters(), lr=lr_D, betas=betas)\n sched_G = CosineAnnealingLR(optim_G, T_max=20, eta_min=0)\n sched_C = CosineAnnealingLR(optim_C, T_max=20, eta_min=0)\n\n loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n writer = SummaryWriter(\"logs/fake\")\n step = 0\n\n for epoch in range(epochs):\n for batch_idx, (real, label) in enumerate(loader):\n\n # real = real.reshape((-1, 3, 32, 32)).to(device)\n real = real.to(device)\n\n for iteration in range(critic_iterations):\n noise = torch.randn((real.shape[0], z_dim, 1, 1), device=device)\n fake = generator(noise)\n critic_real = critic(real).reshape(-1)\n critic_fake = critic(fake).reshape(-1)\n gp = gradient_penalty(critic, real, fake, device=device)\n loss_critic = torch.mean(critic_fake) - torch.mean(critic_real)\n loss_critic += lambda_gp * gp\n loss_C = torch.mean(critic_fake) - torch.mean(critic_real)\n critic.zero_grad(set_to_none=True)\n loss_C.backward(retain_graph=True)\n optim_C.step()\n sched_C.step()\n\n # Minimize Generator\n C_fake = 
critic(fake)\n loss_G = -torch.mean(C_fake)\n generator.zero_grad(set_to_none=True)\n loss_G.backward()\n optim_G.step()\n sched_G.step()\n\n if batch_idx % 25 == 0:\n\n print(f\"{epoch}.{batch_idx} {loss_C: .3e} {loss_G: .3e}\")\n\n generator.eval()\n with torch.no_grad():\n fake = generator(fixed_noise)\n img_grid = make_grid(fake, normalize=True)\n writer.add_image(\"Fake Images\", img_grid, global_step=step)\n step += 1\n generator.train()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.utils.data.DataLoader",
"torch.ones_like",
"torch.utils.data.ConcatDataset",
"torch.randn",
"torch.rand",
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"torch.mean"
]
] |
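The WGAN-GP driver quoted above builds its critic loss from a gradient penalty on images interpolated between real and fake batches (note that the loss_C it actually back-propagates is recomputed without the lambda_gp * gp term, so only loss_critic includes the penalty). A minimal standalone sketch, assuming only torch and a toy critic, shows the penalty term itself is well defined and differentiable:

```python
# Standalone sketch (assumes only torch): evaluating a WGAN-GP style gradient
# penalty, mirroring gradient_penalty() from the driver above, on a toy critic.
import torch
import torch.nn as nn

def gradient_penalty(critic, real, fake):
    b, c, h, w = real.shape
    alpha = torch.rand(b, 1, 1, 1).expand(b, c, h, w)
    # requires_grad_ is needed here because these toy tensors are leaves,
    # unlike the generator output used in the training loop above.
    mixed = (real * alpha + fake * (1 - alpha)).requires_grad_(True)
    scores = critic(mixed)
    grads = torch.autograd.grad(outputs=scores, inputs=mixed,
                                grad_outputs=torch.ones_like(scores),
                                create_graph=True)[0]
    return ((grads.view(b, -1).norm(2, dim=1) - 1) ** 2).mean()

critic = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 1))  # toy critic
real = torch.randn(4, 3, 32, 32)
fake = torch.randn(4, 3, 32, 32)
gp = gradient_penalty(critic, real, fake)
print(gp)  # scalar tensor; create_graph=True keeps it differentiable w.r.t. the critic
```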
KireinaHoro/Ax | [
"16cb868911eecba323759e2e129df8833361e614"
] | [
"ax/modelbridge/factory.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom logging import Logger\nfrom typing import Any, Dict, List, Optional, Type\n\nimport torch\nfrom ax.core.data import Data\nfrom ax.core.experiment import Experiment\nfrom ax.core.multi_type_experiment import MultiTypeExperiment\nfrom ax.core.objective import MultiObjective\nfrom ax.core.observation import ObservationFeatures\nfrom ax.core.optimization_config import OptimizationConfig\nfrom ax.core.search_space import SearchSpace\nfrom ax.core.types import TConfig\nfrom ax.modelbridge.discrete import DiscreteModelBridge\nfrom ax.modelbridge.multi_objective_torch import MultiObjectiveTorchModelBridge\nfrom ax.modelbridge.random import RandomModelBridge\nfrom ax.modelbridge.registry import (\n Cont_X_trans,\n Models,\n MT_MTGP_trans,\n ST_MTGP_trans,\n Y_trans,\n)\nfrom ax.modelbridge.torch import TorchModelBridge\nfrom ax.modelbridge.transforms.base import Transform\nfrom ax.modelbridge.transforms.convert_metric_names import tconfig_from_mt_experiment\nfrom ax.models.torch.botorch import (\n BotorchModel,\n TAcqfConstructor,\n TModelConstructor,\n TModelPredictor,\n TOptimizer,\n)\nfrom ax.models.torch.botorch_defaults import (\n get_and_fit_model,\n get_NEI,\n predict_from_model,\n scipy_optimizer,\n)\nfrom ax.utils.common.logger import get_logger\nfrom ax.utils.common.typeutils import checked_cast\n\n\nlogger: Logger = get_logger(__name__)\n\n\nDEFAULT_TORCH_DEVICE = torch.device(\"cpu\")\nDEFAULT_EHVI_BATCH_LIMIT = 5\n\n\n\"\"\"\nModule containing functions that generate standard models, such as Sobol,\nGP+EI, etc.\n\nNote: a special case here is a composite generator, which requires an\nadditional ``GenerationStrategy`` and is able to delegate work to multiple models\n(for instance, to a random model to generate the first trial, and to an\noptimization model for subsequent trials).\n\n\"\"\"\n\n\ndef get_sobol(\n search_space: SearchSpace,\n seed: Optional[int] = None,\n deduplicate: bool = False,\n init_position: int = 0,\n scramble: bool = True,\n) -> RandomModelBridge:\n \"\"\"Instantiates a Sobol sequence quasi-random generator.\n\n Args:\n search_space: Sobol generator search space.\n kwargs: Custom args for sobol generator.\n\n Returns:\n RandomModelBridge, with SobolGenerator as model.\n \"\"\"\n return checked_cast(\n RandomModelBridge,\n Models.SOBOL(\n search_space=search_space,\n seed=seed,\n deduplicate=deduplicate,\n init_position=init_position,\n scramble=scramble,\n ),\n )\n\n\ndef get_uniform(\n search_space: SearchSpace, deduplicate: bool = False, seed: Optional[int] = None\n) -> RandomModelBridge:\n \"\"\"Instantiate uniform generator.\n\n Args:\n search_space: Uniform generator search space.\n kwargs: Custom args for uniform generator.\n\n Returns:\n RandomModelBridge, with UniformGenerator as model.\n \"\"\"\n return checked_cast(\n RandomModelBridge,\n Models.UNIFORM(search_space=search_space, seed=seed, deduplicate=deduplicate),\n )\n\n\ndef get_botorch(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,\n transform_configs: Optional[Dict[str, TConfig]] = None,\n model_constructor: TModelConstructor = get_and_fit_model,\n model_predictor: TModelPredictor = predict_from_model,\n 
acqf_constructor: TAcqfConstructor = get_NEI, # pyre-ignore[9]\n acqf_optimizer: TOptimizer = scipy_optimizer, # pyre-ignore[9]\n refit_on_cv: bool = False,\n refit_on_update: bool = True,\n optimization_config: Optional[OptimizationConfig] = None,\n) -> TorchModelBridge:\n \"\"\"Instantiates a BotorchModel.\"\"\"\n if data.df.empty: # pragma: no cover\n raise ValueError(\"`BotorchModel` requires non-empty data.\")\n return checked_cast(\n TorchModelBridge,\n Models.BOTORCH(\n experiment=experiment,\n data=data,\n search_space=search_space or experiment.search_space,\n torch_dtype=dtype,\n torch_device=device,\n transforms=transforms,\n transform_configs=transform_configs,\n model_constructor=model_constructor,\n model_predictor=model_predictor,\n acqf_constructor=acqf_constructor,\n acqf_optimizer=acqf_optimizer,\n refit_on_cv=refit_on_cv,\n refit_on_update=refit_on_update,\n optimization_config=optimization_config,\n ),\n )\n\n\ndef get_GPEI(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n) -> TorchModelBridge:\n \"\"\"Instantiates a GP model that generates points with EI.\"\"\"\n if data.df.empty: # pragma: no cover\n raise ValueError(\"GP+EI BotorchModel requires non-empty data.\")\n return checked_cast(\n TorchModelBridge,\n Models.BOTORCH(\n experiment=experiment,\n data=data,\n search_space=search_space or experiment.search_space,\n torch_dtype=dtype,\n torch_device=device,\n ),\n )\n\n\ndef get_GPKG(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n cost_intercept: float = 0.01,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,\n transform_configs: Optional[Dict[str, TConfig]] = None,\n **kwargs: Any,\n) -> TorchModelBridge:\n \"\"\"Instantiates a GP model that generates points with KG.\"\"\"\n if search_space is None:\n search_space = experiment.search_space\n if data.df.empty: # pragma: no cover\n raise ValueError(\"GP+KG BotorchModel requires non-empty data.\")\n\n inputs = {\n \"search_space\": search_space,\n \"experiment\": experiment,\n \"data\": data,\n \"cost_intercept\": cost_intercept,\n \"torch_dtype\": dtype,\n \"torch_device\": device,\n \"transforms\": transforms,\n \"transform_configs\": transform_configs,\n }\n\n if any(p.is_fidelity for k, p in experiment.parameters.items()):\n inputs[\"linear_truncated\"] = kwargs.get(\"linear_truncated\", True)\n return checked_cast(TorchModelBridge, Models.GPKG(**inputs)) # pyre-ignore: [16]\n\n\n# TODO[Lena]: how to instantiate MTGP through the enum? 
The Multi-type MTGP requires\n# a MultiTypeExperiment, so we would need validation for that, but more importantly,\n# we need to create `trial_index_to_type` as in the factory function below.\n# Maybe `MultiTypeExperiment` could have that mapping as a property?\ndef get_MTGP(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n trial_index: Optional[int] = None,\n) -> TorchModelBridge:\n \"\"\"Instantiates a Multi-task Gaussian Process (MTGP) model that generates\n points with EI.\n\n If the input experiment is a MultiTypeExperiment then a\n Multi-type Multi-task GP model will be instantiated.\n Otherwise, the model will be a Single-type Multi-task GP.\n \"\"\"\n\n if isinstance(experiment, MultiTypeExperiment):\n trial_index_to_type = {\n t.index: t.trial_type for t in experiment.trials.values()\n }\n transforms = MT_MTGP_trans\n transform_configs = {\n \"TrialAsTask\": {\"trial_level_map\": {\"trial_type\": trial_index_to_type}},\n \"ConvertMetricNames\": tconfig_from_mt_experiment(experiment),\n }\n else:\n # Set transforms for a Single-type MTGP model.\n transforms = ST_MTGP_trans\n transform_configs = None\n\n # Choose the status quo features for the experiment from the selected trial.\n # If trial_index is None, we will look for a status quo from the last\n # experiment trial to use as a status quo for the experiment.\n if trial_index is None:\n trial_index = len(experiment.trials) - 1\n elif trial_index >= len(experiment.trials):\n raise ValueError(\"trial_index is bigger than the number of experiment trials\")\n\n # pyre-fixme[16]: `ax.core.base_trial.BaseTrial` has no attribute `status_quo`.\n status_quo = experiment.trials[trial_index].status_quo\n if status_quo is None:\n status_quo_features = None\n else:\n status_quo_features = ObservationFeatures(\n parameters=status_quo.parameters, trial_index=trial_index\n )\n\n return TorchModelBridge(\n experiment=experiment,\n search_space=search_space or experiment.search_space,\n data=data,\n model=BotorchModel(),\n transforms=transforms,\n # pyre-fixme[6]: Expected `Optional[Dict[str, Dict[str,\n # typing.Union[botorch.acquisition.acquisition.AcquisitionFunction, float,\n # int, str]]]]` for 6th param but got `Optional[Dict[str,\n # typing.Union[Dict[str, Dict[str, Dict[int, Optional[str]]]], Dict[str,\n # typing.Union[botorch.acquisition.acquisition.AcquisitionFunction, float,\n # int, str]]]]]`.\n transform_configs=transform_configs,\n torch_dtype=torch.double,\n torch_device=DEFAULT_TORCH_DEVICE,\n status_quo_features=status_quo_features,\n )\n\n\ndef get_factorial(search_space: SearchSpace) -> DiscreteModelBridge:\n \"\"\"Instantiates a factorial generator.\"\"\"\n return checked_cast(\n DiscreteModelBridge,\n Models.FACTORIAL(search_space=search_space, fit_out_of_design=True),\n )\n\n\ndef get_empirical_bayes_thompson(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n num_samples: int = 10000,\n min_weight: Optional[float] = None,\n uniform_weights: bool = False,\n) -> DiscreteModelBridge:\n \"\"\"Instantiates an empirical Bayes / Thompson sampling model.\"\"\"\n if data.df.empty: # pragma: no cover\n raise ValueError(\"Empirical Bayes Thompson sampler requires non-empty data.\")\n return checked_cast(\n DiscreteModelBridge,\n Models.EMPIRICAL_BAYES_THOMPSON(\n experiment=experiment,\n data=data,\n search_space=search_space or experiment.search_space,\n num_samples=num_samples,\n min_weight=min_weight,\n uniform_weights=uniform_weights,\n 
fit_out_of_design=True,\n ),\n )\n\n\ndef get_thompson(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n num_samples: int = 10000,\n min_weight: Optional[float] = None,\n uniform_weights: bool = False,\n) -> DiscreteModelBridge:\n \"\"\"Instantiates a Thompson sampling model.\"\"\"\n if data.df.empty: # pragma: no cover\n raise ValueError(\"Thompson sampler requires non-empty data.\")\n return checked_cast(\n DiscreteModelBridge,\n Models.THOMPSON(\n experiment=experiment,\n data=data,\n search_space=search_space or experiment.search_space,\n num_samples=num_samples,\n min_weight=min_weight,\n uniform_weights=uniform_weights,\n fit_out_of_design=True,\n ),\n )\n\n\ndef get_GPMES(\n experiment: Experiment,\n data: Data,\n search_space: Optional[SearchSpace] = None,\n cost_intercept: float = 0.01,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n transforms: List[Type[Transform]] = Cont_X_trans + Y_trans,\n transform_configs: Optional[Dict[str, TConfig]] = None,\n **kwargs: Any,\n) -> TorchModelBridge:\n \"\"\"Instantiates a GP model that generates points with MES.\"\"\"\n if search_space is None:\n search_space = experiment.search_space\n if data.df.empty: # pragma: no cover\n raise ValueError(\"GP + MES BotorchModel requires non-empty data.\")\n\n inputs = {\n \"search_space\": search_space,\n \"experiment\": experiment,\n \"data\": data,\n \"cost_intercept\": cost_intercept,\n \"torch_dtype\": dtype,\n \"torch_device\": device,\n \"transforms\": transforms,\n \"transform_configs\": transform_configs,\n }\n\n if any(p.is_fidelity for k, p in experiment.parameters.items()):\n inputs[\"linear_truncated\"] = kwargs.get(\"linear_truncated\", True)\n return checked_cast(TorchModelBridge, Models.GPMES(**inputs)) # pyre-ignore: [16]\n\n\ndef get_MOO_EHVI(\n experiment: Experiment,\n data: Data,\n ref_point: Dict[str, float],\n search_space: Optional[SearchSpace] = None,\n dtype: torch.dtype = torch.double,\n device: torch.device = (\n torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n ),\n) -> MultiObjectiveTorchModelBridge:\n \"\"\"Instantiates a multi-objective model that generates points with EHVI.\n\n Requires a `ref_point`, a dictionary of the metric name to the reference point value\n for every objective being optimized. 
An arm only improves hypervolume if it is\n strictly better than this point in all metrics.\n \"\"\"\n # pyre-ignore: [16] `Optional` has no attribute `objective`.\n if not isinstance(experiment.optimization_config.objective, MultiObjective):\n raise ValueError(\"Multi-objective optimization requires multiple objectives.\")\n if data.df.empty: # pragma: no cover\n raise ValueError(\"MultiObjectiveOptimization requires non-empty data.\")\n return checked_cast(\n MultiObjectiveTorchModelBridge,\n Models.MOO(\n experiment=experiment,\n data=data,\n ref_point=ref_point,\n search_space=search_space or experiment.search_space,\n torch_dtype=dtype,\n torch_device=device,\n default_model_gen_options={\n \"acquisition_function_kwargs\": {\"sequential\": True},\n \"optimizer_kwargs\": {\n # having a batch limit is very important for avoiding\n # memory issues in the initialization\n \"batch_limit\": DEFAULT_EHVI_BATCH_LIMIT\n },\n },\n ),\n )\n\n\ndef get_MOO_PAREGO(\n experiment: Experiment,\n data: Data,\n ref_point: Optional[List[float]] = None,\n search_space: Optional[SearchSpace] = None,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n) -> MultiObjectiveTorchModelBridge:\n \"\"\"Instantiates a multi-objective model that generates points with ParEGO.\n\n qParEGO optimizes random augmented chebyshev scalarizations of the multiple\n objectives. This allows it to explore non-convex pareto frontiers.\n \"\"\"\n # pyre-ignore: [16] `Optional` has no attribute `objective`.\n if not isinstance(experiment.optimization_config.objective, MultiObjective):\n raise ValueError(\"Multi-Objective optimization requires multiple objectives\")\n if data.df.empty:\n raise ValueError(\"MultiObjectiveOptimization requires non-empty data.\")\n return checked_cast(\n MultiObjectiveTorchModelBridge,\n Models.MOO(\n experiment=experiment,\n data=data,\n ref_point=ref_point,\n search_space=search_space or experiment.search_space,\n torch_dtype=dtype,\n torch_device=device,\n acqf_constructor=get_NEI,\n default_model_gen_options={\n \"acquisition_function_kwargs\": {\n \"chebyshev_scalarization\": True,\n \"sequential\": True,\n }\n },\n ),\n )\n\n\ndef get_MOO_RS(\n experiment: Experiment,\n data: Data,\n ref_point: Optional[List[float]] = None,\n search_space: Optional[SearchSpace] = None,\n dtype: torch.dtype = torch.double,\n device: torch.device = DEFAULT_TORCH_DEVICE,\n) -> MultiObjectiveTorchModelBridge:\n \"\"\"Instantiates a Random Scalarization multi-objective model.\n\n Chooses a different random linear scalarization of the objectives\n for generating each new candidate arm. This will only explore the\n convex hull of the pareto frontier.\n \"\"\"\n # pyre-ignore: [16] `Optional` has no attribute `objective`.\n if not isinstance(experiment.optimization_config.objective, MultiObjective):\n raise ValueError(\"Multi-Objective optimization requires multiple objectives\")\n if data.df.empty:\n raise ValueError(\"MultiObjectiveOptimization requires non-empty data.\")\n return checked_cast(\n MultiObjectiveTorchModelBridge,\n Models.MOO(\n experiment=experiment,\n data=data,\n ref_point=ref_point,\n search_space=search_space or experiment.search_space,\n torch_dtype=dtype,\n torch_device=device,\n acqf_constructor=get_NEI,\n default_model_gen_options={\n \"acquisition_function_kwargs\": {\n \"random_scalarization\": True,\n \"sequential\": True,\n }\n },\n ),\n )\n"
] | [
[
"torch.cuda.is_available",
"torch.device"
]
] |
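The factory module quoted above wraps the Models registry in convenience constructors such as get_sobol. As an illustrative usage sketch (assuming an ax-platform version compatible with this file; the two-parameter search space is invented for demonstration), a Sobol bridge built that way can generate quasi-random candidate arms:

```python
# Hypothetical usage of get_sobol() from the factory module in the row above.
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace
from ax.modelbridge.factory import get_sobol

search_space = SearchSpace(parameters=[
    RangeParameter(name="x", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0),
    RangeParameter(name="y", parameter_type=ParameterType.FLOAT, lower=-1.0, upper=1.0),
])

sobol = get_sobol(search_space, seed=0)   # RandomModelBridge with a Sobol generator
generator_run = sobol.gen(n=5)            # five quasi-random candidate arms
for arm in generator_run.arms:
    print(arm.parameters)
```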
maximilian-hoffmann/FINE | [
"62828f5feefefc2208dde0133435979d63398cc1"
] | [
"FINE/storage.py"
] | [
"from FINE.component import Component, ComponentModel\nfrom FINE import utils\nimport pyomo.environ as pyomo\nimport warnings\nimport pandas as pd\n\n\nclass Storage(Component):\n \"\"\"\n A Storage component can store a commodity and thus transfers it between time steps.\n \"\"\"\n def __init__(self, esM, name, commodity, chargeRate=1, dischargeRate=1,\n chargeEfficiency=1, dischargeEfficiency=1, selfDischarge=0, cyclicLifetime=None,\n stateOfChargeMin=0, stateOfChargeMax=1,\n hasCapacityVariable=True, capacityVariableDomain='continuous', capacityPerPlantUnit=1,\n hasIsBuiltBinaryVariable=False, bigM=None, doPreciseTsaModeling=False,\n chargeOpRateMax=None, chargeOpRateFix=None, chargeTsaWeight=1,\n dischargeOpRateMax=None, dischargeOpRateFix=None, dischargeTsaWeight=1,\n isPeriodicalStorage=False,\n locationalEligibility=None, capacityMin=None, capacityMax=None, sharedPotentialID=None,\n capacityFix=None, isBuiltFix=None,\n investPerCapacity=0, investIfBuilt=0, opexPerChargeOperation=0,\n opexPerDischargeOperation=0, opexPerCapacity=0, opexIfBuilt=0, interestRate=0.08, economicLifetime=10):\n \"\"\"\n Constructor for creating an Storage class instance.\n The Storage component specific input arguments are described below. The general component\n input arguments are described in the Component class.\n\n **Required arguments:**\n\n :param commodity: to the component related commodity.\n :type commodity: string\n\n **Default arguments:**\n\n :param chargeRate: ratio of the maximum storage inflow (in commodityUnit/hour) and the\n storage capacity (in commodityUnit). Example:\\n\n * A hydrogen salt cavern which can store 133 GWh_H2_LHV can be charged 0.45 GWh_H2_LHV during\n one hour. The chargeRate thus equals 0.45/133.\\n\n |br| * the default value is 1\n :type chargeRate: 0 <= float <=1\n\n :param dischargeRate: ratio of the maximum storage outflow (in commodityUnit/hour) and\n the storage capacity (in commodityUnit). Example:\\n\n * A hydrogen salt cavern which can store 133 GWh_H2_LHV can be discharged 0.45 GWh_H2_LHV during\n one hour. 
The dischargeRate thus equals 0.45/133.\\n\n |br| * the default value is 1\n :type dischargeRate: 0 <= float <=1\n\n :param chargeEfficiency: defines the efficiency with which the storage can be charged (equals\n the percentage of the injected commodity that is transformed into stored commodity).\n Enter 0.98 for 98% etc.\n |br| * the default value is 1\n :type chargeEfficiency: 0 <= float <=1\n\n :param dischargeEfficiency: defines the efficiency with which the storage can be discharged\n (equals the percentage of the withdrawn commodity that is transformed into stored commodity).\n Enter 0.98 for 98% etc.\n |br| * the default value is 1\n :type dischargeEfficiency: 0 <= float <=1\n\n :param selfDischarge: percentage of self-discharge from the storage during one hour\n |br| * the default value is 0\n :type selfDischarge: 0 <= float <=1\n\n :param cyclicLifetime: if specified, the total number of full cycle equivalents that are supported\n by the technology.\n |br| * the default value is None\n :type cyclicLifetime: positive float\n\n :param stateOfChargeMin: threshold (percentage) that the state of charge can not drop under\n |br| * the default value is 0\n :type stateOfChargeMin: 0 <= float <=1\n\n :param stateOfChargeMax: threshold (percentage) that the state of charge can not exceed\n |br| * the default value is 1\n :type stateOfChargeMax: 0 <= float <=1\n\n :param doPreciseTsaModeling: determines whether the state of charge is limited precisely (True) or\n with a simplified method (False). The error is small if the selfDischarge is small.\n |br| * the default value is False\n :type doPreciseTsaModeling: boolean\n\n :param chargeOpRateMax: if specified indicates a maximum charging rate for each location and each time\n step by a positive float. If hasCapacityVariable is set to True, the values are given relative\n to the installed capacities (i.e. in that case a value of 1 indicates a utilization of 100% of the\n capacity). If hasCapacityVariable is set to False, the values are given as absolute values in form\n of the commodityUnit, referring to the charged commodity (before multiplying the charging efficiency)\n during one time step.\n |br| * the default value is None\n :type chargeOpRateMax: None or Pandas DataFrame with positive (>= 0) entries. The row indices have\n to match the in the energy system model specified time steps. The column indices have to match the\n in the energy system model specified locations.\n\n :param chargeOpRateFix: if specified indicates a fixed charging rate for each location and each time\n step by a positive float. If hasCapacityVariable is set to True, the values are given relative\n to the installed capacities (i.e. in that case a value of 1 indicates a utilization of 100% of the\n capacity). If hasCapacityVariable is set to False, the values are given as absolute values in form\n of the commodity, referring to the charged commodity (before multiplying the charging efficiency)\n during one time step.\n |br| * the default value is None\n :type chargeOpRateFix: None or Pandas DataFrame with positive (>= 0) entries. The row indices have\n to match the in the energy system model specified time steps. 
The column indices have to match the\n in the energy system model specified locations.\n\n :param chargeTsaWeight: weight with which the chargeOpRate (max/fix) time series of the\n component should be considered when applying time series aggregation.\n |br| * the default value is 1\n :type chargeTsaWeight: positive (>= 0) float\n\n :param dischargeOpRateMax: if specified indicates a maximum discharging rate for each location and each\n time step by a positive float. If hasCapacityVariable is set to True, the values are given relative\n to the installed capacities (i.e. in that case a value of 1 indicates a utilization of 100% of the\n capacity). If hasCapacityVariable is set to False, the values are given as absolute values in form\n of the commodityUnit, referring to the discharged commodity (after multiplying the discharging\n efficiency) during one time step.\n |br| * the default value is None\n :type dischargeOpRateMax: None or Pandas DataFrame with positive (>= 0) entries. The row indices have\n to match the in the energy system model specified time steps. The column indices have to match the\n in the energy system model specified locations.\n\n :param dischargeOpRateFix: if specified indicates a fixed discharging rate for each location and each\n time step by a positive float. If hasCapacityVariable is set to True, the values are given relative\n to the installed capacities (i.e. in that case a value of 1 indicates a utilization of 100% of the\n capacity). If hasCapacityVariable is set to False, the values are given as absolute values in form\n of the commodityUnit, referring to the charged commodity (after multiplying the discharging\n efficiency) during one time step.\n |br| * the default value is None\n :type dischargeOpRateFix: None or Pandas DataFrame with positive (>= 0) entries. The row indices have\n to match the in the energy system model specified time steps. The column indices have to match the\n in the energy system model specified locations.\n\n :param dischargeTsaWeight: weight with which the dischargeOpRate (max/fix) time series of the\n component should be considered when applying time series aggregation.\n |br| * the default value is 1\n :type dischargeTsaWeight: positive (>= 0) float\n\n :param isPeriodicalStorage: indicates if the state of charge of the storage has to be at the same value\n after the end of each period. This is especially relevant when using daily periods where short term\n storage can be restrained to daily cycles. Benefits the run time of the model.\n |br| * the default value is False\n :type isPeriodicalStorage: boolean\n\n :param opexPerChargeOperation: cost which is directly proportional to the charge operation of the\n component is obtained by multiplying the opexPerOperation parameter with the annual sum of the\n operational time series of the components. The opexPerOperation can either be given as a float\n or a Pandas Series with location specific values.\n The cost unit in which the parameter is given has to match the one specified in the energy\n system model (i.e. 
Euro, Dollar, 1e6 Euro).\n |br| * the default value is 0\n :type opexPerChargeOperation: positive (>=0) float or Pandas Series with positive (>=0) values.\n The indices of the series have to equal the in the energy system model specified locations.\n\n :param opexPerDischargeOperation: cost which is directly proportional to the discharge operation\n of the component is obtained by multiplying the opexPerOperation parameter with the annual sum\n of the operational time series of the components. The opexPerOperation can either be given as\n a float or a Pandas Series with location specific values.\n The cost unit in which the parameter is given has to match the one specified in the energy\n system model (i.e. Euro, Dollar, 1e6 Euro).\n |br| * the default value is 0\n\n :type opexPerDischargeOperation: positive (>=0) float or Pandas Series with positive (>=0) values.\n The indices of the series have to equal the in the energy system model specified locations.\n component (in the physicalUnit of the component) and not of the specific operation itself are\n obtained by multiplying the capacity of the component at a location with the opexPerCapacity\n factor. The opexPerCapacity can either be given as a float or a Pandas Series with location\n specific values.\n \"\"\"\n Component. __init__(self, esM, name, dimension='1dim', hasCapacityVariable=hasCapacityVariable,\n capacityVariableDomain=capacityVariableDomain, capacityPerPlantUnit=capacityPerPlantUnit,\n hasIsBuiltBinaryVariable=hasIsBuiltBinaryVariable, bigM=bigM,\n locationalEligibility=locationalEligibility, capacityMin=capacityMin,\n capacityMax=capacityMax, sharedPotentialID=sharedPotentialID, capacityFix=capacityFix,\n isBuiltFix=isBuiltFix, investPerCapacity=investPerCapacity, investIfBuilt=investIfBuilt,\n opexPerCapacity=opexPerCapacity, opexIfBuilt=opexIfBuilt, interestRate=interestRate,\n economicLifetime=economicLifetime)\n\n # Set general storage component data\n utils.checkCommodities(esM, {commodity})\n self.commodity, self.commodityUnit = commodity, esM.commodityUnitsDict[commodity]\n # TODO unit and type checks\n self.chargeRate, self.dischargeRate = chargeRate, dischargeRate\n self.chargeEfficiency, self.dischargeEfficiency = chargeEfficiency, dischargeEfficiency\n self.selfDischarge = selfDischarge\n self.cyclicLifetime = cyclicLifetime\n self.stateOfChargeMin, self.stateOfChargeMax = stateOfChargeMin, stateOfChargeMax\n self.isPeriodicalStorage = isPeriodicalStorage\n self.doPreciseTsaModeling = doPreciseTsaModeling\n self.modelingClass = StorageModel\n\n # Set additional economic data\n self.opexPerChargeOperation = utils.checkAndSetCostParameter(esM, name, opexPerChargeOperation, '1dim',\n locationalEligibility)\n self.opexPerDischargeOperation = utils.checkAndSetCostParameter(esM, name, opexPerDischargeOperation, '1dim',\n locationalEligibility)\n\n # Set location-specific operation parameters (Charging rate, discharging rate, state of charge rate)\n # and time series aggregation weighting factor\n if chargeOpRateMax is not None and chargeOpRateFix is not None:\n chargeOpRateMax = None\n if esM.verbose < 2:\n warnings.warn('If chargeOpRateFix is specified, the chargeOpRateMax parameter is not required.\\n' +\n 'The chargeOpRateMax time series was set to None.')\n utils.checkOperationTimeSeriesInputParameters(esM, chargeOpRateMax, locationalEligibility)\n utils.checkOperationTimeSeriesInputParameters(esM, chargeOpRateFix, locationalEligibility)\n\n self.fullChargeOpRateMax = 
utils.setFormattedTimeSeries(chargeOpRateMax)\n self.aggregatedChargeOpRateMax = None\n self.chargeOpRateMax = None\n\n self.fullChargeOpRateFix = utils.setFormattedTimeSeries(chargeOpRateFix)\n self.aggregatedChargeOpRateFix = None\n self.chargeOpRateFix = None\n\n utils.isPositiveNumber(chargeTsaWeight)\n self.chargeTsaWeight = chargeTsaWeight\n\n if dischargeOpRateMax is not None and dischargeOpRateFix is not None:\n dischargeOpRateMax = None\n if esM.verbose < 2:\n warnings.warn('If dischargeOpRateFix is specified, the dischargeOpRateMax parameter is not required.\\n'\n + 'The dischargeOpRateMax time series was set to None.')\n utils.checkOperationTimeSeriesInputParameters(esM, dischargeOpRateMax, locationalEligibility)\n utils.checkOperationTimeSeriesInputParameters(esM, dischargeOpRateFix, locationalEligibility)\n\n self.fullDischargeOpRateMax = utils.setFormattedTimeSeries(dischargeOpRateMax)\n self.aggregatedDischargeOpRateMax = None\n self.dischargeOpRateMax = None\n\n self.fullDischargeOpRateFix = utils.setFormattedTimeSeries(dischargeOpRateFix)\n self.aggregatedDischargeOpRateFix = None\n self.dischargeOpRateFix = None\n\n utils.isPositiveNumber(dischargeTsaWeight)\n self.dischargeTsaWeight = dischargeTsaWeight\n\n # Set locational eligibility\n timeSeriesData = None\n tsNb = sum([0 if data is None else 1 for data in [chargeOpRateMax, chargeOpRateFix, dischargeOpRateMax,\n dischargeOpRateFix, ]])\n if tsNb > 0:\n timeSeriesData = sum([data for data in [chargeOpRateMax, chargeOpRateFix, dischargeOpRateMax,\n dischargeOpRateFix, ] if data is not None])\n self.locationalEligibility = \\\n utils.setLocationalEligibility(esM, self.locationalEligibility, self.capacityMax, self.capacityFix,\n self.isBuiltFix, self.hasCapacityVariable, timeSeriesData)\n\n def addToEnergySystemModel(self, esM):\n super().addToEnergySystemModel(esM)\n\n def setTimeSeriesData(self, hasTSA):\n self.chargeOpRateMax = self.aggregatedChargeOpRateMax if hasTSA else self.fullChargeOpRateMax\n self.chargeOpRateFix = self.aggregatedChargeOpRateFix if hasTSA else self.fullChargeOpRateFix\n self.dischargeOpRateMax = self.aggregatedChargeOpRateMax if hasTSA else self.fullDischargeOpRateMax\n self.dischargeOpRateFix = self.aggregatedChargeOpRateFix if hasTSA else self.fullDischargeOpRateFix\n\n def getDataForTimeSeriesAggregation(self):\n weightDict, data = {}, []\n I = [(self.fullChargeOpRateFix, self.fullChargeOpRateMax, 'chargeRate_', self.chargeTsaWeight),\n (self.fullDischargeOpRateFix, self.fullDischargeOpRateMax, 'dischargeRate_', self.dischargeTsaWeight)]\n\n for rateFix, rateMax, rateName, rateWeight in I:\n weightDict, data = self.prepareTSAInput(rateFix, rateMax, rateName, rateWeight, weightDict, data)\n return (pd.concat(data, axis=1), weightDict) if data else (None, {})\n\n def setAggregatedTimeSeriesData(self, data):\n\n self.aggregatedChargeOpRateFix = self.getTSAOutput(self.fullChargeOpRateFix, 'chargeRate_', data)\n self.aggregatedChargeOpRateMax = self.getTSAOutput(self.fullChargeOpRateMax, 'chargeRate_', data)\n\n self.aggregatedDischargeOpRateFix = self.getTSAOutput(self.fullDischargeOpRateFix, 'dischargeRate_', data)\n self.aggregatedDischargeOpRateMax = self.getTSAOutput(self.fullDischargeOpRateMax, 'dischargeRate_', data)\n\n\nclass StorageModel(ComponentModel):\n \"\"\" Doc \"\"\"\n\n def __init__(self):\n self.abbrvName = 'stor'\n self.dimension = '1dim'\n self.componentsDict = {}\n self.capacityVariablesOptimum, self.isBuiltVariablesOptimum = None, None\n self.chargeOperationVariablesOptimum, 
self.dischargeOperationVariablesOptimum = None, None\n self.stateOfChargeOperationVariablesOptimum = None\n self.optSummary = None\n\n ####################################################################################################################\n # Declare sparse index sets #\n ####################################################################################################################\n\n def declareSets(self, esM, pyM):\n \"\"\" Declares sets and dictionaries \"\"\"\n compDict = self.componentsDict\n\n # Declare design variable sets\n self.declareDesignVarSet(pyM)\n self.declareContinuousDesignVarSet(pyM)\n self.declareDiscreteDesignVarSet(pyM)\n self.declareDesignDecisionVarSet(pyM)\n\n if pyM.hasTSA:\n varSet = getattr(pyM, 'designDimensionVarSet_' + self.abbrvName)\n\n def initDesignVarSimpleTSASet(pyM):\n return ((loc, compName) for loc, compName in varSet if not compDict[compName].doPreciseTsaModeling)\n setattr(pyM, 'designDimensionVarSetSimple_' + self.abbrvName,\n pyomo.Set(dimen=2, initialize=initDesignVarSimpleTSASet))\n\n def initDesignVarPreciseTSASet(pyM):\n return ((loc, compName) for loc, compName in varSet if compDict[compName].doPreciseTsaModeling)\n setattr(pyM, 'designDimensionVarSetPrecise_' + self.abbrvName,\n pyomo.Set(dimen=2, initialize=initDesignVarPreciseTSASet))\n\n # Declare operation variable set\n self.declareOpVarSet(esM, pyM)\n\n # Declare sets for case differentiation of operating modes\n # * Charge operation\n self.declareOperationModeSets(pyM, 'chargeOpConstrSet', 'chargeOpRateMax', 'chargeOpRateFix')\n # * Discharge operation\n self.declareOperationModeSets(pyM, 'dischargeOpConstrSet', 'dischargeOpRateMax', 'dischargeOpRateFix')\n\n ####################################################################################################################\n # Declare variables #\n ####################################################################################################################\n\n def declareVariables(self, esM, pyM):\n \"\"\" Declares design and operation variables \"\"\"\n\n # Capacity variables in [commodityUnit*hour]\n self.declareCapacityVars(pyM)\n # (Continuous) numbers of installed components in [-]\n self.declareRealNumbersVars(pyM)\n # (Discrete/integer) numbers of installed components in [-]\n self.declareIntNumbersVars(pyM)\n # Binary variables [-] indicating if a component is considered at a location or not in [-]\n self.declareBinaryDesignDecisionVars(pyM)\n # Energy amount injected into a storage (before injection efficiency losses) between two time steps\n self.declareOperationVars(pyM, 'chargeOp')\n # Energy amount delivered from a storage (after delivery efficiency losses) between two time steps\n self.declareOperationVars(pyM, 'dischargeOp')\n\n # Inventory of storage components [commodityUnit*hour]\n if not pyM.hasTSA:\n # Energy amount stored at the beginning of a time step during the (one) period (the i-th state of charge\n # refers to the state of charge at the beginning of the i-th time step, the last index is the state of\n # charge after the last time step)\n setattr(pyM, 'stateOfCharge_' + self.abbrvName, pyomo.Var(getattr(pyM, 'designDimensionVarSet_' +\n self.abbrvName), pyM.interTimeStepsSet, domain=pyomo.NonNegativeReals))\n else:\n # (Virtual) energy amount stored during a period (the i-th state of charge refers to the state of charge at\n # the beginning of the i-th time step, the last index is the state of charge after the last time step)\n setattr(pyM, 'stateOfCharge_' + self.abbrvName, 
pyomo.Var(getattr(pyM, 'designDimensionVarSet_' +\n self.abbrvName), pyM.interTimeStepsSet, domain=pyomo.Reals))\n # (Virtual) minimum amount of energy stored within a period\n setattr(pyM, 'stateOfChargeMin_' + self.abbrvName, pyomo.Var(getattr(pyM, 'designDimensionVarSet_' +\n self.abbrvName), esM.typicalPeriods, domain=pyomo.Reals))\n # (Virtual) maximum amount of energy stored within a period\n setattr(pyM, 'stateOfChargeMax_' + self.abbrvName, pyomo.Var(getattr(pyM, 'designDimensionVarSet_' +\n self.abbrvName), esM.typicalPeriods, domain=pyomo.Reals))\n # (Real) energy amount stored at the beginning of a period between periods(the i-th state of charge refers\n # to the state of charge at the beginning of the i-th period, the last index is the state of charge after\n # the last period)\n setattr(pyM, 'stateOfChargeInterPeriods_' + self.abbrvName, pyomo.Var(getattr(pyM, 'designDimensionVarSet_'\n + self.abbrvName), esM.interPeriodTimeSteps, domain=pyomo.NonNegativeReals))\n\n ####################################################################################################################\n # Declare component constraints #\n ####################################################################################################################\n\n def connectSOCs(self, pyM, esM):\n \"\"\" Constraint for connecting the state of charge with the charge and discharge operation \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n SOC = getattr(pyM, 'stateOfCharge_' + abbrvName)\n chargeOp, dischargeOp = getattr(pyM, 'chargeOp_' + abbrvName), getattr(pyM, 'dischargeOp_' + abbrvName)\n opVarSet = getattr(pyM, 'operationVarSet_' + abbrvName)\n\n def connectSOCs(pyM, loc, compName, p, t):\n return (SOC[loc, compName, p, t+1] - SOC[loc, compName, p, t] *\n (1 - compDict[compName].selfDischarge) ** esM.hoursPerTimeStep ==\n chargeOp[loc, compName, p, t] * compDict[compName].chargeEfficiency -\n dischargeOp[loc, compName, p, t] / compDict[compName].dischargeEfficiency)\n setattr(pyM, 'ConstrConnectSOC_' + abbrvName, pyomo.Constraint(opVarSet, pyM.timeSet, rule=connectSOCs))\n\n def cyclicState(self, pyM, esM):\n \"\"\" Constraint for connecting the state of charge with the charge and discharge operation \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n opVarSet = getattr(pyM, 'operationVarSet_' + abbrvName)\n SOC = getattr(pyM, 'stateOfCharge_' + abbrvName)\n if not pyM.hasTSA:\n def cyclicState(pyM, loc, compName):\n return SOC[loc, compName, 0, 0] == SOC[loc, compName, 0, esM.timeStepsPerPeriod[-1] + 1]\n else:\n SOCInter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n def cyclicState(pyM, loc, compName):\n return SOCInter[loc, compName, 0] == SOCInter[loc, compName, esM.interPeriodTimeSteps[-1]]\n setattr(pyM, 'ConstrCyclicState_' + abbrvName, pyomo.Constraint(opVarSet, rule=cyclicState))\n\n def cyclicLifetime(self, pyM, esM):\n \"\"\" Constraint for limiting the number of full cycle equivalents to stay below cyclic lifetime \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n chargeOp, capVar = getattr(pyM, 'chargeOp_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n capVarSet = getattr(pyM, 'designDimensionVarSet_' + abbrvName)\n\n def cyclicLifetime(pyM, loc, compName):\n return (sum(chargeOp[loc, compName, p, t] * esM.periodOccurrences[p] for p, t in pyM.timeSet) /\n esM.numberOfYears <= capVar[loc, compName] *\n (compDict[compName].stateOfChargeMax - compDict[compName].stateOfChargeMin) *\n compDict[compName].cyclicLifetime / 
compDict[compName].economicLifetime[loc]\n if compDict[compName].cyclicLifetime is not None else pyomo.Constraint.Skip)\n setattr(pyM, 'ConstrCyclicLifetime_' + abbrvName, pyomo.Constraint(capVarSet, rule=cyclicLifetime))\n\n def connectInterPeriodSOC(self, pyM, esM):\n \"\"\"\n The state of charge at the end of each period is equivalent to the state of charge of the period\n before it (minus its self discharge) plus the change in the state of charge which happened during\n the typical period which was assigned to that period\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n opVarSet = getattr(pyM, 'operationVarSet_' + abbrvName)\n SOC = getattr(pyM, 'stateOfCharge_' + abbrvName)\n SOCInter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n\n def connectInterSOC(pyM, loc, compName, pInter):\n return SOCInter[loc, compName, pInter + 1] == \\\n SOCInter[loc, compName, pInter] * (1 - compDict[compName].selfDischarge) ** \\\n ((esM.timeStepsPerPeriod[-1] + 1) * esM.hoursPerTimeStep) + \\\n SOC[loc, compName, esM.periodsOrder[pInter], esM.timeStepsPerPeriod[-1] + 1]\n setattr(pyM, 'ConstrInterSOC_' + abbrvName, pyomo.Constraint(opVarSet, esM.periods, rule=connectInterSOC))\n\n def intraSOCstart(self, pyM, esM):\n \"\"\" The (virtual) state of charge at the beginning of a typical period is zero \"\"\"\n abbrvName = self.abbrvName\n opVarSet = getattr(pyM, 'operationVarSet_' + abbrvName)\n SOC = getattr(pyM, 'stateOfCharge_' + abbrvName)\n\n def intraSOCstart(pyM, loc, compName, p):\n return SOC[loc, compName, p, 0] == 0\n setattr(pyM, 'ConstrSOCPeriodStart_' + abbrvName,\n pyomo.Constraint(opVarSet, esM.typicalPeriods, rule=intraSOCstart))\n\n def equalInterSOC(self, pyM, esM):\n \"\"\" If periodic storage is selected, the states of charge between periods have the same value \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n opVarSet = getattr(pyM, 'operationVarSet_' + abbrvName)\n SOCInter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n\n def equalInterSOC(pyM, loc, compName, pInter):\n return (SOCInter[loc, compName, pInter] == SOCInter[loc, compName, pInter + 1]\n if compDict[compName].isPeriodicalStorage else pyomo.Constraint.Skip)\n setattr(pyM, 'ConstrEqualInterSOC_' + abbrvName, pyomo.Constraint(opVarSet, esM.periods, rule=equalInterSOC))\n\n def minSOC(self, pyM):\n \"\"\"\n The state of charge [energyUnit] has to be larger than the installed capacity [energyUnit] multiplied\n with the relative minimum state of charge\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n capVarSet = getattr(pyM, 'designDimensionVarSet_' + abbrvName)\n SOC, capVar = getattr(pyM, 'stateOfCharge_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n\n def SOCMin(pyM, loc, compName, p, t):\n return SOC[loc, compName, p, t] >= capVar[loc, compName] * compDict[compName].stateOfChargeMin\n setattr(pyM, 'ConstrSOCMin_' + abbrvName, pyomo.Constraint(capVarSet, pyM.timeSet, rule=SOCMin))\n\n def limitSOCwithSimpleTsa(self, pyM, esM):\n \"\"\"\n Simplified version of the state of charge limitation control.\n The error compared to the precise version is small in cases of small selfDischarge.\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n capVarSimpleSet = getattr(pyM, 'designDimensionVarSetSimple_' + abbrvName)\n SOC, capVar = getattr(pyM, 'stateOfCharge_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n SOCmax, SOCmin = getattr(pyM, 'stateOfChargeMax_' + abbrvName), getattr(pyM, 'stateOfChargeMin_' + abbrvName)\n SOCInter 
= getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n\n # The maximum (virtual) state of charge during a typical period is larger than all occurring (virtual)\n # states of charge in that period (the last time step is considered in the subsequent period for t=0)\n def SOCintraPeriodMax(pyM, loc, compName, p, t):\n return SOC[loc, compName, p, t] <= SOCmax[loc, compName, p]\n setattr(pyM, 'ConstSOCintraPeriodMax_' + abbrvName,\n pyomo.Constraint(capVarSimpleSet, pyM.timeSet, rule=SOCintraPeriodMax))\n\n # The minimum (virtual) state of charge during a typical period is smaller than all occurring (virtual)\n # states of charge in that period (the last time step is considered in the subsequent period for t=0)\n def SOCintraPeriodMin(pyM, loc, compName, p, t):\n return SOC[loc, compName, p, t] >= SOCmin[loc, compName, p]\n setattr(pyM, 'ConstSOCintraPeriodMin_' + abbrvName,\n pyomo.Constraint(capVarSimpleSet, pyM.timeSet, rule=SOCintraPeriodMin))\n\n # The state of charge at the beginning of one period plus the maximum (virtual) state of charge\n # during that period has to be smaller than the installed capacities multiplied with the relative maximum\n # state of charge\n def SOCMaxSimple(pyM, loc, compName, pInter):\n return (SOCInter[loc, compName, pInter] + SOCmax[loc, compName, esM.periodsOrder[pInter]]\n <= capVar[loc, compName] * compDict[compName].stateOfChargeMax)\n setattr(pyM, 'ConstrSOCMaxSimple_' + abbrvName,\n pyomo.Constraint(capVarSimpleSet, esM.periods, rule=SOCMaxSimple))\n\n # The state of charge at the beginning of one period plus the minimum (virtual) state of charge\n # during that period has to be larger than the installed capacities multiplied with the relative minimum\n # state of charge\n def SOCMinSimple(pyM, loc, compName, pInter):\n return (SOCInter[loc, compName, pInter] * (1 - compDict[compName].selfDischarge) **\n ((esM.timeStepsPerPeriod[-1] + 1) * esM.hoursPerTimeStep)\n + SOCmin[loc, compName, esM.periodsOrder[pInter]]\n >= capVar[loc, compName] * compDict[compName].stateOfChargeMin)\n setattr(pyM, 'ConstrSOCMinSimple_' + abbrvName,\n pyomo.Constraint(capVarSimpleSet, esM.periods, rule=SOCMinSimple))\n\n def operationModeSOC(self, pyM, esM):\n \"\"\"\n State of charge [energyUnit] limited by the installed capacity [powerUnit] and the relative maximum\n state of charge\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n opVar, capVar = getattr(pyM, 'stateOfCharge_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n constrSet = getattr(pyM, 'designDimensionVarSet_' + abbrvName)\n\n # Operation [energyUnit] limited by the installed capacity [powerUnit] multiplied by the hours per time step\n def op(pyM, loc, compName, p, t):\n return (opVar[loc, compName, p, t] <=\n esM.hoursPerTimeStep * compDict[compName].stateOfChargeMax * capVar[loc, compName])\n setattr(pyM, 'ConstrSOCMaxPrecise_' + abbrvName, pyomo.Constraint(constrSet, pyM.timeSet, rule=op))\n\n def operationModeSOCwithTSA(self, pyM, esM):\n \"\"\"\n State of charge [energyUnit] limited by the installed capacity [powerUnit] and the relative maximum\n state of charge\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n SOCinter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n SOC, capVar = getattr(pyM, 'stateOfCharge_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n constrSet = getattr(pyM, 'designDimensionVarSet_' + abbrvName)\n\n def SOCMaxPrecise(pyM, loc, compName, pInter, t):\n if compDict[compName].doPreciseTsaModeling:\n return (SOCinter[loc, 
compName, pInter] *\n ((1 - compDict[compName].selfDischarge) ** (t * esM.hoursPerTimeStep)) +\n SOC[loc, compName, esM.periodsOrder[pInter], t]\n <= capVar[loc, compName] * compDict[compName].stateOfChargeMax)\n else:\n return pyomo.Constraint.Skip\n setattr(pyM, 'ConstrSOCMaxPrecise_' + abbrvName,\n pyomo.Constraint(constrSet, esM.periods, esM.timeStepsPerPeriod, rule=SOCMaxPrecise))\n\n def minSOCwithTSAprecise(self, pyM, esM):\n \"\"\"\n The state of charge at each time step cannot be smaller than the installed capacity multiplied with the\n relative minimum state of charge\n \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n SOCinter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n SOC, capVar = getattr(pyM, 'stateOfCharge_' + abbrvName), getattr(pyM, 'cap_' + abbrvName)\n capVarPreciseSet = getattr(pyM, 'designDimensionVarSetPrecise_' + abbrvName)\n\n def SOCMinPrecise(pyM, loc, compName, pInter, t):\n return (SOCinter[loc, compName, pInter] * ((1 - compDict[compName].selfDischarge) **\n (t * esM.hoursPerTimeStep)) + SOC[loc, compName, esM.periodsOrder[pInter], t]\n >= capVar[loc, compName] * compDict[compName].stateOfChargeMin)\n setattr(pyM, 'ConstrSOCMinPrecise_' + abbrvName,\n pyomo.Constraint(capVarPreciseSet, esM.periods, esM.timeStepsPerPeriod, rule=SOCMinPrecise))\n\n def declareComponentConstraints(self, esM, pyM):\n \"\"\" Declares time independent and dependent constraints\"\"\"\n\n ################################################################################################################\n # Declare time independent constraints #\n ################################################################################################################\n\n # Determine the components' capacities from the number of installed units\n self.capToNbReal(pyM)\n # Determine the components' capacities from the number of installed units\n self.capToNbInt(pyM)\n # Enforce the consideration of the binary design variables of a component\n self.bigM(pyM)\n # Enforce the consideration of minimum capacities for components with design decision variables\n self.capacityMinDec(pyM)\n # Sets, if applicable, the installed capacities of a component\n self.capacityFix(pyM)\n # Sets, if applicable, the binary design variables of a component\n self.designBinFix(pyM)\n\n ################################################################################################################\n # Declare time dependent constraints #\n ################################################################################################################\n\n # Constraint for connecting the state of charge with the charge and discharge operation\n self.connectSOCs(pyM, esM)\n\n # Constraints for enforcing charging operation modes #\n\n # Charging of storage [energyUnit] limited by the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the charging rate factor [powerUnit/energyUnit]\n self.operationMode1(pyM, esM, 'ConstrCharge', 'chargeOpConstrSet', 'chargeOp', 'chargeRate')\n # Charging of storage [energyUnit] limited by the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the charging operation time series [powerUnit/energyUnit]\n self.operationMode2(pyM, esM, 'ConstrCharge', 'chargeOpConstrSet', 'chargeOp')\n # Charging of storage [energyUnit] equal to the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the charging operation time series [powerUnit/energyUnit]\n self.operationMode3(pyM, esM, 
'ConstrCharge', 'chargeOpConstrSet', 'chargeOp')\n # Operation [energyUnit] limited by the operation time series [energyUnit]\n self.operationMode4(pyM, esM, 'ConstrCharge', 'chargeOpConstrSet', 'chargeOp')\n # Operation [energyUnit] equal to the operation time series [energyUnit]\n self.operationMode5(pyM, esM, 'ConstrCharge', 'chargeOpConstrSet', 'chargeOp')\n\n # Constraints for enforcing discharging operation modes #\n\n # Discharging of storage [energyUnit] limited by the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the discharging rate factor [powerUnit/energyUnit]\n self.operationMode1(pyM, esM, 'ConstrDischarge', 'dischargeOpConstrSet', 'dischargeOp', 'dischargeRate')\n # Discharging of storage [energyUnit] limited by the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the charging operation time series [powerUnit/energyUnit]\n self.operationMode2(pyM, esM, 'ConstrDischarge', 'dischargeOpConstrSet', 'dischargeOp')\n # Discharging of storage [energyUnit] equal to the installed capacity [energyUnit] multiplied by the hours per\n # time step [h] and the charging operation time series [powerUnit/energyUnit]\n self.operationMode3(pyM, esM, 'ConstrDischarge', 'dischargeOpConstrSet', 'dischargeOp')\n # Operation [energyUnit] limited by the operation time series [energyUnit]\n self.operationMode4(pyM, esM, 'ConstrDischarge', 'dischargeOpConstrSet', 'dischargeOp')\n # Operation [energyUnit] equal to the operation time series [energyUnit]\n self.operationMode5(pyM, esM, 'ConstrDischarge', 'dischargeOpConstrSet', 'dischargeOp')\n\n # Cyclic constraint enforcing that all storages have the same state of charge at the the beginning of the first\n # and the end of the last time step\n self.cyclicState(pyM, esM)\n\n # Constraint for limiting the number of full cycle equivalents to stay below cyclic lifetime\n self.cyclicLifetime(pyM, esM)\n\n if pyM.hasTSA:\n # The state of charge at the end of each period is equivalent to the state of charge of the period before it\n # (minus its self discharge) plus the change in the state of charge which happened during the typical\n # # period which was assigned to that period\n self.connectInterPeriodSOC(pyM, esM)\n # The (virtual) state of charge at the beginning of a typical period is zero\n self.intraSOCstart(pyM, esM)\n # If periodic storage is selected, the states of charge between periods have the same value\n self.equalInterSOC(pyM, esM)\n\n # Ensure that the state of charge is within the operating limits of the installed capacities\n if not pyM.hasTSA:\n # Constraints for enforcing a state of charge operation mode within given limits #\n\n # State of charge [energyUnit] limited by the installed capacity [energyUnit] and the relative maximum\n # state of charge\n self.operationModeSOC(pyM, esM)\n\n # The state of charge [energyUnit] has to be larger than the installed capacity [energyUnit] multiplied\n # with the relative minimum state of charge\n self.minSOC(pyM)\n\n else:\n # Simplified version of the state of charge limitation control #\n # (The error compared to the precise version is small in cases of small selfDischarge) #\n self.limitSOCwithSimpleTsa(pyM, esM)\n\n # Precise version of the state of charge limitation control #\n\n # Constraints for enforcing a state of charge operation within given limits\n\n # State of charge [energyUnit] limited by the installed capacity [energyUnit] and the relative maximum\n # state of charge\n self.operationModeSOCwithTSA(pyM, esM)\n\n # 
The state of charge at each time step cannot be smaller than the installed capacity multiplied with the\n # relative minimum state of charge\n self.minSOCwithTSAprecise(pyM, esM)\n\n ####################################################################################################################\n # Declare component contributions to basic EnergySystemModel constraints and its objective function #\n ####################################################################################################################\n\n def getSharedPotentialContribution(self, pyM, key, loc):\n \"\"\" Gets contributions to shared location potential \"\"\"\n return super().getSharedPotentialContribution(pyM, key, loc)\n\n def hasOpVariablesForLocationCommodity(self, esM, loc, commod):\n return any([comp.commodity == commod and comp.locationalEligibility[loc] == 1\n for comp in self.componentsDict.values()])\n\n def getCommodityBalanceContribution(self, pyM, commod, loc, p, t):\n \"\"\" Gets contribution to a commodity balance \"\"\"\n compDict, abbrvName = self.componentsDict, self.abbrvName\n chargeOp, dischargeOp = getattr(pyM, 'chargeOp_' + abbrvName), getattr(pyM, 'dischargeOp_' + abbrvName)\n opVarDict = getattr(pyM, 'operationVarDict_' + abbrvName)\n return sum(dischargeOp[loc, compName, p, t] - chargeOp[loc, compName, p, t]\n for compName in opVarDict[loc] if commod == self.componentsDict[compName].commodity)\n\n def getObjectiveFunctionContribution(self, esM, pyM):\n \"\"\" Gets contribution to the objective function \"\"\"\n\n capexCap = self.getEconomicsTI(pyM, ['investPerCapacity'], 'cap', 'CCF')\n capexDec = self.getEconomicsTI(pyM, ['investIfBuilt'], 'designBin', 'CCF')\n opexCap = self.getEconomicsTI(pyM, ['opexPerCapacity'], 'cap')\n opexDec = self.getEconomicsTI(pyM, ['opexIfBuilt'], 'designBin')\n opexOp1 = self.getEconomicsTD(pyM, esM, ['opexPerChargeOperation'], 'chargeOp', 'operationVarDict')\n opexOp2 = self.getEconomicsTD(pyM, esM, ['opexPerDischargeOperation'], 'dischargeOp', 'operationVarDict')\n\n return capexCap + capexDec + opexCap + opexDec + opexOp1 + opexOp2\n\n ####################################################################################################################\n # Return optimal values of the component class #\n ####################################################################################################################\n\n def setOptimalValues(self, esM, pyM):\n compDict, abbrvName = self.componentsDict, self.abbrvName\n chargeOp, dischargeOp = getattr(pyM, 'chargeOp_' + abbrvName), getattr(pyM, 'dischargeOp_' + abbrvName)\n SOC = getattr(pyM, 'stateOfCharge_' + abbrvName)\n\n # Set optimal design dimension variables and get basic optimization summary\n optSummaryBasic = super().setOptimalValues(esM, pyM, esM.locations, 'commodityUnit', '*h')\n\n # Set optimal operation variables and append optimization summary\n props = ['operationCharge', 'operationDischarge', 'opexCharge', 'opexDischarge']\n units = ['[-]', '[-]', '[' + esM.costUnit + '/a]', '[' + esM.costUnit + '/a]']\n tuples = [(compName, prop, unit) for compName in compDict.keys() for prop, unit in zip(props, units)]\n tuples = list(map(lambda x: (x[0], x[1], '[' + compDict[x[0]].commodityUnit + '*h/a]')\n if x[1] == 'operationCharge' else x, tuples))\n tuples = list(map(lambda x: (x[0], x[1], '[' + compDict[x[0]].commodityUnit + '*h/a]')\n if x[1] == 'operationDischarge' else x, tuples))\n mIndex = pd.MultiIndex.from_tuples(tuples, names=['Component', 'Property', 'Unit'])\n optSummary = 
pd.DataFrame(index=mIndex, columns=sorted(esM.locations)).sort_index()\n\n # * charge variables and contributions\n optVal = utils.formatOptimizationOutput(chargeOp.get_values(), 'operationVariables', '1dim', esM.periodsOrder)\n self.chargeOperationVariablesOptimum = optVal\n\n if optVal is not None:\n opSum = optVal.sum(axis=1).unstack(-1)\n ox = opSum.apply(lambda op: op * compDict[op.name].opexPerChargeOperation[op.index], axis=1)\n optSummary.loc[[(ix, 'operationCharge', '[' + compDict[ix].commodityUnit + '*h/a]')\n for ix in opSum.index], opSum.columns] = opSum.values/esM.numberOfYears\n optSummary.loc[[(ix, 'opexCharge', '[' + esM.costUnit + '/a]') for ix in ox.index],\n ox.columns] = ox.values/esM.numberOfYears\n\n # * discharge variables and contributions\n optVal = utils.formatOptimizationOutput(dischargeOp.get_values(), 'operationVariables', '1dim',\n esM.periodsOrder)\n self.dischargeOperationVariablesOptimum = optVal\n\n if optVal is not None:\n opSum = optVal.sum(axis=1).unstack(-1)\n ox = opSum.apply(lambda op: op * compDict[op.name].opexPerDischargeOperation[op.index], axis=1)\n optSummary.loc[[(ix, 'operationDischarge', '[' + compDict[ix].commodityUnit + '*h/a]')\n for ix in opSum.index], opSum.columns] = opSum.values/esM.numberOfYears\n optSummary.loc[[(ix, 'opexDischarge', '[' + esM.costUnit + '/a]') for ix in ox.index],\n ox.columns] = ox.values/esM.numberOfYears\n\n # * set state of charge variables\n if not pyM.hasTSA:\n optVal = utils.formatOptimizationOutput(SOC.get_values(), 'operationVariables', '1dim', esM.periodsOrder)\n # Remove the last column (by applying the cycle constraint, the first and the last columns are equal to each\n # other)\n optVal = optVal.loc[:, :len(optVal.columns) - 2]\n self.stateOfChargeOperationVariablesOptimum = optVal\n utils.setOptimalComponentVariables(optVal, '_stateOfChargeVariablesOptimum', compDict)\n else:\n SOCinter = getattr(pyM, 'stateOfChargeInterPeriods_' + abbrvName)\n stateOfChargeIntra = SOC.get_values()\n stateOfChargeInter = SOCinter.get_values()\n if stateOfChargeIntra is not None:\n # Convert dictionary to DataFrame, transpose, put the period column first and sort the index\n # Results in a one dimensional DataFrame\n stateOfChargeIntra = pd.DataFrame(stateOfChargeIntra, index=[0]).T.swaplevel(i=0, j=-2).sort_index()\n stateOfChargeInter = pd.DataFrame(stateOfChargeInter, index=[0]).T.swaplevel(i=0, j=1).sort_index()\n # Unstack time steps (convert to a two dimensional DataFrame with the time indices being the columns)\n stateOfChargeIntra = stateOfChargeIntra.unstack(level=-1)\n stateOfChargeInter = stateOfChargeInter.unstack(level=-1)\n # Get rid of the unnecessary 0 level\n stateOfChargeIntra.columns = stateOfChargeIntra.columns.droplevel()\n stateOfChargeInter.columns = stateOfChargeInter.columns.droplevel()\n # Concat data\n data = []\n for count, p in enumerate(esM.periodsOrder):\n data.append((stateOfChargeInter.loc[:, count] +\n stateOfChargeIntra.loc[p].loc[:, :esM.timeStepsPerPeriod[-1]].T).T)\n optVal = pd.concat(data, axis=1, ignore_index=True)\n else:\n optVal = None\n self.stateOfChargeOperationVariablesOptimum = optVal\n utils.setOptimalComponentVariables(optVal, '_stateOfChargeVariablesOptimum', compDict)\n\n # Append optimization summaries\n optSummary = optSummary.append(optSummaryBasic).sort_index()\n\n # Summarize all contributions to the total annual cost\n optSummary.loc[optSummary.index.get_level_values(1) == 'TAC'] = \\\n optSummary.loc[(optSummary.index.get_level_values(1) == 'TAC') |\n 
(optSummary.index.get_level_values(1) == 'opexCharge') |\n (optSummary.index.get_level_values(1) == 'opexDischarge')].groupby(level=0).sum().values\n\n self.optSummary = optSummary\n\n def getOptimalValues(self, name='all'):\n \"\"\"\n Returns optimal values of the components\n\n :param name: name of the variables of which the optimal values should be returned:\\n\n * 'capacityVariables',\n * 'isBuiltVariables',\n * 'chargeOperationVariablesOptimum',\n * 'dischargeOperationVariablesOptimum',\n * 'stateOfChargeOperationVariablesOptimum',\n * 'all' or another input: all variables are returned.\\n\n :type name: string\n \"\"\"\n if name == 'capacityVariablesOptimum':\n return {'values': self.capacityVariablesOptimum, 'timeDependent': False, 'dimension': self.dimension}\n elif name == 'isBuiltVariablesOptimum':\n return {'values': self.isBuiltVariablesOptimum, 'timeDependent': False, 'dimension': self.dimension}\n elif name == 'chargeOperationVariablesOptimum':\n return {'values': self.chargeOperationVariablesOptimum, 'timeDependent': True, 'dimension': self.dimension}\n elif name == 'dischargeOperationVariablesOptimum':\n return {'values': self.dischargeOperationVariablesOptimum, 'timeDependent': True, 'dimension':\n self.dimension}\n elif name == 'stateOfChargeOperationVariablesOptimum':\n return {'values': self.stateOfChargeOperationVariablesOptimum, 'timeDependent': True, 'dimension':\n self.dimension}\n else:\n return {'capacityVariablesOptimum': {'values': self.capacityVariablesOptimum, 'timeDependent': False,\n 'dimension': self.dimension},\n 'isBuiltVariablesOptimum': {'values': self.isBuiltVariablesOptimum, 'timeDependent': False,\n 'dimension': self.dimension},\n 'chargeOperationVariablesOptimum': {'values': self.chargeOperationVariablesOptimum,\n 'timeDependent': True, 'dimension': self.dimension},\n 'dischargeOperationVariablesOptimum': {'values': self.dischargeOperationVariablesOptimum,\n 'timeDependent': True, 'dimension': self.dimension},\n 'stateOfChargeOperationVariablesOptimum': {'values': self.stateOfChargeOperationVariablesOptimum,\n 'timeDependent': True, 'dimension': self.dimension}}\n"
] | [
[
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"pandas.concat"
]
] |
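The storage component code in the row above builds its precise maximum state-of-charge bound from the inter-period SOC, decayed by self-discharge, plus the intra-period SOC. Below is a minimal, self-contained sketch of that single constraint in plain Pyomo with made-up parameter values; it illustrates the structure of the rule only and is not the component class itself.

import pyomo.environ as pyo

# Assumed toy parameters; in the row above these come from compDict[compName]
# and the energy system model (esM.hoursPerTimeStep, esM.timeStepsPerPeriod).
selfDischarge = 0.001
hoursPerTimeStep = 1.0
stateOfChargeMax = 0.95
timeSteps = range(4)

m = pyo.ConcreteModel()
m.cap = pyo.Var(within=pyo.NonNegativeReals)        # installed capacity [energyUnit]
m.SOCinter = pyo.Var(within=pyo.NonNegativeReals)   # state of charge at the period start
m.SOC = pyo.Var(timeSteps)                          # intra-period state-of-charge profile

def SOCMaxPrecise(m, t):
    # decayed inter-period SOC plus intra-period SOC must stay below cap * SOCmax
    return (m.SOCinter * (1 - selfDischarge) ** (t * hoursPerTimeStep) + m.SOC[t]
            <= m.cap * stateOfChargeMax)

m.ConstrSOCMaxPrecise = pyo.Constraint(timeSteps, rule=SOCMaxPrecise)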
yzR1991/Copulas | [
"72c0d9c398f7fe3eb075b56591911fea377cdc33"
] | [
"tests/unit/multivariate/test_gaussian.py"
] | [
"from unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nimport numpy as np\nimport pandas as pd\n\nfrom copulas import get_qualified_name\nfrom copulas.multivariate.gaussian import GaussianMultivariate\nfrom copulas.univariate import GaussianUnivariate\n\n\nclass TestGaussianMultivariate(TestCase):\n\n def setUp(self):\n \"\"\"Defines random variable to use on tests. \"\"\"\n\n self.data = pd.DataFrame({\n 'column1': np.array([\n 2641.16233666, 921.14476418, -651.32239137, 1223.63536668,\n 3233.37342355, 1373.22400821, 1959.28188858, 1076.99295365,\n 2029.25100261, 1835.52188141, 1170.03850556, 739.42628394,\n 1866.65810627, 3703.49786503, 1719.45232017, 258.90206528,\n 219.42363944, 609.90212377, 1618.44207239, 2323.2775272,\n 3251.78732274, 1430.63989981, -180.57028875, -592.84497457,\n ]),\n 'column2': np.array([\n 180.2425623, 192.35609972, 150.24830291, 156.62123653,\n 173.80311908, 191.0922843, 163.22252158, 190.73280428,\n 158.52982435, 163.0101334, 205.24904026, 175.42916046,\n 208.31821984, 178.98351969, 160.50981075, 163.19294974,\n 173.30395132, 215.18996298, 164.71141696, 178.84973821,\n 182.99902513, 217.5796917, 201.56983421, 174.92272693\n ]),\n 'column3': np.array([\n -1.42432446, -0.14759864, 0.66476302, -0.04061445, 0.64305762,\n 1.79615407, 0.70450457, -0.05886671, -0.36794788, 1.39331262,\n 0.39792831, 0.0676313, -0.96761759, 0.67286132, -0.55013279,\n -0.53118328, 1.23969655, -0.35985016, -0.03568531, 0.91456357,\n 0.49077378, -0.27428204, 0.45857406, 2.29614033\n ])\n })\n\n def test__transform_to_normal_numpy_1d(self):\n # Setup\n gm = GaussianMultivariate()\n dist_a = Mock()\n dist_a.cdf.return_value = np.array([0])\n dist_b = Mock()\n dist_b.cdf.return_value = np.array([0.3])\n gm.columns = ['a', 'b']\n gm.univariates = [dist_a, dist_b]\n\n # Run\n data = np.array([\n [3, 5],\n ])\n returned = gm._transform_to_normal(data)\n\n # Check\n # Failures may occurr on different cpytonn implementations\n # with different float precision values.\n # If that happens, atol might need to be increased\n expected = np.array([\n [-5.166579, -0.524401],\n ])\n np.testing.assert_allclose(returned, expected, atol=1e-6)\n\n assert dist_a.cdf.call_count == 1\n expected = np.array([3])\n passed = dist_a.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n assert dist_b.cdf.call_count == 1\n expected = np.array([5])\n passed = dist_b.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n def test__transform_to_normal_numpy_2d(self):\n # Setup\n gm = GaussianMultivariate()\n dist_a = Mock()\n dist_a.cdf.return_value = np.array([0, 0.5, 1])\n dist_b = Mock()\n dist_b.cdf.return_value = np.array([0.3, 0.5, 0.7])\n gm.columns = ['a', 'b']\n gm.univariates = [dist_a, dist_b]\n\n # Run\n data = np.array([\n [3, 5],\n [4, 6],\n [5, 7],\n ])\n returned = gm._transform_to_normal(data)\n\n # Check\n # Failures may occurr on different cpytonn implementations\n # with different float precision values.\n # If that happens, atol might need to be increased\n expected = np.array([\n [-5.166579, -0.524401],\n [0.0, 0.0],\n [5.166579, 0.524401]\n ])\n np.testing.assert_allclose(returned, expected, atol=1e-6)\n\n assert dist_a.cdf.call_count == 1\n expected = np.array([3, 4, 5])\n passed = dist_a.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n assert dist_b.cdf.call_count == 1\n expected = np.array([5, 6, 7])\n passed = dist_b.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n def 
test__transform_to_normal_series(self):\n # Setup\n gm = GaussianMultivariate()\n dist_a = Mock()\n dist_a.cdf.return_value = np.array([0])\n dist_b = Mock()\n dist_b.cdf.return_value = np.array([0.3])\n gm.columns = ['a', 'b']\n gm.univariates = [dist_a, dist_b]\n\n # Run\n data = pd.Series({'a': 3, 'b': 5})\n returned = gm._transform_to_normal(data)\n\n # Check\n # Failures may occurr on different cpytonn implementations\n # with different float precision values.\n # If that happens, atol might need to be increased\n expected = np.array([\n [-5.166579, -0.524401],\n ])\n np.testing.assert_allclose(returned, expected, atol=1e-6)\n\n assert dist_a.cdf.call_count == 1\n expected = np.array([3])\n passed = dist_a.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n assert dist_b.cdf.call_count == 1\n expected = np.array([5])\n passed = dist_b.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n def test__transform_to_normal_dataframe(self):\n # Setup\n gm = GaussianMultivariate()\n dist_a = Mock()\n dist_a.cdf.return_value = np.array([0, 0.5, 1])\n dist_b = Mock()\n dist_b.cdf.return_value = np.array([0.3, 0.5, 0.7])\n gm.columns = ['a', 'b']\n gm.univariates = [dist_a, dist_b]\n\n # Run\n data = pd.DataFrame({\n 'a': [3, 4, 5],\n 'b': [5, 6, 7]\n })\n returned = gm._transform_to_normal(data)\n\n # Check\n # Failures may occurr on different cpytonn implementations\n # with different float precision values.\n # If that happens, atol might need to be increased\n expected = np.array([\n [-5.166579, -0.524401],\n [0.0, 0.0],\n [5.166579, 0.524401]\n ])\n np.testing.assert_allclose(returned, expected, atol=1e-6)\n\n assert dist_a.cdf.call_count == 1\n expected = np.array([3, 4, 5])\n passed = dist_a.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n assert dist_b.cdf.call_count == 1\n expected = np.array([5, 6, 7])\n passed = dist_b.cdf.call_args[0][0]\n np.testing.assert_allclose(expected, passed)\n\n def test__get_covariance(self):\n \"\"\"_get_covariance computes the covariance matrix of normalized values.\"\"\"\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n expected_covariance = np.array([\n [1., -0.01261819, -0.19821644],\n [-0.01261819, 1., -0.16896087],\n [-0.19821644, -0.16896087, 1.]\n ])\n\n # Run\n covariance = copula._get_covariance(self.data)\n\n # Check\n assert np.isclose(covariance, expected_covariance).all().all()\n\n def test_fit_default_distribution(self):\n \"\"\"On fit, a distribution is created for each column along the covariance and means\"\"\"\n\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n for i, key in enumerate(self.data.columns):\n assert copula.columns[i] == key\n assert copula.univariates[i].__class__ == GaussianUnivariate\n assert copula.univariates[i]._params['loc'] == self.data[key].mean()\n assert copula.univariates[i]._params['scale'] == np.std(self.data[key])\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()\n\n def test_fit_distribution_arg(self):\n \"\"\"On fit, the distributions for each column use instances of copula.distribution.\"\"\"\n # Setup\n distribution = 'copulas.univariate.gaussian_kde.GaussianKDE'\n copula = GaussianMultivariate(distribution=distribution)\n\n # Run\n copula.fit(self.data)\n\n # Check\n assert copula.distribution == 'copulas.univariate.gaussian_kde.GaussianKDE'\n\n for i, key in enumerate(self.data.columns):\n assert 
copula.columns[i] == key\n assert get_qualified_name(copula.univariates[i].__class__) == copula.distribution\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()\n\n def test_fit_distribution_selector(self):\n \"\"\"\n On fit, it should use the correct distributions for those that are\n specified and default to using the base class otherwise.\n \"\"\"\n copula = GaussianMultivariate(distribution={\n 'column1': 'copulas.univariate.beta.BetaUnivariate',\n 'column2': 'copulas.univariate.gaussian_kde.GaussianKDE',\n })\n copula.fit(self.data)\n\n assert get_qualified_name(\n copula.univariates[0].__class__) == 'copulas.univariate.beta.BetaUnivariate'\n assert get_qualified_name(\n copula.univariates[1].__class__) == 'copulas.univariate.gaussian_kde.GaussianKDE'\n assert get_qualified_name(\n copula.univariates[2].__class__) == 'copulas.univariate.base.Univariate'\n\n def test_fit_numpy_array(self):\n \"\"\"Fit should work indistinctly with numpy arrays and pandas dataframes \"\"\"\n # Setup\n copula = GaussianMultivariate(\n distribution='copulas.univariate.gaussian.GaussianUnivariate')\n\n # Run\n copula.fit(self.data.values)\n\n # Check\n for key, (column, univariate) in enumerate(zip(self.data.columns, copula.univariates)):\n assert univariate._params['loc'] == np.mean(self.data[column])\n assert univariate._params['scale'] == np.std(self.data[column])\n\n expected_covariance = copula._get_covariance(pd.DataFrame(self.data.values))\n assert (copula.covariance == expected_covariance).all().all()\n\n def test_probability_density(self):\n \"\"\"Probability_density computes probability for the given values.\"\"\"\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 0.])\n expected_result = 0.032245296420409846\n\n # Run\n result = copula.probability_density(X)\n\n # Check\n self.assertAlmostEqual(result, expected_result)\n\n def test_cumulative_distribution_fit_df_call_np_array(self):\n \"\"\"Cumulative_density integrates the probability density along the given values.\"\"\"\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()\n\n def test_cumulative_distribution_fit_call_np_array(self):\n \"\"\"Cumulative_density integrates the probability density along the given values.\"\"\"\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.values)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()\n\n def test_cumulative_distribution_fit_call_pd(self):\n \"\"\"Cumulative_density integrates the probability density along the given values.\"\"\"\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.values)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()\n\n @patch('copulas.multivariate.gaussian.np.random.multivariate_normal')\n def test_sample(self, normal_mock):\n \"\"\"Sample use the inverse-transform method to generate new samples.\"\"\"\n # Setup\n instance = 
GaussianMultivariate(GaussianUnivariate)\n data = pd.DataFrame([\n {'A': 25, 'B': 75, 'C': 100},\n {'A': 30, 'B': 60, 'C': 250},\n {'A': 10, 'B': 65, 'C': 350},\n {'A': 20, 'B': 80, 'C': 150},\n {'A': 25, 'B': 70, 'C': 500}\n ])\n instance.fit(data)\n\n normal_mock.return_value = np.array([\n [0.1, 0.1, 0.1],\n [0.2, 0.2, 0.2],\n [0.4, 0.4, 0.4],\n [0.6, 0.6, 0.6],\n [0.8, 0.8, 0.8]\n ])\n\n expected_result = pd.DataFrame([\n {'A': 22.678232998312527, 'B': 70.70710678118655, 'C': 284.35270009440734},\n {'A': 23.356465996625055, 'B': 71.41421356237309, 'C': 298.7054001888146},\n {'A': 24.712931993250110, 'B': 72.82842712474618, 'C': 327.4108003776293},\n {'A': 26.069397989875164, 'B': 74.24264068711929, 'C': 356.116200566444},\n {'A': 27.425863986500215, 'B': 75.65685424949238, 'C': 384.8216007552586}\n ])\n\n # Run\n result = instance.sample(5)\n\n # Check\n assert result.equals(expected_result)\n\n assert normal_mock.called_once_with(\n np.zeros(instance.covariance.shape[0]),\n instance.covariance,\n 5\n )\n\n def test_sample_random_state(self):\n \"\"\"When random_state is set the samples are the same.\"\"\"\n # Setup\n instance = GaussianMultivariate(GaussianUnivariate, random_seed=0)\n data = pd.DataFrame([\n {'A': 25, 'B': 75, 'C': 100},\n {'A': 30, 'B': 60, 'C': 250},\n {'A': 10, 'B': 65, 'C': 350},\n {'A': 20, 'B': 80, 'C': 150},\n {'A': 25, 'B': 70, 'C': 500}\n ])\n instance.fit(data)\n\n expected_result = pd.DataFrame(\n np.array([\n [25.19031668, 61.96527251, 543.43595269],\n [31.50262306, 49.70971698, 429.06537124],\n [20.31636799, 64.3492326, 384.27561823],\n [25.00302427, 72.06019812, 415.85215123],\n [23.07525773, 66.70901743, 390.8226672]\n ]),\n columns=['A', 'B', 'C']\n )\n\n # Run\n result = instance.sample(5)\n\n # Check\n pd.testing.assert_frame_equal(result, expected_result, check_less_precise=True)\n\n def test_to_dict(self):\n \"\"\"To_dict returns the parameters to replicate the copula.\"\"\"\n # Setup\n copula = GaussianMultivariate()\n copula.fit(self.data)\n\n # Run\n result = copula.to_dict()\n\n # Asserts\n assert result['type'] == 'copulas.multivariate.gaussian.GaussianMultivariate'\n assert result['columns'] == ['column1', 'column2', 'column3']\n assert len(result['univariates']) == 3\n\n expected_cov = copula._get_covariance(self.data).tolist()\n np.testing.assert_equal(result['covariance'], expected_cov)\n\n for univariate, result_univariate in zip(copula.univariates, result['univariates']):\n assert univariate.to_dict() == result_univariate\n\n def test_from_dict(self):\n \"\"\"from_dict generates a new instance from its parameters.\"\"\"\n # Setup\n copula = GaussianMultivariate()\n copula.fit(self.data)\n copula_dict = copula.to_dict()\n\n # Run\n new_copula = GaussianMultivariate.from_dict(copula_dict)\n\n # Asserts\n assert isinstance(new_copula, GaussianMultivariate)\n assert new_copula.columns == ['column1', 'column2', 'column3']\n assert len(new_copula.univariates) == 3\n\n for new_univariate, old_univariate in zip(copula.univariates, new_copula.univariates):\n assert new_univariate.to_dict() == old_univariate.to_dict()\n\n def test_sample_constant_column(self):\n \"\"\"Gaussian copula can sample after being fit with a constant column.\n\n This process will raise warnings when computing the covariance matrix\n \"\"\"\n # Setup\n instance = GaussianMultivariate()\n X = np.array([\n [1.0, 2.0],\n [1.0, 3.0],\n [1.0, 4.0],\n [1.0, 5.0]\n ])\n instance.fit(X)\n\n # Run\n result = instance.sample(5)\n\n # Check\n assert result.shape == (5, 2)\n assert 
result[~result.isnull()].all().all()\n assert result.loc[:, 0].equals(pd.Series([1.0, 1.0, 1.0, 1.0, 1.0], name=0))\n\n # This is to check that the samples on the non constant column are not constant too.\n assert len(result.loc[:, 1].unique()) > 1\n\n covariance = instance.covariance\n assert (~pd.isnull(covariance)).all().all()\n"
] | [
[
"pandas.Series",
"numpy.zeros",
"numpy.testing.assert_equal",
"pandas.DataFrame",
"numpy.isclose",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.std",
"pandas.testing.assert_frame_equal",
"numpy.mean",
"pandas.isnull"
]
] |
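The tests in the row above exercise GaussianMultivariate._transform_to_normal, whose expected values (around -5.166579 and -0.524401) are the standard normal inverse CDF applied to the per-column CDF values, with probabilities at exactly 0 or 1 clipped away. A minimal sketch of that probit step follows; the clipping epsilon is an assumption chosen for illustration, not necessarily the library's constant.

import numpy as np
from scipy import stats

def transform_to_normal(cdf_values, eps=1e-7):
    # clip away exact 0/1 so the inverse normal CDF stays finite
    clipped = np.clip(np.asarray(cdf_values, dtype=float), eps, 1 - eps)
    return stats.norm.ppf(clipped)

print(transform_to_normal([0.0, 0.3, 0.5, 1.0]))
# roughly [-5.2, -0.5244, 0.0, 5.2] for this choice of eps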
Cheol-H-Jeong/Doridori-Counter | [
"c16da56dbbcccdc24033ddb9435d13506feb8b99"
] | [
"doridori.py"
] | [
"import cv2\nimport mediapipe as mp\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\nfrom scipy.signal import find_peaks\nfrom celluloid import Camera\nfrom tqdm import tqdm\n\nclass Doridori:\n def __init__(self,filepath):\n self.cap = cv2.VideoCapture(filepath)\n self.total_frame = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n self.df = np.array([])\n self.distance_list = np.array([])\n self.peaks = np.array([])\n \n def detect_face(self):\n frame_cnt = 0\n nose_x = list()\n nose_y = list()\n nose_z = list()\n mp_face_mesh = mp.solutions.face_mesh\n with mp_face_mesh.FaceMesh(\n static_image_mode=True,\n max_num_faces=1,\n min_detection_confidence=0.5) as face_mesh:\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret:\n frame_cnt += 1\n results = face_mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n if results.multi_face_landmarks:\n x, y, z = self.__getNose(results.multi_face_landmarks)\n nose_x.append(x)\n nose_y.append(y)\n nose_z.append(z)\n if frame_cnt >= self.total_frame:\n print(\"============End Video============\")\n self.df = np.array([nose_x, nose_y, nose_z]).T\n break\n self.cap.release()\n cv2.destroyAllWindows()\n return self.df\n\n def fit(self, data = np.array([]), threshold=0.004, min_peak_distance = 12, display_mode = True):\n distance_list = list()\n if data.size == 0:\n df = self.df\n else:\n df = data\n for i in range(1, len(df)):\n distance_list.append(distance.euclidean(df[i-1,:], df[i,:]))\n peaks_index = find_peaks(distance_list, distance=min_peak_distance)[0]\n low_peak_index = list()\n for i, j in enumerate (peaks_index):\n if distance_list[j] < threshold:\n low_peak_index.append(i)\n peaks_index= np.delete(peaks_index, low_peak_index)\n print(f\"total_doridori_count : {len(peaks_index)}\")\n peaks = list()\n for i, value in enumerate (distance_list):\n if i in peaks_index:\n peaks.append(value)\n else:\n peaks.append(np.nan)\n if display_mode:\n plt.figure(figsize=(25,8))\n plt.plot(distance_list)\n plt.plot(peaks, 'ro')\n \n self.distance_list = distance_list\n self.peaks = peaks\n \n return len(peaks_index)\n \n def save_video(self, filepath, display_frame = 100, frame_rate = 30.0, video_size=(25,8)):\n fig, ax = plt.subplots(figsize=video_size)\n camera = Camera(fig)\n padding_nan = np.empty(display_frame)\n padding_nan[:] = np.nan\n distance_with_nan = np.concatenate([padding_nan, self.distance_list])\n peaks_with_nan = np.concatenate([padding_nan, self.peaks])\n for i in tqdm(range(display_frame, len(distance_with_nan))):\n ax.plot(distance_with_nan[i-display_frame:i], c='blue')\n ax.plot(peaks_with_nan[i-display_frame:i], 'ro')\n camera.snap()\n print(f\"saving to {filepath}\")\n animation = camera.animate(interval=1000.0/frame_rate)\n animation.save(filepath)\n plt.close(fig)\n \n def __getNose(self, landmarks):\n x = 0\n y = 0\n z = 0\n landmark = list(landmarks)\n for mark in landmark:\n x = mark.landmark[0].x\n y = mark.landmark[0].y\n z = mark.landmark[0].z\n return x, y, z\n "
] | [
[
"numpy.empty",
"matplotlib.pyplot.figure",
"scipy.spatial.distance.euclidean",
"matplotlib.pyplot.subplots",
"scipy.signal.find_peaks",
"numpy.delete",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.concatenate"
]
] |
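Doridori.fit in the row above reduces the nose trajectory to frame-to-frame Euclidean distances and counts head shakes as peaks of that signal, discarding peaks closer together than min_peak_distance frames or below a threshold. The sketch below reproduces just that counting step on a synthetic trajectory (an assumption for illustration), so it runs without a video file or mediapipe.

import numpy as np
from scipy.signal import find_peaks

rng = np.random.default_rng(0)
nose = np.cumsum(rng.normal(scale=0.002, size=(300, 3)), axis=0)  # fake nose track

# distance moved between consecutive frames
movement = np.linalg.norm(np.diff(nose, axis=0), axis=1)

# candidate peaks at least 12 frames apart, then drop peaks below the threshold
peaks_index, _ = find_peaks(movement, distance=12)
peaks_index = peaks_index[movement[peaks_index] >= 0.004]
print("total_doridori_count:", len(peaks_index))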
MobTgZhang/CIAlgorithms | [
"3aa1b249f526d75fb8e9bf7f37516f18a025d50a"
] | [
"ACO/ACO.py"
] | [
"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport urllib.request\nimport os\nimport time\ndef download(root_path,filename):\n if not os.path.exists(root_path):\n os.mkdir(root_path)\n if not os.path.exists(os.path.join(root_path,filename)):\n url = \"http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/\"+filename\n urllib.request.urlretrieve(url,os.path.join(root_path,filename))\n print(\"The data set: %s downloaded!\"%os.path.join(root_path,filename))\n else:\n print(\"The data set: %s already has downloaded!\"%os.path.join(root_path,filename))\ndef get_data(filename):\n data_list = []\n with open(filename,mode=\"r\") as f:\n flag = False\n while True:\n line = f.readline()\n if \"EOF\" in line:\n break\n elif \"NODE_COORD_SECTION\" in line:\n flag = True\n elif flag:\n tmp = line.strip().split(\" \")\n data_list.append([float(item) for item in tmp])\n return np.array(data_list)\nclass ACO:\n def __init__(self,ant_num,alpha,beta,rho,Q,epoches):\n self.ant_num = ant_num\n self.alpha = alpha\n self.beta = beta\n self.rho = rho\n self.Q = Q\n self.epoches = epoches\n self.citys_mat = None\n self.E_best = None\n self.sol_best = None\n self.length_list = None\n self.name = time.strftime(\"%Y%m%d%H%M\", time.localtime(time.time()))\n def solve(self,citys_mat):\n self.citys_mat = citys_mat\n citys_num = citys_mat.shape[0]\n # 获取邻接矩阵\n citys_x = citys_mat[:, 0].reshape(citys_num, 1).dot(np.ones((1, citys_num)))\n citys_y = citys_mat[:, 1].reshape(citys_num, 1).dot(np.ones((1, citys_num)))\n citys_distance = np.sqrt(np.square(citys_x - citys_x.T) + np.square(citys_y - citys_y.T))\n # 初始化启发函数\n Heu_f = 1.0/(citys_distance + np.diag([np.inf] * citys_num))\n # 信息素矩阵\n Tau_table = np.ones((citys_num,citys_num))\n # 每一次迭代过程中每个蚂蚁的路径记录表\n Route_table = np.zeros((self.ant_num,citys_num),dtype=np.int)\n # 每一次迭代过程中的最佳路径\n Route_best = np.zeros((self.epoches,citys_num),dtype=np.int)\n # 每一次迭代过程中最佳路径记录表\n Length_best = np.zeros(self.epoches)\n # 每次迭代过程中蚂蚁的平均路径长度\n Length_average = np.zeros(self.epoches)\n # 每次迭代过程中当前路径长度\n Length_current = np.zeros(self.ant_num)\n iter = 0\n while iter <self.epoches:\n # 产生城市集合表\n # 随机产生各个蚂蚁的起点城市\n Route_table[:,0]= self.randseed(citys_num)\n # 更新信息素\n Delta_tau = np.zeros((citys_num, citys_num))\n for k in range(self.ant_num):\n # 用于记录蚂蚁下一个访问的城市集合\n # 蚂蚁已经访问过的城市\n tabu = [Route_table[k,0]]\n allow_set = list(set(range(citys_num))-set(tabu))\n city_index = Route_table[k,0]\n for i in range(1,citys_num):\n # 初始化城市之间的转移概率\n P_table = np.zeros(len(allow_set))\n # 计算城市之间的转移概率\n for j in range(len(allow_set)):\n P_table[j] = np.power(Tau_table[city_index,allow_set[j]],self.alpha)*\\\n np.power(Heu_f[city_index,allow_set[j]],self.beta)\n P_table = P_table/np.sum(P_table)\n\n # 轮盘赌算法来选择下一个访问的城市\n #out_prob = np.cumsum(P_table)\n while True:\n r = np.random.rand()\n index_need = np.where(P_table > r)[0]\n if len(index_need) >0:\n city_index2 = allow_set[index_need[0]]\n break\n Route_table[k,i] = city_index2\n tabu.append(city_index2)\n allow_set = list(set(range(0,citys_num))-set(tabu))\n city_index = city_index2\n tabu.append(tabu[0])\n # 计算蚂蚁路径的距离信息\n for j in range(citys_num):\n Length_current[k] = Length_current[k] + citys_distance[tabu[j],tabu[j+1]]\n for j in range(citys_num):\n Delta_tau[tabu[j],tabu[j+1]] = Delta_tau[tabu[j],tabu[j+1]] + self.Q / Length_current[k]\n # 计算最短路径、最短路径长度以及平均路径长度\n Length_best[iter] = np.min(Length_current)\n index = np.where(Length_current == np.min(Length_current))[0][0]\n Route_best[iter] = Route_table[index]\n Length_average[iter] = 
np.mean(Length_current)\n #更新信息素\n Tau_table = (1-self.rho)*Tau_table + Delta_tau\n #Route_table = np.zeros((self.ant_num,citys_num),dtype=np.int)\n Length_current = np.zeros(self.ant_num)\n\n print(\"epoches:%d,best value every epoches%.4f\"%(iter, Length_best[iter]))\n iter = iter + 1\n self.E_best = np.min(Length_best)\n index = np.where(Length_best == np.min(Length_best))[0][0]\n self.sol_best = Route_table[index]\n self.length_list = Length_average\n def randseed(self,citys_num):\n if self.ant_num <citys_num:\n initial_route = np.random.permutation(range(citys_num))[:self.ant_num]\n else:\n initial_route = np.zeros((self.ant_num,))\n initial_route[:citys_num] = np.random.permutation(range(citys_num))\n tmp_index = citys_num\n while tmp_index + citys_num <= self.ant_num:\n initial_route[tmp_index:citys_num + tmp_index] = np.random.permutation(range(citys_num))\n tmp_index += citys_num\n tmp_left = self.ant_num % citys_num\n if tmp_left != 0:\n initial_route[tmp_index:] = np.random.permutation(range(citys_num))[:tmp_left]\n return initial_route\n def draw(self):\n print(self.sol_best)\n print(self.E_best)\n if not os.path.exists(\"log\"):\n os.mkdir(\"log\")\n # draw loss\n x = np.linspace(0, len(self.length_list) - 1, len(self.length_list))\n y = np.array(self.length_list)\n plt.plot(x, y)\n plt.title(label=\"loss\")\n plt.savefig(os.path.join(\"log\", \"%s_loss.png\" % self.name))\n plt.close()\n # draw dots\n for k in range(0, len(self.sol_best) - 1):\n start = self.citys_mat[self.sol_best[k]]\n end = self.citys_mat[self.sol_best[k + 1]]\n plt.plot(start[0], start[1], \"bo\")\n plt.plot(end[0], end[1], \"bo\")\n plt.arrow(start[0], start[1], end[0] - start[0], end[1] - start[1],\n length_includes_head=True, head_width=0.2, head_length=0.3, lw=1,\n color=\"r\")\n start = self.citys_mat[self.sol_best[-1]]\n end = self.citys_mat[self.sol_best[0]]\n plt.plot(start[0], start[1], \"bo\")\n plt.plot(end[0], end[1], \"bo\")\n plt.arrow(start[0], start[1], end[0] - start[0], end[1] - start[1],\n length_includes_head=True, head_width=0.2, head_length=0.3, lw=1,\n color=\"r\")\n plt.title(label=\"length:%.2f\" % (self.E_best))\n plt.savefig(os.path.join(\"log\", \"%s_route.png\" % self.name))\n plt.show()\ndef main():\n filename = \"eil51.tsp\"\n root_path = \"data\"\n download(root_path,filename)\n data_list = get_data(os.path.join(root_path,filename))\n ant_num = 500\n alpha = 1\n beta = 5\n rho = 0.2\n Q = 10\n epoches = 20\n model = ACO(ant_num, alpha, beta, rho, Q, epoches)\n model.solve(data_list[:,1:])\n model.draw()\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.zeros",
"numpy.mean",
"numpy.diag",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.power",
"numpy.min",
"matplotlib.pyplot.close",
"numpy.random.rand",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.where",
"numpy.square",
"matplotlib.pyplot.arrow"
]
] |
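The core step in ACO.solve above is the city-transition probability, proportional to tau[i, j]**alpha * eta[i, j]**beta over the allowed cities, where eta is the inverse-distance heuristic. The sketch below shows one such step on toy coordinates (not the eil51 instance); it draws the next city with numpy's choice instead of the threshold loop used in the original.

import numpy as np

rng = np.random.default_rng(0)
coords = rng.random((6, 2)) * 10                         # toy city coordinates
dist = np.linalg.norm(coords[:, None] - coords[None, :], axis=-1)
eta = 1.0 / (dist + np.diag([np.inf] * len(coords)))     # inverse-distance heuristic
tau = np.ones_like(dist)                                 # initial pheromone levels
alpha, beta = 1, 5

current, allowed = 0, [1, 2, 3, 4, 5]
p = tau[current, allowed] ** alpha * eta[current, allowed] ** beta
p = p / p.sum()
next_city = rng.choice(allowed, p=p)
print("transition probabilities:", np.round(p, 3), "-> next city:", next_city)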
uwe-iben/torchphysics | [
"775d9aca71752a568f1fca972c958b99107f3b7c"
] | [
"src/torchphysics/utils/user_fun.py"
] | [
"\"\"\"Contains a class which extracts the needed arguments of an arbitrary \nmethode/function and wraps them for future usage. E.g correctly choosing \nthe needed arguments and passing them on to the original function.\n\"\"\"\nimport inspect\nimport copy \nimport torch\n\nfrom ..problem.spaces.points import Points\n\n\nclass UserFunction:\n \"\"\"Wraps a function, so that it can be called with arbitrary input arguments.\n \n Parameters\n ----------\n fun : callable\n The original function that should be wrapped.\n defaults : dict, optional\n Possible defaults arguments of the function. If none are specified will\n check by itself if there are any. \n args : dict, optional\n All arguments of the function. If none are specified will\n check by itself if there are any. \n\n Notes\n -----\n Uses inspect.getfullargspec(fun) to get the possible input arguments.\n When called just extracts the needed arguments and passes them to the \n original function. \n \"\"\"\n def __init__(self, fun, defaults={}, args={}):\n if isinstance(fun, (UserFunction, DomainUserFunction)):\n self.fun = fun.fun\n self.defaults = fun.defaults\n self.args = fun.args\n else:\n self._transform_to_user_function(fun, defaults, args)\n\n def _transform_to_user_function(self, fun, defaults, args):\n self.fun = fun\n self.defaults = defaults\n self.args = args\n if callable(self.fun) and self.defaults == {} and self.args == {}:\n self._set_input_args_for_function()\n\n def _set_input_args_for_function(self):\n f_args = inspect.getfullargspec(self.fun).args\n\n # we check that the function defines all needed parameters\n if inspect.getfullargspec(self.fun).varargs is not None or \\\n inspect.getfullargspec(self.fun).varkw is not None:\n raise ValueError(\"\"\"\n Variable arguments are not supported in\n UserFunctions. Please use keyword arguments.\n \"\"\")\n\n f_defaults = inspect.getfullargspec(self.fun).defaults\n f_kwonlyargs = inspect.getfullargspec(self.fun).kwonlyargs\n #f_kwonlydefaults = inspect.getfullargspec(self.fun).kwonlydefaults\n # NOTE: By above check, there should not be kwonlyargs. However, we still catch\n # this case here.\n self.args = f_args + f_kwonlyargs\n\n # defaults always align at the end of the args\n self.defaults = {}\n if not f_defaults is None:\n self.defaults = {self.args[-i]: f_defaults[-i] \n for i in range(len(f_defaults), 0, -1)}\n #if not f_kwonlydefaults is None:\n # self.defaults.update(f_kwonlydefaults)\n\n def __call__(self, args={}, vectorize=False):\n \"\"\"To evalute the function. 
Will automatically extract the needed arguments \n from the input data and will set the possible default values.\n\n Parameters\n ----------\n args : dict or torchphysics.Points\n The input data, where the function should be evaluated.\n vectorize : bool, optional\n If the original function can work with a batch of data, or\n a loop needs to be used to evaluate the function.\n default is False, which means that we assume the function\n can work with a batch of data.\n\n Returns\n -------\n torch.tensor\n The output values of the function.\n \"\"\"\n if isinstance(args, Points):\n args = args.coordinates\n # check that every necessary arg is given\n for key in self.necessary_args:\n assert key in args, \\\n f\"The argument '{key}' is necessary in {self.__name__} but not given.\"\n # if necessary, pass defaults\n inp = {key: args[key] for key in self.args if key in args}\n inp.update({key: self.defaults[key] for key in self.args if key not in args})\n if not vectorize:\n return self.evaluate_function(**inp)\n else:\n return self.apply_to_batch(inp)\n\n def evaluate_function(self, **inp):\n \"\"\"Evaluates the original input function. Should not be used directly, \n rather use the call-methode.\n \"\"\"\n if callable(self.fun):\n return self.fun(**inp)\n return self.fun\n\n def apply_to_batch(self, inp):\n \"\"\"Apply the function to a batch of elements by running a for-loop.\n we assume that all inputs either have batch (i.e. maximum) dimension or\n are a constant param.\n\n Parameters\n ----------\n inp : torchphysics.points\n The Points-object of the input data\n\n Returns\n -------\n torch.tensor\n The output values of the function, for each input.\n\n \"\"\"\n batch_size = max(len(inp[key]) for key in inp)\n out = []\n for i in range(batch_size):\n inp_i = {}\n for key in inp:\n if len(inp[key]) == batch_size:\n inp_i[key] = inp[key][i]\n else:\n inp_i[key] = inp[key]\n o = self.fun(**inp_i)\n if o is not None:\n out.append(o)\n return out\n\n def partially_evaluate(self, **args):\n \"\"\"(partially) evaluates a given function.\n\n Parameters\n ----------\n **args :\n The arguments where the function should be (partially) evaluated.\n\n Returns\n -------\n Out : value or UserFunction\n If the input arguments are enough to evalate the whole function, the \n corresponding output is returned. \n If some needed arguments are missing, a copy of this UserFunction will \n be returned. 
Whereby the values of **args will be added to the \n default values of the returned UserFunction.\n \"\"\"\n if callable(self.fun):\n if all(arg in args for arg in self.necessary_args):\n inp = {key: args[key] for key in self.args if key in args}\n inp.update({key: self.defaults[key] for key in self.args if key not in args})\n return self.fun(**inp)\n else:\n # to avoid manipulation of given param obj, we create a copy\n copy_self = copy.deepcopy(self)\n copy_self.set_default(**args)\n return copy_self\n return self.fun\n\n def __name__(self):\n \"\"\"The name of the function\n\n Returns\n -------\n str\n The name of the function\n \"\"\"\n return self.fun.__name__\n\n def set_default(self, **args):\n \"\"\"Sets a input argument to given value.\n\n Parameters\n ----------\n **args:\n The value the input should be set to.\n \"\"\"\n self.defaults.update({key: args[key] for key in args if key in self.args})\n\n def remove_default(self, *args, **kwargs):\n \"\"\"Removes an default value of a input argument.\n\n Parameters\n ----------\n *args, **kwargs:\n The arguments for which the default values should be deleted.\n \"\"\"\n for key in args:\n self.defaults.pop(key)\n for key in kwargs.keys():\n self.defaults.pop(key)\n\n def __deepcopy__(self, memo):\n \"\"\"Creates a copy of the function\n \"\"\"\n cls = self.__class__\n copy_object = cls.__new__(cls, self.fun)\n memo[id(self)] = copy_object\n for k, v in self.__dict__.items():\n setattr(copy_object, k, copy.deepcopy(v, memo))\n return copy_object\n\n @property\n def necessary_args(self):\n \"\"\"Returns the function arguments that are needed to evaluate this function.\n\n Returns\n -------\n list :\n The needed arguments.\n \"\"\"\n return [arg for arg in self.args if arg not in self.defaults]\n\n @property\n def optional_args(self):\n \"\"\"Returns the function arguments that are optional to evaluate this function.\n\n Returns\n -------\n list :\n The optional arguments.\n \"\"\"\n return [arg for arg in self.args if arg in self.defaults]\n\n\nclass DomainUserFunction(UserFunction):\n \"\"\"Extension of the original UserFunctions, that are used in the Domain-Class.\n \n Parameters\n ----------\n fun : callable\n The original function that should be wrapped.\n defaults : dict, optional\n Possible defaults arguments of the function. If none are specified will\n check by itself if there are any. \n args : dict, optional\n All arguments of the function. If none are specified will\n check by itself if there are any. \n\n Notes\n -----\n The only difference to normal UserFunction is how the evaluation \n of the original function is handled. Since all Domains use Pytorch, \n we check that the output always is a torch.tensor. In the case that the function\n is not constant, we also append an extra dimension to the output, so that the \n domains can work with it correctly. \n \"\"\"\n def __call__(self, args={}, device='cpu'):\n \"\"\"To evalute the function. 
Will automatically extract the needed arguments \n from the input data and will set the possible default values.\n\n Parameters\n ----------\n args : dict or torchphysics.Points\n The input data, where the function should be evaluated.\n device : str, optional\n The device on which the output of th efunction values should lay.\n Default is 'cpu'.\n\n Returns\n -------\n torch.tensor\n The output values of the function.\n \"\"\"\n if isinstance(args, Points):\n args = args.coordinates\n if len(args) != 0: # set the device correctly\n device = args[list(args.keys())[0]].device\n # check that every necessary arg is given\n for key in self.necessary_args:\n assert key in args, \\\n f\"The argument '{key}' is necessary in {self.__name__} but not given.\"\n # if necessary, pass defaults\n inp = {key: args[key] for key in self.args if key in args}\n inp.update({key: self.defaults[key] for key in self.args if key not in args})\n return self.evaluate_function(device=device, **inp)\n\n def evaluate_function(self, device='cpu', **inp):\n \"\"\"Evaluates the original input function. Should not be used directly, \n rather use the call-methode.\n\n Parameters\n ----------\n device : str, optional\n The device on which the output of th efunction values should lay.\n Default is 'cpu'.\n inp \n The input values.\n \"\"\"\n if callable(self.fun):\n fun_eval = self.fun(**inp)\n if not isinstance(fun_eval, torch.Tensor):\n fun_eval = torch.tensor(fun_eval, device=device)\n return fun_eval[:, None]\n else:\n if isinstance(self.fun, torch.Tensor):\n self.fun = self.fun.to(device)\n return self.fun\n else: \n return torch.tensor(self.fun, device=device).float()"
] | [
[
"torch.tensor"
]
] |
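UserFunction in the row above relies on inspect.getfullargspec to record a function's argument names and defaults, and at call time forwards only the entries of the input dictionary that the wrapped function actually needs. A stripped-down stand-in for that mechanism (not the torchphysics class; no Points or torch handling) looks like this:

import inspect

class TinyUserFunction:
    def __init__(self, fun):
        spec = inspect.getfullargspec(fun)
        self.fun = fun
        self.args = spec.args
        defaults = spec.defaults or ()
        # defaults always align with the end of the argument list
        self.defaults = dict(zip(self.args[len(self.args) - len(defaults):], defaults))

    def __call__(self, args={}):
        # keep only the needed keys, then fill in defaults for anything missing
        inp = {k: args[k] for k in self.args if k in args}
        inp.update({k: v for k, v in self.defaults.items() if k not in args})
        missing = [k for k in self.args if k not in inp]
        assert not missing, f"missing arguments: {missing}"
        return self.fun(**inp)

f = TinyUserFunction(lambda x, t, c=2.0: x * t + c)
print(f({'x': 3.0, 't': 4.0, 'unused': 99}))   # -> 14.0, extra keys are ignored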
DevRx28/pokemon-type | [
"2f62d4b88856dcd9aff79bdda993a4ddc093d7b7"
] | [
"prepro.py"
] | [
"import numpy as np\nfrom scipy.optimize import fmin_l_bfgs_b\nimport time\nimport argparse\nimport cv2\nfrom tensorflow.keras.models import load_model\nimport numpy as np\nimport csv \nimport sys\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nfrom keras.preprocessing.image import img_to_array\n\n\nimg = cv2.imread('pokemonimages/Groudon.jpg',cv2.COLOR_BGR2RGB)\nprint (img.shape)\nim = Image.open(\"pokemonimages/Groudon.jpg\")\nim1 = im.resize((200,200))\n#im1= img_to_array(im1, dtype='uint8')\nprint(im1)\n\n\n\ndef remove_transparency(im, bg_colour=(255, 255, 255)):\n\n # Only process if image has transparency (http://stackoverflow.com/a/1963146)\n if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):\n\n # Need to convert to RGBA if LA format due to a bug in PIL (http://stackoverflow.com/a/1963146)\n alpha = im.convert('RGBA').split()[-1]\n\n # Create a new background image of our matt color.\n # Must be RGBA because paste requires both images have the same format\n # (http://stackoverflow.com/a/8720632 and http://stackoverflow.com/a/9459208)\n bg = Image.new(\"RGBA\", im.size, bg_colour + (255,))\n bg.paste(im, mask=alpha)\n return bg\n\n else:\n return im\n\ny=remove_transparency(im1)\n\ny=y.convert(\"RGB\")\nprint(\"rgb\")\ny.show()\ny= img_to_array(y, dtype='uint8')\nprint(y.shape)\n\n\n\n#img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n\n\n\n\nmask = np.zeros(img.shape[:2],np.uint8)\n\nbgdModel = np.zeros((1,65),np.float64)\n\nfgdModel = np.zeros((1,65),np.float64)\nheight, width = img.shape[:2]\n\nrect = (0,0,width-10,height-10)\ncv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)\n\nmask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')\nimgnew= img*mask2[:,:,np.newaxis]\nbackground=img-imgnew\nbackground[np.where((background>[0,0,0]).all(axis=2))]=[255,255,255]\n\nfinal=background+imgnew\n#print mask2\n\n#plt.imshow(final)\n#plt.show()"
] | [
[
"numpy.where",
"numpy.zeros"
]
] |
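prepro.py above whitens the background of a Pokemon image with GrabCut initialized from a rectangle. The sketch below runs the same masking arithmetic on a small synthetic image (an assumption, so there is no file dependency); the rectangle and iteration count follow the script. As an aside, the original passes cv2.COLOR_BGR2RGB as the second argument of cv2.imread, where imread expects a read flag rather than a color-conversion code; the BGR-to-RGB conversion belongs in cv2.cvtColor, which the script has commented out.

import numpy as np
import cv2

rng = np.random.default_rng(0)
img = np.full((120, 120, 3), 40, dtype=np.uint8)
img = cv2.add(img, rng.integers(0, 20, img.shape, dtype=np.uint8))  # mild texture
cv2.circle(img, (60, 60), 35, (0, 200, 0), -1)                      # fake foreground blob

mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
height, width = img.shape[:2]
rect = (0, 0, width - 10, height - 10)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)

# keep probable/definite foreground, paint everything else white
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
foreground = img * mask2[:, :, np.newaxis]
background = img - foreground
background[np.where((background > [0, 0, 0]).all(axis=2))] = [255, 255, 255]
final = background + foreground
print(final.shape, final.dtype)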
vishalbelsare/lasso-python | [
"319bf590599b4a4d50d9345e83e8030afe044aec"
] | [
"lasso/dyna/FemzipMapper.py"
] | [
"\r\nimport logging\r\nimport re\r\nimport traceback\r\nfrom typing import Dict, List, Set, Tuple, Union\r\n\r\nimport numpy as np\r\nfrom lasso.dyna.ArrayType import ArrayType\r\nfrom lasso.femzip.femzip_api import FemzipAPI, FemzipFileMetadata, VariableInfo\r\nfrom lasso.femzip.fz_config import (FemzipArrayType, FemzipVariableCategory,\r\n get_last_int_of_line)\r\n\r\nTRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE: Dict[Tuple[FemzipArrayType, FemzipVariableCategory], Set[str]] = {\r\n # GLOBAL\r\n (FemzipArrayType.global_data, FemzipVariableCategory.GLOBAL): {\r\n # ArrayType.global_timesteps,\r\n ArrayType.global_internal_energy,\r\n ArrayType.global_kinetic_energy,\r\n ArrayType.global_total_energy,\r\n ArrayType.global_velocity,\r\n },\r\n # PART\r\n (FemzipArrayType.part_results, FemzipVariableCategory.PART): {\r\n ArrayType.part_hourglass_energy,\r\n ArrayType.part_internal_energy,\r\n ArrayType.part_kinetic_energy,\r\n ArrayType.part_mass,\r\n ArrayType.part_velocity,\r\n },\r\n # NODE\r\n (FemzipArrayType.node_displacement, FemzipVariableCategory.NODE): {\r\n ArrayType.node_displacement\r\n },\r\n (FemzipArrayType.node_accelerations, FemzipVariableCategory.NODE): {\r\n ArrayType.node_acceleration\r\n },\r\n (FemzipArrayType.node_velocities, FemzipVariableCategory.NODE): {\r\n ArrayType.node_velocity\r\n },\r\n (FemzipArrayType.node_temperatures, FemzipVariableCategory.NODE): {\r\n ArrayType.node_temperature\r\n },\r\n (FemzipArrayType.node_heat_flux, FemzipVariableCategory.NODE): {\r\n ArrayType.node_heat_flux\r\n },\r\n (FemzipArrayType.node_mass_scaling, FemzipVariableCategory.NODE): {\r\n ArrayType.node_mass_scaling\r\n },\r\n (FemzipArrayType.node_temperature_gradient, FemzipVariableCategory.NODE): {\r\n ArrayType.node_temperature_gradient\r\n },\r\n # BEAM\r\n (FemzipArrayType.beam_axial_force, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_axial_force\r\n },\r\n (FemzipArrayType.beam_s_bending_moment, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_bending_moment\r\n },\r\n (FemzipArrayType.beam_t_bending_moment, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_bending_moment\r\n },\r\n (FemzipArrayType.beam_s_shear_resultant, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_shear_force\r\n },\r\n (FemzipArrayType.beam_t_shear_resultant, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_shear_force\r\n },\r\n (FemzipArrayType.beam_torsional_moment, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_torsion_moment\r\n },\r\n (FemzipArrayType.beam_axial_stress, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_axial_stress\r\n },\r\n (FemzipArrayType.beam_shear_stress_rs, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_shear_stress\r\n },\r\n (FemzipArrayType.beam_shear_stress_tr, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_shear_stress\r\n },\r\n (FemzipArrayType.beam_plastic_strain, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_plastic_strain\r\n },\r\n (FemzipArrayType.beam_axial_strain, FemzipVariableCategory.BEAM): {\r\n ArrayType.element_beam_axial_strain\r\n },\r\n # SHELL\r\n (FemzipArrayType.stress_x, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.stress_y, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.stress_z, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.stress_xy, FemzipVariableCategory.SHELL): {\r\n 
ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.stress_yz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.stress_xz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_stress\r\n },\r\n (FemzipArrayType.eff_pstrain, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_effective_plastic_strain\r\n },\r\n (FemzipArrayType.history_vars, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_history_vars\r\n },\r\n (FemzipArrayType.bending_moment_mx, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_bending_moment\r\n },\r\n (FemzipArrayType.bending_moment_my, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_bending_moment\r\n },\r\n (FemzipArrayType.bending_moment_mxy, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_bending_moment\r\n },\r\n (FemzipArrayType.shear_force_x, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_shear_force\r\n },\r\n (FemzipArrayType.shear_force_y, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_shear_force\r\n },\r\n (FemzipArrayType.normal_force_x, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_normal_force\r\n },\r\n (FemzipArrayType.normal_force_y, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_normal_force\r\n },\r\n (FemzipArrayType.normal_force_xy, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_normal_force\r\n },\r\n (FemzipArrayType.thickness, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_thickness\r\n },\r\n (FemzipArrayType.unknown_1, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_unknown_variables\r\n },\r\n (FemzipArrayType.unknown_2, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_unknown_variables\r\n },\r\n (FemzipArrayType.strain_inner_x, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_inner_y, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_inner_z, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_inner_xy, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_inner_yz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_inner_xz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_x, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_y, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_z, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_xy, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_yz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.strain_outer_xz, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_strain\r\n },\r\n (FemzipArrayType.internal_energy, FemzipVariableCategory.SHELL): {\r\n ArrayType.element_shell_internal_energy\r\n },\r\n # THICK SHELL\r\n ((FemzipArrayType.stress_x, FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n ((FemzipArrayType.stress_y, FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n ((FemzipArrayType.stress_z, 
FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n ((FemzipArrayType.stress_xy, FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n ((FemzipArrayType.stress_yz, FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n ((FemzipArrayType.stress_xz, FemzipVariableCategory.THICK_SHELL)): {\r\n ArrayType.element_tshell_stress\r\n },\r\n (FemzipArrayType.eff_pstrain, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_effective_plastic_strain\r\n },\r\n (FemzipArrayType.strain_outer_x, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_outer_y, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_outer_z, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_outer_xy, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_outer_yz, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_outer_xz, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_x, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_y, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_z, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_xy, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_yz, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n (FemzipArrayType.strain_inner_xz, FemzipVariableCategory.THICK_SHELL): {\r\n ArrayType.element_tshell_strain\r\n },\r\n # SOLID\r\n (FemzipArrayType.stress_x, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.stress_y, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.stress_z, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.stress_xy, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.stress_yz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.stress_xz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_stress\r\n },\r\n (FemzipArrayType.eff_pstrain, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_effective_plastic_strain\r\n },\r\n (FemzipArrayType.history_vars, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_history_variables\r\n },\r\n (FemzipArrayType.strain_x, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_y, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_z, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_xy, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_yz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_xz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n 
(FemzipArrayType.strain_x, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_y, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_z, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_xy, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_yz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n (FemzipArrayType.strain_xz, FemzipVariableCategory.SOLID): {\r\n ArrayType.element_solid_strain\r\n },\r\n # AIRBAG\r\n (FemzipArrayType.airbag_state_geom, FemzipVariableCategory.CPM_AIRBAG): {\r\n ArrayType.airbag_n_active_particles,\r\n ArrayType.airbag_bag_volume,\r\n },\r\n # AIRBAG PARTICLES\r\n (FemzipArrayType.airbag_particle_gas_chamber_id, FemzipVariableCategory.CPM_INT_VAR): {\r\n ArrayType.airbag_particle_gas_id\r\n },\r\n (FemzipArrayType.airbag_particle_chamber_id, FemzipVariableCategory.CPM_INT_VAR): {\r\n ArrayType.airbag_particle_chamber_id\r\n },\r\n (FemzipArrayType.airbag_particle_leakage, FemzipVariableCategory.CPM_INT_VAR): {\r\n ArrayType.airbag_particle_leakage\r\n },\r\n (FemzipArrayType.airbag_particle_mass, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_mass\r\n },\r\n (FemzipArrayType.airbag_particle_pos_x, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_position\r\n },\r\n (FemzipArrayType.airbag_particle_pos_y, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_position\r\n },\r\n (FemzipArrayType.airbag_particle_pos_z, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_position\r\n },\r\n (FemzipArrayType.airbag_particle_vel_x, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_velocity\r\n },\r\n (FemzipArrayType.airbag_particle_vel_y, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_velocity\r\n },\r\n (FemzipArrayType.airbag_particle_vel_z, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_velocity\r\n },\r\n (FemzipArrayType.airbag_particle_radius, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_radius\r\n },\r\n (FemzipArrayType.airbag_particle_spin_energy, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_spin_energy\r\n },\r\n (FemzipArrayType.airbag_particle_tran_energy, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_translation_energy\r\n },\r\n (FemzipArrayType.airbag_particle_neighbor_dist, FemzipVariableCategory.CPM_FLOAT_VAR): {\r\n ArrayType.airbag_particle_nearest_segment_distance\r\n },\r\n}\r\n\r\n# indexes for various femzip arrays\r\nstress_index = {\r\n FemzipArrayType.stress_x.value: 0,\r\n FemzipArrayType.stress_y.value: 1,\r\n FemzipArrayType.stress_z.value: 2,\r\n FemzipArrayType.stress_xy.value: 3,\r\n FemzipArrayType.stress_yz.value: 4,\r\n FemzipArrayType.stress_xz.value: 5,\r\n FemzipArrayType.normal_force_x.value: 0,\r\n FemzipArrayType.normal_force_y.value: 1,\r\n FemzipArrayType.normal_force_xy.value: 2,\r\n FemzipArrayType.shear_force_x.value: 0,\r\n FemzipArrayType.shear_force_y.value: 1,\r\n FemzipArrayType.strain_inner_x.value: 0,\r\n FemzipArrayType.strain_inner_y.value: 1,\r\n FemzipArrayType.strain_inner_z.value: 2,\r\n FemzipArrayType.strain_inner_xy.value: 3,\r\n FemzipArrayType.strain_inner_yz.value: 4,\r\n FemzipArrayType.strain_inner_xz.value: 5,\r\n 
FemzipArrayType.strain_outer_x.value: 0,\r\n FemzipArrayType.strain_outer_y.value: 1,\r\n FemzipArrayType.strain_outer_z.value: 2,\r\n FemzipArrayType.strain_outer_xy.value: 3,\r\n FemzipArrayType.strain_outer_yz.value: 4,\r\n FemzipArrayType.strain_outer_xz.value: 5,\r\n FemzipArrayType.beam_s_shear_resultant.value: 0,\r\n FemzipArrayType.beam_t_shear_resultant.value: 1,\r\n FemzipArrayType.beam_s_bending_moment.value: 0,\r\n FemzipArrayType.beam_t_bending_moment.value: 1,\r\n\r\n FemzipArrayType.strain_x.value: 0,\r\n FemzipArrayType.strain_y.value: 1,\r\n FemzipArrayType.strain_z.value: 2,\r\n FemzipArrayType.strain_xy.value: 3,\r\n FemzipArrayType.strain_yz.value: 4,\r\n FemzipArrayType.strain_xz.value: 5,\r\n\r\n FemzipArrayType.beam_shear_stress_rs.value: 0,\r\n FemzipArrayType.beam_shear_stress_tr.value: 1,\r\n\r\n FemzipArrayType.airbag_particle_pos_x.value: 0,\r\n FemzipArrayType.airbag_particle_pos_y.value: 1,\r\n FemzipArrayType.airbag_particle_pos_z.value: 2,\r\n FemzipArrayType.airbag_particle_vel_x.value: 0,\r\n FemzipArrayType.airbag_particle_vel_y.value: 1,\r\n FemzipArrayType.airbag_particle_vel_z.value: 2,\r\n\r\n FemzipArrayType.bending_moment_mx.value: 0,\r\n FemzipArrayType.bending_moment_my.value: 1,\r\n FemzipArrayType.bending_moment_mxy.value: 2,\r\n\r\n FemzipArrayType.unknown_1.value: 0,\r\n FemzipArrayType.unknown_2.value: 1,\r\n}\r\n\r\n\r\ndef femzip_to_d3plot(\r\n result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]\r\n ) -> Dict[str, np.ndarray]:\r\n \"\"\"Map femzip arrays to d3plot arrays\r\n\r\n Parameters\r\n ----------\r\n result_arrays:\r\n femzip arrays\r\n \"\"\"\r\n a = FemzipMapper()\r\n a.map(result_arrays)\r\n\r\n return a.d3plot_arrays\r\n\r\n\r\nclass ArrayShapeInfo:\r\n n_layers: Union[int, None] = None\r\n n_vars: Union[int, None] = None\r\n n_entries: Union[int, None] = None\r\n n_timesteps: Union[int, None] = None\r\n\r\n def _set_attr(self, attr_name: str, value: Union[int, None]) -> None:\r\n self_attr_value = getattr(self, attr_name)\r\n if value is not None:\r\n if self_attr_value is None:\r\n setattr(self, attr_name, value)\r\n else:\r\n setattr(self, attr_name, max(self_attr_value, value))\r\n\r\n def set_n_layers(self, n_layers: Union[int, None]) -> None:\r\n self._set_attr(\"n_layers\", n_layers)\r\n\r\n def set_n_vars(self, n_vars: Union[int, None]) -> None:\r\n self._set_attr(\"n_vars\", n_vars)\r\n\r\n def set_n_entries(self, n_entries: Union[int, None]) -> None:\r\n self._set_attr(\"n_entries\", n_entries)\r\n\r\n def set_n_timesteps(self, n_timesteps: Union[int, None]) -> None:\r\n self._set_attr(\"n_timesteps\", n_timesteps)\r\n\r\n def to_shape(self) -> Tuple[int, ...]:\r\n shape = [self.n_timesteps, self.n_entries]\r\n fortran_offset = 1\r\n if self.n_layers is not None:\r\n shape.append(self.n_layers + fortran_offset)\r\n if self.n_vars is not None:\r\n shape.append(self.n_vars + fortran_offset)\r\n return tuple(shape)\r\n\r\n\r\nclass D3plotArrayMapping:\r\n d3plot_array_type: str\r\n d3_layer_slice: Union[slice, int, None] = None\r\n d3_var_slice: Union[slice, int, None] = None\r\n\r\n fz_layer_slice: Union[slice, int, None] = None\r\n fz_var_slice: Union[slice, int, None] = None\r\n\r\n just_assign: bool = False\r\n\r\n def to_slice(self) -> Tuple[Union[int, slice], ...]:\r\n slices: List[Union[slice, int]] = [slice(None), slice(None)]\r\n if self.d3_layer_slice is not None:\r\n slices.append(self.d3_layer_slice)\r\n if self.d3_var_slice is not None:\r\n slices.append(self.d3_var_slice)\r\n return 
tuple(slices)\r\n\r\n\r\nclass FemzipArrayInfo:\r\n full_name: str = \"\"\r\n short_name: str = \"\"\r\n index: int = -1\r\n category: FemzipVariableCategory\r\n array_type: FemzipArrayType\r\n array: np.ndarray\r\n\r\n i_layer: Union[int, None] = None\r\n i_var: Union[int, None] = None\r\n\r\n mappings: List[D3plotArrayMapping]\r\n\r\n def __init__(self):\r\n self.mappings = []\r\n\r\n def __str__(self) -> str:\r\n return f\"\"\"FemzipArrayInfo:\r\n full_name = {self.full_name}\r\n short_name = {self.short_name}\r\n index = {self.index}\r\n category = {self.category}\r\n array_type = {self.array_type}>\r\n i_layer = {self.i_layer}\r\n i_var = {self.i_var}\"\"\"\r\n\r\n\r\nclass FemzipMapper():\r\n \"\"\"Class for mapping femzip variable data to d3plots.\r\n\r\n Takes no arguments.\r\n \"\"\"\r\n # regex pattern for reading variables\r\n name_separation_pattern = re.compile(r\"(^[^\\(\\n]+)(\\([^\\)]+\\))*\")\r\n\r\n FORTRAN_OFFSET: int = 1\r\n\r\n _d3plot_arrays: Dict[str, np.ndarray] = {}\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def map(self, result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]):\r\n \"\"\"Map femzip data to d3plot arrays.\r\n\r\n Parameters\r\n ----------\r\n result_arrays:\r\n femzip variable data\r\n \"\"\"\r\n self._d3plot_arrays = {}\r\n self._fz_array_slices = {}\r\n\r\n # convert to internal datastructure\r\n array_infos = self._convert(result_arrays)\r\n\r\n # build the array shapes\r\n d3plot_array_shapes = self._build(array_infos)\r\n\r\n # init the numpy arrays\r\n self._d3plot_arrays = self._allocate_d3plot_arrays(d3plot_array_shapes)\r\n\r\n # add all the data to its right place\r\n self._map_arrays(array_infos, self._d3plot_arrays)\r\n\r\n def _convert(self,\r\n result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]\r\n ) -> List[FemzipArrayInfo]:\r\n \"\"\" Convert femzip result arrays into array infos\r\n\r\n Parameters\r\n ----------\r\n result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]\r\n result arrays from femzip\r\n\r\n Returns\r\n -------\r\n array_infos: List[FemzipArrayInfo]\r\n infos about femzip arrays\r\n \"\"\"\r\n\r\n array_infos = []\r\n\r\n # convert\r\n for (fz_index, fz_name, fz_cat), array in result_arrays.items():\r\n femzip_array_info = FemzipArrayInfo()\r\n femzip_array_info.index = fz_index\r\n femzip_array_info.full_name = fz_name\r\n femzip_array_info.category = fz_cat\r\n femzip_array_info.array = array\r\n femzip_array_info.array_type = FemzipArrayType.from_string(fz_name)\r\n\r\n var_name, i_layer, i_stress, i_history = self._parse_femzip_name(\r\n fz_name, fz_cat)\r\n\r\n femzip_array_info.short_name = var_name\r\n femzip_array_info.i_layer = i_layer\r\n femzip_array_info.i_var = i_stress if i_stress is not None else i_history\r\n\r\n array_infos.append(femzip_array_info)\r\n\r\n return array_infos\r\n\r\n @staticmethod\r\n def _build(fz_arrays: List[FemzipArrayInfo]) -> Dict[str, Tuple[int, ...]]:\r\n \"\"\" Counts the occurence of all variables in the result array such as the\r\n number of layers and stresses.\r\n\r\n Paramters\r\n ---------\r\n fz_arrays: List[FemzipArrayInfo]\r\n infos about femzip arrays\r\n\r\n Returns\r\n -------\r\n d3plot_array_shapes:\r\n shapes of the d3plot arrays required to be allocated\r\n\r\n Notes\r\n -----\r\n Some variables only have partial stress results written for Sigma-x and Sigma-y\r\n and layers one to three for example.\r\n \"\"\"\r\n shape_infos: Dict[str, ArrayShapeInfo] = {}\r\n name_count: Dict[Tuple[str, 
FemzipVariableCategory], int] = {}\r\n\r\n for arr_info in fz_arrays:\r\n # print(arr_info)\r\n\r\n d3_array_types = TRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE[(\r\n arr_info.array_type, arr_info.category)]\r\n\r\n # var_name = var_name.strip()\r\n for array_type in d3_array_types:\r\n # print(array_type)\r\n array_shape_info = shape_infos.get(array_type) or ArrayShapeInfo()\r\n\r\n # beam layer vars always have same name but\r\n # must be counted up as layers\r\n if (arr_info.full_name, arr_info.category) in name_count:\r\n count = name_count[(arr_info.full_name, arr_info.category)]\r\n i_layer = count + 1\r\n name_count[(arr_info.full_name, arr_info.category)] = i_layer\r\n else:\r\n name_count[(arr_info.full_name, arr_info.category)] = 0\r\n\r\n # update shape\r\n array_shape_info.set_n_timesteps(arr_info.array.shape[0])\r\n array_shape_info.set_n_entries(arr_info.array.shape[1])\r\n array_shape_info.set_n_layers(arr_info.i_layer)\r\n array_shape_info.set_n_vars(arr_info.i_var)\r\n\r\n shape_infos[array_type] = array_shape_info\r\n\r\n # where to put it\r\n mapping = D3plotArrayMapping()\r\n mapping.d3plot_array_type = array_type\r\n if arr_info.i_layer is not None:\r\n mapping.d3_layer_slice = arr_info.i_layer\r\n if arr_info.i_var is not None:\r\n mapping.d3_var_slice = arr_info.i_var\r\n # arrays to copy:\r\n # - node displacement, veloctiy, acceleration\r\n # - airbag integer vars (so we don't need to cast)\r\n if arr_info.array.ndim == 3 \\\r\n or arr_info.category == FemzipVariableCategory.CPM_INT_VAR:\r\n mapping.just_assign = True\r\n\r\n arr_info.mappings.append(mapping)\r\n\r\n # correct layers\r\n # if a field has the same name for multiple\r\n # layers such as beam axial stress, we needed\r\n # to count in order to determine if it had layers\r\n # now we need to correct i_layers from None to 0 for them\r\n name_count2 = {}\r\n for arr_info in fz_arrays:\r\n count = name_count[(arr_info.full_name, arr_info.category)]\r\n\r\n if count != 0 and arr_info.i_layer is None:\r\n count2 = name_count2.get((arr_info.full_name, arr_info.category), -1)\r\n count2 += 1\r\n arr_info.i_layer = count2\r\n name_count2[(arr_info.full_name, arr_info.category)] = count2\r\n\r\n for mapping in arr_info.mappings:\r\n shape_info = shape_infos[mapping.d3plot_array_type]\r\n shape_info.set_n_layers(count)\r\n mapping.d3_layer_slice = count2\r\n\r\n # all arrays which are simply copied (slice has len 2 and only one target)\r\n # get a just assign flag\r\n if (len(arr_info.mappings) == 2 and\r\n len(arr_info.mappings[0].to_slice()) == 2):\r\n arr_info.mappings[0].just_assign = True\r\n\r\n d3_array_types = TRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE[(\r\n arr_info.array_type, arr_info.category)]\r\n\r\n for array_type in d3_array_types:\r\n del shape_infos[array_type]\r\n\r\n return {name: info.to_shape() for name, info in shape_infos.items()}\r\n\r\n def _map_arrays(self, array_infos: List[FemzipArrayInfo], d3plot_arrays: Dict[str, np.ndarray]):\r\n \"\"\"Allocate a femzip variable to its correct position in\r\n the d3plot array dictionary.\r\n\r\n Paramters\r\n ---------\r\n array_infos: List[FemzipArrayInfo]\r\n femzip variables stored in a dictionary\r\n d3plot_array: Dict[str, np.ndarray]\r\n d3plot arrays preallocated\r\n\r\n Notes\r\n -----\r\n The keys are the femzip array name (unparsed)\r\n and the category of the variable as an enum.\r\n \"\"\"\r\n for arr_info in array_infos:\r\n if arr_info.category == FemzipVariableCategory.CPM_AIRBAG:\r\n d3plot_arrays[ArrayType.airbag_n_active_particles] 
= arr_info.array[:, :, 0].view(\r\n np.int32)\r\n d3plot_arrays[ArrayType.airbag_bag_volume] = arr_info.array[:, :, 1]\r\n else:\r\n for mapping in arr_info.mappings:\r\n if mapping.just_assign:\r\n d3plot_arrays[mapping.d3plot_array_type] = arr_info.array\r\n continue\r\n\r\n slices = mapping.to_slice()\r\n d3plot_array = d3plot_arrays[mapping.d3plot_array_type]\r\n\r\n # for femzip arrays with same name first var_index is missing\r\n if d3plot_array.ndim == 3 and len(slices) == 2 and arr_info.array.ndim == 2:\r\n slices = (*slices, 0)\r\n\r\n d3plot_array[slices] = arr_info.array\r\n\r\n def _allocate_d3plot_arrays(self,\r\n array_shapes: Dict[str, Tuple[int, ...]]) -> Dict[str, np.ndarray]:\r\n \"\"\"Initialize all the d3plot arrays.\r\n\r\n Parameters\r\n ----------\r\n array_shapes: array_shapes: Dict[str, Tuple[int, ...]]\r\n array shapes required to be allocated\r\n\r\n Returns\r\n -------\r\n d3plot_arrays: Dict[str, np.ndarray]\r\n d3plot arrays preallocated\r\n \"\"\"\r\n d3plot_arrays = {}\r\n for key, shape in array_shapes.items():\r\n d3plot_arrays[key] = np.empty(shape, dtype=np.float32)\r\n return d3plot_arrays\r\n\r\n @ property\r\n def d3plot_arrays(self):\r\n \"\"\"Returns the mapped d3plot arrays.\r\n \"\"\"\r\n return self._d3plot_arrays\r\n\r\n def _parse_femzip_name(self,\r\n fz_name: str,\r\n var_type: FemzipVariableCategory) -> Tuple[str,\r\n Union[int, None],\r\n Union[int, None],\r\n Union[int, None]]:\r\n \"\"\"Parses the femzip variable names.\r\n\r\n Parameters\r\n ----------\r\n fz_name:\r\n cryptic femzip variable name we need to parse\r\n var_type:\r\n the category of this varialbe e.g. shells, parts, global etc.\r\n\r\n Returns\r\n -------\r\n var_name:\r\n femzip variable name without integration and layer info\r\n i_layer:\r\n layer index\r\n i_stress:\r\n stress index\r\n i_history:\r\n history variable index\r\n \"\"\"\r\n matches = self.name_separation_pattern.findall(fz_name)\r\n if not len(matches) == 1:\r\n err_msg = \"Could not match femzip array name: {0}\"\r\n raise ValueError(err_msg.format(fz_name))\r\n if not len(matches[0]) == 2:\r\n err_msg = \"Could not match femzip array name: {0}\"\r\n raise ValueError(err_msg.format(fz_name))\r\n\r\n (first_grp, second_grp) = matches[0]\r\n var_name, extra_value = get_last_int_of_line(first_grp)\r\n var_name = var_name.strip()\r\n\r\n # the slice 1:-1 leaves out the brackets '(' and ')'\r\n _, i_layer = get_last_int_of_line(\r\n second_grp[1:-1])\r\n\r\n if i_layer is not None:\r\n i_layer -= self.FORTRAN_OFFSET\r\n\r\n i_history: Union[int, None] = None\r\n\r\n if var_type != FemzipVariableCategory.PART or \\\r\n var_type != FemzipVariableCategory.GLOBAL:\r\n i_history = extra_value\r\n\r\n if i_history:\r\n i_history -= self.FORTRAN_OFFSET\r\n\r\n # set var name to the unformatted femzip array type name\r\n if \"Epsilon\" in var_name:\r\n var_name = fz_name.strip()\r\n if \"inner\" in var_name:\r\n i_layer = 0\r\n elif \"outer\" in var_name:\r\n i_layer = 1\r\n else:\r\n # solid strain\r\n i_layer = 0\r\n\r\n i_stress: Union[int, None] = stress_index.get(var_name, None)\r\n\r\n return var_name, i_layer, i_stress, i_history\r\n\r\n\r\ndef filter_femzip_variables(file_metadata: FemzipFileMetadata,\r\n d3plot_array_filter: Union[Set[str], None]) -> FemzipFileMetadata:\r\n \"\"\" Filters variable infos regarding d3plot array types\r\n\r\n Parameters\r\n ----------\r\n file_metadata: FemzipFileMetadata\r\n metadata of femzip file including contained variables\r\n d3plot_array_filter: Union[Set[str], 
None]\r\n array types to filter for if wanted\r\n\r\n Returns\r\n -------\r\n file_metadata: FemzipFileMetadata\r\n filtered array according to array types\r\n \"\"\"\r\n\r\n # find out which arrays we need and\r\n vars_to_copy: List[int] = list()\r\n\r\n for i_var in range(file_metadata.number_of_variables):\r\n try:\r\n var_info: VariableInfo = file_metadata.variable_infos[i_var]\r\n var_type: int = var_info.var_type\r\n var_index: int = var_info.var_index\r\n var_name: str = var_info.name.decode(\"utf-8\")\r\n\r\n logging.debug(f\"{var_type}, {var_index}, {var_name.strip()}\")\r\n\r\n if var_type == FemzipVariableCategory.GEOMETRY.value:\r\n continue\r\n\r\n # find out which array from name\r\n try:\r\n fz_array_type = FemzipArrayType.from_string(var_name)\r\n except ValueError:\r\n warn_msg = (\"Warning: lasso-python does not support femzip result\"\r\n \" field '{0}' category type '{1}'.\")\r\n logging.warning(warn_msg.format(var_name.strip(), var_type))\r\n continue\r\n\r\n # check if we asked for the array\r\n matching_array_types = TRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE[(\r\n fz_array_type, FemzipVariableCategory(var_type))]\r\n\r\n if d3plot_array_filter is not None:\r\n if not matching_array_types.intersection(d3plot_array_filter):\r\n continue\r\n vars_to_copy.append(i_var)\r\n except Exception:\r\n trb_msg = traceback.format_exc()\r\n err_msg = \"An error ocurred while preprocessing femzip variable information: {0}\"\r\n logging.warning(err_msg.format(trb_msg))\r\n\r\n # copy filtered data\r\n filtered_file_metadata = FemzipFileMetadata()\r\n FemzipAPI.copy_struct(file_metadata, filtered_file_metadata)\r\n filtered_file_metadata.number_of_variables = len(vars_to_copy)\r\n\r\n FilteredVariableInfoArrayType = len(vars_to_copy) * VariableInfo\r\n filtered_info_array_data = FilteredVariableInfoArrayType()\r\n\r\n for i_var, src_i_var in enumerate(vars_to_copy):\r\n FemzipAPI.copy_struct(\r\n file_metadata.variable_infos[src_i_var],\r\n filtered_info_array_data[i_var])\r\n filtered_file_metadata.variable_infos = filtered_info_array_data\r\n\r\n return filtered_file_metadata\r\n"
] | [
[
"numpy.empty"
]
] |
rewin123/NNPreprocessingTomography | [
"b630f4c2cb9705c3c8432480498e4307ed511edf"
] | [
"open_test.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.nn import Sequential\n\n#model definition\nclass Unet1D(nn.Module):\n def __init__(self):\n super(Unet1D, self).__init__()\n \n ch = 32\n self.maxpool = nn.MaxPool2d((1,2))\n self.unpool = nn.Upsample(scale_factor=(1,2))\n self.startLayer = nn.Conv2d(1, ch, (1,3), padding=(0,1))\n self.endLayer = nn.Conv2d(ch, 1, (1,1))\n self.tb1 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.tb2 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.tb3 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.tb4 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.tb5 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n\n self.db1 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.db2 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.db3 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.db4 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())\n self.db5 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())\n\n\n def forward(self, x):\n data = self.startLayer(x)\n\n data1 = self.tb1(data)\n data2 = self.tb2(self.maxpool(data1))\n data3 = self.tb3(self.maxpool(data2))\n data4 = self.tb4(self.maxpool(data3))\n data5 = self.tb5(self.maxpool(data4))\n\n \n data5 = self.db5(data5)\n data4 = self.db4(torch.cat([data4, nn.Upsample(size=(data4.shape[2], data4.shape[3]))(data5)], dim=1))\n data3 = self.db3(torch.cat([data3, nn.Upsample(size=(data3.shape[2], data3.shape[3]))(data4)], dim=1))\n data2 = self.db2(torch.cat([data2, nn.Upsample(size=(data2.shape[2], data2.shape[3]))(data3)], dim=1))\n data1 = self.db1(torch.cat([data1, nn.Upsample(size=(data1.shape[2], data1.shape[3]))(data2)], dim=1))\n\n return self.endLayer(data1)\n\n#we use cuda for model\nmodel = torch.load(\"model_unet1d.pkl\").cpu()\n\nimport numpy as np\n#load train and val data\n#input sinograms with noise\nnoised_sin = torch.from_numpy(np.load(\"data/noised_sin.npy\")).unsqueeze(1)\n#filtered sinograms without noise\nfiltered_sin = torch.from_numpy(np.load(\"data/clear_sin.npy\")).unsqueeze(1)\n#groundtruth phantoms\nphantoms = torch.from_numpy(np.load(\"data/phantoms.npy\")).unsqueeze(1)\n\n\nimport odl\n#define radon scheme\ndetectors = 183\nangles = 128\nangles_parallel = np.linspace(0, 180, angles, False)\n\nreco_space = odl.uniform_discr(min_pt=[-20,-20], max_pt=[20,20], shape=[128, 128], dtype='float32')\n\nphantom = odl.phantom.shepp_logan(reco_space, modified=True)\n\nimport math\nl = 40 * math.sqrt(2)\n\nangle_partition = odl.uniform_partition(-np.pi / 2, np.pi / 2, angles)\ndetector_partition = odl.uniform_partition(-l / 2, l / 2, detectors)\ngeometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition)\nray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=\"astra_cuda\")\n\ndef ramp_op(ray_trafo):\n fourier = odl.trafos.FourierTransform(ray_trafo.range, axes=[1])\n # Create ramp in the detector direction\n ramp_function = fourier.range.element(lambda x: np.abs(x[1]) / (2 * np.pi))\n # Create ramp filter via the convolution formula with fourier transforms\n ramp_filter = fourier.inverse * ramp_function * fourier\n return ramp_filter\n\nramp = ramp_op(ray_trafo)\n\ntest_data_idx = 1000\n\ninp = noised_sin[test_data_idx:test_data_idx+1]\nf_sin = 
filtered_sin[test_data_idx]\ngroundtruth = phantoms[test_data_idx, 0].numpy()\n\n#plot and measure experiments\nimport matplotlib.pyplot as plt\n\nfig, axs = plt.subplots(2, 3)\nfig.set_figheight(15)\nfig.set_figwidth(15)\n\nproposed_rec = ray_trafo.adjoint(model(inp).detach().numpy()[0,0]).data\nproposed_rec *= (proposed_rec > 0)\nfbp_rec = ray_trafo.adjoint(ramp(inp[0,0])).data\nfbp_rec *= (fbp_rec > 0)\n\nproposed_diff = np.abs(proposed_rec - groundtruth)\nfbp_diff = np.abs(fbp_rec - groundtruth)\n\n# diff_max = max(np.max(proposed_diff), np.max(fbp_diff))\n# proposed_diff /= diff_max\n# fbp_diff /= diff_max\n\n\n#show phantom\nim_ph = axs[0,0].imshow(groundtruth, cmap='gray')\naxs[0,0].set_title('a) Фантом')\n\n#show fbp reconstruction\naxs[0,1].imshow(fbp_rec, cmap='gray')\naxs[0,1].set_title('б) FBP')\naxs[0,1].axhline(y=64, color='orange', ls='--')\n\n#show reconstruction of proposed models\naxs[0,2].imshow(proposed_rec, cmap='gray')\naxs[0,2].set_title('в) UNet1D')\naxs[0,2].axhline(y=64, color='blue', ls='--')\n\n\n#show diff slice\n# axs[1, 2].plot(groundtruth[64], label='Phantom')\naxs[1, 0].plot(proposed_rec[64], '-', label='UNet1D', color='blue')\naxs[1, 0].plot(fbp_rec[64], '--', label='FBP', color='orange')\naxs[1, 0].set_title('г) Срез реконструкции от FBP и UNet1D')\naxs[1, 0].grid()\naxs[1, 0].legend()\n\n#diff fbp to groundtruth\naxs[1,1].imshow(fbp_diff, vmax=groundtruth.max(), vmin=0, cmap='gray')\naxs[1,1].set_title('д) Разница между FBP и фантомом')\n\n#diff proposed to groundtruth\naxs[1,2].imshow(proposed_diff, vmax=groundtruth.max(), vmin=0, cmap='gray')\naxs[1,2].set_title('е) Разница между UNet1D и фантомом')\n\n\n\nfig.subplots_adjust(right=0.9)\ncbar_ax = fig.add_axes([0.91, 0.53, 0.02, 0.35])\nfig.colorbar(im_ph, cax=cbar_ax)\n\nplt.show()"
] | [
[
"torch.nn.MaxPool2d",
"numpy.load",
"torch.load",
"numpy.abs",
"matplotlib.pyplot.subplots",
"torch.nn.Upsample",
"matplotlib.pyplot.show",
"torch.nn.Conv2d",
"numpy.linspace"
]
] |
rran9235/GazePal-Application | [
"88d6a74daeddd18ab37c0f2953a118f1f59e06a5"
] | [
"src/python/GazePal_PC.py"
] | [
"\"\"\"\nGazePal Application\nAuthor: Rishi Rangarajan\nYear: 2021 \n\nFile: GazePal_PC.py\nInfo: GazePal_PC class definition\n\"\"\"\n\n# Imports\nimport csv\nimport os\nimport pyautogui\nimport time\nimport torch\nimport torchvision\nimport cv2 as cv\nimport numpy as np\nimport torchvision.transforms as transforms\nfrom collections import Counter\nfrom threading import Thread\nfrom Class_CNN import CNN\n\n# Class declaration\nclass GazePal_PC:\n\n # Class constructor\n def __init__(self):\n \n # Initialise screen-based parameters\n self.screen_init()\n # Initialise gaze tracking camera\n self.camera_init()\n # Initialise CNN\n self.CNN_init()\n\n # Initialise timer\n self.timer = 0\n\n print(\"[INFO]: GazePal initialised.\") \n\n # Initialise screen-based parameters\n def screen_init(self):\n # Set FR names\n self.classes = [\"FR1\", \"FR2\", \"FR3\", \"FR4\", \"FR5\", \"FR6\", \"FR7\", \"FR8\", \"FR9\"]\n # Get screen resolution\n width_px, height_px = pyautogui.size()\n w_r = width_px/3\n h_r = height_px/3\n # Set xy coordinates for FRs\n self.regions = {\"FR1\" : [1*w_r/2, 1*h_r/2], \"FR2\" : [3*w_r/2, 1*h_r/2], \"FR3\" : [5*w_r/2, 1*h_r/2],\n \"FR4\" : [1*w_r/2, 3*h_r/2], \"FR5\" : [3*w_r/2, 3*h_r/2], \"FR6\" : [5*w_r/2, 3*h_r/2],\n \"FR7\" : [1*w_r/2, 5*h_r/2], \"FR8\" : [3*w_r/2, 5*h_r/2], \"FR9\" : [5*w_r/2, 5*h_r/2]}\n # Initialise buffer\n self.buffer = [4]*20\n # Initialise message\n self.old_FR = \"FR5\"\n\n # Initialise gaze tracking camera-based parameters\n def camera_init(self):\n # Create camera object\n self.GazePal_Camera = cv.VideoCapture(0)\n (self.status, self.frame) = self.GazePal_Camera.read()\n self.stopped = False\n self.thread = Thread(target=self.update, args=())\n self.thread.start()\n\n # Initialise CNN-based parameters\n def CNN_init(self):\n # Create CNN\n self.model_path = os.path.join(\"models\", \"GazePal-Latest.pth\")\n self.GazePal_CNN = CNN()\n self.GazePal_CNN.load_state_dict(torch.load(self.model_path))\n # Set image transforms\n self.img_transform = transforms.Compose([transforms.ToPILImage(),\n transforms.Resize((100,100)),\n transforms.Grayscale(3),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n \n # Create Haar Cascade classifier for Eyes\n self.haar_eye = cv.CascadeClassifier(os.path.join(\"models\", \"haarcascade_eye.xml\"))\n self.haar_face = cv.CascadeClassifier(os.path.join(\"models\", \"haarcascade_frontalface_default.xml\"))\n \n print(\"[INFO]: Created Haar Classifier.\")\n\n # Repeatedly acquire images from gaze tracking camera\n def update(self):\n # Loop infinitely\n while True:\n # If stopped then break from loop\n if self.stopped:\n break\n # Read image frame from camera\n (self.status, self.frame) = self.GazePal_Camera.read()\n\n # Returns acquired image frame\n def read(self):\n # Return frame\n return self.frame\n\n # Exit protocol\n def stop(self):\n self.stopped = True\n # Release gaze tracking camera object\n self.GazePal_Camera.release()\n # Join threads\n self.thread.join() \n # Close all OpenCV windows\n cv.destroyAllWindows()\n\n\n # Move cursor to within a FR\n def absolute_movement(self, FR):\n # Retrieve xy position inside FR\n pos_x = self.regions[FR][0]\n pos_y = self.regions[FR][1]\n # Move cursor to location\n pyautogui.moveTo(pos_x, pos_y, 0.5)\n\n # Initiate left-mouse button click\n def cursor_click(self):\n # Perform click\n pyautogui.leftClick()\n\n # Function to detect faces\n def detect_faces(self, img):\n # Convert to grayscale\n gray = cv.cvtColor(img, 
cv.COLOR_BGR2GRAY)\n # Detect face(s) in the image\n faces = self.haar_face.detectMultiScale(gray, 1.3, 5)\n \n # Loop through each face detected\n for (fx, fy, fw, fh) in faces:\n # Draw rectangle(s) over every face\n cv.rectangle(img, (fx,fy), (fx+fw,fy+fh), (225,0,0), 2)\n\n return faces\n\n # Detect eyes in image frame\n def detect_eyes(self, img):\n # Convert to grayscale \n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n # Detect eye(s) in the image\n eyes = self.haar_eye.detectMultiScale(gray)\n # Extract image dimensions\n height = np.size(img, 0)\n width = np.size(img, 1)\n # Initialise that no eyes are detected\n left_eye = None\n right_eye = None\n\n # Loop through each eye detected\n for (ex, ey, ew, eh) in eyes:\n # Check if detected eye is in bottom half of face\n if ey > height/2:\n pass\n else:\n # Centre between the eyes\n centre = ex + ew/2\n # Check if centre of eye is on the right or left\n if centre > width/2:\n # Draw rectangle(s) over every eye\n cv.rectangle(img, (ex,ey), (ex+ew,ey+eh), (225,0,0), 2)\n # Crop eye\n eye = img[ey:ey+eh, ex:ex+ew]\n\n # Return cropped image of left eye and right eye\n return eye\n\n # Predict the gaze for a given image frame\n def predict_gaze(self):\n \n # Reset previous time\n prev_time = time.time()\n\n # Read frame\n frame = self.read()\n\n # Flip frame\n frame = cv.flip(frame, 1)\n\n try:\n # Detect face\n faces = self.detect_faces(frame)\n # Crop face image\n face_img = frame[faces[0][1]:faces[0][1]+faces[0][3], faces[0][0]:faces[0][0]+faces[0][2]]\n # Detect the eyes in face image\n eye = self.detect_eyes(face_img) \n # Transform image before CNN pass-through\n torch_img = self.image_loader(eye)\n # Obtain CNN prediction distribution\n output = self.GazePal_CNN(torch_img)\n # Extract prediction with highest energy\n _, prediction = torch.max(output.data, 1)\n\n # Remove oldest prediction in buffer\n self.buffer.pop(0)\n # Append new prediction to buffer\n self.buffer.append(int(prediction))\n # Determine most frequently predicted FR\n prediction = max(self.buffer, key=self.buffer.count)\n # Obtain FR name\n FR = str(self.classes[prediction])\n\n # Check if FR is not same as previous FR\n if self.old_FR != FR:\n # If not, then move to new FR\n self.absolute_movement(FR)\n # Reset old FR value to current FR\n self.old_FR = FR\n # Reset timer\n self.timer = time.time()\n else:\n # If same, then check time elapsed\n if (time.time() - self.timer) > 2:\n # If more than 2 seconds on same FR,\n # then reset time\n self.timer = time.time()\n # Initiate cursor click\n self.cursor_click()\n except:\n # Process errors as FR not available\n FR = \"N/A\"\n\n # Print FR number in frame\n cv.putText(frame, FR, (20,50), cv.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)\n\n # Update new time\n new_time = time.time()\n # Compute and write fps\n time_taken = (new_time - prev_time)\n fps = \"FPS: {0:.1f}\".format(1/time_taken)\n cv.putText(frame, fps, (120,50), cv.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2)\n # Update previous time\n prev_time = new_time\n \n # Show image frame\n cv.imshow(\"frame\", frame)\n \n # Repeatedly predict user gaze\n def gaze_tracking(self):\n # Loop infinitely\n while True:\n # Predict user gaze\n self.predict_gaze()\n\n # Wait for keystroke\n k = cv.waitKey(1)\n\n # If keystroke is ESC\n if k == 27:\n # Print message\n print(\"[INFO]: ESC pressed; quitting program.\")\n # Break from loop\n break\n\n # # If keystroke is SPACE\n # elif k == 32:\n # if self.logging:\n # self.logging = False\n # print(\"[INFO]: SPACE pressed; stopping 
logging.\")\n # else:\n # print(\"[INFO]: SPACE pressed; starting logging.\")\n # self.logging = True\n \n # Exit GazePal \n self.stop()\n\n # Transforms to image before CNN prediction\n def image_loader(self, img):\n\n # Transform image\n torch_img = self.img_transform(img).float()\n # Convert to tensor\n torch_img = torch.tensor(torch_img, requires_grad=True)\n # Unsqueeze\n torch_img = torch_img.unsqueeze(0)\n # Return new torch image\n return torch_img\n\n\n\n\n\n\n\n"
] | [
[
"torch.max",
"torch.tensor",
"torch.load",
"numpy.size"
]
] |
ICRC-BME/PySigView | [
"8ac60960dea0e5c70757c76545a896c76a95f68d"
] | [
"pysigview/widgets/transforms/filters.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 29 09:23:17 2017\n\nAnnotations plugin for pysigview\n\nIng.,Mgr. (MSc.) Jan Cimbálník\nBiomedical engineering\nInternational Clinical Research Center\nSt. Anne's University Hospital in Brno\nCzech Republic\n&\nMayo systems electrophysiology lab\nMayo Clinic\n200 1st St SW\nRochester, MN\nUnited States\n\"\"\"\n\n# Standard library imports\n\n# Third party imports\nfrom PyQt5.QtCore import pyqtSignal, Qt\nfrom PyQt5.QtWidgets import (QVBoxLayout,\n QWidget, QLineEdit,\n QComboBox, QLabel, QMessageBox, QPushButton)\n\nfrom scipy.signal import butter, filtfilt\n\n# Local imports\nfrom pysigview.core.plot_transform import BasePlotTransform\n\n\nclass FilterTransform(BasePlotTransform):\n\n def __init__(self):\n super().__init__()\n\n self.name = 'filter'\n self.a = None\n self.b = None\n\n def apply_transform(self, data):\n return filtfilt(self.b, self.a, data)\n\n @property\n def transform_variables(self):\n return (self.a, self.b)\n\n @transform_variables.setter\n def transforms_variables(self, a, b):\n self.a = a\n self.b = b\n\n\nclass Filters(QWidget):\n\n # Attributes\n CONF_SUBSECTION = 'filters'\n IMG_PATH = 'images'\n shortcut = None\n\n # Signals\n filters_transform_changed = pyqtSignal(name='filters_transform_changed')\n\n def __init__(self, parent):\n super(Filters, self).__init__(parent)\n\n self.transform_list_stack = self.parent()\n self.preview = self.transform_list_stack.parent().signal_preview\n self.main = self.transform_list_stack.main\n\n self.title = 'Filters'\n\n # Transform\n self.preview_transform = None\n\n # Master layout\n layout = QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n\n # Filter design widget layout\n filter_layout = QVBoxLayout()\n filter_layout.setContentsMargins(0, 0, 0, 0)\n\n # Filter selector\n self.filter_selector_label = QLabel('Select filter type:', self)\n self.filter_selector = QComboBox(self)\n self.filter_selector.addItem('Butterworth')\n\n # Filter cut-offs\n self.low_cutoff_label = QLabel('Low cutoff:', self)\n self.low_cutoff_le = QLineEdit(self)\n\n self.high_cutoff_label = QLabel('High cutoff:', self)\n self.high_cutoff_le = QLineEdit(self)\n\n # Poles\n self.poles_label = QLabel('N poles:', self)\n self.poles_le = QLineEdit(self)\n\n # Set button\n self.set_button = QPushButton('Set', self)\n\n # Vipy canvas with axes for FFT\n\n #TODO - filter for number only in lineedits\n # Asseble the layout\n filter_layout.addWidget(self.filter_selector_label)\n filter_layout.addWidget(self.filter_selector)\n\n filter_layout.addWidget(self.low_cutoff_label)\n filter_layout.addWidget(self.low_cutoff_le)\n\n filter_layout.addWidget(self.high_cutoff_label)\n filter_layout.addWidget(self.high_cutoff_le)\n\n filter_layout.addWidget(self.poles_label)\n filter_layout.addWidget(self.poles_le)\n\n filter_layout.addWidget(self.set_button)\n\n layout.addLayout(filter_layout)\n layout.setAlignment(Qt.AlignTop)\n\n self.setLayout(layout)\n\n # Connect signals\n self.filter_selector.currentIndexChanged.connect(\n self.set_preview_transform)\n self.low_cutoff_le.returnPressed.connect(self.set_preview_transform)\n self.high_cutoff_le.returnPressed.connect(self.set_preview_transform)\n self.poles_le.returnPressed.connect(self.set_preview_transform)\n self.set_button.clicked.connect(self.set_preview_transform)\n\n def create_transform(self, vc):\n\n fs = vc.fsamp\n if fs is None:\n return\n\n # Design the filter\n selected_filter = self.filter_selector.currentText()\n low_fc_str = 
self.low_cutoff_le.text()\n high_fc_str = self.high_cutoff_le.text()\n poles_str = self.poles_le.text()\n\n if poles_str == '':\n QMessageBox.Warning('Number of poles must by specified')\n return\n else:\n poles = int(poles_str)\n\n if low_fc_str == '':\n low_fc = None\n else:\n low_fc = float(low_fc_str)\n if high_fc_str == '':\n high_fc = None\n else:\n high_fc = float(high_fc_str)\n\n if low_fc is not None and high_fc is not None and low_fc >= high_fc:\n QMessageBox.Warning('Low cut-off frequency cannot be higher',\n 'than high cut-off frequency')\n return\n\n if selected_filter == 'Butterworth':\n if low_fc and high_fc:\n b, a = butter(poles, [low_fc/(fs/2),\n high_fc/(fs/2)], 'bandpass')\n elif low_fc and not high_fc:\n b, a = butter(poles, low_fc/(fs/2), 'highpass')\n elif not low_fc and high_fc:\n b, a = butter(poles, high_fc/(fs/2), 'lowpass')\n else:\n return\n\n # Greate the transform object\n transform = FilterTransform()\n transform.a = a\n transform.b = b\n transform.name = (' / ' + selected_filter + '; '\n + '-'.join([low_fc_str, high_fc_str]) + 'Hz')\n\n return transform\n\n # ??? Should be part of transforms API??\n def set_preview_transform(self):\n\n vc = self.preview.preview_pvc\n self.preview.preview_temp_transform = self.create_transform(vc)\n self.preview.update_trans_sig()\n\n # ----- Transforms API -----\n def get_transform_title(self):\n \"\"\"Return widget title\"\"\"\n return self.title\n\n def register_transform(self):\n \"\"\"\n Register transform in Transforms plugin.\n \"\"\"\n\n # Connect signals\n\n return\n"
] | [
[
"scipy.signal.butter",
"scipy.signal.filtfilt"
]
] |
monferrand/pandas | [
"a3477c769b3d2ea4950ae69f8867e3b291b743c1"
] | [
"pandas/core/arrays/datetimes.py"
] | [
"from datetime import datetime, time, timedelta\nfrom typing import Union\nimport warnings\n\nimport numpy as np\nfrom pytz import utc\n\nfrom pandas._libs import lib, tslib\nfrom pandas._libs.tslibs import (\n NaT,\n Timestamp,\n ccalendar,\n conversion,\n fields,\n iNaT,\n normalize_date,\n resolution as libresolution,\n timezones,\n tzconversion,\n)\nimport pandas._libs.tslibs.frequencies as libfrequencies\nfrom pandas.errors import PerformanceWarning\n\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n INT64_DTYPE,\n is_bool_dtype,\n is_categorical_dtype,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64_ns_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float_dtype,\n is_object_dtype,\n is_period_dtype,\n is_string_dtype,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.generic import ABCIndexClass, ABCPandasArray, ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core.algorithms import checked_add_with_arr\nfrom pandas.core.arrays import datetimelike as dtl\nfrom pandas.core.arrays._ranges import generate_regular_range\nimport pandas.core.common as com\n\nfrom pandas.tseries.frequencies import get_period_alias, to_offset\nfrom pandas.tseries.offsets import Day, Tick\n\n_midnight = time(0, 0)\n\n\ndef tz_to_dtype(tz):\n \"\"\"\n Return a datetime64[ns] dtype appropriate for the given timezone.\n\n Parameters\n ----------\n tz : tzinfo or None\n\n Returns\n -------\n np.dtype or Datetime64TZDType\n \"\"\"\n if tz is None:\n return DT64NS_DTYPE\n else:\n return DatetimeTZDtype(tz=tz)\n\n\ndef _field_accessor(name, field, docstring=None):\n def f(self):\n values = self.asi8\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n\n if field in self._bool_ops:\n if field.endswith((\"start\", \"end\")):\n freq = self.freq\n month_kw = 12\n if freq:\n kwds = freq.kwds\n month_kw = kwds.get(\"startingMonth\", kwds.get(\"month\", 12))\n\n result = fields.get_start_end_field(\n values, field, self.freqstr, month_kw\n )\n else:\n result = fields.get_date_field(values, field)\n\n # these return a boolean by-definition\n return result\n\n if field in self._object_ops:\n result = fields.get_date_name_field(values, field)\n result = self._maybe_mask_results(result, fill_value=None)\n\n else:\n result = fields.get_date_field(values, field)\n result = self._maybe_mask_results(\n result, fill_value=None, convert=\"float64\"\n )\n\n return result\n\n f.__name__ = name\n f.__doc__ = docstring\n return property(f)\n\n\nclass DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps):\n \"\"\"\n Pandas ExtensionArray for tz-naive or tz-aware datetime data.\n\n .. versionadded:: 0.24.0\n\n .. warning::\n\n DatetimeArray is currently experimental, and its API may change\n without warning. 
In particular, :attr:`DatetimeArray.dtype` is\n expected to change to always be an instance of an ``ExtensionDtype``\n subclass.\n\n Parameters\n ----------\n values : Series, Index, DatetimeArray, ndarray\n The datetime data.\n\n For DatetimeArray `values` (or a Series or Index boxing one),\n `dtype` and `freq` will be extracted from `values`.\n\n dtype : numpy.dtype or DatetimeTZDtype\n Note that the only NumPy dtype allowed is 'datetime64[ns]'.\n freq : str or Offset, optional\n The frequency.\n copy : bool, default False\n Whether to copy the underlying array of values.\n\n Attributes\n ----------\n None\n\n Methods\n -------\n None\n \"\"\"\n\n _typ = \"datetimearray\"\n _scalar_type = Timestamp\n _recognized_scalars = (datetime, np.datetime64)\n _is_recognized_dtype = is_datetime64_any_dtype\n\n # define my properties & methods for delegation\n _bool_ops = [\n \"is_month_start\",\n \"is_month_end\",\n \"is_quarter_start\",\n \"is_quarter_end\",\n \"is_year_start\",\n \"is_year_end\",\n \"is_leap_year\",\n ]\n _object_ops = [\"freq\", \"tz\"]\n _field_ops = [\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"weekofyear\",\n \"week\",\n \"weekday\",\n \"dayofweek\",\n \"dayofyear\",\n \"quarter\",\n \"days_in_month\",\n \"daysinmonth\",\n \"microsecond\",\n \"nanosecond\",\n ]\n _other_ops = [\"date\", \"time\", \"timetz\"]\n _datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops\n _datetimelike_methods = [\n \"to_period\",\n \"tz_localize\",\n \"tz_convert\",\n \"normalize\",\n \"strftime\",\n \"round\",\n \"floor\",\n \"ceil\",\n \"month_name\",\n \"day_name\",\n ]\n\n # ndim is inherited from ExtensionArray, must exist to ensure\n # Timestamp.__richcmp__(DateTimeArray) operates pointwise\n\n # ensure that operations with numpy arrays defer to our implementation\n __array_priority__ = 1000\n\n # -----------------------------------------------------------------\n # Constructors\n\n _dtype: Union[np.dtype, DatetimeTZDtype]\n _freq = None\n\n def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy=False):\n if isinstance(values, (ABCSeries, ABCIndexClass)):\n values = values._values\n\n inferred_freq = getattr(values, \"_freq\", None)\n\n if isinstance(values, type(self)):\n # validation\n dtz = getattr(dtype, \"tz\", None)\n if dtz and values.tz is None:\n dtype = DatetimeTZDtype(tz=dtype.tz)\n elif dtz and values.tz:\n if not timezones.tz_compare(dtz, values.tz):\n msg = (\n \"Timezone of the array and 'dtype' do not match. \"\n f\"'{dtz}' != '{values.tz}'\"\n )\n raise TypeError(msg)\n elif values.tz:\n dtype = values.dtype\n\n if freq is None:\n freq = values.freq\n values = values._data\n\n if not isinstance(values, np.ndarray):\n raise ValueError(\n f\"Unexpected type '{type(values).__name__}'. 'values' must be \"\n \"a DatetimeArray ndarray, or Series or Index containing one of those.\"\n )\n if values.ndim not in [1, 2]:\n raise ValueError(\"Only 1-dimensional input arrays are supported.\")\n\n if values.dtype == \"i8\":\n # for compat with datetime/timedelta/period shared methods,\n # we can sometimes get here with int64 values. These represent\n # nanosecond UTC (or tz-naive) unix timestamps\n values = values.view(DT64NS_DTYPE)\n\n if values.dtype != DT64NS_DTYPE:\n raise ValueError(\n \"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. 
\"\n f\"Got {values.dtype} instead.\"\n )\n\n dtype = _validate_dt64_dtype(dtype)\n\n if freq == \"infer\":\n raise ValueError(\n \"Frequency inference not allowed in DatetimeArray.__init__. \"\n \"Use 'pd.array()' instead.\"\n )\n\n if copy:\n values = values.copy()\n if freq:\n freq = to_offset(freq)\n if getattr(dtype, \"tz\", None):\n # https://github.com/pandas-dev/pandas/issues/18595\n # Ensure that we have a standard timezone for pytz objects.\n # Without this, things like adding an array of timedeltas and\n # a tz-aware Timestamp (with a tz specific to its datetime) will\n # be incorrect(ish?) for the array as a whole\n dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))\n\n self._data = values\n self._dtype = dtype\n self._freq = freq\n\n if inferred_freq is None and freq is not None:\n type(self)._validate_frequency(self, freq)\n\n @classmethod\n def _simple_new(cls, values, freq=None, dtype=DT64NS_DTYPE):\n assert isinstance(values, np.ndarray)\n if values.dtype != DT64NS_DTYPE:\n assert values.dtype == \"i8\"\n values = values.view(DT64NS_DTYPE)\n\n result = object.__new__(cls)\n result._data = values\n result._freq = freq\n result._dtype = dtype\n return result\n\n @classmethod\n def _from_sequence(\n cls,\n data,\n dtype=None,\n copy=False,\n tz=None,\n freq=lib.no_default,\n dayfirst=False,\n yearfirst=False,\n ambiguous=\"raise\",\n ):\n explicit_none = freq is None\n freq = freq if freq is not lib.no_default else None\n\n freq, freq_infer = dtl.maybe_infer_freq(freq)\n\n subarr, tz, inferred_freq = sequence_to_dt64ns(\n data,\n dtype=dtype,\n copy=copy,\n tz=tz,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n ambiguous=ambiguous,\n )\n\n freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)\n if explicit_none:\n freq = None\n\n dtype = tz_to_dtype(tz)\n result = cls._simple_new(subarr, freq=freq, dtype=dtype)\n\n if inferred_freq is None and freq is not None:\n # this condition precludes `freq_infer`\n cls._validate_frequency(result, freq, ambiguous=ambiguous)\n\n elif freq_infer:\n # Set _freq directly to bypass duplicative _validate_frequency\n # check.\n result._freq = to_offset(result.inferred_freq)\n\n return result\n\n @classmethod\n def _generate_range(\n cls,\n start,\n end,\n periods,\n freq,\n tz=None,\n normalize=False,\n ambiguous=\"raise\",\n nonexistent=\"raise\",\n closed=None,\n ):\n\n periods = dtl.validate_periods(periods)\n if freq is None and any(x is None for x in [periods, start, end]):\n raise ValueError(\"Must provide freq argument if no data is supplied\")\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n \"Of the four parameters: start, end, periods, \"\n \"and freq, exactly three must be specified\"\n )\n freq = to_offset(freq)\n\n if start is not None:\n start = Timestamp(start)\n\n if end is not None:\n end = Timestamp(end)\n\n if start is None and end is None:\n if closed is not None:\n raise ValueError(\n \"Closed has to be None if not both of start and end are defined\"\n )\n if start is NaT or end is NaT:\n raise ValueError(\"Neither `start` nor `end` can be NaT\")\n\n left_closed, right_closed = dtl.validate_endpoints(closed)\n\n start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)\n\n tz = _infer_tz_from_endpoints(start, end, tz)\n\n if tz is not None:\n # Localize the start and end arguments\n start = _maybe_localize_point(\n start,\n getattr(start, \"tz\", None),\n start,\n freq,\n tz,\n ambiguous,\n nonexistent,\n )\n end = _maybe_localize_point(\n 
end, getattr(end, \"tz\", None), end, freq, tz, ambiguous, nonexistent\n )\n if freq is not None:\n # We break Day arithmetic (fixed 24 hour) here and opt for\n # Day to mean calendar day (23/24/25 hour). Therefore, strip\n # tz info from start and day to avoid DST arithmetic\n if isinstance(freq, Day):\n if start is not None:\n start = start.tz_localize(None)\n if end is not None:\n end = end.tz_localize(None)\n\n values, _tz = generate_regular_range(start, end, periods, freq)\n index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))\n\n if tz is not None and index.tz is None:\n arr = conversion.tz_localize_to_utc(\n index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent\n )\n\n index = cls(arr)\n\n # index is localized datetime64 array -> have to convert\n # start/end as well to compare\n if start is not None:\n start = start.tz_localize(tz).asm8\n if end is not None:\n end = end.tz_localize(tz).asm8\n else:\n # Create a linearly spaced date_range in local time\n # Nanosecond-granularity timestamps aren't always correctly\n # representable with doubles, so we limit the range that we\n # pass to np.linspace as much as possible\n arr = (\n np.linspace(0, end.value - start.value, periods, dtype=\"int64\")\n + start.value\n )\n dtype = tz_to_dtype(tz)\n index = cls._simple_new(\n arr.astype(\"M8[ns]\", copy=False), freq=None, dtype=dtype\n )\n\n if not left_closed and len(index) and index[0] == start:\n index = index[1:]\n if not right_closed and len(index) and index[-1] == end:\n index = index[:-1]\n\n dtype = tz_to_dtype(tz)\n return cls._simple_new(index.asi8, freq=freq, dtype=dtype)\n\n # -----------------------------------------------------------------\n # DatetimeLike Interface\n\n def _unbox_scalar(self, value):\n if not isinstance(value, self._scalar_type) and value is not NaT:\n raise ValueError(\"'value' should be a Timestamp.\")\n if not isna(value):\n self._check_compatible_with(value)\n return value.value\n\n def _scalar_from_string(self, value):\n return Timestamp(value, tz=self.tz)\n\n def _check_compatible_with(self, other, setitem: bool = False):\n if other is NaT:\n return\n self._assert_tzawareness_compat(other)\n if setitem:\n # Stricter check for setitem vs comparison methods\n if not timezones.tz_compare(self.tz, other.tz):\n raise ValueError(f\"Timezones don't match. '{self.tz} != {other.tz}'\")\n\n def _maybe_clear_freq(self):\n self._freq = None\n\n # -----------------------------------------------------------------\n # Descriptive Properties\n\n @property\n def _box_func(self):\n return lambda x: Timestamp(x, freq=self.freq, tz=self.tz)\n\n @property\n def dtype(self) -> Union[np.dtype, DatetimeTZDtype]:\n \"\"\"\n The dtype for the DatetimeArray.\n\n .. warning::\n\n A future version of pandas will change dtype to never be a\n ``numpy.dtype``. 
Instead, :attr:`DatetimeArray.dtype` will\n always be an instance of an ``ExtensionDtype`` subclass.\n\n Returns\n -------\n numpy.dtype or DatetimeTZDtype\n If the values are tz-naive, then ``np.dtype('datetime64[ns]')``\n is returned.\n\n If the values are tz-aware, then the ``DatetimeTZDtype``\n is returned.\n \"\"\"\n return self._dtype\n\n @property\n def tz(self):\n \"\"\"\n Return timezone, if any.\n\n Returns\n -------\n datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None\n Returns None when the array is tz-naive.\n \"\"\"\n # GH 18595\n return getattr(self.dtype, \"tz\", None)\n\n @tz.setter\n def tz(self, value):\n # GH 3746: Prevent localizing or converting the index by setting tz\n raise AttributeError(\n \"Cannot directly set timezone. Use tz_localize() \"\n \"or tz_convert() as appropriate\"\n )\n\n @property\n def tzinfo(self):\n \"\"\"\n Alias for tz attribute\n \"\"\"\n return self.tz\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _timezone(self):\n \"\"\"\n Comparable timezone both for pytz / dateutil\n \"\"\"\n return timezones.get_timezone(self.tzinfo)\n\n @property # NB: override with cache_readonly in immutable subclasses\n def is_normalized(self):\n \"\"\"\n Returns True if all of the dates are at midnight (\"no time\")\n \"\"\"\n return conversion.is_date_array_normalized(self.asi8, self.tz)\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _resolution(self):\n return libresolution.resolution(self.asi8, self.tz)\n\n # ----------------------------------------------------------------\n # Array-Like / EA-Interface Methods\n\n def __array__(self, dtype=None) -> np.ndarray:\n if dtype is None and self.tz:\n # The default for tz-aware is object, to preserve tz info\n dtype = object\n\n return super().__array__(dtype=dtype)\n\n def __iter__(self):\n \"\"\"\n Return an iterator over the boxed values\n\n Yields\n ------\n tstamp : Timestamp\n \"\"\"\n\n # convert in chunks of 10k for efficiency\n data = self.asi8\n length = len(self)\n chunksize = 10000\n chunks = int(length / chunksize) + 1\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, length)\n converted = tslib.ints_to_pydatetime(\n data[start_i:end_i], tz=self.tz, freq=self.freq, box=\"timestamp\"\n )\n for v in converted:\n yield v\n\n def astype(self, dtype, copy=True):\n # We handle\n # --> datetime\n # --> period\n # DatetimeLikeArrayMixin Super handles the rest.\n dtype = pandas_dtype(dtype)\n\n if is_datetime64_ns_dtype(dtype) and not is_dtype_equal(dtype, self.dtype):\n # GH#18951: datetime64_ns dtype but not equal means different tz\n new_tz = getattr(dtype, \"tz\", None)\n if getattr(self.dtype, \"tz\", None) is None:\n return self.tz_localize(new_tz)\n result = self.tz_convert(new_tz)\n if copy:\n result = result.copy()\n if new_tz is None:\n # Do we want .astype('datetime64[ns]') to be an ndarray.\n # The astype in Block._astype expects this to return an\n # ndarray, but we could maybe work around it there.\n result = result._data\n return result\n elif is_datetime64tz_dtype(self.dtype) and is_dtype_equal(self.dtype, dtype):\n if copy:\n return self.copy()\n return self\n elif is_period_dtype(dtype):\n return self.to_period(freq=dtype.freq)\n return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)\n\n # -----------------------------------------------------------------\n # Rendering Methods\n\n def _format_native_types(self, na_rep=\"NaT\", date_format=None, **kwargs):\n from 
pandas.io.formats.format import _get_format_datetime64_from_values\n\n fmt = _get_format_datetime64_from_values(self, date_format)\n\n return tslib.format_array_from_datetime(\n self.asi8.ravel(), tz=self.tz, format=fmt, na_rep=na_rep\n ).reshape(self.shape)\n\n # -----------------------------------------------------------------\n # Comparison Methods\n\n def _has_same_tz(self, other):\n zzone = self._timezone\n\n # vzone shouldn't be None if value is non-datetime like\n if isinstance(other, np.datetime64):\n # convert to Timestamp as np.datetime64 doesn't have tz attr\n other = Timestamp(other)\n vzone = timezones.get_timezone(getattr(other, \"tzinfo\", \"__no_tz__\"))\n return zzone == vzone\n\n def _assert_tzawareness_compat(self, other):\n # adapted from _Timestamp._assert_tzawareness_compat\n other_tz = getattr(other, \"tzinfo\", None)\n if is_datetime64tz_dtype(other):\n # Get tzinfo from Series dtype\n other_tz = other.dtype.tz\n if other is NaT:\n # pd.NaT quacks both aware and naive\n pass\n elif self.tz is None:\n if other_tz is not None:\n raise TypeError(\n \"Cannot compare tz-naive and tz-aware datetime-like objects.\"\n )\n elif other_tz is None:\n raise TypeError(\n \"Cannot compare tz-naive and tz-aware datetime-like objects\"\n )\n\n # -----------------------------------------------------------------\n # Arithmetic Methods\n\n def _sub_datetime_arraylike(self, other):\n \"\"\"subtract DatetimeArray/Index or ndarray[datetime64]\"\"\"\n if len(self) != len(other):\n raise ValueError(\"cannot add indices of unequal length\")\n\n if isinstance(other, np.ndarray):\n assert is_datetime64_dtype(other)\n other = type(self)(other)\n\n if not self._has_same_tz(other):\n # require tz compat\n raise TypeError(\n f\"{type(self).__name__} subtraction must have the same \"\n \"timezones or no timezones\"\n )\n\n self_i8 = self.asi8\n other_i8 = other.asi8\n arr_mask = self._isnan | other._isnan\n new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask)\n if self._hasnans or other._hasnans:\n new_values[arr_mask] = iNaT\n return new_values.view(\"timedelta64[ns]\")\n\n def _add_offset(self, offset):\n if self.ndim == 2:\n return self.ravel()._add_offset(offset).reshape(self.shape)\n\n assert not isinstance(offset, Tick)\n try:\n if self.tz is not None:\n values = self.tz_localize(None)\n else:\n values = self\n result = offset.apply_index(values).tz_localize(self.tz)\n\n except NotImplementedError:\n warnings.warn(\n \"Non-vectorized DateOffset being applied to Series or DatetimeIndex\",\n PerformanceWarning,\n )\n result = self.astype(\"O\") + offset\n if not len(self):\n # GH#30336 _from_sequence won't be able to infer self.tz\n return type(self)._from_sequence(result).tz_localize(self.tz)\n\n return type(self)._from_sequence(result)\n\n def _sub_datetimelike_scalar(self, other):\n # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]\n assert isinstance(other, (datetime, np.datetime64))\n assert other is not NaT\n other = Timestamp(other)\n if other is NaT:\n return self - NaT\n\n if not self._has_same_tz(other):\n # require tz compat\n raise TypeError(\n \"Timestamp subtraction must have the same timezones or no timezones\"\n )\n\n i8 = self.asi8\n result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)\n result = self._maybe_mask_results(result)\n return result.view(\"timedelta64[ns]\")\n\n # -----------------------------------------------------------------\n # Timezone Conversion and Localization Methods\n\n def 
_local_timestamps(self):\n \"\"\"\n Convert to an i8 (unix-like nanosecond timestamp) representation\n while keeping the local timezone and not using UTC.\n This is used to calculate time-of-day information as if the timestamps\n were timezone-naive.\n \"\"\"\n return tzconversion.tz_convert(self.asi8, utc, self.tz)\n\n def tz_convert(self, tz):\n \"\"\"\n Convert tz-aware Datetime Array/Index from one time zone to another.\n\n Parameters\n ----------\n tz : str, pytz.timezone, dateutil.tz.tzfile or None\n Time zone for time. Corresponding timestamps would be converted\n to this time zone of the Datetime Array/Index. A `tz` of None will\n convert to UTC and remove the timezone information.\n\n Returns\n -------\n Array or Index\n\n Raises\n ------\n TypeError\n If Datetime Array/Index is tz-naive.\n\n See Also\n --------\n DatetimeIndex.tz : A timezone that has a variable offset from UTC.\n DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a\n given time zone, or remove timezone from a tz-aware DatetimeIndex.\n\n Examples\n --------\n With the `tz` parameter, we can change the DatetimeIndex\n to other time zones:\n\n >>> dti = pd.date_range(start='2014-08-01 09:00',\n ... freq='H', periods=3, tz='Europe/Berlin')\n\n >>> dti\n DatetimeIndex(['2014-08-01 09:00:00+02:00',\n '2014-08-01 10:00:00+02:00',\n '2014-08-01 11:00:00+02:00'],\n dtype='datetime64[ns, Europe/Berlin]', freq='H')\n\n >>> dti.tz_convert('US/Central')\n DatetimeIndex(['2014-08-01 02:00:00-05:00',\n '2014-08-01 03:00:00-05:00',\n '2014-08-01 04:00:00-05:00'],\n dtype='datetime64[ns, US/Central]', freq='H')\n\n With the ``tz=None``, we can remove the timezone (after converting\n to UTC if necessary):\n\n >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',\n ... periods=3, tz='Europe/Berlin')\n\n >>> dti\n DatetimeIndex(['2014-08-01 09:00:00+02:00',\n '2014-08-01 10:00:00+02:00',\n '2014-08-01 11:00:00+02:00'],\n dtype='datetime64[ns, Europe/Berlin]', freq='H')\n\n >>> dti.tz_convert(None)\n DatetimeIndex(['2014-08-01 07:00:00',\n '2014-08-01 08:00:00',\n '2014-08-01 09:00:00'],\n dtype='datetime64[ns]', freq='H')\n \"\"\"\n tz = timezones.maybe_get_tz(tz)\n\n if self.tz is None:\n # tz naive, use tz_localize\n raise TypeError(\n \"Cannot convert tz-naive timestamps, use tz_localize to localize\"\n )\n\n # No conversion since timestamps are all UTC to begin with\n dtype = tz_to_dtype(tz)\n return self._simple_new(self.asi8, dtype=dtype, freq=self.freq)\n\n def tz_localize(self, tz, ambiguous=\"raise\", nonexistent=\"raise\"):\n \"\"\"\n Localize tz-naive Datetime Array/Index to tz-aware\n Datetime Array/Index.\n\n This method takes a time zone (tz) naive Datetime Array/Index object\n and makes this time zone aware. It does not move the time to another\n time zone.\n Time zone localization helps to switch from time zone aware to time\n zone unaware objects.\n\n Parameters\n ----------\n tz : str, pytz.timezone, dateutil.tz.tzfile or None\n Time zone to convert timestamps to. Passing ``None`` will\n remove the time zone information preserving local time.\n ambiguous : 'infer', 'NaT', bool array, default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. 
In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False signifies a\n non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times.\n\n nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \\\ndefault 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST.\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Same type as self\n Array/Index converted to the specified time zone.\n\n Raises\n ------\n TypeError\n If the Datetime Array/Index is tz-aware and tz is not None.\n\n See Also\n --------\n DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from\n one time zone to another.\n\n Examples\n --------\n >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)\n >>> tz_naive\n DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',\n '2018-03-03 09:00:00'],\n dtype='datetime64[ns]', freq='D')\n\n Localize DatetimeIndex in US/Eastern time zone:\n\n >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')\n >>> tz_aware\n DatetimeIndex(['2018-03-01 09:00:00-05:00',\n '2018-03-02 09:00:00-05:00',\n '2018-03-03 09:00:00-05:00'],\n dtype='datetime64[ns, US/Eastern]', freq=None)\n\n With the ``tz=None``, we can remove the time zone information\n while keeping the local time (not converted to UTC):\n\n >>> tz_aware.tz_localize(None)\n DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',\n '2018-03-03 09:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n Be careful with DST changes. When there is sequential data, pandas can\n infer the DST time:\n\n >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.dt.tz_localize('CET', ambiguous='infer')\n 0 2018-10-28 01:30:00+02:00\n 1 2018-10-28 02:00:00+02:00\n 2 2018-10-28 02:30:00+02:00\n 3 2018-10-28 02:00:00+01:00\n 4 2018-10-28 02:30:00+01:00\n 5 2018-10-28 03:00:00+01:00\n 6 2018-10-28 03:30:00+01:00\n dtype: datetime64[ns, CET]\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly\n\n >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... '2018-10-28 03:46:00']))\n >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 0 2018-10-28 01:20:00+02:00\n 1 2018-10-28 02:36:00+02:00\n 2 2018-10-28 03:46:00+01:00\n dtype: datetime64[ns, CET]\n\n If the DST transition causes nonexistent times, you can shift these\n dates forward or backwards with a timedelta object or `'shift_forward'`\n or `'shift_backwards'`.\n\n >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',\n ... 
'2015-03-29 03:30:00']))\n >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n 0 2015-03-29 03:00:00+02:00\n 1 2015-03-29 03:30:00+02:00\n dtype: datetime64[ns, Europe/Warsaw]\n\n >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n 0 2015-03-29 01:59:59.999999999+01:00\n 1 2015-03-29 03:30:00+02:00\n dtype: datetime64[ns, Europe/Warsaw]\n\n >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n 0 2015-03-29 03:30:00+02:00\n 1 2015-03-29 03:30:00+02:00\n dtype: datetime64[ns, Europe/Warsaw]\n \"\"\"\n nonexistent_options = (\"raise\", \"NaT\", \"shift_forward\", \"shift_backward\")\n if nonexistent not in nonexistent_options and not isinstance(\n nonexistent, timedelta\n ):\n raise ValueError(\n \"The nonexistent argument must be one of 'raise', \"\n \"'NaT', 'shift_forward', 'shift_backward' or \"\n \"a timedelta object\"\n )\n\n if self.tz is not None:\n if tz is None:\n new_dates = tzconversion.tz_convert(self.asi8, timezones.UTC, self.tz)\n else:\n raise TypeError(\"Already tz-aware, use tz_convert to convert.\")\n else:\n tz = timezones.maybe_get_tz(tz)\n # Convert to UTC\n\n new_dates = conversion.tz_localize_to_utc(\n self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent\n )\n new_dates = new_dates.view(DT64NS_DTYPE)\n dtype = tz_to_dtype(tz)\n\n freq = None\n if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):\n # we can preserve freq\n # TODO: Also for fixed-offsets\n freq = self.freq\n elif tz is None and self.tz is None:\n # no-op\n freq = self.freq\n return self._simple_new(new_dates, dtype=dtype, freq=freq)\n\n # ----------------------------------------------------------------\n # Conversion Methods - Vectorized analogues of Timestamp methods\n\n def to_pydatetime(self) -> np.ndarray:\n \"\"\"\n Return Datetime Array/Index as object ndarray of datetime.datetime\n objects.\n\n Returns\n -------\n datetimes : ndarray\n \"\"\"\n return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)\n\n def normalize(self):\n \"\"\"\n Convert times to midnight.\n\n The time component of the date-time is converted to midnight i.e.\n 00:00:00. This is useful in cases, when the time does not matter.\n Length is unaltered. The timezones are unaffected.\n\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on Datetime Array/Index.\n\n Returns\n -------\n DatetimeArray, DatetimeIndex or Series\n The same type as the original data. Series will have the same\n name and index. DatetimeIndex will have the same name.\n\n See Also\n --------\n floor : Floor the datetimes to the specified freq.\n ceil : Ceil the datetimes to the specified freq.\n round : Round the datetimes to the specified freq.\n\n Examples\n --------\n >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',\n ... 
periods=3, tz='Asia/Calcutta')\n >>> idx\n DatetimeIndex(['2014-08-01 10:00:00+05:30',\n '2014-08-01 11:00:00+05:30',\n '2014-08-01 12:00:00+05:30'],\n dtype='datetime64[ns, Asia/Calcutta]', freq='H')\n >>> idx.normalize()\n DatetimeIndex(['2014-08-01 00:00:00+05:30',\n '2014-08-01 00:00:00+05:30',\n '2014-08-01 00:00:00+05:30'],\n dtype='datetime64[ns, Asia/Calcutta]', freq=None)\n \"\"\"\n if self.tz is None or timezones.is_utc(self.tz):\n not_null = ~self.isna()\n DAY_NS = ccalendar.DAY_SECONDS * 1_000_000_000\n new_values = self.asi8.copy()\n adjustment = new_values[not_null] % DAY_NS\n new_values[not_null] = new_values[not_null] - adjustment\n else:\n new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz)\n return type(self)(new_values)._with_freq(\"infer\").tz_localize(self.tz)\n\n def to_period(self, freq=None):\n \"\"\"\n Cast to PeriodArray/Index at a particular frequency.\n\n Converts DatetimeArray/Index to PeriodArray/Index.\n\n Parameters\n ----------\n freq : str or Offset, optional\n One of pandas' :ref:`offset strings <timeseries.offset_aliases>`\n or an Offset object. Will be inferred by default.\n\n Returns\n -------\n PeriodArray/Index\n\n Raises\n ------\n ValueError\n When converting a DatetimeArray/Index with non-regular values,\n so that a frequency cannot be inferred.\n\n See Also\n --------\n PeriodIndex: Immutable ndarray holding ordinal values.\n DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"y\": [1, 2, 3]},\n ... index=pd.to_datetime([\"2000-03-31 00:00:00\",\n ... \"2000-05-31 00:00:00\",\n ... \"2000-08-31 00:00:00\"]))\n >>> df.index.to_period(\"M\")\n PeriodIndex(['2000-03', '2000-05', '2000-08'],\n dtype='period[M]', freq='M')\n\n Infer the daily frequency\n\n >>> idx = pd.date_range(\"2017-01-01\", periods=2)\n >>> idx.to_period()\n PeriodIndex(['2017-01-01', '2017-01-02'],\n dtype='period[D]', freq='D')\n \"\"\"\n from pandas.core.arrays import PeriodArray\n\n if self.tz is not None:\n warnings.warn(\n \"Converting to PeriodArray/Index representation \"\n \"will drop timezone information.\",\n UserWarning,\n )\n\n if freq is None:\n freq = self.freqstr or self.inferred_freq\n\n if freq is None:\n raise ValueError(\n \"You must pass a freq argument as current index has none.\"\n )\n\n res = get_period_alias(freq)\n\n # https://github.com/pandas-dev/pandas/issues/33358\n if res is None:\n base, stride = libfrequencies.base_and_stride(freq)\n res = f\"{stride}{base}\"\n\n freq = res\n\n return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)\n\n def to_perioddelta(self, freq):\n \"\"\"\n Calculate TimedeltaArray of difference between index\n values and index converted to PeriodArray at specified\n freq. Used for vectorized offsets.\n\n Parameters\n ----------\n freq : Period frequency\n\n Returns\n -------\n TimedeltaArray/Index\n \"\"\"\n # TODO: consider privatizing (discussion in GH#23113)\n from pandas.core.arrays.timedeltas import TimedeltaArray\n\n i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8\n m8delta = i8delta.view(\"m8[ns]\")\n return TimedeltaArray(m8delta)\n\n # -----------------------------------------------------------------\n # Properties - Vectorized Timestamp Properties/Methods\n\n def month_name(self, locale=None):\n \"\"\"\n Return the month names of the DateTimeIndex with specified locale.\n\n .. 
versionadded:: 0.23.0\n\n Parameters\n ----------\n locale : str, optional\n Locale determining the language in which to return the month name.\n Default is English locale.\n\n Returns\n -------\n Index\n Index of month names.\n\n Examples\n --------\n >>> idx = pd.date_range(start='2018-01', freq='M', periods=3)\n >>> idx\n DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],\n dtype='datetime64[ns]', freq='M')\n >>> idx.month_name()\n Index(['January', 'February', 'March'], dtype='object')\n \"\"\"\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n else:\n values = self.asi8\n\n result = fields.get_date_name_field(values, \"month_name\", locale=locale)\n result = self._maybe_mask_results(result, fill_value=None)\n return result\n\n def day_name(self, locale=None):\n \"\"\"\n Return the day names of the DateTimeIndex with specified locale.\n\n .. versionadded:: 0.23.0\n\n Parameters\n ----------\n locale : str, optional\n Locale determining the language in which to return the day name.\n Default is English locale.\n\n Returns\n -------\n Index\n Index of day names.\n\n Examples\n --------\n >>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)\n >>> idx\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],\n dtype='datetime64[ns]', freq='D')\n >>> idx.day_name()\n Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')\n \"\"\"\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n else:\n values = self.asi8\n\n result = fields.get_date_name_field(values, \"day_name\", locale=locale)\n result = self._maybe_mask_results(result, fill_value=None)\n return result\n\n @property\n def time(self):\n \"\"\"\n Returns numpy array of datetime.time. The time part of the Timestamps.\n \"\"\"\n # If the Timestamps have a timezone that is not UTC,\n # convert them into their i8 representation while\n # keeping their timezone and not using UTC\n if self.tz is not None and not timezones.is_utc(self.tz):\n timestamps = self._local_timestamps()\n else:\n timestamps = self.asi8\n\n return tslib.ints_to_pydatetime(timestamps, box=\"time\")\n\n @property\n def timetz(self):\n \"\"\"\n Returns numpy array of datetime.time also containing timezone\n information. The time part of the Timestamps.\n \"\"\"\n return tslib.ints_to_pydatetime(self.asi8, self.tz, box=\"time\")\n\n @property\n def date(self):\n \"\"\"\n Returns numpy array of python datetime.date objects (namely, the date\n part of Timestamps without timezone information).\n \"\"\"\n # If the Timestamps have a timezone that is not UTC,\n # convert them into their i8 representation while\n # keeping their timezone and not using UTC\n if self.tz is not None and not timezones.is_utc(self.tz):\n timestamps = self._local_timestamps()\n else:\n timestamps = self.asi8\n\n return tslib.ints_to_pydatetime(timestamps, box=\"date\")\n\n def isocalendar(self):\n \"\"\"\n Returns a DataFrame with the year, week, and day calculated according to\n the ISO 8601 standard.\n\n .. 
versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n with columns year, week and day\n\n See Also\n --------\n Timestamp.isocalendar\n datetime.date.isocalendar\n\n Examples\n --------\n >>> idx = pd.date_range(start='2019-12-29', freq='D', periods=4)\n >>> idx.isocalendar()\n year week day\n 0 2019 52 7\n 1 2020 1 1\n 2 2020 1 2\n 3 2020 1 3\n >>> idx.isocalendar().week\n 0 52\n 1 1\n 2 1\n 3 1\n Name: week, dtype: UInt32\n \"\"\"\n from pandas import DataFrame\n\n if self.tz is not None and not timezones.is_utc(self.tz):\n values = self._local_timestamps()\n else:\n values = self.asi8\n sarray = fields.build_isocalendar_sarray(values)\n iso_calendar_df = DataFrame(\n sarray, columns=[\"year\", \"week\", \"day\"], dtype=\"UInt32\"\n )\n if self._hasnans:\n iso_calendar_df.iloc[self._isnan] = None\n return iso_calendar_df\n\n year = _field_accessor(\n \"year\",\n \"Y\",\n \"\"\"\n The year of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"Y\")\n ... )\n >>> datetime_series\n 0 2000-12-31\n 1 2001-12-31\n 2 2002-12-31\n dtype: datetime64[ns]\n >>> datetime_series.dt.year\n 0 2000\n 1 2001\n 2 2002\n dtype: int64\n \"\"\",\n )\n month = _field_accessor(\n \"month\",\n \"M\",\n \"\"\"\n The month as January=1, December=12.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"M\")\n ... )\n >>> datetime_series\n 0 2000-01-31\n 1 2000-02-29\n 2 2000-03-31\n dtype: datetime64[ns]\n >>> datetime_series.dt.month\n 0 1\n 1 2\n 2 3\n dtype: int64\n \"\"\",\n )\n day = _field_accessor(\n \"day\",\n \"D\",\n \"\"\"\n The day of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"D\")\n ... )\n >>> datetime_series\n 0 2000-01-01\n 1 2000-01-02\n 2 2000-01-03\n dtype: datetime64[ns]\n >>> datetime_series.dt.day\n 0 1\n 1 2\n 2 3\n dtype: int64\n \"\"\",\n )\n hour = _field_accessor(\n \"hour\",\n \"h\",\n \"\"\"\n The hours of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"h\")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 01:00:00\n 2 2000-01-01 02:00:00\n dtype: datetime64[ns]\n >>> datetime_series.dt.hour\n 0 0\n 1 1\n 2 2\n dtype: int64\n \"\"\",\n )\n minute = _field_accessor(\n \"minute\",\n \"m\",\n \"\"\"\n The minutes of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"T\")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:01:00\n 2 2000-01-01 00:02:00\n dtype: datetime64[ns]\n >>> datetime_series.dt.minute\n 0 0\n 1 1\n 2 2\n dtype: int64\n \"\"\",\n )\n second = _field_accessor(\n \"second\",\n \"s\",\n \"\"\"\n The seconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"s\")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:00:01\n 2 2000-01-01 00:00:02\n dtype: datetime64[ns]\n >>> datetime_series.dt.second\n 0 0\n 1 1\n 2 2\n dtype: int64\n \"\"\",\n )\n microsecond = _field_accessor(\n \"microsecond\",\n \"us\",\n \"\"\"\n The microseconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"us\")\n ... 
)\n >>> datetime_series\n 0 2000-01-01 00:00:00.000000\n 1 2000-01-01 00:00:00.000001\n 2 2000-01-01 00:00:00.000002\n dtype: datetime64[ns]\n >>> datetime_series.dt.microsecond\n 0 0\n 1 1\n 2 2\n dtype: int64\n \"\"\",\n )\n nanosecond = _field_accessor(\n \"nanosecond\",\n \"ns\",\n \"\"\"\n The nanoseconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range(\"2000-01-01\", periods=3, freq=\"ns\")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00.000000000\n 1 2000-01-01 00:00:00.000000001\n 2 2000-01-01 00:00:00.000000002\n dtype: datetime64[ns]\n >>> datetime_series.dt.nanosecond\n 0 0\n 1 1\n 2 2\n dtype: int64\n \"\"\",\n )\n weekofyear = _field_accessor(\n \"weekofyear\",\n \"woy\",\n \"\"\"\n The week ordinal of the year.\n \"\"\",\n )\n week = weekofyear\n _dayofweek_doc = \"\"\"\n The day of the week with Monday=0, Sunday=6.\n\n Return the day of the week. It is assumed the week starts on\n Monday, which is denoted by 0 and ends on Sunday which is denoted\n by 6. This method is available on both Series with datetime\n values (using the `dt` accessor) or DatetimeIndex.\n\n Returns\n -------\n Series or Index\n Containing integers indicating the day number.\n\n See Also\n --------\n Series.dt.dayofweek : Alias.\n Series.dt.weekday : Alias.\n Series.dt.day_name : Returns the name of the day of the week.\n\n Examples\n --------\n >>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()\n >>> s.dt.dayofweek\n 2016-12-31 5\n 2017-01-01 6\n 2017-01-02 0\n 2017-01-03 1\n 2017-01-04 2\n 2017-01-05 3\n 2017-01-06 4\n 2017-01-07 5\n 2017-01-08 6\n Freq: D, dtype: int64\n \"\"\"\n dayofweek = _field_accessor(\"dayofweek\", \"dow\", _dayofweek_doc)\n weekday = dayofweek\n\n dayofyear = _field_accessor(\n \"dayofyear\",\n \"doy\",\n \"\"\"\n The ordinal day of the year.\n \"\"\",\n )\n quarter = _field_accessor(\n \"quarter\",\n \"q\",\n \"\"\"\n The quarter of the date.\n \"\"\",\n )\n days_in_month = _field_accessor(\n \"days_in_month\",\n \"dim\",\n \"\"\"\n The number of days in the month.\n \"\"\",\n )\n daysinmonth = days_in_month\n _is_month_doc = \"\"\"\n Indicates whether the date is the {first_or_last} day of the month.\n\n Returns\n -------\n Series or array\n For Series, returns a Series with boolean values.\n For DatetimeIndex, returns a boolean array.\n\n See Also\n --------\n is_month_start : Return a boolean indicating whether the date\n is the first day of the month.\n is_month_end : Return a boolean indicating whether the date\n is the last day of the month.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> s = pd.Series(pd.date_range(\"2018-02-27\", periods=3))\n >>> s\n 0 2018-02-27\n 1 2018-02-28\n 2 2018-03-01\n dtype: datetime64[ns]\n >>> s.dt.is_month_start\n 0 False\n 1 False\n 2 True\n dtype: bool\n >>> s.dt.is_month_end\n 0 False\n 1 True\n 2 False\n dtype: bool\n\n >>> idx = pd.date_range(\"2018-02-27\", periods=3)\n >>> idx.is_month_start\n array([False, False, True])\n >>> idx.is_month_end\n array([False, True, False])\n \"\"\"\n is_month_start = _field_accessor(\n \"is_month_start\", \"is_month_start\", _is_month_doc.format(first_or_last=\"first\")\n )\n\n is_month_end = _field_accessor(\n \"is_month_end\", \"is_month_end\", _is_month_doc.format(first_or_last=\"last\")\n )\n\n is_quarter_start = _field_accessor(\n \"is_quarter_start\",\n \"is_quarter_start\",\n \"\"\"\n Indicator for whether the date 
is the first day of a quarter.\n\n Returns\n -------\n is_quarter_start : Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n quarter : Return the quarter of the date.\n is_quarter_end : Similar property for indicating the quarter start.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> df = pd.DataFrame({'dates': pd.date_range(\"2017-03-30\",\n ... periods=4)})\n >>> df.assign(quarter=df.dates.dt.quarter,\n ... is_quarter_start=df.dates.dt.is_quarter_start)\n dates quarter is_quarter_start\n 0 2017-03-30 1 False\n 1 2017-03-31 1 False\n 2 2017-04-01 2 True\n 3 2017-04-02 2 False\n\n >>> idx = pd.date_range('2017-03-30', periods=4)\n >>> idx\n DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_quarter_start\n array([False, False, True, False])\n \"\"\",\n )\n is_quarter_end = _field_accessor(\n \"is_quarter_end\",\n \"is_quarter_end\",\n \"\"\"\n Indicator for whether the date is the last day of a quarter.\n\n Returns\n -------\n is_quarter_end : Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n quarter : Return the quarter of the date.\n is_quarter_start : Similar property indicating the quarter start.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> df = pd.DataFrame({'dates': pd.date_range(\"2017-03-30\",\n ... periods=4)})\n >>> df.assign(quarter=df.dates.dt.quarter,\n ... is_quarter_end=df.dates.dt.is_quarter_end)\n dates quarter is_quarter_end\n 0 2017-03-30 1 False\n 1 2017-03-31 1 True\n 2 2017-04-01 2 False\n 3 2017-04-02 2 False\n\n >>> idx = pd.date_range('2017-03-30', periods=4)\n >>> idx\n DatetimeIndex(['2017-03-30', '2017-03-31', '2017-04-01', '2017-04-02'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_quarter_end\n array([False, True, False, False])\n \"\"\",\n )\n is_year_start = _field_accessor(\n \"is_year_start\",\n \"is_year_start\",\n \"\"\"\n Indicate whether the date is the first day of a year.\n\n Returns\n -------\n Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n is_year_end : Similar property indicating the last day of the year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> dates = pd.Series(pd.date_range(\"2017-12-30\", periods=3))\n >>> dates\n 0 2017-12-30\n 1 2017-12-31\n 2 2018-01-01\n dtype: datetime64[ns]\n\n >>> dates.dt.is_year_start\n 0 False\n 1 False\n 2 True\n dtype: bool\n\n >>> idx = pd.date_range(\"2017-12-30\", periods=3)\n >>> idx\n DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_year_start\n array([False, False, True])\n \"\"\",\n )\n is_year_end = _field_accessor(\n \"is_year_end\",\n \"is_year_end\",\n \"\"\"\n Indicate whether the date is the last day of the year.\n\n Returns\n -------\n Series or DatetimeIndex\n The same type as the original data with boolean values. 
Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n is_year_start : Similar property indicating the start of the year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> dates = pd.Series(pd.date_range(\"2017-12-30\", periods=3))\n >>> dates\n 0 2017-12-30\n 1 2017-12-31\n 2 2018-01-01\n dtype: datetime64[ns]\n\n >>> dates.dt.is_year_end\n 0 False\n 1 True\n 2 False\n dtype: bool\n\n >>> idx = pd.date_range(\"2017-12-30\", periods=3)\n >>> idx\n DatetimeIndex(['2017-12-30', '2017-12-31', '2018-01-01'],\n dtype='datetime64[ns]', freq='D')\n\n >>> idx.is_year_end\n array([False, True, False])\n \"\"\",\n )\n is_leap_year = _field_accessor(\n \"is_leap_year\",\n \"is_leap_year\",\n \"\"\"\n Boolean indicator if the date belongs to a leap year.\n\n A leap year is a year, which has 366 days (instead of 365) including\n 29th of February as an intercalary day.\n Leap years are years which are multiples of four with the exception\n of years divisible by 100 but not by 400.\n\n Returns\n -------\n Series or ndarray\n Booleans indicating if dates belong to a leap year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> idx = pd.date_range(\"2012-01-01\", \"2015-01-01\", freq=\"Y\")\n >>> idx\n DatetimeIndex(['2012-12-31', '2013-12-31', '2014-12-31'],\n dtype='datetime64[ns]', freq='A-DEC')\n >>> idx.is_leap_year\n array([ True, False, False])\n\n >>> dates_series = pd.Series(idx)\n >>> dates_series\n 0 2012-12-31\n 1 2013-12-31\n 2 2014-12-31\n dtype: datetime64[ns]\n >>> dates_series.dt.is_leap_year\n 0 True\n 1 False\n 2 False\n dtype: bool\n \"\"\",\n )\n\n def to_julian_date(self):\n \"\"\"\n Convert Datetime Array to float64 ndarray of Julian Dates.\n 0 Julian date is noon January 1, 4713 BC.\n https://en.wikipedia.org/wiki/Julian_day\n \"\"\"\n\n # http://mysite.verizon.net/aesir_research/date/jdalg2.htm\n year = np.asarray(self.year)\n month = np.asarray(self.month)\n day = np.asarray(self.day)\n testarr = month < 3\n year[testarr] -= 1\n month[testarr] += 12\n return (\n day\n + np.fix((153 * month - 457) / 5)\n + 365 * year\n + np.floor(year / 4)\n - np.floor(year / 100)\n + np.floor(year / 400)\n + 1_721_118.5\n + (\n self.hour\n + self.minute / 60.0\n + self.second / 3600.0\n + self.microsecond / 3600.0 / 1e6\n + self.nanosecond / 3600.0 / 1e9\n )\n / 24.0\n )\n\n\n# -------------------------------------------------------------------\n# Constructor Helpers\n\n\ndef sequence_to_dt64ns(\n data,\n dtype=None,\n copy=False,\n tz=None,\n dayfirst=False,\n yearfirst=False,\n ambiguous=\"raise\",\n):\n \"\"\"\n Parameters\n ----------\n data : list-like\n dtype : dtype, str, or None, default None\n copy : bool, default False\n tz : tzinfo, str, or None, default None\n dayfirst : bool, default False\n yearfirst : bool, default False\n ambiguous : str, bool, or arraylike, default 'raise'\n See pandas._libs.tslibs.conversion.tz_localize_to_utc.\n\n Returns\n -------\n result : numpy.ndarray\n The sequence converted to a numpy array with dtype ``datetime64[ns]``.\n tz : tzinfo or None\n Either the user-provided tzinfo or one inferred from the data.\n inferred_freq : Tick or None\n The inferred frequency of the sequence.\n\n Raises\n ------\n TypeError : PeriodDType data is passed\n \"\"\"\n\n inferred_freq = None\n\n dtype = 
_validate_dt64_dtype(dtype)\n\n if not hasattr(data, \"dtype\"):\n # e.g. list, tuple\n if np.ndim(data) == 0:\n # i.e. generator\n data = list(data)\n data = np.asarray(data)\n copy = False\n elif isinstance(data, ABCSeries):\n data = data._values\n if isinstance(data, ABCPandasArray):\n data = data.to_numpy()\n\n if hasattr(data, \"freq\"):\n # i.e. DatetimeArray/Index\n inferred_freq = data.freq\n\n # if dtype has an embedded tz, capture it\n tz = validate_tz_from_dtype(dtype, tz)\n\n if isinstance(data, ABCIndexClass):\n if data.nlevels > 1:\n # Without this check, data._data below is None\n raise TypeError(\"Cannot create a DatetimeArray from a MultiIndex.\")\n data = data._data\n\n # By this point we are assured to have either a numpy array or Index\n data, copy = maybe_convert_dtype(data, copy)\n\n if is_object_dtype(data) or is_string_dtype(data):\n # TODO: We do not have tests specific to string-dtypes,\n # also complex or categorical or other extension\n copy = False\n if lib.infer_dtype(data, skipna=False) == \"integer\":\n data = data.astype(np.int64)\n else:\n # data comes back here as either i8 to denote UTC timestamps\n # or M8[ns] to denote wall times\n data, inferred_tz = objects_to_datetime64ns(\n data, dayfirst=dayfirst, yearfirst=yearfirst\n )\n tz = maybe_infer_tz(tz, inferred_tz)\n\n # `data` may have originally been a Categorical[datetime64[ns, tz]],\n # so we need to handle these types.\n if is_datetime64tz_dtype(data):\n # DatetimeArray -> ndarray\n tz = maybe_infer_tz(tz, data.tz)\n result = data._data\n\n elif is_datetime64_dtype(data):\n # tz-naive DatetimeArray or ndarray[datetime64]\n data = getattr(data, \"_data\", data)\n if data.dtype != DT64NS_DTYPE:\n data = conversion.ensure_datetime64ns(data)\n\n if tz is not None:\n # Convert tz-naive to UTC\n tz = timezones.maybe_get_tz(tz)\n data = conversion.tz_localize_to_utc(\n data.view(\"i8\"), tz, ambiguous=ambiguous\n )\n data = data.view(DT64NS_DTYPE)\n\n assert data.dtype == DT64NS_DTYPE, data.dtype\n result = data\n\n else:\n # must be integer dtype otherwise\n # assume this data are epoch timestamps\n if tz:\n tz = timezones.maybe_get_tz(tz)\n\n if data.dtype != INT64_DTYPE:\n data = data.astype(np.int64, copy=False)\n result = data.view(DT64NS_DTYPE)\n\n if copy:\n # TODO: should this be deepcopy?\n result = result.copy()\n\n assert isinstance(result, np.ndarray), type(result)\n assert result.dtype == \"M8[ns]\", result.dtype\n\n # We have to call this again after possibly inferring a tz above\n validate_tz_from_dtype(dtype, tz)\n\n return result, tz, inferred_freq\n\n\ndef objects_to_datetime64ns(\n data,\n dayfirst,\n yearfirst,\n utc=False,\n errors=\"raise\",\n require_iso8601=False,\n allow_object=False,\n):\n \"\"\"\n Convert data to array of timestamps.\n\n Parameters\n ----------\n data : np.ndarray[object]\n dayfirst : bool\n yearfirst : bool\n utc : bool, default False\n Whether to convert timezone-aware timestamps to UTC.\n errors : {'raise', 'ignore', 'coerce'}\n allow_object : bool\n Whether to return an object-dtype ndarray instead of raising if the\n data contains more than one timezone.\n\n Returns\n -------\n result : ndarray\n np.int64 dtype if returned values represent UTC timestamps\n np.datetime64[ns] if returned values represent wall times\n object if mixed timezones\n inferred_tz : tzinfo or None\n\n Raises\n ------\n ValueError : if data cannot be converted to datetimes\n \"\"\"\n assert errors in [\"raise\", \"ignore\", \"coerce\"]\n\n # if str-dtype, convert\n data = 
np.array(data, copy=False, dtype=np.object_)\n\n try:\n result, tz_parsed = tslib.array_to_datetime(\n data,\n errors=errors,\n utc=utc,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n require_iso8601=require_iso8601,\n )\n except ValueError as e:\n try:\n values, tz_parsed = conversion.datetime_to_datetime64(data)\n # If tzaware, these values represent unix timestamps, so we\n # return them as i8 to distinguish from wall times\n return values.view(\"i8\"), tz_parsed\n except (ValueError, TypeError):\n raise e\n\n if tz_parsed is not None:\n # We can take a shortcut since the datetime64 numpy array\n # is in UTC\n # Return i8 values to denote unix timestamps\n return result.view(\"i8\"), tz_parsed\n elif is_datetime64_dtype(result):\n # returning M8[ns] denotes wall-times; since tz is None\n # the distinction is a thin one\n return result, tz_parsed\n elif is_object_dtype(result):\n # GH#23675 when called via `pd.to_datetime`, returning an object-dtype\n # array is allowed. When called via `pd.DatetimeIndex`, we can\n # only accept datetime64 dtype, so raise TypeError if object-dtype\n # is returned, as that indicates the values can be recognized as\n # datetimes but they have conflicting timezones/awareness\n if allow_object:\n return result, tz_parsed\n raise TypeError(result)\n else: # pragma: no cover\n # GH#23675 this TypeError should never be hit, whereas the TypeError\n # in the object-dtype branch above is reachable.\n raise TypeError(result)\n\n\ndef maybe_convert_dtype(data, copy):\n \"\"\"\n Convert data based on dtype conventions, issuing deprecation warnings\n or errors where appropriate.\n\n Parameters\n ----------\n data : np.ndarray or pd.Index\n copy : bool\n\n Returns\n -------\n data : np.ndarray or pd.Index\n copy : bool\n\n Raises\n ------\n TypeError : PeriodDType data is passed\n \"\"\"\n if not hasattr(data, \"dtype\"):\n # e.g. collections.deque\n return data, copy\n\n if is_float_dtype(data.dtype):\n # Note: we must cast to datetime64[ns] here in order to treat these\n # as wall-times instead of UTC timestamps.\n data = data.astype(DT64NS_DTYPE)\n copy = False\n # TODO: deprecate this behavior to instead treat symmetrically\n # with integer dtypes. See discussion in GH#23675\n\n elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype):\n # GH#29794 enforcing deprecation introduced in GH#23539\n raise TypeError(f\"dtype {data.dtype} cannot be converted to datetime64[ns]\")\n elif is_period_dtype(data.dtype):\n # Note: without explicitly raising here, PeriodIndex\n # test_setops.test_join_does_not_recur fails\n raise TypeError(\n \"Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead\"\n )\n\n elif is_categorical_dtype(data.dtype):\n # GH#18664 preserve tz in going DTI->Categorical->DTI\n # TODO: cases where we need to do another pass through this func,\n # e.g. 
the categories are timedelta64s\n data = data.categories.take(data.codes, fill_value=NaT)._values\n copy = False\n\n elif is_extension_array_dtype(data.dtype) and not is_datetime64tz_dtype(data.dtype):\n # Includes categorical\n # TODO: We have no tests for these\n data = np.array(data, dtype=np.object_)\n copy = False\n\n return data, copy\n\n\n# -------------------------------------------------------------------\n# Validation and Inference\n\n\ndef maybe_infer_tz(tz, inferred_tz):\n \"\"\"\n If a timezone is inferred from data, check that it is compatible with\n the user-provided timezone, if any.\n\n Parameters\n ----------\n tz : tzinfo or None\n inferred_tz : tzinfo or None\n\n Returns\n -------\n tz : tzinfo or None\n\n Raises\n ------\n TypeError : if both timezones are present but do not match\n \"\"\"\n if tz is None:\n tz = inferred_tz\n elif inferred_tz is None:\n pass\n elif not timezones.tz_compare(tz, inferred_tz):\n raise TypeError(\n f\"data is already tz-aware {inferred_tz}, unable to \"\n f\"set specified tz: {tz}\"\n )\n return tz\n\n\ndef _validate_dt64_dtype(dtype):\n \"\"\"\n Check that a dtype, if passed, represents either a numpy datetime64[ns]\n dtype or a pandas DatetimeTZDtype.\n\n Parameters\n ----------\n dtype : object\n\n Returns\n -------\n dtype : None, numpy.dtype, or DatetimeTZDtype\n\n Raises\n ------\n ValueError : invalid dtype\n\n Notes\n -----\n Unlike validate_tz_from_dtype, this does _not_ allow non-existent\n tz errors to go through\n \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n if is_dtype_equal(dtype, np.dtype(\"M8\")):\n # no precision, disallowed GH#24806\n msg = (\n \"Passing in 'datetime64' dtype with no precision is not allowed. \"\n \"Please pass in 'datetime64[ns]' instead.\"\n )\n raise ValueError(msg)\n\n if (isinstance(dtype, np.dtype) and dtype != DT64NS_DTYPE) or not isinstance(\n dtype, (np.dtype, DatetimeTZDtype)\n ):\n raise ValueError(\n f\"Unexpected value for 'dtype': '{dtype}'. \"\n \"Must be 'datetime64[ns]' or DatetimeTZDtype'.\"\n )\n return dtype\n\n\ndef validate_tz_from_dtype(dtype, tz):\n \"\"\"\n If the given dtype is a DatetimeTZDtype, extract the implied\n tzinfo object from it and check that it does not conflict with the given\n tz.\n\n Parameters\n ----------\n dtype : dtype, str\n tz : None, tzinfo\n\n Returns\n -------\n tz : consensus tzinfo\n\n Raises\n ------\n ValueError : on tzinfo mismatch\n \"\"\"\n if dtype is not None:\n if isinstance(dtype, str):\n try:\n dtype = DatetimeTZDtype.construct_from_string(dtype)\n except TypeError:\n # Things like `datetime64[ns]`, which is OK for the\n # constructors, but also nonsense, which should be validated\n # but not by us. We *do* allow non-existent tz errors to\n # go through\n pass\n dtz = getattr(dtype, \"tz\", None)\n if dtz is not None:\n if tz is not None and not timezones.tz_compare(tz, dtz):\n raise ValueError(\"cannot supply both a tz and a dtype with a tz\")\n tz = dtz\n\n if tz is not None and is_datetime64_dtype(dtype):\n # We also need to check for the case where the user passed a\n # tz-naive dtype (i.e. datetime64[ns])\n if tz is not None and not timezones.tz_compare(tz, dtz):\n raise ValueError(\n \"cannot supply both a tz and a \"\n \"timezone-naive dtype (i.e. datetime64[ns])\"\n )\n\n return tz\n\n\ndef _infer_tz_from_endpoints(start, end, tz):\n \"\"\"\n If a timezone is not explicitly given via `tz`, see if one can\n be inferred from the `start` and `end` endpoints. 
If more than one\n of these inputs provides a timezone, require that they all agree.\n\n Parameters\n ----------\n start : Timestamp\n end : Timestamp\n tz : tzinfo or None\n\n Returns\n -------\n tz : tzinfo or None\n\n Raises\n ------\n TypeError : if start and end timezones do not agree\n \"\"\"\n try:\n inferred_tz = timezones.infer_tzinfo(start, end)\n except AssertionError as err:\n # infer_tzinfo raises AssertionError if passed mismatched timezones\n raise TypeError(\n \"Start and end cannot both be tz-aware with different timezones\"\n ) from err\n\n inferred_tz = timezones.maybe_get_tz(inferred_tz)\n tz = timezones.maybe_get_tz(tz)\n\n if tz is not None and inferred_tz is not None:\n if not timezones.tz_compare(inferred_tz, tz):\n raise AssertionError(\"Inferred time zone not equal to passed time zone\")\n\n elif inferred_tz is not None:\n tz = inferred_tz\n\n return tz\n\n\ndef _maybe_normalize_endpoints(start, end, normalize):\n _normalized = True\n\n if start is not None:\n if normalize:\n start = normalize_date(start)\n _normalized = True\n else:\n _normalized = _normalized and start.time() == _midnight\n\n if end is not None:\n if normalize:\n end = normalize_date(end)\n _normalized = True\n else:\n _normalized = _normalized and end.time() == _midnight\n\n return start, end, _normalized\n\n\ndef _maybe_localize_point(ts, is_none, is_not_none, freq, tz, ambiguous, nonexistent):\n \"\"\"\n Localize a start or end Timestamp to the timezone of the corresponding\n start or end Timestamp\n\n Parameters\n ----------\n ts : start or end Timestamp to potentially localize\n is_none : argument that should be None\n is_not_none : argument that should not be None\n freq : Tick, DateOffset, or None\n tz : str, timezone object or None\n ambiguous: str, localization behavior for ambiguous times\n nonexistent: str, localization behavior for nonexistent times\n\n Returns\n -------\n ts : Timestamp\n \"\"\"\n # Make sure start and end are timezone localized if:\n # 1) freq = a Timedelta-like frequency (Tick)\n # 2) freq = None i.e. generating a linspaced range\n if is_none is None and is_not_none is not None:\n # Note: We can't ambiguous='infer' a singular ambiguous time; however,\n # we have historically defaulted ambiguous=False\n ambiguous = ambiguous if ambiguous != \"infer\" else False\n localize_args = {\"ambiguous\": ambiguous, \"nonexistent\": nonexistent, \"tz\": None}\n if isinstance(freq, Tick) or freq is None:\n localize_args[\"tz\"] = tz\n ts = ts.tz_localize(**localize_args)\n return ts\n"
] | [
[
"pandas._libs.tslibs.fields.build_isocalendar_sarray",
"pandas.core.arrays.datetimelike.validate_endpoints",
"pandas.core.arrays.timedeltas.TimedeltaArray",
"numpy.dtype",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.arrays.datetimelike.validate_inferred_freq",
"pandas._libs.tslibs.resolution.resolution",
"numpy.asarray",
"pandas._libs.tslibs.frequencies.base_and_stride",
"pandas._libs.tslibs.normalize_date",
"pandas._libs.tslibs.timezones.maybe_get_tz",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.tseries.frequencies.get_period_alias",
"pandas.core.arrays.datetimelike.maybe_infer_freq",
"pandas.core.dtypes.dtypes.DatetimeTZDtype.construct_from_string",
"pandas.core.common.count_not_none",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.algorithms.checked_add_with_arr",
"pandas.core.dtypes.common.is_datetime64_ns_dtype",
"pandas.core.arrays.PeriodArray._from_datetime64",
"pandas._libs.tslibs.Timestamp",
"pandas._libs.tslibs.tzconversion.tz_convert",
"pandas._libs.tslibs.timezones.get_timezone",
"pandas.core.arrays.datetimelike.validate_periods",
"pandas._libs.tslibs.fields.get_date_name_field",
"pandas._libs.tslibs.conversion.tz_localize_to_utc",
"pandas._libs.tslibs.fields.get_start_end_field",
"pandas.core.dtypes.common.is_datetime64_dtype",
"numpy.linspace",
"pandas._libs.tslibs.timezones.is_utc",
"pandas._libs.tslibs.conversion.normalize_i8_timestamps",
"pandas._libs.lib.infer_dtype",
"pandas.tseries.frequencies.to_offset",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.dtypes.common.is_period_dtype",
"pandas._libs.tslibs.timezones.tz_compare",
"pandas._libs.tslibs.conversion.datetime_to_datetime64",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.arrays._ranges.generate_regular_range",
"numpy.ndim",
"pandas._libs.tslibs.timezones.infer_tzinfo",
"pandas.io.formats.format._get_format_datetime64_from_values",
"pandas.core.dtypes.dtypes.DatetimeTZDtype",
"pandas._libs.tslibs.fields.get_date_field",
"pandas._libs.tslibs.timezones.tz_standardize",
"pandas.core.arrays.datetimelike.DatetimeLikeArrayMixin.astype",
"pandas._libs.tslib.ints_to_pydatetime",
"pandas.core.dtypes.missing.isna",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.common.is_string_dtype",
"numpy.fix",
"pandas._libs.tslibs.conversion.is_date_array_normalized",
"pandas.DataFrame",
"numpy.floor",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.dtypes.common.is_object_dtype",
"pandas._libs.tslibs.conversion.ensure_datetime64ns",
"numpy.array",
"pandas._libs.tslib.array_to_datetime"
]
] |
steventan0110/multiDDS | [
"b77d0ad7b8f38d5b3b1b0e63e2671e0de0e3da00"
] | [
"fairseq/optim/lr_scheduler/inverse_square_root_decay_schedule.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom . import FairseqLRScheduler, register_lr_scheduler\nimport torch\n\n@register_lr_scheduler('inverse_sqrt_decay')\nclass InverseSquareRootDecaySchedule(FairseqLRScheduler):\n \"\"\"Decay the LR based on the inverse square root of the update number.\n\n We also support a warmup phase where we linearly increase the learning rate\n from some initial learning rate (``--warmup-init-lr``) until the configured\n learning rate (``--lr``). Thereafter we decay proportional to the number of\n updates, with a decay factor set to align with the configured learning rate.\n\n During warmup::\n\n lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)\n lr = lrs[update_num]\n\n After warmup::\n\n decay_factor = args.lr * sqrt(args.warmup_updates)\n lr = decay_factor / sqrt(update_num)\n \"\"\"\n\n def __init__(self, args, optimizer):\n super().__init__(args, optimizer)\n if len(args.lr) > 1:\n raise ValueError(\n 'Cannot use a fixed learning rate schedule with inverse_sqrt.'\n ' Consider --lr-scheduler=fixed instead.'\n )\n warmup_end_lr = args.lr[0]\n if args.warmup_init_lr < 0:\n args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr\n\n # linearly warmup for the first args.warmup_updates\n self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates\n\n # then, decay prop. to the inverse square root of the update number\n self.decay_factor = warmup_end_lr * args.warmup_updates**0.5\n\n # initial learning rate\n self.lr = args.warmup_init_lr\n self.optimizer.set_lr(self.lr)\n self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n self.optimizer.optimizer, patience=0, factor=args.lr_shrink,\n threshold=args.lr_threshold)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add arguments to the parser for this LR scheduler.\"\"\"\n # fmt: off\n parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',\n help='warmup the learning rate linearly for the first N updates')\n parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',\n help='initial learning rate during warmup phase; default is args.lr')\n parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',\n help='shrink factor for annealing, lr_new = (lr * lr_shrink)')\n parser.add_argument('--lr-threshold', default=1e-4, type=float, metavar='LT',\n help='Threshold for measuring the new optimum, \\\n to only focus on significant changes')\n # fmt: on\n\n def step(self, epoch, val_loss=None):\n \"\"\"Update the learning rate at the end of the given epoch.\"\"\"\n super().step(epoch, val_loss)\n if val_loss is not None:\n self.lr_scheduler.step(val_loss, epoch)\n else:\n self.lr_scheduler.last_epoch = epoch\n return self.optimizer.get_lr()\n\n def step_update(self, num_updates):\n \"\"\"Update the learning rate after each update.\"\"\"\n if num_updates < self.args.warmup_updates:\n self.lr = self.args.warmup_init_lr + num_updates*self.lr_step\n else:\n self.lr = self.decay_factor * num_updates**-0.5\n self.optimizer.set_lr(self.lr)\n return self.lr\n"
] | [
[
"torch.optim.lr_scheduler.ReduceLROnPlateau"
]
] |
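The fairseq record above contains the full `inverse_sqrt_decay` scheduler. As a self-contained sketch of the warmup-then-decay rule spelled out in its class docstring (the default values below are illustrative assumptions, not taken from any particular training configuration):

```python
def inverse_sqrt_lr(update_num: int,
                    lr: float = 5e-4,
                    warmup_init_lr: float = 0.0,
                    warmup_updates: int = 4000) -> float:
    """Learning rate at a given update, per the docstring in the record above."""
    if update_num < warmup_updates:
        # Linear warmup from warmup_init_lr up to lr.
        step = (lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + update_num * step
    # Afterwards, decay proportionally to the inverse square root of the step,
    # scaled so the schedule is continuous at the end of warmup.
    decay_factor = lr * warmup_updates ** 0.5
    return decay_factor / update_num ** 0.5


# The schedule peaks at `lr` exactly when warmup ends, then decays:
print(inverse_sqrt_lr(4000))   # 5e-4 (peak)
print(inverse_sqrt_lr(16000))  # 2.5e-4 (half the peak, since sqrt(4x) = 2*sqrt(x))
```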
mariogeiger/jax | [
"7098088f4eb15cf750398889e4341dbc15cda1b3"
] | [
"tests/lax_numpy_indexing_test.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom contextlib import contextmanager\nimport enum\nfrom functools import partial\nimport itertools\nimport typing\nfrom typing import Any, Optional, Tuple\nimport warnings\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport numpy as np\n\nimport jax\nfrom jax import lax\nfrom jax import numpy as jnp\nfrom jax import ops\n\nfrom jax._src import dtypes\nfrom jax._src import test_util as jtu\nfrom jax._src import util\nfrom jax._src.lax import lax as lax_internal\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n\n# We disable the whitespace continuation check in this file because otherwise it\n# makes the test name formatting unwieldy.\n# pylint: disable=bad-continuation\n\n\nARRAY_MSG = r\"Using a non-tuple sequence for multidimensional indexing is not allowed.*arr\\[array\\(seq\\)\\]\"\nTUPLE_MSG = r\"Using a non-tuple sequence for multidimensional indexing is not allowed.*arr\\[tuple\\(seq\\)\\]\"\n\n\nfloat_dtypes = jtu.dtypes.floating\ndefault_dtypes = float_dtypes + jtu.dtypes.integer\nall_dtypes = default_dtypes + jtu.dtypes.boolean\n\nclass IndexSpec(typing.NamedTuple):\n shape: Tuple[int, ...]\n indexer: Any\n out_shape: Optional[Tuple[int, ...]] = None\n\n\ndef check_grads(f, args, order, atol=None, rtol=None, eps=None):\n # TODO(mattjj,dougalm): add higher-order check\n default_tol = 1e-6 if config.x64_enabled else 1e-2\n atol = atol or default_tol\n rtol = rtol or default_tol\n eps = eps or default_tol\n jtu.check_jvp(f, partial(jax.jvp, f), args, atol, rtol, eps)\n jtu.check_vjp(f, partial(jax.vjp, f), args, atol, rtol, eps)\n\n\nSTATIC_INDEXING_TESTS = [\n (\"OneIntIndex\", [\n IndexSpec(shape=(3,), indexer=1, out_shape=()),\n IndexSpec(shape=(3, 3), indexer=0, out_shape=(3,)),\n IndexSpec(shape=(3, 4, 5), indexer=2, out_shape=(4, 5)),\n IndexSpec(shape=(3,), indexer=-1, out_shape=()),\n IndexSpec(shape=(3,), indexer=-2, out_shape=()),\n ]),\n (\"TwoIntIndices\", [\n IndexSpec(shape=(3, 3), indexer=(2, 1), out_shape=()),\n IndexSpec(shape=(3, 4, 5), indexer=(1, 2), out_shape=(5,)),\n IndexSpec(shape=(3, 4, 5), indexer=(-1, 2), out_shape=(5,)),\n ]),\n (\"ThreeIntIndices\", [\n IndexSpec(shape=(3, 4, 5), indexer=(1, 2, 3), out_shape=()),\n ]),\n (\"OneSliceIndex\", [\n IndexSpec(shape=(10,), indexer=slice(1, 3), out_shape=(2,)),\n IndexSpec(shape=(10,), indexer=slice(1, -1), out_shape=(8,)),\n IndexSpec(shape=(10,), indexer=slice(None, -1), out_shape=(9,)),\n IndexSpec(shape=(10,), indexer=slice(None, None, None), out_shape=(10,)),\n IndexSpec(shape=(10, 8), indexer=slice(1, 3), out_shape=(2, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(1, None), out_shape=(9, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(None, 3), out_shape=(3, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(-3, None), out_shape=(3, 8)),\n ]),\n (\"OneSliceIndexNegativeStride\", [\n IndexSpec(shape=(10,), indexer=slice(3, 1, -1), out_shape=(2,)),\n IndexSpec(shape=(10,), 
indexer=slice(1, 8, -1), out_shape=(0,)),\n IndexSpec(shape=(10,), indexer=slice(None, 1, -2), out_shape=(4,)),\n IndexSpec(shape=(10,), indexer=slice(None, None, -1), out_shape=(10,)),\n IndexSpec(shape=(10, 8), indexer=slice(3, 1, -1), out_shape=(2, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(0, 8, -1), out_shape=(0, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(None, None, -1), out_shape=(10, 8)),\n ]),\n (\"OneSliceIndexNonUnitStride\", [\n IndexSpec(shape=(10,), indexer=slice(0, 8, 2), out_shape=(4,)),\n IndexSpec(shape=(10,), indexer=slice(0, 8, 3), out_shape=(3,)),\n IndexSpec(shape=(10,), indexer=slice(1, 3, 2), out_shape=(1,)),\n IndexSpec(shape=(10,), indexer=slice(1, None, 2), out_shape=(5,)),\n IndexSpec(shape=(10,), indexer=slice(None, 1, -2), out_shape=(4,)),\n IndexSpec(shape=(10, 8), indexer=slice(1, 8, 3), out_shape=(3, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(None, None, 2), out_shape=(5, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(None, 1, -2), out_shape=(4, 8)),\n IndexSpec(shape=(10, 8), indexer=slice(None, None, -2), out_shape=(5, 8)),\n ]),\n (\"TwoSliceIndices\", [\n IndexSpec(shape=(10, 8), indexer=(slice(1, 3), slice(0, 2)),\n out_shape=(2, 2)),\n IndexSpec(shape=(10, 8), indexer=(slice(1, None), slice(None, 2)),\n out_shape=(9, 2)),\n IndexSpec(shape=(10, 8), indexer=(slice(None, None, -1), slice(None, 2)),\n out_shape=(10, 2)),\n IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, 2)),\n out_shape=(2, 2, 3)),\n IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, None)),\n out_shape=(2, 8, 3)),\n IndexSpec(shape=(10, 8, 3), indexer=(slice(1, None), slice(0, 2)),\n out_shape=(9, 2, 3)),\n ]),\n (\"OneColonIndex\", [\n IndexSpec(shape=(3,), indexer=slice(None), out_shape=(3,)),\n IndexSpec(shape=(3, 4), indexer=slice(None), out_shape=(3, 4)),\n ]),\n (\"MultipleColonIndices\", [\n IndexSpec(shape=(3, 4), indexer=(slice(None), slice(None)),\n out_shape=(3, 4)),\n IndexSpec(shape=(3, 4, 5), indexer=(slice(None), slice(None)),\n out_shape=(3, 4, 5)),\n ]),\n (\"MixedSliceIndices\", [\n IndexSpec(shape=(10, 4), indexer=(slice(None), slice(0, 2)),\n out_shape=(10, 2)),\n IndexSpec(shape=(10, 4), indexer=(1, slice(None)),\n out_shape=(4,)),\n ]),\n (\"EllipsisIndex\", [\n IndexSpec(shape=(3,), indexer=Ellipsis, out_shape=(3,)),\n IndexSpec(shape=(3, 4), indexer=Ellipsis, out_shape=(3, 4)),\n IndexSpec(shape=(3, 4, 5), indexer=(0, Ellipsis), out_shape=(4, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, 2, 3), out_shape=(3,)),\n ]),\n (\"NoneIndex\", [\n IndexSpec(shape=(), indexer=None, out_shape=(1,)),\n IndexSpec(shape=(), indexer=(None, None), out_shape=(1, 1)),\n IndexSpec(shape=(), indexer=(Ellipsis, None), out_shape=(1,)),\n IndexSpec(shape=(3,), indexer=None, out_shape=(1, 3)),\n IndexSpec(shape=(3, 4), indexer=None, out_shape=(1, 3, 4)),\n IndexSpec(shape=(3, 4), indexer=(Ellipsis, None), out_shape=(3, 4, 1)),\n IndexSpec(shape=(3, 4), indexer=(0, None, Ellipsis), out_shape=(1, 4)),\n IndexSpec(shape=(3, 4, 5), indexer=(1, None, Ellipsis), out_shape=(1, 4, 5)),\n ]),\n (\"EmptyIndex\", [\n IndexSpec(shape=(), indexer=(), out_shape=()),\n IndexSpec(shape=(3,), indexer=(), out_shape=(3,)),\n IndexSpec(shape=(3, 4), indexer=(), out_shape=(3, 4)),\n ]),\n (\"TupleOfIntAndSliceAndIntArray\", [\n IndexSpec(shape=(3, 2, 3), indexer=(0, slice(None), np.arange(3)),\n out_shape=(3, 2)),\n IndexSpec(shape=(3, 2, 3), indexer=(np.int32(1), slice(None), np.arange(3)),\n out_shape=(3, 2)),\n IndexSpec(shape=(3, 2, 3), indexer=(np.array(2), 
slice(None), np.arange(3)),\n out_shape=(3, 2)),\n ]),\n]\n\nSTATIC_INDEXING_OUT_OF_BOUNDS_TESTS = [\n (\"OneIntIndex\", [\n IndexSpec(shape=(3,), indexer=-4, out_shape=()),\n IndexSpec(shape=(3, 3), indexer=3, out_shape=(3,)),\n IndexSpec(shape=(3, 4, 5), indexer=4, out_shape=(4, 5)),\n ]),\n (\"TwoIntIndices\", [\n IndexSpec(shape=(3, 3), indexer=(2, -4), out_shape=()),\n IndexSpec(shape=(3, 4, 5), indexer=(3, 2), out_shape=()),\n IndexSpec(shape=(3, 4, 5), indexer=(-4, 4), out_shape=(5,)),\n ]),\n]\n\n\nADVANCED_INDEXING_TESTS = [\n (\"One1DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),\n IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 1]), out_shape=(3, 3)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 0, 1]),\n out_shape=(4, 4, 5)),\n IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),\n IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),\n IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32),\n out_shape=(0,)),\n ]),\n (\"One2DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([[0, 0]]),out_shape=(1, 2)),\n IndexSpec(shape=(3, 3), indexer=np.array([[1, 2, 1], [0, 1, -1]]),\n out_shape=(2, 3, 3)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([[0, 2, 0, 1], [-1, -2, 1, 0]]),\n out_shape=(2, 4, 4, 5)),\n ]),\n (\"Two1DIntArrayIndicesNoBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),\n out_shape=(2,)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2, 0, 1]), np.array([-1, 0, -1, 2])),\n out_shape=(4, 5)),\n ]),\n (\"Two1DIntArrayIndicesWithBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),\n out_shape=(1, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([[0, 2, 0, 1]]), np.array([-1, 0, -1, 2])),\n out_shape=(1, 4, 5)),\n ]),\n (\"ArrayOfInts\", [\n IndexSpec(shape=(3,), indexer=np.array([0, 1, 0]), out_shape=(3,)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([ 0, -1]), out_shape=(2, 4, 5)),\n ]),\n (\"TupleOfListsOfPythonInts\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0, 3]]),\n out_shape=(2, 4, 5)),\n ]),\n (\"TupleOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[2, 3, 0, 3]])),\n out_shape=(1, 4)),\n ]),\n (\"TupleOfListsOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),\n out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[2, 3, 0, 3]])),\n out_shape=(2, 4, 5)),\n ]),\n]\n\nADVANCED_INDEXING_TESTS_NO_REPEATS = [\n (\"One1DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),\n IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 0]), out_shape=(3, 3)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 1]),\n out_shape=(3, 4, 5)),\n IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),\n IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),\n IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32), out_shape=(0,)),\n ]),\n (\"One2DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([[0, 1]]), out_shape=(1, 2)),\n IndexSpec(shape=(6, 6), indexer=np.array([[1, 2, 0], [3, 4, -1]]),\n out_shape=(2, 3, 6)),\n ]),\n (\"Two1DIntArrayIndicesNoBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),\n out_shape=(2,)),\n IndexSpec(shape=(4, 
5, 6),\n indexer=(np.array([0, 2, 1, 3]), np.array([-1, 0, -2, 1])),\n out_shape=(4, 6)),\n ]),\n (\"Two1DIntArrayIndicesWithBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),\n out_shape=(1, 2)),\n IndexSpec(shape=(4, 5, 6),\n indexer=(np.array([[0, 2, -1, 1]]), np.array([-1, 0, -2, 2])),\n out_shape=(1, 4, 6)),\n ]),\n (\"ArrayOfInts\", [\n IndexSpec(shape=(3,), indexer=np.array([0, 2, 1]), out_shape=(3,)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([ 0, -1]), out_shape=(2, 4, 5)),\n ]),\n (\"TupleOfListsOfPythonInts\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0]]),\n out_shape=(2, 3, 5)),\n ]),\n (\"TupleOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[2, 3, 0]])),\n out_shape=(1, 3)),\n ]),\n (\"TupleOfListsOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),\n out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[2, 3, 0]])),\n out_shape=(2, 3, 5)),\n ]),\n]\n\nADVANCED_INDEXING_TESTS_NO_REPEATS_SORTED = [\n (\"One1DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([0, 1]), out_shape=(2,)),\n IndexSpec(shape=(3, 3), indexer=np.array([0, 1, 2]), out_shape=(3, 3)),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 1, 2]),\n out_shape=(3, 4, 5)),\n IndexSpec(shape=(3,), indexer=np.array([-1, 1]), out_shape=(2,)),\n IndexSpec(shape=(3,), indexer=np.array([-2, -1]), out_shape=(2,)),\n IndexSpec(shape=(0,), indexer=np.array([], dtype=np.int32), out_shape=(0,)),\n ]),\n (\"One2DIntArrayIndex\", [\n IndexSpec(shape=(3,), indexer=np.array([[0, 1]]), out_shape=(1, 2)),\n IndexSpec(shape=(6, 6), indexer=np.array([[-1, 0, 1],\n [ 2, 3, 4]]), out_shape=(2, 3, 6)),\n ]),\n (\"Two1DIntArrayIndicesNoBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]), np.array([1, 2])),\n out_shape=(2,)),\n IndexSpec(shape=(4, 5, 6),\n indexer=(np.array([0, 1, 2, 3]), np.array([-2, -1, 0, 1])),\n out_shape=(4, 6)),\n ]),\n (\"Two1DIntArrayIndicesWithBroadcasting\", [\n IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]), np.array([1, 2])),\n out_shape=(1, 2)),\n IndexSpec(shape=(4, 5, 6),\n indexer=(np.array([[-1, 0, 1, 2]]), np.array([-2, -1, 0, 2])),\n out_shape=(1, 4, 6)),\n ]),\n (\"TupleOfListsOfPythonInts\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1],), out_shape=(2, 4, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[0, 2, 3]]),\n out_shape=(2, 3, 5)),\n ]),\n (\"TupleOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1])), out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=(0, 1, np.array([[0, 2, 3]])),\n out_shape=(1, 3)),\n ]),\n (\"TupleOfListsOfPythonIntsAndIntArrays\", [\n IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0])),\n out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], np.array([[0, 2, 3]])),\n out_shape=(2, 3, 5)),\n ]),\n]\n\n\nMIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS = [\n (\"SlicesAndOneIntArrayIndex\", [\n IndexSpec(shape=(2, 3), indexer=(np.array([0, 1]), slice(1, 2)),\n out_shape=(2, 1)),\n IndexSpec(shape=(2, 3), indexer=(slice(0, 2), np.array([0, 2])),\n out_shape=(2, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(Ellipsis, np.array([0, 2]), slice(None)),\n out_shape=(3, 2, 5)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(Ellipsis, np.array([[0, 2], [1, 3]]), slice(None)),\n 
out_shape=(3, 2, 2, 5)),\n ]),\n (\"SlicesAndTwoIntArrayIndices\", [\n IndexSpec(shape=(3, 4, 5),\n indexer=(Ellipsis, np.array([0, 2]), np.array([-1, 2])),\n out_shape=(3, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), Ellipsis, np.array([-1, 2])),\n out_shape=(2, 4)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), np.array([-1, 2]), Ellipsis),\n out_shape=(2, 5)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), np.array([-1, 2]), slice(1, 3)),\n out_shape=(2, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), slice(1, 3), np.array([-1, 2])),\n out_shape=(2, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([ 0, 2, -2]), slice(None, None, 2),\n np.array([-1, 2, 1])),\n out_shape=(3, 2)),\n ]),\n (\"NonesAndIntArrayIndices\", [\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), None, np.array([-1, 2])),\n out_shape=(2, 1, 5)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([0, 2]), None, None, np.array([-1, 2])),\n out_shape=(2, 1, 1, 5)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(Ellipsis, np.array([0, 2]), None, None,\n np.array([-1, 2])),\n out_shape=(2, 3, 1, 1)),\n ]),\n (\"IntArrayWithInt32Type\", [\n IndexSpec(shape=(3, 4), indexer=(Ellipsis, np.array(1, dtype=np.int32)),\n out_shape=(3,)),\n ]),\n]\n\n\nMIXED_ADVANCED_INDEXING_TESTS = MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS + [\n (\"SlicesAndOneIntArrayIndex\", [\n IndexSpec(shape=(3, 4, 5),\n indexer=(Ellipsis, np.array([[0, 2], [1, 1]]), slice(None)),\n out_shape=(3, 2, 2, 5)),\n ]),\n (\"SlicesAndTwoIntArrayIndices\", [\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([ 0, 2, -2]), slice(None, None, 2),\n np.array([-1, 2, -1])),\n out_shape=(3, 2)),\n IndexSpec(shape=(3, 4, 5),\n indexer=(np.array([[0, 2], [2, 0]]), Ellipsis,\n np.array([[1, 0], [1, 0]])),\n out_shape=(2, 2, 4)),\n ]),\n]\n\nMODES = [\"clip\", \"drop\", \"promise_in_bounds\"]\n\n\nclass IndexingTest(jtu.JaxTestCase):\n \"\"\"Tests for Numpy indexing translation rules.\"\"\"\n\n @parameterized.named_parameters(jtu.cases_from_list({\n \"testcase_name\": \"{}_inshape={}_indexer={}\".format(\n name, jtu.format_shape_dtype_string( shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer\n } for name, index_specs in STATIC_INDEXING_TESTS\n for shape, indexer, _ in index_specs\n for dtype in all_dtypes))\n def testStaticIndexing(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype)]\n np_fun = lambda x: np.asarray(x)[indexer]\n jnp_fun = lambda x: jnp.asarray(x)[indexer]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n # Tests x.at[...].get(...) 
as well.\n jnp_fun = lambda x: jnp.asarray(x).at[indexer].get()\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n\n @parameterized.named_parameters(jtu.cases_from_list({\n \"testcase_name\": f\"_{funcname}\", \"funcname\": funcname}\n for funcname in [\"negative\", \"sin\", \"cos\", \"square\", \"sqrt\", \"log\", \"exp\"]))\n def testIndexApply(self, funcname, size=10, dtype='float32'):\n rng = jtu.rand_default(self.rng())\n idx_rng = jtu.rand_int(self.rng(), -size, size)\n np_func = getattr(np, funcname)\n jnp_func = getattr(jnp, funcname)\n @jtu.ignore_warning(category=RuntimeWarning)\n def np_op(x, idx):\n y = x.copy()\n np_func.at(y, idx)\n return y\n def jnp_op(x, idx):\n return jnp.asarray(x).at[idx].apply(jnp_func)\n args_maker = lambda: [rng(size, dtype), idx_rng(size, int)]\n self._CheckAgainstNumpy(np_op, jnp_op, args_maker)\n self._CompileAndCheck(jnp_op, args_maker)\n\n\n @parameterized.named_parameters({\n \"testcase_name\":\n f\"{jtu.format_shape_dtype_string(shape, dtype)}_inshape={name}\"\n f\"_indexer={indexer}_mode={mode}\",\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer, \"mode\": mode\n }\n for mode in MODES\n for name, index_specs in (\n STATIC_INDEXING_TESTS if mode == \"promise_in_bounds\" else\n STATIC_INDEXING_TESTS + STATIC_INDEXING_OUT_OF_BOUNDS_TESTS)\n for shape, indexer, _ in index_specs\n for dtype in float_dtypes)\n def testStaticIndexingGrads(self, shape, dtype, indexer, mode):\n rng = jtu.rand_default(self.rng())\n tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None\n arg = rng(shape, dtype)\n # Use an arbitrary finite fill_value, since NaNs won't work in a numerical\n # gradient test.\n fun = lambda x: jnp.asarray(x).at[indexer].get(mode=mode, fill_value=7)**2\n check_grads(fun, (arg,), 2, tol, tol, tol)\n\n def _ReplaceSlicesWithTuples(self, idx):\n \"\"\"Helper method to replace slices with tuples for dynamic indexing args.\"\"\"\n if isinstance(idx, slice):\n triple = idx.start, idx.stop, idx.step\n isnone = [i for i, elt in enumerate(triple) if elt is None]\n zeros = itertools.repeat(0)\n nones = itertools.repeat(None)\n out = util.subvals(triple, zip(isnone, zeros))\n return out, lambda out: slice(*util.subvals(out, zip(isnone, nones)))\n elif isinstance(idx, (tuple, list)) and idx:\n t = type(idx)\n elts, packs = zip(*map(self._ReplaceSlicesWithTuples, idx))\n return elts, lambda elts: t((pack(i) for pack, i in zip(packs, elts)))\n else:\n return idx, lambda x: x\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in [\n (\"OneSliceIndex\",\n [IndexSpec(shape=(5,), indexer=slice(1, 3)),\n IndexSpec(shape=(5, 4), indexer=slice(1, 3))]),\n (\"TwoSliceIndices\",\n [IndexSpec(shape=(5, 4), indexer=(slice(1, 3), slice(0, 2))),\n IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, 2)))]),\n (\"NonUnitStrides\", [\n IndexSpec(shape=(3,), indexer=slice(None, None, -1)),\n IndexSpec(shape=(3, 3), indexer=slice(0, 3, -2)),\n IndexSpec(shape=(3, 4, 5), indexer=slice(0, 4, 2))\n ]),\n (\"OnlyStartOrStopDynamic\", [\n IndexSpec(shape=(5, 4), indexer=(slice(None, 3), slice(0, 2))),\n IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, None)))\n ]),\n ]\n for shape, indexer, _ in index_specs\n for dtype in all_dtypes)\n def testDynamicIndexingWithSlicesErrors(self, shape, dtype, indexer):\n rng = 
jtu.rand_default(self.rng())\n unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)\n\n @jax.jit\n def fun(x, unpacked_indexer):\n indexer = pack_indexer(unpacked_indexer)\n return x[indexer]\n\n args_maker = lambda: [rng(shape, dtype), unpacked_indexer]\n self.assertRaises(IndexError, lambda: fun(*args_maker()))\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in [\n (\"OneIntIndex\",\n [IndexSpec(shape=(3,), indexer=1),\n IndexSpec(shape=(3, 3), indexer=0),\n IndexSpec(shape=(3, 4, 5), indexer=2),\n IndexSpec(shape=(3,), indexer=-1),\n IndexSpec(shape=(3,), indexer=-2)]),\n (\"TwoIntIndices\",\n [IndexSpec(shape=(3, 3), indexer=(2, 1)),\n IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),\n IndexSpec(shape=(3, 4, 5), indexer=(-1, 2))]),\n (\"ThreeIntIndices\",\n [IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),\n ]\n for shape, indexer, _ in index_specs\n for dtype in all_dtypes)\n def testDynamicIndexingWithIntegers(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)\n\n def np_fun(x, unpacked_indexer):\n indexer = pack_indexer(unpacked_indexer)\n return np.asarray(x)[indexer]\n\n def jnp_fun(x, unpacked_indexer):\n indexer = pack_indexer(unpacked_indexer)\n return jnp.array(x)[indexer]\n\n args_maker = lambda: [rng(shape, dtype), unpacked_indexer]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in [\n (\"OneIntIndex\",\n [IndexSpec(shape=(3,), indexer=1),\n IndexSpec(shape=(3, 3), indexer=0),\n IndexSpec(shape=(3, 4, 5), indexer=2),\n IndexSpec(shape=(3,), indexer=-1),\n IndexSpec(shape=(3,), indexer=-2),\n ]),\n (\"TwoIntIndices\",\n [IndexSpec(shape=(3, 3), indexer=(2, 1)),\n IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),\n IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)),\n ]),\n (\"ThreeIntIndices\",\n [IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),\n ]\n for shape, indexer, _ in index_specs\n for dtype in float_dtypes)\n def testDynamicIndexingWithIntegersGrads(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None\n unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)\n\n @jax.jit\n def fun(unpacked_indexer, x):\n indexer = pack_indexer(unpacked_indexer)\n return x[indexer]\n\n arr = rng(shape, dtype)\n check_grads(partial(fun, unpacked_indexer), (arr,), 2, tol, tol, tol)\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in ADVANCED_INDEXING_TESTS\n for shape, indexer, _ in index_specs\n for dtype in all_dtypes)\n def testAdvancedIntegerIndexing(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), indexer]\n np_fun = lambda x, idx: np.asarray(x)[idx]\n jnp_fun = lambda x, idx: jnp.asarray(x)[idx]\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, 
args_maker)\n\n @parameterized.named_parameters(\n {\"testcase_name\": f\"_{dtype}\", \"dtype\": dtype}\n for dtype in jtu.dtypes.unsigned + jtu.dtypes.integer)\n def testIndicesNormalizationByType(self, dtype):\n x = jnp.arange(10)\n jaxpr = jax.make_jaxpr(x.__getitem__)(jnp.arange(3, dtype=dtype))\n primitives = [eqn.primitive for eqn in jaxpr.eqns]\n if np.issubdtype(dtype, np.unsignedinteger):\n # Unsigned integers should not require lt, add, and select.\n self.assertEqual(primitives, [lax.convert_element_type_p, lax.broadcast_in_dim_p, lax.gather_p])\n else:\n # May or may not contain convert_element_type.\n self.assertIn(len(primitives), [5, 6])\n self.assertEqual(primitives[:3], [lax.lt_p, lax.add_p, lax.select_n_p])\n self.assertEqual(primitives[-2:], [lax.broadcast_in_dim_p, lax.gather_p])\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in [\n (\"One1DIntArrayIndex\",\n [IndexSpec(shape=(3,), indexer=np.array([0, 1])),\n IndexSpec(shape=(3, 3), indexer=np.array([1, 2, 1])),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([0, 2, 0, 1])),\n IndexSpec(shape=(3,), indexer=np.array([-1, 1])),\n IndexSpec(shape=(3,), indexer=np.array([-2, -1])),\n ]),\n (\"One2DIntArrayIndex\",\n [IndexSpec(shape=(3,), indexer=np.array([[0, 0]])),\n IndexSpec(shape=(3, 3), indexer=np.array([[1, 2, 1],\n [0, 1, -1]])),\n IndexSpec(shape=(3, 4, 5), indexer=np.array([[0, 2, 0, 1],\n [-1, -2, 1, 0]])),\n ]),\n (\"Two1DIntArrayIndicesNoBroadcasting\",\n [IndexSpec(shape=(3, 3), indexer=(np.array([0, 1]),\n np.array([1, 2]))),\n IndexSpec(shape=(3, 4, 5), indexer=(np.array([0, 2, 0, 1]),\n np.array([-1, 0, -1, 2]))),\n ]),\n (\"Two1DIntArrayIndicesWithBroadcasting\",\n [IndexSpec(shape=(3, 3), indexer=(np.array([[0, 1]]),\n np.array([1, 2]))),\n IndexSpec(shape=(3, 4, 5), indexer=(np.array([[0, 2, 0, 1]]),\n np.array([-1, 0, -1, 2]))),\n ]),\n (\"TupleOfPythonIntsAndIntArrays\",\n [IndexSpec(shape=(3, 4, 5), indexer=(0, np.array([0, 1]))),\n IndexSpec(shape=(3, 4, 5), indexer=(0, 1,\n np.array([[2, 3, 0, 3]]))),\n ]),\n (\"TupleOfListsOfPythonIntsAndIntArrays\",\n [IndexSpec(shape=(3, 4, 5), indexer=([0, 1], np.array([0]))),\n IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]],\n np.array([[2, 3, 0, 3]]))),\n ]),\n ]\n for shape, indexer, _ in index_specs\n for dtype in float_dtypes)\n def testAdvancedIntegerIndexingGrads(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n tol = 1e-2 if jnp.finfo(dtype).bits == 32 else None\n arg = rng(shape, dtype)\n fun = lambda x: jnp.asarray(x)[indexer]\n check_grads(fun, (arg,), 2, tol, tol, eps=1.)\n\n @parameterized.named_parameters(\n {\"testcase_name\": \"{}_inshape={}_indexer={}\"\n .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer}\n for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS\n for shape, indexer, _ in index_specs\n for dtype in all_dtypes)\n def testMixedAdvancedIntegerIndexing(self, shape, dtype, indexer):\n rng = jtu.rand_default(self.rng())\n indexer_with_dummies = [e if isinstance(e, np.ndarray) else ()\n for e in indexer]\n substitutes = [(i, e) for i, e in enumerate(indexer)\n if not isinstance(e, np.ndarray)]\n args_maker = lambda: [rng(shape, dtype), indexer_with_dummies]\n\n def jnp_fun(x, indexer_with_dummies):\n idx = 
type(indexer)(util.subvals(indexer_with_dummies, substitutes))\n return jnp.asarray(x)[idx]\n\n def np_fun(x, indexer_with_dummies):\n idx = type(indexer)(util.subvals(indexer_with_dummies, substitutes))\n return np.asarray(x)[idx]\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testAdvancedIndexingManually(self):\n x = self.rng().randn(3, 4, 5)\n index_array = np.array([0, 2, -1, 0])\n\n op = lambda x, index_array: x[..., index_array, :]\n cop = jax.jit(op)\n\n a1 = op(x, index_array)\n a2 = cop(x, index_array)\n\n self.assertAllClose(a1, a2)\n\n op = lambda x, index_array: x[..., index_array, :, index_array, None]\n cop = jax.jit(op)\n\n a1 = op(x, index_array)\n a2 = cop(x, index_array)\n\n self.assertAllClose(a1, a2)\n\n op = lambda x, index_array: x[index_array, ..., index_array[:, None], None]\n cop = jax.jit(op)\n\n a1 = op(x, index_array)\n a2 = cop(x, index_array)\n\n self.assertAllClose(a1, a2)\n\n def testUnpacking(self):\n\n def foo(x):\n a, b, c = x\n return a + b + c\n\n cfoo = jax.jit(foo)\n\n a1 = foo(np.arange(3))\n a2 = cfoo(np.arange(3))\n\n self.assertAllClose(a1, a2)\n\n def testBooleanIndexingArray1D(self):\n idx = np.array([True, True, False])\n x = jax.device_put(np.arange(3))\n ans = x[idx]\n expected = np.arange(3)[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBooleanIndexingList1D(self):\n idx = [True, True, False]\n x = jax.device_put(np.arange(3))\n with self.assertRaisesRegex(TypeError, ARRAY_MSG):\n x[idx]\n\n def testBooleanIndexingArray2DBroadcast(self):\n idx = np.array([True, True, False, True])\n x = np.arange(8).reshape(4, 2)\n ans = jax.device_put(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBooleanIndexingList2DBroadcast(self):\n idx = [True, True, False, True]\n x = np.arange(8).reshape(4, 2)\n with self.assertRaisesRegex(TypeError, ARRAY_MSG):\n jax.device_put(x)[idx]\n\n def testBooleanIndexingArray2D(self):\n idx = np.array([[True, False],\n [False, True],\n [False, False],\n [True, True]])\n x = np.arange(8).reshape(4, 2)\n ans = jax.device_put(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBoolean1DIndexingWithEllipsis(self):\n # Regression test for https://github.com/google/jax/issues/8412\n x = np.arange(24).reshape(4, 3, 2)\n idx = (..., np.array([True, False]))\n ans = jnp.array(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBoolean1DIndexingWithEllipsis2(self):\n # Regression test for https://github.com/google/jax/issues/9050\n x = np.arange(3)\n idx = (..., np.array([True, False, True]))\n ans = jnp.array(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBoolean1DIndexingWithEllipsis3(self):\n x = np.arange(6).reshape(2, 3)\n idx = (0, ..., np.array([True, False, True]))\n ans = jnp.array(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBoolean2DIndexingWithEllipsis(self):\n x = np.arange(24).reshape(4, 3, 2)\n idx = (..., np.array([[True, False], [True, False], [False, False]]))\n ans = jnp.array(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBoolean1DIndexingWithTrailingEllipsis(self):\n x = np.arange(24).reshape(4, 3, 2)\n idx = (np.array([True, False, True, False]), ...)\n ans = jnp.array(x)[idx]\n expected = x[idx]\n self.assertAllClose(ans, expected, 
check_dtypes=False)\n\n def testBooleanIndexingDynamicShapeError(self):\n x = np.zeros(3)\n i = np.array([True, True, False])\n self.assertRaises(IndexError, lambda: jax.jit(lambda x, i: x[i])(x, i))\n\n def testScalarBooleanIndexingNotImplemented(self):\n msg = \"JAX arrays do not support boolean scalar indices\"\n with self.assertRaisesRegex(TypeError, msg):\n jnp.arange(4)[True]\n with self.assertRaisesRegex(TypeError, msg):\n jnp.arange(4)[False]\n with self.assertRaisesRegex(TypeError, msg):\n jnp.arange(4)[..., True]\n\n def testIssue187(self):\n x = jnp.ones((5, 5))\n x[[0, 2, 4], [0, 2, 4]] # doesn't crash\n\n x = np.arange(25).reshape((5, 5))\n ans = jax.jit(lambda x: x[[0, 2, 4], [0, 2, 4]])(x)\n expected = x[[0, 2, 4], [0, 2, 4]]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testJVPOfGradOfIndexing(self):\n # Should return a value, even though we didn't pass a symbolic zero as the\n # index tangent.\n x = jnp.ones((3, 4), jnp.float32)\n i = jnp.ones((3,), jnp.int32)\n f = lambda x, i: jnp.sum(x[i])\n primals, tangents = jax.jvp(jax.grad(f), (x, i),\n (x, np.zeros(i.shape, dtypes.float0)))\n expected = np.broadcast_to(\n np.array([0, 3, 0], dtype=np.float32)[:, None], (3, 4))\n self.assertAllClose(expected, primals)\n self.assertAllClose(np.zeros_like(x), tangents)\n\n def testTrivialGatherIsntGenerated(self):\n # https://github.com/google/jax/issues/1621\n jaxpr = jax.make_jaxpr(lambda x: x[:, None])(np.arange(4))\n self.assertEqual(len(jaxpr.jaxpr.eqns), 1)\n self.assertNotIn('gather', str(jaxpr))\n\n jaxpr = jax.make_jaxpr(lambda x: x[0:6:1])(np.arange(4))\n self.assertEqual(len(jaxpr.jaxpr.eqns), 0)\n jaxpr = jax.make_jaxpr(lambda x: x[:4])(np.arange(4))\n self.assertEqual(len(jaxpr.jaxpr.eqns), 0)\n\n jaxpr = jax.make_jaxpr(lambda x: x[::-1])(np.arange(4))\n self.assertEqual(len(jaxpr.jaxpr.eqns), 1)\n self.assertEqual(jaxpr.jaxpr.eqns[0].primitive, lax.rev_p)\n\n def testIndexingEmptyDimension(self):\n # Issue 2671: XLA error when indexing into dimension of size 0\n x = jnp.ones((2, 0))\n # The following work, even on axis 1 of size 0\n with jax.numpy_rank_promotion('allow'):\n _ = x[0, :] + x[0, None] + x[0, 1:] + x[0, 1:3:2]\n\n with self.assertRaisesRegex(IndexError,\n \"index .* is out of bounds for axis .* with size 0\"):\n _ = np.ones((2, 0))[0, 0] # The numpy error\n with self.assertRaisesRegex(IndexError,\n \"index is out of bounds for axis .* with size 0\"):\n _ = x[0, 0] # JAX indexing\n with self.assertRaisesRegex(IndexError,\n \"index is out of bounds for axis .* with size 0\"):\n jax.jit(lambda i: x[0, i])(0) # JAX indexing under jit\n\n def testBooleanIndexingWithEmptyResult(self):\n # based on a TensorFlow Probability test that started failing after #1622\n x = jnp.array([-1])\n mask = jnp.array([False])\n ans = x[mask] # doesn't crash\n\n expected = np.array([-1])[np.array([False])]\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testBooleanIndexingShapeMismatch(self):\n # Regression test for https://github.com/google/jax/issues/7329\n x = jnp.arange(4)\n idx = jnp.array([True, False])\n with self.assertRaisesRegex(IndexError, \"boolean index did not match shape.*\"):\n x[idx]\n\n def testNontrivialBooleanIndexing(self):\n # Test nontrivial corner case in boolean indexing shape validation\n rng = jtu.rand_default(self.rng())\n index = (rng((2, 3), np.bool_), rng((6,), np.bool_))\n\n args_maker = lambda: [rng((2, 3, 6), np.int32)]\n np_fun = lambda x: np.asarray(x)[index]\n jnp_fun = lambda x: jnp.asarray(x)[index]\n\n 
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testFloatIndexingError(self):\n BAD_INDEX_TYPE_ERROR = \"Indexer must have integer or boolean type, got indexer with type\"\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jnp.zeros(2)[0.]\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jnp.zeros((2, 2))[(0, 0.)]\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jnp.zeros((2, 2))[(0, 0.)]\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jax.jit(lambda idx: jnp.zeros((2, 2))[idx])((0, 0.))\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jnp.zeros(2).at[0.].add(1.)\n with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):\n jnp.zeros(2).at[0.].set(1.)\n\n def testIndexOutOfBounds(self): # https://github.com/google/jax/issues/2245\n x = jnp.arange(5, dtype=jnp.int32) + 1\n self.assertAllClose(x, x[:10])\n\n idx = jnp.array([-10, -6, -5, -4, 0, 3, 4, 5, 6, 100])\n self.assertArraysEqual(\n x.at[idx].get(mode=\"clip\"),\n jnp.array([1, 1, 1, 2, 1, 4, 5, 5, 5, 5], jnp.int32))\n nan = np.nan\n self.assertArraysEqual(\n x.astype(jnp.float32).at[idx].get(mode=\"fill\"),\n jnp.array([nan, nan, 1, 2, 1, 4, 5, nan, nan, nan], jnp.float32))\n imin = np.iinfo(np.int32).min\n self.assertArraysEqual(\n x.at[idx].get(mode=\"fill\"),\n jnp.array([imin, imin, 1, 2, 1, 4, 5, imin, imin, imin], jnp.int32))\n umax = np.iinfo(np.uint32).max\n self.assertArraysEqual(\n x.astype(np.uint32).at[idx].get(mode=\"fill\"),\n jnp.array([umax, umax, 1, 2, 1, 4, 5, umax, umax, umax], jnp.uint32))\n self.assertArraysEqual(\n x.at[idx].get(mode=\"fill\", fill_value=7),\n jnp.array([7, 7, 1, 2, 1, 4, 5, 7, 7, 7], jnp.int32))\n\n def testIndexingWeakTypes(self):\n x = lax_internal._convert_element_type(jnp.arange(5), float, weak_type=True)\n\n a = x.at[0].set(1.0)\n self.assertEqual(a.dtype, x.dtype)\n self.assertTrue(dtypes.is_weakly_typed(a))\n\n b = x.at[0].add(1.0)\n self.assertEqual(b.dtype, x.dtype)\n self.assertTrue(dtypes.is_weakly_typed(b))\n\n c = x.at[0].mul(1.0)\n self.assertEqual(c.dtype, x.dtype)\n self.assertTrue(dtypes.is_weakly_typed(c))\n\n def testIndexingTypePromotion(self):\n def _check(x_type, y_type):\n x = jnp.arange(5, dtype=x_type)\n y = y_type(0)\n out = x.at[0].set(y)\n self.assertEqual(x.dtype, out.dtype)\n\n @jtu.ignore_warning(category=np.ComplexWarning,\n message=\"Casting complex values to real\")\n def _check_warns(x_type, y_type, msg):\n with self.assertWarnsRegex(FutureWarning, msg):\n _check(x_type, y_type)\n\n def _check_raises(x_type, y_type, msg):\n with self.assertRaisesRegex(ValueError, msg):\n _check(x_type, y_type)\n\n # Matching dtypes are always OK\n _check(jnp.int32, jnp.int32)\n _check(jnp.float32, jnp.float32)\n _check(jnp.complex64, jnp.complex64)\n\n # Weakly-typed y values promote.\n _check(jnp.int32, int)\n _check(jnp.float32, int)\n _check(jnp.float32, float)\n _check(jnp.complex64, int)\n _check(jnp.complex64, float)\n _check(jnp.complex64, complex)\n\n # in standard promotion mode, strong types can promote.\n msg = \"scatter inputs have incompatible types\"\n with jax.numpy_dtype_promotion('standard'):\n _check(jnp.int32, jnp.int16)\n _check(jnp.float32, jnp.float16)\n _check(jnp.float32, jnp.int32)\n _check(jnp.complex64, jnp.int32)\n _check(jnp.complex64, jnp.float32)\n\n # TODO(jakevdp): make these _check_raises\n _check_warns(jnp.int16, jnp.int32, msg)\n _check_warns(jnp.int32, jnp.float32, msg)\n _check_warns(jnp.int32, 
jnp.complex64, msg)\n _check_warns(jnp.float16, jnp.float32, msg)\n _check_warns(jnp.float32, jnp.complex64, msg)\n\n # in strict promotion mode, strong types do not promote.\n msg = \"Input dtypes .* have no available implicit dtype promotion path\"\n with jax.numpy_dtype_promotion('strict'):\n _check_raises(jnp.int32, jnp.int16, msg)\n _check_raises(jnp.float32, jnp.float16, msg)\n _check_raises(jnp.float32, jnp.int32, msg)\n _check_raises(jnp.complex64, jnp.int32, msg)\n _check_raises(jnp.complex64, jnp.float32, msg)\n\n _check_raises(jnp.int16, jnp.int32, msg)\n _check_raises(jnp.int32, jnp.float32, msg)\n _check_raises(jnp.int32, jnp.complex64, msg)\n _check_raises(jnp.float16, jnp.float32, msg)\n _check_raises(jnp.float32, jnp.complex64, msg)\n\n\ndef _broadcastable_shapes(shape):\n \"\"\"Returns all shapes that broadcast to `shape`.\"\"\"\n def f(rshape):\n yield []\n if rshape:\n for s in f(rshape[1:]):\n yield rshape[0:1] + s\n if rshape[0] != 1:\n for s in f(rshape[1:]):\n yield [1] + s\n for x in f(list(reversed(shape))):\n yield list(reversed(x))\n\n\n# TODO(jakevdp): move this implementation to jax.dtypes & use in scatter?\ndef _can_cast(from_, to):\n return lax.dtype(to) == dtypes.result_type(from_, to)\n\n\ndef _compatible_dtypes(op, dtype, inexact=False):\n if op == UpdateOps.ADD:\n return [dtype]\n elif inexact:\n return [dt for dt in float_dtypes if _can_cast(dt, dtype)]\n else:\n return [dt for dt in all_dtypes if _can_cast(dt, dtype)]\n\n\nclass UpdateOps(enum.Enum):\n UPDATE = 0\n ADD = 1\n MUL = 2\n DIV = 3\n POW = 4\n MIN = 5\n MAX = 6\n\n def np_fn(op, indexer, x, y):\n x = x.copy()\n x[indexer] = {\n UpdateOps.UPDATE: lambda: y,\n UpdateOps.ADD: lambda: x[indexer] + y,\n UpdateOps.MUL: lambda: x[indexer] * y,\n UpdateOps.DIV: jtu.ignore_warning(category=RuntimeWarning)(\n lambda: x[indexer] / y.astype(x.dtype)),\n UpdateOps.POW: jtu.ignore_warning(category=RuntimeWarning)(\n lambda: x[indexer] ** y.astype(x.dtype)),\n UpdateOps.MIN: lambda: np.minimum(x[indexer], y),\n UpdateOps.MAX: lambda: np.maximum(x[indexer], y),\n }[op]()\n return x\n\n def jax_fn(op, indexer, x, y, indices_are_sorted=False,\n unique_indices=False, mode=None):\n x = jnp.array(x)\n return {\n UpdateOps.UPDATE: x.at[indexer].set,\n UpdateOps.ADD: x.at[indexer].add,\n UpdateOps.MUL: x.at[indexer].multiply,\n UpdateOps.DIV: x.at[indexer].divide,\n UpdateOps.POW: x.at[indexer].power,\n UpdateOps.MIN: x.at[indexer].min,\n UpdateOps.MAX: x.at[indexer].max,\n }[op](y, indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices, mode=mode)\n\n def dtypes(op):\n if op == UpdateOps.UPDATE:\n return all_dtypes\n elif op == UpdateOps.DIV or op == UpdateOps.POW:\n return jtu.dtypes.inexact\n else:\n return default_dtypes\n\ndef _update_tol(op):\n if op == UpdateOps.POW:\n tol = {np.complex64: 1e-4 if jtu.device_under_test() == \"tpu\" else 1e-5,\n np.complex128: 1e-14}\n else:\n tol = {np.complex128: 1e-14}\n return tol\n\n\nclass IndexedUpdateTest(jtu.JaxTestCase):\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\":\n f\"{name}_inshape={jtu.format_shape_dtype_string(shape, dtype)}\"\n f\"_indexer={indexer}\"\n f\"_update={jtu.format_shape_dtype_string(update_shape, update_dtype)}\"\n f\"_op={op.name}\",\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n \"op\": op, \"mode\": mode,\n } for name, index_specs in s(STATIC_INDEXING_TESTS)\n for shape, indexer, update_shape in 
s(index_specs)\n for op in s(UpdateOps)\n for dtype in s(UpdateOps.dtypes(op))\n for update_shape in s(_broadcastable_shapes(update_shape))\n for update_dtype in s(_compatible_dtypes(op, dtype))\n for mode in s(MODES))))\n def testStaticIndexing(self, shape, dtype, update_shape, update_dtype,\n indexer, op, mode):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]\n np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)\n jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y, mode=mode)\n self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, tol=_update_tol(op))\n self._CompileAndCheck(jax_fn, args_maker)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"{}_inshape={}_indexer={}_update={}_op={}\".format(\n name, jtu.format_shape_dtype_string(shape, dtype), indexer,\n jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n \"op\": op\n } for name, index_specs in s(ADVANCED_INDEXING_TESTS_NO_REPEATS)\n for shape, indexer, update_shape in s(index_specs)\n for op in s(UpdateOps)\n for dtype in s(UpdateOps.dtypes(op))\n for update_shape in s(_broadcastable_shapes(update_shape))\n for update_dtype in s(_compatible_dtypes(op, dtype)))))\n def testAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,\n indexer, op):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]\n np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)\n jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y,\n unique_indices=True)\n self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, tol=_update_tol(op))\n self._CompileAndCheck(jax_fn, args_maker)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"{}_inshape={}_indexer={}_update={}_op={}\".format(\n name, jtu.format_shape_dtype_string(shape, dtype), indexer,\n jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n \"op\": op\n } for name, index_specs in s(ADVANCED_INDEXING_TESTS_NO_REPEATS_SORTED)\n for shape, indexer, update_shape in s(index_specs)\n for op in s(UpdateOps)\n for dtype in s(UpdateOps.dtypes(op))\n for update_shape in s(_broadcastable_shapes(update_shape))\n for update_dtype in s(_compatible_dtypes(op, dtype)))))\n def testAdvancedIndexingSorted(self, shape, dtype, update_shape, update_dtype,\n indexer, op):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]\n np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)\n jax_fn = lambda x, y: UpdateOps.jax_fn(\n op, indexer, x, y, indices_are_sorted=True, unique_indices=True)\n self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, check_dtypes=True,\n tol=_update_tol(op))\n self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"{}_inshape={}_indexer={}_update={}_op={}\".format(\n name, jtu.format_shape_dtype_string(shape, dtype), indexer,\n jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n 
\"op\": op\n } for name, index_specs in s(MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS)\n for shape, indexer, update_shape in s(index_specs)\n for op in s(UpdateOps)\n for dtype in s(UpdateOps.dtypes(op))\n for update_shape in s(_broadcastable_shapes(update_shape))\n for update_dtype in s(_compatible_dtypes(op, dtype)))))\n def testMixedAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,\n indexer, op):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]\n np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)\n jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y)\n self._CheckAgainstNumpy(np_fn, jax_fn, args_maker, tol=_update_tol(op))\n self._CompileAndCheck(jax_fn, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list({\n \"testcase_name\":\n f\"{name}_inshape={jtu.format_shape_dtype_string(shape, dtype)}\"\n f\"_indexer={indexer}\"\n f\"_update={jtu.format_shape_dtype_string(update_shape, update_dtype)}\"\n f\"_op={op.name}_mode={mode}\",\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n \"op\": op, \"mode\": mode,\n } for mode in [None] + MODES\n for name, index_specs in (\n STATIC_INDEXING_TESTS if mode == \"promise_in_bounds\" else\n STATIC_INDEXING_TESTS + STATIC_INDEXING_OUT_OF_BOUNDS_TESTS)\n for shape, indexer, update_shape in index_specs\n for op in [UpdateOps.ADD, UpdateOps.MUL, UpdateOps.UPDATE]\n for dtype in float_dtypes\n for update_shape in _broadcastable_shapes(update_shape)\n for update_dtype in _compatible_dtypes(op, dtype, inexact=True)))\n def testStaticIndexingGrads(self, shape, dtype, update_shape, update_dtype,\n indexer, op, mode):\n rng = jtu.rand_default(self.rng())\n jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y, mode=mode,\n unique_indices=True)\n x = rng(shape, dtype)\n y = rng(update_shape, update_dtype)\n check_grads(jax_fn, (x, y), 2, rtol=1e-3, atol=1e-3, eps=1.)\n\n @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({\n \"testcase_name\": \"{}_inshape={}_indexer={}_update={}_op={}\".format(\n name, jtu.format_shape_dtype_string(shape, dtype), indexer,\n jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),\n \"shape\": shape, \"dtype\": dtype, \"indexer\": indexer,\n \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n \"op\": op, \"unique_indices\": unique_indices,\n } for unique_indices in s([False, True])\n for name, index_specs in s(\n ADVANCED_INDEXING_TESTS_NO_REPEATS if unique_indices\n else ADVANCED_INDEXING_TESTS)\n for shape, indexer, update_shape in s(index_specs)\n for op in s(\n [UpdateOps.ADD, UpdateOps.MUL, UpdateOps.UPDATE] if unique_indices\n else [UpdateOps.ADD])\n for dtype in s(float_dtypes)\n for update_shape in s(_broadcastable_shapes(update_shape))\n for update_dtype in s(_compatible_dtypes(op, dtype, inexact=True)))))\n def testAdvancedIndexingGrads(self, shape, dtype, update_shape, update_dtype,\n indexer, op, unique_indices):\n rng = jtu.rand_default(self.rng())\n jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y,\n unique_indices=unique_indices)\n x = rng(shape, dtype)\n y = rng(update_shape, update_dtype)\n check_grads(jax_fn, (x, y), 2, rtol=1e-3, atol=1e-3, eps=1.)\n\n def testIndexMulGradFailsIfNotUnique(self):\n y = jnp.ones((10,), jnp.int32)\n f = lambda x, z: x.at[y].mul(z)\n\n x = jnp.ones((100,), jnp.float32)\n z = jnp.ones((10,), jnp.float32)\n with 
self.assertRaises(NotImplementedError,\n msg=\"scatter_mul gradients are only implemented if \"\n \"`unique_indices=True`\"):\n jax.jvp(f, (x, z), (x, z))\n\n def testSegmentSumBehavior(self):\n # testAdvancedIndexing compares against NumPy, and as a result doesn't check\n # repeated indices. This test is just a simple manual check, based on\n # https://www.tensorflow.org/api_docs/python/tf/math/segment_sum\n data = np.array([5, 1, 7, 2, 3, 4, 1, 3])\n segment_ids = np.array([0, 0, 0, 1, 2, 2, 3, 3])\n\n ans = jnp.zeros(np.max(segment_ids) + 1).at[segment_ids].add(data)\n expected = np.array([13, 2, 7, 4])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testSegmentSum(self):\n data = jnp.array([5, 1, 7, 2, 3, 4, 1, 3])\n segment_ids = jnp.array([0, 0, 0, 1, 2, 2, 3, 3])\n\n # test with explicit num_segments\n ans = ops.segment_sum(data, segment_ids, num_segments=4)\n expected = jnp.array([13, 2, 7, 4])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n # test with explicit num_segments larger than the higher index.\n ans = ops.segment_sum(data, segment_ids, num_segments=5)\n expected = jnp.array([13, 2, 7, 4, 0])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n # test without explicit num_segments\n ans = ops.segment_sum(data, segment_ids)\n expected = jnp.array([13, 2, 7, 4])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n # test with negative segment ids and segment ids larger than num_segments,\n # that will be wrapped with the `mod`.\n segment_ids = jnp.array([0, 4, 8, 1, 2, -6, -1, 3])\n ans = ops.segment_sum(data, segment_ids, num_segments=4)\n expected = jnp.array([5, 2, 3, 3])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n # test with negative segment ids and without without explicit num_segments\n # such as num_segments is defined by the smaller index.\n segment_ids = jnp.array([3, 3, 3, 4, 5, 5, -7, -6])\n ans = ops.segment_sum(data, segment_ids)\n expected = jnp.array([0, 0, 0, 13, 2, 7])\n self.assertAllClose(ans, expected, check_dtypes=False)\n\n def testSegmentSumOutOfBounds(self):\n def fn(data, segment_ids):\n return jax.ops.segment_sum(data, segment_ids, num_segments).sum()\n\n data = np.array([0, 0], dtype=np.float32)\n num_segments = 2\n segment_ids = np.array([2, 3])\n val, grad = jax.value_and_grad(fn)(data, segment_ids)\n self.assertAllClose(val, np.array(0., np.float32))\n self.assertAllClose(grad, np.array([0., 0.], np.float32))\n\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list({\n \"testcase_name\": \"_{}_{}_num_segments={}_bucket_size={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n reducer.__name__, num_segments, bucket_size),\n \"dtype\": dtype, \"shape\": shape,\n \"reducer\": reducer, \"op\": op, \"identity\": identity,\n \"num_segments\": num_segments, \"bucket_size\": bucket_size}\n for dtype in [np.bool_]\n for shape in [(8,), (7, 4), (6, 4, 2)]\n for bucket_size in [None, 2]\n for num_segments in [None, 1, 3])\n for reducer, op, identity in [\n (ops.segment_min, np.minimum, True),\n (ops.segment_max, np.maximum, False),\n ]))\n def testSegmentReduceBoolean(self, shape, dtype, reducer, op, identity, num_segments, bucket_size):\n rng = jtu.rand_default(self.rng())\n idx_rng = jtu.rand_int(self.rng(), low=-2, high=3)\n args_maker = lambda: [rng(shape, dtype), idx_rng(shape[:1], jnp.int32)]\n\n if np.issubdtype(dtype, np.integer):\n if np.isposinf(identity):\n identity = np.iinfo(dtype).max\n elif np.isneginf(identity):\n identity = 
np.iinfo(dtype).min\n\n jnp_fun = lambda data, segment_ids: reducer(\n data, segment_ids, num_segments=num_segments, bucket_size=bucket_size)\n\n def np_fun(data, segment_ids):\n size = num_segments if num_segments is not None else (segment_ids.max() + 1)\n out = np.full((size,) + shape[1:], identity, dtype)\n for i, val in zip(segment_ids, data):\n if 0 <= i < size:\n out[i] = op(out[i], val).astype(dtype)\n return out\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n if num_segments is not None:\n self._CompileAndCheck(jnp_fun, args_maker)\n\n\n @parameterized.named_parameters(itertools.chain.from_iterable(\n jtu.cases_from_list({\n \"testcase_name\": \"_{}_{}_num_segments={}_bucket_size={}\".format(\n jtu.format_shape_dtype_string(shape, dtype),\n reducer.__name__, num_segments, bucket_size),\n \"dtype\": dtype, \"shape\": shape,\n \"reducer\": reducer, \"op\": op, \"identity\": identity,\n \"num_segments\": num_segments, \"bucket_size\": bucket_size}\n for dtype in default_dtypes\n for shape in [(8,), (7, 4), (6, 4, 2)]\n for bucket_size in [None, 2]\n for num_segments in [None, 1, 3])\n for reducer, op, identity in [\n (ops.segment_sum, np.add, 0),\n (ops.segment_prod, np.multiply, 1),\n (ops.segment_min, np.minimum, float('inf')),\n (ops.segment_max, np.maximum, -float('inf')),\n ]))\n def testSegmentReduce(self, shape, dtype, reducer, op, identity, num_segments, bucket_size):\n rng = jtu.rand_default(self.rng())\n idx_rng = jtu.rand_int(self.rng(), low=-2, high=3)\n args_maker = lambda: [rng(shape, dtype), idx_rng(shape[:1], jnp.int32)]\n\n if np.issubdtype(dtype, np.integer):\n if np.isposinf(identity):\n identity = np.iinfo(dtype).max\n elif np.isneginf(identity):\n identity = np.iinfo(dtype).min\n\n jnp_fun = lambda data, segment_ids: reducer(\n data, segment_ids, num_segments=num_segments, bucket_size=bucket_size)\n\n def np_fun(data, segment_ids):\n size = num_segments if num_segments is not None else (segment_ids.max() + 1)\n out = np.full((size,) + shape[1:], identity, dtype)\n for i, val in zip(segment_ids, data):\n if 0 <= i < size:\n out[i] = op(out[i], val).astype(dtype)\n return out\n\n self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)\n if num_segments is not None:\n self._CompileAndCheck(jnp_fun, args_maker)\n\n def testIndexDtypeError(self):\n # https://github.com/google/jax/issues/2795\n jnp.array(1) # get rid of startup warning\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"error\")\n jnp.zeros(5).at[::2].set(1)\n self.assertLen(w, 0)\n\n @contextmanager\n def assertNoWarnings(self):\n with warnings.catch_warnings(record=True) as caught_warnings:\n yield\n self.assertEmpty(caught_warnings)\n\n @parameterized.named_parameters(jtu.cases_from_list({\n \"testcase_name\": f\"idx={idx}\", \"idx\": idx, \"idx_type\": idx_type}\n for idx, idx_type in [\n ([0], \"array\"),\n ([0, 0], \"array\"),\n ([[0, 0]], \"tuple\"),\n ([0, [0, 1]], \"tuple\"),\n ([0, np.arange(2)], \"tuple\"),\n ([0, None], \"tuple\"),\n ([0, slice(None)], \"tuple\"),\n ]))\n def testIndexSequenceDeprecation(self, idx, idx_type):\n normalize = {\"array\": np.array, \"tuple\": tuple}[idx_type]\n msg = {\"array\": ARRAY_MSG, \"tuple\": TUPLE_MSG}[idx_type]\n x = jnp.arange(6).reshape(3, 2)\n\n with self.assertRaisesRegex(TypeError, msg):\n x[idx]\n with self.assertNoWarnings():\n x[normalize(idx)]\n\n with self.assertRaisesRegex(TypeError, msg):\n x.at[idx].set(0)\n with self.assertNoWarnings():\n x.at[normalize(idx)].set(0)\n\n def 
testIndexedUpdateAliasingBug(self):\n # https://github.com/google/jax/issues/7461\n fn = lambda x: x.at[1:].set(1 + x[:-1])\n y = jnp.zeros(8)\n self.assertArraysEqual(fn(y), jax.jit(fn)(y))\n\nif __name__ == \"__main__\":\n absltest.main(testLoader=jtu.JaxTestLoader())\n"
] | [
[
"numpy.zeros_like",
"numpy.ones",
"numpy.zeros",
"numpy.maximum",
"numpy.isposinf",
"numpy.issubdtype",
"numpy.asarray",
"numpy.arange",
"numpy.int32",
"numpy.iinfo",
"numpy.max",
"numpy.array",
"numpy.minimum",
"numpy.full",
"numpy.isneginf"
]
] |
archman/python-mpl4qt | [
"f84fefb95113492407899206269ff82b609279b2"
] | [
"mpl4qt/widgets/mplbasewidget.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nmplbasewidget.py\n\nBase class for matplotlib widget for PyQt.\n\nCopyright (C) 2018 Tong Zhang <[email protected]>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\"\"\"\n\nimport time\nimport numpy as np\nfrom collections import deque\nfrom collections import OrderedDict\nfrom functools import partial\n\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtCore import QVariant\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import pyqtProperty\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import pyqtSlot\n\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtGui import QFontDatabase\nfrom PyQt5.QtGui import QGuiApplication\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtGui import QPalette\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtGui import QResizeEvent\n\nfrom PyQt5.QtWidgets import QAction\nfrom PyQt5.QtWidgets import QFileDialog\nfrom PyQt5.QtWidgets import QMenu\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtWidgets import QSizePolicy\nfrom PyQt5.QtWidgets import QVBoxLayout\nfrom PyQt5.QtWidgets import QWidget\n\nimport matplotlib as mpl\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import Figure\nfrom matplotlib.lines import Line2D\nfrom matplotlib.ticker import AutoMinorLocator\nfrom matplotlib.ticker import NullLocator\n\nfrom mpl4qt.widgets.mpltoolbar import MToolbar\nfrom mpl4qt.widgets.utils import ALL_COLORMAPS\nfrom mpl4qt.widgets.utils import AUTOFORMATTER\nfrom mpl4qt.widgets.utils import AUTOFORMATTER_MATHTEXT\nfrom mpl4qt.widgets.utils import BOOTSTRAP_GREEN\nfrom mpl4qt.widgets.utils import BOOTSTRAP_RED\nfrom mpl4qt.widgets.utils import LINE_STY_VALS\nfrom mpl4qt.widgets.utils import LINE_DS_VALS\nfrom mpl4qt.widgets.utils import MatplotlibCurveWidgetSettings\nfrom mpl4qt.widgets.utils import SCALE_STY_VALS\nfrom mpl4qt.widgets.utils import cycle_list_next\nfrom mpl4qt.widgets.utils import mfont_to_qfont\nfrom mpl4qt.widgets.utils import mplcolor2hex\nfrom mpl4qt.widgets.utils import set_font\nfrom mpl4qt.widgets.utils import generate_formatter\nfrom mpl4qt.widgets.utils import is_cmap_valid\n\nMPL_VERSION = mpl.__version__\nDTMSEC = 500 # msec\nDTSEC = DTMSEC / 1000.0 # sec\n\n\nclass BasePlotWidget(QWidget):\n # combo keyshorts, keystring, timestamp\n keycombo_cached = pyqtSignal(str, float)\n\n # indices list of points selected by lasso tool,\n # ind: array, pts: array (selected)\n # for i,idx in enumerate(ind): idx, pts[i]\n selectedIndicesUpdated = pyqtSignal(QVariant, QVariant)\n\n # zoomed ROI changed\n zoom_roi_changed = pyqtSignal(tuple, tuple)\n\n # grid\n gridOnUpdated = pyqtSignal(bool)\n\n # legend\n legendOnUpdated = pyqtSignal(bool)\n\n # autoscale\n autoScaleOnUpdated = pyqtSignal(bool)\n\n # bg color\n bgColorChanged = pyqtSignal(QColor)\n\n # xy pos, x,y 
(default) or x,y,z\n xyposUpdated = pyqtSignal(list)\n\n # cross markers updated, is_new_marker?, x, y, mk_name\n markerUpdated = pyqtSignal(bool, float, float, 'QString')\n\n # selected point/line\n selectedPointChanged = pyqtSignal(float, float)\n selectedLineChanged = pyqtSignal(Line2D)\n\n # shaded area updated (mpltoolbar)\n shaded_area_updated = pyqtSignal(tuple, tuple)\n\n # xlimit is changed\n xlimitMinChanged = pyqtSignal(float)\n xlimitMaxChanged = pyqtSignal(float)\n # ylimit is changed\n ylimitMinChanged = pyqtSignal(float)\n ylimitMaxChanged = pyqtSignal(float)\n\n def __init__(self, parent=None, show_toolbar=True, **kws):\n super(BasePlotWidget, self).__init__(parent)\n self.widget_type = '__BasePlotWidget'\n self.figure = Figure()\n self.axes = self.figure.add_subplot(111)\n self.axes.set_picker(True)\n self.init_figure()\n self.canvas = FigureCanvas(self.figure)\n self.setParent(parent)\n self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.sys_bg_color = self.palette().color(QPalette.Background)\n self.sys_fg_color = self.palette().color(QPalette.Foreground)\n DEFAULT_FONTS = {\n 'title': QFontDatabase.systemFont(QFontDatabase.TitleFont),\n 'fixed': QFontDatabase.systemFont(QFontDatabase.FixedFont),\n 'general': QFontDatabase.systemFont(QFontDatabase.GeneralFont),\n }\n self.sys_label_font = DEFAULT_FONTS['general']\n self.sys_title_font = DEFAULT_FONTS['title']\n self.post_style_figure()\n # set up layout\n self.set_up_layout()\n\n self.adjustSize()\n self.set_context_menu()\n\n # track (x,y)\n self.canvas.mpl_connect('motion_notify_event', self.on_motion)\n\n # key press\n self.canvas.mpl_connect('key_press_event', self.on_key_press)\n\n # key release\n self.canvas.mpl_connect('key_release_event', self.on_key_release)\n\n # pick\n self.canvas.mpl_connect('pick_event', self.on_pick)\n\n # button\n self.canvas.mpl_connect('button_press_event', self.on_press)\n self.canvas.mpl_connect('button_release_event', self.on_release)\n\n self.canvas.mpl_connect('scroll_event', self.on_scroll)\n\n self.canvas.setFocusPolicy(Qt.ClickFocus)\n self.canvas.setFocus()\n\n # patches container: mk_area,\n # see draw_shade_area()\n self._patches = {}\n\n # dnd\n self.setAcceptDrops(True)\n\n # window/widget/dialog handlers\n self._handlers = {}\n\n # cross markers\n self._markers = OrderedDict() # list of {mk_name: [hl,vl,cp,pt,(x,y)]}\n self._to_add_marker = False\n self._added_marker = False # if added or not\n self._marker_id = 1 # initial marker id, always increase, even for deletion\n self._marker_with_xy = False # anote with (x,y)\n self._visible_hvlines = True # default visibility\n self.markerUpdated.connect(self.on_cross_markers_update)\n\n # pan\n self._pan_on = False\n\n # keypress cache\n self.dq_keycombo = deque([], 2)\n self.keycombo_cached.connect(self.on_update_keycombo_cache)\n\n # tb_toggle\n self._fig_tb_toggle = show_toolbar\n if self._fig_tb_toggle:\n # show mpltool\n self.__show_mpl_tools()\n\n #\n self.as_ann = None\n self.autoScaleOnUpdated.connect(self.on_autoscale_toggled)\n\n # add marker mpltool\n self._mk_add_hint_ann = None\n\n # [(lbl, (o,lw,mw))]\n self._last_sel_lines = {}\n\n def on_cross_markers_update(self):\n # cross markers updated.\n if len(self._markers) == 2:\n w = self._handlers.get('w_mpl_tools', None)\n if w is None:\n return # usually is not None\n w.on_show_mks()\n w.mk_view.close()\n\n def draw_shade_area(self, p1, p2, **kws):\n # see markers view\n from matplotlib.patches import Rectangle\n def f(p1, p2):\n x1, y1 = p1\n x2, 
y2 = p2\n pts = sorted([[x1, y1], [x2, y2], [x1, y2], [x2, y1]])\n p1, p4 = pts[0], pts[-1]\n return p1, p4[0] - p1[0], p4[1] - p1[1]\n p = Rectangle(*f(p1, p2), **kws)\n self._patches['mk_area'] = p\n self.axes.add_patch(p)\n self.update_figure()\n\n @pyqtSlot(bool)\n def on_autoscale_toggled(self, auto_scale_enabled):\n # if auto scale is enabled, put text label\n if auto_scale_enabled:\n if self.as_ann is None:\n self.as_ann = self.axes.annotate('AutoScale is Enabled',\n xy=(1.0, 1.01),\n ha='right', va='bottom',\n xycoords=('axes fraction'),\n color='w',\n bbox=dict(\n boxstyle='round,pad=0.3',\n fc=BOOTSTRAP_GREEN, ec=BOOTSTRAP_GREEN,\n lw=1.0, alpha=0.8))\n else:\n self.as_ann.set_visible(True)\n else:\n if self.as_ann is not None:\n self.as_ann.set_visible(False)\n self.update_figure()\n\n @pyqtSlot(bool, 'QString', bool)\n def on_marker_add_checked(self, is_checked, mk_name, update_flag):\n # Add marker tool is checked.\n if update_flag:\n text = \"Updating Marker ({}) is Activated, Finish by CTRL+M\\nStart New by CTRL+M\".format(mk_name)\n else:\n text = \"Adding Marker ({}) is Activated, Finish by CTRL+M\\nStart New by CTRL+M\".format(mk_name)\n if is_checked:\n if self._mk_add_hint_ann is None:\n self._mk_add_hint_ann = self.axes.annotate(\n text,\n xy=(0, 1.01),\n ha='left', va='bottom',\n xycoords=('axes fraction'),\n color='w',\n bbox=dict(\n boxstyle='round,pad=0.3',\n fc=BOOTSTRAP_RED, ec=BOOTSTRAP_RED,\n lw=1.0, alpha=0.8))\n else:\n self._mk_add_hint_ann.set_text(text)\n self._mk_add_hint_ann.set_visible(True)\n else:\n if self._mk_add_hint_ann is not None:\n self._mk_add_hint_ann.set_visible(False)\n self.update_figure()\n\n def get_crossmk_config(self, name):\n # get cross marker (w/ lines, text) config by name\n hl, _, cp, pt, _, = self._markers[name]\n return {'ls': hl.get_ls(), 'lw': hl.get_lw(),\n 'c': hl.get_c(),\n 'line_visible': hl.get_visible(),\n 'line_alpha': hl.get_alpha(),\n 'ms': cp.get_ms(), 'mk': cp.get_marker(),\n 'mew': cp.get_mew(), 'mec': cp.get_mec(),\n 'mfc': cp.get_mfc(),\n 'mk_visible': cp.get_visible(),\n 'mk_alpha': cp.get_alpha(),\n 'text_visible': pt.get_visible(),\n 'text_color': pt.get_color(),\n 'text_content': pt.get_text(),\n 'text_alpha': pt.get_bbox_patch().get_alpha(),}\n\n def draw_hvlines(self, x0, y0, name, mc=None):\n if name in self._markers:\n is_new_marker = False\n hl, vl, cp, pt, _ = self._markers[name]\n if mc is None:\n mc = hl.get_color()\n else:\n is_new_marker = True\n hl, vl, cp, pt = None, None, None, None\n assert mc is not None # mc must be given\n\n if hl is None:\n hl = self.axes.axhline(y0,\n alpha=0.8, color=mc, ls='--')\n hl.set_label('_H-Line {}'.format(name))\n else:\n hl.set_ydata([y0, y0])\n\n if vl is None:\n vl = self.axes.axvline(x0,\n alpha=0.8, color=mc, ls='--')\n vl.set_label('_V-Line {}'.format(name))\n else:\n vl.set_xdata([x0, x0])\n\n if cp is None:\n cp, = self.axes.plot([x0], [y0], 'o',\n mec=mc, mfc='w', mew=2.0, alpha=0.9)\n cp.set_label('_Cross-Point {}'.format(name))\n if self._marker_with_xy:\n text = '{0:g},{1:g}'.format(x0, y0)\n else:\n text = name\n pt = self.axes.annotate(text,\n color='#000000', xy=(x0, y0), xytext=(15, 15),\n xycoords=\"data\", textcoords=\"offset pixels\",\n bbox=dict(boxstyle=\"round\", fc='w'))\n pt.get_bbox_patch().set_alpha(0.5)\n else:\n cp.set_data([x0], [y0])\n pt.xy = (x0, y0)\n if self._marker_with_xy:\n pt.set_text('{0:g},{1:g}'.format(x0, y0))\n else:\n pt.set_text(name)\n self._markers[name][-1] = (x0, y0)\n\n if is_new_marker:\n self._markers[name] = 
[hl, vl, cp, pt, (x0, y0)]\n\n self.markerUpdated.emit(is_new_marker, x0, y0, name)\n self.update_figure()\n\n def set_visible_hvlines(self, flag=True):\n \"\"\"Set all markers visible (*flag* is True) or invisible (*flag* is False).\n \"\"\"\n self._visible_hvlines = flag\n for name, (hl, vl, cp, pt, _,) in self._markers.items():\n for o in (hl, vl, cp, pt):\n o.set_visible(flag)\n self.update_figure()\n\n def __show_mpl_tools(self):\n if 'w_mpl_tools' in self._handlers:\n w = self._handlers['w_mpl_tools']\n else:\n w = MToolbar(self.figure.canvas, self)\n self._handlers['w_mpl_tools'] = w\n w.selectedIndicesUpdated.connect(self.on_selected_indices)\n w.zoom_roi_changed.connect(self.on_zoom_roi_changed)\n w.shaded_area_updated.connect(self.on_shaded_area_updated)\n w.show_toolbar()\n w.floatable_changed.emit(False)\n\n @pyqtSlot(QVariant, QVariant)\n def on_selected_indices(self, ind, pts):\n self.selectedIndicesUpdated.emit(ind, pts)\n\n @pyqtSlot(tuple, tuple)\n def on_shaded_area_updated(self, xlim, ylim):\n self.shaded_area_updated.emit(xlim, ylim)\n\n @pyqtSlot(tuple, tuple)\n def on_zoom_roi_changed(self, xlim, ylim):\n # print(\"Zoomed Rect ROI: \", xlim, ylim)\n self.zoom_roi_changed.emit(xlim, ylim)\n\n def set_up_layout(self):\n self.vbox = QVBoxLayout()\n self.vbox.setContentsMargins(0, 0, 0, 0)\n self.vbox.addWidget(self.canvas, 1)\n self.setLayout(self.vbox)\n\n def post_style_figure(self):\n self.init_prop_settings()\n self.set_figure_color()\n\n def init_prop_settings(self):\n \"\"\"Initial settings for properties.\n \"\"\"\n ## fonts:\n # xy labels\n lbl = self.axes.xaxis.label\n self._fig_xylabel_font = mfont_to_qfont(lbl.get_fontproperties())\n self._fig_xylabel_visible = lbl.get_visible()\n # xy ticklabels\n tklbl = self.axes.get_xticklabels()[0]\n self._fig_xyticks_font = mfont_to_qfont(tklbl.get_fontproperties())\n # title\n title = self.axes.title\n self._fig_title_font = mfont_to_qfont(title.get_fontproperties())\n self._fig_title_visible = title.get_visible()\n\n ## border, if auto scale is enabled, style could not be changed.\n o = list(self.axes.spines.values())[0]\n # c, lw, ls, vis,\n self._fig_border_color = QColor(mplcolor2hex(o.get_ec()))\n self._fig_border_lw = o.get_linewidth()\n self._fig_border_ls = o.get_linestyle()\n self._fig_border_visible = o.get_visible()\n\n # aspect\n self._fig_aspect = str(self.axes.get_aspect())\n\n # tight?\n self._fig_tight_layout = False\n\n # lbls,title\n self._fig_title = ''\n self._fig_xlabel = ''\n self._fig_ylabel = ''\n\n # figure, w,h,dpi\n self._fig_width, self._fig_height = self.figure.get_size_inches()\n self._fig_dpi = self.figure.get_dpi()\n\n # bg color\n self._fig_bgcolor = self.sys_bg_color\n\n # grid color\n self._fig_grid_color = QColor('gray')\n # grid toggle\n self._fig_grid_toggle = False\n\n # mticks toggle\n self._fig_mticks_toggle = False\n\n # legend toggle\n self._legend_toggle = False\n\n # legend location\n self._legend_location = 0\n\n # xyticks angle\n self._fig_xticks_angle = 0\n self._fig_yticks_angle = 0\n\n # xyticks color\n self._fig_ticks_color = self.sys_fg_color\n\n # tick format\n self._fig_xtick_formatter_type = 'Auto'\n self._fig_xtick_formatter = None # placeholder only\n self._fig_xtick_cfmt = '' # c string format for FuncFormatter\n self._fig_ytick_formatter_type = 'Auto'\n self._fig_ytick_formatter = None # placeholder only\n self._fig_ytick_cfmt = '' # c string format for FuncFormatter\n self._fig_ticks_enable_mathtext = False # use math text or not\n\n # xy axis scale\n 
self._fig_xscale = 'linear'\n self._fig_yscale = 'linear'\n\n # xylimits\n self._xlim_min, self._xlim_max = self.axes.get_xlim()\n self._ylim_min, self._ylim_max = self.axes.get_ylim()\n\n # ticklabels visibility\n xtklbl = self.axes.get_xticklabels()[0]\n ytklbl = self.axes.get_yticklabels()[0]\n self._fig_xticks_visible = xtklbl.get_visible()\n self._fig_yticks_visible = ytklbl.get_visible()\n\n # auto scale\n self._fig_auto_scale = False # default disable autoscale\n\n def on_scroll(self, e):\n if e.inaxes is None:\n return\n if e.step < 0:\n f = 1.05 ** (-e.step)\n else:\n f = 0.95 ** e.step\n self.zoom(e, f)\n\n def zoom(self, e, factor):\n x0, y0 = e.xdata, e.ydata\n x_left, x_right = self.axes.get_xlim()\n y_bottom, y_up = self.axes.get_ylim()\n\n self.axes.set_xlim((x0 - (x0 - x_left) * factor,\n x0 + (x_right - x0) * factor))\n self.axes.set_ylim((y0 - (y0 - y_bottom) * factor,\n y0 + (y_up - y0) * factor))\n self.update_figure()\n\n def on_motion(self, evt):\n if evt.inaxes is None:\n return\n x_pos, y_pos = evt.xdata, evt.ydata\n self.xyposUpdated.emit([x_pos, y_pos])\n\n def on_key_press(self, e):\n k, t = e.key, time.time()\n self.keycombo_cached.emit(k, t)\n QTimer.singleShot(DTMSEC, partial(self._on_delay_pop, k, t))\n\n def on_key_release(self, e):\n if len(self.dq_keycombo) != 2:\n return\n (k1, t1) = self.dq_keycombo.popleft()\n (k2, t2) = self.dq_keycombo.popleft()\n if t2 - t1 < DTSEC:\n self.process_keyshort_combo(k1, k2)\n\n def on_pick(self, evt):\n o = evt.artist\n if isinstance(o, Line2D):\n lw0, mw0 = o.get_lw(), o.get_mew()\n x, y = o.get_data()\n ind = evt.ind\n x0, y0 = x[ind][0], y[ind][0]\n o.set_lw(lw0 * 2)\n o.set_mew(mw0 * 2)\n self._last_sel_lines.setdefault(\n o.get_label(),\n (o, lw0, mw0))\n self.selectedPointChanged.emit(x0, y0)\n self.selectedLineChanged.emit(o)\n self.update_figure()\n elif isinstance(evt.artist, Axes):\n if self._last_sel_lines:\n for lbl, (o, lw0, mw0) in self._last_sel_lines.items():\n o.set_lw(lw0)\n o.set_mew(mw0)\n self.update_figure()\n self._last_sel_lines = {}\n\n def on_press(self, e):\n if e.inaxes is None:\n return\n if e.button == 1 and self._to_add_marker:\n self.draw_hvlines(e.xdata, e.ydata, self._mk_name, self._current_mc)\n self.set_visible_hvlines(self._visible_hvlines)\n self._added_marker = True\n QGuiApplication.restoreOverrideCursor()\n\n def on_release(self, e):\n pass\n\n def dragEnterEvent(self, e):\n pass\n\n def dropEvent(self, e):\n pass\n\n def init_figure(self):\n raise NotImplementedError\n\n def update_figure(self):\n if self._fig_auto_scale:\n try:\n self.axes.relim()\n except:\n pass\n else:\n self.axes.autoscale()\n self.canvas.draw_idle()\n\n def contextMenuEvent(self, evt):\n self._create_ctxtmenu().exec_(self.mapToGlobal(evt.pos()))\n\n def _create_ctxtmenu(self):\n menu = QMenu(self)\n config_action = QAction(QIcon(QPixmap(\":/tools/config.png\")),\n \"Config\", menu)\n config_action.setShortcut(\"c,c\")\n config_action.setObjectName('config_action')\n export_action = QAction(QIcon(QPixmap(\":/tools/export.png\")),\n \"Export\", menu)\n import_action = QAction(QIcon(QPixmap(\":/tools/import.png\")),\n \"Import\", menu)\n reset_action = QAction(QIcon(QPixmap(\":/tools/reset.png\")),\n \"Reset\", menu)\n tb_action = QAction(QIcon(QPixmap(\":/tools/tools.png\")),\n \"Tools\", menu)\n tb_action.setObjectName('tb_action')\n tb_action.setShortcut(\"t,t\")\n fitting_action = QAction(QIcon(QPixmap(\":/tools/fitting.png\")),\n \"Fitting\", menu)\n export_data_action = 
QAction(QIcon(QPixmap(\":/tools/export.png\")),\n \"Export Data\", menu)\n info_action = QAction(QIcon(QPixmap(\":/tools/info.png\")),\n \"About\", menu)\n keyshort_action = QAction(QIcon(QPixmap(\":/tools/keyshort.png\")),\n \"Shortcuts\", menu)\n\n menu.addAction(config_action)\n menu.addAction(export_action)\n menu.addAction(import_action)\n menu.addAction(reset_action)\n menu.addSeparator()\n menu.addAction(tb_action)\n menu.addAction(fitting_action)\n menu.addAction(export_data_action)\n menu.addSeparator()\n menu.addAction(keyshort_action)\n menu.addAction(info_action)\n\n menu.setStyleSheet('QMenu {margin: 2px;}')\n\n config_action.triggered.connect(self.on_config)\n export_action.triggered.connect(self.on_export_config)\n import_action.triggered.connect(self.on_import_config)\n reset_action.triggered.connect(self.on_reset_config)\n tb_action.triggered.connect(self.toggle_mpl_tools)\n fitting_action.triggered.connect(self.on_fitting_data)\n export_data_action.triggered.connect(self.on_export_data)\n info_action.triggered.connect(self.on_info)\n keyshort_action.triggered.connect(self.kbd_help)\n\n return menu\n\n @pyqtSlot()\n def on_fitting_data(self):\n \"\"\"Fitting data.\n \"\"\"\n raise NotImplementedError(\"Fitting data is to be implemented.\")\n\n @pyqtSlot()\n def on_export_data(self):\n raise NotImplementedError(\"Export data is to be implemented.\")\n\n @pyqtSlot()\n def on_info(self):\n from ._info import get_pkg_info\n QMessageBox.about(self, 'About mpl4qt', get_pkg_info())\n\n @pyqtSlot()\n def toggle_mpl_tools(self):\n self.setToolbarToggle(not self.getToolbarToggle())\n\n @pyqtSlot()\n def on_reset_config(self):\n # apply default settings\n raise NotImplementedError(\"Reset config is to be implemented.\")\n\n @pyqtSlot()\n def on_config(self):\n raise NotImplementedError(\"Config panel is to be implemented.\")\n\n @pyqtSlot()\n def on_export_config(self):\n filepath, _ = QFileDialog.getSaveFileName(self,\n \"Save Settings\",\n \"./mpl_settings.json\",\n \"JSON Files (*.json)\")\n if not filepath:\n return\n try:\n s = self.get_mpl_settings()\n s.write(filepath, sort_keys=False)\n except:\n QMessageBox.warning(self, \"Warning\",\n \"Cannot export settings to {}\".format(filepath),\n QMessageBox.Ok)\n else:\n QMessageBox.information(self, \"Information\",\n \"Successfully export settings to {}\".format(filepath),\n QMessageBox.Ok)\n\n @pyqtSlot()\n def on_import_config(self):\n filepath, _ = QFileDialog.getOpenFileName(self,\n \"Open Settings\",\n \"./mpl_settings.json\",\n \"JSON Files (*.json)\")\n if not filepath:\n return\n self._import_mpl_settings(filepath)\n\n def apply_mpl_settings(self, settings):\n pass\n\n def _import_mpl_settings(self, filepath):\n try:\n s = MatplotlibCurveWidgetSettings(filepath)\n self.apply_mpl_settings(s)\n except:\n QMessageBox.warning(self, \"Warning\",\n \"Cannot import&apply settings with {}\".format(filepath),\n QMessageBox.Ok)\n else:\n QMessageBox.information(self, \"Information\",\n \"Successfully import&apply settings with {}\".format(filepath),\n QMessageBox.Ok)\n\n def get_mpl_settings(self):\n \"\"\"Return all the settings for the current figure.\n \"\"\"\n pass\n\n def resize_figure(self):\n \"\"\"Must be triggered for set fig size.\n \"\"\"\n self.canvas.resizeEvent(QResizeEvent(self.canvas.size(), self.canvas.size()))\n\n def set_figure_color(self, color=None):\n if color is None:\n color = self.sys_bg_color.getRgbF()\n self.figure.set_facecolor(color)\n self.figure.set_edgecolor(color)\n if MPL_VERSION > \"1.5.1\":\n 
self.axes.set_facecolor(color)\n else:\n self.axes.set_axis_bgcolor(color)\n\n def set_ticks_color(self, color=None):\n if color is None:\n color = self.sys_bg_color.getRgbF()\n all_lbls = self.axes.get_xticklabels() + self.axes.get_yticklabels()\n [lbl.set_color(color) for lbl in all_lbls]\n\n def set_ticks_visible(self, visible, xoy=\"x\"):\n if getattr(self, \"_fig_{}ticks_visible\".format(xoy)):\n tklbls = getattr(self.axes, 'get_{}ticklabels'.format(xoy))()\n # !hiding cannot be reversed!\n [i.set_visible(visible) for i in tklbls]\n else:\n getattr(self.axes, '{}axis'.format(xoy)).reset_ticks()\n self.rotate_ticks(self._fig_xticks_angle, 'x')\n self.rotate_ticks(self._fig_yticks_angle, 'y')\n\n def set_xticks(self, tks):\n self.axes.set_xticks(tks)\n [set_font(lbl, self._fig_xyticks_font) for lbl in self.axes.get_xticklabels()]\n self.update_figure()\n\n def set_yticks(self, tks):\n self.axes.set_yticks(tks)\n [set_font(lbl, self._fig_xyticks_font) for lbl in self.axes.get_yticklabels()]\n self.update_figure()\n\n def set_xticklabels(self, tklbls):\n self.axes.set_xticklabels(tklbls)\n self.update_figure()\n\n def set_yticklabels(self, tklbls):\n self.axes.set_yticklabels(tklbls)\n self.update_figure()\n\n def toggle_mticks(self, f):\n if f:\n self.axes.xaxis.set_minor_locator(AutoMinorLocator())\n self.axes.yaxis.set_minor_locator(AutoMinorLocator())\n else:\n self.axes.xaxis.set_minor_locator(NullLocator())\n self.axes.yaxis.set_minor_locator(NullLocator())\n\n def toggle_grid(self,\n toggle_checked=False,\n which='major',\n b=None,\n color=None,\n **kws):\n if toggle_checked:\n which = 'both' if kws.get('mticks', True) else 'major'\n self.axes.grid(which=which, color=color, linestyle='--')\n else:\n self.axes.grid(b=False, which='minor')\n self.axes.grid(b=False)\n\n def set_xylabel_font(self, font=None):\n if font is None:\n font = self.sys_label_font\n set_font(self.axes.xaxis.label, font)\n set_font(self.axes.yaxis.label, font)\n\n def set_xyticks_font(self, font=None):\n if font is None:\n font = self.sys_label_font\n all_lbls = self.axes.get_xticklabels() + self.axes.get_yticklabels()\n [set_font(lbl, font) for lbl in all_lbls]\n\n def set_title_font(self, font=None):\n if font is None:\n font = self.sys_title_font\n set_font(self.axes.title, font)\n\n def set_context_menu(self, ):\n self.setContextMenuPolicy(Qt.DefaultContextMenu)\n\n def clear_figure(self):\n self.axes.clear()\n self.update_figure()\n\n def clear_data(self):\n \"\"\"Set with empty canvas.\n \"\"\"\n pass\n\n @pyqtSlot('QString', float)\n def on_update_keycombo_cache(self, key, ts):\n self.dq_keycombo.append((key, ts))\n\n def _on_delay_pop(self, k, t):\n if (k, t) not in self.dq_keycombo:\n return\n self.process_keyshort(k)\n self.dq_keycombo.remove((k, t))\n\n def set_border_color(self, c):\n for _, o in self.axes.spines.items():\n o.set_color(c.getRgbF())\n\n def set_border_lw(self, x):\n for _, o in self.axes.spines.items():\n o.set_linewidth(x)\n\n def set_border_ls(self, s):\n for _, o in self.axes.spines.items():\n o.set_linestyle(s)\n\n def set_border_visible(self, f):\n for _, o in self.axes.spines.items():\n o.set_visible(f)\n\n def getFigureAutoScale(self):\n return self._fig_auto_scale\n\n @pyqtSlot(bool)\n def setFigureAutoScale(self, f):\n \"\"\"Set xy limits as autoscale or not.\n\n Parameters\n ----------\n f : bool\n Toggle for the autoscale.\n \"\"\"\n self._fig_auto_scale = f\n if f:\n self.set_autoscale()\n #\n self.autoScaleOnUpdated.emit(f)\n\n figureAutoScale = pyqtProperty(bool, 
getFigureAutoScale,\n setFigureAutoScale)\n\n def getFigureBorderColor(self):\n return self._fig_border_color\n\n @pyqtSlot(QColor)\n def setFigureBorderColor(self, c, **kws):\n \"\"\"Set color for the data boundaries.\n\n Parameters\n ----------\n c : QColor\n Color of the boundaries.\n \"\"\"\n self._fig_border_color = c\n self.set_border_color(c)\n self.update_figure()\n\n figureBorderColor = pyqtProperty(QColor, getFigureBorderColor,\n setFigureBorderColor)\n\n def getFigureBorderLineWidth(self):\n return self._fig_border_lw\n\n @pyqtSlot(float)\n def setFigureBorderLineWidth(self, x):\n \"\"\"Set line width for the border.\n\n Parameters\n ----------\n x : float\n Line width.\n \"\"\"\n self._fig_border_lw = x\n self.set_border_lw(x)\n self.update_figure()\n\n figureBorderLineWidth = pyqtProperty(float, getFigureBorderLineWidth,\n setFigureBorderLineWidth)\n\n def getFigureBorderLineStyle(self):\n return self._fig_border_ls\n\n @pyqtSlot('QString')\n def setFigureBorderLineStyle(self, s):\n \"\"\"Set line style for the border.\n\n Parameters\n ----------\n s : str\n String for the line style, see `line style <https://matplotlib.org/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D>`_.\n \"\"\"\n if s not in LINE_STY_VALS:\n return\n self._fig_border_ls = s\n self.set_border_ls(s)\n self.update_figure()\n\n figureBorderLineStyle = pyqtProperty('QString',\n getFigureBorderLineStyle, setFigureBorderLineStyle)\n\n def getFigureBorderVisible(self):\n return self._fig_border_visible\n\n @pyqtSlot(bool)\n def setFigureBorderVisible(self, f):\n \"\"\"Set borders visible or not.\n\n Parameters\n ----------\n f : bool\n Line visible (True) or not (False).\n \"\"\"\n self._fig_border_visible = f\n self.set_border_visible(f)\n self.update_figure()\n\n figureBorderVisible = pyqtProperty(bool, getFigureBorderVisible,\n setFigureBorderVisible)\n\n def getFigureAspectRatio(self):\n return self._fig_aspect\n\n @pyqtSlot('QString')\n def setFigureAspectRatio(self, s):\n \"\"\"Set aspect ratio of the axes.\n\n Parameters\n ----------\n s : str\n Aspect ratio, 'auto', 'equal' and any number.\n \"\"\"\n try:\n float(s)\n except ValueError:\n if s in ('auto', 'equal'):\n self._fig_aspect = s\n else:\n return\n else:\n if float(s) <= 0:\n return\n self._fig_aspect = s\n finally:\n self.axes.set_aspect(self._fig_aspect)\n self.update_figure()\n\n figureAspectRatio = pyqtProperty('QString', getFigureAspectRatio,\n setFigureAspectRatio)\n\n def getTightLayoutToggle(self):\n return self._fig_tight_layout\n\n @pyqtSlot(bool)\n def setTightLayoutToggle(self, f):\n \"\"\"Toggle for the tight layout.\n\n Parameters\n ----------\n f : bool\n Tight layout toggle.\n \"\"\"\n self._fig_tight_layout = f\n if f:\n # self.figure.set_tight_layout({'pad': 0.1})\n self.figure.subplots_adjust(left=0.05, right=0.98, top=0.98, bottom=0.06)\n else:\n # self.figure.set_tight_layout({'pad': 1.2})\n self.figure.subplots_adjust(left=0.125, right=0.9, top=0.9, bottom=0.10)\n self.update_figure()\n\n figureTightLayout = pyqtProperty(bool, getTightLayoutToggle,\n setTightLayoutToggle)\n\n def getFigureXlabel(self):\n return self._fig_xlabel\n\n @pyqtSlot('QString')\n def setFigureXlabel(self, s):\n \"\"\"Set xlabel string.\n\n Parameters\n ----------\n s : str\n String for xlabel.\n \"\"\"\n self._fig_xlabel = s\n self.axes.set_xlabel(s)\n set_font(self.axes.xaxis.label, self._fig_xylabel_font)\n self.update_figure()\n\n figureXlabel = pyqtProperty('QString', getFigureXlabel, setFigureXlabel)\n\n def 
getFigureYlabel(self):\n return self._fig_ylabel\n\n @pyqtSlot('QString')\n def setFigureYlabel(self, s):\n \"\"\"Set ylabel string.\n\n Parameters\n ----------\n s : str\n String for ylabel.\n \"\"\"\n self._fig_ylabel = s\n self.axes.set_ylabel(s)\n set_font(self.axes.yaxis.label, self._fig_xylabel_font)\n self.update_figure()\n\n figureYlabel = pyqtProperty('QString', getFigureYlabel, setFigureYlabel)\n\n\n def getFigureXYlabelVisible(self):\n return self._fig_xylabel_visible\n\n @pyqtSlot(bool)\n def setFigureXYlabelVisible(self, f):\n \"\"\"Set figure xylabels visible or not.\n\n Parameters\n ----------\n f : bool\n Figure xylabels visible or not.\n \"\"\"\n self._fig_xylabel_visible = f\n self.axes.xaxis.label.set_visible(f)\n self.axes.yaxis.label.set_visible(f)\n self.update_figure()\n\n figureXYlabelVisible = pyqtProperty(bool, getFigureXYlabelVisible,\n setFigureXYlabelVisible)\n\n def getFigureTitleVisible(self):\n return self._fig_title_visible\n\n @pyqtSlot(bool)\n def setFigureTitleVisible(self, f):\n \"\"\"Set figure title visible or not.\n\n Parameters\n ----------\n f : bool\n Figure title visible or not.\n \"\"\"\n self._fig_title_visible = f\n self.axes.title.set_visible(f)\n self.update_figure()\n\n figureTitleVisible = pyqtProperty(bool, getFigureTitleVisible,\n setFigureTitleVisible)\n\n def getFigureTitle(self):\n return self._fig_title\n\n @pyqtSlot('QString')\n def setFigureTitle(self, s):\n \"\"\"Set figure title.\n\n Parameters\n ----------\n s : str\n Title for the figure.\n \"\"\"\n self._fig_title = s\n self.axes.set_title(s)\n set_font(self.axes.title, self._fig_title_font)\n self.update_figure()\n\n figureTitle = pyqtProperty('QString', getFigureTitle, setFigureTitle)\n\n def getFigureXYlabelFont(self):\n return self._fig_xylabel_font\n\n @pyqtSlot(QFont)\n def setFigureXYlabelFont(self, font):\n \"\"\"Set font for x and y labels.\n\n Parameters\n ----------\n font : QFont\n Font to set.\n \"\"\"\n self._fig_xylabel_font = font\n self.set_xylabel_font(font)\n self.update_figure()\n\n figureXYlabelFont = pyqtProperty(QFont, getFigureXYlabelFont,\n setFigureXYlabelFont)\n\n def getFigureTitleFont(self):\n return self._fig_title_font\n\n @pyqtSlot(QFont)\n def setFigureTitleFont(self, font):\n \"\"\"Set font for figure title.\n\n Parameters\n ----------\n font : QFont\n Font to set.\n \"\"\"\n self._fig_title_font = font\n self.set_title_font(font)\n self.update_figure()\n\n figureTitleFont = pyqtProperty(QFont, getFigureTitleFont,\n setFigureTitleFont)\n\n def getFigureWidth(self):\n return self._fig_width\n\n @pyqtSlot(float)\n def setFigureWidth(self, w):\n \"\"\"Set figure width in inch.\n\n Parameters\n ----------\n w : float\n Figure width in inch (>= 2.0).\n \"\"\"\n self._fig_width = max(w, 2.0)\n self.figure.set_size_inches([self._fig_width, self._fig_height])\n self.resize_figure()\n self.update_figure()\n\n figureWidth = pyqtProperty(float, getFigureWidth, setFigureWidth)\n\n def getFigureHeight(self):\n return self._fig_height\n\n @pyqtSlot(float)\n def setFigureHeight(self, h):\n \"\"\"Set figure height in inch.\n\n Parameters\n ----------\n h : float\n Figure height in inch (>= 2.0).\n \"\"\"\n self._fig_height = max(h, 2.0)\n self.figure.set_size_inches([self._fig_width, self._fig_height])\n self.resize_figure()\n self.update_figure()\n\n figureHeight = pyqtProperty(float, getFigureHeight, setFigureHeight)\n\n def getFigureDpi(self):\n return self._fig_dpi\n\n @pyqtSlot(float)\n def setFigureDpi(self, d):\n \"\"\"Set figure dpi.\n\n Parameters\n 
----------\n d : float\n Figure dpi in [50.0, 600.0].\n \"\"\"\n self._fig_dpi = min(600.0, max(d, 50.0))\n self.figure.set_dpi(d)\n self.resize_figure()\n self.update_figure()\n\n figureDPI = pyqtProperty(float, getFigureDpi, setFigureDpi)\n\n def getXLimitMin(self):\n return self._xlim_min\n\n @pyqtSlot(float)\n def setXLimitMin(self, x=None):\n \"\"\"Set minimum of xlimit.\n\n Parameters\n ----------\n x : float\n Minimum of xlimit.\n \"\"\"\n if x is None:\n x, _ = self._get_default_xlim()\n self._xlim_min = x\n xmin, xmax = self.get_xlim()\n if x < xmax:\n self.axes.set_xlim([x, xmax])\n self.update_figure()\n self.xlimitMinChanged.emit(x)\n\n figureXLimitMin = pyqtProperty(float, getXLimitMin, setXLimitMin)\n\n def getXLimitMax(self):\n return self._xlim_max\n\n @pyqtSlot(float)\n def setXLimitMax(self, x=None):\n \"\"\"Set maximum of xlimit.\n\n Parameters\n ----------\n x : float\n Maximum of xlimit.\n \"\"\"\n if x is None:\n _, x = self._get_default_xlim()\n self._xlim_max = x\n xmin, xmax = self.get_xlim()\n if x > xmin:\n self.axes.set_xlim([xmin, x])\n self.update_figure()\n self.xlimitMaxChanged.emit(x)\n\n figureXLimitMax = pyqtProperty(float, getXLimitMax, setXLimitMax)\n\n def getYLimitMin(self):\n return self._ylim_min\n\n @pyqtSlot(float)\n def setYLimitMin(self, y=None):\n \"\"\"Set minimum of ylimit.\n\n Parameters\n ----------\n y : float\n Minimum of ylimit.\n \"\"\"\n if y is None:\n y, _ = self._get_default_ylim()\n self._ylim_min = y\n ymin, ymax = self.get_ylim()\n if y < ymax:\n self.axes.set_ylim([y, ymax])\n self.update_figure()\n self.ylimitMinChanged.emit(y)\n\n figureYLimitMin = pyqtProperty(float, getYLimitMin, setYLimitMin)\n\n def getYLimitMax(self):\n return self._ylim_max\n\n @pyqtSlot(float)\n def setYLimitMax(self, y=None):\n \"\"\"Set maximum of ylimit.\n\n Parameters\n ----------\n y : float\n Maximum of ylimit.\n \"\"\"\n if y is None:\n _, y = self._get_default_ylim()\n self._ylim_max = y\n ymin, ymax = self.get_ylim()\n if y > ymin:\n self.axes.set_ylim([ymin, y])\n self.update_figure()\n self.ylimitMaxChanged.emit(y)\n\n figureYLimitMax = pyqtProperty(float, getYLimitMax, setYLimitMax)\n\n def getFigureXTicksVisible(self):\n return self._fig_xticks_visible\n\n @pyqtSlot(bool)\n def setFigureXTicksVisible(self, f):\n \"\"\"Set xticklabels visible or not.\n\n Parameters\n ----------\n f : bool\n Object visible (True) or not (False).\n \"\"\"\n self.set_ticks_visible(f, \"x\")\n self.update_figure()\n self._fig_xticks_visible = f\n\n figureXTicksVisible = pyqtProperty(bool, getFigureXTicksVisible,\n setFigureXTicksVisible)\n\n def getFigureYTicksVisible(self):\n return self._fig_yticks_visible\n\n @pyqtSlot(bool)\n def setFigureYTicksVisible(self, f):\n \"\"\"Set yticklabels visible or not.\n\n Parameters\n ----------\n f : bool\n Object visible (True) or not (False).\n \"\"\"\n self.set_ticks_visible(f, \"y\")\n self.update_figure()\n self._fig_yticks_visible = f\n\n figureYTicksVisible = pyqtProperty(bool, getFigureYTicksVisible,\n setFigureYTicksVisible)\n\n def getFigureBgColor(self):\n return self._fig_bgcolor\n\n @pyqtSlot(QColor)\n def setFigureBgColor(self, color):\n \"\"\"Set figure background color.\n\n Parameters\n ----------\n color : QColor\n Color to set.\n \"\"\"\n self._fig_bgcolor = color\n self.set_figure_color(color.getRgbF())\n self.update_figure()\n self.bgColorChanged.emit(color)\n\n figureBackgroundColor = pyqtProperty(QColor, getFigureBgColor,\n setFigureBgColor)\n\n def getFigureGridColor(self):\n return 
self._fig_grid_color\n\n @pyqtSlot(QColor)\n def setFigureGridColor(self, c, **kws):\n \"\"\"Set color for the grid line.\n\n Parameters\n ----------\n c : QColor\n Color of the grid line.\n \"\"\"\n self._fig_grid_color = c\n self.toggle_grid(\n toggle_checked=self._fig_grid_toggle,\n color=c.getRgbF(),\n **{\n k: v\n for k, v in kws.items() if k not in ('toggle_checked', 'color')\n })\n self.update_figure()\n\n figureGridColor = pyqtProperty(QColor, getFigureGridColor,\n setFigureGridColor)\n\n def getFigureGridToggle(self):\n return self._fig_grid_toggle\n\n @pyqtSlot(bool)\n def setFigureGridToggle(self, f, **kws):\n \"\"\"Toggle for the figure grid.\n\n Parameters\n ----------\n f : bool\n Figure grid toggle.\n \"\"\"\n self._fig_grid_toggle = f\n self.toggle_grid(\n toggle_checked=f,\n color=self._fig_grid_color.getRgbF(),\n **{\n k: v\n for k, v in kws.items() if k not in ('toggle_checked', 'color')\n })\n self.update_figure()\n #\n self.gridOnUpdated.emit(f)\n\n figureGridToggle = pyqtProperty(bool, getFigureGridToggle,\n setFigureGridToggle)\n\n def getFigureMTicksToggle(self):\n return self._fig_mticks_toggle\n\n @pyqtSlot(bool)\n def setFigureMTicksToggle(self, f):\n \"\"\"Toggle for the minor ticks.\n\n Note\n ----\n Before toggle on, be sure the axis scale is linear.\n\n Parameters\n ----------\n f : bool\n Minor ticks on/off toggle.\n \"\"\"\n self._fig_mticks_toggle = f\n\n xscale = self.getFigureXScale()\n yscale = self.getFigureYScale()\n if xscale != 'linear':\n self.setFigureXScale('linear')\n if yscale != 'linear':\n self.setFigureYScale('linear')\n self.toggle_mticks(f)\n if xscale != 'linear':\n self.setFigureXScale(xscale)\n if yscale != 'linear':\n self.setFigureYScale(yscale)\n\n self.update_figure()\n\n figureMTicksToggle = pyqtProperty(bool, getFigureMTicksToggle,\n setFigureMTicksToggle)\n\n def getLegendToggle(self):\n return self._legend_toggle\n\n @pyqtSlot(bool)\n def setLegendToggle(self, f):\n \"\"\"Toggle for figure legend.\n\n Parameters\n ----------\n f : bool\n Figure legend on/off toggle.\n \"\"\"\n self._legend_toggle = f\n if f:\n self._legend_box = self.axes.legend(loc=self._legend_location)\n else:\n try:\n self._legend_box.set_visible(False)\n except AttributeError:\n pass\n self.update_figure()\n #\n self.legendOnUpdated.emit(f)\n\n figureLegendToggle = pyqtProperty(bool, getLegendToggle, setLegendToggle)\n\n def getLegendLocation(self):\n return self._legend_location\n\n @pyqtSlot(int)\n def setLegendLocation(self, i):\n \"\"\"Set legend location.\n\n Parameters\n ----------\n i : int\n Index number of legend location,\n see `matplotlib.pyplot.legend <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>`_.\n \"\"\"\n self._legend_location = i\n if self._legend_toggle:\n self._legend_box = self.axes.legend(loc=i)\n self.update_figure()\n\n figureLegendLocation = pyqtProperty(int, getLegendLocation,\n setLegendLocation)\n\n def getFigureXTicksAngle(self):\n return self._fig_xticks_angle\n\n @pyqtSlot(float)\n def setFigureXTicksAngle(self, angle):\n \"\"\"Set rotation angle for the xtick labels.\n\n Parameters\n ----------\n angle : float\n Angle in degree to rotate.\n \"\"\"\n self._fig_xticks_angle = angle\n self.rotate_ticks(angle, 'x')\n self.update_figure()\n\n figureXTicksAngle = pyqtProperty(float, getFigureXTicksAngle,\n setFigureXTicksAngle)\n\n def getFigureYTicksAngle(self):\n return self._fig_yticks_angle\n\n @pyqtSlot(float)\n def setFigureYTicksAngle(self, angle):\n \"\"\"Set rotation angle for the ytick labels.\n\n 
Parameters\n ----------\n angle : float\n Angle in degree to rotate.\n \"\"\"\n self._fig_yticks_angle = angle\n self.rotate_ticks(angle, 'y')\n self.update_figure()\n\n figureYTicksAngle = pyqtProperty(float, getFigureYTicksAngle,\n setFigureYTicksAngle)\n\n def getFigureXYticksFont(self):\n return self._fig_xyticks_font\n\n @pyqtSlot(QFont)\n def setFigureXYticksFont(self, font):\n \"\"\"Set font for the tick labels.\n\n Parameters\n ----------\n font : QFont\n Font to set.\n \"\"\"\n self._fig_xyticks_font = font\n self.set_xyticks_font(font)\n self.update_figure()\n\n figureXYticksFont = pyqtProperty(QFont, getFigureXYticksFont,\n setFigureXYticksFont)\n\n def getFigureXYticksColor(self):\n return self._fig_ticks_color\n\n @pyqtSlot(QColor)\n def setFigureXYticksColor(self, color):\n \"\"\"Set color for the ticks.\n\n Parameters\n ----------\n color : QColor\n Color to set.\n \"\"\"\n self._fig_ticks_color = color\n self.set_ticks_color(color.getRgbF())\n self.update_figure()\n\n figureXYticksColor = pyqtProperty(QColor, getFigureXYticksColor,\n setFigureXYticksColor)\n\n def getFigureXScale(self):\n return self._fig_xscale\n\n @pyqtSlot('QString')\n def setFigureXScale(self, s):\n \"\"\"Set x-axis scale.\n\n Parameters\n ----------\n s : str\n Scale type, 'linear', 'log', 'symlog', 'logit', etc.\n \"\"\"\n self._fig_xscale = s\n self.axes.set_xscale(s)\n self.update_figure()\n\n figureXScale = pyqtProperty('QString', getFigureXScale, setFigureXScale)\n\n def getFigureYScale(self):\n return self._fig_yscale\n\n @pyqtSlot('QString')\n def setFigureYScale(self, s):\n \"\"\"Set y-axis scale.\n\n Parameters\n ----------\n s : str\n Scale type, 'linear', 'log', 'symlog', 'logit', etc.\n \"\"\"\n self._fig_yscale = s\n self.axes.set_yscale(s)\n self.update_figure()\n\n figureYScale = pyqtProperty('QString', getFigureYScale, setFigureYScale)\n\n def getToolbarToggle(self):\n return self._fig_tb_toggle\n\n @pyqtSlot(bool)\n def setToolbarToggle(self, f):\n \"\"\"Toggle for the mpl toolbar.\n\n Parameters\n ----------\n f : bool\n Turn on/off mpl toolbar.\n \"\"\"\n self._fig_tb_toggle = f\n w = self._handlers.get('w_mpl_tools', None)\n if w is not None and not f:\n w.floatable_changed.emit(True)\n w.close()\n else:\n self.__show_mpl_tools()\n self.update_figure()\n\n figureToolbarToggle = pyqtProperty(bool, getToolbarToggle, setToolbarToggle)\n\n def _get_default_xlim(self):\n \"\"\"limit range from data\n \"\"\"\n try:\n xmin, xmax = self._x_data.min(), self._x_data.max()\n except:\n xmin, xmax = self.axes.get_xlim()\n x0, xhw = (xmin + xmax) * 0.5, (xmax - xmin) * 0.5\n return x0 - xhw * 1.1, x0 + xhw * 1.1\n\n def get_xlim(self):\n return self.axes.get_xlim()\n\n def _get_default_ylim(self):\n \"\"\"limit range from data\n \"\"\"\n try:\n ymin, ymax = self._y_data.min(), self._y_data.max()\n except:\n ymin, ymax = self.axes.get_ylim()\n y0, yhw = (ymin + ymax) * 0.5, (ymax - ymin) * 0.5\n return y0 - yhw * 1.1, y0 + yhw * 1.1\n\n def get_ylim(self):\n return self.axes.get_ylim()\n\n @pyqtSlot('QString', 'QString')\n def setXTickFormat(self, ftype, cfmt):\n if ftype == 'Auto':\n self._setXTickAutoFormat(ftype)\n elif ftype == 'Custom':\n self._setXTickCustomFormat(ftype, cfmt)\n\n def _setXTickAutoFormat(self, ftype):\n \"\"\"Set x-axis ticks formatter with Auto style.\n\n Parameters\n ----------\n ftype : str\n Type of formatter, 'Auto'.\n \"\"\"\n self._fig_xtick_formatter_type = ftype\n if self._fig_ticks_enable_mathtext:\n formatter = AUTOFORMATTER_MATHTEXT\n else:\n formatter = 
AUTOFORMATTER\n self._fig_xtick_formatter = formatter\n self.axes.xaxis.set_major_formatter(formatter)\n self.update_figure()\n\n def _setXTickCustomFormat(self, ftype, cfmt):\n \"\"\"Set x-axis ticks formatter with custom style.\n\n Parameters\n ----------\n ftype : str\n Type of formatter, 'Custom'.\n cfmt : str\n C style string specifier.\n \"\"\"\n self._fig_xtick_formatter_type = ftype\n self._fig_xtick_cfmt = cfmt\n formatter = generate_formatter(cfmt, math_text=self._fig_ticks_enable_mathtext)\n self._fig_xtick_formatter = formatter\n self.axes.xaxis.set_major_formatter(formatter)\n self.update_figure()\n\n @pyqtSlot('QString', 'QString')\n def setYTickFormat(self, ftype, cfmt):\n if ftype == 'Auto':\n self._setYTickAutoFormat(ftype)\n elif ftype == 'Custom':\n self._setYTickCustomFormat(ftype, cfmt)\n\n def _setYTickAutoFormat(self, ftype):\n \"\"\"Set y-axis ticks formatter with Auto style.\n\n Parameters\n ----------\n ftype : str\n Type of formatter, 'Auto'.\n \"\"\"\n self._fig_ytick_formatter_type = ftype\n if self._fig_ticks_enable_mathtext:\n formatter = AUTOFORMATTER_MATHTEXT\n else:\n formatter = AUTOFORMATTER\n self._fig_ytick_formatter = formatter\n self.axes.yaxis.set_major_formatter(formatter)\n self.update_figure()\n\n def _setYTickCustomFormat(self, ftype, cfmt):\n \"\"\"Set y-axis ticks formatter with custom style.\n\n Parameters\n ----------\n ftype : str\n Type of formatter, 'Custom'.\n cfmt : str\n C style string specifier.\n \"\"\"\n self._fig_ytick_formatter_type = ftype\n self._fig_ytick_cfmt = cfmt\n formatter = generate_formatter(cfmt, math_text=self._fig_ticks_enable_mathtext)\n self._fig_ytick_formatter = formatter\n self.axes.yaxis.set_major_formatter(formatter)\n self.update_figure()\n\n def rotate_ticks(self, angle, axis):\n \"\"\"Rotate *axis* ticks by *angle* in degree.\n \"\"\"\n lbls = getattr(self.axes, \"get_{}ticklabels\".format(axis))()\n for o in lbls:\n o.set_rotation(angle)\n\n def set_autoscale(self, axis='both'):\n self.axes.relim(visible_only=True)\n self.axes.autoscale(axis=axis)\n self.update_figure()\n\n def process_keyshort_combo(self, k1, k2):\n \"\"\"Override this method to define combo keyshorts.\n \"\"\"\n # print(\"Capture key combo: \", k1, k2)\n if k1 == 'a' and k2 == 'x':\n # auto xscale\n self.set_autoscale('x')\n elif k1 == 'a' and k2 == 'y':\n # auto yscale\n self.set_autoscale('y')\n elif k1 == 'a' and k2 == 'a':\n if self.widget_type == 'image':\n self.setAutoColorLimit(not self.getAutoColorLimit())\n # turn on/off autoscale\n self.setFigureAutoScale(not self.getFigureAutoScale())\n elif k1 == 'a' and k2 == 'c' and self.widget_type == 'image':\n # auto color range\n self.on_auto_clim()\n elif k1 == 'shift' and k2 == '?':\n # help msgbox\n self.kbd_help()\n elif k1 == 'c' and k2 == 'c':\n self._create_ctxtmenu().findChild(QAction, 'config_action').triggered.emit()\n elif k1 == 't' and k2 == 't':\n self._create_ctxtmenu().findChild(QAction, 'tb_action').triggered.emit()\n elif k1 == 'd' and k2 == 's' and self.widget_type in ('curve', 'errorbar'):\n # circulate curve drawstyle\n self.setLineDrawStyle(\n cycle_list_next(list(LINE_DS_VALS), self.getLineDrawStyle()))\n\n def process_keyshort(self, k):\n \"\"\"Override this method to define keyshorts.\n \"\"\"\n # print(\"Capture key: \", k)\n if k == 'g':\n # turn on/off grid\n self.setFigureGridToggle(not self.getFigureGridToggle())\n elif k == 'a': # and self.widget_type != 'image':\n # autoscale\n self.set_autoscale()\n elif k == 'm':\n # turn on/off mticks\n 
self.setFigureMTicksToggle(not self.getFigureMTicksToggle())\n elif k == 't':\n # turn on/off tightlayout\n self.setTightLayoutToggle(not self.getTightLayoutToggle())\n elif k == 'l':\n # turn on/off legend\n self.setLegendToggle(not self.getLegendToggle())\n elif k == 'r':\n # force refresh\n self.force_update()\n elif k == 's' and self.widget_type != 'image':\n # circulate y-axis scale type\n self.setFigureYScale(\n cycle_list_next(SCALE_STY_VALS, self.getFigureYScale()))\n elif k == 'c' and self.widget_type == 'image':\n # circulate image colormap\n self.setColorMap(\n cycle_list_next(ALL_COLORMAPS, self.getColorMap()))\n\n def kbd_help(self):\n \"\"\"Help message box for keyboard shortcuts.\n \"\"\"\n from .kbdhelpdialog import KbdHelpDialog\n w = KbdHelpDialog(self)\n w.setWindowTitle(\"Keyboard Shortcuts Help\")\n w.exec_()\n\n def set_xlimit(self, *args):\n \"\"\"Set xlimit with new limit, e.g. `set_xlimit(xmin, xmax)`.\n\n See Also\n --------\n setXLimitMin, setXLimitMax\n \"\"\"\n self.axes.set_xlim(args)\n self.update_figure()\n\n def set_ylimit(self, *args):\n \"\"\"Set ylimit with new limit.\n\n See Also\n --------\n setYLimitMin, setYLimitMax\n \"\"\"\n self.axes.set_ylim(args)\n self.update_figure()\n\n\nclass MatplotlibBaseWidget(BasePlotWidget):\n \"\"\"MatplotlibBaseWidget(BasePlotWidget)\n \"\"\"\n\n def __init__(self, parent=None):\n super(MatplotlibBaseWidget, self).__init__(parent)\n self.widget_type = 'base'\n\n def init_figure(self):\n pass\n\n @pyqtSlot()\n def on_config(self):\n from .mplconfig import MatplotlibConfigPanel\n config_panel = MatplotlibConfigPanel(self)\n config_panel.exec_()\n\n def update_figure(self):\n if self._fig_auto_scale:\n try:\n self.axes.relim()\n except:\n pass\n else:\n self.axes.autoscale()\n self.canvas.draw_idle()\n\n\nclass MatplotlibCMapWidget(BasePlotWidget):\n def __init__(self, parent=None):\n super(MatplotlibCMapWidget, self).__init__(parent, False)\n self.figure.set_size_inches((self.getFigureWidth(), 0.2))\n self.figure.set_tight_layout(True)\n self.figure.subplots_adjust(\n top=0.9999, bottom=0.0001, left=0.0001, right=0.9999)\n self.axes.set_axis_off()\n\n # reverse cmap flag, '' or '_r'\n self._rcmap = ''\n\n def init_figure(self):\n gradient = np.linspace(0, 1, 256)\n gradient = np.vstack((gradient, gradient))\n self.im = self.axes.imshow(gradient, aspect='auto')\n\n def set_cmap(self, c):\n if not is_cmap_valid(c):\n return\n self._cmap = c\n self.im.set_cmap(self._cmap + self._rcmap)\n self.update_figure()\n\n def set_reverse_cmap(self, f):\n self._rcmap = '_r' if f else ''\n self.set_cmap(self._cmap)\n\n\nif __name__ == '__main__':\n from PyQt5.QtWidgets import QApplication\n\n app = QApplication([])\n window = MatplotlibBaseWidget()\n window.show()\n\n app.exec_()\n"
] | [
[
"numpy.vstack",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"matplotlib.figure.Figure",
"matplotlib.ticker.AutoMinorLocator",
"numpy.linspace",
"matplotlib.ticker.NullLocator"
]
] |
msusicky/ockovani-covid | [
"2835943b5796b04a3542782ecda125b6766cd317"
] | [
"app/fetcher/used_fetcher.py"
] | [
"import pandas as pd\n\nfrom app import db, app\nfrom app.fetcher.fetcher import Fetcher\nfrom app.models import OckovaniSpotreba, OckovaciMisto\n\n\nclass UsedFetcher(Fetcher):\n \"\"\"\n Class for updating used vaccines table.\n \"\"\"\n\n USED_CSV = 'https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/ockovani-spotreba.csv'\n\n def __init__(self):\n super().__init__(OckovaniSpotreba.__tablename__, self.USED_CSV)\n\n def fetch(self, import_id: int) -> None:\n df = pd.read_csv(self._url)\n\n df = df.rename(columns={'ockovaci_misto_kod': 'ockovaci_misto_id'})\n\n df['kraj_nuts_kod'] = df['kraj_nuts_kod'].fillna('-')\n\n df['pouzite_davky'] = df['pouzite_davky'].fillna(0).astype('int')\n df['znehodnocene_davky'] = df['znehodnocene_davky'].fillna(0).astype('int')\n\n df = df.groupby(['datum', 'ockovaci_misto_id', 'ockovaci_latka', 'vyrobce'], dropna=False).sum().reset_index()\n\n # filter out missing centers\n size = len(df)\n mista_ids = [r[0] for r in db.session.query(OckovaciMisto.id).all()]\n df = df[df['ockovaci_misto_id'].isin(mista_ids)]\n\n if size > len(df):\n app.logger.warning(\"Some centers doesn't exist - {} rows skipped.\".format(size - len(df)))\n\n self._truncate()\n\n df.to_sql(self._table, db.engine, if_exists='append', index=False, method=Fetcher._psql_insert_copy)\n"
] | [
[
"pandas.read_csv"
]
] |
rodrigodelazcano/AerialRobotics | [
"44d7929721eaf3c817cf7f70966e805b36f66981"
] | [
"assignment1/src/offboard_UMD/script/plot_position.py"
] | [
"import numpy as np\nimport rosbag\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom tf.transformations import euler_from_quaternion\n\n# Read bag file\nbag = rosbag.Bag('2021-09-21-19-57-22.bag')\n\nx = []\ny = []\nz = []\nroll = []\npitch = []\nyaw = []\ntime = []\ncycles = []\ncycle_time = []\n\ninit_time = 0\n\nfor topic, msg, t in bag.read_messages(topics=['/mavros/local_position/pose', '/mavros/path_cycle']):\n if topic == '/mavros/local_position/pose':\n current_time = t.to_sec()\n x.append(msg.pose.position.x)\n y.append(msg.pose.position.y)\n z.append(msg.pose.position.z)\n\n orientation_q = msg.pose.orientation\n orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]\n (r, p, ya) = euler_from_quaternion (orientation_list)\n\n roll.append(r)\n pitch.append(p)\n yaw.append(ya)\n\n if init_time == 0:\n time.append(0)\n init_time = current_time\n else:\n time.append(current_time - init_time)\n else:\n cycles.append(msg.cycle)\n cycle_time.append(t.to_sec() - init_time)\n\ndata = np.stack((x, y, z, roll, pitch, yaw, time))\n\ncycles.append(5)\ncycle_step = 0\ncycle_data = {}\npast_idx = 0\n\nfor idx, tim in enumerate(time):\n if cycle_time[cycle_step] < tim:\n cycle_data['cycle_{}'.format(cycle_step)] = data[:, past_idx:idx]\n cycle_step += 1\n past_idx = idx\n if cycle_step > 4:\n cycle_data['cycle_{}'.format(cycle_step)] = data[:, idx+1:]\n break\n\n## Plot position ##\n##################\n\nfig1, ax1 = plt.subplots(figsize=(20,20))\n\nax1.set_ylim([-3, 33])\nax1.set_xlim([0, 320])\nax1.plot(time, x, linewidth=2.5, label='x')\nax1.plot(time, y, linewidth=2.5, label='y')\nax1.plot(time, z, linewidth=2.5, label='z')\nax1.set_title(\"XYZ Position\", fontweight = 'heavy')\nax1.set(xlabel=\"Time [s]\", ylabel=\"Distance [m]\")\nax1.legend(shadow=True, fancybox=True, loc='upper right')\n\nfor value in [5, 10, 25]:\n ax1.axhline(y=value, color='k', linestyle='--', alpha=0.4)\n\n## Plot orientation ##\n######################\n\nfig2, ax2 = plt.subplots(figsize=(20,20))\n\nax2.set_ylim([-1, 1.5])\nax2.set_xlim([0, 320])\nax2.plot(time, roll, linewidth=2.5, label='roll')\nax2.plot(time, pitch, linewidth=2.5, label='pitch')\nax2.plot(time, yaw, linewidth=2.5, label='yaw')\nax2.set_title(\"RPY Orientation\", fontweight = 'heavy')\nax2.set(xlabel=\"Time [s]\", ylabel=\"Angle [rad]\")\nax2.legend(shadow=True, fancybox=True, loc='upper right')\n\nlast_tim = 0\nfor c, tim in enumerate(cycle_time):\n ax1.axvline(x=tim, color='k', linestyle='--', alpha=0.4)\n ax1.annotate(s='', xy=(last_tim,28), xytext=(tim,28), arrowprops=dict(arrowstyle='<->'))\n\n ax2.axvline(x=tim, color='k', linestyle='--', alpha=0.4)\n ax2.annotate(s='', xy=(last_tim,1), xytext=(tim,1), arrowprops=dict(arrowstyle='<->'))\n\n if c == 0:\n l = \"Takeoff\"\n else:\n l = \"Cycle {}\".format(c)\n\n ax1.text((tim-last_tim)/2 + last_tim, 29.1, l, horizontalalignment='center',\n verticalalignment='center', weight='bold')\n\n ax2.text((tim-last_tim)/2 + last_tim, 1.1, l, horizontalalignment='center',\n verticalalignment='center', weight='bold')\n\n \n last_tim = tim\n\nax1.annotate(s='', xy=(last_tim,28), xytext=(data[6, -1],28), arrowprops=dict(arrowstyle='<->'))\nax1.text((data[6, -1]-last_tim)/2 + last_tim, 29.5, 'Landing', horizontalalignment='center',\n verticalalignment='center', fontsize=10, weight='bold')\n\nax2.annotate(s='', xy=(last_tim,1), xytext=(data[6, -1],1), arrowprops=dict(arrowstyle='<->'))\nax2.text((data[6, -1]-last_tim)/2 + last_tim, 1.1, 'Landing', 
horizontalalignment='center',\n verticalalignment='center', fontsize=10, weight='bold')\n\n\n## Position 3D plot ##\n######################\n\nxs = [0, 0, 10, 10, 10]\nys = [0, 0, 0, 0, 5]\nzs = [0, 10, 10, 25, 25]\nfig3 = plt.figure(figsize=(20, 20))\nax3 = Axes3D(fig3, alpha=0.1)\nax3.set_title(\"3D XYZ Trajectory\", fontweight = 'heavy')\n\nfor c in cycles:\n data = cycle_data['cycle_{}'.format(c)]\n if c > 0 and c < 5:\n l = 'cycle_{}'.format(c)\n elif c == 0:\n l = 'takeoff'\n else:\n l = 'landing'\n ax3.plot3D(data[0, :], data[1, :], data[2, :], label=l, linewidth=2.5)\nax3.legend(shadow=True, fancybox=True)\nax3.scatter(xs, ys, zs, s=35, c='k')\nfor xt, yt, zt in zip(xs, ys, zs):\n ax3.text3D(xt + 0.1, yt + 0.1, zt + 0.1, '({},{},{})'.format(xt, yt, zt), \n fontsize=10, fontweight = 'heavy')\nax3.set(xlabel=\"X [m]\", ylabel=\"Y [m]\", zlabel=\"Z [m]\")\n\n## Plot trajectories in X-Y X-Z & Y-Z planes ##\n###############################################\n\nfig4 = plt.figure(figsize=(20,20))\nax4 = fig4.add_subplot(131)\nax5 = fig4.add_subplot(132)\nax6 = fig4.add_subplot(133)\n\nfor c in cycles:\n data = cycle_data['cycle_{}'.format(c)]\n if c > 0 and c < 5:\n l = 'cycle_{}'.format(c)\n elif c == 0:\n l = 'takeoff'\n else:\n l = 'landing'\n ax4.plot(data[0, :], data[1, :], label=l, linewidth=2.5)\n ax5.plot(data[0, :], data[2, :], label=l, linewidth=2.5)\n ax6.plot(data[1, :], data[2, :], label=l, linewidth=2.5)\n\nax4.set_title(\"Trajectory XY\", fontweight = 'heavy')\nax4.set(xlabel=\"X [m]\", ylabel=\"Y [m]\")\nax4.legend(shadow=True, fancybox=True, loc='upper left')\n\nax5.set_title(\"Trajectory XZ\", fontweight = 'heavy')\nax5.set(xlabel=\"X [m]\", ylabel=\"Z [m]\")\nax5.legend(shadow=True, fancybox=True, loc='lower right')\n\nax6.set_title(\"Trajectory YZ\", fontweight = 'heavy')\nax6.set(xlabel=\"Y [m]\", ylabel=\"Z [m]\")\nax6.legend(shadow=True, fancybox=True, loc='lower right')\n\nfor xt, yt, zt in zip(xs, ys, zs):\n ax4.text(xt + 0.2, yt + 0.2, '({},{})'.format(xt, yt), \n fontsize=10, fontweight = 'heavy')\n ax5.text(xt + 0.2, zt + 0.2, '({},{})'.format(xt, zt), \n fontsize=10, fontweight = 'heavy')\n ax6.text(yt + 0.2, zt + 0.2, '({},{})'.format(yt, zt), \n fontsize=10, fontweight = 'heavy')\n\nplt.show()"
] | [
[
"numpy.stack",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
kizill/coremltools | [
"11e143089a66ee219ce3a2ed98aa1aae794d4794"
] | [
"coremltools/models/_graph_visualization.py"
] | [
"# Copyright (c) 2017, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\n\"\"\"\nFunctions related to graph visualization of mlmodels\n\"\"\"\n\nimport ast as _ast\nimport json as _json\nimport os as _os\nimport numpy as _np\nfrom ._infer_shapes_nn_mlmodel import infer_shapes as _infer_shapes\nfrom coremltools.proto import NeuralNetwork_pb2 as _NeuralNetwork_pb2\n\n\ndef _calculate_edges(cy_nodes, cy_edges, shape_dict=None):\n \"\"\"\n\n Parameters\n ----------\n cy_nodes: list of nodes for graph\n cy_edges: list of edges to be updated for graph\n shape_dict: shape_dict required for inferring shape information\n\n Returns\n -------\n\n cy_nodes: list of nodes for graph\n cy_edges: list of edges to be updated for graph\n\n \"\"\"\n node_len = len(cy_nodes)\n\n for upper_index in range(0, node_len):\n for lower_index in range(upper_index + 1, node_len):\n\n if 'outputs' in cy_nodes[upper_index]['data']['info'].keys() and \\\n 'inputs' in cy_nodes[upper_index]['data']['info'].keys() \\\n and 'outputs' in cy_nodes[lower_index]['data']['info'].keys() \\\n and 'inputs' in cy_nodes[lower_index]['data']['info'].keys():\n outputs = _ast.literal_eval(\n cy_nodes[upper_index]['data']['info']['outputs']\n )\n inputs = _ast.literal_eval(\n cy_nodes[lower_index]['data']['info']['inputs']\n )\n for output in outputs:\n if output in inputs:\n if shape_dict is None or output not in shape_dict.keys():\n label = None\n else:\n label = str(shape_dict[output])\n\n cy_edges.append(\n {\n 'data':{'id':\n '{}.{}.{}'.format(\n output,\n cy_nodes[upper_index]['data']['id'],\n cy_nodes[lower_index]['data']['id']\n ),\n 'source': cy_nodes[upper_index]['data']['id'],\n 'target': cy_nodes[lower_index]['data']['id'],\n 'label': label,\n 'shape': label\n }\n }\n )\n\n return cy_nodes, cy_edges\n\n\ndef _layer_specific_info(layer):\n \"\"\"\n\n Parameters\n ----------\n layer : Can be one of : 'activation', 'add', 'average', 'batchnorm',\n 'biDirectionalLSTM', 'bias', 'concat', 'convolution', 'crop', 'dot',\n 'embedding', 'flatten', 'gru', 'innerProduct', 'input', 'l2normalize',\n 'loadConstant', 'lrn', 'max', 'min', 'multiply', 'mvn', 'name', 'output',\n 'padding', permute', 'pooling', 'reduce', 'reorganizeData', 'reshape',\n 'scale', 'sequenceRepeat', 'simpleRecurrent', 'slice', 'softmax', 'split',\n 'unary', 'uniDirectionalLSTM', 'upsample'\n\n Returns\n -------\n info : info specific to layer type\n\n \"\"\"\n if layer.WhichOneof('layer') == 'convolution':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'outputChannels': _json.dumps(str(layer.convolution.outputChannels)),\n 'kernelChannels': _json.dumps(str(layer.convolution.kernelChannels)),\n 'groups': _json.dumps(str(layer.convolution.nGroups)),\n 'kernelSize': _json.dumps(str(layer.convolution.kernelSize)),\n 'stride': _json.dumps(str(layer.convolution.stride)),\n 'dilationFactor': _json.dumps(str(layer.convolution.dilationFactor)),\n 'isDeconvolution': _json.dumps(str(layer.convolution.isDeconvolution)),\n 'paddingType' : _json.dumps(layer.convolution.WhichOneof('ConvolutionPaddingType')),\n 'desc': 'A layer that performs spatial convolution'\n }\n if _json.dumps(layer.convolution.isDeconvolution) == 'true':\n info['type'] = 'deconvolution'\n info['desc'] = 'A layer that performs spatial deconvolution'\n\n elif layer.WhichOneof('layer') == 'activation':\n params = layer.activation\n act_type = 
params.WhichOneof('NonlinearityType')\n info = {\n 'type': layer.WhichOneof('layer'),\n 'activationType': act_type,\n 'desc': 'Applies specified type of activation function to input.'\n }\n if act_type == 'linear':\n info['alpha'] = _json.dumps(str(params.linear.alpha))\n info['beta'] = _json.dumps(str(params.linear.beta))\n if act_type == 'leakyReLU':\n info['alpha'] = _json.dumps(str(params.leakyReLU.alpha))\n if act_type == 'thresholdedReLU':\n info['alpha'] = _json.dumps(str(params.thresholdedReLU.alpha))\n if act_type == 'scaledTanh':\n info['alpha'] = _json.dumps(str(params.scaledTanh.alpha))\n info['beta'] = _json.dumps(str(params.scaledTanh.beta))\n if act_type == 'sigmoidHard':\n info['alpha'] = _json.dumps(str(params.sigmoidHard.alpha))\n info['beta'] = _json.dumps(str(params.sigmoidHard.beta))\n if act_type == 'ELU':\n info['alpha'] = _json.dumps(str(params.ELU.alpha))\n\n elif layer.WhichOneof('layer') == 'pooling':\n params = layer.pooling\n paddingType = params.WhichOneof('PoolingPaddingType')\n info = {\n 'type': layer.WhichOneof('layer'),\n 'desc': 'Spatial Pooling layer to reduce dimensions of input using the '\n 'specified kernel size and type.'\n }\n if params.globalPooling:\n info['globalPooling'] = 'True'\n info['poolingType'] = 'global pooling'\n else:\n info['poolingType'] = _json.dumps(_NeuralNetwork_pb2.PoolingLayerParams.PoolingType.Name(params.type))\n info['stride'] = _json.dumps(str(params.stride))\n info['kernelSize'] = _json.dumps(str(params.kernelSize))\n info['paddingType'] = _json.dumps(paddingType)\n\n elif layer.WhichOneof('layer') == 'add':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'alpha': _json.dumps(str(layer.add.alpha)),\n 'desc': 'A layer that performs elementwise addition.'\n }\n elif layer.WhichOneof('layer') == 'batchnorm':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'channels': _json.dumps(str(layer.batchnorm.channels)),\n 'computeMeanVar': _json.dumps(str(layer.batchnorm.computeMeanVar)),\n 'instanceNormalization': _json.dumps(str(layer.batchnorm.instanceNormalization)),\n 'desc': 'A layer that performs batch normalization, \\n'\n 'which is performed along the channel axis, \\n'\n 'and repeated along the other axes, if present.'\n }\n elif layer.WhichOneof('layer') == 'biDirectionalLSTM':\n forward_activations = \"\"\n for activation in layer.biDirectionalLSTM.activationsForwardLSTM:\n forward_activations += str(activation)[:-5] + \", \"\n backward_activations = \"\"\n for activation in layer.biDirectionalLSTM.activationsBackwardLSTM:\n backward_activations += str(activation)[:-5] + \", \"\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputVectorSize': _json.dumps(str(layer.biDirectionalLSTM.inputVectorSize)),\n 'outputVectorSize': _json.dumps(str(layer.biDirectionalLSTM.outputVectorSize)),\n 'forward_activations': _json.dumps(forward_activations),\n 'backward_activations': _json.dumps(backward_activations),\n 'lstm_params': _json.dumps(str(layer.biDirectionalLSTM.params)),\n 'desc': 'Bidirectional long short-term memory (LSTM) layer\\n'\n 'The first LSTM operates on the input sequence in the forward direction.\\n'\n 'The second LSTM operates on the input sequence in the reverse direction.'\n }\n elif layer.WhichOneof('layer') == 'uniDirectionalLSTM':\n activations = \"\"\n for activation in layer.uniDirectionalLSTM.activations:\n activations += str(activation)[:-5] + \", \"\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputVectorSize': _json.dumps(str(layer.uniDirectionalLSTM.inputVectorSize)),\n 'outputVectorSize': 
_json.dumps(str(layer.uniDirectionalLSTM.outputVectorSize)),\n 'activations': _json.dumps(activations),\n 'lstm_params': _json.dumps(str(layer.uniDirectionalLSTM.params)),\n 'reverse_input': _json.dumps(str(layer.uniDirectionalLSTM.reverseInput)),\n 'desc': 'A unidirectional long short-term memory (LSTM) layer.'\n\n }\n elif layer.WhichOneof('layer') == 'gru':\n activations = \"\"\n for activation in layer.gru.activations:\n activations += str(activation)[:-5] + \", \"\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputVectorSize': _json.dumps(str(layer.gru.inputVectorSize)),\n 'outputVectorSize': _json.dumps(str(layer.gru.outputVectorSize)),\n 'activations': _json.dumps(activations),\n 'hasBiasVectors': _json.dumps(str(layer.gru.hasBiasVectors)),\n 'reverseInput': _json.dumps(str(layer.gru.reverseInput)),\n 'sequenceOutput': _json.dumps(str(layer.gru.sequenceOutput)),\n 'desc': 'Gated-Recurrent Unit (GRU) Layer.\\n'\n\n }\n elif layer.WhichOneof('layer') == 'simpleRecurrent':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputVectorSize': _json.dumps(str(layer.simpleRecurrent.inputVectorSize)),\n 'outputVectorSize': _json.dumps(str(layer.simpleRecurrent.outputVectorSize)),\n 'activation': _json.dumps(str(layer.simpleRecurrent.activation)),\n 'hasBiasVector': _json.dumps(str(layer.simpleRecurrent.hasBiasVector)),\n 'reverseInput': _json.dumps(str(layer.simpleRecurrent.reverseInput)),\n 'sequenceOutput': _json.dumps(str(layer.simpleRecurrent.sequenceOutput)),\n 'desc': 'A simple recurrent layer.'\n }\n elif layer.WhichOneof('layer') == 'bias':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'shape': _json.dumps(str(layer.bias.shape)),\n 'desc': 'A layer that performs elementwise addition of a bias,\\n'\n 'which is broadcasted to match the input shape.'\n }\n elif layer.WhichOneof('layer') == 'concat':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'sequenceConcat': _json.dumps(str(layer.concat.sequenceConcat)),\n 'desc': 'A layer that concatenates along the channel axis (default) or sequence axis.'\n }\n elif layer.WhichOneof('layer') == 'crop':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'cropAmounts': _json.dumps(str(layer.crop.cropAmounts)),\n 'offset': _json.dumps(str(layer.crop.offset)),\n 'desc': 'A layer that crops the spatial dimensions of an input.\\n'\n 'If two inputs are provided, the shape of the second '\n 'input is used as the reference shape.'\n }\n elif layer.WhichOneof('layer') == 'dot':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'cosineSimilarity': _json.dumps(str(layer.dot.cosineSimilarity)),\n 'desc': 'If true, inputs are normalized first, '\n 'thereby computing the cosine similarity.'\n }\n elif layer.WhichOneof('layer') == 'embedding':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputDim': _json.dumps(str(layer.embedding.inputDim)),\n 'outputChannels': _json.dumps(str(layer.embedding.outputChannels)),\n 'hasBias': _json.dumps(str(layer.embedding.inputDim)),\n 'desc': 'A layer that performs a matrix lookup and optionally adds a bias.'\n }\n elif layer.WhichOneof('layer') == 'flatten':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'mode': _json.dumps(_NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Name(layer.flatten.mode)),\n 'desc': 'A layer that flattens the input.'\n }\n elif layer.WhichOneof('layer') == 'innerProduct':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'inputChannels': _json.dumps(str(layer.innerProduct.inputChannels)),\n 'outputChannels': _json.dumps(str(layer.innerProduct.outputChannels)),\n 'hasBias': 
_json.dumps(str(layer.innerProduct.hasBias)),\n 'desc': 'A layer that performs a matrix vector product.\\n'\n 'This is equivalent to a fully-connected, or dense layer.'\n }\n elif layer.WhichOneof('layer') == 'l2normalize':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'epsilon': _json.dumps(str(layer.l2normalize.epsilon)),\n 'desc': 'A layer that performs L2 normalization, i.e. divides by the \\n'\n 'the square root of the sum of squares of all elements of input.'\n }\n elif layer.WhichOneof('layer') == 'loadConstant':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'shape': _json.dumps(str(layer.loadConstant.shape)),\n 'desc': 'The shape of the constant to be loaded'\n }\n elif layer.WhichOneof('layer') == 'lrn':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'alpha': _json.dumps(str(layer.lrn.alpha)),\n 'beta': _json.dumps(str(layer.lrn.beta)),\n 'localSize': _json.dumps(str(layer.lrn.localSize)),\n 'k': _json.dumps(str(layer.lrn.k)),\n 'desc': 'A layer that performs local response normalization (LRN).'\n }\n elif layer.WhichOneof('layer') == 'multiply':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'alpha': _json.dumps(str(layer.multiply.alpha)),\n 'desc': 'A layer that performs elementwise multiplication.'\n }\n elif layer.WhichOneof('layer') == 'mvn':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'acrossChannels': _json.dumps(str(layer.mvn.acrossChannels)),\n 'normalizeVariance': _json.dumps(str(layer.mvn.normalizeVariance)),\n 'epsilon': _json.dumps(str(layer.mvn.epsilon)),\n 'desc': 'A layer that performs mean variance normalization.'\n }\n elif layer.WhichOneof('layer') == 'padding':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'paddingAmounts': _json.dumps(str(layer.padding.paddingAmounts)),\n 'paddingType': _json.dumps(str(layer.padding.WhichOneof('PaddingType'))),\n 'desc': 'Fill a constant value in the padded region.'\n }\n elif layer.WhichOneof('layer') == 'permute':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'axis': _json.dumps(str(layer.permute.axis)),\n 'desc': 'A layer that rearranges the dimensions and data of an input.'\n }\n elif layer.WhichOneof('layer') == 'reduce':\n params = layer.reduce\n info = {\n 'type': layer.WhichOneof('layer'),\n 'mode': _json.dumps(str(params.mode)),\n 'epsilon': _json.dumps(str(params.epsilon)),\n 'axis': _json.dumps(_NeuralNetwork_pb2.ReduceLayerParams.ReduceAxis.Name(params.axis)),\n 'desc': 'A layer that reduces the input using a specified operation.'\n }\n elif layer.WhichOneof('layer') == 'reorganizeData':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'mode': _json.dumps(_NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Name(layer.reorganizeData.mode)),\n 'blockSize': _json.dumps(str(layer.reorganizeData.blockSize)),\n 'desc': 'A layer that reorganizes data in the input in: \\n'\n '1. SPACE_TO_DEPTH\\n'\n '2. 
DEPTH_TO_SPACE'\n }\n elif layer.WhichOneof('layer') == 'reshape':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'mode': _json.dumps(_NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Name(layer.reshape.mode)),\n 'targetShape': _json.dumps(str(layer.reshape.targetShape)),\n 'desc': 'A layer that recasts the input into a new shape.'\n }\n elif layer.WhichOneof('layer') == 'scale':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'shapeScale': _json.dumps(str(layer.scale.shapeScale)),\n 'hasBias': _json.dumps(str(layer.scale.hasBias)),\n 'shapeBias': _json.dumps(str(layer.scale.shapeBias)),\n 'desc': 'A layer that performs elmentwise multiplication by a scale factor\\n'\n 'and optionally adds a bias;'\n }\n elif layer.WhichOneof('layer') == 'sequenceRepeat':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'nRepetitions': _json.dumps(str(layer.sequenceRepeat.nRepetitions)),\n 'desc': 'A layer that repeats a sequence.'\n }\n elif layer.WhichOneof('layer') == 'slice':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'startIndex': _json.dumps(str(layer.slice.startIndex)),\n 'endIndex': _json.dumps(str(layer.slice.endIndex)),\n 'stride': _json.dumps(str(layer.slice.stride)),\n 'axis': _json.dumps(_NeuralNetwork_pb2.SliceLayerParams.SliceAxis.Name(layer.slice.axis)),\n 'desc': 'A layer that slices the input data along a given axis.'\n }\n elif layer.WhichOneof('layer') == 'split':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'nOutputs': _json.dumps(str(layer.split.nOutputs)),\n 'desc': 'A layer that uniformly splits across the channel dimension\\n'\n 'to produce a specified number of outputs.'\n }\n elif layer.WhichOneof('layer') == 'unary':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'unary_type': _json.dumps(_NeuralNetwork_pb2.UnaryFunctionLayerParams.Operation.Name(layer.unary.type)),\n 'alpha': _json.dumps(str(layer.unary.alpha)),\n 'epsilon': _json.dumps(str(layer.unary.epsilon)),\n 'shift': _json.dumps(str(layer.unary.shift)),\n 'scale': _json.dumps(str(layer.unary.scale)),\n 'desc': 'A layer that applies a unary function.'\n }\n elif layer.WhichOneof('layer') == 'upsample':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'scalingFactor': _json.dumps(str(layer.upsample.scalingFactor)),\n 'mode': _json.dumps(_NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Name(layer.upsample.mode)),\n 'desc': 'A layer that scales up spatial dimensions.\\n'\n 'It supports two modes: '\n 'nearest neighbour (default) and bilinear.'\n }\n elif layer.WhichOneof('layer') == 'max':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'desc': 'A layer that computes the elementwise maximum '\n 'over the inputs.'\n }\n elif layer.WhichOneof('layer') == 'min':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'desc': 'A layer that computes the elementwise minimum '\n 'over the inputs.'\n }\n elif layer.WhichOneof('layer') == 'average':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'desc': 'A layer that computes the elementwise average '\n 'of the inputs.'\n }\n elif layer.WhichOneof('layer') == 'softmax':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'desc': 'A layer that performs softmax normalization.\\n'\n 'Normalization is done along the channel axis.'\n }\n elif layer.WhichOneof('layer') == 'custom':\n info = {\n 'type': layer.WhichOneof('layer'),\n 'className': layer.custom.className,\n 'desc': 'A custom layer'\n }\n if layer.custom.parameters != {}:\n for key in layer.custom.parameters.keys():\n value = _get_custom_layer_value(layer.custom.parameters[key])\n info[key] = value\n 
if layer.custom.description:\n info['desc'] = layer.custom.description\n\n else:\n info = {\n 'type': layer.WhichOneof('layer')\n }\n\n info['inputs'] = str(layer.input)\n info['outputs'] = str(layer.output)\n\n return info\n\ndef _get_custom_layer_value(parameter):\n\n if 'intValue' in str(parameter):\n return str(parameter.intValue)\n elif 'doubleValue' in str(parameter):\n return str(parameter.doubleValue)\n elif 'boolValue' in str(parameter):\n return str(parameter.boolValue)\n elif 'longValue' in str(parameter):\n return str(parameter.longValue)\n elif 'stringValue' in str(parameter):\n return str(parameter.stringValue)\n\n\n\ndef _pipeline_component_info(model, info):\n \"\"\"\n\n Parameters\n ----------\n model : pipeline model\n info : info dict to dump model related info into\n\n model can be one of 'arrayFeatureExtractor', 'categoricalMapping',\n 'dictVectorizer', 'featureVectorizer', 'glmClassifier', 'glmRegressor',\n 'identity', 'imputer', 'neuralNetwork', 'neuralNetworkClassifier',\n 'neuralNetworkRegressor', 'normalizer', 'oneHotEncoder', 'scaler',\n 'supportVectorClassifier', 'supportVectorRegressor',\n 'treeEnsembleClassifier', 'treeEnsembleRegressor'\n\n Returns\n -------\n info : info dict with required info for model\n\n \"\"\"\n model_type = model.WhichOneof('Type')\n if model_type == 'arrayFeatureExtractor':\n info[\"desc\"] = 'Given an index, extracts the value at ' \\\n 'that index from its array input.\\n' \\\n 'Indexes are zero-based.'\n elif model_type == 'categoricalMapping':\n info[\"mappingType\"] = _json.dumps(str(model.categoricalMapping.WhichOneof('MappingType')))\n info[\"valueOnUnknown\"] = _json.dumps(str(model.categoricalMapping.WhichOneof('ValueOnUnknown')))\n info[\"desc\"] = 'This allows conversion from integers ' \\\n 'to strings, or from strings to integers.'\n elif model_type == 'dictVectorizer':\n info[\"map\"] = _json.dumps(str(model.dictVectorizer.WhichOneof('Map')))\n info[\"desc\"] = 'Uses an index mapping to convert a dictionary ' \\\n 'to an array.\\n The output array will be equal in ' \\\n 'length to the index mapping vector parameter.\\n' \\\n 'All keys in the input dictionary must be present in ' \\\n 'the index mapping vector.'\n elif model_type == 'featureVectorizer':\n info[\"inputList\"] = _json.dumps(str(model.featureVectorizer.inputList))\n info[\"desc\"] = 'A FeatureVectorizer puts one or more features into a single' \\\n ' array.\\n The ordering of features in the output array is ' \\\n 'determined by inputList.'\n elif model_type == 'glmClassifier':\n info[\"offset\"] = _json.dumps(str(model.glmClassifier.offset))\n info[\"postEvaluationTransform\"] = _json.dumps(str(model.glmClassifier.postEvaluationTransform))\n info[\"classEncoding\"] = _json.dumps(str(model.glmClassifier.classEncoding))\n info[\"classLabels\"] = _json.dumps(str(model.glmClassifier.WhichOneof('ClassLabels')))\n info[\"desc\"] = 'A generalized linear model classifier.'\n elif model_type == 'glmRegressor':\n info[\"offset\"] = _json.dumps(str(model.glmRegressor.offset))\n info[\"postEvaluationTransform\"] = _json.dumps(str(model.glmRegressor.postEvaluationTransform))\n info[\"desc\"] = 'A generalized linear model regressor.'\n elif model_type == 'imputer':\n info[\"ImputedValue\"] = _json.dumps(str(model.imputer.WhichOneof('ImputedValue')))\n info[\"desc\"] = 'A transformer that replaces missing values with a ' \\\n 'default value,\\n such as a statistically-derived ' \\\n 'value.\\nIf ``ReplaceValue`` is set, then missing ' \\\n 'values of that type are\\n 
replaced with the ' \\\n 'corresponding value.'\n elif model_type == 'normalizer':\n info[\"normType\"] = _json.dumps(str(model.normalizer.normType))\n info[\"desc\"] = 'A normalization preprocessor.There are three normalization modes\\n' \\\n '1. Max\\n' \\\n '2. L1\\n' \\\n '3. L2'\n elif model_type == 'oneHotEncoder':\n info[\"CategoryType\"] = _json.dumps(str(model.oneHotEncoder.WhichOneof('CategoryType')))\n info[\"outputSparse\"] = _json.dumps(str(model.oneHotEncoder.outputSparse))\n info[\"handleUnknown\"] = _json.dumps(str(model.oneHotEncoder.handleUnknown))\n info[\"desc\"] = 'Transforms a categorical feature into an array. The array will be all\\n' \\\n 'zeros expect a single entry of one.\\n' \\\n 'Each categorical value will map to an index, this mapping is given by\\n' \\\n 'either the ``stringCategories`` parameter or the ``int64Categories``\\n' \\\n 'parameter.'\n elif model_type == 'scaler':\n info[\"shiftValue\"] = _json.dumps(str(model.scaler.shiftValue))\n info[\"scaleValue\"] = _json.dumps(str(model.scaler.scaleValue))\n info[\"desc\"] = 'A scaling operation.\\n' \\\n 'f(x) = scaleValue \\cdot (x + shiftValue)'\n elif model_type == 'supportVectorClassifier':\n info[\"kernel\"] = _json.dumps(str(model.supportVectorClassifier.kernel))\n info[\"numberOfSupportVectorsPerClass\"] = _json.dumps(str(model.supportVectorClassifier.numberOfSupportVectorsPerClass))\n info[\"rho\"] = _json.dumps(str(model.supportVectorClassifier.rho))\n info[\"probA\"] = _json.dumps(str(model.supportVectorClassifier.probA))\n info[\"probB\"] = _json.dumps(str(model.supportVectorClassifier.probB))\n info[\"ClassLabels\"] = _json.dumps(str(model.supportVectorClassifier.WhichOneof('ClassLabels')))\n info[\"desc\"] = 'Support Vector Machine Classifier with one of ' \\\n 'Linear, RBF, Polynomial or Sigmoid ' \\\n 'kernels available'\n elif model_type == 'supportVectorRegressor':\n info[\"kernel\"] = _json.dumps(str(model.supportVectorRegressor.kernel))\n info[\"numberOfSupportVectorsPerClass\"] = _json.dumps(\n str(model.supportVectorRegressor.numberOfSupportVectorsPerClass))\n info[\"rho\"] = _json.dumps(str(model.supportVectorRegressor.rho))\n info[\"desc\"] = 'Support Vector Machine Regressor with one of ' \\\n 'Linear, RBF, Polynomial or Sigmoid kernels available'\n elif model_type == 'treeEnsembleClassifier':\n info[\"treeEnsemble\"] = _json.dumps(str(model.treeEnsembleClassifier.treeEnsemble))\n info[\"postEvaluationTransform\"] = _json.dumps(str(model.treeEnsembleClassifier.postEvaluationTransform))\n info[\"ClassLabels\"] = _json.dumps(str(model.treeEnsembleClassifier.WhichOneof('ClassLabels')))\n info[\"desc\"] = 'Each tree is a collection of nodes, each of which is identified ' \\\n 'by a unique identifier.\\nEach node is either a branch or a leaf node.' \\\n ' A branch node evaluates a value according to a behavior;\\n' \\\n 'A tree must have exactly one root node, which has no parent node.'\n elif model_type == 'treeEnsembleRegressor':\n info[\"treeEnsemble\"] = _json.dumps(str(model.treeEnsembleRegressor.treeEnsemble))\n info[\"postEvaluationTransform\"] = _json.dumps(str(model.treeEnsembleRegressor.postEvaluationTransform))\n info[\"desc\"] = 'Each tree is a collection of nodes, each of which is identified' \\\n ' by a unique identifier.\\nEach node is either a branch or a leaf' \\\n ' node. 
A branch node evaluates a value according to a behavior;\\n' \\\n 'A tree must have exactly one root node, which has no parent node.'\n return info\n\n\ndef _neural_network_node_info(nn_spec, cy_nodes, child=False, parent=None):\n \"\"\"\n\n Parameters\n ----------\n nn_spec : Neural Network spec of mlmodel\n cy_nodes: list of nodes to update with nn layers\n child: If child of a parent pipeline component\n parent : Parent node of the Neural Network spec\n\n Returns\n -------\n\n cy_nodes: Updated with layer specific information\n\n \"\"\"\n layers = nn_spec.layers\n for layer in layers:\n info = _layer_specific_info(layer)\n if child:\n info[\"name\"] = layer.name\n cy_nodes.append({\n 'data': {\n 'id': layer.name,\n 'name': info[\"type\"],\n 'info': info,\n 'parent': parent\n },\n 'classes': info[\"type\"],\n })\n else:\n info[\"name\"] = layer.name\n cy_nodes.append({\n 'data': {\n 'id': layer.name,\n 'name': info[\"type\"],\n 'info': info\n },\n 'classes': info[\"type\"],\n })\n\n return cy_nodes\n\n\ndef _neural_network_nodes_and_edges(nn_spec,\n cy_nodes,\n cy_edges,\n spec_outputs,\n input_spec,\n input_shape_dict=None\n ):\n \"\"\"\n\n Parameters\n ----------\n nn_spec : Neural Network Spec\n cy_nodes : list to add nn nodes to\n cy_edges : list to add edges for nn nodes to\n spec_outputs : outputs of nn spec\n input_spec : input spec of Neural Network\n\n Returns\n -------\n\n cy_data : concatenated list of updated cy_nodes and cy_edges\n\n \"\"\"\n cy_nodes = _neural_network_node_info(nn_spec, cy_nodes)\n cy_nodes.append({\n 'data': {\n 'id': 'output_node',\n 'name': '',\n 'info': {\n 'type': 'output node'\n },\n 'classes': 'output',\n\n }\n })\n\n for model_output, output_type in spec_outputs:\n cy_nodes.append({\n 'data': {\n 'id': str(model_output),\n 'name': str(model_output),\n 'info': {\n 'type': \"\\n\".join(str(output_type).split(\"\\n\")),\n 'inputs': str([model_output]),\n 'outputs': str([])\n },\n 'parent': 'output_node'\n },\n 'classes': 'output'\n })\n\n shape_dict = _infer_shapes(nn_spec, input_spec, input_shape_dict=input_shape_dict)\n cy_nodes, cy_edges = _calculate_edges(cy_nodes, cy_edges, shape_dict)\n\n cy_data = cy_nodes + cy_edges\n return cy_data\n\n\ndef _pipeline_nodes_and_edges(cy_nodes, cy_edges, pipeline_spec, spec_outputs):\n \"\"\"\n\n Parameters\n ----------\n cy_nodes : list to add nn nodes to\n cy_edges : list to add edges for nn nodes to\n pipeline_spec: Spec of pipeline mlmodel\n spec_outputs: spec outputs of pipeline mlmodel\n\n Returns\n -------\n\n cy_data : concatenated list of updated cy_nodes and cy_edges\n\n \"\"\"\n i = 1\n nn_model_types = ['neuralNetwork', 'neuralNetworkClassifier', 'neuralNetworkRegressor']\n models = pipeline_spec.models\n shape_dict = None\n for model in models:\n sub_model_type = model.WhichOneof('Type')\n if not sub_model_type:\n sub_model_type = 'input'\n info = {}\n input_names = []\n output_names = []\n info['Pipeline Component'] = sub_model_type.upper()\n for model_input in model.description.input:\n input_names.append(model_input.name)\n info['inputs'] = str(input_names)\n\n for model_output in model.description.output:\n output_names.append(model_output.name)\n info['outputs'] = str(output_names)\n\n info = _pipeline_component_info(model, info)\n\n if sub_model_type in nn_model_types:\n cy_nodes.append({\n 'data': {\n 'id': \"{}_{}\".format(sub_model_type, i),\n 'name': sub_model_type,\n 'info': info\n },\n 'classes': 'parent',\n })\n if sub_model_type == 'neuralNetwork':\n nn_spec = model.neuralNetwork\n 
elif sub_model_type == 'neuralNetworkClassifier':\n nn_spec = model.neuralNetworkClassifier\n elif sub_model_type == 'neuralNetworkRegressor':\n nn_spec = model.neuralNetworkRegressor\n cy_nodes = _neural_network_node_info(nn_spec, cy_nodes, child=True, parent=\"{}_{}\".format(sub_model_type, i))\n shape_dict = _infer_shapes(nn_spec, model.description.input)\n else:\n cy_nodes.append({\n 'data': {\n 'id': \"{}_{}\".format(sub_model_type, i),\n 'name': sub_model_type,\n 'info': info\n },\n 'classes': sub_model_type\n })\n i += 1\n\n cy_nodes.append({\n 'data': {\n 'id': 'output_node',\n 'name': '',\n 'info': {\n 'type': 'output node'\n },\n 'classes': 'output',\n\n }\n })\n\n for model_output, output_type in spec_outputs:\n cy_nodes.append({\n 'data': {\n 'id': str(model_output),\n 'name': str(model_output),\n 'info': {\n 'type': \"\\n\".join(str(output_type).split(\"\\n\")),\n 'inputs': str([model_output]),\n 'outputs': str([])\n },\n 'parent' : 'output_node'\n },\n 'classes': 'output'\n })\n\n\n cy_nodes, cy_edges = _calculate_edges(cy_nodes, cy_edges, shape_dict)\n\n cy_data = cy_nodes + cy_edges\n return cy_data\n\n\ndef _start_server(port, web_dir):\n \"\"\"\n\n Parameters\n ----------\n port : localhost port to start server on\n web_dir: directory containing server files\n\n Returns\n -------\n\n None\n\n \"\"\"\n import subprocess\n import webbrowser\n if port is None:\n port = _np.random.randint(8000, 9000)\n import sys\n if sys.version_info[0] < 3:\n http_server = 'SimpleHTTPServer'\n else:\n http_server = 'http.server'\n subprocess.Popen([sys.executable, '-m', http_server, str(port)], cwd=web_dir)\n webbrowser.open_new_tab('localhost:{}'.format(str(port)))\n return True\n"
] | [
[
"numpy.random.randint"
]
] |
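The `apis` cell above lists the qualified library calls detected in the row's `code` entries; `numpy.random.randint` corresponds to the random port selection inside the `_start_server` helper shown in the code field. The snippet below is a minimal sketch, assuming a plain AST walk over the source text, of how such call names might be gathered; it is not the dataset's actual extraction pipeline, and mapping import aliases such as `_np` back to `numpy` is deliberately omitted.

import ast

def extract_api_calls(source):
    """Collect dotted call names (e.g. '_np.random.randint') from Python source."""
    calls = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            parts = []
            target = node.func
            # Unwind attribute chains: _np.random.randint -> ['randint', 'random', '_np']
            while isinstance(target, ast.Attribute):
                parts.append(target.attr)
                target = target.value
            if isinstance(target, ast.Name):
                parts.append(target.id)
                calls.append(".".join(reversed(parts)))
    return calls

snippet = "import numpy as _np\nport = _np.random.randint(8000, 9000)\n"
print(extract_api_calls(snippet))  # ['_np.random.randint']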