repo_name (string, lengths 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
openvinotoolkit/nncf_pytorch | [
"13a483eac6ed891720ba90d7902142c4b3bfa599"
] | [
"tests/torch/nas/test_state.py"
] | [
"\"\"\"\n Copyright (c) 2022 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport logging\nfrom copy import deepcopy\nfrom functools import partial\n\nimport pytest\nimport torch\n\nfrom nncf.common.utils.logger import logger as nncf_logger\nfrom nncf.experimental.torch.nas.bootstrapNAS.elasticity.base_handler import SEHBuilderStateNames\nfrom nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_depth import EDBuilderStateNames\nfrom nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_kernel import EKBuilderStateNames\nfrom nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_width import EWBuilderStateNames\nfrom nncf.experimental.torch.nas.bootstrapNAS.elasticity.elasticity_dim import ElasticityDim\nfrom nncf.torch.model_creation import create_nncf_network\nfrom tests.torch.helpers import BasicConvTestModel\nfrom tests.torch.helpers import get_empty_config\nfrom tests.torch.nas.creators import build_elastic_model_from_handler\nfrom tests.torch.nas.descriptors import ElasticityDesc\nfrom tests.torch.nas.helpers import do_conv2d\nfrom tests.torch.nas.helpers import move_model_to_cuda_if_available\nfrom tests.torch.nas.test_elastic_depth import BASIC_ELASTIC_DEPTH_PARAMS\nfrom tests.torch.nas.test_elastic_depth import BasicTestSuperNet\nfrom tests.torch.nas.test_elastic_depth import DepthBasicConvTestModel\nfrom tests.torch.nas.test_elastic_kernel import BASIC_ELASTIC_KERNEL_PARAMS\nfrom tests.torch.nas.test_elastic_width import BASIC_ELASTIC_WIDTH_PARAMS\nfrom tests.torch.nas.test_elastic_width import TwoConvAddConvTestModel\nfrom tests.torch.nas.test_elastic_width import TwoSequentialConvBNTestModel\n\n\[email protected]_fixture()\ndef _nncf_caplog(caplog):\n nncf_logger.propagate = True\n yield caplog\n nncf_logger.propagate = False\n\n\ndef ref_width_output_fn(model, x):\n return model.get_minimal_subnet_output_without_reorg(x)\n\n\nCOMMON_WIDTH_STATE_DESCS = [\n ElasticityDesc(\n ElasticityDim.WIDTH,\n model_cls=TwoConvAddConvTestModel,\n params=BASIC_ELASTIC_WIDTH_PARAMS,\n ref_state={\n 'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,\n 'grouped_node_names_to_prune': [\n ['TwoConvAddConvTestModel/NNCFConv2d[conv1]/conv2d_0',\n 'TwoConvAddConvTestModel/NNCFConv2d[conv2]/conv2d_0']\n ]\n },\n ref_output_fn=ref_width_output_fn\n ),\n ElasticityDesc(\n ElasticityDim.WIDTH,\n model_cls=TwoSequentialConvBNTestModel,\n params=BASIC_ELASTIC_WIDTH_PARAMS,\n ref_state={\n 'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,\n 'grouped_node_names_to_prune': [\n ['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[0]/conv2d_0'],\n ['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[3]/conv2d_0']\n ]\n },\n ref_output_fn=ref_width_output_fn\n ),\n]\n\n\ndef ref_kernel_output_fn(model, x):\n conv = model.conv\n ref_padding = 1\n ref_weights = conv.weight[:, :, 1:4, 1:4]\n return do_conv2d(conv, x, weight=ref_weights, padding=ref_padding)\n\n\nCOMMON_KERNEL_DESC = ElasticityDesc(\n ElasticityDim.KERNEL,\n model_cls=partial(BasicConvTestModel, 1, 
out_channels=1, kernel_size=5),\n params=BASIC_ELASTIC_KERNEL_PARAMS,\n ref_output_fn=ref_kernel_output_fn,\n ref_state={\n SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,\n EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: ['BasicConvTestModel/NNCFConv2d[conv]/conv2d_0']\n },\n input_size=[1, 1, 5, 5]\n)\n\nCOMMON_DEPTH_SUPERNET_DESC = ElasticityDesc(\n ElasticityDim.DEPTH,\n model_cls=BasicTestSuperNet,\n params={\n 'mode': 'auto',\n 'min_block_size': 2\n },\n ref_state={\n 'elasticity_params': {\n 'allow_linear_combination': False,\n 'allow_nested_blocks': False,\n 'max_block_size': 50,\n 'min_block_size': 2,\n 'skipped_blocks': None\n },\n EDBuilderStateNames.SKIPPED_BLOCKS: [\n {\n 'start_node_name': 'BasicTestSuperNet/NNCFConv2d[conv1]/conv2d_0',\n 'end_node_name': 'BasicTestSuperNet/__add___0'\n }\n ],\n EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: {0: [0]},\n EDBuilderStateNames.OrdinalIds: [[1, 3]],\n },\n ref_search_space=[[0], []]\n)\n\n\ndef ref_depth_output_fn(model, x):\n model.set_skipped_layers(['conv1'])\n return model(x)\n\n\nCOMMON_DEPTH_BASIC_DESC = ElasticityDesc(\n ElasticityDim.DEPTH,\n model_cls=DepthBasicConvTestModel,\n params=BASIC_ELASTIC_DEPTH_PARAMS,\n ref_output_fn=ref_depth_output_fn,\n ref_search_space=[[0], []],\n ref_state={\n 'elasticity_params': {\n 'allow_linear_combination': False,\n 'allow_nested_blocks': False,\n 'max_block_size': 50,\n 'min_block_size': 6,\n 'skipped_blocks': [['DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv0]/conv2d_0',\n 'DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv1]/conv2d_0']]\n },\n EDBuilderStateNames.SKIPPED_BLOCKS: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_state'],\n EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_dependencies'],\n EDBuilderStateNames.OrdinalIds: None,\n }\n)\n\nLIST_STATE_AFTER_BUILD_DESCS = [\n *COMMON_WIDTH_STATE_DESCS,\n COMMON_DEPTH_SUPERNET_DESC,\n COMMON_KERNEL_DESC\n]\n\n\[email protected]('desc', LIST_STATE_AFTER_BUILD_DESCS, ids=map(str, LIST_STATE_AFTER_BUILD_DESCS))\ndef test_can_get_builder_state_after_build(desc):\n _, builder = desc.build_handler()\n actual_state = builder.get_state()\n assert actual_state == desc.ref_state\n\n\nELASTIC_WIDTH_PARAMS_BB = {'filter_importance': 'L2', **BASIC_ELASTIC_WIDTH_PARAMS}\nLIST_STATE_BEFORE_BUILD_DESCS = [\n ElasticityDesc(\n ElasticityDim.WIDTH,\n params=ELASTIC_WIDTH_PARAMS_BB,\n ref_state={\n SEHBuilderStateNames.ELASTICITY_PARAMS: ELASTIC_WIDTH_PARAMS_BB,\n EWBuilderStateNames.GROUPED_NODE_NAMES_TO_PRUNE: []\n }\n ),\n ElasticityDesc(\n ElasticityDim.KERNEL,\n params=BASIC_ELASTIC_KERNEL_PARAMS,\n ref_state={\n SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,\n EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: []\n }\n ),\n COMMON_DEPTH_BASIC_DESC\n]\n\n\[email protected]('desc', LIST_STATE_BEFORE_BUILD_DESCS, ids=map(str, LIST_STATE_BEFORE_BUILD_DESCS))\nclass TestBeforeBuild:\n def test_can_get_builder_state_before_build(self, desc: ElasticityDesc):\n builder = desc.create_builder()\n actual_state = builder.get_state()\n assert actual_state == desc.ref_state\n\n def test_output_warning_when_state_overrides_params(self, desc: ElasticityDesc, _nncf_caplog):\n old_builder = desc.create_builder_with_config({})\n old_state = old_builder.get_state()\n\n new_params = desc.params\n new_builder = desc.create_builder_with_config(new_params)\n new_builder.load_state(old_state)\n\n record = next(iter(_nncf_caplog.records))\n 
assert record.levelno == logging.WARNING\n\n def test_no_warning_when_state_and_params_are_the_same(self, desc: ElasticityDesc, _nncf_caplog):\n old_builder = desc.create_builder()\n old_state = old_builder.get_state()\n\n new_params = desc.params.copy()\n new_builder = desc.create_builder_with_config(new_params)\n new_builder.load_state(old_state)\n\n assert not _nncf_caplog.records\n\n\nLIST_LOAD_STATE_DESCS = [\n COMMON_DEPTH_BASIC_DESC,\n *COMMON_WIDTH_STATE_DESCS,\n COMMON_KERNEL_DESC\n]\n\n\[email protected]('desc', LIST_LOAD_STATE_DESCS, ids=map(str, LIST_LOAD_STATE_DESCS))\ndef test_can_load_handler_state(desc: ElasticityDesc):\n model = desc.model_cls()\n move_model_to_cuda_if_available(model)\n model_copy = deepcopy(model)\n device = next(iter(model.parameters())).device\n dummy_input = torch.ones(model.INPUT_SIZE).to(device)\n\n input_size = desc.input_size\n if not input_size:\n input_size = model.INPUT_SIZE\n config = get_empty_config(input_sample_sizes=input_size)\n old_nncf_network = create_nncf_network(model, config)\n old_builder = desc.create_builder()\n old_handler = old_builder.build(old_nncf_network)\n elastic_model = build_elastic_model_from_handler(old_nncf_network, old_handler)\n old_handler.activate_minimum_subnet()\n old_output = elastic_model(dummy_input)\n ref_output = desc.ref_output_fn(model, dummy_input)\n assert torch.allclose(old_output, ref_output)\n\n new_nncf_network = create_nncf_network(model_copy, config)\n builder_state = old_builder.get_state()\n # no need in config to restore builder state\n new_builder = desc.create_builder_with_config({})\n\n new_builder.load_state(builder_state)\n new_handler = new_builder.build(new_nncf_network)\n elastic_model = build_elastic_model_from_handler(new_nncf_network, new_handler)\n new_handler.activate_minimum_subnet()\n new_output = elastic_model(dummy_input)\n assert torch.allclose(old_output, new_output)\n"
] | [
[
"torch.ones",
"torch.allclose"
]
] |
baba1587/jax | [
"cb77f2a22de49e85da93f43b7dc448aa238d5207"
] | [
"jax/lax/lax.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport builtins\nimport collections\nimport enum\nimport functools\nimport itertools\nimport operator\nimport string\nfrom typing import (Any, Callable, List, NamedTuple, Optional, Sequence, Union,\n Tuple, Type)\nimport warnings\n\nimport numpy as onp\n\nfrom ..util import partial, prod\n\nfrom .. import core\nfrom .. import ad_util\nfrom .. import api\nfrom .. import linear_util as lu\nfrom .. import dtypes\nfrom .. import lazy\nfrom .. import lib\nfrom ..config import flags\nfrom ..core import Primitive\nfrom ..abstract_arrays import (UnshapedArray, ShapedArray, ConcreteArray,\n AbstractToken, array_types, make_shaped_array,\n raise_to_shaped, abstract_token, canonicalize_shape)\nfrom ..interpreters import partial_eval as pe\nfrom ..interpreters import xla\nfrom ..interpreters import pxla\nfrom ..interpreters import ad\nfrom ..interpreters import batching\nfrom ..interpreters import masking\nfrom ..util import curry, cache, safe_zip, unzip2, prod\nfrom ..tree_util import build_tree, tree_unflatten, tree_map\nfrom ..lib import pytree\nfrom ..lib import xla_bridge\nfrom ..lib import xla_client\n\nxb = xla_bridge\nxc = xla_client\nxops = xla_client.ops\n\nFLAGS = flags.FLAGS\n\n_max = builtins.max\n_min = builtins.max\n_reduce = functools.reduce\n\nArray = Any\nDType = Any\nShape = Sequence[int]\n\n@cache()\ndef broadcast_shapes(*shapes):\n \"\"\"Returns the shape that results from NumPy broadcasting of `shapes`.\"\"\"\n if len(shapes) == 1:\n return shapes[0]\n ndim = _max(len(shape) for shape in shapes)\n shapes = onp.array([(1,) * (ndim - len(shape)) + shape for shape in shapes])\n is_zero = onp.any(shapes == 0, axis=0)\n max_shape = onp.max(shapes, axis=0)\n result_shape = onp.where(is_zero, 0, max_shape)\n if not onp.all((shapes == result_shape) | (shapes == 1)):\n raise ValueError(\"Incompatible shapes for broadcasting: {}\"\n .format(tuple(map(tuple, shapes))))\n return canonicalize_shape(result_shape)\n\ndef _identity(x): return x\n\n### traceables\n\ndef neg(x: Array) -> Array:\n r\"\"\"Elementwise negation: :math:`-x`.\"\"\"\n return neg_p.bind(x)\n\ndef sign(x: Array) -> Array:\n r\"\"\"Elementwise sign.\n\n For floating-point inputs, returns\n :math:`\\mathrm{sign}(x) = \\begin{cases}\n -1 & x < 0\\\\\n -0 & x = -0\\\\\n \\mathit{NaN} & x = \\mathit{NaN}\\\\\n +0 & x = +0\\\\\n 1 & x > 0\n \\end{cases}`\n\n For signed integer inputs, returns\n :math:`\\mathrm{sign}(x) = \\begin{cases}\n -1 & x < 0\\\\\n 0 & x = 0\\\\\n 1 & x > 0\n \\end{cases}`\n\n For complex inputs, returns the complex phase, i.e.\n :math:`\\mathrm{sign}(x) = \\frac{x}{|x|}`.\n \"\"\"\n return sign_p.bind(x)\n\ndef nextafter(x1: Array, x2: Array) -> Array:\n r\"\"\"Returns the next representable value after `x1` in the direction of `x2`.\"\"\"\n return nextafter_p.bind(_brcast(x1, x2), _brcast(x2, x1))\n\ndef floor(x: Array) -> Array:\n r\"\"\"Elementwise floor: :math:`\\left\\lfloor x \\right\\rfloor`.\"\"\"\n return 
floor_p.bind(x)\n\ndef ceil(x: Array) -> Array:\n r\"\"\"Elementwise ceiling: :math:`\\left\\lceil x \\right\\rceil`.\"\"\"\n return ceil_p.bind(x)\n\ndef round(x: Array) -> Array:\n r\"\"\"Elementwise round.\n\n Rounds values to the nearest integer. Halfway values (e.g., `0.5`) are rounded\n away from zero.\"\"\"\n return round_p.bind(x)\n\ndef is_finite(x: Array) -> Array:\n r\"\"\"Elementwise :math:`\\mathrm{isfinite}`.\n\n For each element x returns `True` if and only if x is not :math:`\\pm\\infty` or\n :math:`\\mathit{NaN}`.\n \"\"\"\n return is_finite_p.bind(x)\n\ndef exp(x: Array) -> Array:\n r\"\"\"Elementwise exponential: :math:`e^x`.\"\"\"\n return exp_p.bind(x)\n\ndef expm1(x: Array) -> Array:\n r\"\"\"Elementwise :math:`e^{x} - 1`.\"\"\"\n return expm1_p.bind(x)\n\ndef log(x: Array) -> Array:\n r\"\"\"Elementwise natural logarithm: :math:`\\mathrm{log}(x)`.\"\"\"\n return log_p.bind(x)\n\ndef log1p(x: Array) -> Array:\n r\"\"\"Elementwise :math:`\\mathrm{log}(1 + x)`.\"\"\"\n return log1p_p.bind(x)\n\ndef tanh(x: Array) -> Array:\n r\"\"\"Elementwise hyperbolic tangent: :math:`\\mathrm{tanh}(x)`.\"\"\"\n return tanh_p.bind(x)\n\ndef sin(x: Array) -> Array:\n r\"\"\"Elementwise sine: :math:`\\mathrm{sin}(x)`.\"\"\"\n return sin_p.bind(x)\n\ndef cos(x: Array) -> Array:\n r\"\"\"Elementwise cosine: :math:`\\mathrm{cos}(x)`.\"\"\"\n return cos_p.bind(x)\n\ndef atan2(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise arc tangent of two variables:\n :math:`\\mathrm{atan}({x \\over y})`.\"\"\"\n return atan2_p.bind(x, y)\n\ndef betainc(a: Array, b: Array, x: Array) -> Array:\n r\"\"\"Elementwise regularized incomplete beta integral.\"\"\"\n return regularized_incomplete_beta_p.bind(a, b, x)\n\ndef lgamma(x: Array) -> Array:\n r\"\"\"Elementwise log gamma: :math:`\\mathrm{log}(\\Gamma(x))`.\"\"\"\n return lgamma_p.bind(x)\n\ndef digamma(x: Array) -> Array:\n r\"\"\"Elementwise digamma: :math:`\\psi(x)`.\"\"\"\n return digamma_p.bind(x)\n\ndef igamma(a: Array, x: Array) -> Array:\n r\"\"\"Elementwise regularized incomplete gamma function.\"\"\"\n return igamma_p.bind(a, x)\n\ndef igammac(a: Array, x: Array) -> Array:\n r\"\"\"Elementwise complementary regularized incomplete gamma function.\"\"\"\n return igammac_p.bind(a, x)\n\ndef igamma_grad_a(a: Array, x: Array) -> Array:\n r\"\"\"Elementwise derivative of the regularized incomplete gamma function.\"\"\"\n return igamma_grad_a_p.bind(a, x)\n\ndef bessel_i0e(x: Array) -> Array:\n r\"\"\"Exponentially scaled modified Bessel function of order 0:\n :math:`\\mathrm{i0e}(x) = e^{-|x|} \\mathrm{i0}(x)`\n \"\"\"\n return bessel_i0e_p.bind(x)\n\ndef bessel_i1e(x: Array) -> Array:\n r\"\"\"Exponentially scaled modified Bessel function of order 1:\n :math:`\\mathrm{i1e}(x) = e^{-|x|} \\mathrm{i1}(x)`\n \"\"\"\n return bessel_i1e_p.bind(x)\n\ndef erf(x: Array) -> Array:\n r\"\"\"Elementwise error function: :math:`\\mathrm{erf}(x)`.\"\"\"\n return erf_p.bind(x)\n\ndef erfc(x: Array) -> Array:\n r\"\"\"Elementwise complementary error function:\n :math:`\\mathrm{erfc}(x) = 1 - \\mathrm{erf}(x)`.\"\"\"\n return erfc_p.bind(x)\n\ndef erf_inv(x: Array) -> Array:\n r\"\"\"Elementwise inverse error function: :math:`\\mathrm{erf}^{-1}(x)`.\"\"\"\n return erf_inv_p.bind(x)\n\ndef real(x: Array) -> Array:\n r\"\"\"Elementwise extract real part: :math:`\\mathrm{Re}(x)`.\n\n Returns the real part of a complex number.\n \"\"\"\n return real_p.bind(x)\n\ndef imag(x: Array) -> Array:\n r\"\"\"Elementwise extract imaginary part: :math:`\\mathrm{Im}(x)`.\n\n 
Returns the imaginary part of a complex number.\n \"\"\"\n return imag_p.bind(x)\n\ndef complex(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise make complex number: :math:`x + jy`.\n\n Builds a complex number from real and imaginary parts.\n \"\"\"\n return complex_p.bind(_brcast(x, y), _brcast(y, x))\n\ndef conj(x: Array) -> Array:\n r\"\"\"Elementwise complex conjugate function: :math:`\\overline{x}`.\"\"\"\n return conj_p.bind(x, input_dtype=_dtype(x))\n\ndef abs(x: Array) -> Array:\n r\"\"\"Elementwise absolute value: :math:`|x|`.\"\"\"\n return abs_p.bind(x)\n\ndef pow(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise power: :math:`x^y`.\"\"\"\n return pow_p.bind(x, y)\n\ndef integer_pow(x: Array, y: int) -> Array:\n r\"\"\"Elementwise power: :math:`x^y`, where :math:`y` is a fixed integer.\"\"\"\n if y == 0:\n return _ones(x)\n elif y == 1:\n return x\n else:\n return integer_pow_p.bind(x, y=y)\n\ndef sqrt(x: Array) -> Array:\n r\"\"\"Elementwise square root: :math:`\\sqrt{x}`.\"\"\"\n return sqrt_p.bind(x)\n\ndef rsqrt(x: Array) -> Array:\n r\"\"\"Elementwise reciprocal square root: :math:`1 \\over \\sqrt{x}.\"\"\"\n return rsqrt_p.bind(x)\n\ndef bitwise_not(x: Array) -> Array:\n r\"\"\"Elementwise NOT: :math:`\\neg x`.\"\"\"\n return not_p.bind(x)\n\ndef bitwise_and(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise AND: :math:`x \\wedge y`.\"\"\"\n return and_p.bind(x, y)\n\ndef bitwise_or(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise OR: :math:`x \\vee y`.\"\"\"\n return or_p.bind(x, y)\n\ndef bitwise_xor(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise exclusive OR: :math:`x \\oplus y`.\"\"\"\n return xor_p.bind(x, y)\n\ndef population_count(x: Array) -> Array:\n r\"\"\"Elementwise popcount, count the number of set bits in each element.\"\"\"\n return population_count_p.bind(x)\n\ndef add(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise addition: :math:`x + y`.\"\"\"\n return add_p.bind(x, y)\n\ndef sub(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise subtraction: :math:`x - y`.\"\"\"\n return sub_p.bind(x, y)\n\ndef mul(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise multiplication: :math:`x \\times y`.\"\"\"\n return mul_p.bind(x, y)\n\ndef div(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise division: :math:`x \\over y`.\"\"\"\n return div_p.bind(x, y)\n\ndef rem(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise remainder: :math:`x \\bmod y`.\"\"\"\n return rem_p.bind(x, y)\n\ndef max(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise maximum: :math:`\\mathrm{max}(x, y)`\n\n For complex numbers, uses a lexicographic comparison on the\n `(real, imaginary)` pairs.\"\"\"\n return max_p.bind(x, y)\n\ndef min(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise minimum: :math:`\\mathrm{min}(x, y)`\n\n For complex numbers, uses a lexicographic comparison on the\n `(real, imaginary)` pairs.\"\"\"\n return min_p.bind(x, y)\n\ndef shift_left(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise left shift: :math:`x \\ll y`.\"\"\"\n return shift_left_p.bind(x, y)\n\ndef shift_right_arithmetic(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise arithmetic right shift: :math:`x \\gg y`.\"\"\"\n return shift_right_arithmetic_p.bind(x, y)\n\ndef shift_right_logical(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise logical right shift: :math:`x \\gg y`.\"\"\"\n return shift_right_logical_p.bind(x, y)\n\ndef eq(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise equals: :math:`x = y`.\"\"\"\n return eq_p.bind(x, y)\n\ndef ne(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise 
not-equals: :math:`x \\neq y`.\"\"\"\n return ne_p.bind(x, y)\n\ndef ge(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise greater-than-or-equals: :math:`x \\geq y`.\"\"\"\n return ge_p.bind(x, y)\n\ndef gt(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise greater-than: :math:`x > y`.\"\"\"\n return gt_p.bind(x, y)\n\ndef le(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise less-than-or-equals: :math:`x \\leq y`.\"\"\"\n return le_p.bind(x, y)\n\ndef lt(x: Array, y: Array) -> Array:\n r\"\"\"Elementwise less-than: :math:`x < y`.\"\"\"\n return lt_p.bind(x, y)\n\ndef convert_element_type(operand: Array, new_dtype: DType) -> Array:\n \"\"\"Elementwise cast.\n\n Wraps XLA's `ConvertElementType\n <https://www.tensorflow.org/xla/operation_semantics#convertelementtype>`_\n operator, which performs an elementwise conversion from one type to another.\n Similar to a C++ `static_cast`.\n\n Args:\n operand: an array or scalar value to be cast\n new_dtype: the new type. Should be a NumPy type.\n\n Returns:\n An array with the same shape as `operand`, cast elementwise to `new_dtype`.\n \"\"\"\n new_dtype = dtypes.canonicalize_dtype(new_dtype)\n # Avoids dropping precision by casting Python scalars to the default Jax\n # type. If we passed a Python scalar directly to the bind call below, it is\n # cast to the default type as part of the calling convention.\n if type(operand) in dtypes.python_scalar_dtypes:\n operand = onp.asarray(operand, new_dtype)\n old_dtype = dtypes.canonicalize_dtype(_dtype(operand))\n if old_dtype == new_dtype:\n return operand\n if (dtypes.issubdtype(old_dtype, onp.complexfloating) and\n not dtypes.issubdtype(new_dtype, onp.complexfloating)):\n msg = \"Casting complex values to real discards the imaginary part\"\n warnings.warn(msg, onp.ComplexWarning, stacklevel=2)\n return convert_element_type_p.bind(\n operand, new_dtype=new_dtype, old_dtype=old_dtype)\n\ndef bitcast_convert_type(operand: Array, new_dtype: DType) -> Array:\n \"\"\"Elementwise bitcast.\n\n Wraps XLA's `BitcastConvertType\n <https://www.tensorflow.org/xla/operation_semantics#bitcastconverttype>`_\n operator, which performs a bit cast from one type to another. The bitwidth\n of the source and destination types must match.\n\n Args:\n operand: an array or scalar value to be cast\n new_dtype: the new type. Should be a NumPy type.\n\n Returns:\n An array with the same shape as `operand`, bitcast elementwise to\n `new_dtype`.\n \"\"\"\n new_dtype = dtypes.canonicalize_dtype(new_dtype)\n old_dtype = _dtype(operand)\n if old_dtype != new_dtype:\n return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)\n else:\n return operand\n\ndef clamp(min: Array, x: Array, max: Array) -> Array:\n r\"\"\"Elementwise clamp.\n\n Returns :math:`\\mathrm{clamp}(x) = \\begin{cases}\n \\mathit{min} & \\text{if } x < \\mathit{min},\\\\\n \\mathit{max} & \\text{if } x > \\mathit{max},\\\\\n x & \\text{otherwise}\n \\end{cases}`.\n \"\"\"\n return clamp_p.bind(min, x, max)\n\ndef concatenate(operands: Sequence[Array], dimension: int) -> Array:\n \"\"\"Concatenates a sequence of arrays along `dimension`.\n\n Wraps XLA's `Concatenate\n <https://www.tensorflow.org/xla/operation_semantics#concatenate>`_\n operator.\n\n Args:\n operands: a sequence of arrays to concatenate. 
The arrays must have equal\n shapes, except in the `dimension` axis.\n dimension: the dimension along which to concatenate the arrays.\n\n Returns:\n An array containing the concatenation.\n \"\"\"\n return concatenate_p.bind(*operands, dimension=dimension)\n\nPrecision = xla_client.PrecisionConfig.Precision\nPrecision.__str__ = lambda precision: precision.name\nPrecisionType = Any\n\nclass ConvDimensionNumbers(NamedTuple):\n \"\"\"Describes batch, spatial, and feature dimensions of a convolution.\n\n Args:\n lhs_spec: a tuple of nonnegative integer dimension numbers containing\n `(batch dimension, feature dimension, spatial dimensions...)`.\n rhs_spec: a tuple of nonnegative integer dimension numbers containing\n `(out feature dimension, in feature dimension, spatial dimensions...)`.\n out_spec: a tuple of nonnegative integer dimension numbers containing\n `(batch dimension, feature dimension, spatial dimensions...)`.\n \"\"\"\n lhs_spec: Sequence[int]\n rhs_spec: Sequence[int]\n out_spec: Sequence[int]\n\nConvGeneralDilatedDimensionNumbers = Union[\n None, ConvDimensionNumbers, Tuple[str, str, str]]\n\ndef conv_general_dilated(\n lhs: Array, rhs: Array, window_strides: Sequence[int],\n padding: Union[str, Sequence[Tuple[int, int]]],\n lhs_dilation: Optional[Sequence[int]] = None,\n rhs_dilation: Optional[Sequence[int]] = None,\n dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,\n feature_group_count: int = 1, batch_group_count: int = 1,\n precision: Optional[PrecisionType] = None) -> Array:\n \"\"\"General n-dimensional convolution operator, with optional dilation.\n\n Wraps XLA's `Conv\n <https://www.tensorflow.org/xla/operation_semantics#conv_convolution>`_\n operator.\n\n Args:\n lhs: a rank `n+2` dimensional input array.\n rhs: a rank `n+2` dimensional array of kernel weights.\n window_strides: a sequence of `n` integers, representing the inter-window\n strides.\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of\n `n` `(low, high)` integer pairs that give the padding to apply before and\n after each spatial dimension.\n lhs_dilation: `None`, or a sequence of `n` integers, giving the\n dilation factor to apply in each spatial dimension of `lhs`. LHS dilation\n is also known as transposed convolution.\n rhs_dilation: `None`, or a sequence of `n` integers, giving the\n dilation factor to apply in each spatial dimension of `rhs`. RHS dilation\n is also known as atrous convolution.\n dimension_numbers: either `None`, a `ConvDimensionNumbers` object, or\n a 3-tuple `(lhs_spec, rhs_spec, out_spec)`, where each element is a string\n of length `n+2`.\n feature_group_count: integer, default 1. See XLA HLO docs.\n batch_group_count: integer, default 1. See XLA HLO docs.\n precision: Optional. Either `None`, which means the default precision for\n the backend, or a `Precision` enum value.\n\n Returns:\n An array containing the convolution result.\n\n In the string case of `dimension_numbers`, each character identifies by\n position:\n\n - the batch dimensions in `lhs`, `rhs`, and the output with the character\n 'N',\n - the feature dimensions in `lhs` and the output with the character 'C',\n - the input and output feature dimensions in rhs with the characters 'I'\n and 'O' respectively, and\n - spatial dimension correspondences between lhs, rhs, and the output using\n any distinct characters.\n\n For example, to indicate dimension numbers consistent with the `conv` function\n with two spatial dimensions, one could use `('NCHW', 'OIHW', 'NCHW')`. 
As\n another example, to indicate dimension numbers consistent with the TensorFlow\n Conv2D operation, one could use `('NHWC', 'HWIO', 'NHWC')`. When using the\n latter form of convolution dimension specification, window strides are\n associated with spatial dimension character labels according to the order in\n which the labels appear in the `rhs_spec` string, so that `window_strides[0]`\n is matched with the dimension corresponding to the first character\n appearing in rhs_spec that is not `'I'` or `'O'`.\n\n If `dimension_numbers` is `None`, the default is `('NCHW', 'OIHW', 'NCHW')`\n (for a 2D convolution).\n \"\"\"\n dnums: ConvDimensionNumbers\n dnums = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)\n if lhs_dilation is None:\n lhs_dilation = (1,) * (lhs.ndim - 2)\n elif isinstance(padding, str) and not len(lhs_dilation) == lhs_dilation.count(1):\n raise ValueError(\n \"String padding is not implemented for transposed convolution \"\n \"using this op. Please either exactly specify the required padding or \"\n \"use conv_transpose.\")\n if rhs_dilation is None:\n rhs_dilation = (1,) * (rhs.ndim - 2)\n if isinstance(padding, str):\n lhs_perm, rhs_perm, _ = dnums\n rhs_shape = onp.take(rhs.shape, rhs_perm)[2:]\n effective_rhs_shape = [(k-1) * r + 1 for k, r in zip(rhs_shape, rhs_dilation)]\n padding = padtype_to_pads(\n onp.take(lhs.shape, lhs_perm)[2:], effective_rhs_shape,\n window_strides, padding)\n return conv_general_dilated_p.bind(\n lhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),\n lhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),\n dimension_numbers=dnums,\n feature_group_count=feature_group_count,\n batch_group_count=batch_group_count,\n lhs_shape=lhs.shape, rhs_shape=rhs.shape,\n precision=_canonicalize_precision(precision))\n\ndef dot(lhs: Array, rhs: Array, precision: Optional[PrecisionType] = None) -> Array:\n \"\"\"Vector/vector, matrix/vector, and matrix/matrix multiplication.\n\n Wraps XLA's `Dot\n <https://www.tensorflow.org/xla/operation_semantics#dot>`_\n operator.\n\n For more general contraction, see the `dot_general` operator.\n\n Args:\n lhs: an array of rank 1 or 2.\n rhs: an array of rank 1 or 2.\n precision: Optional. Either `None`, which means the default precision for\n the backend, or a `Precision` enum value.\n\n Returns:\n An array containing the product.\n \"\"\"\n if 1 <= lhs.ndim <= 2 and 1 <= rhs.ndim <= 2 and lhs.shape[-1] == rhs.shape[0]:\n return dot_general(lhs, rhs, (((lhs.ndim - 1,), (0,)), ((), ())),\n precision=precision)\n else:\n raise TypeError(\"Incompatible shapes for dot: got {} and {}.\".format(\n lhs.shape, rhs.shape))\n\n\nDotDimensionNumbers = Tuple[Tuple[Sequence[int], Sequence[int]],\n Tuple[Sequence[int], Sequence[int]]]\n\ndef dot_general(lhs: Array, rhs: Array, dimension_numbers: DotDimensionNumbers,\n precision: Optional[PrecisionType] = None) -> Array:\n \"\"\"More general contraction operator.\n\n Wraps XLA's `DotGeneral\n <https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`_\n operator.\n\n Args:\n lhs: an array\n rhs: an array\n dimension_numbers: a tuple of tuples of the form\n `((lhs_contracting_dims, rhs_contracting_dims),\n (lhs_batch_dims, rhs_batch_dims))`\n precision: Optional. 
Either `None`, which means the default precision for\n the backend, or a `Precision` enum value.\n\n Returns:\n An array containing the result.\n \"\"\"\n contract_dims_seq, batch_dims_seq = dimension_numbers\n contract_dims = tuple(map(lambda x: tuple(x), contract_dims_seq))\n batch_dims = tuple(map(lambda x: tuple(x), batch_dims_seq))\n if not dtypes.issubdtype(lhs.dtype, onp.inexact):\n # TODO(b/134526360): XLA doesn't support bool or integer dots, so we emit a\n # sum of products instead.\n lhs_contract_dims, rhs_contract_dims = contract_dims\n lhs_batch_dims, rhs_batch_dims = batch_dims\n lhs_noncontract_dims = tuple(sorted(\n set(range(onp.ndim(lhs))) - set(lhs_batch_dims) - set(lhs_contract_dims)))\n rhs_noncontract_dims = tuple(sorted(\n set(range(onp.ndim(rhs))) - set(rhs_batch_dims) - set(rhs_contract_dims)))\n lhs = transpose(lhs,\n lhs_batch_dims + lhs_noncontract_dims + lhs_contract_dims)\n rhs = transpose(rhs,\n rhs_batch_dims + rhs_noncontract_dims + rhs_contract_dims)\n new_lhs_shape = onp.insert(onp.array(onp.shape(lhs), dtype=onp.int64),\n len(lhs_batch_dims) + len(lhs_noncontract_dims),\n (1,) * len(rhs_noncontract_dims))\n new_rhs_shape = onp.insert(onp.array(onp.shape(rhs), dtype=onp.int64),\n len(lhs_batch_dims),\n (1,) * len(lhs_noncontract_dims))\n lhs = reshape(lhs, new_lhs_shape)\n rhs = reshape(rhs, new_rhs_shape)\n out_ndim = (len(lhs_batch_dims) + len(lhs_noncontract_dims) +\n len(rhs_noncontract_dims))\n op_product = bitwise_and if lhs.dtype == onp.bool_ else mul\n op_sum = bitwise_or if lhs.dtype == onp.bool_ else add\n return reduce(op_product(lhs, rhs), _zero(lhs), op_sum,\n tuple(range(out_ndim, out_ndim + len(lhs_contract_dims))))\n\n return dot_general_p.bind(lhs, rhs,\n dimension_numbers=(contract_dims, batch_dims),\n precision=_canonicalize_precision(precision))\n\ndef broadcast(operand: Array, sizes: Sequence[int]) -> Array:\n \"\"\"Broadcasts an array, adding new major dimensions.\n\n Wraps XLA's `Broadcast\n <https://www.tensorflow.org/xla/operation_semantics#broadcast>`_\n operator.\n\n Args:\n operand: an array\n sizes: a sequence of integers, giving the sizes of new major dimensions\n to add.\n\n Returns:\n An array containing the result.\n \"\"\"\n dims = tuple(range(len(sizes), len(sizes) + onp.ndim(operand)))\n return broadcast_in_dim(operand, tuple(sizes) + onp.shape(operand), dims)\n\ndef broadcast_in_dim(operand: Array, shape: Shape,\n broadcast_dimensions: Sequence[int]) -> Array:\n \"\"\"Wraps XLA's `BroadcastInDim\n <https://www.tensorflow.org/xla/operation_semantics#broadcastindim>`_\n operator.\n \"\"\"\n shape = _broadcast_in_dim_shape_rule(\n operand, shape=shape, broadcast_dimensions=broadcast_dimensions)\n if onp.ndim(operand) == len(shape) and not len(broadcast_dimensions):\n return operand\n return broadcast_in_dim_p.bind(\n operand, shape=tuple(shape),\n broadcast_dimensions=tuple(broadcast_dimensions))\n\ndef broadcast_to_rank(x: Array, rank: int) -> Array:\n \"\"\"Adds leading dimensions of ``1`` to give ``x`` rank ``rank``.\"\"\"\n return broadcast(x, (1,) * (rank - x.ndim))\n\ndef reshape(operand: Array, new_sizes: Shape,\n dimensions: Optional[Sequence[int]] = None) -> Array:\n \"\"\"Wraps XLA's `Reshape\n <https://www.tensorflow.org/xla/operation_semantics#reshape>`_\n operator.\n \"\"\"\n new_sizes = canonicalize_shape(new_sizes) # TODO\n new_sizes = tuple(new_sizes)\n same_shape = onp.shape(operand) == new_sizes\n same_dims = dimensions is None or tuple(dimensions) == tuple(range(onp.ndim(operand)))\n if 
onp.shape(operand) and same_shape and same_dims:\n return operand\n else:\n return reshape_p.bind(\n operand, new_sizes=new_sizes,\n dimensions=None if dimensions is None or same_dims else tuple(dimensions))\n\ndef pad(operand: Array, padding_value: Array,\n padding_config: Sequence[Tuple[int, int, int]]) -> Array:\n \"\"\"Wraps XLA's `Pad\n <https://www.tensorflow.org/xla/operation_semantics#pad>`_\n operator.\n \"\"\"\n return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config))\n\ndef rev(operand: Array, dimensions: Sequence[int]) -> Array:\n \"\"\"Wraps XLA's `Rev\n <https://www.tensorflow.org/xla/operation_semantics#rev_reverse>`_\n operator.\n \"\"\"\n return rev_p.bind(operand, dimensions=tuple(dimensions))\n\ndef select(pred: Array, on_true: Array, on_false: Array) -> Array:\n \"\"\"Wraps XLA's `Select\n <https://www.tensorflow.org/xla/operation_semantics#select>`_\n operator.\n \"\"\"\n return select_p.bind(pred, on_true, on_false)\n\ndef slice(operand: Array, start_indices: Sequence[int],\n limit_indices: Sequence[int],\n strides: Optional[Sequence[int]] = None) -> Array:\n \"\"\"Wraps XLA's `Slice\n <https://www.tensorflow.org/xla/operation_semantics#slice>`_\n operator.\n \"\"\"\n if (onp.all(onp.equal(start_indices, 0))\n and onp.all(onp.equal(limit_indices, operand.shape))\n and strides is None):\n return operand\n else:\n return slice_p.bind(operand, start_indices=tuple(start_indices),\n limit_indices=tuple(limit_indices),\n strides=None if strides is None else tuple(strides))\n\ndef dynamic_slice(operand: Array, start_indices: Sequence[Array],\n slice_sizes: Shape) -> Array:\n \"\"\"Wraps XLA's `DynamicSlice\n <https://www.tensorflow.org/xla/operation_semantics#dynamicslice>`_\n operator.\n\n Args:\n operand: an array to slice.\n start_indices: a list of scalar indices, one per dimension.\n slice_sizes: the size of the slice. Must be a sequence of non-negative\n integers with length equal to `ndim(operand)`.\n\n Returns:\n An array containing the slice.\n \"\"\"\n start_indices = _dynamic_slice_indices(operand, start_indices)\n return dynamic_slice_p.bind(operand, *start_indices,\n slice_sizes=tuple(slice_sizes))\n\ndef dynamic_update_slice(operand: Array, update: Array,\n start_indices: Array) -> Array:\n \"\"\"Wraps XLA's `DynamicUpdateSlice\n <https://www.tensorflow.org/xla/operation_semantics#dynamicupdateslice>`_\n operator.\n\n Args:\n operand: an array to slice.\n update: an array containing the new values to write onto `operand`.\n start_indices: a list of scalar indices, one per dimension.\n\n Returns:\n An array containing the slice.\n \"\"\"\n start_indices = _dynamic_slice_indices(operand, start_indices)\n return dynamic_update_slice_p.bind(operand, update, *start_indices)\n\n\nclass GatherDimensionNumbers(NamedTuple):\n \"\"\"\n Describes the dimension number arguments to an `XLA's Gather operator\n <https://www.tensorflow.org/xla/operation_semantics#gather>`_. See the XLA\n documentation for more details of what the dimension numbers mean.\n\n Args:\n offset_dims: the set of dimensions in the `gather` output that offset into\n an array sliced from `operand`. Must be a tuple of integers in ascending\n order, each representing a dimension number of the output.\n collapsed_slice_dims: the set of dimensions `i` in `operand` that have\n `slice_sizes[i] == 1` and that should not have a corresponding dimension\n in the output of the gather. 
Must be a tuple of integers in ascending\n order.\n start_index_map: for each dimension in `start_indices`, gives the\n corresponding dimension in `operand` that is to be sliced. Must be a\n tuple of integers with size equal to `start_indices.shape[-1]`.\n\n Unlike XLA's `GatherDimensionNumbers` structure, `index_vector_dim` is\n implicit; there is always an index vector dimension and it must always be the\n last dimension. To gather scalar indices, add a trailing dimension of size 1.\n \"\"\"\n offset_dims: Sequence[int]\n collapsed_slice_dims: Sequence[int]\n start_index_map: Sequence[int]\n\n\ndef gather(operand: Array, start_indices: Array,\n dimension_numbers: GatherDimensionNumbers,\n slice_sizes: Shape) -> Array:\n \"\"\"Gather operator.\n\n Wraps `XLA's Gather operator\n <https://www.tensorflow.org/xla/operation_semantics#gather>`_.\n\n The semantics of gather are complicated, and its API might change in the\n future. For most use cases, you should prefer `Numpy-style indexing\n <https://docs.scipy.org/doc/numpy-1.16.0/reference/arrays.indexing.html>`_\n (e.g., `x[:, (1,4,7), ...]`), rather than using `gather` directly.\n\n Args:\n operand: an array from which slices should be taken\n start_indices: the indices at which slices should be taken\n dimension_numbers: a `lax.GatherDimensionNumbers` object that describes\n how dimensions of `operand`, `start_indices` and the output relate.\n slice_sizes: the size of each slice. Must be a sequence of non-negative\n integers with length equal to `ndim(operand)`.\n\n Returns:\n An array containing the gather output.\n \"\"\"\n return gather_p.bind(\n operand, start_indices, dimension_numbers=dimension_numbers,\n slice_sizes=canonicalize_shape(slice_sizes))\n\n\nclass ScatterDimensionNumbers(NamedTuple):\n \"\"\"\n Describes the dimension number arguments to an `XLA's Scatter operator\n <https://www.tensorflow.org/xla/operation_semantics#scatter>`_. See the XLA\n documentation for more details of what the dimension numbers mean.\n\n Args:\n update_window_dims: the set of dimensions in the `updates` that are window\n dimensions. Must be a tuple of integers in ascending\n order, each representing a dimension number.\n inserted_window_dims: the set of size 1 window dimensions that must be inserted\n into the shape of `updates`. Must be a tuple of integers in ascending\n order, each representing a dimension number of the output. These are the\n mirror image of `collapsed_slice_dims` in the case of `gather`.\n scatter_dims_to_operand_dims: for each dimension in `scatter_indices`, gives\n the corresponding dimension in `operand`. Must be a sequence of integers\n with size equal to indices.shape[-1].\n\n Unlike XLA's `ScatterDimensionNumbers` structure, `index_vector_dim` is\n implicit; there is always an index vector dimension and it must always be the\n last dimension. 
To scatter scalar indices, add a trailing dimension of size 1.\n \"\"\"\n update_window_dims: Sequence[int]\n inserted_window_dims: Sequence[int]\n scatter_dims_to_operand_dims: Sequence[int]\n\ndef scatter_add(operand: Array, scatter_indices: Array, updates: Array,\n dimension_numbers: ScatterDimensionNumbers) -> Array:\n \"\"\"Scatter-add operator.\n\n Wraps `XLA's Scatter operator\n <https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where\n addition is used to combine updates and values from `operand`.\n\n The semantics of scatter are complicated and its API is subject to change.\n\n Args:\n operand: an array to which the scatter should be applied\n scatter_indices: an array that gives the indices in `operand` to which each\n update in `updates` should be applied.\n updates: the updates that should be scattered onto `operand`.\n dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes\n how dimensions of `operand`, `start_indices`, `updates` and the output\n relate.\n\n Returns:\n An array containing the sum of `operand` and the scattered updates.\n \"\"\"\n jaxpr, consts = _reduction_jaxpr(add, _abstractify(_const(operand, 0)))\n return scatter_add_p.bind(\n operand, scatter_indices, updates, update_jaxpr=jaxpr,\n update_consts=consts, dimension_numbers=dimension_numbers)\n\ndef scatter_mul(operand: Array, scatter_indices: Array, updates: Array,\n dimension_numbers: ScatterDimensionNumbers) -> Array:\n \"\"\"Scatter-multiply operator.\n\n Wraps `XLA's Scatter operator\n <https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where\n multiplication is used to combine updates and values from `operand`.\n\n The semantics of scatter are complicated and its API is subject to change.\n\n Args:\n operand: an array to which the scatter should be applied\n scatter_indices: an array that gives the indices in `operand` to which each\n update in `updates` should be applied.\n updates: the updates that should be scattered onto `operand`.\n dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes\n how dimensions of `operand`, `start_indices`, `updates` and the output\n relate.\n\n Returns:\n An array containing the sum of `operand` and the scattered updates.\n \"\"\"\n jaxpr, consts = _reduction_jaxpr(mul, _abstractify(_const(operand, 1)))\n return scatter_mul_p.bind(\n operand, scatter_indices, updates, update_jaxpr=jaxpr,\n update_consts=consts, dimension_numbers=dimension_numbers)\n\ndef scatter_min(operand: Array, scatter_indices: Array, updates: Array,\n dimension_numbers: ScatterDimensionNumbers) -> Array:\n \"\"\"Scatter-min operator.\n\n Wraps `XLA's Scatter operator\n <https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where\n the `min` function is used to combine updates and values from `operand`.\n\n The semantics of scatter are complicated and its API is subject to change.\n\n Args:\n operand: an array to which the scatter should be applied\n scatter_indices: an array that gives the indices in `operand` to which each\n update in `updates` should be applied.\n updates: the updates that should be scattered onto `operand`.\n dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes\n how dimensions of `operand`, `start_indices`, `updates` and the output\n relate.\n\n Returns:\n An array containing the sum of `operand` and the scattered updates.\n \"\"\"\n jaxpr, consts = _reduction_jaxpr(min, _abstractify(_const(operand, 0)))\n return scatter_min_p.bind(\n operand, scatter_indices, updates, 
update_jaxpr=jaxpr,\n update_consts=consts, dimension_numbers=dimension_numbers)\n\ndef scatter_max(operand: Array, scatter_indices: Array, updates: Array,\n dimension_numbers: ScatterDimensionNumbers) -> Array:\n \"\"\"Scatter-max operator.\n\n Wraps `XLA's Scatter operator\n <https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where\n the `max` function is used to combine updates and values from `operand`.\n\n The semantics of scatter are complicated and its API is subject to change.\n\n Args:\n operand: an array to which the scatter should be applied\n scatter_indices: an array that gives the indices in `operand` to which each\n update in `updates` should be applied.\n updates: the updates that should be scattered onto `operand`.\n dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes\n how dimensions of `operand`, `start_indices`, `updates` and the output\n relate.\n\n Returns:\n An array containing the sum of `operand` and the scattered updates.\n \"\"\"\n jaxpr, consts = _reduction_jaxpr(max, _abstractify(_const(operand, 0)))\n return scatter_max_p.bind(\n operand, scatter_indices, updates, update_jaxpr=jaxpr,\n update_consts=consts, dimension_numbers=dimension_numbers)\n\n# Define this outside of scatter to ensure cache hits.\n_scatter_reduction_computation = lambda x, y: y\n\ndef scatter(operand: Array, scatter_indices:Array, updates: Array,\n dimension_numbers: ScatterDimensionNumbers) -> Array:\n \"\"\"Scatter-update operator.\n\n Wraps `XLA's Scatter operator\n <https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where updates\n replace values from `operand`.\n\n If multiple updates are performed to the same index of operand, they may be\n applied in any order.\n\n The semantics of scatter are complicated and its API is subject to change.\n\n Args:\n operand: an array to which the scatter should be applied\n scatter_indices: an array that gives the indices in `operand` to which each\n update in `updates` should be applied.\n updates: the updates that should be scattered onto `operand`.\n dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes\n how dimensions of `operand`, `start_indices`, `updates` and the output\n relate.\n\n Returns:\n An array containing the sum of `operand` and the scattered updates.\n \"\"\"\n jaxpr, consts = _reduction_jaxpr(_scatter_reduction_computation,\n _abstractify(_const(operand, 0)))\n return scatter_p.bind(\n operand, scatter_indices, updates, update_jaxpr=jaxpr,\n update_consts=consts, dimension_numbers=dimension_numbers)\n\ndef index_take(src: Array, idxs: Array, axes: Sequence[int]) -> Array:\n indices = concatenate([reshape(i, [i.shape[0], 1]) for i in idxs], 1)\n indices = indices % onp.array([src.shape[ax] for ax in axes])\n slice_sizes = list(src.shape)\n for ax in axes:\n slice_sizes[ax] = 1\n offset_dims = tuple(range(1, src.ndim - indices.shape[1] + 1))\n dnums = GatherDimensionNumbers(\n offset_dims=offset_dims,\n collapsed_slice_dims=axes,\n start_index_map=axes)\n return gather(src, indices, dimension_numbers=dnums,\n slice_sizes=tuple(slice_sizes))\n\ndef transpose(operand: Array, permutation: Sequence[int]) -> Array:\n \"\"\"Wraps XLA's `Transpose\n <https://www.tensorflow.org/xla/operation_semantics#transpose>`_\n operator.\n \"\"\"\n permutation = tuple(permutation)\n if permutation == tuple(range(len(permutation))):\n return operand\n else:\n return transpose_p.bind(operand, permutation=permutation)\n\ndef reduce(operand: Array, init_value: Array, computation: Callable,\n 
dimensions: Sequence[int]) -> Array:\n \"\"\"Wraps XLA's `Reduce\n <https://www.tensorflow.org/xla/operation_semantics#reduce>`_\n operator.\n \"\"\"\n monoid_reducer = _get_monoid_reducer(computation, init_value)\n if monoid_reducer:\n return monoid_reducer(operand, dimensions)\n else:\n jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))\n return reduce_p.bind(operand, init_value, computation=computation,\n jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))\n\n@cache()\ndef _reduction_jaxpr(computation, aval):\n pval = pe.PartialVal.unknown(aval)\n comp = lu.wrap_init(lambda x, y: (computation(x, y),))\n jaxpr, _, consts = pe.trace_to_jaxpr(comp, (pval, pval), instantiate=False)\n return jaxpr, consts\n\ndef _get_monoid_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:\n aval = core.get_aval(x)\n dtype = _dtype(x)\n if (type(aval) is ConcreteArray) and aval.shape == ():\n if monoid_op is add:\n return aval.val == 0 and _reduce_sum\n if monoid_op is mul:\n return aval.val == 1 and _reduce_prod\n elif monoid_op is bitwise_or and dtype == onp.bool_:\n return aval.val == _get_max_identity(dtype) and _reduce_or\n elif monoid_op is bitwise_and and dtype == onp.bool_:\n return aval.val == _get_min_identity(dtype) and _reduce_and\n elif monoid_op is max:\n return aval.val == _get_max_identity(dtype) and _reduce_max\n elif monoid_op is min:\n return aval.val == _get_min_identity(dtype) and _reduce_min\n return None\n\ndef _get_max_identity(dtype: DType) -> Array:\n if dtypes.issubdtype(dtype, onp.inexact):\n return onp.array(-onp.inf, dtype)\n elif dtypes.issubdtype(dtype, onp.integer):\n return onp.array(dtypes.iinfo(dtype).min, dtype)\n elif dtypes.issubdtype(dtype, onp.bool_):\n return onp.array(False, onp.bool_)\n\ndef _get_min_identity(dtype: DType) -> Array:\n if dtypes.issubdtype(dtype, onp.inexact):\n return onp.array(onp.inf, dtype)\n elif dtypes.issubdtype(dtype, onp.integer):\n return onp.array(dtypes.iinfo(dtype).max, dtype)\n elif dtypes.issubdtype(dtype, onp.bool_):\n return onp.array(True, onp.bool_)\n\ndef _reduce_sum(operand: Array, axes: Sequence[int]) -> Array:\n return reduce_sum_p.bind(operand, axes=tuple(axes))\n\ndef _reduce_prod(operand: Array, axes: Sequence[int]) -> Array:\n return reduce_prod_p.bind(operand, axes=tuple(axes))\n\ndef _reduce_max(operand: Array, axes: Sequence[int]) -> Array:\n return reduce_max_p.bind(operand, axes=tuple(axes))\n\ndef _reduce_min(operand: Array, axes: Sequence[int]) -> Array:\n return reduce_min_p.bind(operand, axes=tuple(axes))\n\ndef _reduce_or(operand: Array, axes: Sequence[int]) -> Array:\n return reduce_or_p.bind(operand, axes=tuple(axes))\n\ndef _reduce_and(operand: Array, axes: Sequence[int]) -> Array:\n return reduce_and_p.bind(operand, axes=tuple(axes))\n\ndef reduce_window(operand: Array, init_value: Array, computation: Callable,\n window_dimensions: Shape, window_strides: Sequence[int],\n padding: str) -> Array:\n \"\"\"Wraps XLA's `ReduceWindow\n <https://www.tensorflow.org/xla/operation_semantics#reducewindow>`_\n operator.\n \"\"\"\n monoid_reducer = _get_monoid_window_reducer(computation, init_value)\n if monoid_reducer:\n return monoid_reducer(operand, window_dimensions, window_strides, padding)\n else:\n jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))\n return reduce_window_p.bind(\n operand, init_value, jaxpr=jaxpr, consts=consts,\n window_dimensions=tuple(window_dimensions),\n window_strides=tuple(window_strides), padding=padding)\n\ndef 
_get_monoid_window_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:\n aval = core.get_aval(x)\n if (type(aval) is ConcreteArray) and aval.shape == ():\n if monoid_op is add:\n return aval.val == 0 and _reduce_window_sum\n elif monoid_op is max:\n return aval.val == _get_max_identity(aval.dtype) and _reduce_window_max\n elif monoid_op is min:\n return aval.val == _get_min_identity(aval.dtype) and _reduce_window_min\n return None\n\ndef _reduce_window_sum(operand: Array, window_dimensions: Shape,\n window_strides: Sequence[int], padding: str) -> Array:\n return reduce_window_sum_p.bind(\n operand, window_dimensions=tuple(window_dimensions),\n window_strides=tuple(window_strides), padding=padding)\n\ndef _reduce_window_prod(operand: Array, window_dimensions: Shape,\n window_strides: Sequence[int], padding: str) -> Array:\n init_value = _const(operand, 1)\n jaxpr, consts = _reduction_jaxpr(mul, _abstractify(init_value))\n return reduce_window_p.bind(\n operand, init_value, jaxpr=jaxpr, consts=consts,\n window_dimensions=tuple(window_dimensions),\n window_strides=tuple(window_strides), padding=padding)\n\ndef _reduce_window_max(operand: Array, window_dimensions: Shape,\n window_strides: Sequence[int], padding: str) -> Array:\n return reduce_window_max_p.bind(\n operand, window_dimensions=tuple(window_dimensions),\n window_strides=tuple(window_strides), padding=padding)\n\ndef _reduce_window_min(operand: Array, window_dimensions: Shape,\n window_strides: Sequence[int], padding: str) -> Array:\n return reduce_window_min_p.bind(\n operand, window_dimensions=tuple(window_dimensions),\n window_strides=tuple(window_strides), padding=padding)\n\ndef _select_and_scatter(operand: Array, select: Callable,\n window_dimensions: Shape, window_strides: Sequence[int],\n padding: str, source: Array, init_value: Array,\n scatter: Callable) -> Array:\n select_jaxpr, select_consts = _reduction_jaxpr(select, _abstractify(init_value))\n scatter_jaxpr, scatter_consts = _reduction_jaxpr(scatter, _abstractify(init_value))\n return select_and_scatter_p.bind(\n operand, source, init_value, select_jaxpr=select_jaxpr,\n select_consts=select_consts, scatter_jaxpr=scatter_jaxpr,\n scatter_consts=scatter_consts, window_dimensions=tuple(window_dimensions),\n window_strides=tuple(window_strides), padding=padding)\n\ndef _select_and_scatter_add(source: Array, operand: Array,\n select_prim: core.Primitive,\n window_dimensions: Shape,\n window_strides: Sequence[int],\n padding: str) -> Array:\n return select_and_scatter_add_p.bind(\n source, operand, select_prim=select_prim,\n window_dimensions=tuple(window_dimensions),\n window_strides=tuple(window_strides), padding=padding)\n\ndef _select_and_gather_add(tangents: Array, operand: Array,\n select_prim: core.Primitive,\n window_dimensions: Shape,\n window_strides: Sequence[int],\n padding: str) -> Array:\n return select_and_gather_add_p.bind(\n tangents, operand, select_prim=select_prim,\n window_dimensions=tuple(window_dimensions),\n window_strides=tuple(window_strides), padding=padding)\n\ndef cumsum(operand: Array, axis: int) -> Array:\n \"\"\"Computes a cumulative sum along `axis`.\"\"\"\n return cumsum_p.bind(operand, axis=int(axis))\n\ndef cumprod(operand: Array, axis: int) -> Array:\n \"\"\"Computes a cumulative product along `axis`.\"\"\"\n return cumprod_p.bind(operand, axis=int(axis))\n\ndef sort(operand: Union[Array, Tuple[Array, ...]], dimension: int = -1\n ) -> Union[Array, Tuple[Array, ...]]:\n \"\"\"Wraps XLA's `Sort\n 
<https://www.tensorflow.org/xla/operation_semantics#sort>`_\n operator.\n \"\"\"\n if isinstance(operand, tuple):\n if len(operand) == 0:\n raise TypeError(\"Sort requires at least one operand\")\n dimension = _canonicalize_axis(dimension, len(operand[0].shape))\n return tuple(sort_p.bind(*operand, dimension=dimension))\n else:\n dimension = _canonicalize_axis(dimension, len(operand.shape))\n return sort_p.bind(operand, dimension=dimension)[0]\n\ndef sort_key_val(keys: Array, values: Array,\n dimension: int = -1) -> Tuple[Array, Array]:\n \"\"\"Sorts ``keys`` along ``dimension`` and applies same permutation to ``values``.\"\"\"\n dimension = _canonicalize_axis(dimension, len(keys.shape))\n k, v = sort_p.bind(keys, values, dimension=dimension)\n return k, v\n\ndef top_k(operand: Array, k: int) -> Tuple[Array, Array]:\n \"\"\"Returns top ``k`` values and their indices along the last axis of ``operand``.\"\"\"\n k = int(k)\n if k < 0:\n raise ValueError(\"k argument to top_k must be nonnegative, got {}\".format(k))\n return top_k_p.bind(operand, k=k)\n\ndef tie_in(x: Array, y: Array) -> Array:\n \"\"\"Gives ``y`` a fake data dependence on ``x``.\n\n When staging to XLA (e.g. running under jit or pmap), values that don't depend\n on computation inputs are computed op-by-op, and folded into the XLA\n computation as constants.\n\n ``tie_in`` provides a way to explicitly stage values into the computation.\n When staging to XLA and ``x`` is already staged, then the result of ``tie_in``\n is ``y``, but staged to XLA. Downstream use of the result will also be staged\n to XLA.\n \"\"\"\n return tie_in_p.bind(x, y)\n\n\ndef full(shape: Shape, fill_value: Array, dtype: Optional[DType] = None) -> Array:\n \"\"\"Returns an array of `shape` filled with `fill_value`.\n\n Arguments:\n shape: sequence of integers, describing the shape of the output array.\n fill_value: the value to fill the new array with.\n dtype: the type of the output array, or `None`. 
If not `None`, `fill_value`\n will be cast to `dtype`.\n \"\"\"\n shape = canonicalize_shape(shape)\n if onp.shape(fill_value):\n msg = \"full must be called with scalar fill_value, got fill_value.shape {}.\"\n raise TypeError(msg.format(onp.shape(fill_value)))\n dtype = dtypes.canonicalize_dtype(dtype or _dtype(fill_value))\n # TODO(mattjj): remove device_put when dtype conversion produces DeviceArray\n fill_value = xla.device_put_p.bind(convert_element_type(fill_value, dtype))\n return broadcast(fill_value, shape)\n\ndef iota(dtype: DType, size: int) -> Array:\n \"\"\"Wraps XLA's `Iota\n <https://www.tensorflow.org/xla/operation_semantics#iota>`_\n operator.\n \"\"\"\n size = size if type(size) is masking.Poly else int(size)\n shape = canonicalize_shape((size,))\n dtype = dtypes.canonicalize_dtype(dtype)\n lazy_expr = lazy.iota(dtype, shape[0])\n aval = ShapedArray(shape, dtype)\n return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())\n\ndef broadcasted_iota(dtype: DType, shape: Shape, dimension: int) -> Array:\n \"\"\"Convenience wrapper around ``iota``.\"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = canonicalize_shape(shape)\n dimension = int(dimension)\n return broadcast_in_dim(iota(dtype, shape[dimension]), shape, [dimension])\n\ndef _eye(dtype: DType, shape: Shape, offset: int) -> Array:\n \"\"\"Like numpy.eye, create a 2D array with ones on a diagonal.\n\n This function exists for creating lazy identity matrices; that is,\n materialization of the array is delayed and it may be fused into consumers to\n avoid materialization at all.\"\"\"\n N, M = tuple(map(int, shape))\n offset = int(offset)\n dtype = dtypes.canonicalize_dtype(dtype)\n lazy_expr = lazy.eye(dtype, (N, M), offset)\n aval = ShapedArray((N, M), dtype)\n return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())\n\ndef _delta(dtype: DType, shape: Shape, axes: Sequence[int]) -> Array:\n \"\"\"This function exists for creating lazy Kronecker delta arrays, particularly\n for use in jax.numpy.einsum to express traces. It differs from ``eye`` in that\n it can create arrays of any rank, but doesn't allow offsets.\"\"\"\n shape = tuple(map(int, shape))\n axes = tuple(map(int, axes))\n dtype = dtypes.canonicalize_dtype(dtype)\n base_shape = tuple(onp.take(shape, axes))\n lazy_expr = lazy.broadcast(lazy.delta(dtype, base_shape), shape, axes)\n aval = ShapedArray(shape, dtype)\n return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())\n\ndef _tri(dtype: DType, shape: Shape, offset: int) -> Array:\n \"\"\"Like numpy.tri, create a 2D array with ones below a diagonal.\n This function exists for creating lazy triangular matrices, particularly for\n use in jax.numpy.tri.\"\"\"\n N, M = tuple(map(int, shape))\n offset = int(offset)\n dtype = dtypes.canonicalize_dtype(dtype)\n lazy_expr = lazy.tri(dtype, (N, M), offset)\n aval = ShapedArray((N, M), dtype)\n return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())\n\ndef stop_gradient(x):\n \"\"\"Stops gradient computation.\n\n Operationally `stop_gradient` is the identity function, that is, it returns\n argument `x` unchanged. However, `stop_gradient` prevents the flow of\n gradients during forward or reverse-mode automatic differentiation. 
If there
  are multiple nested gradient computations, `stop_gradient` stops gradients
  for all of them.

  For example:

  >>> jax.grad(lambda x: x**2)(3.)
  array(6., dtype=float32)
  >>> jax.grad(lambda x: jax.lax.stop_gradient(x)**2)(3.)
  array(0., dtype=float32)
  >>> jax.grad(jax.grad(lambda x: x**2))(3.)
  array(2., dtype=float32)
  >>> jax.grad(jax.grad(lambda x: jax.lax.stop_gradient(x)**2))(3.)
  array(0., dtype=float32)
  """
  return tree_map(ad_util.stop_gradient_p.bind, x)


### convenience wrappers around traceables


def conv(lhs: Array, rhs: Array, window_strides: Sequence[int],
         padding: str, precision: Optional[PrecisionType] = None) -> Array:
  """Convenience wrapper around `conv_general_dilated`.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    window_strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'` or the string `'VALID'`.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.

  Returns:
    An array containing the convolution result.
  """
  # `conv_general_dilated` accepts the padding string directly, so no explicit
  # conversion with `padtype_to_pads` is needed here.
  return conv_general_dilated(lhs, rhs, window_strides, padding,
                              precision=precision)
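
# Illustrative usage sketch, not part of the library API: with the default
# dimension_numbers, `conv` takes NCHW inputs and OIHW kernels (the defaults
# `conv_general_dilated` uses when none are given). The shapes below are
# arbitrary example values.
#
#   >>> import jax.numpy as jnp
#   >>> from jax import lax
#   >>> lhs = jnp.ones((8, 3, 32, 32))   # batch 8, 3 features, 32x32 spatial
#   >>> rhs = jnp.ones((16, 3, 3, 3))    # 16 output features, 3x3 kernel
#   >>> lax.conv(lhs, rhs, window_strides=(1, 1), padding='SAME').shape
#   (8, 16, 32, 32)
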
def conv_with_general_padding(lhs: Array, rhs: Array,
                              window_strides: Sequence[int],
                              padding: Union[str, Sequence[Tuple[int, int]]],
                              lhs_dilation: Optional[Sequence[int]],
                              rhs_dilation: Optional[Sequence[int]],
                              precision: Optional[PrecisionType] = None) -> Array:
  """Convenience wrapper around `conv_general_dilated`.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    window_strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
      `n` `(low, high)` integer pairs that give the padding to apply before and
      after each spatial dimension.
    lhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
      is also known as transposed convolution.
    rhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
      is also known as atrous convolution.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.

  Returns:
    An array containing the convolution result.
  """
  return conv_general_dilated(
      lhs, rhs, window_strides, padding, lhs_dilation=lhs_dilation,
      rhs_dilation=rhs_dilation, precision=precision)


def _conv_transpose_padding(k, s, padding):
  """Calculate before and after padding for a dim of transposed convolution.

  Args:
    k: int: kernel dimension.
    s: int: dimension stride value.
    padding: 'SAME' or 'VALID' padding mode for the original forward conv.

  Returns:
    2-tuple: ints: before and after padding for transposed convolution.
  """
  if padding == 'SAME':
    pad_len = k + s - 2
    if s > k - 1:
      pad_a = k - 1
    else:
      pad_a = int(onp.ceil(pad_len / 2))
  elif padding == 'VALID':
    pad_len = k + s - 2 + _max(k - s, 0)
    pad_a = k - 1
  else:
    raise ValueError('Padding mode must be `SAME` or `VALID`.')
  pad_b = pad_len - pad_a
  return pad_a, pad_b


def _flip_axes(x, axes):
  """Flip ndarray 'x' along each axis specified in axes tuple."""
  for axis in axes:
    x = onp.flip(x, axis)
  return x
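
# Worked example (illustrative numbers only): for a forward conv with kernel
# size k=3 and stride s=2,
#
#   _conv_transpose_padding(3, 2, 'SAME')    # pad_len = 3, returns (2, 1)
#   _conv_transpose_padding(3, 2, 'VALID')   # pad_len = 4, returns (2, 2)
#
# Combined with lhs_dilation=strides in `conv_transpose` below, this choice of
# padding makes the transposed convolution's output shape invert that of the
# corresponding forward convolution.
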
def conv_transpose(lhs: Array, rhs: Array, strides: Sequence[int],
                   padding: Union[str, Sequence[Tuple[int, int]]],
                   rhs_dilation: Optional[Sequence[int]] = None,
                   dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
                   transpose_kernel: bool = False,
                   precision: Optional[PrecisionType] = None) -> Array:
  """Convenience wrapper for calculating the N-d convolution "transpose".

  This function directly calculates a fractionally strided conv rather than
  indirectly calculating the gradient (transpose) of a forward convolution.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    strides: sequence of `n` integers, sets fractional stride.
    padding: 'SAME' or 'VALID', taken as the transpose of the corresponding
      forward conv's padding, or a sequence of `n` integer 2-tuples describing
      before-and-after padding for each `n` spatial dimension.
    rhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
      is also known as atrous convolution.
    dimension_numbers: tuple of dimension descriptors as in
      lax.conv_general_dilated. Defaults to tensorflow convention.
    transpose_kernel: if True flips spatial axes and swaps the input/output
      channel axes of the kernel. This makes the output of this function
      identical to the gradient-derived functions like
      keras.layers.Conv2DTranspose applied to the same kernel. For typical use
      in neural nets this is completely pointless and just makes input/output
      channel specification confusing.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.

  Returns:
    Transposed N-d convolution, with output padding following the conventions of
    keras.layers.Conv2DTranspose.
  """
  assert len(lhs.shape) == len(rhs.shape) and len(lhs.shape) > 2
  ndims = len(lhs.shape)
  one = (1,) * (ndims - 2)
  # Set dimensional layout defaults if not specified.
  if dimension_numbers is None:
    if ndims == 3:
      dimension_numbers = ('NHC', 'HIO', 'NHC')
    elif ndims == 4:
      dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
    elif ndims == 5:
      dimension_numbers = ('NHWDC', 'HWDIO', 'NHWDC')
    else:
      raise ValueError('No dimension_numbers defaults for 4+ spatial dims.')
  dn = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
  k_shape = onp.take(rhs.shape, dn.rhs_spec)
  k_sdims = k_shape[2:]
  # Calculate correct output shape given padding and strides.
  pads: Union[str, Sequence[Tuple[int, int]]]
  if padding in {'SAME', 'VALID'}:
    if rhs_dilation is None:
      rhs_dilation = (1,) * (rhs.ndim - 2)
    effective_k_size = map(lambda k, r: (k - 1) * r + 1, k_sdims, rhs_dilation)
    pads = [_conv_transpose_padding(k, s, padding)
            for k, s in zip(effective_k_size, strides)]
  else:
    pads = padding
  if transpose_kernel:
    # flip spatial dims and swap input / output channel axes
    rhs = _flip_axes(rhs, onp.array(dn.rhs_spec)[2:])
    rhs = onp.swapaxes(rhs, dn.rhs_spec[0], dn.rhs_spec[1])
  return conv_general_dilated(lhs, rhs, one, pads, strides, rhs_dilation, dn,
                              precision=precision)


def full_like(x: Array, fill_value: Array, dtype: Optional[DType] = None,
              shape: Optional[Shape] = None) -> Array:
  """Create a full array like np.full based on the example array `x`.

  Args:
    x: example array-like, used for shape and dtype information.
    fill_value: a scalar value to fill the entries of the output array.
    dtype: optional, a dtype parameter for the output ndarray.
    shape: optional, a shape parameter for the output ndarray.

  Returns:
    An ndarray with the same shape as `x` with its entries set equal to
    `fill_value`, similar to the output of np.full.
  """
  fill_shape = onp.shape(x) if shape is None else canonicalize_shape(shape)
  fill_value = tie_in(x, fill_value)
  return full(fill_shape, fill_value, dtype or _dtype(x))


def collapse(operand: Array, start_dimension: int, stop_dimension: int) -> Array:
  """Collapses dimensions `start_dimension` through `stop_dimension - 1` of
  `operand` into a single dimension."""
  lo, hi = start_dimension, stop_dimension
  size = prod(operand.shape[lo:hi])
  new_shape = operand.shape[:lo] + (size,) + operand.shape[hi:]
  return reshape(operand, new_shape)


def slice_in_dim(operand: Array, start_index: Optional[int],
                 limit_index: Optional[int],
                 stride: int = 1, axis: int = 0) -> Array:
  """Convenience wrapper around slice applying to only one dimension."""
  start_indices = [0] * operand.ndim
  limit_indices = list(operand.shape)
  strides = [1] * operand.ndim

  # translate `None`
  len_axis = operand.shape[axis]
  start_index_int = int(start_index) if start_index is not None else 0
  limit_index_int = int(limit_index) if limit_index is not None else len_axis

  # translate negative indices
  if start_index_int < 0:
    start_index_int = start_index_int + len_axis
  if limit_index_int < 0:
    limit_index_int = limit_index_int + len_axis

  axis = int(axis)
  start_indices[axis] = start_index_int
  limit_indices[axis] = limit_index_int
  strides[axis] = int(stride)

  return slice(operand, start_indices, limit_indices, strides)


def index_in_dim(operand: Array, index: int, axis: int = 0,
keepdims: bool = True) -> Array:\n \"\"\"Convenience wrapper around slice to perform int indexing.\"\"\"\n index, axis = int(index), int(axis)\n axis_size = operand.shape[axis]\n wrapped_index = index + axis_size if index < 0 else index\n if not 0 <= wrapped_index < axis_size:\n msg = 'index {} is out of bounds for axis {} with size {}'\n raise IndexError(msg.format(index, axis, axis_size))\n result = slice_in_dim(operand, wrapped_index, wrapped_index + 1, 1, axis)\n if keepdims:\n return result\n else:\n return reshape(result, onp.delete(operand.shape, axis))\n\n\ndef dynamic_slice_in_dim(operand: Array, start_index: Array,\n slice_size: int, axis: int = 0) -> Array:\n \"\"\"Convenience wrapper around dynamic_slice applying to one dimension.\"\"\"\n start_indices = [_zero(start_index)] * operand.ndim\n slice_sizes = list(operand.shape)\n\n axis = int(axis)\n start_indices[axis] = start_index\n slice_sizes[axis] = int(slice_size)\n return dynamic_slice(operand, start_indices, slice_sizes)\n\n\ndef dynamic_index_in_dim(operand: Array, index: Array, axis: int = 0,\n keepdims: bool = True) -> Array:\n \"\"\"Convenience wrapper around dynamic_slice to perform int indexing.\"\"\"\n result = dynamic_slice_in_dim(operand, index, 1, axis)\n if keepdims:\n return result\n else:\n return reshape(result, onp.delete(operand.shape, axis))\n\n\ndef dynamic_update_slice_in_dim(operand: Array, update: Array,\n start_index: Array, axis: int) -> Array:\n axis = int(axis)\n start_indices = [_zero(start_index)] * _ndim(operand)\n start_indices[axis] = start_index\n return dynamic_update_slice(operand, update, start_indices)\n\n\ndef dynamic_update_index_in_dim(operand: Array, update: Array, index: Array,\n axis: int) -> Array:\n axis = int(axis)\n if _ndim(update) != _ndim(operand):\n assert _ndim(update) + 1 == _ndim(operand)\n ax = axis % _ndim(operand)\n update = reshape(update, operand.shape[:ax] + (1,) + operand.shape[ax+1:])\n return dynamic_update_slice_in_dim(operand, update, index, axis)\n\n\ndef batch_matmul(lhs: Array, rhs: Array,\n precision: Optional[PrecisionType] = None) -> Array:\n \"\"\"Batch matrix multiplication.\"\"\"\n if _min(lhs.ndim, rhs.ndim) < 2:\n raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}'\n .format(lhs.ndim, rhs.ndim))\n if lhs.ndim != rhs.ndim:\n raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}'\n .format(lhs.ndim, rhs.ndim))\n lhs_contract = (lhs.ndim - 1,)\n rhs_contract = (rhs.ndim - 2,)\n batch = tuple(range(lhs.ndim - 2))\n return dot_general(lhs, rhs, ((lhs_contract, rhs_contract), (batch, batch)),\n precision=precision)\n\n\n# These functions also exist in the XLA client library, but we treat them\n# as non-primitive to maintain a smaller set of autodiff primitives.\n\ndef square(x: Array) -> Array:\n r\"\"\"Elementwise square: :math:`x^2`.\"\"\"\n return integer_pow(x, 2)\n\ndef reciprocal(x: Array) -> Array:\n r\"\"\"Elementwise reciprocal: :math:`1 \\over x`.\"\"\"\n return integer_pow(x, -1)\n\ndef _upcast_fp16_for_computation(f):\n @functools.wraps(f)\n def f_wrapped(x):\n dtype = _dtype(x)\n if dtype == onp.float16 or dtype == dtypes.bfloat16:\n return convert_element_type(\n f(convert_element_type(x, onp.float32)), dtype)\n return f(x)\n\n return f_wrapped\n\[email protected]\n@_upcast_fp16_for_computation\ndef tan(x: Array) -> Array:\n r\"\"\"Elementwise tangent: :math:`\\mathrm{tan}(x)`.\"\"\"\n return div(sin(x), cos(x))\n\[email protected]\ndef asin(x: Array) -> Array:\n r\"\"\"Elementwise arc sine: 
:math:`\\mathrm{asin}(x)`.\"\"\"\n return mul(_const(x, 2),\n atan2(x, add(_const(x, 1), sqrt(sub(_const(x, 1), square(x))))))\n\[email protected]\ndef acos(x: Array) -> Array:\n r\"\"\"Elementwise arc cosine: :math:`\\mathrm{acos}(x)`.\"\"\"\n return select(\n ne(x, _const(x, -1.0)),\n mul(_const(x, 2),\n atan2(sqrt(sub(_const(x, 1), square(x))), add(_const(x, 1), x))),\n full_like(x, onp.pi))\n\ndef atan(x: Array) -> Array:\n r\"\"\"Elementwise arc tangent: :math:`\\mathrm{atan}(x)`.\"\"\"\n return atan2(x, _const(x, 1))\n\ndef sinh(x: Array) -> Array:\n r\"\"\"Elementwise hyperbolic sine: :math:`\\mathrm{sinh}(x)`.\"\"\"\n return sinh_p.bind(x)\n\ndef cosh(x: Array) -> Array:\n r\"\"\"Elementwise hyperbolic cosine: :math:`\\mathrm{cosh}(x)`.\"\"\"\n return cosh_p.bind(x)\n\ndef asinh(x: Array) -> Array:\n r\"\"\"Elementwise inverse hyperbolic sine: :math:`\\mathrm{asinh}(x)`.\"\"\"\n return asinh_p.bind(x)\n\ndef acosh(x: Array) -> Array:\n r\"\"\"Elementwise inverse hyperbolic cosine: :math:`\\mathrm{acosh}(x)`.\"\"\"\n return acosh_p.bind(x)\n\ndef atanh(x: Array) -> Array:\n r\"\"\"Elementwise inverse hyperbolic tangent: :math:`\\mathrm{atanh}(x)`.\"\"\"\n return atanh_p.bind(x)\n\n\n# Add some methods to ShapedArray that rely on lax primitives\n\nShapedArray.broadcast = core.aval_method(broadcast)\nShapedArray.transpose = core.aval_method(transpose) # clobbered by lax_numpy\nShapedArray.reshape = core.aval_method(reshape) # clobbered by lax_numpy\n\ndef _iter(tracer):\n if tracer.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\") # same as numpy error\n else:\n n = tracer.shape[0]\n # return (index_in_dim(tracer, i, keepdims=False) for i in range(n))\n return iter([index_in_dim(tracer, i, keepdims=False) for i in range(n)])\nShapedArray._iter = staticmethod(_iter)\n\n# Add some ad handlers that use (or could use) lax primitives\n\ndef zeros_like_array(x):\n return full_like(x, 0)\n\nfor t in itertools.chain(dtypes.python_scalar_dtypes.keys(), array_types,\n [xla.DeviceArray, pxla.ShardedDeviceArray]):\n ad_util.jaxval_adders[t] = add\nad_util.jaxval_zeros_likers[xla.DeviceArray] = zeros_like_array\nad_util.jaxval_zeros_likers[pxla.ShardedDeviceArray] = zeros_like_array\n\n\n### primitives\n\n\n_input_dtype = lambda *args, **_: dtypes.canonicalize_dtype(args[0].dtype)\n_fixed_dtype = lambda dtype: lambda *args, **kwargs: dtypes.canonicalize_dtype(dtype)\n_complex_basetype = lambda dtype: onp.abs(onp.zeros((), dtype)).dtype\n\ndef standard_primitive(shape_rule, dtype_rule, name, translation_rule=None):\n prim = Primitive(name)\n prim.def_impl(partial(xla.apply_primitive, prim))\n prim.def_abstract_eval(partial(standard_abstract_eval, prim, shape_rule, dtype_rule))\n xla.translations[prim] = translation_rule or partial(standard_translate, name)\n return prim\n\n\ndef standard_abstract_eval(prim, shape_rule, dtype_rule, *args, **kwargs):\n assert all(isinstance(arg, UnshapedArray) for arg in args), args\n least_specialized = _max(\n map(type, args), key=operator.attrgetter('array_abstraction_level'))\n if least_specialized is ConcreteArray:\n return ConcreteArray(prim.impl(*[x.val for x in args], **kwargs))\n elif least_specialized is ShapedArray:\n return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))\n elif least_specialized is UnshapedArray:\n return UnshapedArray(dtype_rule(*args, **kwargs))\n else:\n raise TypeError(args, least_specialized)\n\n\ndef standard_translate(name, c, *args, **kwargs):\n xla_opname = ''.join(term.capitalize() for term in 
name.split('_'))\n return getattr(xops, xla_opname)(*args, **kwargs)\n\n\ndef unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs):\n if not any(dtypes.issubdtype(aval.dtype, t) for t in accepted_dtypes):\n msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.'\n typename = str(onp.dtype(aval.dtype).name)\n accepted_typenames = (t.__name__ for t in accepted_dtypes)\n raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames)))\n return result_dtype(aval.dtype)\n\n\ndef unop(result_dtype, accepted_dtypes, name, translation_rule=None):\n dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)\n prim = standard_primitive(_attrgetter('shape'), dtype_rule, name,\n translation_rule=translation_rule)\n batching.defvectorized(prim)\n masking.defvectorized(prim)\n return prim\nstandard_unop = partial(unop, _identity)\n_attrgetter = lambda name: lambda x, **kwargs: getattr(x, name)\n\n\ndef naryop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs):\n aval_dtypes = [aval.dtype for aval in avals]\n for i, (aval_dtype, types) in enumerate(zip(aval_dtypes, accepted_dtypes)):\n if not any(dtypes.issubdtype(aval_dtype, t) for t in types):\n msg = ('{} does not accept dtype {} at position {}. '\n 'Accepted dtypes at position {} are subtypes of {}.')\n typename = str(onp.dtype(aval_dtype).name)\n typenames = ', '.join(t.__name__ for t in types)\n raise TypeError(msg.format(name, typename, i, i, typenames))\n _check_same_dtypes(name, False, *aval_dtypes)\n return result_dtype(*avals)\n\n\ndef _broadcasting_shape_rule(name, *avals):\n shapes = onp.array([aval.shape for aval in avals if aval.shape])\n if not shapes.size:\n return ()\n if len({len(shape) for shape in shapes}) != 1:\n msg = '{} got arrays of different rank: {}.'\n raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))\n is_zero = onp.any(shapes == 0, axis=0)\n max_shape = onp.max(shapes, axis=0)\n result_shape = onp.where(is_zero, 0, max_shape)\n if not onp.all((shapes == result_shape) | (shapes == 1)):\n msg = '{} got incompatible shapes for broadcasting: {}.'\n raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))\n return tuple(result_shape)\n\n\ndef naryop(result_dtype, accepted_dtypes, name, translation_rule=None):\n dtype_rule = partial(naryop_dtype_rule, result_dtype, accepted_dtypes, name)\n shape_rule = partial(_broadcasting_shape_rule, name)\n prim = standard_primitive(shape_rule, dtype_rule, name,\n translation_rule=translation_rule)\n batching.defbroadcasting(prim)\n masking.defnaryop(prim)\n return prim\nstandard_naryop = partial(naryop, _input_dtype)\n\n\ndef _broadcast_translate(translate: Callable):\n # Decorator for translation rules which adds explicit broadcasting of\n # positional arguments. 
This is necessary only for a handful of primitives\n # whose XLA implementations do not support broadcasting.\n def _broadcast_array(array, array_shape, result_shape):\n if array_shape == result_shape:\n return array\n bcast_dims = tuple(range(len(result_shape) - len(array_shape),\n len(result_shape)))\n result = xops.BroadcastInDim(array, result_shape, bcast_dims)\n return result\n\n def _broadcasted_translation_rule(c, *args, **kwargs):\n shapes = [c.get_shape(arg).dimensions() for arg in args]\n result_shape = broadcast_shapes(*shapes)\n args = [_broadcast_array(arg, arg_shape, result_shape)\n for arg, arg_shape in zip(args, shapes)]\n return translate(c, *args, **kwargs)\n return _broadcasted_translation_rule\n\n# NOTE(mattjj): this isn't great for orchestrate fwd mode because it means JVPs\n# get two extra ops in them: a reshape and a broadcast_in_dim (or sometimes just\n# a broadcast). but saving the shape info with the primitives isn't great either\n# because then we can't trace these ops without shape data.\ndef _brcast(x, *others):\n # Used in jvprules to make naryop broadcasting explicit for transposability.\n # Requires shape info during jvp tracing, which isn't strictly necessary.\n # We don't need full numpy broadcasting, but otherwise the logic is the same\n # so we reuse the broadcast_shapes function after filtering out scalars.\n shapes = tuple(filter(None, map(onp.shape, (x,) + others)))\n shape = shapes and broadcast_shapes(*shapes)\n if onp.shape(x) != shape:\n return _brcast_to(x, shape)\n else:\n return x\n\n\ndef _brcast_to(x, shape):\n x_shape = onp.shape(x)\n assert x_shape != shape\n if x_shape:\n assert len(x_shape) == len(shape)\n broadcast_dimensions, = onp.where(onp.equal(x_shape, shape))\n squeezed_dimensions, = onp.where(onp.not_equal(x_shape, shape))\n inshape = onp.delete(x_shape, squeezed_dimensions)\n return broadcast_in_dim(reshape(x, inshape), shape, broadcast_dimensions)\n else:\n return broadcast(x, shape)\n\n\n_float = {onp.floating}\n_complex = {onp.complexfloating}\n_complex_elem_types = {onp.float32, onp.float64}\n_int = {onp.integer}\n_bool = {onp.bool_}\n\n_num = _int | _float | _complex\n_any = _int | _float | _complex | _bool\n_bool_or_int = _int | _bool\n\nneg_p = standard_unop(_num, 'neg')\nad.deflinear(neg_p, lambda t: [neg(t)])\n\ndef _sign_translation_rule(c, x):\n shape = c.get_shape(x)\n dtype = shape.numpy_dtype()\n if dtypes.issubdtype(dtype, onp.unsignedinteger):\n zero = xb.constant(c, onp.array(0, dtype=dtype))\n dims = c.get_shape(x).dimensions()\n return xops.Select(xops.Eq(x, zero), xops.Broadcast(zero, dims),\n xops.Broadcast(xb.constant(c, onp.array(1, dtype=dtype)),\n dims))\n return xops.Sign(x)\n\nsign_p = standard_unop(_num, 'sign', translation_rule=_sign_translation_rule)\nad.defjvp_zero(sign_p)\n\nnextafter_p = standard_naryop(\n [_float, _float], 'nextafter',\n translation_rule=lambda c, x1, x2: xops.NextAfter(x1, x2))\n\nfloor_p = standard_unop(_float, 'floor')\nad.defjvp_zero(floor_p)\n\nceil_p = standard_unop(_float, 'ceil')\nad.defjvp_zero(ceil_p)\n\nround_p = standard_unop(_float, 'round')\nad.defjvp_zero(round_p)\n\nis_finite_p = unop(_fixed_dtype(onp.bool_), _float, 'is_finite')\nad.defjvp_zero(is_finite_p)\n\nexp_p = standard_unop(_float | _complex, 'exp')\nad.defjvp2(exp_p, lambda g, ans, x: mul(g, ans))\n\nlog_p = standard_unop(_float | _complex, 'log')\nad.defjvp(log_p, lambda g, x: div(g, x))\n\nexpm1_p = standard_unop(_float | _complex, 'expm1')\nad.defjvp2(expm1_p, lambda g, ans, x: mul(g, add(ans, 
_one(ans))))\n\nlog1p_p = standard_unop(_float | _complex, 'log1p')\nad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x))))\n\ntanh_p = standard_unop(_float | _complex, 'tanh')\nad.defjvp2(tanh_p, lambda g, ans, x: mul(g, sub(_one(x), mul(ans, ans))))\n\nsin_p = standard_unop(_float | _complex, 'sin')\nad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))\n\ncos_p = standard_unop(_float | _complex, 'cos')\nad.defjvp(cos_p, lambda g, x: neg(mul(g, sin(x))))\n\natan2_p = standard_naryop([_float, _float], 'atan2')\nad.defjvp(atan2_p,\n lambda g, x, y: _brcast(g, y) * (y / (square(x) + square(y))),\n lambda g, x, y: _brcast(g, x) * -x / (square(x) + square(y)))\n\nsinh_p = standard_unop(_float | _complex, 'sinh')\nad.defjvp(sinh_p, lambda g, x: mul(g, cosh(x)))\n\ncosh_p = standard_unop(_float | _complex, 'cosh')\nad.defjvp(cosh_p, lambda g, x: mul(g, sinh(x)))\n\nasinh_p = standard_unop(_float | _complex, 'asinh')\nad.defjvp(asinh_p, lambda g, x: mul(g, rsqrt(square(x) + _one(x))))\n\nacosh_p = standard_unop(_float | _complex, 'acosh')\nad.defjvp(acosh_p,\n lambda g, x: mul(g, rsqrt((x - _one(x)) * (x + _one(x)))))\n\natanh_p = standard_unop(_float | _complex, 'atanh')\nad.defjvp(atanh_p,\n lambda g, x: mul(g, reciprocal((_one(x) - x) * (_one(x) + x))))\n\nregularized_incomplete_beta_p = standard_naryop(\n [_float, _float, _float], 'regularized_incomplete_beta',\n translation_rule=_broadcast_translate(\n partial(standard_translate, 'regularized_incomplete_beta')))\n\ndef betainc_gradx(g, a, b, x):\n lbeta = lgamma(a) + lgamma(b) - lgamma(a + b)\n partial_x = exp((b - 1) * log1p(-x) +\n (a - 1) * log(x) - lbeta)\n return partial_x * g\n\ndef betainc_grad_not_implemented(g, a, b, x):\n raise ValueError(\"Betainc gradient with respect to a and b not supported.\")\n\nad.defjvp(regularized_incomplete_beta_p,\n betainc_grad_not_implemented,\n betainc_grad_not_implemented,\n betainc_gradx)\n\nlgamma_p = standard_unop(_float, 'lgamma')\nad.defjvp(lgamma_p, lambda g, x: mul(g, digamma(x)))\n\ndigamma_p = standard_unop(_float, 'digamma')\n\nigamma_p = standard_naryop(\n [_float, _float], 'igamma',\n translation_rule=_broadcast_translate(partial(standard_translate, 'igamma')))\nigamma_grad_a_p = standard_naryop([_float, _float], 'igamma_grad_a',\n translation_rule=_broadcast_translate(partial(standard_translate,\n 'igamma_grad_a')))\n\ndef igamma_gradx(g, a, x):\n return _brcast(g, a, x) * exp(-x + (a - _ones(a)) * log(x) - lgamma(a))\n\ndef igamma_grada(g, a, x):\n return _brcast(g, a, x) * igamma_grad_a(a, x)\n\nad.defjvp(igamma_p, igamma_grada, igamma_gradx)\n\nigammac_p = standard_naryop(\n [_float, _float], 'igammac',\n translation_rule=_broadcast_translate(partial(standard_translate, 'igammac')))\n\ndef igammac_gradx(g, a, x):\n return -igamma_gradx(g, a, x)\n\ndef igammac_grada(g, a, x):\n return -igamma_grada(g, a, x)\n\nad.defjvp(igammac_p, igammac_grada, igammac_gradx)\n\nbessel_i0e_p = standard_unop(_float, 'bessel_i0e')\nad.defjvp2(bessel_i0e_p, lambda g, y, x: g * (bessel_i1e(x) - sign(x) * y))\n\nbessel_i1e_p = standard_unop(_float, 'bessel_i1e')\ndef _bessel_i1e_jvp(g, y, x):\n eps = dtypes.finfo(_dtype(x)).eps\n x_is_not_tiny = abs(x) > eps\n safe_x = select(x_is_not_tiny, x, full_like(x, eps))\n dy_dx = bessel_i0e(safe_x) - y * (sign(safe_x) + reciprocal(safe_x))\n dy_dx = select(x_is_not_tiny, dy_dx, full_like(x, 0.5))\n return g * dy_dx\nad.defjvp2(bessel_i1e_p, _bessel_i1e_jvp)\n\nerf_p = standard_unop(_float, 'erf')\nad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. 
/ onp.sqrt(onp.pi)),
                                 mul(g, exp(neg(square(x))))))

erfc_p = standard_unop(_float, 'erfc')
ad.defjvp(erfc_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
                                   mul(neg(g), exp(neg(square(x))))))

erf_inv_p = standard_unop(_float, 'erf_inv')
ad.defjvp2(erf_inv_p, lambda g, ans, x: mul(_const(x, onp.sqrt(onp.pi) / 2.),
                                            mul(g, exp(square(ans)))))

real_p = unop(_complex_basetype, _complex, 'real')
ad.deflinear(real_p, lambda t: [complex(t, onp.zeros((), _dtype(t)))])

imag_p = unop(_complex_basetype, _complex, 'imag')
ad.defjvp(imag_p, lambda g, _: real(mul(_const(g, -1j), g)))

_complex_dtype = lambda dtype, *args: (onp.zeros((), dtype) + onp.zeros((), onp.complex64)).dtype
complex_p = naryop(_complex_dtype, [_complex_elem_types, _complex_elem_types],
                   'complex')
ad.deflinear(complex_p, lambda t: [real(t), imag(neg(t))])

conj_p = unop(_complex_dtype, _complex_elem_types | _complex, 'conj')

def _conj_transpose_rule(t, x, *, input_dtype):
  assert ad.is_undefined_primal(x)
  if dtypes.issubdtype(input_dtype, onp.complexfloating):
    return [conj(t)]
  else:
    return [real(t)]

xla.translations[conj_p] = lambda c, x, **kwargs: xops.Conj(x)
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = _conj_transpose_rule

abs_p = unop(_complex_basetype, _num, 'abs')

def _abs_jvp_rule(g, ans, x):
  if _iscomplex(x):
    return _maybe_real(mul(g, div(_maybe_conj(x),
                                  _replace_zero(convert_element_type(ans, _dtype(x))))))
  else:
    return select(ge(x, _zero(x)), g, neg(g))
ad.defjvp2(abs_p, _abs_jvp_rule)
_maybe_conj = lambda x: conj(x) if _iscomplex(x) else x
_maybe_real = lambda x: real(x) if _iscomplex(x) else x

sqrt_p = standard_unop(_float | _complex, 'sqrt')
ad.defjvp2(sqrt_p, lambda g, ans, x: mul(g, div(_const(x, 0.5), ans)))

rsqrt_p = standard_unop(_float | _complex, 'rsqrt')
ad.defjvp2(rsqrt_p,
           lambda g, ans, x:
           mul(g, mul(_const(x, -0.5), pow(x, _const(x, -1.5)))))

pow_p = standard_naryop([_float | _complex, _float | _complex], 'pow')

def _pow_jvp_lhs(g, ans, x, y):
  jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))
  return mul(_brcast(g, y), jac)

def _pow_jvp_rhs(g, ans, x, y):
  return mul(_brcast(g, x), mul(log(_replace_zero(x)), ans))

ad.defjvp2(pow_p, _pow_jvp_lhs, _pow_jvp_rhs)


def _integer_pow_dtype_rule(x, *, y):
  dtype = unop_dtype_rule(_identity, _int | _float | _complex, 'integer_pow', x)
  if y < 0 and dtypes.issubdtype(dtype, onp.integer):
    raise TypeError("Integers cannot be raised to negative powers, got "
                    f"integer_pow({x}, {y})")
  return dtype

def _integer_pow_translation_rule(c, x, *, y):
  # Exponentiation by squaring: for y == 0 emit the constant 1; otherwise scan
  # the bits of |y|, squaring x at each step and multiplying the set bits into
  # the accumulator, taking a reciprocal at the end for negative exponents.
  if y == 0:
    shape = c.get_shape(x)
    return xb.constant(c, onp.array(1, dtype=shape.numpy_dtype()))
  is_reciprocal = y < 0
  if is_reciprocal:
    y = -y
  acc = None
  while y > 0:
    if y & 1:
      acc = x if acc is None else xops.Mul(acc, x)
    y >>= 1
    if y > 0:
      x = xops.Mul(x, x)
  return xops.Reciprocal(acc) if is_reciprocal else acc

def _integer_pow_jvp(g, x, *, y):
  return g if y == 0 else mul(g, mul(_const(x, y), integer_pow(x, y - 1)))

integer_pow_p = standard_primitive(
    _attrgetter('shape'), _integer_pow_dtype_rule, 'integer_pow',
    translation_rule=_integer_pow_translation_rule)
batching.defvectorized(integer_pow_p)
masking.defvectorized(integer_pow_p)
ad.defjvp(integer_pow_p, _integer_pow_jvp)
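
# Illustrative pure-Python sketch (not used anywhere in the library) of the
# exponentiation-by-squaring loop in `_integer_pow_translation_rule` above,
# operating on concrete numbers instead of staged XLA ops. The name
# `_integer_pow_reference` is hypothetical, introduced only for this example.
def _integer_pow_reference(x, y):
  if y == 0:
    return 1
  is_reciprocal = y < 0
  if is_reciprocal:
    y = -y
  acc = None
  while y > 0:
    if y & 1:    # this bit of y is set: multiply the current power of x in
      acc = x if acc is None else acc * x
    y >>= 1
    if y > 0:
      x = x * x  # square to get the next power-of-two power of x
  return 1 / acc if is_reciprocal else acc

# For example, _integer_pow_reference(3, 5) == 243, computed with three
# multiplications rather than the four a naive product would use.
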
_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)

not_p = standard_unop(_bool_or_int, 'not')

and_p = standard_naryop([_bool_or_int, _bool_or_int], 'and')
ad.defjvp_zero(and_p)

or_p = standard_naryop([_bool_or_int, _bool_or_int], 'or')
ad.defjvp_zero(or_p)

xor_p = standard_naryop([_bool_or_int, _bool_or_int], 'xor')
ad.defjvp_zero(xor_p)

population_count_p = standard_unop(_bool_or_int, 'population_count')

def _add_transpose(t, x, y):
  # The following linearity assertion is morally true, but because in some
  # cases we instantiate zeros for convenience, it doesn't always hold.
  # assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
  return [t, t]

add_p = standard_naryop([_num, _num], 'add')
ad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x))
ad.primitive_transposes[add_p] = _add_transpose


def _sub_transpose(t, x, y):
  # The following linearity assertion is morally true, but because in some
  # cases we instantiate zeros for convenience, it doesn't always hold.
  # assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
  return [t, neg(t) if t is not ad_util.zero else ad_util.zero]

sub_p = standard_naryop([_num, _num], 'sub')
ad.defjvp(sub_p,
          lambda g, x, y: _brcast(g, y),
          lambda g, x, y: _brcast(neg(g), x))
ad.primitive_transposes[sub_p] = _sub_transpose

mul_p = standard_naryop([_num, _num], 'mul')
ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)


def _div_transpose_rule(cotangent, x, y):
  assert ad.is_undefined_primal(x) and not ad.is_undefined_primal(y)
  res = ad_util.zero if cotangent is ad_util.zero else div(cotangent, y)
  return res, None
div_p = standard_naryop([_num, _num], 'div')
ad.defjvp(div_p,
          lambda g, x, y: div(_brcast(g, y), y),
          lambda g, x, y: mul(mul(neg(_brcast(g, x)), x), integer_pow(y, -2)))
ad.primitive_transposes[div_p] = _div_transpose_rule

rem_p = standard_naryop([_num, _num], 'rem')
ad.defjvp(rem_p,
          lambda g, x, y: _brcast(g, y),
          lambda g, x, y: mul(_brcast(neg(g), x), floor(div(x, y))))


def _broadcasting_select(c, which, x, y):
  """Wrapper around XLA `Select` that broadcasts its arguments."""
  which_shape, x_shape, y_shape = (
      c.get_shape(t).dimensions() for t in (which, x, y))
  out_shape = broadcast_shapes(which_shape, x_shape, y_shape)
  bcast_dims = lambda shape: tuple(range(len(out_shape) - len(shape),
                                         len(out_shape)))
  which = xops.BroadcastInDim(which, out_shape, bcast_dims(which_shape))
  x = xops.BroadcastInDim(x, out_shape, bcast_dims(x_shape))
  y = xops.BroadcastInDim(y, out_shape, bcast_dims(y_shape))
  return xops.Select(which, x, y)


def _minmax_translation_rule(c, x, y, *, minmax=None, cmp=None):
  # Complex numbers have no total order in XLA, so compare lexicographically:
  # by real part, breaking ties with the imaginary part.
  dtype = c.get_shape(x).numpy_dtype()
  if dtypes.issubdtype(dtype, onp.complexfloating):
    rx = xops.Real(x)
    ry = xops.Real(y)
    return _broadcasting_select(
        c, xops.Select(xops.Eq(rx, ry), cmp(xops.Imag(x), xops.Imag(y)),
                       cmp(rx, ry)),
        x, y)
  return minmax(x, y)

max_p = standard_naryop([_any, _any], 'max', translation_rule=partial(
    _minmax_translation_rule, minmax=xops.Max, cmp=xops.Gt))
ad.defjvp2(max_p,
           lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
           lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))

min_p = standard_naryop([_any, _any], 'min', translation_rule=partial(
    _minmax_translation_rule, minmax=xops.Min, cmp=xops.Lt))
ad.defjvp2(min_p,
           lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
           lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))

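# Illustrative note (a sketch, not library code): the `_balanced_eq` helper in
# the min/max JVPs above splits the (sub)gradient evenly at ties, so that, for
# example,
#
#   jax.grad(lambda x, y: lax.max(x, y), argnums=(0, 1))(1.0, 1.0)
#
# is expected to yield (0.5, 0.5) rather than sending the full cotangent to a
# single branch.
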
shift_left_p = standard_naryop([_int, _int], 'shift_left')
ad.defjvp_zero(shift_left_p)

shift_right_arithmetic_p = standard_naryop([_int, _int], 'shift_right_arithmetic')
ad.defjvp_zero(shift_right_arithmetic_p)

shift_right_logical_p = standard_naryop([_int, _int], 'shift_right_logical')
ad.defjvp_zero(shift_right_logical_p)

eq_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'eq')
ad.defjvp_zero(eq_p)

ne_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'ne')
ad.defjvp_zero(ne_p)

ge_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'ge')
ad.defjvp_zero(ge_p)

gt_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'gt')
ad.defjvp_zero(gt_p)

le_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'le')
ad.defjvp_zero(le_p)

lt_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'lt')
ad.defjvp_zero(lt_p)


def _convert_element_type_shape_rule(operand, *, new_dtype, old_dtype):
  return operand.shape

def _convert_element_type_dtype_rule(operand, *, new_dtype, old_dtype):
  return new_dtype

def _convert_element_type_translation_rule(c, operand, *, new_dtype, old_dtype):
  if (dtypes.issubdtype(old_dtype, onp.complexfloating) and
      not dtypes.issubdtype(new_dtype, onp.complexfloating)):
    operand = xops.Real(operand)
  new_etype = xla_client.dtype_to_etype(new_dtype)
  return xops.ConvertElementType(operand, new_element_type=new_etype)

def _convert_element_type_transpose_rule(t, *, new_dtype, old_dtype):
  assert t.dtype == new_dtype, (t.dtype, new_dtype)
  return [convert_element_type_p.bind(t, new_dtype=old_dtype,
                                      old_dtype=new_dtype)]

convert_element_type_p = standard_primitive(
    _convert_element_type_shape_rule, _convert_element_type_dtype_rule,
    'convert_element_type', _convert_element_type_translation_rule)
ad.deflinear(convert_element_type_p, _convert_element_type_transpose_rule)
batching.defvectorized(convert_element_type_p)
masking.defvectorized(convert_element_type_p)


def _bitcast_convert_type_shape_rule(operand, *, new_dtype):
  return operand.shape

def _bitcast_convert_type_dtype_rule(operand, *, new_dtype):
  return new_dtype

def _bitcast_convert_type_translation_rule(c, operand, *, new_dtype):
  new_etype = xla_client.dtype_to_etype(new_dtype)
  return xops.BitcastConvertType(operand, new_element_type=new_etype)

bitcast_convert_type_p = standard_primitive(
    _bitcast_convert_type_shape_rule, _bitcast_convert_type_dtype_rule,
    'bitcast_convert_type', _bitcast_convert_type_translation_rule)
ad.defjvp_zero(bitcast_convert_type_p)
batching.defvectorized(bitcast_convert_type_p)
masking.defvectorized(bitcast_convert_type_p)
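
# Illustrative sketch (a hypothetical helper, not part of the library): the
# shape rule below enforces these divisibility constraints for grouped
# convolutions. In the canonical specs, lhs has shape (N, C, ...) and rhs has
# shape (O, J, ...); feature groups require C == J * feature_group_count and
# feature_group_count | O, while batch groups require batch_group_count | N
# and batch_group_count | O, and at most one group count may exceed 1.
def _check_conv_group_counts(lhs_batch, lhs_features, rhs_out_features,
                             rhs_in_features, feature_group_count,
                             batch_group_count):
  assert lhs_features == rhs_in_features * feature_group_count
  assert rhs_out_features % feature_group_count == 0
  assert lhs_batch % batch_group_count == 0
  assert rhs_out_features % batch_group_count == 0
  assert feature_group_count == 1 or batch_group_count == 1

# For example, a depthwise 2D conv over 8 features, lhs (N, 8, H, W) and
# rhs (8, 1, kh, kw) with feature_group_count=8, satisfies these checks.
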
dimension \"\n \"size, but {} // {} != {}.\")\n raise ValueError(msg.format(lhs_feature_count, feature_group_count,\n rhs.shape[dimension_numbers.rhs_spec[1]]))\n if rhs.shape[dimension_numbers.rhs_spec[0]] % feature_group_count:\n msg = (\"conv_general_dilated rhs output feature dimension size must be a \"\n \"multiple of feature_group_count, but {} is not a multiple of {}.\")\n raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],\n feature_group_count))\n\n if not batch_group_count > 0:\n msg = (\"conv_general_dilated batch_group_count \"\n \"must be a positive integer, got {}.\")\n raise ValueError(msg.format(batch_group_count))\n lhs_batch_count = lhs.shape[dimension_numbers.lhs_spec[0]]\n if lhs_batch_count % batch_group_count != 0:\n msg = (\"conv_general_dilated batch_group_count must divide lhs batch \"\n \"dimension size, but {} does not divide {}.\")\n raise ValueError(msg.format(batch_group_count, lhs_batch_count))\n if rhs.shape[dimension_numbers.rhs_spec[0]] % feature_group_count:\n msg = (\"conv_general_dilated rhs output feature dimension size must be a \"\n \"multiple of batch_group_count, but {} is not a multiple of {}.\")\n raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],\n batch_ground_count))\n\n if not batch_group_count > 0 and feature_group_count > 0:\n msg = (\"At most one of batch_group_count and feature_group_count may be > \"\n \"1, got batch_group_count={} and feature_group_count={}\")\n raise ValueError(msg.format(batch_group_count, feature_group_count))\n\n lhs_perm, rhs_perm, out_perm = dimension_numbers\n lhs_trans = _dilate_shape(onp.take(lhs.shape, lhs_perm), lhs_dilation)\n rhs_trans = _dilate_shape(onp.take(rhs.shape, rhs_perm), rhs_dilation)\n out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding,\n batch_group_count)\n return tuple(onp.take(out_trans, onp.argsort(out_perm)))\n\ndef _conv_general_dilated_dtype_rule(\n lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,\n dimension_numbers, **unused_kwargs):\n return naryop_dtype_rule(_input_dtype, [_float, _float],\n 'conv_general_dilated', lhs, rhs)\n\n_conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]\n_conv_sdims = lambda spec: spec[2:]\n\n# Understanding the convolution transpose rules:\n# Ignoring the spatial dimensions, let m = batch, j = input feature,\n# k = output feature.\n#\n# Convolution computes the following contraction:\n# Forward: [m, j] [j, k] -> [m, k]\n#\n# The transposes are similar to the rules for transposing a matmul:\n# LHS transpose: [m, k] [k, j] -> [m, j]\n# RHS transpose: [j, m] [m, k] -> [j, k]\n#\n# With feature grouping, we have the following signatures:\n# Forward: [m, gj] [j, gk] -> [m, gk]\n# LHS transpose: [m, gk] [k, gj] -> [m, gj]\n# --> implemented as feature grouping after transposing the group from the\n# kernel input features to the kernel output features.\n# RHS transpose: [gj, m] [m, gk] -> [j, gk]\n# --> which is batch grouping.\n#\n# With batch grouping, we have the following signatures:\n# Forward: [gm,j] [j,gk]->[m,gk]\n# LHS transpose: [m, gk][gk, j] -> [gm, j]\n# --> implemented as feature grouping with transposing the group on the kernel\n# and the output.\n# RHS transpose: [j, gm][m, gk] -> [j, gk]\n# --> which is feature grouping.\n\ndef _conv_general_dilated_transpose_lhs(\n g, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,\n dimension_numbers, feature_group_count, batch_group_count,\n lhs_shape, rhs_shape, precision):\n assert type(dimension_numbers) 
is ConvDimensionNumbers\n assert batch_group_count == 1 or feature_group_count == 1\n lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n t_rhs_spec = _conv_spec_transpose(rhs_spec)\n if feature_group_count > 1:\n # in addition to switching the dims in the spec, need to move the feature\n # group axis into the transposed rhs's output feature dim\n rhs = _reshape_axis_out_of(rhs_spec[0], feature_group_count, rhs)\n rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)\n elif batch_group_count > 1:\n rhs = _reshape_axis_out_of(rhs_spec[0], batch_group_count, rhs)\n rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)\n feature_group_count = batch_group_count\n trans_dimension_numbers = ConvDimensionNumbers(out_spec, t_rhs_spec, lhs_spec)\n padding = _conv_general_vjp_lhs_padding(\n onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),\n window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,\n rhs_dilation)\n revd_weights = rev(rhs, rhs_sdims)\n out = conv_general_dilated(\n g, revd_weights, window_strides=lhs_dilation, padding=padding,\n lhs_dilation=window_strides, rhs_dilation=rhs_dilation,\n dimension_numbers=trans_dimension_numbers,\n feature_group_count=feature_group_count,\n batch_group_count=1, precision=precision)\n if batch_group_count > 1:\n out = _reshape_axis_out_of(lhs_spec[1], batch_group_count, out)\n out = _reshape_axis_into(lhs_spec[1], lhs_spec[0], out)\n return out\n\ndef _conv_general_dilated_transpose_rhs(\n g, lhs, *, window_strides, padding, lhs_dilation, rhs_dilation,\n dimension_numbers: ConvDimensionNumbers, feature_group_count: int,\n batch_group_count: int, lhs_shape, rhs_shape, precision):\n assert type(dimension_numbers) is ConvDimensionNumbers\n if onp.size(g) == 0:\n # Avoids forming degenerate convolutions where the RHS has spatial size 0.\n return ad_util.zero\n lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)\n lhs_trans, rhs_trans, out_trans = map(_conv_spec_transpose, dimension_numbers)\n assert batch_group_count == 1 or feature_group_count == 1\n if batch_group_count > 1:\n feature_group_count = batch_group_count\n batch_group_count = 1\n elif feature_group_count > 1:\n batch_group_count = feature_group_count\n feature_group_count = 1\n trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans)\n padding = _conv_general_vjp_rhs_padding(\n onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),\n window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,\n rhs_dilation)\n return conv_general_dilated(\n lhs, g, window_strides=rhs_dilation, padding=padding,\n lhs_dilation=lhs_dilation, rhs_dilation=window_strides,\n dimension_numbers=trans_dimension_numbers,\n feature_group_count=feature_group_count,\n batch_group_count=batch_group_count, precision=precision)\n\ndef _conv_general_dilated_translation_rule(\n c, lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,\n dimension_numbers, feature_group_count, batch_group_count, precision,\n **unused_kwargs):\n assert type(dimension_numbers) is ConvDimensionNumbers\n dimension_numbers = _conv_general_proto(dimension_numbers)\n return xops.ConvGeneralDilated(lhs, rhs, window_strides, padding, lhs_dilation,\n rhs_dilation, dimension_numbers,\n feature_group_count, batch_group_count,\n precision_config=_precision_config(precision))\n\ndef _conv_general_dilated_batch_rule(\n batched_args, batch_dims, *, window_strides, padding,\n lhs_dilation, 
rhs_dilation, dimension_numbers,\n feature_group_count, batch_group_count, precision, **unused_kwargs):\n assert batch_group_count == 1 or feature_group_count == 1\n lhs, rhs = batched_args\n lhs_bdim, rhs_bdim = batch_dims\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n\n if lhs_bdim is not None and rhs_bdim is not None:\n assert lhs.shape[lhs_bdim] == rhs.shape[rhs_bdim]\n if batch_group_count > 1:\n new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)\n batch_group_count *= lhs.shape[lhs_bdim]\n else:\n new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[1], lhs)\n feature_group_count *= lhs.shape[lhs_bdim]\n new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)\n out = conv_general_dilated(\n new_lhs, new_rhs, window_strides, padding, lhs_dilation, rhs_dilation,\n dimension_numbers, feature_group_count=feature_group_count,\n batch_group_count=batch_group_count,\n precision=precision)\n out = _reshape_axis_out_of(out_spec[1], lhs.shape[lhs_bdim], out)\n return out, out_spec[1]\n\n elif lhs_bdim is not None:\n if batch_group_count == 1:\n new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)\n out = conv_general_dilated(new_lhs, rhs, window_strides, padding,\n lhs_dilation, rhs_dilation, dimension_numbers,\n feature_group_count, precision=precision)\n out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)\n return out, out_spec[0]\n else:\n new_lhs = _reshape_axis_out_of(lhs_spec[0] + int(lhs_bdim <= lhs_spec[0]),\n batch_group_count, lhs)\n new_lhs = _reshape_axis_into(lhs_bdim + int(lhs_spec[0] < lhs_bdim),\n lhs_spec[0] + 1,\n new_lhs)\n new_lhs = _reshape_axis_into(lhs_spec[0], lhs_spec[0], new_lhs)\n out = conv_general_dilated(new_lhs, rhs, window_strides, padding,\n lhs_dilation, rhs_dilation, dimension_numbers,\n feature_group_count, batch_group_count,\n precision=precision)\n out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)\n return out, out_spec[0]\n\n elif rhs_bdim is not None:\n if feature_group_count == 1 and batch_group_count == 1:\n new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)\n out = conv_general_dilated(lhs, new_rhs, window_strides, padding,\n lhs_dilation, rhs_dilation, dimension_numbers,\n feature_group_count, batch_group_count,\n precision=precision)\n out = _reshape_axis_out_of(out_spec[1], rhs.shape[rhs_bdim], out)\n return out, out_spec[1]\n else:\n # groups need to be outermost, so we need to factor them out of the\n # rhs output feature dim, then factor the batch dim into the remaining rhs\n # output feature dim, then put groups back in. We do something\n # similar on the output. 
An alternative which would require more FLOPs but\n # fewer reshapes would be to broadcast lhs.\n group_count = (feature_group_count if feature_group_count > 1\n else batch_group_count)\n new_rhs = _reshape_axis_out_of(rhs_spec[0] + int(rhs_bdim <= rhs_spec[0]),\n group_count, rhs)\n new_rhs = _reshape_axis_into(rhs_bdim + int(rhs_spec[0] < rhs_bdim),\n rhs_spec[0] + 1,\n new_rhs)\n new_rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[0], new_rhs)\n out = conv_general_dilated(lhs, new_rhs, window_strides, padding,\n lhs_dilation, rhs_dilation, dimension_numbers,\n feature_group_count, batch_group_count,\n precision=precision)\n out = _reshape_axis_out_of(out_spec[1], group_count, out)\n out = _reshape_axis_out_of(out_spec[1] + 1, rhs.shape[rhs_bdim], out)\n out = _reshape_axis_into(out_spec[1], out_spec[1] + 1, out)\n return out, out_spec[1]\n\nconv_general_dilated_p = standard_primitive(\n _conv_general_dilated_shape_rule, _conv_general_dilated_dtype_rule,\n 'conv_general_dilated', _conv_general_dilated_translation_rule)\nad.defbilinear(conv_general_dilated_p,\n _conv_general_dilated_transpose_lhs,\n _conv_general_dilated_transpose_rhs)\nbatching.primitive_batchers[conv_general_dilated_p] = \\\n _conv_general_dilated_batch_rule\n\n\ndef _reshape_axis_into(src, dst, x):\n perm = [i for i in range(x.ndim) if i != src]\n perm.insert(dst, src)\n new_shape = list(onp.delete(x.shape, src))\n new_shape[dst] *= x.shape[src]\n return reshape(x, new_shape, perm)\n\ndef _reshape_axis_out_of(src, size1, x):\n shape = list(x.shape)\n size2, ragged = divmod(shape[src], size1)\n assert not ragged\n shape[src:src+1] = [size1, size2]\n return reshape(x, shape)\n\ndef _precision_config(precision):\n if precision is not None:\n config = xla_client.PrecisionConfig()\n config.operand_precision.extend((precision, precision))\n return config\n return None\n\n\ndef _dot_general_shape_rule(lhs, rhs, *, dimension_numbers, precision):\n (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers\n if len(lhs_batch) != len(rhs_batch):\n msg = (\"dot_general requires equal numbers of lhs_batch and rhs_batch \"\n \"dimensions, got lhs_batch {} and rhs_batch {}.\")\n raise TypeError(msg.format(lhs_batch, rhs_batch))\n if not onp.all(onp.equal(lhs_batch, rhs_batch)):\n msg = (\"dot_general requires same lhs and rhs batch dimension numbers, \"\n \"got {} and {}.\")\n raise TypeError(msg.format(lhs_batch, rhs_batch))\n lhs_batch_shape = onp.take(lhs.shape, lhs_batch)\n rhs_batch_shape = onp.take(rhs.shape, rhs_batch)\n if not onp.all(onp.equal(lhs_batch_shape, rhs_batch_shape)):\n msg = (\"dot_general requires lhs batch dimensions and rhs batch dimensions \"\n \"to have the same shape, got {} and {}.\")\n raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))\n if tuple(sorted(lhs_batch)) != tuple(range(len(lhs_batch))):\n msg = (\"dot_general requires lhs batch dimensions to precede contracting \"\n \"and non-contracting dimensions, got lhs_batch {}.\")\n raise TypeError(msg.format(lhs_batch))\n if tuple(sorted(rhs_batch)) != tuple(range(len(rhs_batch))):\n msg = (\"dot_general requires rhs batch dimensions to precede contracting \"\n \"and non-contracting dimensions, got rhs_batch {}.\")\n raise TypeError(msg.format(rhs_batch))\n lhs_contracting_shape = onp.take(lhs.shape, lhs_contracting)\n rhs_contracting_shape = onp.take(rhs.shape, rhs_contracting)\n if not onp.all(onp.equal(lhs_contracting_shape, rhs_contracting_shape)):\n msg = (\"dot_general requires contracting dimensions to have the same 
\"\n \"shape, got {} and {}.\")\n raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))\n\n batch_shape = tuple(onp.take(lhs.shape, lhs_batch))\n lhs_contract_or_batch = tuple(lhs_contracting) + tuple(lhs_batch)\n lhs_tensored_shape = tuple(onp.delete(lhs.shape, lhs_contract_or_batch))\n rhs_contract_or_batch = tuple(rhs_contracting) + tuple(rhs_batch)\n rhs_tensored_shape = tuple(onp.delete(rhs.shape, rhs_contract_or_batch))\n return batch_shape + lhs_tensored_shape + rhs_tensored_shape\n\n\ndef _dot_general_dtype_rule(lhs, rhs, *, dimension_numbers, precision):\n return naryop_dtype_rule(_input_dtype, [_num, _num], 'dot_general', lhs, rhs)\n\n\ndef _dot_general_transpose_lhs(g, y, *, dimension_numbers, precision,\n swap_ans=False):\n (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers\n x_ndim = g.ndim - y.ndim + len(x_batch) + 2 * len(x_contract)\n x_kept = remaining(range(x_ndim), x_contract, x_batch)\n y_kept = remaining(range(y.ndim), y_contract, y_batch)\n if swap_ans:\n ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept)\n else:\n ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept)\n dims = ((ans_y, y_kept), (ans_batch, y_batch))\n x_contract_sorted_by_y = list(onp.take(x_contract, onp.argsort(y_contract)))\n out_axes = onp.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)\n return transpose(dot_general(g, y, dims, precision=precision),\n tuple(out_axes))\n\ndef _dot_general_transpose_rhs(g, x, *, dimension_numbers, precision):\n (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers\n swapped_dimension_numbers = ((y_contract, x_contract), (y_batch, x_batch))\n return _dot_general_transpose_lhs(\n g, x, dimension_numbers=swapped_dimension_numbers, precision=precision,\n swap_ans=True)\n\n\ndef _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,\n precision):\n # there are three kinds of dimensions in a dot_general:\n # - contraction dimensions appear in lhs and rhs but not the result\n # - batch dimensions appear in lhs, rhs, and result\n # - tensor product dimensions appear in the result and one of lhs or rhs\n (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers\n lhs, rhs = batched_args\n lbd, rbd = batch_dims\n assert lbd is not None or rbd is not None\n if lbd is not None and rbd is not None:\n # adding a batch dimension\n if lbd != 0:\n lhs = batching.moveaxis(lhs, lbd, 0)\n if rbd != 0:\n rhs = batching.moveaxis(rhs, rbd, 0)\n lhs_batch = (0,) + tuple(onp.add(1, lhs_batch))\n rhs_batch = (0,) + tuple(onp.add(1, rhs_batch))\n lhs_contract = tuple(onp.add(1, lhs_contract))\n rhs_contract = tuple(onp.add(1, rhs_contract))\n result_batch_dim = 0\n else:\n # adding a tensor product dimension\n if lbd is not None:\n if lhs_batch == () or lbd > onp.max(lhs_batch):\n # can avoid transposes\n bump_lhs_contract = onp.greater_equal(lhs_contract, lbd)\n lhs_contract = tuple(onp.add(lhs_contract, bump_lhs_contract))\n result_batch_dim = lbd - len(lhs_contract) + sum(bump_lhs_contract)\n else:\n # move the new dimension to the end of lhs to avoid changing batch dims\n lhs = batching.moveaxis(lhs, lbd, lhs.ndim - 1)\n # lhs tensor product dims in result come after batch dims\n result_batch_dim = lhs.ndim - len(lhs_contract) - 1\n else:\n if rhs_batch == () or rbd > onp.max(rhs_batch):\n # can avoid transposes\n bump_rhs_contract = onp.greater_equal(rhs_contract, rbd)\n rhs_contract = tuple(onp.add(rhs_contract, bump_rhs_contract))\n result_batch_dim = (rbd + (lhs.ndim - len(lhs_contract) 
def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
                            precision):
  # there are three kinds of dimensions in a dot_general:
  # - contraction dimensions appear in lhs and rhs but not the result
  # - batch dimensions appear in lhs, rhs, and result
  # - tensor product dimensions appear in the result and one of lhs or rhs
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  lhs, rhs = batched_args
  lbd, rbd = batch_dims
  assert lbd is not None or rbd is not None
  if lbd is not None and rbd is not None:
    # adding a batch dimension
    if lbd != 0:
      lhs = batching.moveaxis(lhs, lbd, 0)
    if rbd != 0:
      rhs = batching.moveaxis(rhs, rbd, 0)
    lhs_batch = (0,) + tuple(onp.add(1, lhs_batch))
    rhs_batch = (0,) + tuple(onp.add(1, rhs_batch))
    lhs_contract = tuple(onp.add(1, lhs_contract))
    rhs_contract = tuple(onp.add(1, rhs_contract))
    result_batch_dim = 0
  else:
    # adding a tensor product dimension
    if lbd is not None:
      if lhs_batch == () or lbd > onp.max(lhs_batch):
        # can avoid transposes
        bump_lhs_contract = onp.greater_equal(lhs_contract, lbd)
        lhs_contract = tuple(onp.add(lhs_contract, bump_lhs_contract))
        result_batch_dim = lbd - len(lhs_contract) + sum(bump_lhs_contract)
      else:
        # move the new dimension to the end of lhs to avoid changing batch dims
        lhs = batching.moveaxis(lhs, lbd, lhs.ndim - 1)
        # lhs tensor product dims in result come after batch dims
        result_batch_dim = lhs.ndim - len(lhs_contract) - 1
    else:
      if rhs_batch == () or rbd > onp.max(rhs_batch):
        # can avoid transposes
        bump_rhs_contract = onp.greater_equal(rhs_contract, rbd)
        rhs_contract = tuple(onp.add(rhs_contract, bump_rhs_contract))
        result_batch_dim = (rbd + (lhs.ndim - len(lhs_contract) - len(lhs_batch))
                            - (len(rhs_contract) - sum(bump_rhs_contract)))
      else:
        # move the new dimension to the end of rhs to avoid changing batch dims
        rhs = batching.moveaxis(rhs, rbd, rhs.ndim - 1)
        # rhs tensor product dims in result come after batch dims + lhs tensor
        # product dims
        result_batch_dim = (lhs.ndim - len(lhs_contract) - len(lhs_batch) +
                            rhs.ndim - len(rhs_contract) - 1)
  new_dimension_numbers = [(lhs_contract, rhs_contract), (lhs_batch, rhs_batch)]
  batched_out = dot_general(lhs, rhs, new_dimension_numbers,
                            precision=precision)
  return batched_out, int(result_batch_dim)

def _dot_general_translation_rule(c, lhs, rhs, *, dimension_numbers, precision):
  return xops.DotGeneral(lhs, rhs,
                         xc.make_dot_dimension_numbers(dimension_numbers),
                         precision_config=_precision_config(precision))

def _dot_general_masking_rule(padded_vals, logical_shapes, *, dimension_numbers,
                              precision):
  lhs, rhs = padded_vals
  lhs_shape, rhs_shape = logical_shapes
  lhs_ndim, rhs_ndim = len(lhs_shape), len(rhs_shape)
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers

  # we need only mask the lhs contraction dimensions
  if len(lhs_contract) == 0:
    return dot_general(lhs, rhs, dimension_numbers, precision=precision)
  else:
    masks = [broadcasted_iota(onp.int32, lhs.shape, d) < lhs_shape[d]
             for d in lhs_contract]
    mask_intersection = masks[0]
    for mask in masks[1:]:
      mask_intersection &= mask
    masked_lhs = select(mask_intersection, lhs, zeros_like_array(lhs))
    return dot_general(masked_lhs, rhs, dimension_numbers, precision=precision)

dot_general_p = standard_primitive(_dot_general_shape_rule,
                                   _dot_general_dtype_rule, 'dot_general',
                                   _dot_general_translation_rule)
ad.defbilinear(dot_general_p,
               _dot_general_transpose_lhs, _dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = _dot_general_batch_rule
masking.masking_rules[dot_general_p] = _dot_general_masking_rule


def _broadcast_shape_rule(operand, sizes):
  _check_shapelike('broadcast', 'sizes', sizes)
  return tuple(sizes) + operand.shape

def _broadcast_batch_rule(batched_args, batch_dims, *, sizes):
  operand, = batched_args
  bdim, = batch_dims
  new_bdim = None if bdim is None else bdim + len(sizes)
  return broadcast(operand, sizes), new_bdim

broadcast_p = standard_primitive(
    _broadcast_shape_rule, _input_dtype, 'broadcast')
ad.deflinear(broadcast_p, lambda t, sizes: [_reduce_sum(t, range(len(sizes)))])
batching.primitive_batchers[broadcast_p] = _broadcast_batch_rule

def _broadcast_in_dim_impl(operand, *, shape, broadcast_dimensions):
  if type(operand) is xla.DeviceArray:
    shape = _broadcast_in_dim_shape_rule(
        operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
    aval = ShapedArray(shape, _dtype(operand))
    lazy_expr = lazy.broadcast(operand._lazy_expr, shape, broadcast_dimensions)
    return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
  else:
    return xla.apply_primitive(broadcast_in_dim_p, operand, shape=shape,
                               broadcast_dimensions=broadcast_dimensions)

def _broadcast_in_dim_shape_rule(operand, *, shape, broadcast_dimensions):
  _check_shapelike('broadcast_in_dim', 'shape', shape)
  _check_shapelike('broadcast_in_dim', 'broadcast_dimensions',
                   broadcast_dimensions)
  operand_ndim = onp.ndim(operand)
  if operand_ndim != len(broadcast_dimensions):
    msg = ('broadcast_in_dim broadcast_dimensions must have length equal to '
           'operand ndim; got broadcast_dimensions {} for operand ndim {}.')
    raise
TypeError(msg.format(broadcast_dimensions, operand_ndim))\n if len(shape) < operand_ndim:\n msg = ('broadcast_in_dim target broadcast shape must have equal or higher rank '\n 'to the operand shape; got operand ndim {} and target broadcast ndim {}.')\n raise TypeError(msg.format(operand_ndim, len(shape)))\n if not set(broadcast_dimensions).issubset(set(range(len(shape)))):\n msg = ('broadcast_in_dim broadcast_dimensions must be a subset of output '\n 'dimensions, got {} for operand ndim {} and shape {}.')\n raise TypeError(msg.format(broadcast_dimensions, operand_ndim, shape))\n if any(operand.shape[i] != 1 and operand.shape[i] != shape[broadcast_dimensions[i]]\n for i in range(operand_ndim)):\n msg = ('broadcast_in_dim operand dimension sizes must either be 1, or be '\n 'equal to their corresponding dimensions in the target broadcast shape; '\n 'got operand of shape {}, target broadcast shape {}, '\n 'broadcast_dimensions {} ')\n raise TypeError(msg.format(operand.shape, shape, broadcast_dimensions))\n if (len(broadcast_dimensions) != len(set(broadcast_dimensions)) or\n tuple(broadcast_dimensions) != tuple(sorted(broadcast_dimensions))):\n msg = ('broadcast_in_dim broadcast_dimensions must be strictly increasing; '\n 'got broadcast_dimensions {}')\n raise TypeError(msg.format(broadcast_dimensions))\n\n return shape\n\ndef _broadcast_in_dim_transpose_rule(t, *, shape, broadcast_dimensions):\n axes = tuple(onp.delete(range(len(shape)), broadcast_dimensions))\n return [_reduce_sum(t, axes)]\n\ndef _broadcast_in_dim_batch_rule(batched_args, batch_dims, *, shape,\n broadcast_dimensions):\n operand, = batched_args\n bdim, = batch_dims\n new_operand = batching.moveaxis(operand, bdim, 0)\n new_shape = (operand.shape[bdim],) + shape\n new_broadcast_dimensions = (0,) + tuple(onp.add(1, broadcast_dimensions))\n return broadcast_in_dim(new_operand, new_shape, new_broadcast_dimensions), 0\n\n\nbroadcast_in_dim_p = standard_primitive(\n _broadcast_in_dim_shape_rule, _input_dtype, 'broadcast_in_dim')\nbroadcast_in_dim_p.def_impl(_broadcast_in_dim_impl)\nad.deflinear(broadcast_in_dim_p, _broadcast_in_dim_transpose_rule)\nbatching.primitive_batchers[broadcast_in_dim_p] = _broadcast_in_dim_batch_rule\n\n\ndef _clamp_shape_rule(min, operand, max):\n if min.shape and min.shape != operand.shape:\n m = \"clamp requires min.shape == operand.shape or min.shape == (), got {}.\"\n raise TypeError(m.format(min.shape))\n if max.shape and max.shape != operand.shape:\n m = \"clamp requires max.shape == operand.shape or max.shape == (), got {}.\"\n raise TypeError(m.format(max.shape))\n return operand.shape\n\n_clamp_dtype_rule = partial(naryop_dtype_rule, _input_dtype, [_any, _any, _any],\n 'clamp')\n\nclamp_p = standard_primitive(_clamp_shape_rule, _clamp_dtype_rule, 'clamp')\nad.defjvp(clamp_p,\n lambda g, min, operand, max:\n select(bitwise_and(gt(min, operand), lt(min, max)),\n _brcast(g, operand), _zeros(operand)),\n lambda g, min, operand, max:\n select(bitwise_and(gt(operand, min), lt(operand, max)),\n g, _zeros(operand)),\n lambda g, min, operand, max:\n select(lt(max, operand), _brcast(g, operand), _zeros(operand)))\n\n\ndef _concatenate_shape_rule(*operands, **kwargs):\n dimension = kwargs.pop('dimension')\n if not operands:\n msg = \"concatenate expects at least one operand, got 0.\"\n raise TypeError(msg)\n if not all(isinstance(operand, UnshapedArray) for operand in operands):\n msg = \"All objects to concatenate must be arrays, got {}.\"\n op = next(op for op in operands if not isinstance(op, 
UnshapedArray))\n raise TypeError(msg.format(type(op)))\n if len(set(operand.ndim for operand in operands)) != 1:\n msg = \"Cannot concatenate arrays with different ranks, got {}.\"\n raise TypeError(msg.format(\", \".join(str(o.ndim) for o in operands)))\n shapes = onp.array([operand.shape for operand in operands])\n if not 0 <= dimension < shapes.shape[1]:\n msg = \"concatenate dimension out of bounds: dimension {} for shapes {}.\"\n raise TypeError(msg.format(dimension, \", \".join(map(str, shapes))))\n if not onp.all(onp.delete(shapes[0] == shapes, dimension, axis=1)):\n msg = (\"Cannot concatenate arrays with shapes that differ in dimensions \"\n \"other than the one being concatenated: dimension {} for shapes {}.\")\n raise TypeError(msg.format(dimension, \", \".join(map(str, shapes))))\n\n concat_size = sum(o.shape[dimension] for o in operands)\n ex_shape = operands[0].shape\n return ex_shape[:dimension] + (concat_size,) + ex_shape[dimension+1:]\n\ndef _concatenate_dtype_rule(*operands, **kwargs):\n _check_same_dtypes('concatenate', False, *(o.dtype for o in operands))\n return operands[0].dtype\n\ndef _concatenate_translation_rule(c, *operands, **kwargs):\n dimension = kwargs.pop('dimension')\n return xops.ConcatInDim(c, operands, dimension)\n\ndef _concatenate_transpose_rule(t, *operands, dimension):\n operand_shapes = [o.aval.shape if ad.is_undefined_primal(o) else o.shape\n for o in operands]\n if t is ad_util.zero:\n return [ad_util.zero if ad.is_undefined_primal(o) else None for o in operands]\n else:\n limit_points = onp.cumsum([shape[dimension] for shape in operand_shapes])\n starts = onp.zeros((len(operands), t.ndim), dtype=int)\n starts[1:, dimension] = limit_points[:-1]\n limits = onp.tile(t.shape, (len(operands), 1))\n limits[:, dimension] = limit_points\n\n return [slice(t, start, limit) if ad.is_undefined_primal(o) else None\n for o, start, limit in zip(operands, starts, limits)]\n\ndef _concatenate_batch_rule(batched_args, batch_dims, *, dimension):\n size = next(op.shape[bdim] for op, bdim in zip(batched_args, batch_dims)\n if bdim is not None)\n operands = [batching.moveaxis(op, bdim, 0) if bdim is not None\n else broadcast(op, (size,))\n for op, bdim in zip(batched_args, batch_dims)]\n return concatenate(operands, dimension + 1), 0\n\n# The concatenate_p masking rule requires use of a while-loop construct and so\n# is defined in lax_control_flow.py\n\nconcatenate_p = standard_primitive(\n _concatenate_shape_rule, _concatenate_dtype_rule, 'concatenate',\n _concatenate_translation_rule)\nad.deflinear(concatenate_p, _concatenate_transpose_rule)\nad.primitive_transposes[concatenate_p] = _concatenate_transpose_rule\nbatching.primitive_batchers[concatenate_p] = _concatenate_batch_rule\n\n\ndef _pad_dtype_rule(operand, padding_value, *, padding_config):\n if operand.dtype != padding_value.dtype:\n msg = \"pad operand and padding_value must be same dtype: got {} and {}.\"\n raise TypeError(msg.format(operand.dtype, padding_value.dtype))\n\n return _input_dtype(operand, padding_value)\n\ndef _pad_shape_rule(operand, padding_value, *, padding_config):\n lo, hi, interior = zip(*padding_config)\n out_shape = onp.add(onp.add(onp.add(lo, hi), operand.shape),\n onp.multiply(interior, onp.subtract(operand.shape, 1)))\n return tuple(out_shape)\n\ndef _pad_transpose(t, operand, padding_value, *, padding_config):\n if t is ad_util.zero:\n return [ad_util.zero if ad.is_undefined_primal(operand) else None,\n ad_util.zero if ad.is_undefined_primal(padding_value) else None]\n\n lo, hi, 
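# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): the transpose
# rule above slices the cotangent back into per-operand pieces at the
# cumulative limit points, which is what jax.vjp relies on here. Demo values
# are assumptions.
import jax
import jax.numpy as jnp
from jax import lax

_x, _y = jnp.arange(3.0), jnp.arange(4.0)
_out, _f_vjp = jax.vjp(lambda a, b: lax.concatenate([a, b], 0), _x, _y)
_dx, _dy = _f_vjp(jnp.ones(7))  # a (7,) cotangent splits into (3,) and (4,)
assert _dx.shape == (3,) and _dy.shape == (4,)
# ---------------------------------------------------------------------------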
interior = zip(*padding_config)\n total = lambda x: _reduce_sum(x, list(range(t.ndim)))\n\n def t_op():\n unpad_config = zip(onp.negative(lo), onp.negative(hi), onp.zeros_like(interior))\n unpadded = pad(t, onp.array(0., t.dtype), unpad_config)\n return slice(unpadded, onp.zeros_like(lo), unpadded.shape, onp.add(interior, 1))\n\n t_operand = t_op() if ad.is_undefined_primal(operand) else None\n t_padv = sub(total(t), total(t_operand)) if ad.is_undefined_primal(padding_value) else None\n\n return [t_operand, t_padv]\n\ndef _pad_batch_rule(batched_args, batch_dims, *, padding_config):\n operand, padding_value = batched_args\n operand_bdim, padding_value_bdim = batch_dims\n if padding_value_bdim is None:\n assert operand_bdim is not None\n padding_config = list(padding_config)\n padding_config.insert(operand_bdim, (0, 0, 0))\n return pad(operand, padding_value, padding_config), operand_bdim\n else:\n raise NotImplementedError # loop and stack\n\ndef _pad_translation_rule(c, operand, padding_value, *, padding_config):\n return xops.Pad(operand, padding_value,\n xc.make_padding_config(padding_config))\n\npad_p = standard_primitive(_pad_shape_rule, _pad_dtype_rule, 'pad',\n translation_rule=_pad_translation_rule)\nad.deflinear(pad_p, _pad_transpose)\nad.primitive_transposes[pad_p] = _pad_transpose\nbatching.primitive_batchers[pad_p] = _pad_batch_rule\n\n\n# We have a nonstandard reshape impl so that we can be lazy about data movement.\ndef _reshape_impl(operand, *, new_sizes, dimensions):\n old_sizes = onp.shape(operand)\n if type(operand) is xla.DeviceArray and dimensions is None:\n bcast_dims = _is_singleton_reshape(old_sizes, new_sizes)\n if bcast_dims is not None:\n aval = ShapedArray(new_sizes, operand.dtype)\n lazy_expr = lazy.broadcast(operand._lazy_expr, new_sizes, bcast_dims)\n return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)\n\n if type(operand) is pxla.ShardedDeviceArray and dimensions is None:\n array = _reshape_sharded_device_array(operand, new_sizes, old_sizes)\n if array is not None:\n return array\n\n return xla.apply_primitive(reshape_p, operand, new_sizes=new_sizes,\n dimensions=dimensions)\n\ndef _is_singleton_reshape(old, new):\n # A singleton reshape is one where only singleton dimensions are added. We\n # want to detect them because they can be expressed as (lazy) broadcasts.\n old, new = iter(old), iter(new)\n d1, d2 = next(old, None), next(new, None)\n bcast_dims = []\n i = 0\n while True:\n if d1 is d2 is None:\n return bcast_dims\n elif d1 == d2:\n bcast_dims.append(i)\n i += 1\n d1, d2 = next(old, None), next(new, None)\n elif d2 == 1:\n i += 1\n d2 = next(new, None)\n else:\n return None\n\ndef _reshape_sharded_device_array(array, new_sizes, old_sizes):\n \"\"\"Returns None if `array` could not be efficiently reshaped.\n\n This function is primarily to support soft_pmap, although these optimizations\n could be useful when directly calling reshape as well.\n \"\"\"\n # TODO(jekbradbury): the axis split/merge logic below assumes that\n # ShardedDevicesArrays are always sharded across their leading axes. 
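# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): per the shape
# rule above, each (lo, hi, interior) padding entry grows a dimension of
# size d to lo + hi + d + interior * (d - 1). Demo values are assumptions.
import jax.numpy as jnp
from jax import lax

_x = jnp.array([1.0, 2.0, 3.0])
_p = lax.pad(_x, 0.0, [(1, 2, 1)])  # out length: 1 + 2 + 3 + 1*(3 - 1) = 8
assert jnp.array_equal(_p, jnp.array([0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.0]))
# ---------------------------------------------------------------------------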
Remove\n # this constraint, especially if/when we add APIs that produce sharding across\n # interior axes.\n if any(num_shards != 1 for num_shards\n in array.sharding_spec.shards_per_axis[1:]):\n return None\n\n # TODO(skye): handle replicated buffers\n if array.sharding_spec.replication_factor != 1:\n return None\n\n # ShardedDevicesArrays require all buffers to have the same shape\n chunk_shape = array.device_buffers[0].shape().dimensions()\n chunk_size = chunk_shape[0] if len(chunk_shape) > 0 else 1\n\n if _is_axis_merge(old_sizes, new_sizes):\n num_chunks, ragged = divmod(new_sizes[0], chunk_size)\n if ragged: return None\n aval = ShapedArray(new_sizes, array.dtype)\n sharding_spec = pxla.ShardingSpec(\n shards_per_axis=(num_chunks,) + (1,) * (len(new_sizes) - 1),\n is_axis_materialized=(True,) * len(new_sizes),\n replication_factor=1)\n return pxla.ShardedDeviceArray(aval, sharding_spec, array.device_buffers)\n\n if _is_axis_split(old_sizes, new_sizes):\n split_axis_size, ragged = divmod(old_sizes[0], chunk_size)\n if ragged: return None\n if new_sizes[0] != split_axis_size: return None\n aval = ShapedArray(new_sizes, array.dtype)\n sharding_spec = pxla._pmap_sharding_spec(\n new_sizes[0], new_sizes[0], ShapedArray(new_sizes[1:], array.dtype), True)\n return pxla.ShardedDeviceArray(aval, sharding_spec, array.device_buffers)\n\n return None\n\ndef _is_axis_merge(s1, s2):\n # TODO(skye): we might still be able to handle these cases as merges, I\n # haven't thought about it much.\n if len(s1) < 2 or len(s2) < 1: return False\n return s1[2:] == s2[1:] and s1[0] * s1[1] == s2[0]\n\ndef _is_axis_split(s1, s2):\n return _is_axis_merge(s2, s1)\n\ndef _reshape_shape_rule(operand, *, new_sizes, dimensions):\n if not onp.all(onp.greater_equal(new_sizes, 0)):\n msg = 'reshape new_sizes must all be nonnegative, got {}.'\n raise TypeError(msg.format(new_sizes))\n if prod(onp.shape(operand)) != prod(new_sizes):\n msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'\n raise TypeError(msg.format(new_sizes, onp.shape(operand)))\n if dimensions is not None:\n if set(dimensions) != set(range(onp.ndim(operand))):\n msg = ('reshape dimensions must be a permutation of operand dimensions, '\n 'got dimensions {} for shape {}.')\n raise TypeError(msg.format(dimensions, onp.shape(operand)))\n return tuple(new_sizes)\n\ndef _reshape_dtype_rule(operand, *, new_sizes, dimensions):\n return operand.dtype\n\ndef _reshape_translation_rule(c, operand, *, new_sizes, dimensions):\n if dimensions is None:\n return xops.Reshape(operand, new_sizes)\n else:\n return xops.Reshape(operand, dimensions, new_sizes)\n\ndef _reshape_transpose_rule(t, operand, *, new_sizes, dimensions):\n assert ad.is_undefined_primal(operand)\n if dimensions is None:\n return [reshape(t, operand.aval.shape)]\n else:\n return [transpose(reshape(t, onp.take(operand.aval.shape, dimensions)),\n onp.argsort(dimensions))]\n\ndef _reshape_batch_rule(batched_args, batch_dims, *, new_sizes, dimensions):\n operand, = batched_args\n bdim, = batch_dims\n operand = batching.moveaxis(operand, bdim, 0)\n if dimensions is not None:\n dimensions = (0,) + tuple(onp.add(1, dimensions))\n return reshape(operand, operand.shape[:1] + new_sizes, dimensions), 0\n\nreshape_p = standard_primitive(_reshape_shape_rule, _reshape_dtype_rule,\n 'reshape', _reshape_translation_rule)\nreshape_p.def_impl(_reshape_impl)\nad.deflinear2(reshape_p, _reshape_transpose_rule)\nbatching.primitive_batchers[reshape_p] = _reshape_batch_rule\n\n\ndef 
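# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): `dimensions`
# permutes the operand before reshaping, which the transpose rule above
# undoes via argsort. Demo values are assumptions.
import jax.numpy as jnp
from jax import lax

_x = jnp.arange(6.0).reshape(2, 3)
_y = lax.reshape(_x, (3, 2), dimensions=(1, 0))  # transpose, then reshape
assert jnp.array_equal(_y, _x.T.reshape(3, 2))
# ---------------------------------------------------------------------------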
_rev_shape_rule(operand, *, dimensions):\n _check_shapelike('rev', 'dimensions', dimensions)\n if len(set(dimensions)) != len(dimensions):\n msg = 'rev dimensions must be unique, got {}.'\n raise TypeError(msg.format(dimensions))\n if dimensions and not _max(dimensions) < operand.ndim:\n msg = ('rev dimensions must all be less than operand ndim, got dimensions '\n '{} for operand ndim {}.')\n raise TypeError(msg.format(dimensions, operand.ndim))\n return operand.shape\n\ndef _rev_batch_rule(batched_args, batch_dims, *, dimensions):\n operand, = batched_args\n bdim, = batch_dims\n new_dimensions = [i + 1 if i >= bdim else i for i in dimensions]\n return rev(operand, new_dimensions), bdim\n\nrev_p = standard_primitive(_rev_shape_rule, _input_dtype, 'rev')\nad.deflinear(rev_p, lambda t, dimensions: [rev(t, dimensions)])\nbatching.primitive_batchers[rev_p] = _rev_batch_rule\n\n\ndef _transpose_impl(operand, *, permutation):\n if type(operand) is xla.DeviceArray:\n lazy_expr = lazy.transpose(operand._lazy_expr, permutation)\n aval = ShapedArray(lazy_expr.shape, operand.dtype)\n return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)\n else:\n return xla.apply_primitive(transpose_p, operand, permutation=permutation)\n\ndef _transpose_shape_rule(operand, *, permutation):\n if not isinstance(permutation, (tuple, list, onp.ndarray)):\n msg = \"transpose permutation must be a tuple/list/ndarray, got {}.\"\n raise TypeError(msg.format(type(permutation)))\n if tuple(sorted(permutation)) != tuple(range(operand.ndim)):\n msg = (\"transpose permutation isn't a permutation of operand dimensions, \"\n \"got permutation {} for operand shape {}.\")\n raise TypeError(msg.format(permutation, operand.shape))\n return tuple(onp.take(operand.shape, permutation))\n\ndef _transpose_batch_rule(batched_args, batch_dims, *, permutation):\n operand, = batched_args\n bdim, = batch_dims\n perm = (bdim,) + tuple(i if i < bdim else i+1 for i in permutation)\n return transpose(operand, perm), 0\n\ntranspose_p = standard_primitive(_transpose_shape_rule, _input_dtype,\n 'transpose')\ntranspose_p.def_impl(_transpose_impl)\nad.deflinear(transpose_p,\n lambda t, permutation: [transpose(t, onp.argsort(permutation))])\nbatching.primitive_batchers[transpose_p] = _transpose_batch_rule\n\n\ndef _select_shape_rule(pred, on_true, on_false):\n if on_true.shape != on_false.shape:\n msg = \"select on_true and on_false must have the same shape, got {} and {}.\"\n raise TypeError(msg.format(on_true.shape, on_false.shape))\n if pred.shape and pred.shape != on_true.shape:\n msg = (\"select pred must be scalar or have the same shape as on_true and \"\n \"on_false, got pred shape {} for on_true and on_false of shape {}.\")\n raise TypeError(msg.format(pred.shape, on_true.shape))\n return on_true.shape\n\ndef _select_dtype_rule(pred, on_true, on_false):\n _check_same_dtypes(\"select\", False, on_true.dtype, on_false.dtype)\n if not dtypes.issubdtype(pred.dtype, onp.bool_):\n msg = \"select pred must be boolean type, got {}.\"\n raise TypeError(msg.format(pred.dtype))\n return on_true.dtype\n\ndef _select_transpose_rule(t, pred, on_true, on_false):\n assert not ad.is_undefined_primal(pred)\n if t is ad_util.zero:\n return [None,\n ad_util.zero if ad.is_undefined_primal(on_true) else None,\n ad_util.zero if ad.is_undefined_primal(on_false) else None]\n else:\n zeros = full_like(t, 0)\n return [None,\n select(pred, t, zeros) if ad.is_undefined_primal(on_true) else None,\n select(pred, zeros, t) if 
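# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): rev flips the
# listed dimensions without changing the shape, and transpose's linear
# transpose applies the inverse permutation (argsort), per the rules above.
# Demo values are assumptions.
import jax.numpy as jnp
from jax import lax

_x = jnp.arange(6.0).reshape(2, 3)
assert jnp.array_equal(lax.rev(_x, (1,)), _x[:, ::-1])
assert lax.transpose(_x, (1, 0)).shape == (3, 2)
# ---------------------------------------------------------------------------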
ad.is_undefined_primal(on_false) else None]\n\ndef _select_batch_rule(batched_args, batch_dims, **unused_kwargs):\n pred, on_true, on_false, = batched_args\n pred_bdim, ot_bdim, of_bdim = batch_dims\n size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)\n if i is not None)\n\n # avoid transposes and some broadcasts in special cases\n if pred_bdim == ot_bdim == of_bdim:\n if onp.shape(pred) == onp.shape(on_true):\n return select(pred, on_true, on_false), pred_bdim\n else:\n # vmapped function had a scalar pred with nonscalar args\n assert onp.ndim(pred) == 1\n pred = broadcast_in_dim(pred, on_true.shape, [pred_bdim])\n return select(pred, on_true, on_false), pred_bdim\n elif onp.ndim(pred) == 0 and ot_bdim is not None and of_bdim is not None:\n if ot_bdim == of_bdim:\n return select(pred, on_true, on_false), ot_bdim\n elif onp.shape(on_true) == onp.shape(on_false):\n on_false = batching.moveaxis(on_false, of_bdim, ot_bdim)\n return select(pred, on_true, on_false), ot_bdim\n\n pred = batching.bdim_at_front(pred, pred_bdim, size) if onp.shape(pred) else pred\n if not onp.shape(on_true) == onp.shape(on_false) == ():\n on_true = batching.bdim_at_front(on_true, ot_bdim, size)\n on_false = batching.bdim_at_front(on_false, of_bdim, size)\n assert onp.shape(on_true) == onp.shape(on_false)\n if 0 < onp.ndim(pred) < onp.ndim(on_true):\n # vmapped function had a scalar pred with nonscalar args\n assert onp.ndim(pred) == 1\n pred = broadcast_in_dim(pred, on_true.shape, [0])\n if onp.ndim(pred) > onp.ndim(on_true):\n assert onp.ndim(on_true) == 0\n on_true = broadcast(on_true, pred.shape)\n on_false = broadcast(on_false, pred.shape)\n return select(pred, on_true, on_false), 0\n\nselect_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select')\nad.defjvp(select_p,\n None,\n lambda g, b, x, y: select(b, g, _zeros(g)),\n lambda g, b, x, y: select(b, _zeros(g), g))\nad.primitive_transposes[select_p] = _select_transpose_rule\nbatching.primitive_batchers[select_p] = _select_batch_rule\n\n\ndef _slice_shape_rule(operand, *, start_indices, limit_indices, strides):\n _check_shapelike(\"slice\", \"start_indices\", start_indices)\n _check_shapelike(\"slice\", \"limit_indices\", limit_indices)\n if operand.ndim != len(start_indices):\n msg = (\"slice start_indices must have length equal to the number of \"\n \"dimensions of the operand, got indices {} for operand shape {}.\")\n raise TypeError(msg.format(start_indices, operand.shape))\n if len(start_indices) != len(limit_indices):\n msg = (\"slice limit_indices must have the same length as start_indices, \"\n \"got start_indices {} and limit_indices {}.\")\n raise TypeError(msg.format(start_indices, limit_indices))\n if not onp.all(onp.less_equal(limit_indices, operand.shape)):\n msg = (\"slice limit_indices must be less than or equal to operand shape, \"\n \"got limit_indices {} for operand shape {}.\")\n raise TypeError(msg.format(limit_indices, operand.shape))\n if not onp.all(onp.greater_equal(start_indices, 0)):\n msg = (\"slice start_indices must be greater than or equal to zero, \"\n \"got start_indices of {}.\")\n raise TypeError(msg.format(start_indices))\n if not onp.all(onp.greater_equal(limit_indices, start_indices)):\n msg = (\"slice limit_indices must be greater than or equal to start_indices,\"\n \" got start_indices {} and limit_indices {}.\")\n raise TypeError(msg.format(start_indices, limit_indices))\n if strides is None:\n strides = onp.ones(operand.ndim, onp.int32)\n else:\n _check_shapelike(\"slice\", \"strides\", 
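# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): the transpose
# rule above routes the cotangent to on_true where pred holds and to
# on_false elsewhere, which jax.grad exposes directly. Demo values are
# assumptions.
import jax
import jax.numpy as jnp
from jax import lax

_pred = jnp.array([True, False, True])
_f = lambda x, y: jnp.sum(lax.select(_pred, x, y))
_gx, _gy = jax.grad(_f, argnums=(0, 1))(jnp.zeros(3), jnp.zeros(3))
assert jnp.array_equal(_gx, jnp.array([1.0, 0.0, 1.0]))
assert jnp.array_equal(_gy, jnp.array([0.0, 1.0, 0.0]))
# ---------------------------------------------------------------------------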
strides)\n if len(strides) != operand.ndim:\n msg = (\"slice strides must have length equal to the number of dimensions \"\n \"of the operand, got strides {} for operand shape {}.\")\n raise TypeError(msg.format(strides, operand.shape))\n if not onp.all(onp.greater(strides, 0)):\n msg = \"slice strides must be positive, got {}.\"\n raise TypeError(msg.format(strides))\n\n result_shape = onp.floor_divide(\n onp.add(onp.subtract(limit_indices, start_indices), strides) - 1, strides)\n return tuple(result_shape)\n\ndef _slice_translation_rule(c, operand, *, start_indices, limit_indices,\n strides):\n return xops.Slice(operand, start_indices, limit_indices,\n strides or [1] * len(start_indices))\n\ndef _slice_transpose_rule(t, operand, *, start_indices, limit_indices, strides):\n assert ad.is_undefined_primal(operand)\n operand_shape = operand.aval.shape\n if strides is None or onp.all(onp.equal(strides, 1)):\n pads = zip(start_indices, onp.subtract(operand_shape, limit_indices),\n (0,) * len(start_indices))\n else:\n real_limits = onp.add(onp.add(start_indices, 1),\n onp.multiply(onp.subtract(t.shape, 1), strides))\n pads = zip(start_indices, onp.subtract(operand_shape, real_limits),\n onp.subtract(strides, 1))\n result = pad(t, _const(t, 0), pads)\n assert result.shape == operand_shape\n return [result]\n\ndef _slice_batching_rule(batched_args, batch_dims, *, start_indices,\n limit_indices, strides):\n operand, = batched_args\n bdim, = batch_dims\n\n new_start_indices = list(start_indices)\n new_start_indices.insert(bdim, 0)\n\n new_limit_indices = list(limit_indices)\n new_limit_indices.insert(bdim, operand.shape[bdim])\n\n if strides is None:\n new_strides = None\n else:\n new_strides = list(strides)\n new_strides.insert(bdim, 1)\n\n out = slice(operand, new_start_indices, new_limit_indices, new_strides)\n return out, bdim\n\nslice_p = standard_primitive(_slice_shape_rule, _input_dtype, 'slice',\n _slice_translation_rule)\nad.deflinear2(slice_p, _slice_transpose_rule)\nbatching.primitive_batchers[slice_p] = _slice_batching_rule\n\n\ndef _dynamic_slice_shape_rule(operand, *start_indices, slice_sizes):\n if operand.ndim != len(start_indices):\n msg = (\"dynamic_slice start_indices must have length equal to the number \"\n \"of dimensions of the operand, got indices {} for operand shape {}.\")\n raise TypeError(msg.format(start_indices, operand.shape))\n if len(start_indices) != len(slice_sizes):\n msg = (\"dynamic_slice slice_sizes must have the same length as \"\n \"start_indices, got start_indices length {} and slice_sizes {}.\")\n raise TypeError(msg.format(len(start_indices), slice_sizes))\n if not onp.all(onp.less_equal(slice_sizes, operand.shape)):\n msg = (\"dynamic_slice slice_sizes must be less than or equal to operand shape, \"\n \"got slice_sizes {} for operand shape {}.\")\n raise TypeError(msg.format(slice_sizes, operand.shape))\n if not onp.all(onp.greater_equal(slice_sizes, 0)):\n msg = (\"dynamic_slice slice_sizes must be greater than or equal to zero, \"\n \"got slice_sizes of {}.\")\n raise TypeError(msg.format(slice_sizes))\n return tuple(slice_sizes)\n\ndef _dynamic_slice_dtype_rule(operand, *start_indices, slice_sizes):\n if any(i.dtype != start_indices[0].dtype or\n not dtypes.issubdtype(i.dtype, onp.integer) for i in start_indices):\n msg = (\"index arguments to dynamic_slice must be integers of the same \"\n \"type, got: {}\")\n raise TypeError(msg.format(\", \".join(i.dtype.name for i in start_indices)))\n return operand.dtype\n\ndef _dynamic_slice_translation_rule(c, operand, 
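# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): the shape rule
# above yields floor((limit - start + stride - 1) / stride) elements per
# dimension. Demo values are assumptions.
import jax.numpy as jnp
from jax import lax

_x = jnp.arange(10)
_y = lax.slice(_x, (1,), (9,), (3,))  # (9 - 1 + 3 - 1) // 3 = 3 elements
assert jnp.array_equal(_y, jnp.array([1, 4, 7]))
# ---------------------------------------------------------------------------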
*start_indices, slice_sizes):\n return xops.DynamicSlice(operand, start_indices, slice_sizes)\n\ndef _dynamic_slice_jvp(primals, tangents, *, slice_sizes):\n tangent_out = ad_util.zero\n if tangents[0] is not ad_util.zero:\n tangent_out = dynamic_slice(tangents[0], primals[1:], slice_sizes)\n return dynamic_slice(primals[0], primals[1:], slice_sizes), tangent_out\n\ndef _dynamic_slice_transpose_rule(t, operand, *start_indices, slice_sizes):\n assert ad.is_undefined_primal(operand)\n assert all(not ad.is_undefined_primal(s) for s in start_indices)\n operand_shape = operand.aval.shape\n zeros = full(operand_shape, tie_in(t, _zero(t)))\n return ([dynamic_update_slice(zeros, t, start_indices)] +\n [None] * len(start_indices))\n\ndef _batch_dynamic_slice_indices(indices, bdims):\n size = next((x.shape[i] for x, i in zip(indices, bdims) if i is not None), -1)\n if size < 0:\n return concatenate([reshape(i, [1]) for i in indices], 0), None\n indices = concatenate(\n [broadcast_in_dim(x, (size, 1),\n broadcast_dimensions=((0,) if i is not None else ()))\n for x, i in zip(indices, bdims)],\n dimension=1)\n return indices, 0\n\ndef _dynamic_slice_batching_rule(batched_args, batch_dims, *, slice_sizes):\n # A dynamic slice is a special case of gather; we can delegate to the gather\n # batching rule.\n # TODO(phawkins): consider removing dynamic_slice entirely and using gather\n # always.\n operand, *start_indices = batched_args\n operand_bd, *start_idx_bds = batch_dims\n operand_shape = (operand.shape if operand_bd is batching.not_mapped\n else tuple(onp.delete(operand.shape, operand_bd)))\n dims = tuple(range(len(operand_shape)))\n dnums = GatherDimensionNumbers(offset_dims=dims, collapsed_slice_dims=(),\n start_index_map=dims)\n index, index_bdim = _batch_dynamic_slice_indices(start_indices, start_idx_bds)\n return _gather_batching_rule(\n [operand, index], [operand_bd, index_bdim], dimension_numbers=dnums,\n slice_sizes=slice_sizes)\n\n\ndynamic_slice_p = standard_primitive(\n _dynamic_slice_shape_rule, _dynamic_slice_dtype_rule, 'dynamic_slice',\n _dynamic_slice_translation_rule)\nad.primitive_jvps[dynamic_slice_p] = _dynamic_slice_jvp # TODO\nad.primitive_transposes[dynamic_slice_p] = _dynamic_slice_transpose_rule\nbatching.primitive_batchers[dynamic_slice_p] = _dynamic_slice_batching_rule\n\n\ndef _dynamic_update_slice_shape_rule(operand, update, *start_indices):\n if operand.ndim != update.ndim:\n msg = (\"dynamic_update_slice update must have the same rank as operand, \"\n \"got update shape {} for operand shape {}.\")\n raise TypeError(msg.format(update.shape, operand.shape))\n if operand.ndim != len(start_indices):\n msg = (\"dynamic_update_slice start_indices must have length equal to the \"\n \"rank of operand, got indices {} for operand shape {}.\")\n raise TypeError(msg.format(start_indices, operand.shape))\n if not onp.all(onp.less_equal(update.shape, operand.shape)):\n msg = (\"dynamic_update_slice update shape must be smaller than operand \"\n \"shape, got update shape {} for operand shape {}.\")\n raise TypeError(msg.format(update.shape, operand.shape))\n return operand.shape\n\ndef _dynamic_update_slice_dtype_rule(operand, update, *start_indices):\n _check_same_dtypes(\"dynamic_update_slice\", False, operand.dtype, update.dtype)\n if any(i.dtype != start_indices[0].dtype or\n not dtypes.issubdtype(i.dtype, onp.integer) for i in start_indices):\n msg = (\"index arguments to dynamic_update_slice must be integers of the \"\n \"same type, got {}\")\n raise TypeError(msg.format(\", 
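# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): dynamic_slice
# takes traced start indices but static slice_sizes, so the output shape is
# always exactly slice_sizes; starts that would run off the end are clamped
# by XLA. Demo values are assumptions.
import jax
import jax.numpy as jnp
from jax import lax

_take3 = jax.jit(lambda x, i: lax.dynamic_slice(x, (i,), (3,)))
_x = jnp.arange(8.0)
assert jnp.array_equal(_take3(_x, 2), jnp.array([2.0, 3.0, 4.0]))
assert jnp.array_equal(_take3(_x, 7), jnp.array([5.0, 6.0, 7.0]))  # clamped
# ---------------------------------------------------------------------------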
\".join(i.dtype.name for i in start_indices)))\n return operand.dtype\n\ndef _dynamic_update_slice_jvp(primals, tangents):\n operand, update = primals[:2]\n start_indices = primals[2:]\n g_operand, g_update = tangents[:2]\n val_out = dynamic_update_slice(operand, update, start_indices)\n if g_operand is ad_util.zero and g_update is ad_util.zero:\n tangent_out = ad_util.zero\n else:\n g_operand = ad.instantiate_zeros(operand, g_operand)\n g_update = ad.instantiate_zeros(update, g_update)\n tangent_out = dynamic_update_slice(g_operand, g_update, start_indices)\n return val_out, tangent_out\n\ndef _dynamic_update_slice_transpose_rule(t, operand, update, *start_indices):\n assert all(not ad.is_undefined_primal(x) for x in start_indices)\n if ad.is_undefined_primal(update):\n update_shape = update.aval.shape\n else:\n update_shape = update.shape\n dus = dynamic_update_slice\n ds = dynamic_slice\n zeros = _zeros(t, shape=update_shape)\n operand_t = dus(t, zeros, start_indices) if ad.is_undefined_primal(operand) else None\n update_t = ds(t, start_indices, update_shape) if ad.is_undefined_primal(update) else None\n return [operand_t, update_t] + [None] * len(start_indices)\n\ndef _dynamic_update_slice_translation_rule(c, operand, update, *start_indices):\n return xops.DynamicUpdateSlice(operand, update, start_indices)\n\ndef _dynamic_update_slice_batching_rule(batched_args, batch_dims):\n # A dynamic update slice is a special case of scatter; we can delegate to the\n # scatter batching rule.\n # TODO(phawkins): consider removing dynamic_update_slice entirely and using\n # scatter always.\n operand, update, *start_idx = batched_args\n operand_bd, update_bd, *start_idx_bd = batch_dims\n update_shape = (update.shape if update_bd is batching.not_mapped\n else tuple(onp.delete(update.shape, update_bd)))\n dims = tuple(range(len(update_shape)))\n dnums = ScatterDimensionNumbers(update_window_dims=dims,\n inserted_window_dims=(),\n scatter_dims_to_operand_dims=dims)\n index, index_bdim = _batch_dynamic_slice_indices(start_idx, start_idx_bd)\n return _scatter_batching_rule(\n scatter, (operand, index, update), (operand_bd, index_bdim, update_bd),\n update_jaxpr=None, update_consts=None, dimension_numbers=dnums)\n\n\ndynamic_update_slice_p = standard_primitive(\n _dynamic_update_slice_shape_rule, _dynamic_update_slice_dtype_rule,\n 'dynamic_update_slice', _dynamic_update_slice_translation_rule)\nad.primitive_jvps[dynamic_update_slice_p] = _dynamic_update_slice_jvp\nad.primitive_transposes[dynamic_update_slice_p] = \\\n _dynamic_update_slice_transpose_rule\nbatching.primitive_batchers[dynamic_update_slice_p] = \\\n _dynamic_update_slice_batching_rule\n\n\ndef _gather_dimensions_proto(indices_shape, dimension_numbers):\n assert type(dimension_numbers) is GatherDimensionNumbers\n proto = xla_client.GatherDimensionNumbers()\n proto.offset_dims.extend(dimension_numbers.offset_dims)\n proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)\n proto.start_index_map.extend(dimension_numbers.start_index_map)\n assert indices_shape.rank() > 0\n proto.index_vector_dim = indices_shape.rank() - 1\n return proto\n\ndef _gather_dtype_rule(operand, start_indices, **kwargs):\n if not dtypes.issubdtype(start_indices.dtype, onp.integer):\n raise ValueError(\"start_indices must have an integer type\")\n return dtypes.canonicalize_dtype(operand.dtype)\n\ndef _gather_shape_rule(operand, start_indices, *, dimension_numbers,\n slice_sizes):\n if len(operand.shape) != len(slice_sizes):\n msg = (\"slice_sizes must 
have rank equal to the gather operand; \"\n \"operand.shape={}, slice_sizes={}\".format(operand.shape, slice_sizes))\n raise ValueError(msg)\n result_rank = len(dimension_numbers.offset_dims) + start_indices.ndim - 1\n start_indices_shape = iter(start_indices.shape[:-1])\n slice_sizes = iter(onp.delete(slice_sizes, dimension_numbers.collapsed_slice_dims))\n return tuple(next(slice_sizes) if i in dimension_numbers.offset_dims\n else next(start_indices_shape) for i in range(result_rank))\n\ndef _gather_translation_rule(c, operand, start_indices, *, dimension_numbers,\n slice_sizes):\n indices_shape = c.get_shape(start_indices)\n return xops.Gather(\n operand, start_indices,\n _gather_dimensions_proto(indices_shape, dimension_numbers), slice_sizes,\n indices_are_sorted=False)\n\ndef _gather_jvp_rule(g, operand, start_indices, *, dimension_numbers,\n slice_sizes):\n return gather(g, start_indices, dimension_numbers, slice_sizes)\n\ndef _gather_transpose_rule(t, operand, start_indices, *, dimension_numbers,\n slice_sizes):\n assert ad.is_undefined_primal(operand)\n operand_shape = operand.aval.shape\n if t is ad_util.zero:\n return [ad_util.zero, ad_util.zero]\n zeros = full(operand_shape, tie_in(t, _zero(t)))\n scatter_dnums = ScatterDimensionNumbers(\n update_window_dims=dimension_numbers.offset_dims,\n inserted_window_dims=dimension_numbers.collapsed_slice_dims,\n scatter_dims_to_operand_dims=dimension_numbers.start_index_map)\n return [scatter_add(zeros, start_indices, t, scatter_dnums), ad_util.zero]\n\ndef _gather_batching_rule(batched_args, batch_dims, *, dimension_numbers,\n slice_sizes):\n operand, start_indices = batched_args\n operand_bdim, start_indices_bdim = batch_dims\n\n if operand_bdim is not None and start_indices_bdim is None:\n operand = batching.moveaxis(operand, operand_bdim, 0)\n slice_sizes = (operand.shape[0],) + slice_sizes\n offset_dims = (0,) + tuple(onp.add(1, dimension_numbers.offset_dims))\n collapsed_slice_dims = tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))\n start_index_map = tuple(onp.add(1, dimension_numbers.start_index_map))\n dnums = GatherDimensionNumbers(\n offset_dims=offset_dims,\n collapsed_slice_dims=collapsed_slice_dims,\n start_index_map=start_index_map)\n return gather(operand, start_indices, dimension_numbers=dnums,\n slice_sizes=slice_sizes), 0\n\n elif operand_bdim is None and start_indices_bdim is not None:\n start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)\n offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))\n dnums = GatherDimensionNumbers(\n offset_dims=offset_dims,\n collapsed_slice_dims=dimension_numbers.collapsed_slice_dims,\n start_index_map=dimension_numbers.start_index_map)\n return gather(operand, start_indices, dimension_numbers=dnums,\n slice_sizes=slice_sizes), 0\n\n else:\n # move our batch dimensions to the front to preserve sanity\n operand = batching.moveaxis(operand, operand_bdim, 0)\n start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)\n\n # Example: user code had start_indices shape (3, 4, 5), and we have to deal\n # with start_indices shape (7, 3, 4, 5). 
We transform that to a\n # start_indices of shape (7, 3, 4, 6) where we concatenated an iota that\n # counts along our batch dimension to the front of the ndindex.\n count_shape = list(start_indices.shape)\n count_shape[-1] = 1\n counts = broadcasted_iota(start_indices.dtype, tuple(count_shape), 0)\n start_indices = concatenate([counts, start_indices], len(count_shape) - 1)\n\n slice_sizes = (1,) + slice_sizes\n collapsed_slice_dims = (0,) + tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))\n offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))\n start_index_map = (0,) + tuple(onp.add(1, dimension_numbers.start_index_map))\n\n dnums = GatherDimensionNumbers(\n offset_dims=offset_dims,\n collapsed_slice_dims=collapsed_slice_dims,\n start_index_map=start_index_map)\n return gather(operand, start_indices, dimension_numbers=dnums,\n slice_sizes=slice_sizes), 0\n\ngather_p = standard_primitive(\n _gather_shape_rule, _gather_dtype_rule, 'gather',\n _gather_translation_rule)\nad.defjvp(gather_p, _gather_jvp_rule, None)\n\nad.primitive_transposes[gather_p] = _gather_transpose_rule\nbatching.primitive_batchers[gather_p] = _gather_batching_rule\n\n\ndef _scatter_dimensions_proto(indices_shape, dimension_numbers):\n assert type(dimension_numbers) is ScatterDimensionNumbers\n proto = xla_client.ScatterDimensionNumbers()\n proto.update_window_dims.extend(dimension_numbers.update_window_dims)\n proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)\n proto.scatter_dims_to_operand_dims.extend(\n dimension_numbers.scatter_dims_to_operand_dims)\n assert indices_shape.rank() > 0\n proto.index_vector_dim = indices_shape.rank() - 1\n return proto\n\ndef _scatter_dtype_rule(operand, scatter_indices, updates, **kwargs):\n if not dtypes.issubdtype(scatter_indices.dtype, onp.integer):\n raise ValueError(\"scatter_indices must have an integer type\")\n _check_same_dtypes(\"scatter\", False, operand.dtype, updates.dtype)\n return dtypes.canonicalize_dtype(operand.dtype)\n\ndef _scatter_shape_rule(operand, scatter_indices, updates, **kwargs):\n return operand.shape\n\ndef _scatter_translation_rule(c, operand, scatter_indices, updates,\n update_jaxpr, update_consts, dimension_numbers):\n dtype = c.get_shape(operand).numpy_dtype()\n init_value = xb.constant(c, onp.array(0, dtype))\n update_computation = _reduction_computation(\n c, update_jaxpr, update_consts, init_value)\n indices_shape = c.get_shape(scatter_indices)\n return xops.Scatter(operand, scatter_indices, updates, update_computation,\n _scatter_dimensions_proto(indices_shape, dimension_numbers),\n False, False)\n\ndef _scatter_add_jvp(primals, tangents, *, update_jaxpr, update_consts,\n dimension_numbers):\n operand, scatter_indices, updates = primals\n g_operand, g_scatter_indices, g_updates = tangents\n val_out = scatter_add_p.bind(\n operand, scatter_indices, updates, update_jaxpr=update_jaxpr,\n update_consts=update_consts, dimension_numbers=dimension_numbers)\n if g_operand is ad_util.zero and g_updates is ad_util.zero:\n tangent_out = ad_util.zero\n else:\n g_operand = ad.instantiate_zeros(operand, g_operand)\n g_updates = ad.instantiate_zeros(updates, g_updates)\n tangent_out = scatter_add_p.bind(\n g_operand, scatter_indices, g_updates, update_jaxpr=update_jaxpr,\n update_consts=update_consts, dimension_numbers=dimension_numbers)\n return val_out, tangent_out\n\ndef _scatter_add_transpose_rule(t, operand, scatter_indices, updates, *,\n update_jaxpr, update_consts, dimension_numbers):\n assert not 
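# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): a row gather
# spelled with explicit GatherDimensionNumbers, matching the shape rule
# above (offset dims take the remaining slice sizes, the other dims follow
# start_indices). Demo values are assumptions.
import jax.numpy as jnp
from jax import lax

_op = jnp.arange(12.0).reshape(3, 4)
_starts = jnp.array([[0], [2]])  # two single-row lookups
_dnums = lax.GatherDimensionNumbers(
    offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,))
_rows = lax.gather(_op, _starts, _dnums, slice_sizes=(1, 4))  # shape (2, 4)
assert jnp.array_equal(_rows, _op[jnp.array([0, 2])])
# ---------------------------------------------------------------------------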
ad.is_undefined_primal(scatter_indices)\n if ad.is_undefined_primal(updates):\n updates_shape = updates.aval.shape\n else:\n updates_shape = updates.shape\n if t is ad_util.zero:\n return [ad_util.zero, None, ad_util.zero]\n\n operand_t = update_t = None\n if ad.is_undefined_primal(operand):\n operand_t = t\n\n if ad.is_undefined_primal(updates):\n gather_dnums = GatherDimensionNumbers(\n offset_dims=dimension_numbers.update_window_dims,\n collapsed_slice_dims=dimension_numbers.inserted_window_dims,\n start_index_map=dimension_numbers.scatter_dims_to_operand_dims)\n slice_sizes = []\n pos = 0\n for i in range(len(t.shape)):\n if i in dimension_numbers.inserted_window_dims:\n slice_sizes.append(1)\n else:\n slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])\n pos += 1\n update_t = gather(t, scatter_indices, dimension_numbers=gather_dnums,\n slice_sizes=slice_sizes)\n return [operand_t, None, update_t]\n\ndef _scatter_mul_transpose_rule(t, operand, scatter_indices, updates, *,\n update_jaxpr, update_consts, dimension_numbers):\n assert not ad.is_undefined_primal(scatter_indices)\n if ad.is_undefined_primal(updates):\n updates_shape = updates.aval.shape\n else:\n updates_shape = updates.shape\n if t is ad_util.zero:\n return [ad_util.zero, None, ad_util.zero]\n\n operand_t = update_t = None\n if ad.is_undefined_primal(operand):\n operand_t = scatter_mul(t, scatter_indices, updates,\n dimension_numbers=dimension_numbers)\n\n if ad.is_undefined_primal(updates):\n gather_dnums = GatherDimensionNumbers(\n offset_dims=dimension_numbers.update_window_dims,\n collapsed_slice_dims=dimension_numbers.inserted_window_dims,\n start_index_map=dimension_numbers.scatter_dims_to_operand_dims)\n slice_sizes = []\n pos = 0\n for i in range(len(t.shape)):\n if i in dimension_numbers.inserted_window_dims:\n slice_sizes.append(1)\n else:\n slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])\n pos += 1\n update_t = gather(mul(t, operand), scatter_indices,\n dimension_numbers=gather_dnums, slice_sizes=slice_sizes)\n return [operand_t, None, update_t]\n\n\ndef _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,\n update_jaxpr, update_consts, dimension_numbers):\n operand, scatter_indices, updates = batched_args\n operand_bdim, scatter_indices_bdim, updates_bdim = batch_dims\n del update_jaxpr, update_consts # Unused.\n\n # move the operand batch dim to the front if it is not None, otherwise create\n # it at the front (so that we can scatter into it)\n size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)\n if ax is not None)\n operand = batching.bdim_at_front(operand, operand_bdim, size)\n operand_bdim = 0\n\n updates = batching.bdim_at_front(updates, updates_bdim, size)\n\n if scatter_indices_bdim is None:\n inserted_window_dims = tuple(onp.add(1, dimension_numbers.inserted_window_dims))\n update_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.update_window_dims))\n scatter_dims_to_operand_dims = tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))\n dnums = ScatterDimensionNumbers(\n update_window_dims=update_window_dims,\n inserted_window_dims=inserted_window_dims,\n scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)\n return scatter_op(operand, scatter_indices, updates, dnums), 0\n\n\n # see the third case in _gather_batching_rule for comparison and comments\n scatter_indices = batching.bdim_at_front(\n scatter_indices, scatter_indices_bdim, size)\n\n count_shape = list(scatter_indices.shape)\n 
count_shape[-1] = 1\n counts = broadcasted_iota(scatter_indices.dtype, tuple(count_shape), 0)\n scatter_indices = concatenate([counts, scatter_indices],\n len(count_shape) - 1)\n\n update_window_dims = tuple(onp.add(1, dimension_numbers.update_window_dims))\n inserted_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.inserted_window_dims))\n scatter_dims_to_operand_dims = (0,) + tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))\n\n dnums = ScatterDimensionNumbers(\n update_window_dims=update_window_dims,\n inserted_window_dims=inserted_window_dims,\n scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)\n return scatter_op(operand, scatter_indices, updates, dnums), 0\n\nscatter_add_p = standard_primitive(\n _scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',\n _scatter_translation_rule)\nad.primitive_jvps[scatter_add_p] = _scatter_add_jvp\nad.primitive_transposes[scatter_add_p] = _scatter_add_transpose_rule\nbatching.primitive_batchers[scatter_add_p] = (\n partial(_scatter_batching_rule, scatter_add))\n\n\nscatter_mul_p = standard_primitive(\n _scatter_shape_rule, _scatter_dtype_rule, 'scatter-mul',\n _scatter_translation_rule)\n\ndef _scatter_mul_jvp_rhs(g, x, i, y, *, dimension_numbers, **kw):\n return mul(x, scatter_add(zeros_like_array(x), i, g,\n dimension_numbers=dimension_numbers))\n\nad.defjvp(scatter_mul_p,\n lambda g, x, i, y, **kw: scatter_mul_p.bind(g, i, y, **kw),\n None,\n _scatter_mul_jvp_rhs)\nad.primitive_transposes[scatter_mul_p] = _scatter_mul_transpose_rule\nbatching.primitive_batchers[scatter_mul_p] = (\n partial(_scatter_batching_rule, scatter_mul))\n\n# TODO(jlebar): Add derivatives.\nscatter_min_p = standard_primitive(\n _scatter_shape_rule, _scatter_dtype_rule, 'scatter-min',\n _scatter_translation_rule)\nbatching.primitive_batchers[scatter_min_p] = (\n partial(_scatter_batching_rule, scatter_min))\n\n# TODO(jlebar): Add derivatives.\nscatter_max_p = standard_primitive(\n _scatter_shape_rule, _scatter_dtype_rule, 'scatter-max',\n _scatter_translation_rule)\nbatching.primitive_batchers[scatter_max_p] = (\n partial(_scatter_batching_rule, scatter_max))\n\n\ndef _scatter_jvp(primals, tangents, *, update_jaxpr, update_consts,\n dimension_numbers):\n operand, scatter_indices, updates = primals\n g_operand, g_scatter_indices, g_updates = tangents\n dnums = dimension_numbers\n\n if g_operand is ad_util.zero and g_updates is ad_util.zero:\n val_out = scatter_p.bind(\n operand, scatter_indices, updates, update_jaxpr=update_jaxpr,\n update_consts=update_consts, dimension_numbers=dnums)\n tangent_out = ad_util.zero\n return val_out, tangent_out\n\n g_operand = ad.instantiate_zeros(operand, g_operand)\n g_updates = ad.instantiate_zeros(updates, g_updates)\n\n # If there are overlapping indices in the scatter, it is unspecified which\n # update \"wins\". So we use the following perhaps surprising scheme:\n # a) attach a positive ID to each update in updates, forming (value, id) pairs\n # (using a new array dimension because scatter doesn't actually support\n # pairs).\n # b) perform the scatter, yielding (value, id) updates, which we split apart.\n # c) perform the inverse gather on the ids (similar to\n # _scatter_add_transpose), and use it to build a mask for the tangent of\n # `updates`.\n # d) perform a scatter-add on the masked JVP values. 
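# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): scatter-add
# with scalar updates; duplicate indices accumulate, which is why scatter_add
# (unlike general scatter) has the simple gather-based transpose above. Demo
# values are assumptions.
import jax.numpy as jnp
from jax import lax

_dnums = lax.ScatterDimensionNumbers(
    update_window_dims=(), inserted_window_dims=(0,),
    scatter_dims_to_operand_dims=(0,))
_out = lax.scatter_add(jnp.zeros(5), jnp.array([[1], [3], [1]]),
                       jnp.array([1.0, 10.0, 100.0]), _dnums)
assert jnp.array_equal(_out, jnp.array([0.0, 101.0, 0.0, 10.0, 0.0]))
# ---------------------------------------------------------------------------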
A benefit of using\n # scatter-add here is that we don't need a `scatter` transpose rule.\n\n # a) add unique positive IDs (iotas) to the updates, and zeros to the operand.\n operand_shape = operand.shape\n updates_shape = updates.shape\n updates_dtype = _dtype(updates)\n\n new_operand = reshape(operand, (1,) + operand_shape)\n new_operand = pad(new_operand, _zero(operand),\n ((0, 1, 0),) + tuple((0, 0, 0) for _ in operand_shape))\n\n # We specify the dtype here in case `updates_shape` is an empty tuple, in\n # which case numpy defaults to float64.\n ids_shape = onp.array(updates_shape, dtype=onp.int32)\n ids_shape[dnums.update_window_dims,] = 1\n num_ids = onp.prod(ids_shape)\n update_ids = add(reshape(iota(updates_dtype, num_ids), ids_shape),\n _ones(updates))\n\n # TODO(phawkins): there is a potential bug here if the number of updates\n # is large enough to overflow the number of mantissa bits in a float so IDs\n # end up colliding. We could also utilize the exponent and sign bits, with a\n # little more work.\n assert num_ids < (2 ** dtypes.finfo(updates_dtype).nmant)\n\n updates = reshape(updates, (1,) + updates_shape)\n reshaped_update_ids = reshape(update_ids, (1,) + updates_shape)\n updates_and_ids = concatenate((updates, reshaped_update_ids), 0)\n\n new_dnums = ScatterDimensionNumbers(\n update_window_dims=(0,) + tuple(d + 1 for d in dnums.update_window_dims),\n inserted_window_dims=tuple(d + 1 for d in dnums.inserted_window_dims),\n scatter_dims_to_operand_dims=tuple(d + 1 for d in dnums.scatter_dims_to_operand_dims))\n outputs = scatter_p.bind(\n new_operand, scatter_indices, updates_and_ids, update_jaxpr=update_jaxpr,\n update_consts=update_consts, dimension_numbers=new_dnums)\n val_out = index_in_dim(outputs, 0, keepdims=False)\n scattered_ids = index_in_dim(outputs, 1, keepdims=False)\n\n # b) compute the inverse gather that \"undoes\" the scatter on the id values.\n gather_dnums = GatherDimensionNumbers(\n offset_dims=dnums.update_window_dims,\n collapsed_slice_dims=dnums.inserted_window_dims,\n start_index_map=dnums.scatter_dims_to_operand_dims)\n slice_sizes = []\n pos = 0\n for i in range(len(scattered_ids.shape)):\n if i in dnums.inserted_window_dims:\n slice_sizes.append(1)\n else:\n slice_sizes.append(updates_shape[dnums.update_window_dims[pos]])\n pos += 1\n gathered_update_ids = gather(scattered_ids, scatter_indices,\n dimension_numbers=gather_dnums,\n slice_sizes=slice_sizes)\n\n # c) mask off input JVP elements that do not correspond to a primal output.\n masked_g_operand = select(eq(scattered_ids, _zeros(scattered_ids)),\n g_operand, _zeros(g_operand))\n masked_g_updates = select(eq(update_ids, gathered_update_ids),\n g_updates, _zeros(g_updates))\n\n # d) perform a scatter-add to compute the tangent output.\n tangent_out = scatter_add(masked_g_operand, scatter_indices, masked_g_updates,\n dimension_numbers=dnums)\n return val_out, tangent_out\n\n\nscatter_p = standard_primitive(\n _scatter_shape_rule, _scatter_dtype_rule, 'scatter',\n _scatter_translation_rule)\nad.primitive_jvps[scatter_p] = _scatter_jvp\nbatching.primitive_batchers[scatter_p] = (\n partial(_scatter_batching_rule, scatter))\n\n\ndef _reduce_shape_rule(operand, init_value, *, computation, jaxpr, consts,\n dimensions):\n return tuple(onp.delete(operand.shape, dimensions))\n\ndef _reduce_translation_rule(c, operand, init_value, *, computation, jaxpr,\n consts, dimensions):\n xla_computation = _reduction_computation(c, jaxpr, consts, init_value)\n return xops.Reduce(c, [operand], [init_value], 
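# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): the ID-masking
# JVP above means the tangent at an overwritten slot is replaced by the
# update's tangent. In current JAX the x.at[...].set(...) helper lowers to a
# scatter (an assumption of this demo); values are assumptions too.
import jax
import jax.numpy as jnp

_y, _t = jax.jvp(lambda x: x.at[0].set(5.0),
                 (jnp.array([1.0, 2.0, 3.0]),), (jnp.ones(3),))
assert jnp.array_equal(_t, jnp.array([0.0, 1.0, 1.0]))  # slot 0 overwritten
# ---------------------------------------------------------------------------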
xla_computation, dimensions)\n\ndef _reduce_batch_rule(batched_args, batch_dims, *, computation, jaxpr, consts,\n dimensions):\n operand, init_value = batched_args\n operand_bdim, init_value_bdim = batch_dims\n if init_value_bdim is None:\n assert operand_bdim is not None\n new_dimensions = [d + bool(d >= operand_bdim) for d in dimensions]\n new_operand_bdim = operand_bdim - int(onp.sum(onp.less(dimensions, operand_bdim)))\n return reduce(operand, init_value, computation, new_dimensions), new_operand_bdim\n else:\n raise NotImplementedError # loop and stack\n\ndef _reduction_computation(c, jaxpr, consts, init_value):\n shape = c.get_shape(init_value)\n axis_env = xla.AxisEnv(1) # no parallel primitives inside reductions\n subc = xla_bridge.make_computation_builder(\"reduction_computation\")\n assert len(consts) == 0, \"Reduction computations cannot have constants\"\n args = [xb.parameter(subc, 0, shape), xb.parameter(subc, 1, shape)]\n out, = xla.jaxpr_subcomp(subc, jaxpr, None, axis_env, consts, '', *args)\n return subc.build(out)\n\ndef _masking_defreducer(prim, identity):\n masking.masking_rules[prim] = partial(_reducer_masking_rule, prim, identity)\n\ndef _reducer_masking_rule(prim, identity, padded_vals, logical_shapes,\n axes):\n (padded_val,), (logical_shape,) = padded_vals, logical_shapes\n padded_shape = masking.padded_shape_as_value(padded_val.shape)\n masks = [broadcasted_iota(onp.int32, padded_shape, i) < d\n for i, d in enumerate(logical_shape) if i in axes]\n mask = _reduce(operator.and_, masks)\n masked_val = select(mask, padded_val, identity(padded_shape, padded_val.dtype))\n return prim.bind(masked_val, axes=axes)\n\nreduce_p = standard_primitive(_reduce_shape_rule, _input_dtype, 'reduce',\n _reduce_translation_rule)\nbatching.primitive_batchers[reduce_p] = _reduce_batch_rule\n\n\ndef _reduce_number_dtype_rule(name, operand, *args, **kw):\n if not dtypes.issubdtype(operand.dtype, onp.number):\n raise TypeError(\"{} does not accept dtype {}. 
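# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): the general
# reduce primitive with an explicit monoid; _reduction_computation above
# builds the XLA subcomputation from the traced `computation`. Demo values
# are assumptions.
import jax.numpy as jnp
from jax import lax

_x = jnp.arange(6.0).reshape(2, 3)
_total = lax.reduce(_x, 0.0, lax.add, (0, 1))  # reduce over both dimensions
assert _total == 15.0
# ---------------------------------------------------------------------------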
Accepted dtypes are subtypes \"\n \"of number.\".format(name, onp.dtype(operand.dtype).name))\n return dtypes.canonicalize_dtype(operand.dtype)\n\ndef _reduce_sum_shape_rule(operand, *, axes):\n return _reduce_op_shape_rule(operand, axes=axes)\n\ndef _reduce_sum_translation_rule(c, operand, *, axes):\n dtype = c.get_shape(operand).numpy_dtype()\n scalar = ShapedArray((), dtype)\n return xops.Reduce(c, [operand], [xb.constant(c, onp.array(0, dtype))],\n xla.primitive_subcomputation(add_p, scalar, scalar),\n axes)\n\ndef _reduce_sum_transpose_rule(cotangent, operand, *, axes):\n assert ad.is_undefined_primal(operand)\n input_shape = operand.aval.shape\n broadcast_dimensions = tuple(onp.delete(onp.arange(len(input_shape)), axes))\n result = broadcast_in_dim(cotangent, input_shape, broadcast_dimensions)\n assert result.shape == input_shape\n return [result]\n\nreduce_sum_p = standard_primitive(\n _reduce_sum_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_sum'),\n 'reduce_sum', _reduce_sum_translation_rule)\nad.deflinear2(reduce_sum_p, _reduce_sum_transpose_rule)\nbatching.defreducer(reduce_sum_p)\n_masking_defreducer(reduce_sum_p,\n lambda shape, dtype: onp.broadcast_to(onp.array(0, dtype), shape))\n\n\ndef _reduce_op_shape_rule(operand, *, axes):\n return tuple(onp.delete(operand.shape, axes))\n\ndef _reduce_prod_translation_rule(c, operand, *, axes):\n dtype = c.get_shape(operand).numpy_dtype()\n scalar = ShapedArray((), dtype)\n return xops.Reduce(c, [operand], [xb.constant(c, onp.array(1, dtype))],\n xla.primitive_subcomputation(mul_p, scalar, scalar), axes)\n\ndef _reduce_prod_jvp_rule(primals, tangents, *, axes):\n operand, = primals\n tangent, = tangents\n input_shape = onp.array(operand.shape)\n\n n = onp.prod(input_shape[list(axes)])\n non_axes = onp.delete(onp.arange(len(input_shape)), axes)\n\n # Move the reduced axes to the front, and flatten them to 1D.\n permutation = axes + tuple(non_axes)\n new_shape = (n,) + tuple(input_shape[non_axes])\n operand = reshape(operand, new_shape, permutation)\n tangent = reshape(tangent, new_shape, permutation)\n\n def _reduce_prod_tree(x, axis=0):\n \"\"\"Reduce by repeatedly splitting the array and multiplying.\"\"\"\n while x.shape[axis] > 1:\n n = x.shape[axis]\n n1 = (n + 1) // 2\n n2 = n - n1\n x1 = slice_in_dim(x, 0, n1)\n x2 = slice_in_dim(x, n1, None)\n if n2 != n1:\n paddings = [(0, 0, 0)] * len(x.shape)\n paddings[axis] = (0, 1, 0)\n x2 = pad(x2, _const(x, 1), paddings)\n x = x1 * x2\n shape = list(x.shape)\n del shape[axis]\n return reshape(x, shape)\n\n return api.jvp(_reduce_prod_tree, (operand,), (tangent,))\n\n\nreduce_prod_p = standard_primitive(\n _reduce_op_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_prod'),\n 'reduce_prod', _reduce_prod_translation_rule)\nad.primitive_jvps[reduce_prod_p] = _reduce_prod_jvp_rule\nbatching.defreducer(reduce_prod_p)\n\n\ndef _reduce_chooser_shape_rule(operand, *, axes):\n return tuple(onp.delete(operand.shape, axes))\n\ndef _reduce_chooser_translation_rule(prim, identity, c, operand, *, axes):\n dtype = c.get_shape(operand).numpy_dtype()\n scalar = ShapedArray((), dtype)\n return xops.Reduce(c, [operand], [xb.constant(c, identity(dtype))],\n xla.primitive_subcomputation(prim, scalar, scalar), axes)\n\ndef _reduce_chooser_jvp_rule(g, ans, operand, *, axes):\n # TODO(mattjj): an alternative is to use variadic reduce to compute the chosen\n # locations in a single pass (rather than comparing equality) and use a\n # gather, and/or even push along the chosen elements of g 
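# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): the
# tree-product JVP above computes d/dx_i prod(x) = prod(x) / x_i without a
# division, so it stays finite when entries are zero. Demo values are
# assumptions.
import jax
import jax.numpy as jnp

_g = jax.grad(jnp.prod)(jnp.array([2.0, 3.0, 4.0]))
assert jnp.array_equal(_g, jnp.array([12.0, 8.0, 6.0]))
assert jnp.array_equal(jax.grad(jnp.prod)(jnp.array([0.0, 3.0])),
                       jnp.array([3.0, 0.0]))  # no nan despite the zero
# ---------------------------------------------------------------------------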
(b/112040122)\n shape = [1 if i in axes else d for i, d in enumerate(operand.shape)]\n location_indicators = convert_element_type(\n _eq_meet(operand, reshape(ans, shape)), g.dtype)\n counts = _reduce_sum(location_indicators, axes)\n return div(_reduce_sum(mul(g, location_indicators), axes), counts)\n\n_reduce_max_translation_rule = partial(_reduce_chooser_translation_rule, max_p,\n _get_max_identity)\nreduce_max_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,\n 'reduce_max', _reduce_max_translation_rule)\nad.defjvp2(reduce_max_p, _reduce_chooser_jvp_rule)\nbatching.defreducer(reduce_max_p)\n\n\n_reduce_min_translation_rule = partial(\n _reduce_chooser_translation_rule, min_p, _get_min_identity)\nreduce_min_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,\n 'reduce_min', _reduce_min_translation_rule)\nad.defjvp2(reduce_min_p, _reduce_chooser_jvp_rule)\nbatching.defreducer(reduce_min_p)\n\n\ndef _reduce_logical_shape_rule(operand, *, axes):\n if operand.dtype != onp.bool_:\n msg = \"logical reduction requires operand dtype bool, got {}.\"\n raise TypeError(msg.format(operand.dtype))\n return tuple(onp.delete(operand.shape, axes))\n\ndef _reduce_logical_translation_rule(prim, identity, c, operand, *, axes):\n scalar = ShapedArray((), onp.bool_)\n return xops.Reduce(c, [operand], [xb.constant(c, identity(onp.bool_))],\n xla.primitive_subcomputation(prim, scalar, scalar), axes)\n\n_reduce_or_translation_rule = partial(_reduce_logical_translation_rule,\n or_p, _get_max_identity)\nreduce_or_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(onp.bool_),\n 'reduce_or', _reduce_or_translation_rule)\nbatching.defreducer(reduce_or_p)\n\n\n_reduce_and_translation_rule = partial(_reduce_logical_translation_rule,\n and_p, _get_min_identity)\nreduce_and_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(onp.bool_),\n 'reduce_and', _reduce_and_translation_rule)\nbatching.defreducer(reduce_and_p)\n\ndef _reduce_window_shape_rule(operand, init_value, *, jaxpr, consts,\n window_dimensions, window_strides, padding):\n if operand.dtype != init_value.dtype:\n msg = (\"reduce_window got inconsistent dtypes for operand and init_value: \"\n \" got operand dtype {} and init_value dtype {}.\")\n raise TypeError(msg.format(operand.dtype, init_value.dtype))\n return _common_reduce_window_shape_rule(operand, window_dimensions,\n window_strides, padding)\n\ndef _reduce_window_translation_rule(c, operand, init_value, *, jaxpr, consts,\n window_dimensions, window_strides, padding):\n xla_computation = _reduction_computation(c, jaxpr, consts, init_value)\n pads = xc.window_padding_type_to_pad_values(\n padding, c.get_shape(operand).dimensions(), window_dimensions,\n window_strides)\n return xops.ReduceWindowWithGeneralPadding(\n operand, init_value, xla_computation, window_dimensions,\n window_strides, (), (), pads)\n\ndef _generic_reduce_window_batch_rule(\n batched_args, batch_dims, *, jaxpr, consts, window_dimensions,\n window_strides, padding):\n operand, init = batched_args\n bdim, init_bdim = batch_dims\n if init_bdim is not None:\n raise NotImplementedError(\"reduce_window batching is not implemented for \"\n \"initial values\")\n\n def reduce_window(x, window_dimensions, window_strides, padding):\n return reduce_window_p.bind(\n x, init, jaxpr=jaxpr, consts=consts, window_dimensions=window_dimensions,\n window_strides=window_strides, padding=padding)\n return _reduce_window_batch_rule(reduce_window, (operand,), (bdim,),\n window_dimensions, window_strides, 
padding)\n\n\nreduce_window_p = standard_primitive(\n _reduce_window_shape_rule, _input_dtype, 'reduce_window',\n _reduce_window_translation_rule)\nbatching.primitive_batchers[reduce_window_p] = _generic_reduce_window_batch_rule\n\n\ndef _reduce_window_sum_shape_rule(operand, *, window_dimensions, window_strides,\n padding):\n if not dtypes.issubdtype(operand.dtype, onp.number):\n msg = \"operand to reduce_window_sum must have a number dtype, got {}\"\n raise TypeError(msg.format(onp.dtype(operand.dtype).name))\n return _common_reduce_window_shape_rule(operand, window_dimensions,\n window_strides, padding)\n\ndef _reduce_window_sum_translation_rule(c, operand, *, window_dimensions,\n window_strides, padding):\n dtype = c.get_shape(operand).numpy_dtype()\n scalar = ShapedArray((), dtype)\n pads = xc.window_padding_type_to_pad_values(\n padding, c.get_shape(operand).dimensions(), window_dimensions,\n window_strides)\n return xops.ReduceWindowWithGeneralPadding(\n operand, xb.constant(c, onp.array(0, dtype)),\n xla.primitive_subcomputation(add_p, scalar, scalar), window_dimensions,\n window_strides, (), (), pads)\n\ndef _reduce_window_sum_transpose_rule(cotangent, operand, *, window_dimensions,\n window_strides, padding):\n assert ad.is_undefined_primal(operand)\n input_shape = operand.aval.shape\n in_pads = padtype_to_pads(input_shape, window_dimensions, window_strides,\n padding)\n ones = [1] * len(input_shape)\n pads = _conv_general_vjp_lhs_padding(\n input_shape, window_dimensions, window_strides, cotangent.shape, in_pads,\n ones, ones)\n padding_config = [(lo, hi, stride - 1)\n for (lo, hi), stride in zip(pads, window_strides)]\n pad_cotangent = pad(cotangent, _zero(cotangent), padding_config)\n result = _reduce_window_sum(pad_cotangent, window_dimensions, ones,\n xla_client.PaddingType.VALID)\n assert result.shape == input_shape\n return [result]\n\ndef _reduce_window_batch_rule(reduce_window, batched_args, bdims, *,\n window_dimensions, window_strides, padding):\n operand, = batched_args\n bdim, = bdims\n\n if bdim is not None:\n window_dimensions = \\\n window_dimensions[:bdim] + (1,) + window_dimensions[bdim:]\n window_strides = window_strides[:bdim] + (1,) + window_strides[bdim:]\n\n operand = reduce_window(\n operand, window_dimensions, window_strides, padding)\n\n return operand, bdim\n\nreduce_window_sum_p = standard_primitive(\n _reduce_window_sum_shape_rule, _input_dtype, 'reduce_window_sum',\n _reduce_window_sum_translation_rule)\nad.deflinear2(reduce_window_sum_p, _reduce_window_sum_transpose_rule)\nbatching.primitive_batchers[reduce_window_sum_p] = partial(\n _reduce_window_batch_rule, _reduce_window_sum)\n\ndef _reduce_window_chooser_translation_rule(\n prim, identity, c, operand, *, window_dimensions, window_strides, padding):\n dtype = c.get_shape(operand).numpy_dtype()\n scalar = ShapedArray((), dtype)\n pads = xc.window_padding_type_to_pad_values(\n padding, c.get_shape(operand).dimensions(), window_dimensions,\n window_strides)\n return xops.ReduceWindowWithGeneralPadding(\n operand, xb.constant(c, identity(dtype)),\n xla.primitive_subcomputation(prim, scalar, scalar), window_dimensions,\n window_strides, (), (), pads)\n\ndef _reduce_window_chooser_jvp_rule(prim, g, operand, *, window_dimensions,\n window_strides, padding):\n assert prim is max_p or prim is min_p\n select_prim = ge_p if prim is max_p else le_p\n return _select_and_gather_add(g, operand, select_prim, window_dimensions,\n window_strides, padding)\n\n\ndef _common_reduce_window_shape_rule(operand, 
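# ---------------------------------------------------------------------------
# Standalone usage sketch (not part of the original module): windowed sums
# and max pooling via reduce_window; the output length follows the shape
# rule nearby, floor((padded - window) / stride) + 1. Demo values are
# assumptions.
import jax.numpy as jnp
from jax import lax

_x = jnp.arange(6.0)
_sums = lax.reduce_window(_x, 0.0, lax.add, (3,), (1,), 'VALID')
assert jnp.array_equal(_sums, jnp.array([3.0, 6.0, 9.0, 12.0]))  # (6-3)//1+1 = 4

_y = jnp.array([1.0, 3.0, 2.0, 0.0, 5.0, 4.0])
_pooled = lax.reduce_window(_y, -jnp.inf, lax.max, (2,), (2,), 'VALID')
assert jnp.array_equal(_pooled, jnp.array([3.0, 2.0, 5.0]))  # (6-2)//2+1 = 3
# ---------------------------------------------------------------------------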
window_dimensions,\n window_strides, padding):\n _check_shapelike(\"reduce_window\", \"window_dimensions\", window_dimensions)\n _check_shapelike(\"reduce_window\", \"window_strides\", window_strides)\n if operand.ndim != len(window_dimensions):\n msg = (\"reduce_window got the wrong number of window_dimensions for \"\n \"operand: got operand shape {} with window_dimensions {}.\")\n raise TypeError(msg.format(operand.shape, window_dimensions))\n if len(window_strides) != len(window_dimensions):\n msg = (\"reduce_window got inconsistent window_strides and \"\n \"window_dimensions: got window_strides {} and window_dimensions {}.\")\n raise TypeError(msg.format(window_strides, window_dimensions))\n\n return reduce_window_shape_tuple(operand.shape, window_dimensions,\n window_strides, padding)\n\ndef reduce_window_shape_tuple(operand_shape, window_dimensions, window_strides,\n padding):\n pads = padtype_to_pads(operand_shape, window_dimensions, window_strides, padding)\n operand_padded = onp.add(operand_shape, onp.add(*zip(*pads)))\n t = onp.floor_divide(\n onp.subtract(operand_padded, window_dimensions), window_strides) + 1\n return tuple(t)\n\n_reduce_window_max_translation_rule = partial(\n _reduce_window_chooser_translation_rule, max_p, _get_max_identity)\nreduce_window_max_p = standard_primitive(\n _common_reduce_window_shape_rule, _input_dtype, 'reduce_window_max',\n _reduce_window_max_translation_rule)\nad.defjvp(reduce_window_max_p, partial(_reduce_window_chooser_jvp_rule, max_p))\nbatching.primitive_batchers[reduce_window_max_p] = partial(\n _reduce_window_batch_rule, _reduce_window_max)\n\n_reduce_window_min_translation_rule = partial(\n _reduce_window_chooser_translation_rule, min_p, _get_min_identity)\nreduce_window_min_p = standard_primitive(\n _common_reduce_window_shape_rule, _input_dtype, 'reduce_window_min',\n _reduce_window_min_translation_rule)\nad.defjvp(reduce_window_min_p, partial(_reduce_window_chooser_jvp_rule, min_p))\n\n_reduce_window_min_batch_rule = partial(_reduce_window_batch_rule,\n _reduce_window_min)\nbatching.primitive_batchers[reduce_window_min_p] = partial(\n _reduce_window_batch_rule, _reduce_window_min)\n\n\ndef _select_and_scatter_shape_rule(\n operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,\n scatter_consts, window_dimensions, window_strides, padding):\n _check_shapelike(\"select_and_scatter\", \"window_dimensions\", window_dimensions)\n _check_shapelike(\"select_and_scatter\", \"window_strides\", window_strides)\n if len(window_dimensions) != len(window_strides):\n msg = (\"select_and_scatter got inconsistent window_strides and \"\n \"window_dimensions: got window_strides {} and window_dimensions {}.\")\n raise TypeError(msg.format(window_strides, window_dimensions))\n return operand.shape\n\ndef _select_and_scatter_translation(\n c, operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,\n scatter_consts, window_dimensions, window_strides, padding):\n select = _reduction_computation(c, select_jaxpr, select_consts, init_value)\n scatter = _reduction_computation(c, scatter_jaxpr, scatter_consts, init_value)\n pads = xc.window_padding_type_to_pad_values(\n padding, c.get_shape(operand).dimensions(), window_dimensions,\n window_strides)\n return xops.SelectAndScatterWithGeneralPadding(\n operand, select, window_dimensions, window_strides, pads, source,\n init_value, scatter)\n\nselect_and_scatter_p = standard_primitive(\n _select_and_scatter_shape_rule, _input_dtype, 'select_and_scatter',\n 
_select_and_scatter_translation)\n\n\ndef _select_and_scatter_add_shape_rule(\n source, operand, *, select_prim, window_dimensions, window_strides,\n padding):\n return operand.shape\n\ndef _select_and_scatter_add_translation(\n c, source, operand, *, select_prim, window_dimensions, window_strides,\n padding):\n dtype = c.get_shape(operand).numpy_dtype()\n scalar = ShapedArray((), dtype)\n select = xla.primitive_subcomputation(select_prim, scalar, scalar)\n scatter = xla.primitive_subcomputation(add_p, scalar, scalar)\n zero = xb.constant(c, onp.array(0, dtype))\n pads = xc.window_padding_type_to_pad_values(\n padding, c.get_shape(operand).dimensions(), window_dimensions,\n window_strides)\n return xops.SelectAndScatterWithGeneralPadding(\n operand, select, window_dimensions, window_strides, pads, source, zero,\n scatter)\n\ndef _select_and_scatter_add_jvp(\n primals, tangents, *, select_prim, window_dimensions, window_strides,\n padding):\n source, operand = primals\n g_source, g_operand = tangents\n val_out = _select_and_scatter_add(\n source, operand, select_prim, window_dimensions, window_strides,\n padding)\n del g_operand\n if g_source is ad_util.zero:\n tangent_out = ad_util.zero\n else:\n tangent_out = _select_and_scatter_add(\n g_source, operand, select_prim, window_dimensions,\n window_strides, padding)\n return val_out, tangent_out\n\ndef _select_and_scatter_add_transpose(\n t, source, operand, *, select_prim, window_dimensions, window_strides,\n padding):\n assert ad.is_undefined_primal(source) and not ad.is_undefined_primal(operand)\n source_t = _select_and_gather_add(t, operand, select_prim, window_dimensions,\n window_strides, padding)\n return [source_t, None]\n\ndef _select_and_scatter_add_batch_rule(batched_args, batch_dims, **kwargs):\n source, operand = batched_args\n s_bdims, o_bdims = batch_dims\n\n if s_bdims is not None and o_bdims is not None:\n #TODO(#212): use a map construct instead of unrolling.\n source = batching.moveaxis(source, s_bdims, 0)\n operand = batching.moveaxis(operand, o_bdims, 0)\n outputs = [\n _select_and_scatter_add(s, o, **kwargs) for s, o in zip(source, operand)]\n outputs = [reshape(out, (1,) + out.shape) for out in outputs]\n outputs = concatenate(outputs, 0)\n return outputs, 0\n elif s_bdims is not None:\n #TODO(#212): use a map construct instead of unrolling.\n source = batching.moveaxis(source, s_bdims, 0)\n outputs = [\n _select_and_scatter_add(s, operand, **kwargs) for s in source]\n outputs = [reshape(out, (1,) + out.shape) for out in outputs]\n outputs = concatenate(outputs, 0)\n return outputs, 0\n elif o_bdims is not None:\n #TODO(#212): use a map construct instead of unrolling.\n operand = batching.moveaxis(operand, o_bdims, 0)\n outputs = [\n _select_and_scatter_add(source, o, **kwargs) for o in operand]\n outputs = [reshape(out, (1,) + out.shape) for out in outputs]\n outputs = concatenate(outputs, 0)\n return outputs, 0\n\nselect_and_scatter_add_p = standard_primitive(\n _select_and_scatter_add_shape_rule, _input_dtype, 'select_and_scatter_add',\n _select_and_scatter_add_translation)\nad.primitive_transposes[select_and_scatter_add_p] = \\\n _select_and_scatter_add_transpose\nad.primitive_jvps[select_and_scatter_add_p] = _select_and_scatter_add_jvp\nbatching.primitive_batchers[select_and_scatter_add_p] = \\\n _select_and_scatter_add_batch_rule\n\ndef _select_and_gather_add_shape_rule(\n tangents, operand, *, select_prim, window_dimensions, window_strides,\n padding):\n if tangents.shape != operand.shape:\n msg = 
(\"select_and_gather_add tangents and operand shapes must match, \"\n \"got {} and {}.\")\n raise TypeError(msg.format(tangents.shape, operand.shape))\n return _common_reduce_window_shape_rule(operand, window_dimensions,\n window_strides, padding)\n\n\n_UINT_DTYPES = {\n 16: onp.uint16,\n 32: onp.uint32,\n 64: onp.uint64,\n}\n\n_INT_DTYPES = {\n 16: onp.int16,\n 32: onp.int32,\n 64: onp.int64,\n}\n\ndef _select_and_gather_add_translation(\n c, tangents, operand, *, select_prim, window_dimensions, window_strides,\n padding, max_bits=64):\n shape = c.get_shape(operand)\n dtype = shape.numpy_dtype()\n etype = shape.xla_element_type()\n nbits = dtypes.finfo(dtype).bits\n\n assert nbits <= max_bits\n double_word_reduction = nbits * 2 <= max_bits\n\n const = lambda c, dtype, x: xb.constant(c, onp.array(x, dtype=dtype),\n canonicalize_types=False)\n\n if double_word_reduction:\n # TODO(b/73062247): XLA doesn't yet implement ReduceWindow on tuples, so\n # we implement a pair-wise ReduceWindow by packing two k-bit values into\n # 2k-bit unsigned integer using bit tricks.\n word_dtype = _UINT_DTYPES[nbits]\n double_word_dtype = _UINT_DTYPES[nbits * 2]\n word_type = xla_client.dtype_to_etype(word_dtype)\n double_word_type = xla_client.dtype_to_etype(double_word_dtype)\n\n # Packs two values into a tuple.\n def pack(a, b):\n a = xops.BitcastConvertType(a, word_type)\n b = xops.BitcastConvertType(b, word_type)\n a = xops.ConvertElementType(a, double_word_type)\n b = xops.ConvertElementType(b, double_word_type)\n a = xops.ShiftLeft(a, const(c, double_word_dtype, nbits))\n return xops.Or(a, b)\n\n # Unpacks the first element of a tuple.\n def fst(c, t):\n st = xops.ShiftRightLogical(t, const(c, double_word_dtype, nbits))\n return xops.BitcastConvertType(xops.ConvertElementType(st, word_type), etype)\n\n # Unpacks the second element of a tuple.\n def snd(t):\n return xops.BitcastConvertType(xops.ConvertElementType(t, word_type), etype)\n\n else:\n # The double-word trick above only works if we have a sufficiently large\n # type. As an alternative, we can pack two half words into a single word,\n # at the cost of precision.\n # TODO(b/73062247): add support for tuple reductions and remove this case.\n warnings.warn(\"Using reduced precision for gradient of reduce-window \"\n \"min/max operator to work around missing XLA support for \"\n \"pair-reductions. 
This is likely from a second or \"\n \"higher derivative of a max-pooling operation.\")\n r_nbits = nbits // 2\n # Drop/round the bottom mantissa bits.\n nexp = dtypes.finfo(dtype).nexp\n nmant = r_nbits - nexp - 1\n\n double_word_dtype = word_dtype = _UINT_DTYPES[nbits]\n word_type = xla_client.dtype_to_etype(word_dtype)\n\n # Packs two values into a tuple.\n def pack(a, b):\n a = xops.ReducePrecision(a, exponent_bits=nexp, mantissa_bits=nmant)\n b = xops.ReducePrecision(b, exponent_bits=nexp, mantissa_bits=nmant)\n a = xops.BitcastConvertType(a, word_type)\n b = xops.BitcastConvertType(b, word_type)\n b = xops.ShiftRightLogical(b, const(c, word_dtype, r_nbits))\n return xops.Or(a, b)\n\n # Unpacks the first element of a tuple.\n def fst(c, t):\n st = xops.And(t, const(c, word_dtype, ((1 << r_nbits) - 1) << r_nbits))\n return xops.BitcastConvertType(st, etype)\n\n # Unpacks the second element of a tuple.\n def snd(t):\n return xops.BitcastConvertType(xops.ShiftLeft(t, const(c, word_dtype, r_nbits)),\n etype)\n\n def reducer():\n c = xla_bridge.make_computation_builder(\"select_and_gather_pair_reducer\")\n x = xb.parameter(c, 0,\n xla_client.Shape.array_shape(onp.dtype(double_word_dtype), ()))\n y = xb.parameter(c, 1,\n xla_client.Shape.array_shape(onp.dtype(double_word_dtype), ()))\n assert select_prim is ge_p or select_prim is le_p\n which = xops.Ge if select_prim is ge_p else xops.Le\n xops.Select(which(fst(c, x), fst(c, y)), x, y)\n return c.build()\n\n\n assert select_prim is ge_p or select_prim is le_p, select_prim\n init = -onp.inf if select_prim is ge_p else onp.inf\n pads = xc.window_padding_type_to_pad_values(\n padding, c.get_shape(operand).dimensions(), window_dimensions,\n window_strides)\n out = xops.ReduceWindowWithGeneralPadding(\n pack(operand, tangents), pack(const(c, dtype, init), const(c, dtype, 0)),\n reducer(), window_dimensions, window_strides, (), (), pads)\n return snd(out)\n\ndef _select_and_gather_add_jvp(\n primals, tangents, *, select_prim, window_dimensions, window_strides,\n padding):\n source, operand = primals\n g_source, g_operand = tangents\n val_out = _select_and_gather_add(\n source, operand, select_prim, window_dimensions, window_strides,\n padding)\n del g_operand\n if g_source is ad_util.zero:\n tangent_out = ad_util.zero\n else:\n tangent_out = _select_and_gather_add(\n g_source, operand, select_prim, window_dimensions,\n window_strides, padding)\n return val_out, tangent_out\n\ndef _select_and_gather_add_transpose(\n t, tangents, operand, *, select_prim, window_dimensions, window_strides,\n padding):\n assert ad.is_undefined_primal(tangents) and not ad.is_undefined_primal(operand)\n result = _select_and_scatter_add(t, operand, select_prim, window_dimensions,\n window_strides, padding)\n return [result, None]\n\ndef _select_and_gather_add_batching_rule(\n batched_args, batch_dims, *, select_prim, window_dimensions, window_strides,\n padding):\n t, x = batched_args\n t_bdim, x_bdim = batch_dims\n size = next(a.shape[bdim] for a, bdim in zip(batched_args, batch_dims)\n if bdim is not None)\n t = batching.bdim_at_front(t, t_bdim, size)\n x = batching.bdim_at_front(x, x_bdim, size)\n window_dimensions = (1,) + window_dimensions\n window_strides = (1,) + window_strides\n out = _select_and_gather_add(t, x, select_prim, window_dimensions,\n window_strides, padding)\n return (out, 0)\n\n\nselect_and_gather_add_p = standard_primitive(\n _select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add',\n 
_select_and_gather_add_translation)\nad.primitive_jvps[select_and_gather_add_p] = _select_and_gather_add_jvp\nad.primitive_transposes[select_and_gather_add_p] = \\\n _select_and_gather_add_transpose\nbatching.primitive_batchers[select_and_gather_add_p] = \\\n _select_and_gather_add_batching_rule\nxla.backend_specific_translations['tpu'][select_and_gather_add_p] = partial(\n _select_and_gather_add_translation,\n max_bits=32)\n\n\n# Parallel prefix-scan. See:\n# https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda\n# and\n# Blelloch, Guy E. 1990. \"Prefix Sums and Their Applications.\", Technical Report\n# CMU-CS-90-190, School of Computer Science, Carnegie Mellon University.\n#\n# Unlike the Blelloch algorithm, we use an out-of-place algorithm that uses 2n\n# space. This is somewhat wasteful if we are interested only in the output of\n# the forward pass, but more memory-efficient if we intend to differentiate\n# through the implementation of the scan.\ndef _prescan_power_of_two(x, axis: int, op: Callable, unit):\n n = x.shape[axis]\n assert n != 0 and n & (n - 1) == 0, \"n must be a power of 2\"\n\n # Upsweep\n xs = []\n for d in range(0, n.bit_length() - 1):\n x1 = slice_in_dim(x, 0, None, stride=2, axis=axis)\n xs.append(x1)\n x2 = slice_in_dim(x, 1, None, stride=2, axis=axis)\n x = op(x1, x2)\n total = x\n\n # Downsweep\n x = full_like(total, unit)\n pad_left = [(0, 0, 0)] * len(x.shape)\n pad_left[axis] = (1, 0, 1)\n pad_right = [(0, 0, 0)] * len(x.shape)\n pad_right[axis] = (0, 1, 1)\n for w in reversed(xs):\n x1 = pad(x, _const(x, 0), pad_right)\n x2 = pad(x, _const(x, 0), pad_left)\n w = pad(w, _const(x, 0), pad_left)\n x = x1 + op(x2, w)\n\n return x, total\n\n\ndef _parallel_prefix_scan(x, axis: int, op: Callable, unit):\n n = x.shape[axis]\n if n == 0:\n return x\n # Pads to the next largest power of two\n nbits = n.bit_length()\n if n == (1 << (nbits - 1)):\n nbits -= 1\n padding = [(0, 0, 0)] * len(x.shape)\n padding[axis] = (0, (1 << nbits) - n, 0)\n x = pad(x, _const(x, unit), padding)\n x, total = _prescan_power_of_two(x, axis, op, unit)\n return concatenate((slice_in_dim(x, 1, n, axis=axis), total), dimension=axis)\n\n_cumsum_prefix_scan = partial(_parallel_prefix_scan, op=add, unit=0)\n_cumprod_prefix_scan = partial(_parallel_prefix_scan, op=mul, unit=1)\n\ndef _cumred_shape_rule(x, *, axis: int):\n if axis < 0 or axis >= x.ndim:\n raise ValueError(\n \"axis {} is out of bounds for array of shape {}\".format(axis, x.shape))\n return x.shape\n\ndef _cumsum_transpose_rule(t, *, axis: int):\n return [rev(cumsum(rev(t, (axis,)), axis=axis), (axis,))]\n\ndef _cumprod_jvp_rule(primals, tangents, *, axis: int):\n # Irrespective of backend, we always use the parallel prefix scan\n # implementation when differentiating because reduce_window is not\n # arbitrarily differentiable.\n return api.jvp(partial(_cumprod_prefix_scan, axis=axis), primals, tangents)\n\n\ndef _cumred_tpu_translation_rule(window_reduce: Callable, unit, x, *,\n axis: int):\n # On TPU, an implementation using reduce_window is handled specially by the\n # compiler and is efficient. 
On other backends, it is O(n^2).\n n = x.shape[axis]\n if n == 0:\n return x\n padding = [(0, 0, 0)] * x.ndim\n padding[axis] = (n - 1, 0, 0)\n x = pad(x, _const(x, unit), padding)\n strides = [1] * x.ndim\n window_dims = [1] * x.ndim\n window_dims[axis] = n\n return window_reduce(x, window_dims, strides, xla_client.PaddingType.VALID)\n\ndef _cumred_batch_rule(prim, batched_args, batch_dims, *, axis: int):\n operand, = batched_args\n bdim, = batch_dims\n axis = axis if axis < bdim else axis + 1\n return prim.bind(operand, axis=axis), bdim\n\n\ncumsum_p = standard_primitive(\n _cumred_shape_rule, partial(_reduce_number_dtype_rule, \"cumsum\"),\n 'cumsum', xla.lower_fun(_cumsum_prefix_scan, multiple_results=False))\nad.deflinear(cumsum_p, _cumsum_transpose_rule)\nxla.backend_specific_translations['tpu'][cumsum_p] = xla.lower_fun(\n partial(_cumred_tpu_translation_rule, _reduce_window_sum, 0),\n multiple_results=False)\nbatching.primitive_batchers[cumsum_p] = partial(_cumred_batch_rule, cumsum_p)\n\n\ncumprod_p = standard_primitive(\n _cumred_shape_rule, partial(_reduce_number_dtype_rule, \"cumprod\"),\n 'cumprod', xla.lower_fun(_cumprod_prefix_scan, multiple_results=False))\nad.primitive_jvps[cumprod_p] = _cumprod_jvp_rule\nxla.backend_specific_translations['tpu'][cumprod_p] = xla.lower_fun(\n partial(_cumred_tpu_translation_rule, _reduce_window_prod, 1),\n multiple_results=False)\nbatching.primitive_batchers[cumprod_p] = partial(_cumred_batch_rule, cumprod_p)\n\n\ndef _sort_abstract_eval(*args, **kwargs):\n args = tuple(raise_to_shaped(arg) for arg in args)\n if any(arg.shape != args[0].shape for arg in args[1:]):\n shapes = \" \".join(str(a.shape) for a in args)\n raise TypeError(f\"Arguments to sort must have equal shapes, got: {shapes}\")\n return args\n\n\ndef _float_to_int_for_sort(x):\n # Switch from a floating point value to a integer value in such a way that\n # when using the integer value to compare, we get the same result for normal\n # values, and -nan is treated as the smallest value, and nan is treated as\n # the largest value.\n # If f is a float, and\n # x = bit_cast<int32>(f);\n # y = x < 0 ? int32_max - x : x;\n # then y is ordered as an int32 such that finite values have the obvious\n # order, -0 is ordered before 0, and -NaN and NaN appear at the beginning\n # and end of the ordering.\n # Note that in order to avoid -x to overflow, we calculate\n # int32_max - x as unsigned, and then convert back to signed.\n if x.dtype == dtypes.bfloat16:\n x = convert_element_type(x, onp.float32)\n nbits = onp.finfo(x).bits\n signed_dtype = _INT_DTYPES[nbits]\n unsigned_dtype = _UINT_DTYPES[nbits]\n\n signed = bitcast_convert_type(x, signed_dtype)\n unsigned = bitcast_convert_type(x, unsigned_dtype)\n flipped = bitcast_convert_type(\n sub(unsigned_dtype(onp.iinfo(signed_dtype).max), unsigned), signed_dtype)\n return select(lt(signed, _zero(signed)), flipped, signed)\n\n# Default comparator that sorts the operands only on their first arguments.\n# For floating point types, a total order is created where\n# -NaN < -infinity < ... < -0 < 0 < ... 
< infinity < NaN.\n# For complex types, the (real, imag) pairs are sorted lexicographically\n# (following NumPy's semantics).\n# This code adds complex-number support to the algorithm from:\n# https://github.com/tensorflow/tensorflow/blob/ba43780830f09da72081fe5061c436f1c6203a92/tensorflow/compiler/xla/client/lib/comparators.h#L33\ndef _sort_lt_comparator(*operands):\n assert len(operands) >= 2 and len(operands) % 2 == 0, operands\n x, y = operands[:2]\n assert x.dtype == y.dtype, (x.dtype, y.dtype)\n if onp.issubdtype(x.dtype, onp.complexfloating):\n x_keys = [_float_to_int_for_sort(real(x)), _float_to_int_for_sort(imag(x))]\n y_keys = [_float_to_int_for_sort(real(y)), _float_to_int_for_sort(imag(y))]\n elif onp.issubdtype(x.dtype, onp.floating):\n x_keys = [_float_to_int_for_sort(x)]\n y_keys = [_float_to_int_for_sort(y)]\n else:\n x_keys = [x]\n y_keys = [y]\n\n p = None\n for xk, yk in zip(x_keys[::-1], y_keys[::-1]):\n p = (bitwise_or(lt(xk, yk), bitwise_and(eq(xk, yk), p)) if p is not None\n else lt(xk, yk))\n return p\n\ndef _sort_translation_rule(c, *operands, dimension):\n types = [c.get_shape(x).xla_element_type() for x in operands]\n subc = xla_bridge.make_computation_builder(\"sort_lt_comparator\")\n params = [xb.parameter(subc, 2 * i + j, xc.Shape.array_shape(typ, ()))\n for i, typ in enumerate(types) for j in range(2)]\n result = xla.lower_fun(_sort_lt_comparator,\n multiple_results=False)(subc, *params)\n comparator = subc.build(result)\n out = xops.Sort(c, operands, dimension=dimension, is_stable=True,\n comparator=comparator)\n return out if len(operands) != 1 else xops.Tuple(c, [out])\n\ndef _sort_jvp(primals, tangents, *, dimension):\n shape = primals[0].shape\n iotas = []\n for dim, size in enumerate(shape):\n dtype = onp.int32 if size < onp.iinfo(onp.int32).max else onp.int64\n iotas.append(broadcasted_iota(dtype, shape, dim))\n primals = sort_p.bind(*(primals + (iotas[dimension],)), dimension=dimension)\n idx = tuple(primals[-1] if i == dimension else iotas[i]\n for i in range(len(shape)))\n tangents_out = tuple(ad_util.zero if t is ad_util.zero else t[idx]\n for t in tangents)\n return tuple(primals[:-1]), tangents_out\n\ndef _sort_batch_rule(batched_args, batch_dims, *, dimension):\n prototype_arg, new_bdim = next(\n (a, b) for a, b in zip(batched_args, batch_dims) if b is not None)\n new_args = []\n for arg, bdim in zip(batched_args, batch_dims):\n if bdim is None:\n dims = onp.delete(onp.arange(prototype_arg.ndim), new_bdim)\n new_args.append(broadcast_in_dim(arg, prototype_arg.shape, dims))\n else:\n new_args.append(batching.moveaxis(arg, bdim, new_bdim))\n new_dimension = dimension + (new_bdim <= dimension)\n bdims = (new_bdim,) * len(new_args)\n return sort_p.bind(*new_args, dimension=new_dimension), bdims\n\n\nsort_p = Primitive('sort')\nsort_p.multiple_results = True\nsort_p.def_impl(partial(xla.apply_primitive, sort_p))\nsort_p.def_abstract_eval(_sort_abstract_eval)\nxla.translations[sort_p] = _sort_translation_rule\nad.primitive_jvps[sort_p] = _sort_jvp\nbatching.primitive_batchers[sort_p] = _sort_batch_rule\n\n\ndef _top_k_abstract_eval(operand, *, k):\n if k < 0:\n raise ValueError(\"k argument to top_k must be nonnegative, got {}\".format(k))\n if len(operand.shape) == 0:\n raise TypeError(\"top_k operand must have >= 1 dimension, got {}\"\n .format(operand.shape))\n shape = list(operand.shape)\n if shape[-1] < k:\n msg = \"k argument to top_k must be no larger than minor dimension; {} vs {}\"\n raise ValueError(msg.format(k, shape))\n shape[-1] = k\n 
return (ShapedArray(shape, operand.dtype),\n ShapedArray(shape, onp.dtype(onp.int32)))\n\ndef _top_k_jvp(primals, tangents, *, k):\n operand, = primals\n tangent, = tangents\n primals_out = top_k(operand, k)\n if tangent is ad_util.zero:\n tangents_out = (ad_util.zero, ad_util.zero)\n else:\n _, k_idxs = primals_out\n idx_shape = k_idxs.shape\n rank = len(idx_shape)\n gather_index_shape = idx_shape + (1,)\n gather_indices = []\n for i in range(rank-1):\n _iota = iota(k_idxs.dtype, idx_shape[i])\n _iota = tie_in(operand, _iota)\n _iota = broadcast_in_dim(_iota, gather_index_shape, (i,))\n gather_indices.append(_iota)\n gather_indices.append(reshape(k_idxs, gather_index_shape))\n gather_indices = concatenate(gather_indices, dimension=rank)\n slice_sizes = (1,) * rank\n dnums = GatherDimensionNumbers(\n offset_dims=(),\n collapsed_slice_dims=tuple(range(rank)),\n start_index_map=tuple(range(rank)))\n tangents_out = (gather(tangent, gather_indices, dnums, slice_sizes),\n ad_util.zero)\n return primals_out, tangents_out\n\ndef _top_k_batch_rule(batched_args, batch_dims, *, k):\n operand, = batched_args\n bdim, = batch_dims\n if bdim == operand.ndim-1:\n perm = onp.arange(operand.ndim)\n perm[bdim-1], perm[bdim] = perm[bdim], perm[bdim-1]\n top_k_v, top_k_i = top_k(transpose(operand, perm), k=k)\n return (transpose(top_k_v, perm),\n transpose(top_k_i, perm)), (bdim, bdim)\n else:\n return top_k(operand, k=k), (bdim, bdim)\n\ntop_k_p = Primitive('top_k')\ntop_k_p.multiple_results = True\ntop_k_p.def_impl(partial(xla.apply_primitive, top_k_p))\ntop_k_p.def_abstract_eval(_top_k_abstract_eval)\nxla.translations[top_k_p] = partial(standard_translate, 'top_k')\nad.primitive_jvps[top_k_p] = _top_k_jvp\nbatching.primitive_batchers[top_k_p] = _top_k_batch_rule\n\ndef _tie_in_transpose_rule(t):\n return [ad_util.zero, t]\n\ndef _tie_in_batch_rule(batched_args, batch_dims):\n y = tie_in(*batched_args)\n _, bdim_y = batch_dims\n return y, bdim_y\n\ntie_in_p = Primitive('tie_in')\ntie_in_p.def_impl(lambda x, y: y)\ntie_in_p.def_abstract_eval(lambda x, y: raise_to_shaped(y))\nxla.translations[tie_in_p] = lambda c, x, y: y\nad.deflinear(tie_in_p, _tie_in_transpose_rule)\nbatching.primitive_batchers[tie_in_p] = _tie_in_batch_rule\nmasking.masking_rules[tie_in_p] = lambda vals, logical_shapes: vals[1]\n\n\ndef _stop_gradient_jvp_rule(primals, tangents):\n # if we don't call stop_gradient here, we'd only peel off one autodiff tracer\n x, = primals\n return stop_gradient(x), ad_util.zero\n\ndef _stop_gradient_batch_rule(batched_args, batch_dims):\n x, = batched_args\n dim, = batch_dims\n return stop_gradient(x), dim\n\nxla.translations[ad_util.stop_gradient_p] = lambda c, x: x\nad.primitive_jvps[ad_util.stop_gradient_p] = _stop_gradient_jvp_rule\nbatching.primitive_batchers[ad_util.stop_gradient_p] = _stop_gradient_batch_rule\n\n\ndef create_token(x):\n \"\"\"Creates an XLA token value with no preconditions for sequencing effects.\n\n Experimental.\n\n Args:\n x: a dummy argument used to tie the CreateToken operator into a trace. The\n value of `x` is ignored.\n \"\"\"\n # x is a dummy argument used to tie the operator into a trace.\n return create_token_p.bind(x)\n\ncreate_token_p = Primitive(\"create_token\")\ncreate_token_p.def_impl(partial(xla.apply_primitive, create_token_p))\ncreate_token_p.def_abstract_eval(lambda _: abstract_token)\nxla.translations[create_token_p] = lambda c, _: xops.CreateToken(c)\n\ndef after_all(*operands):\n \"\"\"Merges one or more XLA token values. 
Experimental.\n\n Wraps the XLA AfterAll operator.\"\"\"\n return after_all_p.bind(*operands)\n\ndef _after_all_abstract_eval(*operands):\n if any(x is not abstract_token for x in operands):\n raise TypeError(\"Arguments to after_all must be tokens\")\n return abstract_token\n\n\ndef _after_all_translation_rule(c, *operands):\n return xops.AfterAll(c, operands)\n\nafter_all_p = Primitive(\"after_all\")\nafter_all_p.def_impl(partial(xla.apply_primitive, after_all_p))\nafter_all_p.def_abstract_eval(_after_all_abstract_eval)\nxla.translations[after_all_p] = _after_all_translation_rule\n\n\ndef infeed(token, shape=None):\n \"\"\"Consumes an infeed value of `shape` from the host. Experimental.\n\n `token` is used to sequence infeed and outfeed effects.\n \"\"\"\n flat_shapes, treedef = pytree.flatten(shape)\n for shape in flat_shapes:\n if not isinstance(shape, ShapedArray):\n raise TypeError(\"shape argument to infeed must be a pytree of \"\n \"ShapedArray values, got {}\".format(shape))\n xs_and_token = infeed_p.bind(token, shapes=tuple(flat_shapes))\n return (treedef.unflatten(xs_and_token[:-1]), xs_and_token[-1])\n\ndef _infeed_abstract_eval(token, *, shapes):\n if token is not abstract_token:\n raise TypeError(\"First argument to infeed must be a token\")\n return shapes + (abstract_token,)\n\n\ndef _infeed_translation_rule(c, token, *, shapes):\n shape = tuple(xla.aval_to_xla_shape(x).with_major_to_minor_layout_if_absent()\n for x in shapes)\n xs_and_token = xops.InfeedWithToken(token,\n xla_client.Shape.tuple_shape(shape))\n xs = xops.GetTupleElement(xs_and_token, 0)\n token = xops.GetTupleElement(xs_and_token, 1)\n outs = [xops.GetTupleElement(xs, i) for i in range(len(shapes))] + [token]\n return xops.Tuple(c, outs)\n\ninfeed_p = Primitive(\"infeed\")\ninfeed_p.multiple_results = True\ninfeed_p.def_impl(partial(xla.apply_primitive, infeed_p))\ninfeed_p.def_abstract_eval(_infeed_abstract_eval)\nxla.translations[infeed_p] = _infeed_translation_rule\n\ndef outfeed(token, xs):\n \"\"\"Outfeeds value `xs` to the host. Experimental.\n\n `token` is used to sequence infeed and outfeed effects.\n \"\"\"\n flat_xs, _ = pytree.flatten(xs)\n return outfeed_p.bind(token, *flat_xs)\n\ndef _outfeed_abstract_eval(token, *xs):\n if token is not abstract_token:\n raise TypeError(\"First argument to outfeed must be a token\")\n return abstract_token\n\n\ndef _outfeed_translation_rule(c, token, *xs):\n t = xops.Tuple(c, xs)\n return xops.OutfeedWithToken(t, token, c.get_shape(t))\n\noutfeed_p = Primitive(\"outfeed\")\noutfeed_p.def_impl(partial(xla.apply_primitive, outfeed_p))\noutfeed_p.def_abstract_eval(_outfeed_abstract_eval)\nxla.translations[outfeed_p] = _outfeed_translation_rule\n\ndef rng_uniform(a, b, shape):\n \"\"\"Stateful PRNG generator. 
Experimental and its use is discouraged.\n\n Returns uniformly distributed random numbers in the range [a, b)\n\n You should use jax.random for most purposes; this function exists only for\n niche use cases with special performance requirements.\n\n This API may be removed at any time.\n \"\"\"\n return rng_uniform_p.bind(a, b, shape=tuple(shape))\n\ndef _rng_uniform_abstract_eval(a, b, *, shape):\n if a.dtype != b.dtype:\n raise ValueError(\n \"Arguments to rng_uniform must have identical dtypes, got {} \"\n \"and {}.\".format(a.dtype, b.dtype))\n if a.shape != () or b.shape != ():\n raise ValueError(\n \"Arguments to rng_uniform must be scalars; got shapes {} and {}.\"\n .format(a.shape, b.shape))\n return ShapedArray(shape, a.dtype)\n\ndef _rng_uniform_translation_rule(c, a, b, *, shape):\n xla_shape = xc.Shape.array_shape(c.get_shape(a).xla_element_type(), shape)\n return xops.RngUniform(a, b, xla_shape)\n\nrng_uniform_p = Primitive(\"rng_uniform\")\nrng_uniform_p.def_impl(partial(xla.apply_primitive, rng_uniform_p))\nrng_uniform_p.def_abstract_eval(_rng_uniform_abstract_eval)\nxla.translations[rng_uniform_p] = _rng_uniform_translation_rule\n\n### util\n\n_ndim = onp.ndim\n\n\ndef _dilate_shape(shape, dilation):\n \"\"\"Utility function for computing the shape resulting from a dilation.\"\"\"\n if not onp.all(onp.greater(dilation, 0)):\n msg = \"All dilations must be positive, got {}.\"\n raise TypeError(msg.format(dilation))\n dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)\n return onp.where(shape == 0, 0,\n onp.multiply(dilation, onp.subtract(shape, 1)) + 1)\n\ndef _ceil_divide(x1, x2):\n return -onp.floor_divide(onp.negative(x1), x2)\n\ndef padtype_to_pads(in_shape, window_shape, window_strides, padding):\n \"\"\"Convert padding string to list of pairs of pad values.\"\"\"\n PaddingType = xla_client.PaddingType\n\n if isinstance(padding, str):\n mapping = {'VALID': PaddingType.VALID, 'SAME': PaddingType.SAME}\n try:\n padding = mapping[padding.upper()]\n except KeyError as err:\n msg = \"Unrecognized padding type: expected 'VALID' or 'SAME', got {}.\"\n raise RuntimeError(msg.format(padding)) from err\n\n if padding == PaddingType.SAME:\n out_shape = _ceil_divide(in_shape, window_strides)\n pad_sizes = onp.maximum(0, (out_shape - 1) * window_strides +\n window_shape - in_shape)\n return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]\n elif padding == PaddingType.VALID:\n return [(0, 0)] * len(in_shape)\n else:\n msg = \"Unknown padding type: {}.\"\n raise TypeError(msg.format(padding))\n\n\ndef _check_same_dtypes(name, ignore_fp_precision, *ttypes):\n \"\"\"Check that dtypes agree, possibly ignoring float precision.\"\"\"\n # the `ignore_fp_precision` flag exists because the XLA shape inference logic\n # allows mixed floating point precision, but the HLO verifier often rejects it\n types = list(map(onp.dtype, ttypes)) # canonicalize\n if ignore_fp_precision:\n types = [\n onp.floating if dtypes.issubdtype(dtype, onp.floating)\n else onp.complexfloating if dtypes.issubdtype(dtype, onp.complexfloating)\n else dtype for dtype in types]\n if len({dtypes.canonicalize_dtype(t) for t in types}) != 1:\n if ignore_fp_precision:\n msg = (\"{} requires arguments to have same dtypes up to floating point \"\n \"precision, got {}.\")\n else:\n msg = \"{} requires arguments to have the same dtypes, got {}.\"\n raise TypeError(msg.format(name, \", \".join(map(str, types))))\n\n\ndef _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):\n 
\"\"\"Check that conv shapes are valid and are consistent with window_strides.\"\"\"\n if len(lhs_shape) != len(rhs_shape):\n msg = \"Arguments to {} must have same rank, got {} and {}.\"\n raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))\n if len(lhs_shape) < 2:\n msg = \"Arguments to {} must have rank at least 2, got {} and {}.\"\n raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))\n if lhs_shape[1] != rhs_shape[1]:\n msg = \"Arguments to {} must agree on input feature size, got {} and {}.\"\n raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))\n _check_shapelike(name, \"window_strides\", window_strides)\n if not onp.all(onp.greater(window_strides, 0)):\n msg = \"All elements of window_strides must be positive, got {}.\"\n raise TypeError(msg.format(window_strides))\n if len(window_strides) != len(lhs_shape) - 2:\n msg = \"{} window_strides has wrong length: expected {}, got {}.\"\n expected_length = len(lhs_shape) - 2\n raise TypeError(msg.format(name, expected_length, len(window_strides)))\n\n\ndef conv_shape_tuple(lhs_shape, rhs_shape, strides, pads, batch_group_count=1):\n \"\"\"Compute the shape tuple of a conv given input shapes in canonical order.\"\"\"\n if isinstance(pads, str):\n pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)\n if len(pads) != len(lhs_shape) - 2:\n msg = \"Wrong number of explicit pads for convolution: expected {}, got {}.\"\n raise TypeError(msg.format(len(lhs_shape) - 2, len(pads)))\n\n lhs_padded = onp.add(lhs_shape[2:], onp.sum(onp.array(pads).reshape(-1, 2),\n axis=1))\n out_space = onp.floor_divide(\n onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1\n out_space = onp.maximum(0, out_space)\n assert lhs_shape[0] % batch_group_count == 0\n out_shape = (lhs_shape[0] // batch_group_count, rhs_shape[0])\n return tuple(out_shape + tuple(out_space))\n\n\ndef conv_general_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,\n dimension_numbers):\n lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)\n lhs_trans = onp.take(lhs_shape, lhs_perm)\n rhs_trans = onp.take(rhs_shape, rhs_perm)\n out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding)\n return tuple(onp.take(out_trans, onp.argsort(out_perm)))\n\n\ndef conv_transpose_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,\n dimension_numbers):\n lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)\n lhs_trans = onp.take(lhs_shape, lhs_perm)\n rhs_trans = onp.take(rhs_shape, rhs_perm)\n if isinstance(padding, str):\n padding = [_conv_transpose_padding(k, s, padding)\n for k,s in zip(rhs_trans[2:], window_strides)]\n padding = list(map(onp.sum, padding))\n unpad_out_space = [(i-1) * s - k + 2\n for i, k, s in zip(lhs_trans[2:],\n rhs_trans[2:],\n window_strides)]\n out_space = onp.sum([unpad_out_space, padding], axis=0).tolist()\n out_trans = tuple((lhs_trans[0], rhs_trans[0]) + tuple(out_space))\n return tuple(onp.take(out_trans, onp.argsort(out_perm)))\n\n\ndef _check_shapelike(fun_name, arg_name, obj):\n \"\"\"Check that `obj` is a shape-like value (e.g. 
tuple of nonnegative ints).\"\"\"\n if not isinstance(obj, (tuple, list, onp.ndarray)):\n msg = \"{} {} must be of type tuple/list/ndarray, got {}.\"\n raise TypeError(msg.format(fun_name, arg_name, type(obj)))\n # bool(obj) for an ndarray raises an error, so we check len\n if not len(obj): # pylint: disable=g-explicit-length-test\n return\n obj_arr = onp.array(obj)\n if obj_arr.ndim != 1:\n msg = \"{} {} must be rank 1, got {}.\"\n raise TypeError(msg.format(obj_arr.ndim))\n try:\n canonicalize_shape(obj_arr)\n except TypeError:\n msg = \"{} {} must have every element be an integer type, got {}.\"\n raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj))))\n if not (obj_arr >= 0).all():\n msg = \"{} {} must have every element be nonnegative, got {}.\"\n raise TypeError(msg.format(fun_name, arg_name, obj))\n\n\ndef _dynamic_slice_indices(operand, start_indices):\n if not isinstance(start_indices, (tuple, list)):\n if start_indices.ndim != 1:\n raise ValueError(\"Slice indices must be a 1D sequence, got {}\"\n .format(start_indices.shape))\n start_indices = [reshape(slice(start_indices, [i], [i+1]), ())\n for i in range(operand.ndim)]\n else:\n start_indices = [onp.asarray(i, dtype=dtypes.int_) if isinstance(i, int)\n else i for i in start_indices]\n if len(start_indices) != operand.ndim:\n msg = (\"Length of slice indices must match number of operand dimensions ({} \"\n \"vs {})\")\n raise ValueError(msg.format(len(start_indices), operand.shape))\n # map int over operand.shape to raise any dynamic-shape errors\n return [select(lt(i, _const(i, 0)), add(i, _const(i, int(d))), i)\n for i, d in zip(start_indices, operand.shape)]\n\n\n\ndef _const(example, val):\n if dtypes.is_python_scalar(example):\n return dtypes.scalar_type_of(example)(val)\n return onp.array(val, _dtype(example))\n\n_zeros: Callable = partial(full_like, fill_value=0)\n_zero: Callable = partial(full_like, shape=(), fill_value=0)\n_ones: Callable = partial(full_like, fill_value=1)\n_one: Callable = partial(full_like, shape=(), fill_value=1)\n_twos: Callable = partial(full_like, fill_value=2)\n_two: Callable = partial(full_like, shape=(), fill_value=2)\n\ndtype: Callable = dtypes.result_type\n_dtype: Callable = dtypes.result_type\n\ndef _iscomplex(x) -> bool:\n return dtypes.issubdtype(_dtype(x), onp.complexfloating)\n\n\ndef ranges_like(*xs):\n start = 0\n for x in xs:\n x_len = len(x)\n yield range(start, start + x_len)\n start += x_len\n\n\ndef remaining(original, *removed_lists):\n blacklist = set(itertools.chain(*removed_lists))\n return [i for i in original if i not in blacklist]\n\n\ndef _canonicalize_precision(precision):\n if precision is None:\n return None\n if isinstance(precision, Precision):\n return precision\n else:\n msg = \"Precision argument must be None or a lax.Precision value; got {}\"\n raise ValueError(msg.format(precision))\n\n\ndef conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers):\n \"\"\"Converts convolution `dimension_numbers` to a `ConvDimensionNumbers`.\n\n Args:\n lhs_shape: tuple of nonnegative integers, shape of the convolution input.\n rhs_shape: tuple of nonnegative integers, shape of the convolution kernel.\n dimension_numbers: None or a tuple/list of strings or a ConvDimensionNumbers\n object following the convolution dimension number specification format in\n xla_client.py.\n\n Returns:\n A `ConvDimensionNumbers` object that represents `dimension_numbers` in the\n canonical form used by lax functions.\n \"\"\"\n if isinstance(dimension_numbers, 
ConvDimensionNumbers):\n return dimension_numbers\n if len(lhs_shape) != len(rhs_shape):\n msg = \"convolution requires lhs and rhs ndim to be equal, got {} and {}.\"\n raise TypeError(msg.format(len(lhs_shape), len(rhs_shape)))\n\n if dimension_numbers is None:\n iota = tuple(range(len(lhs_shape)))\n return ConvDimensionNumbers(iota, iota, iota)\n elif isinstance(dimension_numbers, (list, tuple)):\n if len(dimension_numbers) != 3:\n msg = \"convolution dimension_numbers list/tuple must be length 3, got {}.\"\n raise TypeError(msg.format(len(dimension_numbers)))\n if not all(isinstance(elt, str) for elt in dimension_numbers):\n msg = \"convolution dimension_numbers elements must be strings, got {}.\"\n raise TypeError(msg.format(tuple(map(type, dimension_numbers))))\n msg = (\"convolution dimension_numbers[{}] must have len equal to the ndim \"\n \"of lhs and rhs, got {} for lhs and rhs shapes {} and {}.\")\n for i, elt in enumerate(dimension_numbers):\n if len(elt) != len(lhs_shape):\n raise TypeError(msg.format(i, len(elt), lhs_shape, rhs_shape))\n\n lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)\n return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)\n else:\n msg = \"convolution dimension_numbers must be tuple/list or None, got {}.\"\n raise TypeError(msg.format(type(dimension_numbers)))\n\n\ndef conv_general_permutations(dimension_numbers):\n \"\"\"Utility for convolution dimension permutations relative to Conv HLO.\"\"\"\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n lhs_char, rhs_char, out_char = charpairs = (\"N\", \"C\"), (\"O\", \"I\"), (\"N\", \"C\")\n for i, (a, b) in enumerate(charpairs):\n if not dimension_numbers[i].count(a) == dimension_numbers[i].count(b) == 1:\n msg = (\"convolution dimension_numbers[{}] must contain the characters \"\n \"'{}' and '{}' exactly once, got {}.\")\n raise TypeError(msg.format(i, a, b, dimension_numbers[i]))\n if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):\n msg = (\"convolution dimension_numbers[{}] cannot have duplicate \"\n \"characters, got {}.\")\n raise TypeError(msg.format(i, dimension_numbers[i]))\n if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==\n set(out_spec) - set(out_char)):\n msg = (\"convolution dimension_numbers elements must each have the same \"\n \"set of spatial characters, got {}.\")\n raise TypeError(msg.format(dimension_numbers))\n\n def getperm(spec, charpair):\n spatial = (i for i, c in enumerate(spec) if c not in charpair)\n if spec is not rhs_spec:\n spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))\n return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)\n\n lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)\n return lhs_perm, rhs_perm, out_perm\n\n\ndef _conv_general_proto(dimension_numbers):\n assert type(dimension_numbers) is ConvDimensionNumbers\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n proto = xla_client.ConvolutionDimensionNumbers()\n proto.input_batch_dimension = lhs_spec[0]\n proto.input_feature_dimension = lhs_spec[1]\n proto.output_batch_dimension = out_spec[0]\n proto.output_feature_dimension = out_spec[1]\n proto.kernel_output_feature_dimension = rhs_spec[0]\n proto.kernel_input_feature_dimension = rhs_spec[1]\n proto.input_spatial_dimensions.extend(lhs_spec[2:])\n proto.kernel_spatial_dimensions.extend(rhs_spec[2:])\n proto.output_spatial_dimensions.extend(out_spec[2:])\n return proto\n\n\ndef _conv_general_vjp_lhs_padding(\n in_shape, window_dimensions, 
window_strides, out_shape, padding,\n lhs_dilation, rhs_dilation):\n lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)\n rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)\n out_dilated_shape = _dilate_shape(out_shape, window_strides)\n pad_before = onp.subtract(rhs_dilated_shape, [lo for lo, _ in padding]) - 1\n pad_after = (onp.add(lhs_dilated_shape, rhs_dilated_shape) - 1\n - out_dilated_shape - pad_before)\n return zip(pad_before, pad_after)\n\n\ndef _conv_general_vjp_rhs_padding(\n in_shape, window_dimensions, window_strides, out_shape, padding,\n lhs_dilation, rhs_dilation):\n lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)\n rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)\n out_dilated_shape = _dilate_shape(out_shape, window_strides)\n total_in_pad = out_dilated_shape + rhs_dilated_shape - lhs_dilated_shape - 1\n return [(pad[0], tot - pad[0]) for pad, tot in zip(padding, total_in_pad)]\n\n\ndef _balanced_eq(x, z, y):\n return div(select(_eq_meet(x, z), _ones(z), _zeros(z)),\n select(_eq_meet(y, z), _twos(z), _ones(z)))\n\n\ndef _eq_meet(a, b):\n a_dtype, b_dtype = _dtype(a), _dtype(b)\n if a_dtype != b_dtype:\n higher_dtype = dtypes.promote_types(a_dtype, b_dtype)\n if higher_dtype == a_dtype:\n a = convert_element_type(a, b_dtype)\n else:\n b = convert_element_type(b, a_dtype)\n return eq(a, b)\n\n\ndef _abstractify(x):\n return raise_to_shaped(core.get_aval(x))\n\n\ndef _check_user_dtype_supported(dtype, fun_name=None):\n onp_dtype = onp.dtype(dtype)\n if onp_dtype.kind not in \"biufc\" and onp_dtype.type != dtypes.bfloat16:\n msg = f\"JAX only supports number and bool dtypes, got dtype {dtype}\"\n raise TypeError(msg)\n if dtype is not None and onp_dtype != dtypes.canonicalize_dtype(dtype):\n msg = (\"Explicitly requested dtype {} {} is not available, \"\n \"and will be truncated to dtype {}. To enable more dtypes, set the \"\n \"jax_enable_x64 configuration option or the JAX_ENABLE_X64 shell \"\n \"environment variable. \"\n \"See https://github.com/google/jax#current-gotchas for more.\")\n fun_name = \"requested in {}\".format(fun_name) if fun_name else \"\"\n truncated_dtype = dtypes.canonicalize_dtype(dtype).name\n warnings.warn(msg.format(dtype, fun_name , truncated_dtype))\n\n\ndef _canonicalize_axis(axis, num_dims):\n \"\"\"Canonicalize an axis in (-num_dims, num_dims) to [0, num_dims).\"\"\"\n axis = int(axis)\n if axis < 0:\n axis = axis + num_dims\n if axis < 0 or axis >= num_dims:\n raise ValueError(\n \"axis {} is out of bounds for array of dimension {}\".format(\n axis, num_dims))\n return axis\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.take",
"numpy.subtract",
"numpy.dtype",
"numpy.less",
"numpy.any",
"numpy.issubdtype",
"numpy.asarray",
"numpy.argsort",
"numpy.size",
"numpy.greater_equal",
"numpy.add",
"numpy.delete",
"numpy.negative",
"numpy.where",
"numpy.sqrt",
"numpy.ceil",
"numpy.zeros",
"numpy.equal",
"numpy.greater",
"numpy.arange",
"numpy.max",
"numpy.all",
"numpy.ndim",
"numpy.prod",
"numpy.maximum",
"numpy.finfo",
"numpy.zeros_like",
"numpy.cumsum",
"numpy.swapaxes",
"numpy.not_equal",
"numpy.iinfo",
"numpy.shape",
"numpy.less_equal",
"numpy.flip",
"numpy.array"
]
] |
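The lax.py source dumped in the row above explains, in its comments, the bit-level trick behind `_float_to_int_for_sort` for giving floating-point sorts a total order: bitcast the float to a signed integer, then remap negative bit patterns as `int32_max - x` in unsigned arithmetic, so that -NaN sorts first, NaN sorts last, and -0.0 lands just before 0.0. The sketch below reproduces that mapping in plain NumPy so it can be run outside JAX; the name `float_to_int_key` and the test values are illustrative only, not part of the dataset row.

```python
import numpy as np

def float_to_int_key(x):
    """Map float32 values to int32 keys with the same total order."""
    signed = x.view(np.int32)
    unsigned = x.view(np.uint32)
    # For negative bit patterns, reverse the order: int32_max - x,
    # computed in unsigned arithmetic so the subtraction cannot overflow.
    flipped = (np.uint32(np.iinfo(np.int32).max) - unsigned).view(np.int32)
    return np.where(signed < 0, flipped, signed)

# A NaN with the sign bit set, built explicitly from its bit pattern.
neg_nan = np.array([0xFFC00000], dtype=np.uint32).view(np.float32)[0]
vals = np.array([np.nan, -np.inf, -1.5, -0.0, 0.0, 2.0, np.inf, neg_nan],
                dtype=np.float32)
order = np.argsort(float_to_int_key(vals), kind="stable")
# Sorted by key: -NaN, -inf, -1.5, -0.0, 0.0, 2.0, inf, NaN
print(vals[order])
```

This matches the ordering the row's comment attributes to XLA's comparator helpers: finite values keep their IEEE order, -0.0 precedes 0.0, and the two NaN signs bracket the whole range.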
mak213k/Servidor_automatizado_python | [
"75a111b9d3b2c50c6f2a9a36d21432053f02284d"
] | [
"ServidorPython/python32_web/Lib/site-packages/numpy/lib/tests/test_recfunctions.py"
] | [
"from __future__ import division, absolute_import, print_function\n\nimport pytest\n\nimport numpy as np\nimport numpy.ma as ma\nfrom numpy.ma.mrecords import MaskedRecords\nfrom numpy.ma.testutils import assert_equal\nfrom numpy.testing import assert_, assert_raises\nfrom numpy.lib.recfunctions import (\n drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,\n find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,\n repack_fields, unstructured_to_structured, structured_to_unstructured,\n apply_along_fields, require_fields, assign_fields_by_name)\nget_names = np.lib.recfunctions.get_names\nget_names_flat = np.lib.recfunctions.get_names_flat\nzip_descr = np.lib.recfunctions.zip_descr\n\n\nclass TestRecFunctions(object):\n # Misc tests\n\n def setup(self):\n x = np.array([1, 2, ])\n y = np.array([10, 20, 30])\n z = np.array([('A', 1.), ('B', 2.)],\n dtype=[('A', '|S3'), ('B', float)])\n w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],\n dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])\n self.data = (w, x, y, z)\n\n def test_zip_descr(self):\n # Test zip_descr\n (w, x, y, z) = self.data\n\n # Std array\n test = zip_descr((x, x), flatten=True)\n assert_equal(test,\n np.dtype([('', int), ('', int)]))\n test = zip_descr((x, x), flatten=False)\n assert_equal(test,\n np.dtype([('', int), ('', int)]))\n\n # Std & flexible-dtype\n test = zip_descr((x, z), flatten=True)\n assert_equal(test,\n np.dtype([('', int), ('A', '|S3'), ('B', float)]))\n test = zip_descr((x, z), flatten=False)\n assert_equal(test,\n np.dtype([('', int),\n ('', [('A', '|S3'), ('B', float)])]))\n\n # Standard & nested dtype\n test = zip_descr((x, w), flatten=True)\n assert_equal(test,\n np.dtype([('', int),\n ('a', int),\n ('ba', float), ('bb', int)]))\n test = zip_descr((x, w), flatten=False)\n assert_equal(test,\n np.dtype([('', int),\n ('', [('a', int),\n ('b', [('ba', float), ('bb', int)])])]))\n\n def test_drop_fields(self):\n # Test drop_fields\n a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],\n dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])\n\n # A basic field\n test = drop_fields(a, 'a')\n control = np.array([((2, 3.0),), ((5, 6.0),)],\n dtype=[('b', [('ba', float), ('bb', int)])])\n assert_equal(test, control)\n\n # Another basic field (but nesting two fields)\n test = drop_fields(a, 'b')\n control = np.array([(1,), (4,)], dtype=[('a', int)])\n assert_equal(test, control)\n\n # A nested sub-field\n test = drop_fields(a, ['ba', ])\n control = np.array([(1, (3.0,)), (4, (6.0,))],\n dtype=[('a', int), ('b', [('bb', int)])])\n assert_equal(test, control)\n\n # All the nested sub-field from a field: zap that field\n test = drop_fields(a, ['ba', 'bb'])\n control = np.array([(1,), (4,)], dtype=[('a', int)])\n assert_equal(test, control)\n\n test = drop_fields(a, ['a', 'b'])\n assert_(test is None)\n\n def test_rename_fields(self):\n # Test rename fields\n a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],\n dtype=[('a', int),\n ('b', [('ba', float), ('bb', (float, 2))])])\n test = rename_fields(a, {'a': 'A', 'bb': 'BB'})\n newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]\n control = a.view(newdtype)\n assert_equal(test.dtype, newdtype)\n assert_equal(test, control)\n\n def test_get_names(self):\n # Test get_names\n ndtype = np.dtype([('A', '|S3'), ('B', float)])\n test = get_names(ndtype)\n assert_equal(test, ('A', 'B'))\n\n ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])\n test = get_names(ndtype)\n assert_equal(test, ('a', ('b', ('ba', 
'bb'))))\n\n def test_get_names_flat(self):\n # Test get_names_flat\n ndtype = np.dtype([('A', '|S3'), ('B', float)])\n test = get_names_flat(ndtype)\n assert_equal(test, ('A', 'B'))\n\n ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])\n test = get_names_flat(ndtype)\n assert_equal(test, ('a', 'b', 'ba', 'bb'))\n\n def test_get_fieldstructure(self):\n # Test get_fieldstructure\n\n # No nested fields\n ndtype = np.dtype([('A', '|S3'), ('B', float)])\n test = get_fieldstructure(ndtype)\n assert_equal(test, {'A': [], 'B': []})\n\n # One 1-nested field\n ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])\n test = get_fieldstructure(ndtype)\n assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})\n\n # One 2-nested fields\n ndtype = np.dtype([('A', int),\n ('B', [('BA', int),\n ('BB', [('BBA', int), ('BBB', int)])])])\n test = get_fieldstructure(ndtype)\n control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],\n 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}\n assert_equal(test, control)\n\n def test_find_duplicates(self):\n # Test find_duplicates\n a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),\n (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],\n mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),\n (0, (0, 0)), (1, (0, 0)), (0, (1, 0))],\n dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])\n\n test = find_duplicates(a, ignoremask=False, return_index=True)\n control = [0, 2]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n test = find_duplicates(a, key='A', return_index=True)\n control = [0, 1, 2, 3, 5]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n test = find_duplicates(a, key='B', return_index=True)\n control = [0, 1, 2, 4]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n test = find_duplicates(a, key='BA', return_index=True)\n control = [0, 1, 2, 4]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n test = find_duplicates(a, key='BB', return_index=True)\n control = [0, 1, 2, 3, 4]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n def test_find_duplicates_ignoremask(self):\n # Test the ignoremask option of find_duplicates\n ndtype = [('a', int)]\n a = ma.array([1, 1, 1, 2, 2, 3, 3],\n mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)\n test = find_duplicates(a, ignoremask=True, return_index=True)\n control = [0, 1, 3, 4]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n test = find_duplicates(a, ignoremask=False, return_index=True)\n control = [0, 1, 2, 3, 4, 6]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n def test_repack_fields(self):\n dt = np.dtype('u1,f4,i8', align=True)\n a = np.zeros(2, dtype=dt)\n\n assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))\n assert_equal(repack_fields(a).itemsize, 13)\n assert_equal(repack_fields(repack_fields(dt), align=True), dt)\n\n # make sure type is preserved\n dt = np.dtype((np.record, dt))\n assert_(repack_fields(dt).type is np.record)\n\n def test_structured_to_unstructured(self):\n a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])\n out = structured_to_unstructured(a)\n assert_equal(out, np.zeros((4,5), dtype='f8'))\n\n b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],\n dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])\n out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)\n assert_equal(out, np.array([ 3. , 5.5, 9. 
, 11. ]))\n out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)\n assert_equal(out, np.array([ 1. , 4. , 7. , 10. ]))\n\n c = np.arange(20).reshape((4,5))\n out = unstructured_to_structured(c, a.dtype)\n want = np.array([( 0, ( 1., 2), [ 3., 4.]),\n ( 5, ( 6., 7), [ 8., 9.]),\n (10, (11., 12), [13., 14.]),\n (15, (16., 17), [18., 19.])],\n dtype=[('a', 'i4'),\n ('b', [('f0', 'f4'), ('f1', 'u2')]),\n ('c', 'f4', (2,))])\n assert_equal(out, want)\n\n d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],\n dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])\n assert_equal(apply_along_fields(np.mean, d),\n np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))\n assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),\n np.array([ 3. , 5.5, 9. , 11. ]))\n\n # check that for uniform field dtypes we get a view, not a copy:\n d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],\n dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])\n dd = structured_to_unstructured(d)\n ddd = unstructured_to_structured(dd, d.dtype)\n assert_(dd.base is d)\n assert_(ddd.base is d)\n\n # including uniform fields with subarrays unpacked\n d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),\n (8, [9, 10], [[11, 12], [13, 14]])],\n dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])\n dd = structured_to_unstructured(d)\n ddd = unstructured_to_structured(dd, d.dtype)\n assert_(dd.base is d)\n assert_(ddd.base is d)\n\n # test that nested fields with identical names don't break anything\n point = np.dtype([('x', int), ('y', int)])\n triangle = np.dtype([('a', point), ('b', point), ('c', point)])\n arr = np.zeros(10, triangle)\n res = structured_to_unstructured(arr, dtype=int)\n assert_equal(res, np.zeros((10, 6), dtype=int))\n\n\n def test_field_assignment_by_name(self):\n a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])\n newdt = [('b', 'f4'), ('c', 'u1')]\n\n assert_equal(require_fields(a, newdt), np.ones(2, newdt))\n\n b = np.array([(1,2), (3,4)], dtype=newdt)\n assign_fields_by_name(a, b, zero_unassigned=False)\n assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))\n assign_fields_by_name(a, b)\n assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))\n\n # test nested fields\n a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])\n newdt = [('a', [('c', 'u1')])]\n assert_equal(require_fields(a, newdt), np.ones(2, newdt))\n b = np.array([((2,),), ((3,),)], dtype=newdt)\n assign_fields_by_name(a, b, zero_unassigned=False)\n assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))\n assign_fields_by_name(a, b)\n assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))\n\n # test unstructured code path for 0d arrays\n a, b = np.array(3), np.array(0)\n assign_fields_by_name(b, a)\n assert_equal(b[()], 3)\n\n\nclass TestRecursiveFillFields(object):\n # Test recursive_fill_fields.\n def test_simple_flexible(self):\n # Test recursive_fill_fields on flexible-array\n a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])\n b = np.zeros((3,), dtype=a.dtype)\n test = recursive_fill_fields(a, b)\n control = np.array([(1, 10.), (2, 20.), (0, 0.)],\n dtype=[('A', int), ('B', float)])\n assert_equal(test, control)\n\n def test_masked_flexible(self):\n # Test recursive_fill_fields on masked flexible-array\n a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],\n dtype=[('A', int), ('B', float)])\n b = ma.zeros((3,), dtype=a.dtype)\n test = recursive_fill_fields(a, b)\n control = ma.array([(1, 10.), (2, 20.), (0, 0.)],\n mask=[(0, 1), (1, 0), (0, 0)],\n 
dtype=[('A', int), ('B', float)])\n assert_equal(test, control)\n\n\nclass TestMergeArrays(object):\n # Test merge_arrays\n\n def setup(self):\n x = np.array([1, 2, ])\n y = np.array([10, 20, 30])\n z = np.array(\n [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])\n w = np.array(\n [(1, (2, 3.0)), (4, (5, 6.0))],\n dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])\n self.data = (w, x, y, z)\n\n def test_solo(self):\n # Test merge_arrays on a single array.\n (_, x, _, z) = self.data\n\n test = merge_arrays(x)\n control = np.array([(1,), (2,)], dtype=[('f0', int)])\n assert_equal(test, control)\n test = merge_arrays((x,))\n assert_equal(test, control)\n\n test = merge_arrays(z, flatten=False)\n assert_equal(test, z)\n test = merge_arrays(z, flatten=True)\n assert_equal(test, z)\n\n def test_solo_w_flatten(self):\n # Test merge_arrays on a single array w & w/o flattening\n w = self.data[0]\n test = merge_arrays(w, flatten=False)\n assert_equal(test, w)\n\n test = merge_arrays(w, flatten=True)\n control = np.array([(1, 2, 3.0), (4, 5, 6.0)],\n dtype=[('a', int), ('ba', float), ('bb', int)])\n assert_equal(test, control)\n\n def test_standard(self):\n # Test standard & standard\n # Test merge arrays\n (_, x, y, _) = self.data\n test = merge_arrays((x, y), usemask=False)\n control = np.array([(1, 10), (2, 20), (-1, 30)],\n dtype=[('f0', int), ('f1', int)])\n assert_equal(test, control)\n\n test = merge_arrays((x, y), usemask=True)\n control = ma.array([(1, 10), (2, 20), (-1, 30)],\n mask=[(0, 0), (0, 0), (1, 0)],\n dtype=[('f0', int), ('f1', int)])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n def test_flatten(self):\n # Test standard & flexible\n (_, x, _, z) = self.data\n test = merge_arrays((x, z), flatten=True)\n control = np.array([(1, 'A', 1.), (2, 'B', 2.)],\n dtype=[('f0', int), ('A', '|S3'), ('B', float)])\n assert_equal(test, control)\n\n test = merge_arrays((x, z), flatten=False)\n control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],\n dtype=[('f0', int),\n ('f1', [('A', '|S3'), ('B', float)])])\n assert_equal(test, control)\n\n def test_flatten_wflexible(self):\n # Test flatten standard & nested\n (w, x, _, _) = self.data\n test = merge_arrays((x, w), flatten=True)\n control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],\n dtype=[('f0', int),\n ('a', int), ('ba', float), ('bb', int)])\n assert_equal(test, control)\n\n test = merge_arrays((x, w), flatten=False)\n controldtype = [('f0', int),\n ('f1', [('a', int),\n ('b', [('ba', float), ('bb', int)])])]\n control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],\n dtype=controldtype)\n assert_equal(test, control)\n\n def test_wmasked_arrays(self):\n # Test merge_arrays masked arrays\n (_, x, _, _) = self.data\n mx = ma.array([1, 2, 3], mask=[1, 0, 0])\n test = merge_arrays((x, mx), usemask=True)\n control = ma.array([(1, 1), (2, 2), (-1, 3)],\n mask=[(0, 1), (0, 0), (1, 0)],\n dtype=[('f0', int), ('f1', int)])\n assert_equal(test, control)\n test = merge_arrays((x, mx), usemask=True, asrecarray=True)\n assert_equal(test, control)\n assert_(isinstance(test, MaskedRecords))\n\n def test_w_singlefield(self):\n # Test single field\n test = merge_arrays((np.array([1, 2]).view([('a', int)]),\n np.array([10., 20., 30.])),)\n control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],\n mask=[(0, 0), (0, 0), (1, 0)],\n dtype=[('a', int), ('f1', float)])\n assert_equal(test, control)\n\n def test_w_shorter_flex(self):\n # Test merge_arrays w/ a shorter flexndarray.\n z = self.data[-1]\n\n # Fixme, this 
test looks incomplete and broken\n #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))\n #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],\n # dtype=[('A', '|S3'), ('B', float), ('C', int)])\n #assert_equal(test, control)\n\n # Hack to avoid pyflakes warnings about unused variables\n merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))\n np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],\n dtype=[('A', '|S3'), ('B', float), ('C', int)])\n\n def test_singlerecord(self):\n (_, x, y, z) = self.data\n test = merge_arrays((x[0], y[0], z[0]), usemask=False)\n control = np.array([(1, 10, ('A', 1))],\n dtype=[('f0', int),\n ('f1', int),\n ('f2', [('A', '|S3'), ('B', float)])])\n assert_equal(test, control)\n\n\nclass TestAppendFields(object):\n # Test append_fields\n\n def setup(self):\n x = np.array([1, 2, ])\n y = np.array([10, 20, 30])\n z = np.array(\n [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])\n w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],\n dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])\n self.data = (w, x, y, z)\n\n def test_append_single(self):\n # Test simple case\n (_, x, _, _) = self.data\n test = append_fields(x, 'A', data=[10, 20, 30])\n control = ma.array([(1, 10), (2, 20), (-1, 30)],\n mask=[(0, 0), (0, 0), (1, 0)],\n dtype=[('f0', int), ('A', int)],)\n assert_equal(test, control)\n\n def test_append_double(self):\n # Test simple case\n (_, x, _, _) = self.data\n test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])\n control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],\n mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],\n dtype=[('f0', int), ('A', int), ('B', int)],)\n assert_equal(test, control)\n\n def test_append_on_flex(self):\n # Test append_fields on flexible type arrays\n z = self.data[-1]\n test = append_fields(z, 'C', data=[10, 20, 30])\n control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],\n mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],\n dtype=[('A', '|S3'), ('B', float), ('C', int)],)\n assert_equal(test, control)\n\n def test_append_on_nested(self):\n # Test append_fields on nested fields\n w = self.data[0]\n test = append_fields(w, 'C', data=[10, 20, 30])\n control = ma.array([(1, (2, 3.0), 10),\n (4, (5, 6.0), 20),\n (-1, (-1, -1.), 30)],\n mask=[(\n 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],\n dtype=[('a', int),\n ('b', [('ba', float), ('bb', int)]),\n ('C', int)],)\n assert_equal(test, control)\n\n\nclass TestStackArrays(object):\n # Test stack_arrays\n def setup(self):\n x = np.array([1, 2, ])\n y = np.array([10, 20, 30])\n z = np.array(\n [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])\n w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],\n dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])\n self.data = (w, x, y, z)\n\n def test_solo(self):\n # Test stack_arrays on single arrays\n (_, x, _, _) = self.data\n test = stack_arrays((x,))\n assert_equal(test, x)\n assert_(test is x)\n\n test = stack_arrays(x)\n assert_equal(test, x)\n assert_(test is x)\n\n def test_unnamed_fields(self):\n # Tests combinations of arrays w/o named fields\n (_, x, y, _) = self.data\n\n test = stack_arrays((x, x), usemask=False)\n control = np.array([1, 2, 1, 2])\n assert_equal(test, control)\n\n test = stack_arrays((x, y), usemask=False)\n control = np.array([1, 2, 10, 20, 30])\n assert_equal(test, control)\n\n test = stack_arrays((y, x), usemask=False)\n control = np.array([10, 20, 30, 1, 2])\n assert_equal(test, control)\n\n def test_unnamed_and_named_fields(self):\n # 
Test combination of arrays w/ & w/o named fields\n (_, x, _, z) = self.data\n\n test = stack_arrays((x, z))\n control = ma.array([(1, -1, -1), (2, -1, -1),\n (-1, 'A', 1), (-1, 'B', 2)],\n mask=[(0, 1, 1), (0, 1, 1),\n (1, 0, 0), (1, 0, 0)],\n dtype=[('f0', int), ('A', '|S3'), ('B', float)])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n test = stack_arrays((z, x))\n control = ma.array([('A', 1, -1), ('B', 2, -1),\n (-1, -1, 1), (-1, -1, 2), ],\n mask=[(0, 0, 1), (0, 0, 1),\n (1, 1, 0), (1, 1, 0)],\n dtype=[('A', '|S3'), ('B', float), ('f2', int)])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n test = stack_arrays((z, z, x))\n control = ma.array([('A', 1, -1), ('B', 2, -1),\n ('A', 1, -1), ('B', 2, -1),\n (-1, -1, 1), (-1, -1, 2), ],\n mask=[(0, 0, 1), (0, 0, 1),\n (0, 0, 1), (0, 0, 1),\n (1, 1, 0), (1, 1, 0)],\n dtype=[('A', '|S3'), ('B', float), ('f2', int)])\n assert_equal(test, control)\n\n def test_matching_named_fields(self):\n # Test combination of arrays w/ matching field names\n (_, x, _, z) = self.data\n zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],\n dtype=[('A', '|S3'), ('B', float), ('C', float)])\n test = stack_arrays((z, zz))\n control = ma.array([('A', 1, -1), ('B', 2, -1),\n (\n 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],\n dtype=[('A', '|S3'), ('B', float), ('C', float)],\n mask=[(0, 0, 1), (0, 0, 1),\n (0, 0, 0), (0, 0, 0), (0, 0, 0)])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n test = stack_arrays((z, zz, x))\n ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]\n control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),\n ('a', 10., 100., -1), ('b', 20., 200., -1),\n ('c', 30., 300., -1),\n (-1, -1, -1, 1), (-1, -1, -1, 2)],\n dtype=ndtype,\n mask=[(0, 0, 1, 1), (0, 0, 1, 1),\n (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),\n (1, 1, 1, 0), (1, 1, 1, 0)])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n def test_defaults(self):\n # Test defaults: no exception raised if keys of defaults are not fields.\n (_, _, _, z) = self.data\n zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],\n dtype=[('A', '|S3'), ('B', float), ('C', float)])\n defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}\n test = stack_arrays((z, zz), defaults=defaults)\n control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),\n (\n 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],\n dtype=[('A', '|S3'), ('B', float), ('C', float)],\n mask=[(0, 0, 1), (0, 0, 1),\n (0, 0, 0), (0, 0, 0), (0, 0, 0)])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n def test_autoconversion(self):\n # Tests autoconversion\n adtype = [('A', int), ('B', bool), ('C', float)]\n a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)\n bdtype = [('A', int), ('B', float), ('C', float)]\n b = ma.array([(4, 5, 6)], dtype=bdtype)\n control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],\n dtype=bdtype)\n test = stack_arrays((a, b), autoconvert=True)\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n with assert_raises(TypeError):\n stack_arrays((a, b), autoconvert=False)\n\n def test_checktitles(self):\n # Test using titles in the field names\n adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]\n a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)\n bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]\n b = 
ma.array([(4, 5, 6)], dtype=bdtype)\n test = stack_arrays((a, b))\n control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],\n dtype=bdtype)\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n def test_subdtype(self):\n z = np.array([\n ('A', 1), ('B', 2)\n ], dtype=[('A', '|S3'), ('B', float, (1,))])\n zz = np.array([\n ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)\n ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])\n\n res = stack_arrays((z, zz))\n expected = ma.array(\n data=[\n (b'A', [1.0], 0),\n (b'B', [2.0], 0),\n (b'a', [10.0], 100.0),\n (b'b', [20.0], 200.0),\n (b'c', [30.0], 300.0)],\n mask=[\n (False, [False], True),\n (False, [False], True),\n (False, [False], False),\n (False, [False], False),\n (False, [False], False)\n ],\n dtype=zz.dtype\n )\n assert_equal(res.dtype, expected.dtype)\n assert_equal(res, expected)\n assert_equal(res.mask, expected.mask)\n\n\nclass TestJoinBy(object):\n def setup(self):\n self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),\n np.arange(100, 110))),\n dtype=[('a', int), ('b', int), ('c', int)])\n self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),\n np.arange(100, 110))),\n dtype=[('a', int), ('b', int), ('d', int)])\n\n def test_inner_join(self):\n # Basic test of join_by\n a, b = self.a, self.b\n\n test = join_by('a', a, b, jointype='inner')\n control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),\n (7, 57, 67, 107, 102), (8, 58, 68, 108, 103),\n (9, 59, 69, 109, 104)],\n dtype=[('a', int), ('b1', int), ('b2', int),\n ('c', int), ('d', int)])\n assert_equal(test, control)\n\n def test_join(self):\n a, b = self.a, self.b\n\n # Fixme, this test is broken\n #test = join_by(('a', 'b'), a, b)\n #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),\n # (7, 57, 107, 102), (8, 58, 108, 103),\n # (9, 59, 109, 104)],\n # dtype=[('a', int), ('b', int),\n # ('c', int), ('d', int)])\n #assert_equal(test, control)\n\n # Hack to avoid pyflakes unused variable warnings\n join_by(('a', 'b'), a, b)\n np.array([(5, 55, 105, 100), (6, 56, 106, 101),\n (7, 57, 107, 102), (8, 58, 108, 103),\n (9, 59, 109, 104)],\n dtype=[('a', int), ('b', int),\n ('c', int), ('d', int)])\n\n def test_join_subdtype(self):\n # tests the bug in https://stackoverflow.com/q/44769632/102441\n from numpy.lib import recfunctions as rfn\n foo = np.array([(1,)],\n dtype=[('key', int)])\n bar = np.array([(1, np.array([1,2,3]))],\n dtype=[('key', int), ('value', 'uint16', 3)])\n res = join_by('key', foo, bar)\n assert_equal(res, bar.view(ma.MaskedArray))\n\n def test_outer_join(self):\n a, b = self.a, self.b\n\n test = join_by(('a', 'b'), a, b, 'outer')\n control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),\n (2, 52, 102, -1), (3, 53, 103, -1),\n (4, 54, 104, -1), (5, 55, 105, -1),\n (5, 65, -1, 100), (6, 56, 106, -1),\n (6, 66, -1, 101), (7, 57, 107, -1),\n (7, 67, -1, 102), (8, 58, 108, -1),\n (8, 68, -1, 103), (9, 59, 109, -1),\n (9, 69, -1, 104), (10, 70, -1, 105),\n (11, 71, -1, 106), (12, 72, -1, 107),\n (13, 73, -1, 108), (14, 74, -1, 109)],\n mask=[(0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 1, 0), (0, 0, 0, 1),\n (0, 0, 1, 0), (0, 0, 0, 1),\n (0, 0, 1, 0), (0, 0, 0, 1),\n (0, 0, 1, 0), (0, 0, 0, 1),\n (0, 0, 1, 0), (0, 0, 1, 0),\n (0, 0, 1, 0), (0, 0, 1, 0),\n (0, 0, 1, 0), (0, 0, 1, 0)],\n dtype=[('a', int), ('b', int),\n ('c', int), ('d', int)])\n assert_equal(test, control)\n\n def test_leftouter_join(self):\n a, b = self.a, 
self.b\n\n test = join_by(('a', 'b'), a, b, 'leftouter')\n control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),\n (2, 52, 102, -1), (3, 53, 103, -1),\n (4, 54, 104, -1), (5, 55, 105, -1),\n (6, 56, 106, -1), (7, 57, 107, -1),\n (8, 58, 108, -1), (9, 59, 109, -1)],\n mask=[(0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1)],\n dtype=[('a', int), ('b', int), ('c', int), ('d', int)])\n assert_equal(test, control)\n\n def test_different_field_order(self):\n # gh-8940\n a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])\n b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])\n # this should not give a FutureWarning:\n j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)\n assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])\n\n def test_duplicate_keys(self):\n a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])\n b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])\n assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)\n\n @pytest.mark.xfail(reason=\"See comment at gh-9343\")\n def test_same_name_different_dtypes_key(self):\n a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])\n b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])\n expected_dtype = np.dtype([\n ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])\n\n a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)\n b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)\n res = join_by('key', a, b)\n\n assert_equal(res.dtype, expected_dtype)\n\n def test_same_name_different_dtypes(self):\n # gh-9338\n a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])\n b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])\n expected_dtype = np.dtype([\n ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])\n\n a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)\n b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)\n res = join_by('key', a, b)\n\n assert_equal(res.dtype, expected_dtype)\n\n def test_subarray_key(self):\n a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])\n a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)\n\n b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])\n b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)\n\n expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])\n expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)\n\n res = join_by('pos', a, b)\n assert_equal(res.dtype, expected_dtype)\n assert_equal(res, expected)\n\n def test_padded_dtype(self):\n dt = np.dtype('i1,f4', align=True)\n dt.names = ('k', 'v')\n assert_(len(dt.descr), 3) # padding field is inserted\n\n a = np.array([(1, 3), (3, 2)], dt)\n b = np.array([(1, 1), (2, 2)], dt)\n res = join_by('k', a, b)\n\n # no padding fields remain\n expected_dtype = np.dtype([\n ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')\n ])\n\n assert_equal(res.dtype, expected_dtype)\n\n\nclass TestJoinBy2(object):\n @classmethod\n def setup(cls):\n cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),\n np.arange(100, 110))),\n dtype=[('a', int), ('b', int), ('c', int)])\n cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),\n np.arange(100, 110))),\n dtype=[('a', int), ('b', int), ('d', int)])\n\n def test_no_r1postfix(self):\n # Basic test of join_by no_r1postfix\n a, b = self.a, self.b\n\n test = join_by(\n 'a', a, b, r1postfix='', r2postfix='2', jointype='inner')\n control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 
101),\n (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),\n (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),\n (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),\n (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],\n dtype=[('a', int), ('b', int), ('b2', int),\n ('c', int), ('d', int)])\n assert_equal(test, control)\n\n def test_no_postfix(self):\n assert_raises(ValueError, join_by, 'a', self.a, self.b,\n r1postfix='', r2postfix='')\n\n def test_no_r2postfix(self):\n # Basic test of join_by no_r2postfix\n a, b = self.a, self.b\n\n test = join_by(\n 'a', a, b, r1postfix='1', r2postfix='', jointype='inner')\n control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),\n (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),\n (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),\n (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),\n (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],\n dtype=[('a', int), ('b1', int), ('b', int),\n ('c', int), ('d', int)])\n assert_equal(test, control)\n\n def test_two_keys_two_vars(self):\n a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),\n np.arange(50, 60), np.arange(10, 20))),\n dtype=[('k', int), ('a', int), ('b', int), ('c', int)])\n\n b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),\n np.arange(65, 75), np.arange(0, 10))),\n dtype=[('k', int), ('a', int), ('b', int), ('c', int)])\n\n control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),\n (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),\n (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),\n (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),\n (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],\n dtype=[('k', int), ('a', int), ('b1', int),\n ('b2', int), ('c1', int), ('c2', int)])\n test = join_by(\n ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')\n assert_equal(test.dtype, control.dtype)\n assert_equal(test, control)\n\nclass TestAppendFieldsObj(object):\n \"\"\"\n Test append_fields with arrays containing objects\n \"\"\"\n # https://github.com/numpy/numpy/issues/2346\n\n def setup(self):\n from datetime import date\n self.data = dict(obj=date(2000, 1, 1))\n\n def test_append_to_objects(self):\n \"Test append_fields when the base array contains objects\"\n obj = self.data['obj']\n x = np.array([(obj, 1.), (obj, 2.)],\n dtype=[('A', object), ('B', float)])\n y = np.array([10, 20], dtype=int)\n test = append_fields(x, 'C', data=y, usemask=False)\n control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],\n dtype=[('A', object), ('B', float), ('C', int)])\n assert_equal(test, control)\n"
] | [
[
"numpy.ones",
"numpy.lib.recfunctions.rename_fields",
"numpy.ma.testutils.assert_equal",
"numpy.dtype",
"numpy.lib.recfunctions.require_fields",
"numpy.lib.recfunctions.append_fields",
"numpy.lib.recfunctions.recursive_fill_fields",
"numpy.lib.recfunctions.drop_fields",
"numpy.lib.recfunctions.join_by",
"numpy.lib.recfunctions.find_duplicates",
"numpy.ma.zeros",
"numpy.tile",
"numpy.zeros",
"numpy.lib.recfunctions.get_fieldstructure",
"numpy.arange",
"numpy.lib.recfunctions.structured_to_unstructured",
"numpy.lib.recfunctions.apply_along_fields",
"numpy.lib.recfunctions.stack_arrays",
"numpy.testing.assert_raises",
"numpy.lib.recfunctions.repack_fields",
"numpy.lib.recfunctions.unstructured_to_structured",
"numpy.ma.array",
"numpy.lib.recfunctions.assign_fields_by_name",
"numpy.testing.assert_",
"numpy.array",
"numpy.lib.recfunctions.merge_arrays"
]
] |
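The code cell of the row above is a test suite for numpy.lib.recfunctions; the following is a minimal, illustrative sketch (not part of the dataset row, and the sample arrays are invented for the example) of two helpers that suite exercises, join_by and structured_to_unstructured:

# Sketch only: demonstrates two APIs listed in the row's apis column.
import numpy as np
from numpy.lib import recfunctions as rfn

a = np.array([(1, 50), (2, 51)], dtype=[('key', int), ('b', int)])
b = np.array([(1, 100), (3, 101)], dtype=[('key', int), ('d', int)])

inner = rfn.join_by('key', a, b, jointype='inner')  # keep rows whose keys match
flat = rfn.structured_to_unstructured(a)            # structured -> plain 2-D array
print(inner.dtype.names)  # ('key', 'b', 'd')
print(flat.shape)         # (2, 2)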
sainjusajan/django-oscar | [
"466e8edc807be689b0a28c9e525c8323cc48b8e1"
] | [
"oscar/lib/python2.7/site-packages/IPython/core/pylabtools.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"Pylab (matplotlib) support utilities.\"\"\"\r\nfrom __future__ import print_function\r\n\r\n# Copyright (c) IPython Development Team.\r\n# Distributed under the terms of the Modified BSD License.\r\n\r\nfrom io import BytesIO\r\n\r\nfrom IPython.core.display import _pngxy\r\nfrom IPython.utils.decorators import flag_calls\r\nfrom IPython.utils import py3compat\r\n\r\n# If user specifies a GUI, that dictates the backend, otherwise we read the\r\n# user's mpl default from the mpl rc structure\r\nbackends = {'tk': 'TkAgg',\r\n 'gtk': 'GTKAgg',\r\n 'gtk3': 'GTK3Agg',\r\n 'wx': 'WXAgg',\r\n 'qt': 'Qt4Agg', # qt3 not supported\r\n 'qt4': 'Qt4Agg',\r\n 'qt5': 'Qt5Agg',\r\n 'osx': 'MacOSX',\r\n 'nbagg': 'nbAgg',\r\n 'notebook': 'nbAgg',\r\n 'agg': 'agg',\r\n 'inline': 'module://ipykernel.pylab.backend_inline',\r\n 'ipympl': 'module://ipympl.backend_nbagg',\r\n}\r\n\r\n# We also need a reverse backends2guis mapping that will properly choose which\r\n# GUI support to activate based on the desired matplotlib backend. For the\r\n# most part it's just a reverse of the above dict, but we also need to add a\r\n# few others that map to the same GUI manually:\r\nbackend2gui = dict(zip(backends.values(), backends.keys()))\r\n# Our tests expect backend2gui to just return 'qt'\r\nbackend2gui['Qt4Agg'] = 'qt'\r\n# In the reverse mapping, there are a few extra valid matplotlib backends that\r\n# map to the same GUI support\r\nbackend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'\r\nbackend2gui['GTK3Cairo'] = 'gtk3'\r\nbackend2gui['WX'] = 'wx'\r\nbackend2gui['CocoaAgg'] = 'osx'\r\n# And some backends that don't need GUI integration\r\ndel backend2gui['nbAgg']\r\ndel backend2gui['agg']\r\ndel backend2gui['module://ipykernel.pylab.backend_inline']\r\n\r\n#-----------------------------------------------------------------------------\r\n# Matplotlib utilities\r\n#-----------------------------------------------------------------------------\r\n\r\n\r\ndef getfigs(*fig_nums):\r\n \"\"\"Get a list of matplotlib figures by figure numbers.\r\n\r\n If no arguments are given, all available figures are returned. If the\r\n argument list contains references to invalid figures, a warning is printed\r\n but the function continues pasting further figures.\r\n\r\n Parameters\r\n ----------\r\n figs : tuple\r\n A tuple of ints giving the figure numbers of the figures to return.\r\n \"\"\"\r\n from matplotlib._pylab_helpers import Gcf\r\n if not fig_nums:\r\n fig_managers = Gcf.get_all_fig_managers()\r\n return [fm.canvas.figure for fm in fig_managers]\r\n else:\r\n figs = []\r\n for num in fig_nums:\r\n f = Gcf.figs.get(num)\r\n if f is None:\r\n print('Warning: figure %s not available.' 
% num)\r\n else:\r\n figs.append(f.canvas.figure)\r\n return figs\r\n\r\n\r\ndef figsize(sizex, sizey):\r\n \"\"\"Set the default figure size to be [sizex, sizey].\r\n\r\n This is just an easy to remember, convenience wrapper that sets::\r\n\r\n matplotlib.rcParams['figure.figsize'] = [sizex, sizey]\r\n \"\"\"\r\n import matplotlib\r\n matplotlib.rcParams['figure.figsize'] = [sizex, sizey]\r\n\r\n\r\ndef print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):\r\n \"\"\"Print a figure to an image, and return the resulting file data\r\n \r\n Returned data will be bytes unless ``fmt='svg'``,\r\n in which case it will be unicode.\r\n \r\n Any keyword args are passed to fig.canvas.print_figure,\r\n such as ``quality`` or ``bbox_inches``.\r\n \"\"\"\r\n from matplotlib import rcParams\r\n # When there's an empty figure, we shouldn't return anything, otherwise we\r\n # get big blank areas in the qt console.\r\n if not fig.axes and not fig.lines:\r\n return\r\n\r\n dpi = fig.dpi\r\n if fmt == 'retina':\r\n dpi = dpi * 2\r\n fmt = 'png'\r\n \r\n # build keyword args\r\n kw = dict(\r\n format=fmt,\r\n facecolor=fig.get_facecolor(),\r\n edgecolor=fig.get_edgecolor(),\r\n dpi=dpi,\r\n bbox_inches=bbox_inches,\r\n )\r\n # **kwargs get higher priority\r\n kw.update(kwargs)\r\n \r\n bytes_io = BytesIO()\r\n fig.canvas.print_figure(bytes_io, **kw)\r\n data = bytes_io.getvalue()\r\n if fmt == 'svg':\r\n data = data.decode('utf-8')\r\n return data\r\n \r\ndef retina_figure(fig, **kwargs):\r\n \"\"\"format a figure as a pixel-doubled (retina) PNG\"\"\"\r\n pngdata = print_figure(fig, fmt='retina', **kwargs)\r\n # Make sure that retina_figure acts just like print_figure and returns\r\n # None when the figure is empty.\r\n if pngdata is None:\r\n return\r\n w, h = _pngxy(pngdata)\r\n metadata = dict(width=w//2, height=h//2)\r\n return pngdata, metadata\r\n\r\n# We need a little factory function here to create the closure where\r\n# safe_execfile can live.\r\ndef mpl_runner(safe_execfile):\r\n \"\"\"Factory to return a matplotlib-enabled runner for %run.\r\n\r\n Parameters\r\n ----------\r\n safe_execfile : function\r\n This must be a function with the same interface as the\r\n :meth:`safe_execfile` method of IPython.\r\n\r\n Returns\r\n -------\r\n A function suitable for use as the ``runner`` argument of the %run magic\r\n function.\r\n \"\"\"\r\n \r\n def mpl_execfile(fname,*where,**kw):\r\n \"\"\"matplotlib-aware wrapper around safe_execfile.\r\n\r\n Its interface is identical to that of the :func:`execfile` builtin.\r\n\r\n This is ultimately a call to execfile(), but wrapped in safeties to\r\n properly handle interactive rendering.\"\"\"\r\n\r\n import matplotlib\r\n import matplotlib.pyplot as plt\r\n\r\n #print '*** Matplotlib runner ***' # dbg\r\n # turn off rendering until end of script\r\n is_interactive = matplotlib.rcParams['interactive']\r\n matplotlib.interactive(False)\r\n safe_execfile(fname,*where,**kw)\r\n matplotlib.interactive(is_interactive)\r\n # make rendering call now, if the user tried to do it\r\n if plt.draw_if_interactive.called:\r\n plt.draw()\r\n plt.draw_if_interactive.called = False\r\n\r\n # re-draw everything that is stale\r\n try:\r\n da = plt.draw_all\r\n except AttributeError:\r\n pass\r\n else:\r\n da()\r\n\r\n return mpl_execfile\r\n\r\n\r\ndef _reshow_nbagg_figure(fig):\r\n \"\"\"reshow an nbagg figure\"\"\"\r\n try:\r\n reshow = fig.canvas.manager.reshow\r\n except AttributeError:\r\n raise NotImplementedError()\r\n else:\r\n reshow()\r\n\r\n\r\ndef 
select_figure_formats(shell, formats, **kwargs):\r\n \"\"\"Select figure formats for the inline backend.\r\n\r\n Parameters\r\n ==========\r\n shell : InteractiveShell\r\n The main IPython instance.\r\n formats : str or set\r\n One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.\r\n **kwargs : any\r\n Extra keyword arguments to be passed to fig.canvas.print_figure.\r\n \"\"\"\r\n import matplotlib\r\n from matplotlib.figure import Figure\r\n\r\n svg_formatter = shell.display_formatter.formatters['image/svg+xml']\r\n png_formatter = shell.display_formatter.formatters['image/png']\r\n jpg_formatter = shell.display_formatter.formatters['image/jpeg']\r\n pdf_formatter = shell.display_formatter.formatters['application/pdf']\r\n\r\n if isinstance(formats, py3compat.string_types):\r\n formats = {formats}\r\n # cast in case of list / tuple\r\n formats = set(formats)\r\n\r\n [ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]\r\n mplbackend = matplotlib.get_backend().lower()\r\n if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':\r\n formatter = shell.display_formatter.ipython_display_formatter\r\n formatter.for_type(Figure, _reshow_nbagg_figure)\r\n\r\n supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}\r\n bad = formats.difference(supported)\r\n if bad:\r\n bs = \"%s\" % ','.join([repr(f) for f in bad])\r\n gs = \"%s\" % ','.join([repr(f) for f in supported])\r\n raise ValueError(\"supported formats are: %s not %s\" % (gs, bs))\r\n \r\n if 'png' in formats:\r\n png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))\r\n if 'retina' in formats or 'png2x' in formats:\r\n png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))\r\n if 'jpg' in formats or 'jpeg' in formats:\r\n jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))\r\n if 'svg' in formats:\r\n svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))\r\n if 'pdf' in formats:\r\n pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))\r\n\r\n#-----------------------------------------------------------------------------\r\n# Code for initializing matplotlib and importing pylab\r\n#-----------------------------------------------------------------------------\r\n\r\n\r\ndef find_gui_and_backend(gui=None, gui_select=None):\r\n \"\"\"Given a gui string return the gui and mpl backend.\r\n\r\n Parameters\r\n ----------\r\n gui : str\r\n Can be one of ('tk','gtk','wx','qt','qt4','inline').\r\n gui_select : str\r\n Can be one of ('tk','gtk','wx','qt','qt4','inline').\r\n This is any gui already selected by the shell.\r\n\r\n Returns\r\n -------\r\n A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',\r\n 'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline').\r\n \"\"\"\r\n\r\n import matplotlib\r\n\r\n if gui and gui != 'auto':\r\n # select backend based on requested gui\r\n backend = backends[gui]\r\n else:\r\n # We need to read the backend from the original data structure, *not*\r\n # from mpl.rcParams, since a prior invocation of %matplotlib may have\r\n # overwritten that.\r\n # WARNING: this assumes matplotlib 1.1 or newer!!\r\n backend = matplotlib.rcParamsOrig['backend']\r\n # In this case, we need to find what the appropriate gui selection call\r\n # should be for IPython, so we can activate inputhook accordingly\r\n gui = backend2gui.get(backend, None)\r\n\r\n # If we have already had a gui active, we need it and 
inline are the\r\n # ones allowed.\r\n if gui_select and gui != gui_select:\r\n gui = gui_select\r\n backend = backends[gui]\r\n\r\n return gui, backend\r\n\r\n\r\ndef activate_matplotlib(backend):\r\n \"\"\"Activate the given backend and set interactive to True.\"\"\"\r\n\r\n import matplotlib\r\n matplotlib.interactive(True)\r\n \r\n # Matplotlib had a bug where even switch_backend could not force\r\n # the rcParam to update. This needs to be set *before* the module\r\n # magic of switch_backend().\r\n matplotlib.rcParams['backend'] = backend\r\n\r\n import matplotlib.pyplot\r\n matplotlib.pyplot.switch_backend(backend)\r\n\r\n # This must be imported last in the matplotlib series, after\r\n # backend/interactivity choices have been made\r\n import matplotlib.pyplot as plt\r\n\r\n plt.show._needmain = False\r\n # We need to detect at runtime whether show() is called by the user.\r\n # For this, we wrap it into a decorator which adds a 'called' flag.\r\n plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)\r\n\r\n\r\ndef import_pylab(user_ns, import_all=True):\r\n \"\"\"Populate the namespace with pylab-related values.\r\n \r\n Imports matplotlib, pylab, numpy, and everything from pylab and numpy.\r\n \r\n Also imports a few names from IPython (figsize, display, getfigs)\r\n \r\n \"\"\"\r\n\r\n # Import numpy as np/pyplot as plt are conventions we're trying to\r\n # somewhat standardize on. Making them available to users by default\r\n # will greatly help this.\r\n s = (\"import numpy\\n\"\r\n \"import matplotlib\\n\"\r\n \"from matplotlib import pylab, mlab, pyplot\\n\"\r\n \"np = numpy\\n\"\r\n \"plt = pyplot\\n\"\r\n )\r\n exec(s, user_ns)\r\n \r\n if import_all:\r\n s = (\"from matplotlib.pylab import *\\n\"\r\n \"from numpy import *\\n\")\r\n exec(s, user_ns)\r\n \r\n # IPython symbols to add\r\n user_ns['figsize'] = figsize\r\n from IPython.core.display import display\r\n # Add display and getfigs to the user's namespace\r\n user_ns['display'] = display\r\n user_ns['getfigs'] = getfigs\r\n\r\n\r\ndef configure_inline_support(shell, backend):\r\n \"\"\"Configure an IPython shell object for matplotlib use.\r\n\r\n Parameters\r\n ----------\r\n shell : InteractiveShell instance\r\n\r\n backend : matplotlib backend\r\n \"\"\"\r\n # If using our svg payload backend, register the post-execution\r\n # function that will pick up the results for display. 
This can only be\r\n # done with access to the real shell object.\r\n\r\n # Note: if we can't load the inline backend, then there's no point\r\n # continuing (such as in terminal-only shells in environments without\r\n # zeromq available).\r\n try:\r\n from ipykernel.pylab.backend_inline import InlineBackend\r\n except ImportError:\r\n return\r\n import matplotlib\r\n\r\n cfg = InlineBackend.instance(parent=shell)\r\n cfg.shell = shell\r\n if cfg not in shell.configurables:\r\n shell.configurables.append(cfg)\r\n\r\n if backend == backends['inline']:\r\n from ipykernel.pylab.backend_inline import flush_figures\r\n shell.events.register('post_execute', flush_figures)\r\n\r\n # Save rcParams that will be overwrittern\r\n shell._saved_rcParams = dict()\r\n for k in cfg.rc:\r\n shell._saved_rcParams[k] = matplotlib.rcParams[k]\r\n # load inline_rc\r\n matplotlib.rcParams.update(cfg.rc)\r\n new_backend_name = \"inline\"\r\n else:\r\n from ipykernel.pylab.backend_inline import flush_figures\r\n try:\r\n shell.events.unregister('post_execute', flush_figures)\r\n except ValueError:\r\n pass\r\n if hasattr(shell, '_saved_rcParams'):\r\n matplotlib.rcParams.update(shell._saved_rcParams)\r\n del shell._saved_rcParams\r\n new_backend_name = \"other\"\r\n\r\n # only enable the formats once -> don't change the enabled formats (which the user may\r\n # has changed) when getting another \"%matplotlib inline\" call.\r\n # See https://github.com/ipython/ipykernel/issues/29\r\n cur_backend = getattr(configure_inline_support, \"current_backend\", \"unset\")\r\n if new_backend_name != cur_backend:\r\n # Setup the default figure format\r\n select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)\r\n configure_inline_support.current_backend = new_backend_name\r\n"
] | [
[
"matplotlib.interactive",
"matplotlib._pylab_helpers.Gcf.figs.get",
"matplotlib.pyplot.draw",
"matplotlib._pylab_helpers.Gcf.get_all_fig_managers",
"matplotlib.get_backend",
"matplotlib.pyplot.switch_backend",
"matplotlib.rcParams.update"
]
] |
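The row above captures IPython's pylabtools, whose print_figure helper renders a figure to image bytes for inline display; the following sketch (assuming only that matplotlib is installed) shows the same canvas.print_figure round trip that helper wraps:

# Sketch: render a figure to PNG bytes the way the row's print_figure does.
import matplotlib
matplotlib.use('agg')  # headless backend, one of the entries in the row's backends dict
import matplotlib.pyplot as plt
from io import BytesIO

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])

buf = BytesIO()
fig.canvas.print_figure(buf, format='png', dpi=fig.dpi,
                        facecolor=fig.get_facecolor(),
                        edgecolor=fig.get_edgecolor(),
                        bbox_inches='tight')
png_bytes = buf.getvalue()  # bytes, as print_figure(fig, fmt='png') would return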
sherrytp/TradingEvolved | [
"4bc9cc18244954bff37a80f67cce658bd0802b5d"
] | [
"examples/old/zipline_alpaca2.py"
] | [
"# https://github.com/RomanMichaelPaolucci/AI_Stock_Trading/blob/master/IBM.csv\nimport abc\nimport threading\nimport time\nimport pandas as pd\nimport numpy as np\nfrom keras.layers import Dense\nfrom keras.models import Sequential, model_from_json\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom alpaca_trade_api import REST\n\n\nclass AlpacaPaperSocket(REST):\n def __init__(self):\n super().__init__(\n key_id='PKPO0ZH3XTVB336B7TEO',\n secret_key='gcs4U2Hp/ACI4A5UwYjYugrPqB2odD/m40Zuz5qw',\n base_url='https://paper-api.alpaca.markets'\n )\n\n\nclass TradingSystem(abc.ABC):\n\n def __init__(self, api, symbol, time_frame, system_id, system_label):\n # Connect to api\n # Connect to BrokenPipeError\n # Save fields to class\n self.api = api\n self.symbol = symbol\n self.time_frame = time_frame\n self.system_id = system_id\n self.system_label = system_label\n thread = threading.Thread(target=self.system_loop)\n thread.start()\n\n @abc.abstractmethod\n def place_buy_order(self):\n pass\n\n @abc.abstractmethod\n def place_sell_order(self):\n pass\n\n @abc.abstractmethod\n def system_loop(self):\n pass\n\n\n# Class to develop your AI portfolio manager\nclass PMModelDevelopment:\n\n def __init__(self):\n # Read your data in and split the dependent and independent\n data = pd.read_csv('IBM.csv')\n X = data['Delta Close']\n y = data.drop(['Delta Close'], axis=1)\n\n # Train test spit\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n\n # Create the sequential\n network = Sequential()\n\n # Create the structure of the neural network\n network.add(Dense(1, input_shape=(1,), activation='tanh'))\n network.add(Dense(3, activation='tanh'))\n network.add(Dense(3, activation='tanh'))\n network.add(Dense(3, activation='tanh'))\n network.add(Dense(1, activation='tanh'))\n\n # Compile the model\n network.compile(\n optimizer='rmsprop',\n loss='hinge',\n metrics=['accuracy']\n )\n # Train the model\n network.fit(X_train.values, y_train.values, epochs=100)\n\n # Evaluate the predictions of the model\n y_pred = network.predict(X_test.values)\n y_pred = np.around(y_pred, 0)\n print(classification_report(y_test, y_pred))\n\n # Save structure to json\n model = network.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model)\n\n # Save weights to HDF5\n network.save_weights(\"weights.h5\")\n\n\n# AI Portfolio Manager\nclass PortfolioManagementModel:\n\n def __init__(self):\n # Data in to test that the saving of weights worked\n data = pd.read_csv('IBM.csv')\n X = data['Delta Close']\n y = data.drop(['Delta Close'], axis=1)\n\n # Read structure from json\n json_file = open('model.json', 'r')\n json = json_file.read()\n json_file.close()\n self.network = model_from_json(json)\n\n # Read weights from HDF5\n self.network.load_weights(\"weights.h5\")\n\n # Verify weights and structure are loaded\n y_pred = self.network.predict(X.values)\n y_pred = np.around(y_pred, 0)\n print(classification_report(y, y_pred))\n\nPortfolioManagementModel()\n\n\n# in implemenation create a vector to store data...\nclass PortfolioManagementSystem(TradingSystem):\n\n def __init__(self):\n super().__init__(AlpacaPaperSocket(), 'IBM', 86400, 1, 'AI_PM')\n self.AI = PortfolioManagementModel()\n\n def place_buy_order(self):\n self.api.submit_order(\n symbol='IBM',\n qty=1,\n side='buy',\n type='market',\n time_in_force='day',\n )\n\n def place_sell_order(self):\n self.api.submit_order(\n symbol='IBM',\n qty=1,\n side='sell',\n type='market',\n 
time_in_force='day',\n )\n\n def system_loop(self):\n # Variables for weekly close\n this_weeks_close = 0\n last_weeks_close = 0\n delta = 0\n day_count = 0\n while(True):\n # Wait a day to request more data\n time.sleep(86400) # 86400 seconds = 24 hours\n # Request EoD data for IBM\n data_req = self.api.get_barset('IBM', timeframe='1D', limit=1).df\n # Construct dataframe to predict\n x = pd.DataFrame(\n data=[[\n data_req['IBM']['close'][0]]], columns='Close'.split()\n )\n # Count the elapsed day so the weekly branch below can trigger\n day_count += 1\n if(day_count == 7):\n day_count = 0\n last_weeks_close = this_weeks_close\n this_weeks_close = x['Close']\n delta = this_weeks_close - last_weeks_close\n\n # AI choosing to buy, sell, or hold\n if np.around(self.AI.network.predict([delta])) <= -.5:\n self.place_sell_order()\n\n elif np.around(self.AI.network.predict([delta])) >= .5:\n self.place_buy_order()\n\n\nPortfolioManagementSystem()\n"
] | [
[
"pandas.read_csv",
"numpy.around",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report"
]
] |
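The row above persists its Keras network as a JSON architecture file plus an HDF5 weights file; the following is a self-contained sketch of that round trip (assuming keras, or tf.keras with the same API; the tiny two-layer architecture here is illustrative rather than the row's exact network):

# Sketch: save and restore a model the way the row's PMModelDevelopment
# and PortfolioManagementModel classes do.
from keras.layers import Dense
from keras.models import Sequential, model_from_json

net = Sequential([Dense(3, input_shape=(1,), activation='tanh'),
                  Dense(1, activation='tanh')])
with open('model.json', 'w') as json_file:
    json_file.write(net.to_json())   # architecture only, no weights
net.save_weights('weights.h5')       # weights only, no architecture

with open('model.json') as json_file:
    restored = model_from_json(json_file.read())
restored.load_weights('weights.h5')  # restored now predicts identically to net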
amelieEmily/RobustDARTS | [
"b26e127c6e9c330258786f5eb77b17d367f546ff"
] | [
"src/utils.py"
] | [
"import os\nimport yaml\nimport numpy as np\nimport torch\nimport shutil\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom collections import namedtuple\n\nclass MyDumper(yaml.Dumper):\n\n def increase_indent(self, flow=False, indentless=False):\n return super(MyDumper, self).increase_indent(flow, False)\n\n\nGenotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')\n\nPRIMITIVES = [\n 'none',\n 'noise',\n 'max_pool_3x3',\n 'avg_pool_3x3',\n 'skip_connect',\n 'sep_conv_3x3',\n 'sep_conv_5x5',\n 'dil_conv_3x3',\n 'dil_conv_5x5'\n]\n\n\nclass EVLocalAvg(object):\n def __init__(self, window=5, ev_freq=2, total_epochs=50):\n \"\"\" Keep track of the eigenvalues local average.\n\n Args:\n window (int): number of elements used to compute local average.\n Default: 5\n ev_freq (int): frequency used to compute eigenvalues. Default:\n every 2 epochs\n total_epochs (int): total number of epochs that DARTS runs.\n Default: 50\n\n \"\"\"\n self.window = window\n self.ev_freq = ev_freq\n self.epochs = total_epochs\n\n self.stop_search = False\n self.stop_epoch = total_epochs - 1\n self.stop_genotype = None\n\n self.ev = []\n self.ev_local_avg = []\n self.genotypes = {}\n self.la_epochs = {}\n\n # start and end index of the local average window\n self.la_start_idx = 0\n self.la_end_idx = self.window\n\n def reset(self):\n self.ev = []\n self.ev_local_avg = []\n self.genotypes = {}\n self.la_epochs = {}\n\n def update(self, epoch, ev, genotype):\n \"\"\" Method to update the local average list.\n\n Args:\n epoch (int): current epoch\n ev (float): current dominant eigenvalue\n genotype (namedtuple): current genotype\n\n \"\"\"\n self.ev.append(ev)\n self.genotypes.update({epoch: genotype})\n # set the stop_genotype to the current genotype in case the early stop\n # procedure decides not to early stop\n self.stop_genotype = genotype\n\n # since the local average computation starts after the dominant\n # eigenvalue in the first epoch is already computed we have to wait\n # at least until we have 3 eigenvalues in the list.\n if (len(self.ev) >= int(np.ceil(self.window/2))) and (epoch <\n self.epochs - 1):\n # start sliding the window as soon as the number of eigenvalues in\n # the list becomes equal to the window size\n if len(self.ev) < self.window:\n self.ev_local_avg.append(np.mean(self.ev))\n else:\n assert len(self.ev[self.la_start_idx: self.la_end_idx]) == self.window\n self.ev_local_avg.append(np.mean(self.ev[self.la_start_idx:\n self.la_end_idx]))\n self.la_start_idx += 1\n self.la_end_idx += 1\n\n # keep track of the offset between the current epoch and the epoch\n # corresponding to the local average. 
NOTE: in the end the size of\n # self.ev and self.ev_local_avg should be equal\n self.la_epochs.update({epoch: int(epoch -\n int(self.ev_freq*np.floor(self.window/2)))})\n\n elif len(self.ev) < int(np.ceil(self.window/2)):\n self.la_epochs.update({epoch: -1})\n\n # since there is an offset between the current epoch and the local\n # average epoch, loop in the last epoch to compute the local average of\n # this number of elements: window, window - 1, window - 2, ..., ceil(window/2)\n elif epoch == self.epochs - 1:\n for i in range(int(np.ceil(self.window/2))):\n assert len(self.ev[self.la_start_idx: self.la_end_idx]) == self.window - i\n self.ev_local_avg.append(np.mean(self.ev[self.la_start_idx:\n self.la_end_idx + 1]))\n self.la_start_idx += 1\n\n def early_stop(self, epoch, factor=1.3, es_start_epoch=10, delta=4):\n \"\"\" Early stopping criterion\n\n Args:\n epoch (int): current epoch\n factor (float): threshold factor for the ratio between the current\n and previous eigenvalue. Default: 1.3\n es_start_epoch (int): until this epoch do not consider early\n stopping. Default: 10\n delta (int): factor influencing which previous local average we\n consider for early stopping. Default: 4\n \"\"\"\n if int(self.la_epochs[epoch] - self.ev_freq*delta) >= es_start_epoch:\n # the current local average corresponds to\n # epoch - int(self.ev_freq*np.floor(self.window/2))\n current_la = self.ev_local_avg[-1]\n # by default take the local average corresponding to epoch\n # delta*self.ev_freq\n previous_la = self.ev_local_avg[-1 - delta]\n\n self.stop_search = current_la / previous_la > factor\n if self.stop_search:\n self.stop_epoch = int(self.la_epochs[epoch] - self.ev_freq*delta)\n self.stop_genotype = self.genotypes[self.stop_epoch]\n\n\nclass AvgrageMeter(object):\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0/batch_size))\n return res\n\ndef write_yaml_results_eval(args, results_file, result_to_log):\n setting = '_'.join([args.space, args.dataset])\n regularization = '_'.join(\n [str(args.search_dp), str(args.search_wd)]\n )\n results_file = os.path.join(args._save, results_file+'.yaml')\n\n try:\n with open(results_file, 'r') as f:\n result = yaml.load(f, Loader=yaml.Loader)\n if setting in result.keys():\n if regularization in result[setting].keys():\n if args.search_task_id in result[setting][regularization]:\n result[setting][regularization][args.search_task_id].append(result_to_log)\n else:\n result[setting][regularization].update({args.search_task_id:\n [result_to_log]})\n else:\n result[setting].update({regularization: {args.search_task_id:\n [result_to_log]}})\n else:\n result.update({setting: {regularization: {args.search_task_id:\n [result_to_log]}}})\n with open(results_file, 'w') as f:\n yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)\n except (AttributeError, FileNotFoundError) as e:\n result = {\n setting: {\n regularization: {\n args.search_task_id: [result_to_log]\n }\n }\n }\n with open(results_file, 'w') as f:\n yaml.dump(result, f, Dumper=MyDumper, 
default_flow_style=False)\n\ndef write_yaml_results(args, results_file, result_to_log):\n setting = '_'.join([args.space, args.dataset])\n regularization = '_'.join(\n [str(args.drop_path_prob), str(args.weight_decay)]\n )\n results_file = os.path.join(args._save, results_file+'.yaml')\n\n try:\n with open(results_file, 'r') as f:\n result = yaml.load(f, Loader=yaml.Loader)\n if setting in result.keys():\n if regularization in result[setting].keys():\n result[setting][regularization].update({args.task_id: result_to_log})\n else:\n result[setting].update({regularization: {args.task_id: result_to_log}})\n else:\n result.update({setting: {regularization: {args.task_id: result_to_log}}})\n with open(results_file, 'w') as f:\n yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)\n except (AttributeError, FileNotFoundError) as e:\n result = {\n setting: {\n regularization: {\n args.task_id: result_to_log\n }\n }\n }\n with open(results_file, 'w') as f:\n yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)\n\n\nclass Cutout(object):\n def __init__(self, length, prob=1.0):\n self.length = length\n self.prob = prob\n\n def __call__(self, img):\n if np.random.binomial(1, self.prob):\n h, w = img.size(1), img.size(2)\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1: y2, x1: x2] = 0.\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n return img\n\ndef _data_transforms_svhn(args):\n SVHN_MEAN = [0.4377, 0.4438, 0.4728]\n SVHN_STD = [0.1980, 0.2010, 0.1970]\n\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(SVHN_MEAN, SVHN_STD),\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length,\n args.cutout_prob))\n\n valid_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(SVHN_MEAN, SVHN_STD),\n ])\n return train_transform, valid_transform\n\ndef _data_transforms_dr_detection(args):\n\n DR_DETECTION_MEAN = [0.42, 0.22, 0.075]\n DR_DETECTION_STD = [0.27, 0.15, 0.081]\n if args.is_eval:\n\n train_transform = transforms.Compose([\n transforms.Resize(540), # 256\n transforms.RandomRotation((-45.0, +45.0)),\n transforms.RandomResizedCrop(512, scale=(0.9, 1.1), ratio=(0.9, 1.1)), # 224\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ColorJitter(brightness=0.1, contrast=[0.75,1.5],\n saturation=[0.75,1.5], hue=0.15),\n transforms.ToTensor(),\n transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD)\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length,\n args.cutout_prob))\n\n valid_transform = transforms.Compose([\n transforms.Resize(540),\n transforms.CenterCrop(512),\n transforms.ToTensor(),\n transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),\n ])\n\n else:\n train_transform = transforms.Compose([\n transforms.Resize(256), # 256\n transforms.RandomRotation((-45.0, +45.0)),\n transforms.RandomResizedCrop(224, scale=(0.9, 1.1), ratio=(0.9, 1.1)), # 224\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ColorJitter(brightness=0.1, contrast=[0.75, 1.5],\n saturation=[0.75, 1.5], hue=0.15),\n transforms.ToTensor(),\n 
transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),\n # transforms.RandomErasing(),\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length,\n args.cutout_prob))\n\n valid_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),\n ])\n return train_transform, valid_transform\n\ndef _data_transforms_malaria(args):\n\n train_transform = transforms.Compose([\n transforms.Resize(100),\n transforms.RandomCrop(64), # 224\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length,\n args.cutout_prob))\n\n valid_transform = transforms.Compose([\n transforms.Resize(100),\n transforms.RandomCrop(64), # 224\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n ])\n return train_transform, valid_transform\n\ndef _data_transforms_mnist(args):\n MNIST_MEAN = [0.5, 0.5, 0.5]\n MNIST_STD = [0.5, 0.5, 0.5]\n\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(MNIST_MEAN, MNIST_STD),\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length,\n args.cutout_prob))\n\n valid_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(MNIST_MEAN, MNIST_STD),\n ])\n return train_transform, valid_transform\n\ndef _data_transforms_cifar100(args):\n CIFAR_MEAN = [0.5071, 0.4865, 0.4409]\n CIFAR_STD = [0.2673, 0.2564, 0.2762]\n\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length,\n args.cutout_prob))\n\n valid_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n return train_transform, valid_transform\n\n\ndef _data_transforms_cifar10(args):\n CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]\n CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]\n\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length,\n args.cutout_prob))\n\n valid_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n return train_transform, valid_transform\n\n\ndef count_parameters_in_MB(model):\n return np.sum(np.prod(v.size()) for v in model.parameters())/1e6\n\n\ndef save(model, model_path):\n torch.save(model.state_dict(), model_path)\n\ndef load(model, model_path):\n model.load_state_dict(torch.load(model_path))\n\ndef save_checkpoint(state, is_best, save, epoch, task_id):\n filename = \"checkpoint_{}_{}.pth.tar\".format(task_id, epoch)\n filename = os.path.join(save, filename)\n\n torch.save(state, filename)\n if is_best:\n best_filename = os.path.join(save, 'model_best.pth.tar')\n shutil.copyfile(filename, best_filename)\n\ndef load_checkpoint(model, optimizer, scheduler, architect, save, la_tracker,\n epoch, task_id):\n filename = \"checkpoint_{}_{}.pth.tar\".format(task_id, epoch)\n filename = 
os.path.join(save, filename)\n\n checkpoint = torch.load(filename)\n\n model.load_state_dict(checkpoint['state_dict'])\n model.alphas_normal.data = checkpoint['alphas_normal']\n model.alphas_reduce.data = checkpoint['alphas_reduce']\n optimizer.load_state_dict(checkpoint['optimizer'])\n architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])\n la_tracker.ev = checkpoint['ev']\n la_tracker.ev_local_avg = checkpoint['ev_local_avg']\n la_tracker.genotypes = checkpoint['genotypes']\n la_tracker.la_epochs = checkpoint['la_epochs']\n la_tracker.la_start_idx = checkpoint['la_start_idx']\n la_tracker.la_end_idx = checkpoint['la_end_idx']\n lr = checkpoint['lr']\n return lr\n\n\ndef drop_path(x, drop_prob):\n if drop_prob > 0.:\n keep_prob = 1.-drop_prob\n mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))\n x.div_(keep_prob)\n x.mul_(mask)\n return x\n\n\ndef create_exp_dir(path, scripts_to_save=None):\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print('Experiment dir : {}'.format(path))\n\n if scripts_to_save is not None:\n os.mkdir(os.path.join(path, 'scripts'))\n for script in scripts_to_save:\n dst_file = os.path.join(path, 'scripts', os.path.basename(script))\n shutil.copyfile(script, dst_file)\n\n\ndef print_args(args):\n for arg, val in args.__dict__.items():\n print(arg + '.' * (50 - len(arg) - len(str(val))) + str(val))\n print()\n"
] | [
[
"numpy.random.binomial",
"numpy.ones",
"torch.load",
"numpy.ceil",
"torch.save",
"numpy.floor",
"numpy.clip",
"torch.from_numpy",
"numpy.random.randint",
"numpy.mean"
]
] |
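Among the helpers in the row above, accuracy computes top-k accuracy from logits via topk and eq; the following is a minimal usage sketch (PyTorch assumed; the logits and targets are made-up toy values):

# Sketch: top-k accuracy computed the same way as the row's accuracy().
import torch

output = torch.tensor([[0.1, 0.7, 0.2],
                       [0.8, 0.1, 0.1]])  # logits: 2 samples, 3 classes
target = torch.tensor([1, 2])             # ground-truth class indices

_, pred = output.topk(2, 1, True, True)   # top-2 class indices per sample
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
top1 = correct[:1].reshape(-1).float().sum(0) * (100.0 / target.size(0))
print(top1.item())  # 50.0 -- only the first sample is correct at top-1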
gertjanvanzwieten/nutils | [
"ec04d66e4797398496453181f96b14ad2edae228"
] | [
"tests/test_types.py"
] | [
"from nutils.testing import *\nimport nutils.types\nimport inspect, pickle, itertools, ctypes, stringly, tempfile, io, os\nimport numpy\n\nclass apply_annotations(TestCase):\n\n def test_without_annotations(self):\n @nutils.types.apply_annotations\n def f(a, b):\n return a, b\n a, b = f(1, 2)\n self.assertEqual(a, 1)\n self.assertEqual(b, 2)\n\n def test_pos_or_kw(self):\n @nutils.types.apply_annotations\n def f(a:int, b, c:str):\n return a, b, c\n a, b, c = f(1, 2, 3)\n self.assertEqual(a, 1)\n self.assertEqual(b, 2)\n self.assertEqual(c, '3')\n\n def test_with_signature(self):\n def f(a):\n return a\n f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str)])\n f = nutils.types.apply_annotations(f)\n self.assertEqual(f(1), '1')\n\n def test_posonly(self):\n def f(a):\n return a\n f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str)])\n f = nutils.types.apply_annotations(f)\n self.assertEqual(f(1), '1')\n\n def test_kwonly(self):\n @nutils.types.apply_annotations\n def f(a:str, *, b:int, c:bool):\n return a, b, c\n self.assertEqual(f(1, b='2', c=3), ('1', 2, True))\n\n def test_varpos(self):\n @nutils.types.apply_annotations\n def f(a:str, *args):\n return a, args\n self.assertEqual(f(1, 2, 3), ('1', (2, 3)))\n\n def test_varpos_annotated(self):\n map_str = lambda args: map(str, args)\n @nutils.types.apply_annotations\n def f(a:str, *args:map_str):\n return a, args\n self.assertEqual(f(1, 2, 3), ('1', ('2', '3')))\n\n def test_varkw(self):\n @nutils.types.apply_annotations\n def f(a:str, **kwargs):\n return a, kwargs\n self.assertEqual(f(1, b=2, c=3), ('1', dict(b=2, c=3)))\n\n def test_varkw_annotated(self):\n map_str = lambda kwargs: {k: str(v) for k, v in kwargs.items()}\n @nutils.types.apply_annotations\n def f(a:str, **kwargs:map_str):\n return a, kwargs\n self.assertEqual(f(1, b=2, c=3), ('1', dict(b='2', c='3')))\n\n def test_posonly_varkw(self):\n def f(a, b, **c):\n return a, b, c\n f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str),\n inspect.Parameter('b', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str, default=None),\n inspect.Parameter('c', inspect.Parameter.VAR_KEYWORD)])\n f = nutils.types.apply_annotations(f)\n self.assertEqual(f(1, c=2, d=3), ('1', None, dict(c=2, d=3)))\n self.assertEqual(f(1, None, c=2, d=3), ('1', None, dict(c=2, d=3)))\n self.assertEqual(f(1, b=None, c=2, d=3), ('1', None, dict(c=2, d=3)))\n self.assertEqual(f(1, b=4, c=2, d=3), ('1', '4', dict(c=2, d=3)))\n\n def test_default_none(self):\n @nutils.types.apply_annotations\n def f(a:str=None):\n return a\n self.assertEqual(f(), None)\n self.assertEqual(f(None), None)\n self.assertEqual(f(1), '1')\n\nclass nutils_hash(TestCase):\n\n class custom:\n @property\n def __nutils_hash__(self):\n return b'01234567890123456789'\n def f(self):\n pass\n\n def test_ellipsis(self):\n self.assertEqual(nutils.types.nutils_hash(...).hex(), '0c8bce06e451e4d5c49f60da0abf2ccbadf80600')\n\n def test_None(self):\n self.assertEqual(nutils.types.nutils_hash(None).hex(), 'bdfcbd663476b2db5b2b2e59a6d93882a908dc76')\n\n def test_bool(self):\n self.assertEqual(nutils.types.nutils_hash(False).hex(), '04a5e8f73dcea55dcd7482a476cf2e7b53d6dc50')\n self.assertEqual(nutils.types.nutils_hash(True).hex(), '3fe990437e1624c831729f2866979254437bb7e9')\n\n def test_int(self):\n self.assertEqual(nutils.types.nutils_hash(1).hex(), 
'00ec7dea895ebd921e56bbc554688d8b3a1e4dfc')\n self.assertEqual(nutils.types.nutils_hash(2).hex(), '8ae88fa39407cf75e46f9e0aba8c971de2256b14')\n\n def test_float(self):\n self.assertEqual(nutils.types.nutils_hash(1.).hex(), 'def4bae4f2a3e29f6ddac537d3fa7c72195e5d8b')\n self.assertEqual(nutils.types.nutils_hash(2.5).hex(), '5216c2bf3c16d8b8ff4d9b79f482e5cea0a4cb95')\n\n def test_complex(self):\n self.assertEqual(nutils.types.nutils_hash(1+0j).hex(), 'cf7a0d933b7bb8d3ca252683b137534a1ecae073')\n self.assertEqual(nutils.types.nutils_hash(2+1j).hex(), 'ee088890528f941a80aa842dad36591b05253e55')\n\n def test_inequality_numbers(self):\n self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1.).hex())\n self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1+0j).hex())\n self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(True).hex())\n\n def test_str(self):\n self.assertEqual(nutils.types.nutils_hash('spam').hex(), '3ca1023ab75a68dc7b0f83b43ec624704a7aef61')\n self.assertEqual(nutils.types.nutils_hash('eggs').hex(), '124b0a7b3984e08125c380f7454896c1cad22e2c')\n\n def test_bytes(self):\n self.assertEqual(nutils.types.nutils_hash(b'spam').hex(), '5e717ec15aace7c25610c1dea340f2173f2df014')\n self.assertEqual(nutils.types.nutils_hash(b'eggs').hex(), '98f2061978497751cac94f982fd96d9b015b74c3')\n\n def test_tuple(self):\n self.assertEqual(nutils.types.nutils_hash(()).hex(), '15d44755bf0731b2a3e9a5c5c8e0807b61881a1f')\n self.assertEqual(nutils.types.nutils_hash((1,)).hex(), '328b16ebbc1815cf579ae038a35c4d68ebb022af')\n self.assertNotEqual(nutils.types.nutils_hash((1,'spam')).hex(), nutils.types.nutils_hash(('spam',1)).hex())\n\n def test_frozenset(self):\n self.assertEqual(nutils.types.nutils_hash(frozenset([1,2])).hex(), '3862dc7e5321bc8a576c385ed2c12c71b96a375a')\n self.assertEqual(nutils.types.nutils_hash(frozenset(['spam','eggs'])).hex(), '2c75fd3db57f5e505e1425ae9ff6dcbbc77fd123')\n\n @unittest.skipIf(sys.version_info < (3,7), \"not supported in this Python version\")\n def test_dataclass(self):\n import dataclasses\n A = dataclasses.make_dataclass('A', [('n', int), ('f', float)])\n self.assertEqual(nutils.types.nutils_hash(A(n=1, f=2.5)).hex(), 'daf4235240e897beb9586db3c91663b24e229c52')\n\n def test_type_bool(self):\n self.assertEqual(nutils.types.nutils_hash(bool).hex(), 'feb912889d52d45fcd1e778c427b093a19a1ea78')\n\n def test_type_int(self):\n self.assertEqual(nutils.types.nutils_hash(int).hex(), 'aa8cb9975f7161b1f7ceb88b4b8585b49946b31e')\n\n def test_type_float(self):\n self.assertEqual(nutils.types.nutils_hash(float).hex(), '6d5079a53075f4b6f7710377838d8183730f1388')\n\n def test_type_complex(self):\n self.assertEqual(nutils.types.nutils_hash(complex).hex(), '6b00f6b9c6522742fd3f8054af6f10a24a671fff')\n\n def test_type_str(self):\n self.assertEqual(nutils.types.nutils_hash(str).hex(), '2349e11586163208d2581fe736630f4e4b680a7b')\n\n def test_type_bytes(self):\n self.assertEqual(nutils.types.nutils_hash(bytes).hex(), 'b0826ca666a48739e6f8b968d191adcefaa39670')\n\n def test_type_tuple(self):\n self.assertEqual(nutils.types.nutils_hash(tuple).hex(), '07cb4a24ca8ac53c820f20721432b4726e2ad1af')\n\n def test_type_frozenset(self):\n self.assertEqual(nutils.types.nutils_hash(frozenset).hex(), '48dc7cd0fbd54924498deb7c68dd363b4049f5e2')\n\n def test_type_bufferedreader(self):\n try:\n fid, path = tempfile.mkstemp()\n os.write(fid, b'test')\n os.close(fid)\n with open(path, 'rb') as f:\n f.seek(2)\n 
self.assertEqual(nutils.types.nutils_hash(f).hex(), '4edef1af3aa845b9e8bbde2d8265be5f30be4c2a')\n self.assertEqual(f.tell(), 2)\n with open(path, 'rb+') as f, self.assertRaises(TypeError):\n nutils.types.nutils_hash(f).hex()\n finally:\n os.unlink(path)\n\n def test_type_boundmethod(self):\n self.assertEqual(nutils.types.nutils_hash(self.custom().f).hex(), 'ebf7084bb2504922235ab035a9197b9cb4cf47af')\n\n def test_custom(self):\n self.assertEqual(nutils.types.nutils_hash(self.custom()).hex(), b'01234567890123456789'.hex())\n\n def test_unhashable(self):\n with self.assertRaises(TypeError):\n nutils.types.nutils_hash([])\n\nclass CacheMeta(TestCase):\n\n def test_property(self):\n\n for withslots in False, True:\n with self.subTest(withslots=withslots):\n\n class T(metaclass=nutils.types.CacheMeta):\n if withslots:\n __slots__ = ()\n __cache__ = 'x',\n @property\n def x(self):\n nonlocal ncalls\n ncalls += 1\n return 1\n\n ncalls = 0\n t = T()\n self.assertEqual(ncalls, 0)\n self.assertEqual(t.x, 1)\n self.assertEqual(ncalls, 1)\n self.assertEqual(t.x, 1)\n self.assertEqual(ncalls, 1)\n\n def test_set_property(self):\n\n class T(metaclass=nutils.types.CacheMeta):\n __cache__ = 'x',\n @property\n def x(self):\n return 1\n\n t = T()\n with self.assertRaises(AttributeError):\n t.x = 1\n\n def test_del_property(self):\n\n class T(metaclass=nutils.types.CacheMeta):\n __cache__ = 'x',\n @property\n def x(self):\n return 1\n\n t = T()\n with self.assertRaises(AttributeError):\n del t.x\n\n def test_method_without_args(self):\n\n for withslots in False, True:\n with self.subTest(withslots=withslots):\n\n class T(metaclass=nutils.types.CacheMeta):\n if withslots:\n __slots__ = ()\n __cache__ = 'x',\n def x(self):\n nonlocal ncalls\n ncalls += 1\n return 1\n\n ncalls = 0\n t = T()\n self.assertEqual(ncalls, 0)\n self.assertEqual(t.x(), 1)\n self.assertEqual(ncalls, 1)\n self.assertEqual(t.x(), 1)\n self.assertEqual(ncalls, 1)\n\n def test_method_with_args(self):\n\n for withslots in False, True:\n with self.subTest(withslots=withslots):\n\n class T(metaclass=nutils.types.CacheMeta):\n if withslots:\n __slots__ = ()\n __cache__ = 'x',\n def x(self, a, b):\n nonlocal ncalls\n ncalls += 1\n return a + b\n\n ncalls = 0\n t = T()\n self.assertEqual(ncalls, 0)\n self.assertEqual(t.x(1, 2), 3)\n self.assertEqual(ncalls, 1)\n self.assertEqual(t.x(a=1, b=2), 3)\n self.assertEqual(ncalls, 1)\n self.assertEqual(t.x(2, 2), 4)\n self.assertEqual(ncalls, 2)\n self.assertEqual(t.x(a=2, b=2), 4)\n self.assertEqual(ncalls, 2)\n self.assertEqual(t.x(1, 2), 3)\n self.assertEqual(ncalls, 3)\n\n def test_method_with_args_and_preprocessors(self):\n\n for withslots in False, True:\n with self.subTest(withslots=withslots):\n\n class T(metaclass=nutils.types.CacheMeta):\n if withslots:\n __slots__ = ()\n __cache__ = 'x',\n @nutils.types.apply_annotations\n def x(self, a:int, b:int):\n nonlocal ncalls\n ncalls += 1\n return a + b\n\n ncalls = 0\n t = T()\n self.assertEqual(ncalls, 0)\n self.assertEqual(t.x(1, 2), 3)\n self.assertEqual(ncalls, 1)\n self.assertEqual(t.x(a='1', b='2'), 3)\n self.assertEqual(ncalls, 1)\n self.assertEqual(t.x('2', '2'), 4)\n self.assertEqual(ncalls, 2)\n self.assertEqual(t.x(a=2, b=2), 4)\n self.assertEqual(ncalls, 2)\n self.assertEqual(t.x('1', 2), 3)\n self.assertEqual(ncalls, 3)\n\n def test_method_with_kwargs(self):\n\n for withslots in False, True:\n with self.subTest(withslots=withslots):\n\n class T(metaclass=nutils.types.CacheMeta):\n if withslots:\n __slots__ = ()\n __cache__ = 'x',\n def 
x(self, a, **kwargs):\n nonlocal ncalls\n ncalls += 1\n return a + sum(kwargs.values())\n\n ncalls = 0\n t = T()\n self.assertEqual(ncalls, 0)\n self.assertEqual(t.x(1, b=2), 3)\n self.assertEqual(ncalls, 1)\n self.assertEqual(t.x(a=1, b=2), 3)\n self.assertEqual(ncalls, 1)\n self.assertEqual(t.x(1, b=2, c=3), 6)\n self.assertEqual(ncalls, 2)\n self.assertEqual(t.x(a=1, b=2, c=3), 6)\n self.assertEqual(ncalls, 2)\n\n def test_subclass_redefined_property(self):\n\n class T(metaclass=nutils.types.CacheMeta):\n __cache__ = 'x',\n @property\n def x(self):\n return 1\n\n class U(T):\n __cache__ = 'x',\n @property\n def x(self):\n return super().x + 1\n @property\n def y(self):\n return super().x\n\n u1 = U()\n self.assertEqual(u1.x, 2)\n self.assertEqual(u1.y, 1)\n\n u2 = U()\n self.assertEqual(u2.y, 1)\n self.assertEqual(u2.x, 2)\n\n def test_missing_attribute(self):\n\n with self.assertRaisesRegex(TypeError, 'Attribute listed in __cache__ is undefined: x'):\n class T(metaclass=nutils.types.CacheMeta):\n __cache__ = 'x',\n\n def test_invalid_attribute(self):\n\n with self.assertRaisesRegex(TypeError, \"Don't know how to cache attribute x: None\"):\n class T(metaclass=nutils.types.CacheMeta):\n __cache__ = 'x',\n x = None\n\n def test_name_mangling(self):\n\n for withslots in False, True:\n with self.subTest(withslots=withslots):\n\n class T(metaclass=nutils.types.CacheMeta):\n if withslots:\n __slots__ = ()\n __cache__ = '__x',\n @property\n def __x(self):\n nonlocal ncalls\n ncalls += 1\n return 1\n @property\n def y(self):\n return self.__x\n\n ncalls = 0\n t = T()\n self.assertEqual(ncalls, 0)\n self.assertEqual(t.y, 1)\n self.assertEqual(ncalls, 1)\n self.assertEqual(t.y, 1)\n self.assertEqual(ncalls, 1)\n\nclass strictint(TestCase):\n\n def test_int(self):\n value = nutils.types.strictint(1)\n self.assertEqual(value, 1)\n self.assertEqual(type(value), int)\n\n def test_numpy_int(self):\n value = nutils.types.strictint(numpy.int64(1))\n self.assertEqual(value, 1)\n self.assertEqual(type(value), int)\n\n def test_float(self):\n with self.assertRaises(ValueError):\n nutils.types.strictint(1.)\n\n def test_numpy_float(self):\n with self.assertRaises(ValueError):\n nutils.types.strictint(numpy.float64(1.))\n\n def test_complex(self):\n with self.assertRaises(ValueError):\n nutils.types.strictint(1+0j)\n\n def test_str(self):\n with self.assertRaises(ValueError):\n nutils.types.strictint('1')\n\nclass strictfloat(TestCase):\n\n def test_int(self):\n value = nutils.types.strictfloat(1)\n self.assertEqual(value, 1.)\n self.assertEqual(type(value), float)\n\n def test_numpy_int(self):\n value = nutils.types.strictfloat(numpy.int64(1))\n self.assertEqual(value, 1.)\n self.assertEqual(type(value), float)\n\n def test_float(self):\n value = nutils.types.strictfloat(1.)\n self.assertEqual(value, 1.)\n self.assertEqual(type(value), float)\n\n def test_numpy_float(self):\n value = nutils.types.strictfloat(numpy.float64(1.))\n self.assertEqual(value, 1.)\n self.assertEqual(type(value), float)\n\n def test_complex(self):\n with self.assertRaises(ValueError):\n nutils.types.strictint(1+0j)\n\n def test_str(self):\n with self.assertRaises(ValueError):\n nutils.types.strictfloat('1.')\n\nclass strictstr(TestCase):\n\n def test_str(self):\n value = nutils.types.strictstr('spam')\n self.assertEqual(value, 'spam')\n self.assertEqual(type(value), str)\n\n def test_int(self):\n with self.assertRaises(ValueError):\n nutils.types.strictstr(1)\n\nclass strict(TestCase):\n\n def test_valid(self):\n 
self.assertEqual(nutils.types.strict[int](1), 1)\n\n def test_invalid(self):\n with self.assertRaises(ValueError):\n nutils.types.strict[int]('1')\n\n def test_call(self):\n with self.assertRaises(TypeError):\n nutils.types.strict()\n\nclass tupletype(TestCase):\n\n def test_valid1(self):\n value = nutils.types.tuple[nutils.types.strictint]([])\n self.assertEqual(value, ())\n self.assertEqual(type(value), tuple)\n\n def test_valid2(self):\n value = nutils.types.tuple[nutils.types.strictint]([1,2,3])\n self.assertEqual(value, (1,2,3))\n self.assertEqual(type(value), tuple)\n\n def test_invalid(self):\n with self.assertRaises(ValueError):\n nutils.types.tuple[nutils.types.strictint]([1, 'spam','eggs'])\n\n def test_without_item_constructor(self):\n src = 1,2,3\n self.assertEqual(nutils.types.tuple(src), tuple(src))\n\n def test_name(self):\n self.assertEqual(nutils.types.tuple[nutils.types.strictint].__name__, 'tuple[nutils.types.strictint]')\n\nclass frozendict(TestCase):\n\n def test_constructor(self):\n src = {'spam': 1, 'eggs': 2.3}\n for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items())), ('frozendict', nutils.types.frozendict(src))]:\n with self.subTest(name):\n frozen = nutils.types.frozendict(value)\n self.assertIsInstance(frozen, nutils.types.frozendict)\n self.assertEqual(dict(frozen), src)\n\n def test_constructor_invalid(self):\n with self.assertRaises(ValueError):\n nutils.types.frozendict(['spam', 'eggs', 1])\n\n def test_clsgetitem(self):\n T = nutils.types.frozendict[str, float]\n src = {1: 2, 'spam': '2.3'}\n for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items()))]:\n with self.subTest(name):\n frozen = T(value)\n self.assertIsInstance(frozen, nutils.types.frozendict)\n self.assertEqual(dict(frozen), {'1': 2., 'spam': 2.3})\n\n def test_clsgetitem_invalid_types(self):\n with self.assertRaises(RuntimeError):\n nutils.types.frozendict[str, float, bool]\n\n def test_clsgetitem_invalid_value(self):\n T = nutils.types.frozendict[str, float]\n with self.assertRaises(ValueError):\n T(1)\n\n def test_setitem(self):\n frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})\n with self.assertRaises(TypeError):\n frozen['eggs'] = 3\n\n def test_delitem(self):\n frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})\n with self.assertRaises(TypeError):\n del frozen['eggs']\n\n def test_getitem_existing(self):\n frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})\n self.assertEqual(frozen['spam'], 1)\n\n def test_getitem_nonexisting(self):\n frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})\n with self.assertRaises(KeyError):\n frozen['foo']\n\n def test_contains(self):\n frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})\n self.assertIn('spam', frozen)\n self.assertNotIn('foo', frozen)\n\n def test_iter(self):\n src = {'spam': 1, 'eggs': 2.3}\n frozen = nutils.types.frozendict(src)\n self.assertEqual(frozenset(frozen), frozenset(src))\n\n def test_len(self):\n src = {'spam': 1, 'eggs': 2.3}\n frozen = nutils.types.frozendict(src)\n self.assertEqual(len(frozen), len(src))\n\n def test_hash(self):\n src = {'spam': 1, 'eggs': 2.3}\n self.assertEqual(hash(nutils.types.frozendict(src)), hash(nutils.types.frozendict(src)))\n\n def test_copy(self):\n src = {'spam': 1, 'eggs': 2.3}\n copy = nutils.types.frozendict(src).copy()\n self.assertIsInstance(copy, dict)\n self.assertEqual(copy, src)\n\n def test_pickle(self):\n src = {'spam': 1, 'eggs': 
2.3}\n frozen = pickle.loads(pickle.dumps(nutils.types.frozendict(src)))\n self.assertIsInstance(frozen, nutils.types.frozendict)\n self.assertEqual(dict(frozen), src)\n\n def test_eq_same_id(self):\n src = {'spam': 1, 'eggs': 2.3}\n a = nutils.types.frozendict(src)\n self.assertEqual(a, a)\n\n def test_eq_other_id(self):\n src = {'spam': 1, 'eggs': 2.3}\n a = nutils.types.frozendict(src)\n b = nutils.types.frozendict(src)\n self.assertEqual(a, b)\n\n def test_eq_deduplicated(self):\n src = {'spam': 1, 'eggs': 2.3}\n a = nutils.types.frozendict(src)\n b = nutils.types.frozendict(src)\n a == b # this replaces `a.__base` with `b.__base`\n self.assertEqual(a, b)\n\n def test_ineq_frozendict(self):\n src = {'spam': 1, 'eggs': 2.3}\n self.assertNotEqual(nutils.types.frozendict(src), nutils.types.frozendict({'spam': 1}))\n\n def test_ineq_dict(self):\n src = {'spam': 1, 'eggs': 2.3}\n self.assertNotEqual(nutils.types.frozendict(src), src)\n\n def test_nutils_hash(self):\n frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})\n self.assertEqual(nutils.types.nutils_hash(frozen).hex(), '8cf14f109e54707af9c2e66d7d3cdb755cce8243')\n\nclass frozenmultiset(TestCase):\n\n def test_constructor(self):\n src = 'spam', 'bacon', 'sausage', 'spam'\n for name, value in [('tuple', src), ('frozenmultiset', nutils.types.frozenmultiset(src))]:\n with self.subTest(name=name):\n frozen = nutils.types.frozenmultiset(value)\n for item in 'spam', 'bacon', 'sausage':\n self.assertEqual({k: tuple(frozen).count(k) for k in set(src)}, {'spam':2, 'bacon':1, 'sausage':1})\n\n def test_clsgetitem(self):\n src = False, 1, numpy.int64(2)\n frozen = nutils.types.frozenmultiset[nutils.types.strictint](src)\n self.assertEqual(set(frozen), {0, 1, 2})\n\n def test_preserve_order(self):\n for src in [('spam', 'bacon', 'sausage', 'spam'), ('spam', 'egg', 'spam', 'spam', 'bacon', 'spam')]:\n with self.subTest(src=src):\n self.assertEqual(tuple(nutils.types.frozenmultiset(src)), src)\n\n def test_and(self):\n for l, r, lar in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], ['spam', 'eggs']],\n [['spam'], ['eggs'], []],\n [['spam','spam']]*3]:\n with self.subTest(l=l, r=r, lar=lar):\n self.assertEqual(nutils.types.frozenmultiset(l)&nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lar))\n with self.subTest(l=r, r=l, lar=lar):\n self.assertEqual(nutils.types.frozenmultiset(r)&nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(lar))\n\n def test_sub(self):\n for l, r, lmr, rml in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], [], ['spam']],\n [['spam'], ['eggs'], ['spam'], ['eggs']],\n [['spam'], ['spam'], [], []]]:\n with self.subTest(l=l, r=r, lmr=lmr):\n self.assertEqual(nutils.types.frozenmultiset(l)-nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lmr))\n with self.subTest(l=r, r=l, lmr=rml):\n self.assertEqual(nutils.types.frozenmultiset(r)-nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(rml))\n\n def test_pickle(self):\n src = 'spam', 'bacon', 'sausage', 'spam'\n frozen = pickle.loads(pickle.dumps(nutils.types.frozenmultiset(src)))\n self.assertIsInstance(frozen, nutils.types.frozenmultiset)\n self.assertEqual(frozen, nutils.types.frozenmultiset(src))\n\n def test_hash(self):\n src = 'spam', 'bacon', 'sausage', 'spam'\n ref = nutils.types.frozenmultiset(src)\n for perm in itertools.permutations(src):\n with self.subTest(perm=perm):\n self.assertEqual(hash(nutils.types.frozenmultiset(src)), hash(ref))\n\n def test_nutils_hash(self):\n for perm in itertools.permutations(('spam', 'bacon', 
'sausage', 'spam')):\n with self.subTest(perm=perm):\n frozen = nutils.types.frozenmultiset(perm)\n self.assertEqual(nutils.types.nutils_hash(frozen).hex(), 'f3fd9c6d4741af2e67973457ee6308deddcb714c')\n\n def test_eq(self):\n src = 'spam', 'bacon', 'sausage', 'spam'\n ref = nutils.types.frozenmultiset(src)\n for perm in itertools.permutations(src):\n with self.subTest(perm=perm):\n self.assertEqual(nutils.types.frozenmultiset(src), ref)\n\n def test_contains(self):\n src = 'spam', 'bacon', 'sausage', 'spam'\n frozen = nutils.types.frozenmultiset(src)\n for item in 'spam', 'bacon', 'eggs':\n with self.subTest(item=item):\n if item in src:\n self.assertIn(item, frozen)\n else:\n self.assertNotIn(item, frozen)\n\n def test_len(self):\n src = 'spam', 'bacon', 'sausage', 'spam'\n frozen = nutils.types.frozenmultiset(src)\n self.assertEqual(len(frozen), len(src))\n\n def test_nonzero(self):\n self.assertTrue(nutils.types.frozenmultiset(['spam', 'eggs']))\n self.assertFalse(nutils.types.frozenmultiset([]))\n\n def test_add(self):\n l = nutils.types.frozenmultiset(['spam', 'bacon'])\n r = nutils.types.frozenmultiset(['sausage', 'spam'])\n lpr = nutils.types.frozenmultiset(['spam', 'bacon', 'sausage', 'spam'])\n self.assertEqual(l+r, lpr)\n\n def test_isdisjoint(self):\n for l, r, disjoint in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], False],\n [['spam'], ['eggs'], True],\n [['spam'], ['spam'], False]]:\n with self.subTest(l=l, r=r, disjoint=disjoint):\n self.assertEqual(nutils.types.frozenmultiset(l).isdisjoint(nutils.types.frozenmultiset(r)), disjoint)\n\nclass frozenarray(TestCase):\n\n def _test_constructor(self, src, frozen_dtype, src_types=(list,numpy.array,nutils.types.frozenarray)):\n src = list(src)\n for copy in True, False:\n for src_type in src_types:\n with self.subTest(copy=copy, src_type=src_type):\n frozen = nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)\n self.assertIsInstance(frozen, nutils.types.frozenarray)\n self.assertEqual(frozen.tolist(), src)\n def _test_constructor_raises(self, src, frozen_dtype, exc_type, exc_regex):\n src = list(src)\n for copy in True, False:\n for src_type in list, numpy.array, nutils.types.frozenarray:\n with self.subTest(copy=copy, src_type=src_type), self.assertRaisesRegex(exc_type, exc_regex):\n nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)\n def test_constructor_bool(self):\n self._test_constructor((False, True), bool)\n def test_constructor_bool_emptyarray(self):\n self._test_constructor((), bool, src_types=[list])\n def test_constructor_int(self):\n self._test_constructor((0,1), int)\n def test_constructor_int_upcast(self):\n self._test_constructor((False,True), int)\n def test_constructor_int_downcast(self):\n self._test_constructor((0.,1.), int)\n def test_constructor_int_emptyarray(self):\n self._test_constructor((), int, src_types=[list])\n def test_constructor_float(self):\n self._test_constructor((0.,1.), float)\n def test_constructor_float_upcast(self):\n self._test_constructor((0,1), float)\n def test_constructor_float_downcast(self):\n src = [0.+0j,1.+0j]\n for copy in True, False:\n with self.subTest(copy=copy, src_type=list), self.assertRaises(TypeError):\n nutils.types.frozenarray(src, copy=copy, dtype=float)\n for src_type in numpy.array, nutils.types.frozenarray:\n with self.subTest(copy=copy, src_type=src_type), self.assertWarns(numpy.ComplexWarning):\n nutils.types.frozenarray(src_type(src), copy=copy, dtype=float)\n def test_constructor_complex(self):\n 
self._test_constructor((0+0j,1+1j), complex)\n def test_constructor_strictint(self):\n self._test_constructor((0,1), nutils.types.strictint)\n def test_constructor_strictint_upcast(self):\n self._test_constructor((False,True), nutils.types.strictint)\n def test_constructor_strictint_downcast(self):\n self._test_constructor_raises((0.,1.), nutils.types.strictint, ValueError, '^downcasting .* is forbidden$')\n def test_constructor_strictint_emptyarray(self):\n self._test_constructor((), nutils.types.strictint, src_types=[list])\n def test_constructor_strictfloat(self):\n self._test_constructor((0.,1.), nutils.types.strictfloat)\n def test_constructor_strictfloat_upcast(self):\n self._test_constructor((0,1), nutils.types.strictfloat)\n def test_constructor_strictfloat_downcast(self):\n self._test_constructor_raises((0.+0j,1.+0j), nutils.types.strictfloat, ValueError, '^downcasting .* is forbidden$')\n def test_constructor_invalid_dtype(self):\n self._test_constructor_raises((0,1), list, ValueError, '^unsupported dtype:')\n\n def test_clsgetitem(self):\n src = [0.,1.]\n frozen = nutils.types.frozenarray[nutils.types.strictfloat](src)\n self.assertIsInstance(frozen, nutils.types.frozenarray)\n self.assertEqual(frozen.tolist(), src)\n\n def test_clsgetitem_invalid(self):\n src = [0.,1.]\n with self.assertRaises(ValueError):\n nutils.types.frozenarray[nutils.types.strictint](src)\n\n def test_nutils_hash(self):\n a = nutils.types.frozenarray(numpy.array([[1,2],[3,4]], numpy.int64))\n b = nutils.types.frozenarray(numpy.array([[1,3],[2,4]], numpy.int64))\n self.assertNotEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b).hex())\n self.assertEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b.T).hex())\n self.assertEqual(nutils.types.nutils_hash(a).hex(), '42cc3a5e1216c1f0a9921a61a3a2c67025c98d69')\n self.assertEqual(nutils.types.nutils_hash(b).hex(), '8f0c9f9a118c42c258f1e69e374aadda99b4be97')\n\n def test_pickle(self):\n src = [[1,2],[3,4]]\n value = pickle.loads(pickle.dumps(nutils.types.frozenarray(src)))\n self.assertIsInstance(value, nutils.types.frozenarray)\n self.assertEqual(value, nutils.types.frozenarray(src))\n\n def test_eq_same_instance(self):\n a = nutils.types.frozenarray([[1,2],[3,4]], int)\n self.assertEqual(a, a)\n\n def test_eq_not_frozenarray(self):\n a = nutils.types.frozenarray([[1,2],[3,4]], int)\n self.assertNotEqual(a, [[1,2],[3,4]])\n\n def test_eq_same_base(self):\n base = numpy.array([[1,2],[3,4]], int)\n a = nutils.types.frozenarray(base, copy=False)\n b = nutils.types.frozenarray(base, copy=False)\n self.assertEqual(a, b)\n\n def test_eq_different_array(self):\n a = nutils.types.frozenarray([[1,2],[3,4]], int)\n b = nutils.types.frozenarray([[1,3],[2,4]], int)\n self.assertNotEqual(a, b)\n\n def test_eq_different_dtype(self):\n a = nutils.types.frozenarray([[1,2],[3,4]], int)\n b = nutils.types.frozenarray([[1,2],[3,4]], float)\n self.assertNotEqual(a, b)\n\n def test_eq_different_base(self):\n a = nutils.types.frozenarray([[1,2],[3,4]], int)\n b = nutils.types.frozenarray([[1,2],[3,4]], int)\n self.assertEqual(a, b)\n\n def test_ineq_equal(self):\n l = nutils.types.frozenarray([1,2], int)\n r = nutils.types.frozenarray([1,2], int)\n self.assertFalse(l < r)\n self.assertTrue(l <= r)\n self.assertFalse(l > r)\n self.assertTrue(l >= r)\n\n def test_ineq_smaller(self):\n l = nutils.types.frozenarray([1,2], int)\n r = nutils.types.frozenarray([2,1], int)\n self.assertTrue(l < r)\n self.assertTrue(l <= r)\n self.assertFalse(l > r)\n 
self.assertFalse(l >= r)\n\n def test_ineq_larger(self):\n l = nutils.types.frozenarray([2,1], int)\n r = nutils.types.frozenarray([1,2], int)\n self.assertFalse(l < r)\n self.assertFalse(l <= r)\n self.assertTrue(l > r)\n self.assertTrue(l >= r)\n\n def test_ineq_incomparable(self):\n array = nutils.types.frozenarray([1,2], int)\n for op in operator.lt, operator.le, operator.gt, operator.ge:\n with self.subTest(op=op), self.assertRaises(TypeError):\n op(array, 1)\n\n def test_full(self):\n self.assertEqual(nutils.types.frozenarray.full([2,3], 1.5), nutils.types.frozenarray([[1.5]*3]*2, float))\n\n def test_as_numpy_array(self):\n\n a = numpy.array(nutils.types.frozenarray([1,2]))\n self.assertIsInstance(a, numpy.ndarray)\n\nclass c_array(TestCase):\n\n def test_idempotence(self):\n a = numpy.array([1,2,3], dtype=numpy.int64)\n P = nutils.types.c_array[numpy.int64]\n a_ct = P(a)\n self.assertEqual(P(a_ct), a_ct)\n\n def test_list(self):\n a = [1,2,3]\n a_ct = nutils.types.c_array[numpy.int64](a)\n self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)\n\n def test_array(self):\n a = numpy.array([1,2,3], dtype=numpy.int64)\n a_ct = nutils.types.c_array[numpy.int64](a)\n self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)\n\n def test_array_invalid_dtype(self):\n a = numpy.array([1,2,3], dtype=numpy.int32)\n with self.assertRaisesRegex(ValueError, '^Expected dtype .* but array has dtype .*\\\\.$'):\n a_ct = nutils.types.c_array[numpy.int64](a)\n\n def test_array_noncontinguous(self):\n a = numpy.array([[1,2],[3,4]], dtype=numpy.int32).T\n with self.assertRaisesRegex(ValueError, '^Array is not contiguous\\\\.$'):\n a_ct = nutils.types.c_array[numpy.int64](a)\n\n def test_wo_getitem(self):\n with self.assertRaises(TypeError):\n nutils.types.c_array()\n\nclass T_Immutable(nutils.types.Immutable):\n def __init__(self, x, y, *, z):\n pass\n\nclass T_Singleton(nutils.types.Singleton):\n def __init__(self, x, y, *, z):\n pass\n\n@parametrize\nclass ImmutableFamily(TestCase):\n\n def test_pickle(self):\n T = {nutils.types.Immutable: T_Immutable, nutils.types.Singleton: T_Singleton}[self.cls]\n a = T(1, 2, z=3)\n b = pickle.loads(pickle.dumps(a))\n self.assertEqual(a, b)\n\n def test_eq(self):\n class T(self.cls):\n def __init__(self, x, y):\n pass\n class U(self.cls):\n def __init__(self, x, y):\n pass\n\n self.assertEqual(T(1, 2), T(1, 2))\n self.assertNotEqual(T(1, 2), T(2, 1))\n self.assertNotEqual(T(1, 2), U(1, 2))\n\n def test_canonical_args(self):\n class T(self.cls):\n def __init__(self, x, y, z=3):\n pass\n\n self.assertEqual(T(x=1, y=2), T(1, 2, 3))\n\n def test_keyword_args(self):\n class T(self.cls):\n def __init__(self, x, y, **kwargs):\n pass\n\n a = T(x=1, y=2, z=3)\n b = T(1, 2, z=3)\n self.assertEqual(a, b)\n\n def test_preprocessors(self):\n class T(self.cls):\n @nutils.types.apply_annotations\n def __init__(self, x: int):\n pass\n\n self.assertEqual(T(1), T('1'))\n self.assertEqual(T(1), T(x='1'))\n\n def test_nutils_hash(self):\n class T(self.cls):\n def __init__(self, x, y):\n pass\n class T1(self.cls, version=1):\n def __init__(self, x, y):\n pass\n class U(self.cls):\n def __init__(self, x, y):\n pass\n\n self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(1, 2)).hex())\n self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(2, 1)).hex())\n self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(U(1, 2)).hex())\n # Since the 
hash does not include base classes, the hashes of Immutable and Singleton are the same.\n self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), '8c3ba8f0d9eb054ab192f4e4e2ba7442564bdf85')\n self.assertEqual(nutils.types.nutils_hash(T1(1, 2)).hex(), 'bab4ee65b5189f544a4242f0e386af76cfa6e31d')\n\n @parametrize.enable_if(lambda cls: cls is nutils.types.Singleton)\n def test_deduplication(self):\n class T(self.cls):\n def __init__(self, x, y):\n pass\n class U(self.cls):\n def __init__(self, x, y):\n pass\n\n a = T(1, 2)\n b = T(1, 2)\n c = T(2, 1)\n d = U(1, 2)\n self.assertIs(a, b)\n self.assertEqual(a, b)\n self.assertIsNot(a, c)\n self.assertNotEqual(a, c)\n self.assertIsNot(a, d)\n self.assertNotEqual(a, d)\n\nImmutableFamily(cls=nutils.types.Immutable)\nImmutableFamily(cls=nutils.types.Singleton)\n\n# vim:sw=2:sts=2:et\n"
] | [
[
"numpy.array",
"numpy.float64",
"numpy.int64"
]
] |
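A note on the strict coercions exercised in the test file above: `strictint` accepts Python and numpy integers (including bools) and rejects floats, complex numbers and strings. A minimal standalone sketch of that behaviour, written here for illustration rather than taken from `nutils.types`:

```python
import numbers

import numpy


def strictint_sketch(value):
    """Coerce exact integer values to int; reject lossy conversions.

    Illustration only, not the nutils.types.strictint implementation.
    """
    # numbers.Integral covers int, bool, and numpy integer scalars.
    if isinstance(value, numbers.Integral):
        return int(value)
    raise ValueError('not an integer: {!r}'.format(value))


assert strictint_sketch(1) == 1
assert strictint_sketch(numpy.int64(1)) == 1
for bad in (1.0, 1 + 0j, '1'):
    try:
        strictint_sketch(bad)
    except ValueError:
        pass
    else:
        raise AssertionError('should have raised for {!r}'.format(bad))
```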
islasimpson/snowpaper_2022 | [
"d6ee677f696d7fd6e7cadef8168ce4fd8b184cac",
"d6ee677f696d7fd6e7cadef8168ce4fd8b184cac",
"d6ee677f696d7fd6e7cadef8168ce4fd8b184cac"
] | [
"DATA_SORT/3cities/SCAM/outputcesmscam_TREFHT_CLM5_CLM5F_001.py",
"DATA_SORT/3cities/SCAM_CLMINIT_60days_withclearsky/outputcesmscam_FLNSC_CLM5_CLM5F_001.py",
"DATA_SORT/3cities/SCAM/weakerrelaxation/outputcesmscam_TREFHT_SNOWDa_CLM5F_002.py"
] | [
"import importlib\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nimport sys\n\nfrom CASutils import filter_utils as filt\nfrom CASutils import readdata_utils as read\nfrom CASutils import calendar_utils as cal\n\nimportlib.reload(filt)\nimportlib.reload(read)\nimportlib.reload(cal)\n\n\nexpname=['SASK_CLM5_CLM5F_01.001.FSCAM.sask_1979_2014',\n 'TOR_CLM5_CLM5F_01.001.FSCAM.tor_1979_2014',\n 'SID_SNOWD_SNOWDF_01.001.FSCAM.sidsnowd1']\n\noutname='SCAM_CLM5_CLM5F_001'\n\ncityname=['Saskatoon','Toronto','Siderovsk']\ncitylon=[253.330, 280.617, 82.3139]\ncitylat=[52.1579, 43.6532, 66.5973]\n\nfor icity in np.arange(0,3,1):\n\n basedir=\"/project/cas02/islas/CLM5_CLM4/raw/SCAM_new_lowrelax/\"\n pathout=\"/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/\"\n \n fpath=basedir+expname[icity]+\"/atm/hist/h0concat.nc\"\n print(fpath)\n dat = read.read_sfc_cesm(fpath,\"1979-01-01T12:00:00\",\"2014-12-31T12:00:00\")\n \n if (icity == 0): \n trefht = xr.DataArray(np.zeros([dat.time.size, 3]), coords=[dat.time, cityname],\n dims=['time','city'], name='trefht')\n \n trefht[:,icity] = dat.TREFHT.isel(lon=0,lat=0)\n \n trefht.to_netcdf(path=pathout+\"TREFHT_\"+outname+\".nc\")\n",
"import importlib\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nimport sys\n\nfrom CASutils import filter_utils as filt\nfrom CASutils import readdata_utils as read\nfrom CASutils import calendar_utils as cal\n\nimportlib.reload(filt)\nimportlib.reload(read)\nimportlib.reload(cal)\n\n\nexpname=['SASK_CLM5_CLM5F_01.001.FSCAM.sask_1979_2014',\n 'TOR_CLM5_CLM5F_01.001.FSCAM.tor_1979_2014',\n 'SID_CLM5_CLM5F_01.001.FSCAM.sid_1979_2014']\n\noutname='SCAM_CLM5_CLM5F_01'\n\ncityname=['Saskatoon','Toronto','Siderovsk']\ncitylon=[253.330, 280.617, 82.3139]\ncitylat=[52.1579, 43.6532, 66.5973]\n\nfor icity in np.arange(0,3,1):\n\n basedir=\"/project/cas02/islas/CLM5_CLM4/raw/SCAM_CLM_INIT_60days_withclearsky/\"\n pathout=\"/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/SCAM_CLMINIT_60days_withclearsky/\"\n \n fpath=basedir+expname[icity]+\"/atm/hist/h0concat.nc\"\n print(fpath)\n dat = read.read_sfc_cesm(fpath,\"1979-01-01T12:00:00\",\"2014-12-31T12:00:00\")\n \n if (icity == 0): \n flnsc = xr.DataArray(np.zeros([dat.time.size, 3]), coords=[dat.time, cityname],\n dims=['time','city'], name='flnsc')\n \n flnsc[:,icity] = dat.FLNSC.isel(lon=0,lat=0)\n \n flnsc.to_netcdf(path=pathout+\"FLNSC_\"+outname+\".nc\")\n",
"import importlib\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nimport sys\n\nfrom CASutils import filter_utils as filt\nfrom CASutils import readdata_utils as read\nfrom CASutils import calendar_utils as cal\n\nimportlib.reload(filt)\nimportlib.reload(read)\nimportlib.reload(cal)\n\n\nexpname=['SASK_SNOWDa_CLM5F_02.001.FSCAM.sask2_1979_2014',\n 'TOR_SNOWDa_CLM5F_02.001.FSCAM.tor2_1979_2014',\n 'SID_SNOWDa_CLM5F_02.001.FSCAM.sid2_1979_2014']\n\noutname='SCAM_SNOWDa_CLM5F_02'\n\ncityname=['Saskatoon','Toronto','Siderovsk']\ncitylon=[253.330, 280.617, 82.3139]\ncitylat=[52.1579, 43.6532, 66.5973]\n\nfor icity in np.arange(0,3,1):\n\n basedir=\"/project/cas02/islas/CLM5_CLM4/raw/SCAM_new_lowrelax/\"\n pathout=\"/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/SCAM/new_lowrelax/\"\n \n fpath=basedir+expname[icity]+\"/atm/hist/h0concat.nc\"\n print(fpath)\n dat = read.read_sfc_cesm(fpath,\"1979-01-01T12:00:00\",\"2014-12-31T12:00:00\")\n \n if (icity == 0): \n trefht = xr.DataArray(np.zeros([dat.time.size, 3]), coords=[dat.time, cityname],\n dims=['time','city'], name='trefht')\n \n trefht[:,icity] = dat.TREFHT.isel(lon=0,lat=0)\n \n trefht.to_netcdf(path=pathout+\"TREFHT_\"+outname+\".nc\")\n"
] | [
[
"numpy.arange",
"numpy.zeros"
],
[
"numpy.arange",
"numpy.zeros"
],
[
"numpy.arange",
"numpy.zeros"
]
] |
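The three scripts above share one pattern: allocate a (time, city) xarray DataArray on the first loop iteration, fill one city column per iteration, then write the result to NetCDF. A condensed sketch of that pattern with synthetic data standing in for `read.read_sfc_cesm` (the output filename is made up for this example, and writing NetCDF assumes a netCDF backend is installed):

```python
import numpy as np
import pandas as pd
import xarray as xr

cityname = ['Saskatoon', 'Toronto', 'Siderovsk']
time = pd.date_range('1979-01-01', periods=10, freq='D')  # synthetic stand-in for the model output times

trefht = None
for icity in np.arange(0, 3, 1):
    series = np.random.randn(time.size)  # stand-in for dat.TREFHT.isel(lon=0, lat=0)
    if trefht is None:  # the scripts test (icity == 0); same effect
        trefht = xr.DataArray(np.zeros([time.size, 3]), coords=[time, cityname],
                              dims=['time', 'city'], name='trefht')
    trefht[:, icity] = series

trefht.to_netcdf(path='TREFHT_example.nc')  # hypothetical output name
```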
GuillaumeRochette/HumanViewSynthesis | [
"358d9cb55486ad0f81a31df8ab4159153765e7e5"
] | [
"geometry/matrix.py"
] | [
"from typing import Tuple\nimport torch\nfrom torch import Tensor\n\n\ndef homogeneous(A: Tensor, b: Tensor) -> Tensor:\n \"\"\"\n Converts heterogeneous matrix into homogeneous matrix.\n\n :param A: Heterogeneous matrix of shape [*, N, N].\n :param b: Heterogeneous vector of shape [*, N, 1].\n :return: Homogeneous matrix of shape [*, N + 1, N + 1].\n \"\"\"\n assert A.shape[:-2] == b.shape[:-2]\n assert A.shape[-2] == A.shape[-1] == b.shape[-2]\n assert b.shape[-1] == 1\n\n s, n = A.shape[:-2], A.shape[-2]\n\n c = torch.zeros(s + (1, n), dtype=A.dtype, device=A.device)\n d = torch.ones(s + (1, 1), dtype=A.dtype, device=A.device)\n\n M = torch.cat(\n [\n torch.cat([A, b], dim=-1),\n torch.cat([c, d], dim=-1),\n ],\n dim=-2,\n )\n\n return M\n\n\ndef heterogeneous(M: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"\n Converts homogeneous matrix into heterogeneous matrix.\n\n :param M: Homogeneous matrix of shape [*, N + 1, N + 1].\n :return: Heterogeneous matrix and vector of shapes [*, N, N] and [*, N, 1] respectively.\n \"\"\"\n assert M.shape[-2] == M.shape[-1]\n\n n = M.shape[-2] - 1\n\n Ab, cd = M.split([n, 1], dim=-2)\n A, b = Ab.split([n, 1], dim=-1)\n c, d = cd.split([n, 1], dim=-1)\n A, b = A / d, b / d\n\n return A, b\n\n\ndef affine(x: Tensor, A: Tensor, b: Tensor) -> Tensor:\n \"\"\"\n Applies an affine transformation to x given A and b.\n\n :param x: Vector of shape [*, N, 1].\n :param A: Matrix of shape [*, N, N].\n :param b: Vector of shape [*, N, 1].\n :return: Vector of shape [*, N, 1].\n \"\"\"\n assert x.ndim == A.ndim == b.ndim\n assert x.shape[-2] == A.shape[-2] == A.shape[-1] == b.shape[-2]\n assert x.shape[-1] == b.shape[-1] == 1\n\n y = A @ x + b\n\n return y\n\n\ndef eye_like(x: Tensor) -> Tensor:\n \"\"\"\n Return an identity matrix of the same shape as x.\n\n :param x: Matrix of shape [*, M, N].\n :return: Identity matrix of shape [*, M, N].\n \"\"\"\n m, n = x.shape[-2], x.shape[-1]\n\n return torch.eye(m, n, dtype=x.dtype, device=x.device).expand_as(x)\n\n\ndef diag(x: Tensor):\n \"\"\"\n Returns a diagonal matrix given a vector.\n\n :param x: Vector of shape [*, M, 1].\n :return: Diagonal matrix of shape [*, M, M].\n \"\"\"\n assert x.shape[-1] == 1\n m = x.shape[-2]\n\n return torch.eye(m, dtype=x.dtype, device=x.device) * x\n"
] | [
[
"torch.zeros",
"torch.ones",
"torch.eye",
"torch.cat"
]
] |
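A quick sanity check of the homogeneous/heterogeneous round trip defined above, with the two functions inlined from the module so the snippet runs without the repository on the path (only torch is assumed):

```python
import torch

def homogeneous(A, b):
    # Assemble [[A, b], [0, 1]] along the last two dims, as in the module above.
    s, n = A.shape[:-2], A.shape[-2]
    c = torch.zeros(s + (1, n), dtype=A.dtype, device=A.device)
    d = torch.ones(s + (1, 1), dtype=A.dtype, device=A.device)
    return torch.cat([torch.cat([A, b], dim=-1), torch.cat([c, d], dim=-1)], dim=-2)

def heterogeneous(M):
    # Split M back into the [*, N, N] block and the [*, N, 1] vector.
    n = M.shape[-2] - 1
    Ab, cd = M.split([n, 1], dim=-2)
    A, b = Ab.split([n, 1], dim=-1)
    c, d = cd.split([n, 1], dim=-1)
    return A / d, b / d

A = torch.randn(5, 3, 3)                   # batch of linear maps
b = torch.randn(5, 3, 1)                   # batch of translations
A2, b2 = heterogeneous(homogeneous(A, b))  # round trip through [5, 4, 4]
assert torch.allclose(A, A2) and torch.allclose(b, b2)
print('round trip ok')
```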
tomcattigerkkk/traj_gen | [
"d01882c17d8e979860fb1f09defa968a86adb494"
] | [
"python/scripts/traj_gen/chomp_trajectory.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n\nfrom .traj_gen_base import TrajGen\nimport numpy as np\nimport casadi as ca\nfrom scipy.interpolate import interp1d\nclass CHOMPTrajGen(TrajGen):\n def __init__(self, knots_, dim_, pntDensity_):\n super().__init__(knots_, dim_)\n self.pntDensity = pntDensity_\n assert knots_.shape[0]==2, 'For optimalTraj, knots = [t0, tf]'\n self.num_variables = int(np.floor((knots_[-1]-knots_[0])*pntDensity_))\n self.dt = (knots_[-1]-knots_[0])/(self.num_variables-1)\n self.ts = np.linspace(knots_[0], knots_[-1], self.num_variables) # different from Ts\n self.Xs = np.zeros((self.dim, self.num_variables))\n\n def findStepIndex(self, t):\n \"\"\"\n find the closest index of the segment\n \"\"\"\n time_diff = (self.ts-t)**2\n return np.where(time_diff==np.min(time_diff))[0][0]\n\n def setDerivativeObj(self, weight_mask):\n self.weight_mask = weight_mask\n\n def addPin(self, pin_):\n if pin_['d'] >= self.num_variables:\n print(\"Warning: The degree of the pin exceed the total number of variables. This pin ignored\\n\")\n super().addPin(pin_)\n X_ = pin_['X']\n m = 0\n if len(X_.shape) == 2: # 2 dimension ==> loose pin\n if m in self.loosePinSet.keys():\n self.loosePinSet[m].append(pin_)\n else:\n self.loosePinSet[m] = [pin_]\n elif len(X_.shape) == 1: # vector ==> fix pin\n if m in self.fixPinSet.keys():\n self.fixPinSet[m].append(pin_)\n else:\n self.fixPinSet[m] = [pin_]\n else:\n print(\"Warning: Dim of pin value is invalid\\n\")\n\n def getDiffMat(self, d_):\n if d_ == 0:\n mat_ = np.diag(np.ones(self.num_variables))\n else:\n mat_ = np.diag(np.ones(self.num_variables))\n for j in range(1, d_+1):\n D_ = np.zeros((self.num_variables-j, self.num_variables-j+1))\n for i in range(self.num_variables-j):\n D_[i, i:i+2] = np.array([-1, 1])\n D_ = D_/self.dt\n mat_ = np.dot(D_, mat_)\n return mat_\n\n def loosePin2InequalityMat(self,):\n ASet = None\n BSet = None\n if len(self.loosePinSet.keys()) == 0:\n return ASet, BSet\n for pin in self.loosePinSet[0]:\n a_set_ = []\n b_set_ = []\n for dd in range(self.dim):\n\n n_ = np.min([self.findStepIndex(pin['t']), self.num_variables-pin['d']-1])\n a_ = np.zeros((2, self.num_variables-pin['d']))\n a_[:, n_] = np.array([1, -1])\n a_ = np.dot(a_, self.getDiffMat(pin['d']))\n a_set_.append(a_)\n b_ = np.array([pin['X'][dd, 1], -pin['X'][dd, 0]]).reshape(-1, 1)\n b_set_.append(b_)\n\n if ASet is None:\n ASet = np.array(a_set_)\n BSet = np.array(b_set_).reshape(self.dim, -1, 1)\n else:\n ASet = np.concatenate((ASet, np.array(a_set_)), axis=1)\n BSet = np.concatenate((BSet, np.array(b_set_).reshape(self.dim, -1, 1)), axis=1)\n print('Bset final in {}'.format(BSet.shape))\n return ASet, BSet\n\n def fixPin2EqualityMat(self,):\n AeqSet = None\n BeqSet = None\n if len(self.fixPinSet.keys())==0:\n return AeqSet, BeqSet\n for pin in self.fixPinSet[0]:\n aeq_set_ = []\n beq_set_ = []\n for dd in range(self.dim):\n n_ = np.min([self.findStepIndex(pin['t']), self.num_variables-pin['d']-1])\n a_ = np.zeros(self.num_variables-pin['d'])\n a_[n_] = 1.0\n a_ = np.dot(a_, self.getDiffMat(pin['d']))\n aeq_set_.append(a_)\n # print(aeq_set_)\n b_ = pin['X'][dd]\n beq_set_.append(b_)\n if AeqSet is None:\n AeqSet = np.array(aeq_set_).reshape(self.dim, 1, -1)\n BeqSet = np.array(beq_set_).reshape(self.dim, 1, -1)\n # print(AeqSet.shape)\n # print(BeqSet.shape)\n else:\n AeqSet = np.concatenate((AeqSet, np.array(aeq_set_).reshape(self.dim, 1, -1)), axis=1)\n BeqSet = np.concatenate((BeqSet, np.array(beq_set_).reshape(self.dim, 1, -1)), axis=1)\n # 
print(BeqSet.shape)\n return AeqSet, BeqSet\n\n def getQPset(self,):\n # 1. objective\n QSet = np.zeros((self.dim, self.num_variables, self.num_variables))\n for dd in range(self.dim):\n Q_ = np.zeros((self.num_variables, self.num_variables))\n for d in range(1, self.weight_mask.shape[0]+1):\n if self.weight_mask[d-1]>0:\n temp_ = self.getDiffMat(d)\n Qd_ = np.dot(temp_.T, temp_)\n Q_ = Q_ + self.weight_mask[d-1]*Qd_\n QSet[dd] = Q_\n\n # 2. constraints\n ASet, BSet = self.loosePin2InequalityMat()\n AeqSet, BeqSet = self.fixPin2EqualityMat()\n\n return QSet, ASet, BSet, AeqSet, BeqSet\n\n def solve(self,):\n self.isSolved = True\n # prepare QP\n QSet, ASet, BSet, AeqSet, BeqSet = self.getQPset()\n\n if ASet is None:\n print(\"Please define the beginning and also the end pins\")\n return False\n\n for dd in range(self.dim):\n print('solving {}-th dimension.. \\n'.format(dd))\n x_sym = ca.SX.sym('x', QSet[0].shape[0])\n opts_setting = {'ipopt.max_iter':100, 'ipopt.print_level':0, 'print_time':0, 'ipopt.acceptable_tol':1e-8, 'ipopt.acceptable_obj_change_tol':1e-6}\n obj = ca.mtimes([x_sym.T, QSet[dd], x_sym])\n if ASet is None:\n a_set = AeqSet[dd].copy()\n else:\n a_set = np.concatenate((ASet[dd], AeqSet[dd]))\n Ax_sym = ca.mtimes([a_set, x_sym])\n if BSet is None:\n b_set_u = BeqSet[dd]\n b_set_l = BeqSet[dd]\n else:\n b_set_u = np.concatenate((BSet[dd], BeqSet[dd]), axis=0) # Ax <= b_set_u\n b_set_l = np.concatenate((-np.inf*np.ones(BSet[dd].shape), BeqSet[dd]), axis=0) # Ax >= b_set_l\n nlp_prob = {'f': obj, 'x': x_sym, 'g':Ax_sym}\n solver = ca.nlpsol('solver', 'ipopt', nlp_prob, opts_setting)\n try:\n result = solver(lbg=b_set_l, ubg=b_set_u,)\n Phat_ = result['x']\n # print(Phat_)\n flag_ = True\n except:\n Phat_ = None\n flag_ = False\n if flag_:\n self.Xs[dd] = Phat_.full().flatten()\n else:\n self.isSolved = False\n print(\"Failure ..\")\n return False\n return True\n\n def eval(self, t_, d_):\n val_ = np.zeros((self.dim, t_.shape[0]))\n for dd in range(self.dim):\n for idx in range(t_.shape[0]):\n t_i = t_[idx]\n if t_i < self.Ts[0] or t_i > self.Ts[-1]:\n print(\"WARNING: Eval of t: out of bound. Extrapolation\\n\")\n Xsd_ = np.dot(self.getDiffMat(d_), self.Xs[dd].T)\n if d_ >0:\n t_v_ = self.ts[:-d_]\n else:\n t_v_ = self.ts\n # print(t_v_.shape)\n # print(Xsd_.shape)\n set_interp = interp1d(t_v_, Xsd_, kind='linear')\n # print(t_v_[-1])\n # print(t_[idx])\n if t_[idx] <= t_v_[-1]:\n val_[dd, idx] = set_interp(t_[idx])\n else:\n val_[dd, idx] = set_interp(t_v_[-1])\n return val_\n"
] | [
[
"numpy.ones",
"scipy.interpolate.interp1d",
"numpy.zeros",
"numpy.concatenate",
"numpy.floor",
"numpy.min",
"numpy.array",
"numpy.dot",
"numpy.linspace"
]
] |
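`getDiffMat` above chains first-order forward differences to build the d-th derivative operator, and `getQPset` turns it into the smoothness cost Q = DᵀD. A condensed numpy rendering of that construction with illustrative sizes (n and dt are made up for the example):

```python
import numpy as np

def diff_mat(num_variables, d, dt):
    """d-th order forward-difference operator, as built by getDiffMat above."""
    mat = np.eye(num_variables)
    for j in range(1, d + 1):
        D = np.zeros((num_variables - j, num_variables - j + 1))
        for i in range(num_variables - j):
            D[i, i:i + 2] = [-1.0, 1.0]
        mat = (D / dt) @ mat
    return mat

n, dt = 6, 0.1                    # illustrative sizes
D1 = diff_mat(n, 1, dt)           # velocity operator, shape (5, 6)
Q = D1.T @ D1                     # x^T Q x = sum of squared velocities
x = np.linspace(0.0, 1.0, n)      # a straight line has constant velocity 2.0
print(x @ Q @ x)                  # ~20.0: five segments at velocity 2.0
```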
SmirnovKol/recurrent-visual-attention | [
"a38ac8958ebf1c61a10c4d5320f1e31d3d0b73dd"
] | [
"data_loader.py"
] | [
"import numpy as np\nfrom utils import plot_images\n\nimport torch\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n\ndef get_train_valid_loader(\n data_dir,\n batch_size,\n random_seed,\n valid_size=0.1,\n shuffle=True,\n show_sample=False,\n num_workers=4,\n pin_memory=False,\n):\n \"\"\"Train and validation data loaders.\n\n If using CUDA, num_workers should be set to 1 and pin_memory to True.\n\n Args:\n data_dir: path directory to the dataset.\n batch_size: how many samples per batch to load.\n random_seed: fix seed for reproducibility.\n valid_size: percentage split of the training set used for\n the validation set. Should be a float in the range [0, 1].\n In the paper, this number is set to 0.1.\n shuffle: whether to shuffle the train/validation indices.\n show_sample: plot 9x9 sample grid of the dataset.\n num_workers: number of subprocesses to use when loading the dataset.\n pin_memory: whether to copy tensors into CUDA pinned memory. Set it to\n True if using GPU.\n \"\"\"\n error_msg = \"[!] valid_size should be in the range [0, 1].\"\n assert (valid_size >= 0) and (valid_size <= 1), error_msg\n\n # define transforms\n normalize = transforms.Normalize((0.1307,), (0.3081,))\n trans = transforms.Compose([transforms.ToTensor(), normalize])\n\n # load dataset\n dataset = datasets.MNIST(data_dir, train=True, download=True, transform=trans)\n\n num_train = len(dataset)\n indices = list(range(num_train))\n split = int(np.floor(valid_size * num_train))\n\n if shuffle:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n\n train_idx, valid_idx = indices[split:], indices[:split]\n\n train_sampler = SubsetRandomSampler(train_idx)\n valid_sampler = SubsetRandomSampler(valid_idx)\n\n train_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=train_sampler,\n num_workers=num_workers,\n pin_memory=pin_memory,\n )\n\n valid_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=valid_sampler,\n num_workers=num_workers,\n pin_memory=pin_memory,\n )\n\n # visualize some images\n if show_sample:\n sample_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=9,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=pin_memory,\n )\n data_iter = iter(sample_loader)\n images, labels = data_iter.next()\n X = images.numpy()\n X = np.transpose(X, [0, 2, 3, 1])\n plot_images(X, labels)\n\n return (train_loader, valid_loader)\n\n\ndef get_test_loader(data_dir, batch_size, num_workers=4, pin_memory=False):\n \"\"\"Test datalaoder.\n\n If using CUDA, num_workers should be set to 1 and pin_memory to True.\n\n Args:\n data_dir: path directory to the dataset.\n batch_size: how many samples per batch to load.\n num_workers: number of subprocesses to use when loading the dataset.\n pin_memory: whether to copy tensors into CUDA pinned memory. Set it to\n True if using GPU.\n \"\"\"\n # define transforms\n normalize = transforms.Normalize((0.1307,), (0.3081,))\n trans = transforms.Compose([transforms.ToTensor(), normalize])\n\n # load dataset\n dataset = datasets.MNIST(data_dir, train=False, download=True, transform=trans)\n\n data_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n )\n\n return data_loader\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.random.shuffle",
"numpy.transpose",
"numpy.random.seed",
"numpy.floor",
"torch.utils.data.sampler.SubsetRandomSampler"
]
] |
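The train/validation split above shuffles an index list once and hands the two halves to separate `SubsetRandomSampler` instances over the same dataset. The same mechanics on a synthetic `TensorDataset`, so the snippet runs without downloading MNIST:

```python
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.sampler import SubsetRandomSampler

# 100 fake grayscale images in place of the MNIST download
dataset = TensorDataset(torch.randn(100, 1, 28, 28), torch.randint(0, 10, (100,)))

valid_size, random_seed = 0.1, 42
indices = list(range(len(dataset)))
split = int(np.floor(valid_size * len(dataset)))
np.random.seed(random_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]

train_loader = DataLoader(dataset, batch_size=16, sampler=SubsetRandomSampler(train_idx))
valid_loader = DataLoader(dataset, batch_size=16, sampler=SubsetRandomSampler(valid_idx))
print(len(train_idx), len(valid_idx))  # 90 10
```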
badrutdinovrr/darts | [
"434708e63cbda8f710d3c1810d06ad31c11db923"
] | [
"cnn/model_search.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom operations import *\nfrom torch.autograd import Variable\nfrom genotypes import PRIMITIVES\nfrom genotypes import Genotype\n\n\nclass MixedOp(nn.Module):\n\n def __init__(self, C, stride):\n super(MixedOp, self).__init__()\n self._ops = nn.ModuleList()\n for primitive in PRIMITIVES:\n op = OPS[primitive](C, stride, False)\n if 'pool' in primitive:\n op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))\n self._ops.append(op)\n\n def forward(self, x, weights):\n return sum(w * op(x) for w, op in zip(weights, self._ops))\n\n\nclass Cell(nn.Module):\n\n def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):\n super(Cell, self).__init__()\n self.reduction = reduction\n\n if reduction_prev:\n self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)\n else:\n self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)\n self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)\n self._steps = steps\n self._multiplier = multiplier\n\n self._ops = nn.ModuleList()\n self._bns = nn.ModuleList()\n for i in range(self._steps):\n for j in range(2+i):\n stride = 2 if reduction and j < 2 else 1\n op = MixedOp(C, stride)\n self._ops.append(op)\n\n def forward(self, s0, s1, weights):\n s0 = self.preprocess0(s0)\n s1 = self.preprocess1(s1)\n\n states = [s0, s1]\n offset = 0\n for i in range(self._steps):\n s = sum(self._ops[offset+j](h, weights[offset+j]) for j, h in enumerate(states))\n offset += len(states)\n states.append(s)\n\n return torch.cat(states[-self._multiplier:], dim=1)\n\n\nclass Network(nn.Module):\n\n def __init__(self, C, num_classes, layers, criterion, steps=4, multiplier=4, stem_multiplier=3):\n super(Network, self).__init__()\n self._C = C\n self._num_classes = num_classes\n self._layers = layers\n self._criterion = criterion\n self._steps = steps\n self._multiplier = multiplier\n\n C_curr = stem_multiplier*C\n self.stem = nn.Sequential(\n nn.Conv2d(1, C_curr, 3, padding=1, bias=False),\n nn.BatchNorm2d(C_curr)\n )\n \n C_prev_prev, C_prev, C_curr = C_curr, C_curr, C\n self.cells = nn.ModuleList()\n reduction_prev = False\n for i in range(layers):\n if i in [layers//3, 2*layers//3]:\n C_curr *= 2\n reduction = True\n else:\n reduction = False\n cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)\n reduction_prev = reduction\n self.cells += [cell]\n C_prev_prev, C_prev = C_prev, multiplier*C_curr\n\n self.global_pooling = nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Linear(C_prev, num_classes)\n\n self._initialize_alphas()\n\n def new(self):\n model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()\n for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):\n x.data.copy_(y.data)\n return model_new\n\n def forward(self, input):\n s0 = s1 = self.stem(input)\n for i, cell in enumerate(self.cells):\n if cell.reduction:\n weights = F.softmax(self.alphas_reduce, dim=-1)\n else:\n weights = F.softmax(self.alphas_normal, dim=-1)\n s0, s1 = s1, cell(s0, s1, weights)\n out = self.global_pooling(s1)\n logits = self.classifier(out.view(out.size(0),-1))\n return logits\n\n def _loss(self, input, target):\n logits = self(input)\n return self._criterion(logits, target) \n\n def _initialize_alphas(self):\n k = sum(1 for i in range(self._steps) for n in range(2+i))\n num_ops = len(PRIMITIVES)\n\n self.alphas_normal = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)\n 
self.alphas_reduce = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)\n self._arch_parameters = [\n self.alphas_normal,\n self.alphas_reduce,\n ]\n\n def arch_parameters(self):\n return self._arch_parameters\n\n def genotype(self):\n\n def _parse(weights):\n gene = []\n n = 2\n start = 0\n for i in range(self._steps):\n end = start + n\n W = weights[start:end].copy()\n edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]\n for j in edges:\n k_best = None\n for k in range(len(W[j])):\n if k != PRIMITIVES.index('none'):\n if k_best is None or W[j][k] > W[j][k_best]:\n k_best = k\n gene.append((PRIMITIVES[k_best], j))\n start = end\n n += 1\n return gene\n\n gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy())\n gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy())\n\n concat = range(2+self._steps-self._multiplier, self._steps+2)\n genotype = Genotype(\n normal=gene_normal, normal_concat=concat,\n reduce=gene_reduce, reduce_concat=concat\n )\n return genotype\n\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.randn",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.softmax",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.cat"
]
] |
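`MixedOp` above relaxes the discrete choice of operation into a weighted sum, with the weights produced by a softmax over architecture parameters. A toy version with a made-up three-op candidate set in place of `PRIMITIVES`, showing that gradients reach the alphas:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyMixedOp(nn.Module):
    """Weighted sum over candidate ops, as in MixedOp above (toy op set)."""
    def __init__(self, channels):
        super().__init__()
        self._ops = nn.ModuleList([
            nn.Identity(),
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            nn.AvgPool2d(3, stride=1, padding=1),
        ])

    def forward(self, x, weights):
        return sum(w * op(x) for w, op in zip(weights, self._ops))

op = TinyMixedOp(8)
alphas = torch.randn(3, requires_grad=True)   # architecture parameters
x = torch.randn(2, 8, 16, 16)
y = op(x, F.softmax(alphas, dim=-1))
y.sum().backward()                            # gradients flow through the softmax
print(alphas.grad.shape)                      # torch.Size([3])
```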
915288938lx/Personae-master-01 | [
"0885c37956bd3f9157c66109e09755a51ad5d3a1"
] | [
"algorithm/RL/DDPG.py"
] | [
"# coding=utf-8\n\nimport tensorflow as tf\nimport numpy as np\n\nimport os\n\nfrom algorithm import config\nfrom base.env.market import Market\nfrom checkpoints import CHECKPOINTS_DIR\nfrom base.algorithm.model import BaseRLTFModel\nfrom helper.args_parser import model_launcher_parser\nfrom helper.data_logger import generate_algorithm_logger, generate_market_logger\n\n\nclass Algorithm(BaseRLTFModel):\n\n def __init__(self, session, env, a_space, s_space, **options):\n super(Algorithm, self).__init__(session, env, a_space, s_space, **options)\n\n self.actor_loss, self.critic_loss = .0, .0\n\n # Initialize buffer.\n self.buffer = np.zeros((self.buffer_size, self.s_space * 2 + 1 + 1))\n self.buffer_length = 0\n\n self._init_input()\n self._init_nn()\n self._init_op()\n self._init_saver()\n self._init_summary_writer()\n\n def _init_input(self):\n self.s = tf.placeholder(tf.float32, [None, self.s_space], 'state')\n self.r = tf.placeholder(tf.float32, [None, 1], 'reward')\n self.s_next = tf.placeholder(tf.float32, [None, self.s_space], 'state_next')\n\n def _init_nn(self):\n # Initialize predict actor and critic.\n self.a_predict = self.__build_actor_nn(self.s, \"predict/actor\", trainable=True)\n self.q_predict = self.__build_critic(self.s, self.a_predict, \"predict/critic\", trainable=True)\n # Initialize target actor and critic.\n self.a_next = self.__build_actor_nn(self.s_next, \"target/actor\", trainable=False)\n self.q_next = self.__build_critic(self.s_next, self.a_next, \"target/critic\", trainable=False)\n # Save scopes\n self.scopes = [\"predict/actor\", \"target/actor\", \"predict/critic\", \"target/critic\"]\n\n def _init_op(self):\n # Get actor and critic parameters.\n params = [tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) for scope in self.scopes]\n zipped_a_params, zipped_c_params = zip(params[0], params[1]), zip(params[2], params[3])\n # Initialize update actor and critic op.\n self.update_a = [tf.assign(t_a, (1 - self.tau) * t_a + self.tau * p_a) for p_a, t_a in zipped_a_params]\n self.update_c = [tf.assign(t_c, (1 - self.tau) * t_c + self.tau * p_c) for p_c, t_c in zipped_c_params]\n # Initialize actor loss and train op.\n with tf.variable_scope('actor_loss'):\n self.a_loss = -tf.reduce_mean(self.q_predict)\n with tf.variable_scope('actor_train'):\n self.a_train_op = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.a_loss, var_list=params[0])\n # Initialize critic loss and train op.\n self.q_target = self.r + self.gamma * self.q_next\n with tf.variable_scope('critic_loss'):\n self.c_loss = tf.losses.mean_squared_error(self.q_target, self.q_predict)\n with tf.variable_scope('critic_train'):\n self.c_train_op = tf.train.RMSPropOptimizer(self.learning_rate * 2).minimize(self.c_loss, var_list=params[2])\n # Initialize variables.\n self.session.run(tf.global_variables_initializer())\n\n def run(self):\n if self.mode != 'train':\n self.restore()\n else:\n for episode in range(self.episodes):\n self.log_loss(episode)\n s = self.env.reset(self.mode)\n while True:\n c, a, a_index = self.predict(s)\n s_next, r, status, info = self.env.forward(c, a)\n self.save_transition(s, a_index, r, s_next)\n self.train()\n s = s_next\n if status == self.env.Done:\n self.env.trader.log_asset(episode)\n break\n if self.enable_saver and episode % 10 == 0:\n self.save(episode)\n\n def train(self):\n if self.buffer_length < self.buffer_size:\n return\n self.session.run([self.update_a, self.update_c])\n s, a, r, s_next = self.get_transition_batch()\n self.critic_loss, _ = 
self.session.run([self.c_loss, self.c_train_op], {self.s: s, self.a_predict: a, self.r: r, self.s_next: s_next})\n self.actor_loss, _ = self.session.run([self.a_loss, self.a_train_op], {self.s: s})\n\n def predict(self, s):\n a = self.session.run(self.a_predict, {self.s: s})[0][0]\n return self.get_stock_code_and_action(a, use_greedy=True, use_prob=True if self.mode == 'train' else False)\n\n def save_transition(self, s, a, r, s_next):\n transition = np.hstack((s, [[a]], [[r]], s_next))\n self.buffer[self.buffer_length % self.buffer_size, :] = transition\n self.buffer_length += 1\n\n def get_transition_batch(self):\n indices = np.random.choice(self.buffer_size, size=self.batch_size)\n batch = self.buffer[indices, :]\n s = batch[:, :self.s_space]\n a = batch[:, self.s_space: self.s_space + 1]\n r = batch[:, -self.s_space - 1: -self.s_space]\n s_next = batch[:, -self.s_space:]\n return s, a, r, s_next\n\n def log_loss(self, episode):\n self.logger.warning(\"Episode: {0} | Actor Loss: {1:.2f} | Critic Loss: {2:.2f}\".format(episode,\n self.actor_loss,\n self.critic_loss))\n\n def __build_actor_nn(self, state, scope, trainable=True):\n\n w_init, b_init = tf.random_normal_initializer(.0, .001), tf.constant_initializer(.1)\n\n with tf.variable_scope(scope):\n # state is ? * code_count * data_dim.\n first_dense = tf.layers.dense(state,\n 64,\n tf.nn.relu,\n kernel_initializer=w_init,\n bias_initializer=b_init,\n trainable=trainable)\n\n action = tf.layers.dense(first_dense,\n 1,\n tf.nn.sigmoid,\n kernel_initializer=w_init,\n bias_initializer=b_init,\n trainable=trainable)\n\n return tf.multiply(action, self.a_space - 1)\n\n @staticmethod\n def __build_critic(state, action, scope, trainable=True):\n\n w_init, b_init = tf.random_normal_initializer(.0, .3), tf.constant_initializer(.1)\n\n with tf.variable_scope(scope):\n\n s_first_dense = tf.layers.dense(state,\n 32,\n tf.nn.relu,\n kernel_initializer=w_init,\n bias_initializer=b_init,\n trainable=trainable)\n\n a_first_dense = tf.layers.dense(action,\n 32,\n tf.nn.relu,\n kernel_initializer=w_init,\n bias_initializer=b_init,\n trainable=trainable)\n\n q_value = tf.layers.dense(tf.nn.relu(s_first_dense + a_first_dense),\n 1,\n kernel_initializer=w_init,\n bias_initializer=b_init,\n trainable=trainable)\n\n return q_value\n\n\ndef main(args):\n mode = args.mode\n # mode = 'test'\n codes = args.codes\n # codes = [\"AU88\", \"RB88\", \"CU88\", \"AL88\"]\n # codes = [\"T9999\"]\n market = args.market\n # market = 'future'\n episode = args.episode\n # episode = 2000\n # training_data_ratio = 0.5\n training_data_ratio = args.training_data_ratio\n\n model_name = os.path.basename(__file__).split('.')[0]\n\n env = Market(codes, start_date=\"2012-01-01\", end_date=\"2019-07-19\", **{\n \"market\": market,\n # \"use_sequence\": True,\n \"logger\": generate_market_logger(model_name),\n \"training_data_ratio\": training_data_ratio,\n })\n\n algorithm = Algorithm(tf.Session(config=config), env, env.trader.action_space, env.data_dim, **{\n \"mode\": mode,\n \"episodes\": episode,\n \"enable_saver\": True,\n \"learning_rate\": 0.003,\n \"enable_summary_writer\": True,\n \"logger\": generate_algorithm_logger(model_name),\n \"save_path\": os.path.join(CHECKPOINTS_DIR, \"RL\", model_name, market, \"model\"),\n \"summary_path\": os.path.join(CHECKPOINTS_DIR, \"RL\", model_name, market, \"summary\"),\n })\n\n algorithm.run()\n algorithm.eval()\n algorithm.plot()\n\n\nif __name__ == '__main__':\n main(model_launcher_parser.parse_args())\n\n\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.constant_initializer",
"numpy.zeros",
"tensorflow.global_variables_initializer",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.get_collection",
"tensorflow.multiply",
"tensorflow.reduce_mean",
"numpy.random.choice",
"tensorflow.variable_scope",
"numpy.hstack",
"tensorflow.assign",
"tensorflow.random_normal_initializer",
"tensorflow.Session",
"tensorflow.losses.mean_squared_error",
"tensorflow.nn.relu",
"tensorflow.layers.dense"
]
] |
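The save_transition/get_transition_batch pair in the file above packs each transition as [s, a, r, s_next] into one flat row of a fixed-width circular NumPy buffer and slices the batch back apart by column ranges. A minimal standalone sketch of that layout follows; the ReplayBuffer class and its 1-D state vectors are illustrative assumptions, not part of the repository (the original feeds 2-D states, hence its [[a]] wrapping):

import numpy as np

class ReplayBuffer:
    def __init__(self, size, s_space):
        # Row layout mirrors the buffer above: [s (s_space), a (1), r (1), s_next (s_space)].
        self.buffer = np.zeros((size, s_space * 2 + 2))
        self.size, self.s_space, self.count = size, s_space, 0

    def save(self, s, a, r, s_next):
        # Circular write: once full, the oldest row is overwritten.
        self.buffer[self.count % self.size, :] = np.hstack((s, [a], [r], s_next))
        self.count += 1

    def sample(self, batch_size):
        batch = self.buffer[np.random.choice(self.size, size=batch_size), :]
        s = batch[:, :self.s_space]
        a = batch[:, self.s_space:self.s_space + 1]
        r = batch[:, self.s_space + 1:self.s_space + 2]
        s_next = batch[:, -self.s_space:]
        return s, a, r, s_next

buf = ReplayBuffer(size=64, s_space=4)
for _ in range(64):
    buf.save(np.ones(4), 2.0, 0.5, np.zeros(4))
s, a, r, s_next = buf.sample(32)  # shapes: (32, 4), (32, 1), (32, 1), (32, 4)

Storing transitions as one flat row keeps sampling a single fancy-indexing operation, at the cost of re-slicing the columns on every batch.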
fragrussu/qMRINet | [
"418cbe22cefa2974d8a97b359324ff4c35865d22"
] | [
"tools/trainpar_deepqmri.py"
] | [
"# Author: Francesco Grussu, University College London\n#\t\t <[email protected]> <[email protected]>\n#\n# Code released under BSD Two-Clause license\n#\n# Copyright (c) 2020 University College London. \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n# \n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n# The views and conclusions contained in the software and documentation are those\n# of the authors and should not be interpreted as representing official policies,\n# either expressed or implied, of the FreeBSD Project.\n\n### Load libraries\nimport argparse, os, sys\nfrom numpy import matlib\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch import Tensor\nfrom torch.utils.data import DataLoader\nfrom torch import autograd\nimport pickle as pk\nfrom pathlib import Path as pt\nsys.path.insert(0, os.path.dirname(pt(__file__).absolute()) )\nimport deepqmri\n\n\nif __name__ == \"__main__\":\n\n\t\n\t### Print help and parse arguments\n\tparser = argparse.ArgumentParser(description='This program trains a qMRI-net for quantitative MRI parameter estimation. A qMRI-Nnet enables voxel-by-voxel estimation of microstructural properties from sets of MRI images aacquired by varying the MRI sequence parameters. Author: Francesco Grussu, University College London (<[email protected]><[email protected]>). Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. 
All rights reserved.')\n\tparser.add_argument('sig_train', help='path to a pickle binary file storing the input training MRI signals as a numpy matrix (rows: voxels; columns: measurements)')\n\tparser.add_argument('param_train', help='path to a pickle binary file storing the training tissue parameter data as a numpy matrix (rows: voxels; columns: parameters)')\n\tparser.add_argument('sig_val', help='path to a pickle binary file storing the input validation MRI signals as a numpy matrix (rows: voxels; columns: measurements)')\n\tparser.add_argument('param_val', help='path to a pickle binary file storing the validation tissue parameters as a numpy matrix (rows: voxels; columns: parameters)')\n\tparser.add_argument('mri_model', help='string indicating the MRI model to fit (choose among: \"pr_hybriddwi\" for prostate hybrid diffusion-relaxometry imaging; \"br_sirsmdt\" for brain saturation recovery diffusion tensor on spherical mean signals; \"twocompdwite\" for a two-compartment diffusion-t2 relaxation model without anisotropy). Tissue parameters will be: model \"pr_hybriddwi\", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model \"br_sirsmdt\", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model \"twocompdwite\", parameters v, Da, t2a, Db, Kb, t2b, s0')\n\tparser.add_argument('mri_prot', help='path to text file storing the MRI protocol. For model \"pr_hybriddwi\" and \"twocompdwite\" it must contain a matrix where the 1st row stores b-values in s/mm^2, while 2nd row echo times in ms; for model \"br_sirsmdt\" it must contain a matrix where the 1st row stores preparation times (saturation-inversion delay) in ms, the 2nd row inversion times (inversion-excitation delay) in ms, the 3rd row b-values in s/mm^2. For a pure inversion recovery (i.e. no saturation pulse), use a very large number for the saturation-inversion delay (at least 5 times the maximum expected T1). Different entries should be separated by spaces')\n\tparser.add_argument('out_base', help='base name of output directory (a string built with the network parameters will be added to the base). 
The output directory will contain the following output files: ** losstrain.bin, pickle binary storing the training loss as a numpy matrix (shape: epoch x batch); ** lossval.bin, pickle binary storing the validation loss as a numpy matrix (shape: epoch x 1); ** nnet_epoch0.bin, pickle binary storing the qMRI-net at initialisation; ** nnet_epoch0.pth, Pytorch binary storing the qMRI-net at initialisation; ** nnet_epoch<FINAL_EPOCH>.bin, pickle binary storing the qMRI-net at the final epoch; ** nnet_lossvalmin.bin, pickle binary storing the trained qMRI-net at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); * nnet_lossvalmin.pth, Pytorch binary storing the trained qMRI-net at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin_sigval.bin, prediction of the validation signals (shape: voxels x measurements) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin_tissueval.bin, prediction of tissue parameters from validation signals (shape: voxels x number_of_tissue_parameters) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin.info, text file reporting information regarding the epoch with the lowest validation loss; ** lossval_min.txt, miniimum validation loss; ** nnet_lossvalmin_sigtest.bin, prediction of the test signals (shape: voxels x measurements) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information), if those signals are provided; ** nnet_lossvalmin_tissuetest.bin, prediction of tissue parameters from test signals (shape: voxels x number_of_tissue_parameters) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information) if test signals are provided')\n\tparser.add_argument('--nn', metavar='<list>', help='array storing the number of hidden neurons, separated by hyphens (example: 30-15-8). The first number (input neurons) must equal the number of measurements in the protocol (Nmeas); the last number (output neurons) must equal the number of parameters in the model (Npar, 9 for model \"pr_hybriddwi\", 4 for model \"br_sirsmdt\", 7 for model \"twocompdwite\"). Default: Nmeas-(Npar + (Nmeas minus Npar))/2-Npar, where Nmeas is the number of MRI measurements and Npar is the number of tissue parameters for the signal model to fit')\n\tparser.add_argument('--pdrop', metavar='<value>', default='0.0', help='dropout probability in each layer of the neural network. Default: 0.0')\n\tparser.add_argument('--noepoch', metavar='<value>', default='500', help='number of epochs used for training. Default: 500')\n\tparser.add_argument('--lrate', metavar='<value>', default='0.001', help='learning rate. Default: 0.001')\n\tparser.add_argument('--mbatch', metavar='<value>', help='number of voxels in each training mini-batch. Default: 1/80 of the total number of training voxels (minimum: 2 voxels)')\n\tparser.add_argument('--seed', metavar='<value>', default='19102018', help='integer used as a seed for Numpy and PyTorch random number generators. Default: 19102018')\n\tparser.add_argument('--nwork', metavar='<value>', default='0', help='number of workers for data loader. 
Default: 0')\n\tparser.add_argument('--dtest', metavar='<file>', help='path to an option input pickle binary file storing test MRI signals as a numpy matrix (rows: voxels; columns: measurements)')\n\tparser.add_argument('--parmin', metavar='<value>', help='list of lower bounds of tissue parameters. Entries corresponding to different parameters should be separated by a comma (for example: 0.5,0.2,250,0.5 for model br_sirsmdt). Tissue parameters are: model \"pr_hybriddwi\", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model \"br_sirsmdt\", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model \"twocompdwite\", parameters v, Da, t2a, Db, Kb, t2b, s0, where a and b indicate compartments a and b. If not specified, default tissue parameter ranges are used.')\n\tparser.add_argument('--parmax', metavar='<value>', help='list of upper bounds of tissue parameters. Entries corresponding to different parameters should be separated by a comma (for example: 2.4,0.9,3000,5.0 for model br_sirsmdt). Tissue parameters are: model \"pr_hybriddwi\", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model \"br_sirsmdt\", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model \"twocompdwite\", parameters v, Da, t2a, Db, Kb, t2b, s0, where a and b indicate compartments a and b. If not specified, default tissue parameter ranges are used.')\n\targs = parser.parse_args()\n\n\t### Get some of the inputs\n\tpdrop = float(args.pdrop)\n\tnoepoch = int(args.noepoch)\n\tlrate = float(args.lrate)\n\tseed = int(args.seed)\n\tnwork = int(args.nwork)\n\tmrimodel = args.mri_model\n\n\t### Print some information\n\tprint('')\n\tprint('')\n\tprint('********************************************************************')\n\tprint(' TRAIN A qMRI-NET (qmripar CLASS) ')\n\tprint('********************************************************************')\n\tprint('')\n\tprint('** Input training MRI signals: {}'.format(args.sig_train))\n\tprint('** Input training tissue parameters: {}'.format(args.param_train))\n\tprint('** Input validation MRI signals: {}'.format(args.sig_val))\n\tprint('** Input validation tissue parameters: {}'.format(args.param_val))\n\tif args.dtest is not None:\n\t\tprint('** Input test MRI signals: {}'.format(args.dtest))\n\n\t### Load training MRI signals\n\tfh = open(args.sig_train,'rb')\n\tdatatrain = pk.load(fh)\n\tfh.close()\n\tnvox_train = datatrain.shape[0]\n\tnmeas_train = datatrain.shape[1]\n\n\t### Load validation MRI signals\n\tfh = open(args.sig_val,'rb')\n\tdataval = pk.load(fh)\n\tfh.close()\n\tnvox_val = dataval.shape[0]\n\tif dataval.shape[1]!=datatrain.shape[1]:\n\t\traise RuntimeError('the number of MRI measurements in the validation set differs from the training set!')\t\t\n\n\t### Load test MRI signals\n\tif args.dtest is not None:\n\t\tfh = open(args.dtest,'rb')\n\t\tdatatest = np.float32(pk.load(fh))\n\t\tfh.close()\n\t\tif datatest.shape[1]!=datatrain.shape[1]:\n\t\t\traise RuntimeError('the number of MRI measurements in the test set differs from the training set!')\t\t\n\n\t### Load training tissue parameters\n\tfh = open(args.param_train,'rb')\n\tprmtrain = pk.load(fh)\n\tnpar_train = prmtrain.shape[1]\n\tfh.close()\n\tif prmtrain.shape[0]!=datatrain.shape[0]:\n\t\traise RuntimeError('the number of voxels in the training parameters differs from the training MRI signals!')\t\t\n\n\t### Load validation tissue parameters\n\tfh = 
open(args.param_val,'rb')\n\tprmval = pk.load(fh)\n\tfh.close()\n\tif prmval.shape[0]!=dataval.shape[0]:\n\t\traise RuntimeError('the number of voxels in the validation parameters differs from the validation MRI signals!')\n\tif prmval.shape[1]!=prmtrain.shape[1]:\n\t\traise RuntimeError('the number of validation parameters differs from the number of training parameters!')\t\t\n\n\t### Get number of mini-batches\n\tif args.mbatch is None:\n\t\tmbatch = int(float(datatrain.shape[0]) / 80.0) # Default: 1/80 of the total number of training voxels\n\telse:\n\t\tmbatch = int(args.mbatch)\n\t\tif (mbatch>datatrain.shape[0]):\n\t\t\tmbatch = datatrain.shape[0]\n\t\tif(mbatch<2):\n\t\t\tmbatch = int(2)\n\n\t### Load MRI protocol\n\ttry:\n\t\tmriprot = np.loadtxt(args.mri_prot)\n\texcept:\n\t\traise RuntimeError('the format of the MRI protocol is not understood!')\n\t\t\n\t### Check that MRI model exists\n\tif ( (mrimodel!='pr_hybriddwi') and (mrimodel!='br_sirsmdt') and (mrimodel!='twocompdwite') ):\n\t\traise RuntimeError('the chosen MRI model is not implemented. Sorry!')\n\tif (mrimodel=='pr_hybriddwi'):\n\t\ts0idx = 8\n\telif (mrimodel=='br_sirsmdt'):\n\t\ts0idx = 3\n\telif (mrimodel=='twocompdwite'):\n\t\ts0idx = 6\n\n\t### Get specifics for hidden layers\n\tif args.nn is None:\n\n\t\tif (mrimodel=='pr_hybriddwi'): \n\t\t\tnpars = 9\n\t\telif (mrimodel=='br_sirsmdt'):\n\t\t\tnpars = 4\n\t\telif (mrimodel=='twocompdwite'):\n\t\t\tnpars = 7\n\t\telse:\n\t\t\traise RuntimeError('the chosen MRI model is not implemented. Sorry!')\n\n\n\t\tnhidden = np.array([int(nmeas_train) , int(float(npars)+0.5*( float(nmeas_train) - float(npars))) , int(npars)])\n\t\tnhidden_str = '{}-{}-{}'.format( int(nmeas_train) , int(float(npars)+0.5*( float(nmeas_train) - float(npars))) , int(npars) )\n\n\telse:\n\t\tnhidden = (args.nn).split('-')\n\t\tnhidden = np.array( list(map( int,nhidden )) )\n\t\tnhidden_str = args.nn\n\n\t### Get optional user-defined bounds for tissue parameters\n\tif (args.parmin is not None) or (args.parmax is not None):\n\t\t\n\t\tif (args.parmin is not None) and (args.parmax is None):\n\t\t\traise RuntimeError('you need to set both parmin and parmax options simultaneously')\n\t\t\n\t\tif (args.parmax is not None) and (args.parmin is None):\n\t\t\traise RuntimeError('you need to set both parmin and parmax options simultaneously')\n\t\t\t\t\t\n\t\t# Lower bound\n\t\tpminbound = (args.parmin).split(',')\n\t\tpminbound = np.array( list(map( float, pminbound )) )\n\t\t\n\t\t# Upper bound\n\t\tpmaxbound = (args.parmax).split(',')\n\t\tpmaxbound = np.array( list(map( float, pmaxbound )) )\n\n\n\t### Create output base name\n\tout_base_dir = '{}_nhidden{}_pdrop{}_noepoch{}_lr{}_mbatch{}_seed{}'.format(args.out_base,nhidden_str,pdrop,noepoch,lrate,mbatch,seed)\n\tif(os.path.isdir(out_base_dir)==False):\t\n\t\tos.mkdir(out_base_dir)\n\n\t### Print some more information\n\tprint('** Output directory: {}'.format(out_base_dir))\n\tprint('')\n\tprint('')\n\tprint('PARAMETERS')\n\tprint('')\n\tprint('** Hidden neurons: {}'.format(nhidden))\n\tprint('** Dropout probability: {}'.format(pdrop))\n\tprint('** Number of epochs: {}'.format(noepoch))\n\tprint('** Learning rate: {}'.format(lrate))\n\tprint('** Number of voxels in a mini-batch: {}'.format(mbatch))\n\tprint('** Seed: {}'.format(seed))\n\tprint('** Number of workers for data loader: {}'.format(nwork))\n\n\n\t### Set random seeds\n\tnp.random.seed(seed) # Random seed for reproducibility: NumPy\n\ttorch.manual_seed(seed) # Random seed for 
reproducibility: PyTorch\n\n\t### Normalise MRI signals and convert to single precision\n\tmax_val_train = np.transpose( matlib.repmat(np.max(datatrain,axis=1),nmeas_train,1) )\n\tdatatrain = np.float32( datatrain / max_val_train )\n\n\tmax_val_val = np.transpose( matlib.repmat(np.max(dataval,axis=1),nmeas_train,1) )\n\tdataval = np.float32( dataval / max_val_val )\n\n\tif args.dtest is not None:\n\t\tmax_val_test = np.transpose( matlib.repmat(np.max(datatest,axis=1),nmeas_train,1) )\n\t\tdatatest = np.float32( datatest / max_val_test )\n\n\tprmtrain = np.float32(prmtrain)\n\tprmval = np.float32(prmval)\n\t\n\t### Create mini-batches on training data with data loader\n\tloadertrain = DataLoader(np.concatenate((datatrain,prmtrain),axis=1), batch_size=mbatch, shuffle=True, num_workers=nwork)\n\n\t### Allocate memory for losses\n\tnobatch=0 # Count how many mini-batches of size mbatch we created\n\tfor signals in loadertrain:\n\t\tnobatch = nobatch+1\n\tlosstrain = np.zeros((noepoch,nobatch)) + np.nan\n\tlossval = np.zeros((noepoch,1)) + np.nan\n\n\t### Instantiate the network and training objects, and save the intantiated network\n\tnnet = deepqmri.qmripar(nhidden,pdrop,mrimodel,mriprot).cpu() # Instantiate neural network\n\tif (args.parmin is not None) or (args.parmax is not None):\n\t\tnnet.changelim(pminbound,pmaxbound) # Change tissue parameter ranges\n\tprint('** Tissue parameter names: {}'.format(nnet.param_name))\t\n\tprint('** Tissue parameter lower bounds: {}'.format(nnet.param_min))\t\n\tprint('** Tissue parameter upper bounds: {}'.format(nnet.param_max))\t\n\tprint('')\n\tprint('')\n\tnnetloss = nn.MSELoss() # Loss: L2 norm (mean squared error, Gaussian noise)\n\tnnetopt = torch.optim.Adam(nnet.parameters(), lr=lrate) # Network trained with ADAM optimiser\n\ttorch.save( nnet.state_dict(), os.path.join(out_base_dir,'epoch0_net.pth') ) # Save network at epoch 0 (i.e. 
at initialisation)\n\tnnet_file = open(os.path.join(out_base_dir,'epoch0_net.bin'),'wb')\n\tpk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL) \n\tnnet_file.close()\n\n\t### Create normalisation tensors for model parameters\n\tslope_norm_tr = np.ones((mbatch , npar_train))\n\toffset_norm_tr = np.ones((mbatch , npar_train))\n\n\tfor pp in range(0,npar_train):\n\t\tslope_norm_tr[:,pp] = 1.0 / (nnet.param_max[pp] - nnet.param_min[pp])\n\t\toffset_norm_tr[:,pp] = (-1.0*nnet.param_min[pp]) / (nnet.param_max[pp] - nnet.param_min[pp])\n\n\tslope_norm_tr = Tensor(np.float32(slope_norm_tr))\n\toffset_norm_tr = Tensor(np.float32(offset_norm_tr))\n\n\n\tslope_norm_val = np.ones((nvox_val , npar_train))\n\toffset_norm_val = np.ones((nvox_val , npar_train))\n\n\tfor pp in range(0,npar_train):\n\t\tslope_norm_val[:,pp] = 1.0 / (nnet.param_max[pp] - nnet.param_min[pp])\n\t\toffset_norm_val[:,pp] = (-1.0*nnet.param_min[pp]) / (nnet.param_max[pp] - nnet.param_min[pp])\n\n\tslope_norm_val = Tensor(np.float32(slope_norm_val))\n\toffset_norm_val = Tensor(np.float32(offset_norm_val))\n\n\t### Run training\n\t# Loop over epochs\n\tloss_val_prev = np.inf\n\tfor epoch in range(noepoch):\n\t \n\t\tprint(' EPOCH {}/{}'.format(epoch+1,noepoch))\n\t\tprint('')\n\n\t\t# Loop over mini-batches for at a fixed epoch\n\t\tminibatch_id = 0\n\t\tfor signals in loadertrain:\n\n\n\t\t\t# Pass the mini-batch through the network and store the training loss\n\t\t\toutput = nnet( Tensor(signals[:,0:nmeas_train]) ) # Pass MRI measurements and estimate tissue parmaters\n\t\t\ttry:\n\t\t\t\tlossmeas_train = nnetloss(Tensor(output)*slope_norm_tr + offset_norm_tr, Tensor(signals[:,nmeas_train:nmeas_train+npar_train])*slope_norm_tr + offset_norm_tr) # Training loss \n\t\t\texcept:\n\t\t\t\traise RuntimeError('The number of training voxels must be a multiple of the size of the mini-batch!')\n\n\t\t\t# Back propagation\n\t\t\tnnetopt.zero_grad() # Evaluate loss gradient with respect to network parameters at the output layer\n\t\t\tlossmeas_train.backward() # Backpropage the loss gradient through previous layers\n\t\t\tnnetopt.step() # Update network parameters\n\t\t\n\t\t\t# Store loss for the current mini-batch of training\n\t\t\tlosstrain[epoch,minibatch_id] = Tensor.numpy(lossmeas_train.data)\n\n\t\t\t# Update mini-batch counter\n\t\t\tminibatch_id = minibatch_id + 1\n\t\t\n\t\t\n\t\t# Run validation\n\t\tnnet.eval() # Set network to evaluation mode (deactivates dropout)\n\t\ttissueval_nnet = nnet( Tensor(dataval) ) # Output of full network (predicted tissue parameters)\n\t\tdataval_nnet = nnet.getsignals( Tensor(tissueval_nnet) ) # Estimate MRI signals\n\t\tdataval_nnet = dataval_nnet.detach().numpy()\n\t\tmax_val_val_out = np.transpose( matlib.repmat(np.max(dataval_nnet,axis=1),nmeas_train,1) )\n\t\tlossmeas_val = nnetloss( Tensor(tissueval_nnet)*slope_norm_val + offset_norm_val , Tensor(prmval)*slope_norm_val + offset_norm_val ) # Validation loss\n\t\t# Store validation loss\n\t\tlossval[epoch,0] = Tensor.numpy(lossmeas_val.data)\n\n\t\t# Save trained network at current epoch if validation loss has decreased\n\t\tif(Tensor.numpy(lossmeas_val.data)<=loss_val_prev):\n\t\t\tprint(' ... validation loss has decreased. 
Saving net...')\n\t\t\t# Save network\n\t\t\ttorch.save( nnet.state_dict(), os.path.join(out_base_dir,'lossvalmin_net.pth') )\n\t\t\tnnet_file = open(os.path.join(out_base_dir,'lossvalmin_net.bin'),'wb')\n\t\t\tpk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL) \n\t\t\tnnet_file.close()\n\t\t\t# Save information on the epoch\n\t\t\tnnet_text = open(os.path.join(out_base_dir,'lossvalmin.info'),'w')\n\t\t\tnnet_text.write('Epoch {} (indices starting from 0)'.format(epoch));\n\t\t\tnnet_text.close();\n\t\t\t# Update value of best validation loss so far\n\t\t\tloss_val_prev = Tensor.numpy(lossmeas_val.data)\n\t\t\t# Save predicted validation tissue parameters \n\t\t\ttissueval_nnet = tissueval_nnet.detach().numpy()\n\t\t\ttissueval_nnet[:,s0idx] = (max_val_val[:,0]/max_val_val_out[:,0])*tissueval_nnet[:,s0idx] # Rescale s0 (any column of would work)\n\t\t\ttissueval_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_tissueval.bin'),'wb')\n\t\t\tpk.dump(tissueval_nnet,tissueval_nnet_file,pk.HIGHEST_PROTOCOL) \n\t\t\ttissueval_nnet_file.close()\n\t\t\t# Save predicted validation signals\n\t\t\tdataval_nnet = (max_val_val/max_val_val_out)*dataval_nnet\t\t\t\t\n\t\t\tdataval_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_sigval.bin'),'wb')\n\t\t\tpk.dump(dataval_nnet,dataval_nnet_file,pk.HIGHEST_PROTOCOL) \n\t\t\tdataval_nnet_file.close()\n\n\t\t\t# Analyse test data if provided\n\t\t\tif args.dtest is not None:\n\t\t\t\t# Get neuronal activations as well as predicted test tissue parameters and test MRI signals \n\t\t\t\ttissuetest_nnet = nnet( Tensor(datatest) ) # Output of network (estimated tissue parameters)\n\t\t\t\tdatatest_nnet = nnet.getsignals( Tensor(tissuetest_nnet) ) # Predicted MRI signals\n\t\t\t\tdatatest_nnet = datatest_nnet.detach().numpy()\n\t\t\t\tmax_val_test_out = np.transpose( matlib.repmat(np.max(datatest_nnet,axis=1),nmeas_train,1) )\n\t\t\t\t# Save predicted test tissue parameters \n\t\t\t\ttissuetest_nnet = tissuetest_nnet.detach().numpy()\n\t\t\t\ttissuetest_nnet[:,s0idx] = (max_val_test[:,0]/max_val_test_out[:,0])*tissuetest_nnet[:,s0idx] # Rescale s0 (any column of max_val_test works)\n\t\t\t\ttissuetest_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_tissuetest.bin'),'wb')\n\t\t\t\tpk.dump(tissuetest_nnet,tissuetest_nnet_file,pk.HIGHEST_PROTOCOL) \n\t\t\t\ttissuetest_nnet_file.close()\n\t\t\t\t# Save predicted test signals\n\t\t\t\tdatatest_nnet = (max_val_test/max_val_test_out)*datatest_nnet # Rescale signal\t\n\t\t\t\tdatatest_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_sigtest.bin'),'wb')\n\t\t\t\tpk.dump(datatest_nnet,datatest_nnet_file,pk.HIGHEST_PROTOCOL) \n\t\t\t\tdatatest_nnet_file.close()\n\n\n\t\t# Set network back to training mode\n\t\tnnet.train()\n\n\t\t# Print some information\n\t\tprint('')\n\t\tprint(' TRAINING INFO:')\n\t\tprint(' Trainig loss: {:.12f}; validation loss: {:.12f}'.format(Tensor.numpy(lossmeas_train.data), Tensor.numpy(lossmeas_val.data)) )\n\t\tprint('')\n\n\t# Save the final network\n\tnnet.eval()\n\ttorch.save( nnet.state_dict(), os.path.join(out_base_dir,'epoch{}_net.pth'.format(noepoch)) )\n\tnnet_file = open(os.path.join(out_base_dir,'epoch{}_net.bin'.format(noepoch)),'wb')\n\tpk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL) \n\tnnet_file.close()\n\n\t# Save the training and validation loss\n\tlosstrain_file = open(os.path.join(out_base_dir,'losstrain.bin'),'wb')\n\tpk.dump(losstrain,losstrain_file,pk.HIGHEST_PROTOCOL) \n\tlosstrain_file.close()\n\n\tlossval_file = 
open(os.path.join(out_base_dir,'lossval.bin'),'wb')\n\tpk.dump(lossval,lossval_file,pk.HIGHEST_PROTOCOL) \n\tlossval_file.close()\n\tnp.savetxt(os.path.join(out_base_dir,'lossval_min.txt'), [np.nanmin(lossval)], fmt='%.12f', delimiter=' ')\n\n\n"
] | [
[
"numpy.ones",
"torch.Tensor",
"torch.nn.MSELoss",
"numpy.zeros",
"torch.manual_seed",
"numpy.random.seed",
"numpy.float32",
"numpy.nanmin",
"numpy.max",
"torch.Tensor.numpy",
"numpy.concatenate",
"numpy.loadtxt"
]
] |
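The training script above rescales network outputs and targets with per-parameter slope and offset tensors before computing the MSE loss, so tissue parameters with very different physical ranges (T1 in ms versus volume fractions, say) contribute comparably. A condensed sketch of that normalisation, assuming param_min/param_max are plain 1-D sequences; norm_tensors is an illustrative helper, not the qmripar API:

import torch
from torch import nn

def norm_tensors(param_min, param_max, nrows):
    # The linear map x -> x*slope + offset sends [param_min, param_max] onto [0, 1].
    pmin = torch.as_tensor(param_min, dtype=torch.float32)
    pmax = torch.as_tensor(param_max, dtype=torch.float32)
    slope = 1.0 / (pmax - pmin)
    offset = -pmin / (pmax - pmin)
    # Broadcast to (nrows, nparams), as slope_norm_tr/offset_norm_tr do above.
    return slope.expand(nrows, -1), offset.expand(nrows, -1)

# Example bounds in the style of the br_sirsmdt help text above.
slope, offset = norm_tensors([0.5, 0.2, 250.0, 0.5], [2.4, 0.9, 3000.0, 5.0], nrows=8)
pred, target = torch.rand(8, 4) * 2.0, torch.rand(8, 4) * 2.0
loss = nn.MSELoss()(pred * slope + offset, target * slope + offset)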
Morisset/Mexico-datos | [
"29d5ed1079732d5d809bc14eb5d3438662508728"
] | [
"codigo/process_datos_abiertos.py"
] | [
"import os\nimport csv\nimport pandas as pd\nimport geopandas as gpd\nfrom datetime import datetime, timedelta\n\n\n## PROCESSING FUNCTIONS ##\n\ndef confirmados_diarios_por_estado(datos, entidades):\n \"\"\"\n Calcula el número total de casos confirmados por fecha y por estado.\n\n Input:\n - datos: datos abiertos de COVID-19 en México disponibles en [1].\n\n Output:\n - serie: Serie de tiempo de nuevos casos confirmados por dia para cada\n entidad federativa en México.\n\n [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127\n \"\"\"\n series = (datos[datos['RESULTADO'] == 1]\n .groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])\n .count()['ORIGEN'])\n return get_formato_series(series, entidades)\n\n\ndef negativos_diarios_por_estado(datos, entidades):\n \"\"\"\n Calcula el número total de casos negativos por fecha y por estado.\n\n Input:\n - datos: datos abiertos de COVID-19 en México disponibles en [1].\n\n Output:\n - series: Serie de tiempo de nuevas pruebas negativas por dia para cada\n entidad federativa en México.\n\n [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127\n\n \"\"\"\n series = (datos[datos['RESULTADO'] == 2]\n .groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])\n .count()['ORIGEN'])\n return get_formato_series(series, entidades)\n\n\ndef pruebas_pendientes_diarias_por_estado(datos, entidades):\n \"\"\"\n Calcula el número de pruebas pendientes por fecha y por estado.\n\n Input:\n - datos: datos abiertos de COVID-19 en México disponibles en [1].\n\n Output:\n - series: Serie de tiempo de nuevas pruebas pendientes por dia para cada\n entidad federativa en México.\n\n [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127\n\n \"\"\"\n series = (datos[datos['RESULTADO'] == 3]\n .groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])\n .count()['ORIGEN'])\n return get_formato_series(series, entidades)\n\n\ndef pruebas_totales_diarias_por_estado(datos, entidades):\n \"\"\"\n Calcula el número total de pruebas realizadas por fecha y por estado.\n\n Input:\n - datos: datos abiertos de COVID-19 en México disponibles en [1].\n\n Output:\n - series: Serie de tiempo de nuevas pruebas totales por dia para cada\n entidad federativa en México.\n\n [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127\n\n \"\"\"\n series = (datos\n .groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])\n .count()['ORIGEN'])\n return get_formato_series(series, entidades)\n\n\ndef defunciones_diarias_por_estado(datos, entidades):\n \"\"\"\n Calcula el número de defunciones por fecha y por estado.\n\n Input:\n - datos: datos abiertos de COVID-19 en México disponibles en [1].\n\n Output:\n - series: Serie de tiempo de nuevas muertes por dia para cada entidad\n federativa en México.\n\n [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127\n\n \"\"\"\n idx = (datos['RESULTADO'] == 1) & (datos['FECHA_DEF'] != '9999-99-99')\n series = (datos[idx]\n .groupby(['ENTIDAD_UM', 'FECHA_DEF'])\n .count()['ORIGEN'])\n return get_formato_series(series, entidades)\n\n\ndef hospitalizados_diarios_por_estado(datos, entidades):\n \"\"\"\n Calcula el número de pacientes hopitalizados por fecha y por estado.\n\n Input:\n - datos: datos abiertos de COVID-19 en México disponibles en [1].\n\n Output:\n - series: Serie de tiempo de nuevos hospitalizados por dia para cada entidad\n federativa en México.\n\n [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127\n\n \"\"\"\n # esta serie incluye UCI + noUCI\n idx = (datos['RESULTADO'] == 1) & (datos['TIPO_PACIENTE'] == 2)\n series = (datos[idx]\n 
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])\n .count()['ORIGEN'])\n return get_formato_series(series, entidades)\n\n\ndef ambulatorios_diarios_por_estado(datos, entidades):\n \"\"\"\n Calcula el número de pacientes ambulatorios por fecha y por estado.\n\n Input:\n - datos: datos abiertos de COVID-19 en México disponibles en [1].\n\n Output:\n - series: Serie de tiempo de nuevos pacientes infectados ambulatorios por\n dia para cada entidad federativa en México.\n\n [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127\n\n \"\"\"\n idx = (datos['RESULTADO'] == 1) & (datos['TIPO_PACIENTE'] == 1)\n series = (datos[idx]\n .groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])\n .count()['ORIGEN'])\n return get_formato_series(series, entidades)\n\n\ndef uci_diarios_por_estado(datos, entidades):\n \"\"\"\n Calcula el número de pacientes ingresados a una UCI por fecha y por estado.\n\n Input:\n - datos: datos abiertos de COVID-19 en México disponibles en [1].\n\n Output:\n - series: Serie de tiempo de nuevos pacientes en UCI por dia para cada\n entidad federativa en México.\n\n [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127\n\n \"\"\"\n idx = (datos['RESULTADO'] == 1) & (datos['UCI'] == 1)\n series = (datos[idx]\n .groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])\n .count()['ORIGEN'])\n return get_formato_series(series, entidades)\n\n\n## HELPER FUNCTIONS ##\n\ndef get_formato_series(series, entidades):\n \"\"\"\n Convierte groupby a formato tidy (columnas son estados e indice es la fecha).\n\n Input:\n - series:\n DataFrame en formato groupby agrupada for una columna que corresponde a\n entidades federativas y otra columna que corresponde a una fecha.\n - entidades:\n diccionario de clave_de_entidad => nombre_de_entidad.\n\n Output:\n - series:\n DataFrame en formato tidy, con los nombres de los estados como columnas\n (la primer columna es el total nacional) y con la fecha como indice.\n\n \"\"\"\n diccionario_cambio_edos = {'Ciudad De México': 'Ciudad de México',\n 'Coahuila De Zaragoza': 'Coahuila',\n 'Michoacán De Ocampo': 'Michoacán',\n 'Veracruz De Ignacio De La Llave': 'Veracruz'}\n\n series = series.unstack(level=0).fillna(0).astype('int')\n\n # Formato para mexicovid19/Mexico-datos\n series.index.name = 'Fecha'\n series.index = pd.to_datetime(series.index)\n # Formato oficial de DGE\n series = series.rename(columns=entidades)\n # Formato específico de nuestro repositorio\n series = series.rename(columns=diccionario_cambio_edos)\n series = series.reindex(sorted(series.columns), axis=1)\n # Formato de agregado nacional\n series.loc[:, 'Nacional'] = series.sum(axis=1)\n # Reordenar columnas para que los casos nacionales queden primero\n cols = list(series.columns)\n cols = cols[-1:] + cols[:-1]\n series = series[cols]\n\n # Llenamos ceros para fechas sin informacion\n idx = pd.date_range(series.index.min(), series.index.max())\n series = series.reindex(idx, fill_value=0)\n series.index.name = 'Fecha'\n\n return series\n\n\nif __name__ == '__main__':\n\n update_time = datetime.now() - timedelta(hours=6)\n date = datetime.now() - timedelta(days=1)\n date_filename = date.strftime('%Y%m%d')\n date_iso = date.strftime('%Y-%m-%d')\n\n repo = '..'\n dir_datos_abiertos = os.path.join(repo, 'datos_abiertos', '')\n dir_datos = os.path.join(repo, 'datos', '')\n dir_geo = os.path.join(dir_datos, 'geograficos', '')\n dir_demograficos = os.path.join(dir_datos, 'demograficos_variables', '')\n\n dir_series_dge = os.path.join(dir_datos_abiertos, 'series_de_tiempo', '')\n dir_series = 
os.path.join(dir_datos, 'series_de_tiempo', '')\n\n dir_input = os.path.join(dir_datos_abiertos, 'raw', '')\n input_filename = dir_input + f'datos_abiertos_{date_filename}.zip'\n\n ## READING ##\n\n # Lee los datos abiertos\n datos_abiertos_df = pd.read_csv(input_filename, compression='zip')\n\n # Lee catalogo de entidades (hoja de calculo 'Catálogo de ENTIDADES' en\n # el archivo 'diccionario_datos/Catalogos_0412.xlsx''; ha sido convertido a csv)\n cat = (pd.read_csv(dir_input + 'diccionario_datos/catalogo_entidades.csv')\n .set_index('CLAVE_ENTIDAD')['ENTIDAD_FEDERATIVA']\n .to_dict())\n # cambia mayúsculas de estados por formato título\n entidades = {key: val.title() for (key, val) in cat.items()}\n\n # Datos abiertos\n files = ['covid19_mex_confirmados.csv',\n 'covid19_mex_negativos.csv',\n 'covid19_mex_pendientes.csv',\n 'covid19_mex_pruebas-totales.csv',\n 'covid19_mex_muertes.csv',\n 'covid19_mex_hospitalizados.csv',\n 'covid19_mex_uci.csv',\n 'covid19_mex_ambulatorios.csv']\n\n funciones = [confirmados_diarios_por_estado,\n negativos_diarios_por_estado,\n pruebas_pendientes_diarias_por_estado,\n pruebas_totales_diarias_por_estado,\n defunciones_diarias_por_estado,\n hospitalizados_diarios_por_estado,\n uci_diarios_por_estado,\n ambulatorios_diarios_por_estado]\n\n dfs = [func(datos_abiertos_df, entidades) for func in funciones]\n\n for f, df in zip(files, dfs):\n df.to_csv(f'{dir_series_dge}/nuevos/{f}')\n df.cumsum().to_csv(f'{dir_series_dge}/acumulados/{f}')\n\n ## Series de tiempo estaticas (solo actualiza ultima fila) ##\n\n # Formato unix sin quotes\n csv.register_dialect('unixnq', delimiter=',', lineterminator='\\n',\n quoting=csv.QUOTE_NONE)\n\n # Totales por estado\n totales_file = dir_series + 'covid19_mex_casos_totales.csv'\n fila_totales = dfs[0].cumsum().tail(1) # confirmados_diarios_por_estado\n with open(totales_file, 'a') as f:\n writer = csv.writer(f, 'unixnq')\n writer.writerow([date_iso] + fila_totales.values[0].tolist())\n\n # Casos ultimas 24h\n nuevos_file = dir_series + 'covid19_mex_casos_nuevos.csv'\n totales_df = pd.read_csv(totales_file)\n fila_nuevos = (totales_df.iloc[-1, 1:] - totales_df.iloc[-2, 1:]).astype(int)\n with open(nuevos_file, 'a') as f:\n writer = csv.writer(f, 'unixnq')\n writer.writerow([date_iso] + fila_nuevos.values.tolist()) # a series\n\n # Muertes por estado\n muertes_file = dir_series + 'covid19_mex_muertes.csv'\n fila_muertes = dfs[4].cumsum().tail(1) # defunciones_diarias_por_estado\n with open(muertes_file, 'a') as f:\n writer = csv.writer(f, 'unixnq')\n writer.writerow([date_iso] + fila_muertes.values[0].tolist())\n\n # Muertes nuevas por estado\n muertes_nuevas_file = dir_series + 'covid19_mex_muertes_nuevas.csv'\n muertes_df = pd.read_csv(muertes_file)\n fila_nuevas = (muertes_df.iloc[-1, 1:] - muertes_df.iloc[-2, 1:]).astype(int)\n with open(muertes_nuevas_file, 'a') as f:\n writer = csv.writer(f, 'unixnq')\n writer.writerow([date_iso] + fila_nuevas.values.tolist()) # a series\n\n # Sospechosos por estado\n sospechosos_file = dir_series + 'covid19_mex_sospechosos.csv'\n # pruebas_pendientes_diarias_por_estado\n fila_sospechosos = dfs[2].cumsum().tail(1)\n with open(sospechosos_file, 'a') as f:\n writer = csv.writer(f, 'unixnq')\n writer.writerow([date_iso] + fila_sospechosos.values[0].tolist())\n\n # Sospechosos por estado\n negativos_file = dir_series + 'covid19_mex_negativos.csv'\n fila_negativos = dfs[1].cumsum().tail(1) # negativos_diarios_por_estado\n with open(negativos_file, 'a') as f:\n writer = csv.writer(f, 
'unixnq')\n writer.writerow([date_iso] + fila_negativos.values[0].tolist())\n\n ## Totales por estado en el archivo geojson ##\n geojson_file = dir_geo + 'mexico.geojson'\n edos_hoy_file = dir_datos + 'estados_hoy.csv'\n updated_file = dir_datos + 'last_updated.csv'\n\n gdf = gpd.read_file(geojson_file).set_index('name')\n gdf.totales = fila_totales.drop('Nacional', axis=1).squeeze()\n gdf.nuevos = fila_nuevos.drop('Nacional').squeeze() # series\n gdf.muertes = fila_muertes.drop('Nacional', axis=1).squeeze()\n gdf.muertes_nuevas = fila_nuevas.drop('Nacional').squeeze() # series\n gdf.sospechosos = fila_sospechosos.drop('Nacional', axis=1).squeeze()\n gdf.negativos = fila_negativos.drop('Nacional', axis=1).squeeze()\n gdf.totales_100k = gdf.totales * 100000 / gdf.population\n gdf.muertes_100k = gdf.muertes * 100000 / gdf.population\n\n gdf.updated_at = str(update_time).replace(' ', 'T')\n\n gdf = gdf.reset_index()\n assert gdf.shape[1] == 14\n\n gdf.to_file(geojson_file, driver='GeoJSON')\n gdf.loc[0:0, ['updated_at']].to_csv(updated_file, index=False)\n\n ### Estados hoy ###\n cols_edos_hoy = ['name', 'totales', 'nuevos',\n 'muertes', 'muertes_nuevas', 'sospechosos', 'negativos']\n\n map_cols = {'name': 'Estado',\n 'totales': 'Confirmados totales',\n 'nuevos': 'Confirmados nuevos',\n 'muertes': 'Defunciones',\n 'muertes_nuevas': 'Defunciones nuevas',\n 'sospechosos': 'Sospechosos totales',\n 'negativos': 'Negativos totales'}\n\n edos_hoy_df = gdf[cols_edos_hoy].rename(columns=map_cols)\n edos_hoy_df.to_csv(edos_hoy_file, index=False)\n\n print(f'Se procesaron exitosamente los datos abiertos de {input_filename}')\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime"
]
] |
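Every *_por_estado function above reduces to the same pattern: filter rows, group by (entidad, fecha), count, then let get_formato_series pivot states into columns and zero-fill missing dates. A toy demonstration of that pivot on fabricated rows (the column names follow the open-data schema used above; the values are made up):

import pandas as pd

raw = pd.DataFrame({'ENTIDAD_UM': [1, 1, 2],
                    'FECHA_INGRESO': ['2020-04-01', '2020-04-03', '2020-04-01'],
                    'ORIGEN': 0})
series = raw.groupby(['ENTIDAD_UM', 'FECHA_INGRESO']).count()['ORIGEN']
tidy = series.unstack(level=0).fillna(0).astype('int')   # states become columns
tidy.index = pd.to_datetime(tidy.index)
full_range = pd.date_range(tidy.index.min(), tidy.index.max())
tidy = tidy.reindex(full_range, fill_value=0)            # 2020-04-02 filled with zeros
print(tidy)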
mariolpantunes/ml-deti | [
"a47fdb5df70e3f6fda5768be14f97462dfe057fb"
] | [
"code/Ex02.py"
] | [
"import matplotlib.pyplot as plt\nimport arff\nimport numpy as np\nfrom sklearn import linear_model\n\n# Load dataset\ndataset = arff.load(open('dataset/dataset01.arff', 'r'))\ndata = np.array(dataset['data'])\n\n# Reshape vector\nX1 = data[:, 0].reshape(-1, 1)\nX2 = np.multiply(X1, X1)\nX = np.concatenate((X1, X2), axis=1)\nY = data[:, 1].reshape(-1, 1)\n\n# Plot points\nplt.scatter(X1, Y, color='black')\nplt.xticks(())\nplt.yticks(())\nplt.show()\n\n# Create linear regression object\nmodel = linear_model.LinearRegression()\n\n# Train the model using X and Y\nmodel.fit(X, Y)\n\n# The coefficients\nprint(\"Y = %.2fX^2 + %.2fX + %.2f\" % (model.coef_[0][0], model.coef_[0][1], model.intercept_))\n\n# The mean square error\nprint(\"Residual sum of squares: %.2f\" % np.mean((model.predict(X) - Y) ** 2))\n\n# Explained variance score: 1 is perfect prediction\nprint('Variance score: %.2f' % model.score(X, Y))\n\n# Plot outputs\nplt.scatter(X1, Y, color='black')\nplt.plot(X1, model.predict(X), color='blue', linewidth=3)\nplt.xticks(())\nplt.yticks(())\n\nplt.show()\n"
] | [
[
"numpy.multiply",
"matplotlib.pyplot.xticks",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.concatenate",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter"
]
] |
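Ex02.py above builds the quadratic feature by hand with np.multiply and np.concatenate before fitting. For comparison, the same quadratic fit expressed as a scikit-learn pipeline; the synthetic X1/Y below merely stand in for the ARFF columns, and the printed coefficient order accounts for the feature order being [X, X^2]:

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

X1 = np.linspace(-3, 3, 50).reshape(-1, 1)   # stand-in for data[:, 0]
Y = 2.0 * X1 ** 2 - 1.0 * X1 + 0.5           # stand-in for data[:, 1]

model = make_pipeline(PolynomialFeatures(degree=2, include_bias=False),
                      LinearRegression())
model.fit(X1, Y)

lin = model.named_steps['linearregression']
# Feature order is [X, X^2], so the quadratic coefficient is coef_[0][1].
print("Y = %.2fX^2 + %.2fX + %.2f" % (lin.coef_[0][1], lin.coef_[0][0], lin.intercept_[0]))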
JacobEkedahl/detect-intros-from-video | [
"9b2bac1c7209558711072f967a3359d2ca698cd4"
] | [
"src/stats/intro_stats.py"
] | [
"import matplotlib.pyplot as plt\n\nimport utils.extractor as extractor\nimport utils.file_handler as file_handler\nimport utils.time_handler as time_handler\n\n\ndef plot_intros():\n intros = extractor.get_intros_from_data()\n only_valid_intros = [x for x in intros if not x[\"end\"] == \"00:00:00\"]\n x_data = map(get_start_time_seconds, only_valid_intros) \n y_data = map(get_size_from_intro, only_valid_intros) \n # naming the x axis\n plt.xlabel('Start time of intro (Seconds)') \n # naming the y axis \n plt.ylabel('Length of intro (Seconds)')\n plt.grid(True)\n plt.scatter(list(x_data), list(y_data)) \n plt.show()\n\ndef plot_hist_sizes():\n intros = extractor.get_intros_from_data()\n only_valid_intros = [x for x in intros if not x[\"end\"] == \"00:00:00\"]\n x_data = list(map(get_size_from_intro, only_valid_intros))\n plt.xlabel('Length of intro (Seconds)') \n plt.ylabel('Frequency')\n plt.grid(True)\n plt.hist(x_data, bins=40)\n plt.show()\n\ndef plot_hist_frequency():\n intros = extractor.get_intros_from_data()\n only_valid_intros = [x for x in intros if not x[\"end\"] == \"00:00:00\"]\n x_data = list(map(get_start_time_seconds, only_valid_intros))\n plt.xlabel('Start time of intro (Seconds)') \n plt.ylabel('Frequency')\n plt.grid(True)\n plt.hist(x_data, bins=60)\n plt.show()\n\ndef plot_all_intros():\n x_titles = ['Start time of intro (Seconds)', 'Length of intro (Seconds)']\n y_title = 'Frequency'\n titles = ['Start times of intros','Lengths of intros']\n colors = ['blue', 'blue']\n bins = [60, 40]\n intros = extractor.get_intros_from_data()\n only_valid_intros = [x for x in intros if not x[\"end\"] == \"00:00:00\"]\n x_size = list(map(get_size_from_intro, only_valid_intros))\n x_start = list(map(get_start_time_seconds, only_valid_intros))\n x_data = [x_start, x_size]\n fig, axs = plt.subplots(1, 2)\n axs = axs.ravel()\n\n for idx, ax in enumerate(axs):\n ax.hist(x_data[idx], bins=bins[idx], fc=colors[idx])\n # ax.set_title(titles[idx])\n ax.set_xlabel(x_titles[idx])\n ax.set_ylabel(y_title)\n ax.grid()\n plt.tight_layout()\n plt.show()\n\n\ndef get_size_from_intro(intro):\n start = time_handler.timestamp(intro[\"start\"]) / 1000\n end = time_handler.timestamp(intro[\"end\"]) / 1000\n return abs(start - end)\n\ndef get_start_time_seconds(intro):\n return time_handler.timestamp(intro[\"start\"]) / 1000\n"
] | [
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xlabel"
]
] |
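The stats module above divides time_handler.timestamp(...) by 1000 before labelling its axes in seconds, which implies the helper returns milliseconds. A stand-in that makes that assumed contract explicit (timestamp_ms is hypothetical, not the project's actual helper):

from datetime import datetime

def timestamp_ms(hhmmss):
    # Hypothetical stand-in for utils.time_handler.timestamp: 'HH:MM:SS' -> milliseconds.
    t = datetime.strptime(hhmmss, '%H:%M:%S')
    return (t.hour * 3600 + t.minute * 60 + t.second) * 1000

intro = {'start': '00:01:30', 'end': '00:02:05'}
length_seconds = abs(timestamp_ms(intro['start']) - timestamp_ms(intro['end'])) / 1000
print(length_seconds)   # 35.0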
idanmoradarthas/DataScienceUtils | [
"be4806ebcb9ab0e2cdd189842227bd242f0c8910"
] | [
"tests/test_strings.py"
] | [
"import pandas\n\nfrom ds_utils.strings import append_tags_to_frame, extract_significant_terms_from_subset\n\n\ndef test_append_tags_to_frame():\n x_train = pandas.DataFrame([{\"article_name\": \"1\", \"article_tags\": \"ds,ml,dl\"},\n {\"article_name\": \"2\", \"article_tags\": \"ds,ml\"}])\n x_test = pandas.DataFrame([{\"article_name\": \"3\", \"article_tags\": \"ds,ml,py\"}])\n\n x_train_expected = pandas.DataFrame([{\"article_name\": \"1\", \"tag_ds\": 1, \"tag_ml\": 1, \"tag_dl\": 1},\n {\"article_name\": \"2\", \"tag_ds\": 1, \"tag_ml\": 1, \"tag_dl\": 0}],\n columns=[\"article_name\", \"tag_dl\", \"tag_ds\", \"tag_ml\"])\n x_test_expected = pandas.DataFrame([{\"article_name\": \"3\", \"tag_ds\": 1, \"tag_ml\": 1, \"tag_dl\": 0}],\n columns=[\"article_name\", \"tag_dl\", \"tag_ds\", \"tag_ml\"])\n\n x_train_with_tags, x_test_with_tags = append_tags_to_frame(x_train, x_test, \"article_tags\", \"tag_\")\n pandas.testing.assert_frame_equal(x_train_expected, x_train_with_tags, check_like=True)\n pandas.testing.assert_frame_equal(x_test_expected, x_test_with_tags, check_like=True)\n\n\ndef test_significant_terms():\n corpus = ['This is the first document.', 'This document is the second document.', 'And this is the third one.',\n 'Is this the first document?']\n data_frame = pandas.DataFrame(corpus, columns=[\"content\"])\n subset_data_frame = data_frame[data_frame.index > 1]\n terms = extract_significant_terms_from_subset(data_frame, subset_data_frame, \"content\")\n\n expected = pandas.Series(\n [1.0, 1.0, 1.0, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.5, 0.25, 0.0],\n index=['third', 'one', 'and', 'this', 'the', 'is', 'first', 'document', 'second'])\n\n pandas.testing.assert_series_equal(expected, terms)\n"
] | [
[
"pandas.Series",
"pandas.testing.assert_series_equal",
"pandas.DataFrame",
"pandas.testing.assert_frame_equal"
]
] |
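The expected frames in test_append_tags_to_frame above amount to a one-hot expansion of the comma-separated tag column, with test-set columns restricted to tags seen in training (note the dropped tag_py). A rough equivalent for the training side using pandas built-ins; this is a sketch, not the ds_utils implementation:

import pandas as pd

x_train = pd.DataFrame([{"article_name": "1", "article_tags": "ds,ml,dl"},
                        {"article_name": "2", "article_tags": "ds,ml"}])
tags = x_train["article_tags"].str.get_dummies(sep=",").add_prefix("tag_")
expanded = pd.concat([x_train.drop(columns="article_tags"), tags], axis=1)
print(expanded)
#   article_name  tag_dl  tag_ds  tag_ml
# 0            1       1       1       1
# 1            2       0       1       1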
slomrafgrav/models | [
"91a59c78e8c48e8a1b2fec37143e52dae3f066c1"
] | [
"research/object_detection/eval_util.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common utility functions for evaluation.\"\"\"\nimport collections\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom object_detection.core import box_list\nfrom object_detection.core import box_list_ops\nfrom object_detection.core import keypoint_ops\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.metrics import coco_evaluation\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import object_detection_evaluation\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\nfrom object_detection.utils import visualization_utils as vis_utils\n\nslim = tf.contrib.slim\n\n# A dictionary of metric names to classes that implement the metric. The classes\n# in the dictionary must implement\n# utils.object_detection_evaluation.DetectionEvaluator interface.\nEVAL_METRICS_CLASS_DICT = {\n 'coco_detection_metrics':\n coco_evaluation.CocoDetectionEvaluator,\n 'coco_mask_metrics':\n coco_evaluation.CocoMaskEvaluator,\n 'oid_challenge_detection_metrics':\n object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,\n 'pascal_voc_detection_metrics':\n object_detection_evaluation.PascalDetectionEvaluator,\n 'weighted_pascal_voc_detection_metrics':\n object_detection_evaluation.WeightedPascalDetectionEvaluator,\n 'pascal_voc_instance_segmentation_metrics':\n object_detection_evaluation.PascalInstanceSegmentationEvaluator,\n 'weighted_pascal_voc_instance_segmentation_metrics':\n object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,\n 'oid_V2_detection_metrics':\n object_detection_evaluation.OpenImagesDetectionEvaluator,\n}\n\nEVAL_DEFAULT_METRIC = 'coco_detection_metrics'\n\n\ndef write_metrics(metrics, global_step, summary_dir):\n \"\"\"Write metrics to a summary directory.\n\n Args:\n metrics: A dictionary containing metric names and values.\n global_step: Global step at which the metrics are computed.\n summary_dir: Directory to write tensorflow summaries to.\n \"\"\"\n tf.logging.info('Writing metrics to tf summary.')\n summary_writer = tf.summary.FileWriterCache.get(summary_dir)\n for key in sorted(metrics):\n summary = tf.Summary(value=[\n tf.Summary.Value(tag=key, simple_value=metrics[key]),\n ])\n summary_writer.add_summary(summary, global_step)\n tf.logging.info('%s: %f', key, metrics[key])\n tf.logging.info('Metrics written to tf summary.')\n\n\n# TODO(rathodv): Add tests.\ndef visualize_detection_results(result_dict,\n tag,\n global_step,\n categories,\n summary_dir='',\n export_dir='',\n agnostic_mode=False,\n show_groundtruth=False,\n groundtruth_box_visualization_color='black',\n min_score_thresh=.5,\n max_num_predictions=20,\n skip_scores=False,\n skip_labels=False,\n keep_image_id_for_visualization_export=False):\n \"\"\"Visualizes detection results 
and writes visualizations to image summaries.\n\n This function visualizes an image with its detected bounding boxes and writes\n to image summaries which can be viewed on tensorboard. It optionally also\n writes images to a directory. In the case of missing entry in the label map,\n unknown class name in the visualization is shown as \"N/A\".\n\n Args:\n result_dict: a dictionary holding groundtruth and detection\n data corresponding to each image being evaluated. The following keys\n are required:\n 'original_image': a numpy array representing the image with shape\n [1, height, width, 3] or [1, height, width, 1]\n 'detection_boxes': a numpy array of shape [N, 4]\n 'detection_scores': a numpy array of shape [N]\n 'detection_classes': a numpy array of shape [N]\n The following keys are optional:\n 'groundtruth_boxes': a numpy array of shape [N, 4]\n 'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]\n Detections are assumed to be provided in decreasing order of score and for\n display, and we assume that scores are probabilities between 0 and 1.\n tag: tensorboard tag (string) to associate with image.\n global_step: global step at which the visualization are generated.\n categories: a list of dictionaries representing all possible categories.\n Each dict in this list has the following keys:\n 'id': (required) an integer id uniquely identifying this category\n 'name': (required) string representing category name\n e.g., 'cat', 'dog', 'pizza'\n 'supercategory': (optional) string representing the supercategory\n e.g., 'animal', 'vehicle', 'food', etc\n summary_dir: the output directory to which the image summaries are written.\n export_dir: the output directory to which images are written. If this is\n empty (default), then images are not exported.\n agnostic_mode: boolean (default: False) controlling whether to evaluate in\n class-agnostic mode or not.\n show_groundtruth: boolean (default: False) controlling whether to show\n groundtruth boxes in addition to detected boxes\n groundtruth_box_visualization_color: box color for visualizing groundtruth\n boxes\n min_score_thresh: minimum score threshold for a box to be visualized\n max_num_predictions: maximum number of detections to visualize\n skip_scores: whether to skip score when drawing a single detection\n skip_labels: whether to skip label when drawing a single detection\n keep_image_id_for_visualization_export: whether to keep image identifier in\n filename when exported to export_dir\n Raises:\n ValueError: if result_dict does not contain the expected keys (i.e.,\n 'original_image', 'detection_boxes', 'detection_scores',\n 'detection_classes')\n \"\"\"\n detection_fields = fields.DetectionResultFields\n input_fields = fields.InputDataFields\n if not set([\n input_fields.original_image,\n detection_fields.detection_boxes,\n detection_fields.detection_scores,\n detection_fields.detection_classes,\n ]).issubset(set(result_dict.keys())):\n raise ValueError('result_dict does not contain all expected keys.')\n if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:\n raise ValueError('If show_groundtruth is enabled, result_dict must contain '\n 'groundtruth_boxes.')\n tf.logging.info('Creating detection visualizations.')\n category_index = label_map_util.create_category_index(categories)\n\n image = np.squeeze(result_dict[input_fields.original_image], axis=0)\n if image.shape[2] == 1: # If one channel image, repeat in RGB.\n image = np.tile(image, [1, 1, 3])\n detection_boxes = 
result_dict[detection_fields.detection_boxes]\n detection_scores = result_dict[detection_fields.detection_scores]\n detection_classes = np.int32((result_dict[\n detection_fields.detection_classes]))\n detection_keypoints = result_dict.get(detection_fields.detection_keypoints)\n detection_masks = result_dict.get(detection_fields.detection_masks)\n detection_boundaries = result_dict.get(detection_fields.detection_boundaries)\n\n # Plot groundtruth underneath detections\n if show_groundtruth:\n groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]\n groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)\n vis_utils.visualize_boxes_and_labels_on_image_array(\n image=image,\n boxes=groundtruth_boxes,\n classes=None,\n scores=None,\n category_index=category_index,\n keypoints=groundtruth_keypoints,\n use_normalized_coordinates=False,\n max_boxes_to_draw=None,\n groundtruth_box_visualization_color=groundtruth_box_visualization_color)\n vis_utils.visualize_boxes_and_labels_on_image_array(\n image,\n detection_boxes,\n detection_classes,\n detection_scores,\n category_index,\n instance_masks=detection_masks,\n instance_boundaries=detection_boundaries,\n keypoints=detection_keypoints,\n use_normalized_coordinates=False,\n max_boxes_to_draw=max_num_predictions,\n min_score_thresh=min_score_thresh,\n agnostic_mode=agnostic_mode,\n skip_scores=skip_scores,\n skip_labels=skip_labels)\n\n if export_dir:\n if keep_image_id_for_visualization_export and result_dict[fields.\n InputDataFields()\n .key]:\n export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(\n tag, result_dict[fields.InputDataFields().key]))\n else:\n export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))\n vis_utils.save_image_array_as_png(image, export_path)\n\n summary = tf.Summary(value=[\n tf.Summary.Value(\n tag=tag,\n image=tf.Summary.Image(\n encoded_image_string=vis_utils.encode_image_array_as_png_str(\n image)))\n ])\n summary_writer = tf.summary.FileWriterCache.get(summary_dir)\n summary_writer.add_summary(summary, global_step)\n\n tf.logging.info('Detection visualizations written to summary with tag %s.',\n tag)\n\n\ndef _run_checkpoint_once(tensor_dict,\n evaluators=None,\n batch_processor=None,\n checkpoint_dirs=None,\n variables_to_restore=None,\n restore_fn=None,\n num_batches=1,\n master='',\n save_graph=False,\n save_graph_dir='',\n losses_dict=None,\n eval_export_path=None):\n \"\"\"Evaluates metrics defined in evaluators and returns summaries.\n\n This function loads the latest checkpoint in checkpoint_dirs and evaluates\n all metrics defined in evaluators. The metrics are processed in batch by the\n batch_processor.\n\n Args:\n tensor_dict: a dictionary holding tensors representing a batch of detections\n and corresponding groundtruth annotations.\n evaluators: a list of object of type DetectionEvaluator to be used for\n evaluation. Note that the metric names produced by different evaluators\n must be unique.\n batch_processor: a function taking four arguments:\n 1. tensor_dict: the same tensor_dict that is passed in as the first\n argument to this function.\n 2. sess: a tensorflow session\n 3. batch_index: an integer representing the index of the batch amongst\n all batches\n By default, batch_processor is None, which defaults to running:\n return sess.run(tensor_dict)\n To skip an image, it suffices to return an empty dictionary in place of\n result_dict.\n checkpoint_dirs: list of directories to load into an EnsembleModel. 
If it\n has only one directory, EnsembleModel will not be used --\n a DetectionModel\n will be instantiated directly. Not used if restore_fn is set.\n variables_to_restore: None, or a dictionary mapping variable names found in\n a checkpoint to model variables. The dictionary would normally be\n generated by creating a tf.train.ExponentialMovingAverage object and\n calling its variables_to_restore() method. Not used if restore_fn is set.\n restore_fn: None, or a function that takes a tf.Session object and correctly\n restores all necessary variables from the correct checkpoint file. If\n None, attempts to restore from the first directory in checkpoint_dirs.\n num_batches: the number of batches to use for evaluation.\n master: the location of the Tensorflow session.\n save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.\n save_graph_dir: where to store the Tensorflow graph on disk. If save_graph\n is True this must be non-empty.\n losses_dict: optional dictionary of scalar detection losses.\n eval_export_path: Path for saving a json file that contains the detection\n results in json format.\n\n Returns:\n global_step: the count of global steps.\n all_evaluator_metrics: A dictionary containing metric names and values.\n\n Raises:\n ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least\n one element.\n ValueError: if save_graph is True and save_graph_dir is not defined.\n \"\"\"\n if save_graph and not save_graph_dir:\n raise ValueError('`save_graph_dir` must be defined.')\n sess = tf.Session(master, graph=tf.get_default_graph())\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n sess.run(tf.tables_initializer())\n if restore_fn:\n restore_fn(sess)\n else:\n if not checkpoint_dirs:\n raise ValueError('`checkpoint_dirs` must have at least one entry.')\n checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])\n saver = tf.train.Saver(variables_to_restore)\n saver.restore(sess, checkpoint_file)\n\n if save_graph:\n tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')\n\n counters = {'skipped': 0, 'success': 0}\n aggregate_result_losses_dict = collections.defaultdict(list)\n with tf.contrib.slim.queues.QueueRunners(sess):\n try:\n for batch in range(int(num_batches)):\n if (batch + 1) % 100 == 0:\n tf.logging.info('Running eval ops batch %d/%d', batch + 1,\n num_batches)\n if not batch_processor:\n try:\n if not losses_dict:\n losses_dict = {}\n result_dict, result_losses_dict = sess.run([tensor_dict,\n losses_dict])\n counters['success'] += 1\n except tf.errors.InvalidArgumentError:\n tf.logging.info('Skipping image')\n counters['skipped'] += 1\n result_dict = {}\n else:\n result_dict, result_losses_dict = batch_processor(\n tensor_dict, sess, batch, counters, losses_dict=losses_dict)\n if not result_dict:\n continue\n for key, value in iter(result_losses_dict.items()):\n aggregate_result_losses_dict[key].append(value)\n for evaluator in evaluators:\n # TODO(b/65130867): Use image_id tensor once we fix the input data\n # decoders to return correct image_id.\n # TODO(akuznetsa): result_dict contains batches of images, while\n # add_single_ground_truth_image_info expects a single image. 
Fix\n if (isinstance(result_dict, dict) and\n fields.InputDataFields.key in result_dict and\n result_dict[fields.InputDataFields.key]):\n image_id = result_dict[fields.InputDataFields.key]\n else:\n image_id = batch\n evaluator.add_single_ground_truth_image_info(\n image_id=image_id, groundtruth_dict=result_dict)\n evaluator.add_single_detected_image_info(\n image_id=image_id, detections_dict=result_dict)\n tf.logging.info('Running eval batches done.')\n except tf.errors.OutOfRangeError:\n tf.logging.info('Done evaluating -- epoch limit reached')\n finally:\n # When done, ask the threads to stop.\n tf.logging.info('# success: %d', counters['success'])\n tf.logging.info('# skipped: %d', counters['skipped'])\n all_evaluator_metrics = {}\n if eval_export_path and eval_export_path is not None:\n for evaluator in evaluators:\n if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or\n isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):\n tf.logging.info('Started dumping to json file.')\n evaluator.dump_detections_to_json_file(\n json_output_path=eval_export_path)\n tf.logging.info('Finished dumping to json file.')\n for evaluator in evaluators:\n metrics = evaluator.evaluate()\n evaluator.clear()\n if any(key in all_evaluator_metrics for key in metrics):\n raise ValueError('Metric names between evaluators must not collide.')\n all_evaluator_metrics.update(metrics)\n global_step = tf.train.global_step(sess, tf.train.get_global_step())\n\n for key, value in iter(aggregate_result_losses_dict.items()):\n all_evaluator_metrics['Losses/' + key] = np.mean(value)\n sess.close()\n return (global_step, all_evaluator_metrics)\n\n\n# TODO(rathodv): Add tests.\ndef repeated_checkpoint_run(tensor_dict,\n summary_dir,\n evaluators,\n batch_processor=None,\n checkpoint_dirs=None,\n variables_to_restore=None,\n restore_fn=None,\n num_batches=1,\n eval_interval_secs=120,\n max_number_of_evaluations=None,\n master='',\n save_graph=False,\n save_graph_dir='',\n losses_dict=None,\n eval_export_path=None):\n \"\"\"Periodically evaluates desired tensors using checkpoint_dirs or restore_fn.\n\n This function repeatedly loads a checkpoint and evaluates a desired\n set of tensors (provided by tensor_dict) and hands the resulting numpy\n arrays to a function result_processor which can be used to further\n process/save/visualize the results.\n\n Args:\n tensor_dict: a dictionary holding tensors representing a batch of detections\n and corresponding groundtruth annotations.\n summary_dir: a directory to write metrics summaries.\n evaluators: a list of object of type DetectionEvaluator to be used for\n evaluation. Note that the metric names produced by different evaluators\n must be unique.\n batch_processor: a function taking three arguments:\n 1. tensor_dict: the same tensor_dict that is passed in as the first\n argument to this function.\n 2. sess: a tensorflow session\n 3. batch_index: an integer representing the index of the batch amongst\n all batches\n By default, batch_processor is None, which defaults to running:\n return sess.run(tensor_dict)\n checkpoint_dirs: list of directories to load into a DetectionModel or an\n EnsembleModel if restore_fn isn't set. Also used to determine when to run\n next evaluation. Must have at least one element.\n variables_to_restore: None, or a dictionary mapping variable names found in\n a checkpoint to model variables. 
The dictionary would normally be\n generated by creating a tf.train.ExponentialMovingAverage object and\n calling its variables_to_restore() method. Not used if restore_fn is set.\n restore_fn: a function that takes a tf.Session object and correctly restores\n all necessary variables from the correct checkpoint file.\n num_batches: the number of batches to use for evaluation.\n eval_interval_secs: the number of seconds between each evaluation run.\n max_number_of_evaluations: the max number of iterations of the evaluation.\n If the value is left as None the evaluation continues indefinitely.\n master: the location of the Tensorflow session.\n save_graph: whether or not the Tensorflow graph is saved as a pbtxt file.\n save_graph_dir: where to save on disk the Tensorflow graph. If store_graph\n is True this must be non-empty.\n losses_dict: optional dictionary of scalar detection losses.\n eval_export_path: Path for saving a json file that contains the detection\n results in json format.\n\n Returns:\n metrics: A dictionary containing metric names and values in the latest\n evaluation.\n\n Raises:\n ValueError: if max_num_of_evaluations is not None or a positive number.\n ValueError: if checkpoint_dirs doesn't have at least one element.\n \"\"\"\n if max_number_of_evaluations and max_number_of_evaluations <= 0:\n raise ValueError(\n '`number_of_steps` must be either None or a positive number.')\n\n if not checkpoint_dirs:\n raise ValueError('`checkpoint_dirs` must have at least one entry.')\n\n last_evaluated_model_path = None\n number_of_evaluations = 0\n while True:\n start = time.time()\n tf.logging.info('Starting evaluation at ' + time.strftime(\n '%Y-%m-%d-%H:%M:%S', time.gmtime()))\n model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])\n if not model_path:\n tf.logging.info('No model found in %s. Will try again in %d seconds',\n checkpoint_dirs[0], eval_interval_secs)\n elif model_path == last_evaluated_model_path:\n tf.logging.info('Found already evaluated checkpoint. 
Will try again in '\n '%d seconds', eval_interval_secs)\n else:\n last_evaluated_model_path = model_path\n global_step, metrics = _run_checkpoint_once(\n tensor_dict,\n evaluators,\n batch_processor,\n checkpoint_dirs,\n variables_to_restore,\n restore_fn,\n num_batches,\n master,\n save_graph,\n save_graph_dir,\n losses_dict=losses_dict,\n eval_export_path=eval_export_path)\n write_metrics(metrics, global_step, summary_dir)\n number_of_evaluations += 1\n\n if (max_number_of_evaluations and\n number_of_evaluations >= max_number_of_evaluations):\n tf.logging.info('Finished evaluation!')\n break\n time_to_next_eval = start + eval_interval_secs - time.time()\n if time_to_next_eval > 0:\n time.sleep(time_to_next_eval)\n\n return metrics\n\n\ndef _scale_box_to_absolute(args):\n boxes, image_shape = args\n return box_list_ops.to_absolute_coordinates(\n box_list.BoxList(boxes), image_shape[0], image_shape[1]).get()\n\n\ndef _resize_detection_masks(args):\n detection_boxes, detection_masks, image_shape = args\n detection_masks_reframed = ops.reframe_box_masks_to_image_masks(\n detection_masks, detection_boxes, image_shape[0], image_shape[1])\n return tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n\n\ndef _resize_groundtruth_masks(args):\n mask, image_shape = args\n mask = tf.expand_dims(mask, 3)\n mask = tf.image.resize_images(\n mask,\n image_shape,\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,\n align_corners=True)\n return tf.cast(tf.squeeze(mask, 3), tf.uint8)\n\n\ndef _scale_keypoint_to_absolute(args):\n keypoints, image_shape = args\n return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])\n\n\ndef result_dict_for_single_example(image,\n key,\n detections,\n groundtruth=None,\n class_agnostic=False,\n scale_to_absolute=False):\n \"\"\"Merges all detection and groundtruth information for a single example.\n\n Note that evaluation tools require classes that are 1-indexed, and so this\n function performs the offset. If `class_agnostic` is True, all output classes\n have label 1.\n\n Args:\n image: A single 4D uint8 image tensor of shape [1, H, W, C].\n key: A single string tensor identifying the image.\n detections: A dictionary of detections, returned from\n DetectionModel.postprocess().\n groundtruth: (Optional) Dictionary of groundtruth items, with fields:\n 'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in\n normalized coordinates.\n 'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.\n 'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)\n 'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)\n 'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)\n 'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)\n 'groundtruth_instance_masks': 3D int64 tensor of instance masks\n (Optional).\n class_agnostic: Boolean indicating whether the detections are class-agnostic\n (i.e. binary). Default False.\n scale_to_absolute: Boolean indicating whether boxes and keypoints should be\n scaled to absolute coordinates. Note that for IoU based evaluations, it\n does not matter whether boxes are expressed in absolute or relative\n coordinates. 
Default False.\n\n Returns:\n A dictionary with:\n 'original_image': A [1, H, W, C] uint8 image tensor.\n 'key': A string tensor with image identifier.\n 'detection_boxes': [max_detections, 4] float32 tensor of boxes, in\n normalized or absolute coordinates, depending on the value of\n `scale_to_absolute`.\n 'detection_scores': [max_detections] float32 tensor of scores.\n 'detection_classes': [max_detections] int64 tensor of 1-indexed classes.\n 'detection_masks': [max_detections, H, W] float32 tensor of binarized\n masks, reframed to full image masks.\n 'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in\n normalized or absolute coordinates, depending on the value of\n `scale_to_absolute`. (Optional)\n 'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.\n (Optional)\n 'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)\n 'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)\n 'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)\n 'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)\n 'groundtruth_instance_masks': 3D int64 tensor of instance masks\n (Optional).\n\n \"\"\"\n\n if groundtruth:\n max_gt_boxes = tf.shape(\n groundtruth[fields.InputDataFields.groundtruth_boxes])[0]\n for gt_key in groundtruth:\n # expand groundtruth dict along the batch dimension.\n groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)\n\n for detection_key in detections:\n detections[detection_key] = tf.expand_dims(\n detections[detection_key][0], axis=0)\n\n batched_output_dict = result_dict_for_batched_example(\n image,\n tf.expand_dims(key, 0),\n detections,\n groundtruth,\n class_agnostic,\n scale_to_absolute,\n max_gt_boxes=max_gt_boxes)\n\n exclude_keys = [\n fields.InputDataFields.original_image,\n fields.DetectionResultFields.num_detections,\n fields.InputDataFields.num_groundtruth_boxes\n ]\n\n output_dict = {\n fields.InputDataFields.original_image:\n batched_output_dict[fields.InputDataFields.original_image]\n }\n\n for key in batched_output_dict:\n # remove the batch dimension.\n if key not in exclude_keys:\n output_dict[key] = tf.squeeze(batched_output_dict[key], 0)\n return output_dict\n\n\ndef result_dict_for_batched_example(images,\n keys,\n detections,\n groundtruth=None,\n class_agnostic=False,\n scale_to_absolute=False,\n original_image_spatial_shapes=None,\n true_image_shapes=None,\n max_gt_boxes=None):\n \"\"\"Merges all detection and groundtruth information for a single example.\n\n Note that evaluation tools require classes that are 1-indexed, and so this\n function performs the offset. If `class_agnostic` is True, all output classes\n have label 1.\n\n Args:\n images: A single 4D uint8 image tensor of shape [batch_size, H, W, C].\n keys: A [batch_size] string tensor with image identifier.\n detections: A dictionary of detections, returned from\n DetectionModel.postprocess().\n groundtruth: (Optional) Dictionary of groundtruth items, with fields:\n 'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor\n of boxes, in normalized coordinates.\n 'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of\n 1-indexed classes.\n 'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of\n bbox area. (Optional)\n 'groundtruth_is_crowd':[batch_size, max_number_of_boxes] int64\n tensor. (Optional)\n 'groundtruth_difficult': [batch_size, max_number_of_boxes] int64\n tensor. (Optional)\n 'groundtruth_group_of': [batch_size, max_number_of_boxes] int64\n tensor. 
(Optional)\n 'groundtruth_instance_masks': 4D int64 tensor of instance\n masks (Optional).\n class_agnostic: Boolean indicating whether the detections are class-agnostic\n (i.e. binary). Default False.\n scale_to_absolute: Boolean indicating whether boxes and keypoints should be\n scaled to absolute coordinates. Note that for IoU based evaluations, it\n does not matter whether boxes are expressed in absolute or relative\n coordinates. Default False.\n original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]\n used to resize the image. When set to None, the image size is retained.\n true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]\n containing the size of the unpadded original_image.\n max_gt_boxes: [batch_size] tensor representing the maximum number of\n groundtruth boxes to pad.\n\n Returns:\n A dictionary with:\n 'original_image': A [batch_size, H, W, C] uint8 image tensor.\n 'original_image_spatial_shape': A [batch_size, 2] tensor containing the\n original image sizes.\n 'true_image_shape': A [batch_size, 3] tensor containing the size of\n the unpadded original_image.\n 'key': A [batch_size] string tensor with image identifier.\n 'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes,\n in normalized or absolute coordinates, depending on the value of\n `scale_to_absolute`.\n 'detection_scores': [batch_size, max_detections] float32 tensor of scores.\n 'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed\n classes.\n 'detection_masks': [batch_size, max_detections, H, W] float32 tensor of\n binarized masks, reframed to full image masks.\n 'num_detections': [batch_size] int64 tensor containing number of valid\n detections.\n 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in\n normalized or absolute coordinates, depending on the value of\n `scale_to_absolute`. (Optional)\n 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed\n classes. (Optional)\n 'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox\n area. (Optional)\n 'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional)\n 'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional)\n 'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. 
(Optional)\n 'groundtruth_instance_masks': 4D int64 tensor of instance masks\n (Optional).\n 'num_groundtruth_boxes': [batch_size] tensor containing the maximum number\n of groundtruth boxes per image.\n\n Raises:\n ValueError: if original_image_spatial_shape is not 2D int32 tensor of shape\n [2].\n ValueError: if true_image_shapes is not 2D int32 tensor of shape\n [3].\n \"\"\"\n label_id_offset = 1 # Applying label id offset (b/63711816)\n\n input_data_fields = fields.InputDataFields\n if original_image_spatial_shapes is None:\n original_image_spatial_shapes = tf.tile(\n tf.expand_dims(tf.shape(images)[1:3], axis=0),\n multiples=[tf.shape(images)[0], 1])\n else:\n if (len(original_image_spatial_shapes.shape) != 2 and\n original_image_spatial_shapes.shape[1] != 2):\n raise ValueError(\n '`original_image_spatial_shape` should be a 2D tensor of shape '\n '[batch_size, 2].')\n\n if true_image_shapes is None:\n true_image_shapes = tf.tile(\n tf.expand_dims(tf.shape(images)[1:4], axis=0),\n multiples=[tf.shape(images)[0], 1])\n else:\n if (len(true_image_shapes.shape) != 2\n and true_image_shapes.shape[1] != 3):\n raise ValueError('`true_image_shapes` should be a 2D tensor of '\n 'shape [batch_size, 3].')\n\n output_dict = {\n input_data_fields.original_image:\n images,\n input_data_fields.key:\n keys,\n input_data_fields.original_image_spatial_shape: (\n original_image_spatial_shapes),\n input_data_fields.true_image_shape:\n true_image_shapes\n }\n\n detection_fields = fields.DetectionResultFields\n detection_boxes = detections[detection_fields.detection_boxes]\n detection_scores = detections[detection_fields.detection_scores]\n num_detections = tf.to_int32(detections[detection_fields.num_detections])\n\n if class_agnostic:\n detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)\n else:\n detection_classes = (\n tf.to_int64(detections[detection_fields.detection_classes]) +\n label_id_offset)\n\n if scale_to_absolute:\n output_dict[detection_fields.detection_boxes] = (\n shape_utils.static_or_dynamic_map_fn(\n _scale_box_to_absolute,\n elems=[detection_boxes, original_image_spatial_shapes],\n dtype=tf.float32))\n else:\n output_dict[detection_fields.detection_boxes] = detection_boxes\n output_dict[detection_fields.detection_classes] = detection_classes\n output_dict[detection_fields.detection_scores] = detection_scores\n output_dict[detection_fields.num_detections] = num_detections\n\n if detection_fields.detection_masks in detections:\n detection_masks = detections[detection_fields.detection_masks]\n # TODO(rathodv): This should be done in model's postprocess\n # function ideally.\n output_dict[detection_fields.detection_masks] = (\n shape_utils.static_or_dynamic_map_fn(\n _resize_detection_masks,\n elems=[detection_boxes, detection_masks,\n original_image_spatial_shapes],\n dtype=tf.uint8))\n\n if detection_fields.detection_keypoints in detections:\n detection_keypoints = detections[detection_fields.detection_keypoints]\n output_dict[detection_fields.detection_keypoints] = detection_keypoints\n if scale_to_absolute:\n output_dict[detection_fields.detection_keypoints] = (\n shape_utils.static_or_dynamic_map_fn(\n _scale_keypoint_to_absolute,\n elems=[detection_keypoints, original_image_spatial_shapes],\n dtype=tf.float32))\n\n if groundtruth:\n if max_gt_boxes is None:\n if input_data_fields.num_groundtruth_boxes in groundtruth:\n max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]\n else:\n raise ValueError(\n 'max_gt_boxes must be provided when processing 
batched examples.')\n\n if input_data_fields.groundtruth_instance_masks in groundtruth:\n masks = groundtruth[input_data_fields.groundtruth_instance_masks]\n groundtruth[input_data_fields.groundtruth_instance_masks] = (\n shape_utils.static_or_dynamic_map_fn(\n _resize_groundtruth_masks,\n elems=[masks, original_image_spatial_shapes],\n dtype=tf.uint8))\n\n output_dict.update(groundtruth)\n if scale_to_absolute:\n groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]\n output_dict[input_data_fields.groundtruth_boxes] = (\n shape_utils.static_or_dynamic_map_fn(\n _scale_box_to_absolute,\n elems=[groundtruth_boxes, original_image_spatial_shapes],\n dtype=tf.float32))\n\n # For class-agnostic models, groundtruth classes all become 1.\n if class_agnostic:\n groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]\n groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)\n output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes\n\n output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes\n\n return output_dict\n\n\ndef get_evaluators(eval_config, categories, evaluator_options=None):\n \"\"\"Returns the evaluator class according to eval_config, valid for categories.\n\n Args:\n eval_config: An `eval_pb2.EvalConfig`.\n categories: A list of dicts, each of which has the following keys -\n 'id': (required) an integer id uniquely identifying this category.\n 'name': (required) string representing category name e.g., 'cat', 'dog'.\n evaluator_options: A dictionary of metric names (see\n EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization\n keyword arguments. For example:\n evalator_options = {\n 'coco_detection_metrics': {'include_metrics_per_category': True}\n }\n\n Returns:\n An list of instances of DetectionEvaluator.\n\n Raises:\n ValueError: if metric is not in the metric class dictionary.\n \"\"\"\n evaluator_options = evaluator_options or {}\n eval_metric_fn_keys = eval_config.metrics_set\n if not eval_metric_fn_keys:\n eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]\n evaluators_list = []\n for eval_metric_fn_key in eval_metric_fn_keys:\n if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:\n raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))\n kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_key\n in evaluator_options else {})\n evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](\n categories,\n **kwargs_dict))\n return evaluators_list\n\n\ndef get_eval_metric_ops_for_evaluators(eval_config,\n categories,\n eval_dict):\n \"\"\"Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`.\n\n Args:\n eval_config: An `eval_pb2.EvalConfig`.\n categories: A list of dicts, each of which has the following keys -\n 'id': (required) an integer id uniquely identifying this category.\n 'name': (required) string representing category name e.g., 'cat', 'dog'.\n eval_dict: An evaluation dictionary, returned from\n result_dict_for_single_example().\n\n Returns:\n A dictionary of metric names to tuple of value_op and update_op that can be\n used as eval metric ops in tf.EstimatorSpec.\n \"\"\"\n eval_metric_ops = {}\n evaluator_options = evaluator_options_from_eval_config(eval_config)\n evaluators_list = get_evaluators(eval_config, categories, evaluator_options)\n for evaluator in evaluators_list:\n eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops(\n eval_dict))\n return eval_metric_ops\n\n\ndef evaluator_options_from_eval_config(eval_config):\n 
\"\"\"Produces a dictionary of evaluation options for each eval metric.\n\n Args:\n eval_config: An `eval_pb2.EvalConfig`.\n\n Returns:\n evaluator_options: A dictionary of metric names (see\n EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization\n keyword arguments. For example:\n evalator_options = {\n 'coco_detection_metrics': {'include_metrics_per_category': True}\n }\n \"\"\"\n eval_metric_fn_keys = eval_config.metrics_set\n evaluator_options = {}\n for eval_metric_fn_key in eval_metric_fn_keys:\n if eval_metric_fn_key in ('coco_detection_metrics', 'coco_mask_metrics'):\n evaluator_options[eval_metric_fn_key] = {\n 'include_metrics_per_category': (\n eval_config.include_metrics_per_category)\n }\n return evaluator_options\n"
] | [
[
"tensorflow.Summary.Value",
"tensorflow.squeeze",
"tensorflow.train.get_global_step",
"tensorflow.train.write_graph",
"tensorflow.tables_initializer",
"tensorflow.greater",
"tensorflow.global_variables_initializer",
"numpy.mean",
"numpy.tile",
"tensorflow.shape",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.image.resize_images",
"numpy.int32",
"tensorflow.train.Saver",
"tensorflow.local_variables_initializer",
"numpy.squeeze",
"tensorflow.logging.info",
"tensorflow.to_int64",
"tensorflow.to_int32",
"tensorflow.train.latest_checkpoint",
"tensorflow.get_default_graph",
"tensorflow.summary.FileWriterCache.get",
"tensorflow.contrib.slim.queues.QueueRunners"
]
] |
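The `repeated_checkpoint_run` docstring above describes a poll-evaluate-sleep loop over checkpoints. Below is a minimal, self-contained sketch of just that scheduling logic; `find_latest_checkpoint` and `evaluate_checkpoint` are hypothetical caller-supplied stand-ins for `tf.train.latest_checkpoint` and `_run_checkpoint_once`, so this illustrates the pattern rather than reproducing the TF Object Detection implementation.

```python
import time

def poll_and_evaluate(find_latest_checkpoint, evaluate_checkpoint,
                      eval_interval_secs=120, max_evaluations=None):
    """Sketch of the scheduling in repeated_checkpoint_run (assumption: the
    two callables are provided by the caller)."""
    last_path = None
    num_evaluations = 0
    metrics = None
    while True:
        start = time.time()
        path = find_latest_checkpoint()
        if not path:
            print('No checkpoint found; will try again in %d seconds' % eval_interval_secs)
        elif path == last_path:
            print('Checkpoint already evaluated; will try again in %d seconds' % eval_interval_secs)
        else:
            last_path = path
            metrics = evaluate_checkpoint(path)
            num_evaluations += 1
            if max_evaluations and num_evaluations >= max_evaluations:
                break
        # Sleep only for whatever remains of the evaluation interval.
        time_to_next = start + eval_interval_secs - time.time()
        if time_to_next > 0:
            time.sleep(time_to_next)
    return metrics

# Toy usage with stub callables (no real checkpoints involved):
poll_and_evaluate(lambda: '/tmp/model.ckpt-100',
                  lambda p: {'mAP': 0.0},
                  eval_interval_secs=1, max_evaluations=1)
```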
DrAuxin/WestpaTools | [
"4e236e0a3d65504d1937260316a4a5c6f39aa610"
] | [
"wtools/plotting.py"
] | [
"import h5py\nimport numpy\nimport matplotlib.pyplot as plt\n\ndef plotflux(h5file, state=1):\n \"\"\"\n A function that plots the dataset target_flux_evolution from a direct.h5 file.\n\n Parameters\n ----------\n h5file: dictionary\n The user's HDF5 file loaded with loadh5.\n state: integer\n The target state; the state for which you want to know the entering flux for.\n\n Returns\n -------\n Nothing\n The plot of the flux evolution will be shown in a separate window.\n\n Examples\n --------\n >>> h5file = loadh5(\"west.h5\")\n >>> plotflux(h5file, 1)\n --------\n | __/ |\n | / |\n -------- \n \"\"\"\n fluxes = h5file['target_flux_evolution']['expected',:,state-1]\n iterations = numpy.arange(1,len(fluxes)+1,1)\n fig, ax = plt.subplots()\n ax.plot(iterations,fluxes, linewidth=3)\n ax.set_xlabel('WE Iteration', fontsize=24)\n ax.set_ylabel('Mean Flux', fontsize=24)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax.tick_params(labelsize=22)\n fig.tight_layout()\n plt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
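For readers without a WESTPA `direct.h5` file at hand, here is a self-contained sketch of the kind of figure `plotflux` above produces. The flux values are synthetic stand-ins (an assumption; real values come from the `target_flux_evolution['expected']` dataset written by WESTPA's analysis tools), while the axes and scientific-notation styling mirror the function.

```python
import numpy
import matplotlib.pyplot as plt

# Synthetic flux-evolution curve: 100 WE iterations of mean flux into one
# target state (made-up data, shaped like the real 1-D 'expected' column).
fluxes = 1e-5 * (1.0 - numpy.exp(-numpy.arange(1, 101) / 25.0))
iterations = numpy.arange(1, len(fluxes) + 1)

fig, ax = plt.subplots()
ax.plot(iterations, fluxes, linewidth=3)
ax.set_xlabel('WE Iteration')
ax.set_ylabel('Mean Flux')
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
fig.tight_layout()
plt.show()
```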
anonips/-MDP-Playground | [
"74431f98c210830a93a1bc83fcdcb95bf1644696"
] | [
"experiments/custom_agents_opt.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom ray.rllib.agents.trainer import Trainer, with_common_config\nfrom ray.rllib.utils.annotations import override\n\n# yapf: disable\n# __sphinx_doc_begin__\nclass RandomAgent(Trainer):\n \"\"\"Policy that takes random actions and never learns.\"\"\"\n\n _name = \"RandomAgent\"\n _default_config = with_common_config({\n \"rollouts_per_iteration\": 10,\n })\n\n @override(Trainer)\n def _init(self, config, env_creator):\n self.env = env_creator(config[\"env_config\"])\n\n @override(Trainer)\n def _train(self):\n rewards = []\n steps = 0\n for _ in range(self.config[\"rollouts_per_iteration\"]):\n obs = self.env.reset()\n done = False\n reward = 0.0\n while not done:\n action = self.env.action_space.sample()\n obs, r, done, info = self.env.step(action)\n\n reward += r\n steps += 1\n rewards.append(reward)\n return {\n \"episode_reward_mean\": np.mean(rewards),\n \"timesteps_this_iter\": steps,\n }\n\nclass VIAgent(Trainer):\n \"\"\"Value Iteration.\n #TODO Make it Generalized PI.\n \"\"\"\n\n _name = \"VIAgent\"\n _default_config = with_common_config({\n \"tolerance\": 0.01,\n \"discount_factor\": 0.5,\n \"rollouts_per_iteration\": 10,\n \"episode_length\": 200,\n # \"lr\": 0.5\n })\n\n @override(Trainer)\n def _init(self, config, env_creator):\n self.env = env_creator(config[\"env_config\"])\n self.V = np.zeros(self.env.observation_space.n)\n self.policy = np.zeros(self.env.observation_space.n, dtype=int)\n self.policy[:] = -1 #IMP # To avoid initing it to a value within action_space range\n\n @override(Trainer)\n def _train(self):\n max_diff = np.inf # Maybe keep a state variable so that we don't need to update every train iteration??\n state_space_size = self.env.observation_space.n\n gamma = self.config[\"discount_factor\"]\n total_iterations = 0\n while max_diff > self.config[\"tolerance\"]:\n total_iterations += 1\n for s in range(state_space_size):\n # print(\"self.V[:]\", s, max_diff, self.V, [self.env.R(s, a) for a in range(self.env.action_space.n)], self.policy[s])\n self.V_old = self.V.copy() # Is this asynchronous? 
V_old should be held constant for all states in the for loop?\n # print([self.env.R(s, a) for a in range(self.env.action_space.n)], [gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)], [self.env.R(s, a) + gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)])\n self.policy[s] = np.argmax([self.env.R(s, a) + gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)])\n self.V[s] = np.max([self.env.R(s, a) + gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)]) # We want R to be a callable function, so I guess we have to keep a for loop here??\n # print(\"self.V, self.V_old, self.policy[s]\", self.V, self.V_old, self.policy[s], self.env.P(s, self.policy[s]))\n\n max_diff = np.max(np.absolute(self.V_old - self.V))\n # import time\n # time.sleep(2)\n# for s in range(state_space_size):\n# print(\"FINAL self.V[:]\", s, max_diff, self.V[:], [self.env.R(s, a) for a in range(self.env.action_space.n)])\n\n print(\"Total iterations:\", total_iterations)\n rewards = []\n steps = 0\n for _ in range(self.config[\"rollouts_per_iteration\"]):\n obs = self.env.reset()\n done = False\n reward = 0.0\n for _ in range(self.config[\"episode_length\"]):\n action = self.policy[obs]\n obs, r, done, info = self.env.step(action)\n\n reward += r\n steps += 1\n rewards.append(reward)\n return {\n \"episode_reward_mean\": np.mean(rewards),\n \"timesteps_this_iter\": steps,\n }\n\n\n\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.utils.seed import seed as rllib_seed\nimport rl_toy\nfrom rl_toy.envs import RLToyEnv\nfrom ray.tune.registry import register_env\nregister_env(\"RLToy-v0\", lambda config: RLToyEnv(config))\n\n\n\nfrom ray.rllib.models.preprocessors import OneHotPreprocessor\nfrom ray.rllib.models import ModelCatalog\nModelCatalog.register_custom_preprocessor(\"ohe\", OneHotPreprocessor)\n\n\n\n#rllib_seed(0, 0, 0) ####IMP Doesn't work due to multi-process I think; so use config[\"seed\"]\nray.init()\n\n\n# Old config space\n# algorithms = [\"DQN\"]\n# state_space_sizes = [2**i for i in range(4,6)]\n# action_space_sizes = [2**i for i in range(1,6)]\n# delays = [0] + [2**i for i in range(5)]\n# sequence_lengths = [i for i in range(1,6)]\n# reward_densities = [0.25] # np.linspace(0.0, 1.0, num=5)\n# # make_reward_dense = [True, False]\n# terminal_state_densities = [0.25] # np.linspace(0.1, 1.0, num=5)\n\n\n#test basic case\n# algorithms = [\"DQN\"]\n# state_space_sizes = [10]\n# action_space_sizes = [10]\n# delays = [4]\n# sequence_lengths = [2]\n# reward_densities = [0.25] # np.linspace(0.0, 1.0, num=5)\n# # make_reward_dense = [True, False]\n# terminal_state_densities = [0.25] # np.linspace(0.1, 1.0, num=5)\n\nstate_space_sizes = [8]#, 10, 12, 14] # [2**i for i in range(1,6)]\naction_space_sizes = [8]#2, 4, 8, 16] # [2**i for i in range(1,6)]\ndelays = [0] # + [2**i for i in range(4)]\nsequence_lengths = [1]#, 2]#i for i in range(1,4)]\nreward_densities = [0.25] # np.linspace(0.0, 1.0, num=5)\n# make_reward_dense = [True, False]\nterminal_state_densities = [0.25] # np.linspace(0.1, 1.0, num=5)\nalgorithms = [\"DQN\"]\n#seeds = []\n# Others, keep the rest fixed for these: learning_starts, target_network_update_freq, double_dqn, fcnet_hiddens, fcnet_activation, use_lstm, lstm_seq_len, sample_batch_size/train_batch_size\n# More others: adam_epsilon, exploration_final_eps/exploration_fraction, buffer_size\nnum_layerss = [1, 2, 3, 4]\nlayer_widths = [128, 256, 512]\nfcnet_activations = [\"tanh\", \"relu\", 
\"sigmoid\"]\nlearning_startss = [500, 1000, 2000, 4000, 8000]\ntarget_network_update_freqs = [8, 80, 800]\ndouble_dqn = [False, True]\nlearning_rates = [1e-2, 1e-3, 1e-4, 1e-5, 1e-6]\nadam_epsilons = [1e-3, 1e-4, 1e-5, 1e-6] # [1e-1, 1e-4, 1e-7, 1e-10]\n# lstm with sequence lengths\n\nprint('# Algorithm, state_space_size, action_space_size, delay, sequence_length, reward_density,'\n 'terminal_state_density ')\nprint(algorithms, state_space_sizes, action_space_sizes, delays, sequence_lengths, reward_densities, terminal_state_densities)\n\n\n\n# stats = {}\n# aaaa = 3\n\n#TODO Write addnl. line at beginning of file for column names\n# fout = open('rl_stats_temp.csv', 'a') #hardcoded\n# fout.write('# basename, n_points, n_features, n_trees ')\n\nimport time\nstart = time.time()\nprint(algorithms, state_space_sizes, action_space_sizes, delays,\n sequence_lengths, reward_densities, terminal_state_densities)\n\n\ndef on_train_result(info):\n# print(\"#############trainer.train() result: {} -> {} episodes\".format(\n# info[\"trainer\"], info[\"result\"][\"episodes_this_iter\"]), info)\n # you can mutate the result dict to add new fields to return\n# stats['episode_len_mean'] = info['result']['episode_len_mean']\n# print(\"++++++++\", aaaa, stats)\n algorithm = info[\"trainer\"]._name\n state_space_size = info[\"result\"][\"config\"][\"env_config\"][\"state_space_size\"]\n action_space_size = info[\"result\"][\"config\"][\"env_config\"][\"action_space_size\"]\n delay = info[\"result\"][\"config\"][\"env_config\"][\"delay\"]\n sequence_length = info[\"result\"][\"config\"][\"env_config\"][\"sequence_length\"]\n reward_density = info[\"result\"][\"config\"][\"env_config\"][\"reward_density\"]\n terminal_state_density = info[\"result\"][\"config\"][\"env_config\"][\"terminal_state_density\"]\n fcnet_hiddens = info[\"result\"][\"config\"][\"model\"][\"fcnet_hiddens\"]\n num_layers = len(fcnet_hiddens)\n layer_width = fcnet_hiddens[0] #hack\n lr = info[\"result\"][\"config\"][\"lr\"]\n adam_epsilon = info[\"result\"][\"config\"][\"adam_epsilon\"]\n\n timesteps_total = info[\"result\"][\"timesteps_total\"] # also has episodes_total and training_iteration\n episode_reward_mean = info[\"result\"][\"episode_reward_mean\"] # also has max and min\n episode_len_mean = info[\"result\"][\"episode_len_mean\"]\n\n fout = open('./rl_stats_temp_opt.csv', 'a') #hardcoded\n fout.write('# Algorithm, state_space_size, action_space_size, delay, sequence_length, reward_density, '\n 'terminal_state_density, num_layers, layer_width, lr, adam_epsilon,\\n' + str(algorithm) + ' ' + str(state_space_size) +\n ' ' + str(action_space_size) + ' ' + str(delay) + ' ' + str(sequence_length)\n + ' ' + str(reward_density) + ' ' + str(terminal_state_density) + ' ')\n # Writes every iteration, would slow things down. 
#hack\n fout.write(str(num_layers) + ' ' + str(layer_width) + ' ' + str(lr) + ' ' + str(adam_epsilon) + ' ' + str(timesteps_total) + ' ' + str(episode_reward_mean) +\n ' ' + str(episode_len_mean) + '\\n')\n fout.close()\n\n info[\"result\"][\"callback_ok\"] = True\n\n\n\n# tune.run(\n# RandomAgent,\n# stop={\n# \"timesteps_total\": 20000,\n# },\n# config={\n# \"rollouts_per_iteration\": 10,\n# \"env\": \"RLToy-v0\",\n# \"env_config\": {\n# 'state_space_type': 'discrete',\n# 'action_space_type': 'discrete',\n# 'state_space_size': 16,\n# 'action_space_size': 16,\n# 'generate_random_mdp': True,\n# 'delay': 6,\n# 'sequence_length': 1,\n# 'reward_density': 0.25,\n# 'terminal_state_density': 0.25\n# },\n# },\n# )\n\n# tune.run(\n# VIAgent,\n# stop={\n# \"timesteps_total\": 20000,\n# },\n# config={\n# \"tolerance\": 0.01,\n# \"discount_factor\": 0.99,\n# \"rollouts_per_iteration\": 10,\n# \"env\": \"RLToy-v0\",\n# \"env_config\": {\n# 'state_space_type': 'discrete',\n# 'action_space_type': 'discrete',\n# 'state_space_size': 10,\n# 'action_space_size': 10,\n# 'generate_random_mdp': True,\n# 'delay': 0,\n# 'sequence_length': 1,\n# 'reward_density': 0.25,\n# 'terminal_state_density': 0.25\n# },\n# },\n# )\n\nfor algorithm in algorithms: #TODO each one has different config_spaces\n for state_space_size in state_space_sizes:\n for action_space_size in action_space_sizes:\n for delay in delays:\n for sequence_length in sequence_lengths:\n for reward_density in reward_densities:\n for terminal_state_density in terminal_state_densities:\n for lr in learning_rates:\n for adam_epsilon in adam_epsilons:\n tune.run(\n algorithm,\n stop={\n \"timesteps_total\": 20000,\n },\n config={\n# 'seed': 0, #seed\n \"adam_epsilon\": adam_epsilon,\n \"lr\": lr, # \"lr\": grid_search([1e-2, 1e-4, 1e-6]),\n \"beta_annealing_fraction\": 1.0,\n \"buffer_size\": 1000000,\n \"double_q\": False,\n \"dueling\": False,\n \"env\": \"RLToy-v0\",\n \"env_config\": {\n 'seed': 0, #seed\n 'state_space_type': 'discrete',\n 'action_space_type': 'discrete',\n 'state_space_size': state_space_size,\n 'action_space_size': action_space_size,\n 'generate_random_mdp': True,\n 'delay': delay,\n 'sequence_length': sequence_length,\n 'reward_density': reward_density,\n 'terminal_state_density': terminal_state_density,\n 'repeats_in_sequences': False,\n 'reward_unit': 1.0,\n 'make_denser': False,\n 'completely_connected': True\n },\n \"model\": {\n \"fcnet_hiddens\": [256, 256],\n \"custom_preprocessor\": \"ohe\",\n \"custom_options\": {}, # extra options to pass to your preprocessor\n \"fcnet_activation\": \"tanh\",\n \"use_lstm\": False,\n \"max_seq_len\": 20,\n \"lstm_cell_size\": 256,\n \"lstm_use_prev_action_reward\": False,\n },\n \"exploration_final_eps\": 0.01,\n \"exploration_fraction\": 0.1,\n \"final_prioritized_replay_beta\": 1.0,\n \"hiddens\": None,\n \"learning_starts\": 1000,\n \"n_step\": 1,\n \"noisy\": False,\n \"num_atoms\": 1,\n \"prioritized_replay\": False,\n \"prioritized_replay_alpha\": 0.5,\n \"sample_batch_size\": 4,\n \"schedule_max_timesteps\": 20000,\n \"target_network_update_freq\": 800,\n \"timesteps_per_iteration\": 100,\n \"train_batch_size\": 32,\n\n \"callbacks\": {\n # \"on_episode_start\": tune.function(on_episode_start),\n # \"on_episode_step\": tune.function(on_episode_step),\n # \"on_episode_end\": tune.function(on_episode_end),\n # \"on_sample_end\": tune.function(on_sample_end),\n \"on_train_result\": tune.function(on_train_result),\n # \"on_postprocess_traj\": tune.function(on_postprocess_traj),\n },\n 
},\n #return_trials=True # add tirals = tune.run( above\n )\n\nend = time.time()\nprint(\"No. of seconds to run:\", end - start)\n"
] | [
[
"numpy.mean",
"numpy.zeros",
"numpy.absolute"
]
] |
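The `VIAgent._train` loop above implements tabular value iteration with a `tolerance` stopping test and `discount_factor` gamma, and its inline comment even asks whether `V_old` should be held fixed for a full sweep. Below is a minimal sketch of that textbook synchronous variant, under the assumption that the MDP is given as dense reward and deterministic-successor matrices rather than the callable `env.R`/`env.P` used above.

```python
import numpy as np

def value_iteration(R, P, gamma=0.5, tol=0.01):
    """Synchronous tabular value iteration on a deterministic MDP.

    R: (S, A) reward matrix; P: (S, A) int matrix of successor states.
    V_old is frozen for the whole sweep -- the synchronous backup the
    in-code comment in VIAgent asks about.
    """
    S, _ = R.shape
    V = np.zeros(S)
    while True:
        V_old = V.copy()
        # Q[s, a] = R[s, a] + gamma * V_old[P[s, a]]
        Q = R + gamma * V_old[P]
        V = Q.max(axis=1)
        if np.max(np.abs(V - V_old)) <= tol:
            return V, Q.argmax(axis=1)

# Toy usage on a random 8-state, 4-action MDP (arbitrary sizes):
rng = np.random.default_rng(0)
R = rng.random((8, 4))
P = rng.integers(0, 8, size=(8, 4))
V, policy = value_iteration(R, P)
print(policy)
```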
zhengzangw/RODNet | [
"eca5f2bd1f3051c2b823d279532ddafa71b009c1"
] | [
"rodnet/models/backbones/hgwi.py"
] | [
"import torch\nimport torch.nn as nn\n\n\nclass RadarStackedHourglass(nn.Module):\n def __init__(self, n_class, stacked_num=1):\n super(RadarStackedHourglass, self).__init__()\n self.stacked_num = stacked_num\n self.conv1a = nn.Conv3d(\n in_channels=2,\n out_channels=32,\n kernel_size=(9, 5, 5),\n stride=(1, 1, 1),\n padding=(4, 2, 2),\n )\n self.conv1b = nn.Conv3d(\n in_channels=32,\n out_channels=64,\n kernel_size=(9, 5, 5),\n stride=(1, 1, 1),\n padding=(4, 2, 2),\n )\n self.conv1c = nn.Conv3d(\n in_channels=64,\n out_channels=160,\n kernel_size=(9, 5, 5),\n stride=(1, 1, 1),\n padding=(4, 2, 2),\n )\n\n self.hourglass = []\n for i in range(stacked_num):\n self.hourglass.append(\n nn.ModuleList(\n [\n RODEncode(),\n RODDecode(),\n nn.Conv3d(\n in_channels=160,\n out_channels=n_class,\n kernel_size=(9, 5, 5),\n stride=(1, 1, 1),\n padding=(4, 2, 2),\n ),\n nn.Conv3d(\n in_channels=n_class,\n out_channels=160,\n kernel_size=(9, 5, 5),\n stride=(1, 1, 1),\n padding=(4, 2, 2),\n ),\n ]\n )\n )\n self.hourglass = nn.ModuleList(self.hourglass)\n self.relu = nn.ReLU()\n self.bn1a = nn.BatchNorm3d(num_features=32)\n self.bn1b = nn.BatchNorm3d(num_features=64)\n self.bn1c = nn.BatchNorm3d(num_features=160)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n x = self.relu(self.bn1a(self.conv1a(x)))\n x = self.relu(self.bn1b(self.conv1b(x)))\n x = self.relu(self.bn1c(self.conv1c(x)))\n\n out = []\n for i in range(self.stacked_num):\n x, x1, x2, x3 = self.hourglass[i][0](x)\n x = self.hourglass[i][1](x, x1, x2, x3)\n confmap = self.hourglass[i][2](x)\n out.append(self.sigmoid(confmap))\n if i < self.stacked_num - 1:\n confmap_ = self.hourglass[i][3](confmap)\n x = x + confmap_\n\n return out\n\n\nclass InceptionLayerConcat(nn.Module):\n \"\"\"\n Kernal size: for 2d kernal size, since the kernal size in temporal domain will be fixed\n \"\"\"\n\n def __init__(self, kernal_size, in_channel, stride):\n super(InceptionLayerConcat, self).__init__()\n\n paddingX = kernal_size[0] // 2\n paddingY = kernal_size[1] // 2\n\n self.branch1 = nn.Conv3d(\n in_channels=in_channel,\n out_channels=32,\n kernel_size=(5, kernal_size[0], kernal_size[1]),\n stride=stride,\n padding=(2, paddingX, paddingY),\n )\n self.branch2a = nn.Conv3d(\n in_channels=in_channel,\n out_channels=64,\n kernel_size=(5, kernal_size[0], kernal_size[1]),\n stride=(1, 1, 1),\n padding=(2, paddingX, paddingY),\n )\n self.branch2b = nn.Conv3d(\n in_channels=64,\n out_channels=64,\n kernel_size=(9, kernal_size[0], kernal_size[1]),\n stride=stride,\n padding=(4, paddingX, paddingY),\n )\n self.branch3a = nn.Conv3d(\n in_channels=in_channel,\n out_channels=64,\n kernel_size=(5, kernal_size[0], kernal_size[1]),\n stride=(1, 1, 1),\n padding=(2, paddingX, paddingY),\n )\n self.branch3b = nn.Conv3d(\n in_channels=64,\n out_channels=64,\n kernel_size=(13, kernal_size[0], kernal_size[1]),\n stride=stride,\n padding=(6, paddingX, paddingY),\n )\n\n def forward(self, x):\n branch1 = self.branch1(x)\n\n branch2 = self.branch2a(x)\n branch2 = self.branch2b(branch2)\n\n branch3 = self.branch3a(x)\n branch3 = self.branch3b(branch3)\n\n return torch.cat((branch1, branch2, branch3), 1)\n\n\nclass RODEncode(nn.Module):\n def __init__(self):\n super(RODEncode, self).__init__()\n self.inception1 = InceptionLayerConcat(\n kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)\n )\n self.inception2 = InceptionLayerConcat(\n kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)\n )\n self.inception3 = InceptionLayerConcat(\n kernal_size=(5, 5), in_channel=160, 
stride=(1, 2, 2)\n )\n\n self.skip_inception1 = InceptionLayerConcat(\n kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)\n )\n self.skip_inception2 = InceptionLayerConcat(\n kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)\n )\n self.skip_inception3 = InceptionLayerConcat(\n kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)\n )\n # self.conv4a = nn.Conv3d(in_channels=64, out_channels=64,\n # kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))\n # self.conv4b = nn.Conv3d(in_channels=64, out_channels=64,\n # kernel_size=(9, 5, 5), stride=(1, 2, 2), padding=(4, 2, 2))\n # self.conv5a = nn.Conv3d(in_channels=64, out_channels=64,\n # kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))\n # self.conv5b = nn.Conv3d(in_channels=64, out_channels=64,\n # kernel_size=(9, 5, 5), stride=(1, 2, 2), padding=(4, 2, 2))\n self.bn1 = nn.BatchNorm3d(num_features=160)\n self.bn2 = nn.BatchNorm3d(num_features=160)\n self.bn3 = nn.BatchNorm3d(num_features=160)\n\n self.skip_bn1 = nn.BatchNorm3d(num_features=160)\n self.skip_bn2 = nn.BatchNorm3d(num_features=160)\n self.skip_bn3 = nn.BatchNorm3d(num_features=160)\n # self.bn4a = nn.BatchNorm3d(num_features=64)\n # self.bn4b = nn.BatchNorm3d(num_features=64)\n # self.bn5a = nn.BatchNorm3d(num_features=64)\n # self.bn5b = nn.BatchNorm3d(num_features=64)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x1 = self.relu(self.skip_bn1(self.skip_inception1(x)))\n x = self.relu(\n self.bn1(self.inception1(x))\n ) # (B, 2, W, 128, 128) -> (B, 64, W, 128, 128)\n\n x2 = self.relu(self.skip_bn2(self.skip_inception2(x)))\n x = self.relu(\n self.bn2(self.inception2(x))\n ) # (B, 2, W, 128, 128) -> (B, 64, W, 128, 128)\n\n x3 = self.relu(self.skip_bn3(self.skip_inception3(x)))\n x = self.relu(\n self.bn3(self.inception3(x))\n ) # (B, 2, W, 128, 128) -> (B, 64, W, 128, 128)\n\n return x, x1, x2, x3\n\n\nclass RODDecode(nn.Module):\n def __init__(self):\n super(RODDecode, self).__init__()\n self.convt1 = nn.ConvTranspose3d(\n in_channels=160,\n out_channels=160,\n kernel_size=(3, 6, 6),\n stride=(1, 2, 2),\n padding=(1, 2, 2),\n )\n self.convt2 = nn.ConvTranspose3d(\n in_channels=160,\n out_channels=160,\n kernel_size=(3, 6, 6),\n stride=(1, 2, 2),\n padding=(1, 2, 2),\n )\n self.convt3 = nn.ConvTranspose3d(\n in_channels=160,\n out_channels=160,\n kernel_size=(3, 6, 6),\n stride=(1, 2, 2),\n padding=(1, 2, 2),\n )\n self.conv1 = nn.Conv3d(\n in_channels=160,\n out_channels=160,\n kernel_size=(9, 5, 5),\n stride=(1, 1, 1),\n padding=(4, 2, 2),\n )\n self.conv2 = nn.Conv3d(\n in_channels=160,\n out_channels=160,\n kernel_size=(9, 5, 5),\n stride=(1, 1, 1),\n padding=(4, 2, 2),\n )\n self.conv3 = nn.Conv3d(\n in_channels=160,\n out_channels=160,\n kernel_size=(9, 5, 5),\n stride=(1, 1, 1),\n padding=(4, 2, 2),\n )\n self.prelu = nn.PReLU()\n self.sigmoid = nn.Sigmoid()\n # self.upsample = nn.Upsample(size=(rodnet_configs['win_size'], radar_configs['ramap_rsize'],\n # radar_configs['ramap_asize']), mode='nearest')\n\n def forward(self, x, x1, x2, x3):\n x = self.prelu(\n self.convt1(x + x3)\n ) # (B, 256, W/4, 16, 16) -> (B, 128, W/2, 32, 32)\n x = self.prelu(self.conv1(x))\n x = self.prelu(\n self.convt2(x + x2)\n ) # (B, 128, W/2, 32, 32) -> (B, 64, W, 64, 64)\n x = self.prelu(self.conv2(x))\n x = self.prelu(self.convt3(x + x1)) # (B, 64, W, 64, 64) -> (B, 3, W, 128, 128)\n x = self.prelu(self.conv3(x))\n return x\n"
] | [
[
"torch.nn.PReLU",
"torch.nn.BatchNorm3d",
"torch.nn.ConvTranspose3d",
"torch.nn.ModuleList",
"torch.cat",
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.Conv3d"
]
] |
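`InceptionLayerConcat` above mixes three temporal kernel sizes (5, 9, 13) whose paddings (2, 4, 6) keep the time axis aligned, so the branch outputs concatenate cleanly into 32 + 64 + 64 = 160 channels. A minimal shape check of that idea follows; branches 2 and 3 are collapsed to a single conv each for brevity (the real module stacks two), and the tensor sizes are arbitrary assumptions.

```python
import torch
import torch.nn as nn

k = (5, 5)                           # spatial kernel, as in kernal_size=(5, 5)
pad = (k[0] // 2, k[1] // 2)         # "same" spatial padding
branch1 = nn.Conv3d(160, 32, kernel_size=(5, *k), stride=(1, 2, 2), padding=(2, *pad))
branch2 = nn.Conv3d(160, 64, kernel_size=(9, *k), stride=(1, 2, 2), padding=(4, *pad))
branch3 = nn.Conv3d(160, 64, kernel_size=(13, *k), stride=(1, 2, 2), padding=(6, *pad))

x = torch.randn(1, 160, 4, 32, 32)   # (B, C, T, H, W); sizes chosen for the demo
out = torch.cat([branch1(x), branch2(x), branch3(x)], dim=1)
print(out.shape)                     # torch.Size([1, 160, 4, 16, 16])
```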
bopopescu/fbserver | [
"e812dbc4dc0cbf2fda19473015a3d7e253718a19"
] | [
"venv/lib/python2.7/site-packages/sklearn/base.py"
] | [
"\"\"\"Base classes for all estimators.\"\"\"\n# Author: Gael Varoquaux <[email protected]>\n# License: BSD 3 clause\n\nimport copy\nimport inspect\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\nfrom .externals import six\n\n\n###############################################################################\ndef clone(estimator, safe=True):\n \"\"\"Constructs a new estimator with the same parameters.\n\n Clone does a deep copy of the model in an estimator\n without actually copying attached data. It yields a new estimator\n with the same parameters that has not been fit on any data.\n\n Parameters\n ----------\n estimator: estimator object, or list, tuple or set of objects\n The estimator or group of estimators to be cloned\n\n safe: boolean, optional\n If safe is false, clone will fall back to a deepcopy on objects\n that are not estimators.\n\n \"\"\"\n estimator_type = type(estimator)\n # XXX: not handling dictionaries\n if estimator_type in (list, tuple, set, frozenset):\n return estimator_type([clone(e, safe=safe) for e in estimator])\n elif not hasattr(estimator, 'get_params'):\n if not safe:\n return copy.deepcopy(estimator)\n else:\n raise TypeError(\"Cannot clone object '%s' (type %s): \"\n \"it does not seem to be a scikit-learn estimator \"\n \"it does not implement a 'get_params' methods.\"\n % (repr(estimator), type(estimator)))\n klass = estimator.__class__\n new_object_params = estimator.get_params(deep=False)\n for name, param in six.iteritems(new_object_params):\n new_object_params[name] = clone(param, safe=False)\n new_object = klass(**new_object_params)\n params_set = new_object.get_params(deep=False)\n\n # quick sanity check of the parameters of the clone\n for name in new_object_params:\n param1 = new_object_params[name]\n param2 = params_set[name]\n if isinstance(param1, np.ndarray):\n # For most ndarrays, we do not test for complete equality\n if not isinstance(param2, type(param1)):\n equality_test = False\n elif (param1.ndim > 0\n and param1.shape[0] > 0\n and isinstance(param2, np.ndarray)\n and param2.ndim > 0\n and param2.shape[0] > 0):\n equality_test = (\n param1.shape == param2.shape\n and param1.dtype == param2.dtype\n # We have to use '.flat' for 2D arrays\n and param1.flat[0] == param2.flat[0]\n and param1.flat[-1] == param2.flat[-1]\n )\n else:\n equality_test = np.all(param1 == param2)\n elif sparse.issparse(param1):\n # For sparse matrices equality doesn't work\n if not sparse.issparse(param2):\n equality_test = False\n elif param1.size == 0 or param2.size == 0:\n equality_test = (\n param1.__class__ == param2.__class__\n and param1.size == 0\n and param2.size == 0\n )\n else:\n equality_test = (\n param1.__class__ == param2.__class__\n and param1.data[0] == param2.data[0]\n and param1.data[-1] == param2.data[-1]\n and param1.nnz == param2.nnz\n and param1.shape == param2.shape\n )\n else:\n equality_test = new_object_params[name] == params_set[name]\n if not equality_test:\n raise RuntimeError('Cannot clone object %s, as the constructor '\n 'does not seem to set parameter %s' %\n (estimator, name))\n\n return new_object\n\n\n###############################################################################\ndef _pprint(params, offset=0, printer=repr):\n \"\"\"Pretty print the dictionary 'params'\n\n Parameters\n ----------\n params: dict\n The dictionary to pretty print\n\n offset: int\n The offset in characters to add at the begin of each line.\n\n printer:\n The function to convert entries to strings, typically\n the builtin str or 
repr\n\n \"\"\"\n # Do a multi-line justified repr:\n options = np.get_printoptions()\n np.set_printoptions(precision=5, threshold=64, edgeitems=2)\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, (k, v) in enumerate(sorted(six.iteritems(params))):\n if type(v) is float:\n # use str for representing floating point numbers\n # this way we get consistent representation across\n # architectures and versions.\n this_repr = '%s=%s' % (k, str(v))\n else:\n # use repr of the rest\n this_repr = '%s=%s' % (k, printer(v))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' + this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n\n np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines\n\n\n###############################################################################\nclass BaseEstimator(object):\n \"\"\"Base class for all estimators in scikit-learn\n\n Notes\n -----\n All estimators should specify all the parameters that can be set\n at the class level in their ``__init__`` as explicit keyword\n arguments (no ``*args`` or ``**kwargs``).\n \"\"\"\n\n @classmethod\n def _get_param_names(cls):\n \"\"\"Get parameter names for the estimator\"\"\"\n # fetch the constructor or the original constructor before\n # deprecation wrapping if any\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n if init is object.__init__:\n # No explicit constructor to introspect\n return []\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n args, varargs, kw, default = inspect.getargspec(init)\n if varargs is not None:\n raise RuntimeError(\"scikit-learn estimators should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s doesn't follow this convention.\"\n % (cls, ))\n # Remove 'self'\n # XXX: This is going to fail if the init is a staticmethod, but\n # who would do this?\n args.pop(0)\n args.sort()\n return args\n\n def get_params(self, deep=True):\n \"\"\"Get parameters for this estimator.\n\n Parameters\n ----------\n deep: boolean, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n out = dict()\n for key in self._get_param_names():\n # We need deprecation warnings to always be on in order to\n # catch deprecated param values.\n # This is set in utils/__init__.py but it gets overwritten\n # when running under python3 somehow.\n warnings.simplefilter(\"always\", DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n # if the parameter is deprecated, don't show it\n continue\n finally:\n warnings.filters.pop(0)\n\n # XXX: should we rather test if instance of estimator?\n if deep and hasattr(value, 'get_params'):\n deep_items = value.get_params().items()\n out.update((key + '__' + k, val) for k, val in deep_items)\n out[key] = value\n return out\n\n def set_params(self, **params):\n 
\"\"\"Set the parameters of this estimator.\n\n The method works on simple estimators as well as on nested objects\n (such as pipelines). The former have parameters of the form\n ``<component>__<parameter>`` so that it's possible to update each\n component of a nested object.\n\n Returns\n -------\n self\n \"\"\"\n if not params:\n # Simple optimisation to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n for key, value in six.iteritems(params):\n split = key.split('__', 1)\n if len(split) > 1:\n # nested objects case\n name, sub_name = split\n if not name in valid_params:\n raise ValueError('Invalid parameter %s for estimator %s' %\n (name, self))\n sub_object = valid_params[name]\n sub_object.set_params(**{sub_name: value})\n else:\n # simple objects case\n if not key in valid_params:\n raise ValueError('Invalid parameter %s ' 'for estimator %s'\n % (key, self.__class__.__name__))\n setattr(self, key, value)\n return self\n\n def __repr__(self):\n class_name = self.__class__.__name__\n return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),\n offset=len(class_name),),)\n\n\n###############################################################################\nclass ClassifierMixin(object):\n \"\"\"Mixin class for all classifiers in scikit-learn.\"\"\"\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Returns the mean accuracy on the given test data and labels.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n Test samples.\n\n y : array-like, shape = (n_samples,)\n True labels for X.\n\n sample_weight : array-like, shape = [n_samples], optional\n Sample weights.\n\n Returns\n -------\n score : float\n Mean accuracy of self.predict(X) wrt. y.\n\n \"\"\"\n from .metrics import accuracy_score\n return accuracy_score(y, self.predict(X), sample_weight=sample_weight)\n\n\n###############################################################################\nclass RegressorMixin(object):\n \"\"\"Mixin class for all regression estimators in scikit-learn.\"\"\"\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Returns the coefficient of determination R^2 of the prediction.\n\n The coefficient R^2 is defined as (1 - u/v), where u is the regression\n sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual\n sum of squares ((y_true - y_true.mean()) ** 2).sum().\n Best possible score is 1.0, lower values are worse.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n Test samples.\n\n y : array-like, shape = (n_samples,)\n True values for X.\n\n sample_weight : array-like, shape = [n_samples], optional\n Sample weights.\n\n Returns\n -------\n score : float\n R^2 of self.predict(X) wrt. 
y.\n \"\"\"\n\n from .metrics import r2_score\n return r2_score(y, self.predict(X), sample_weight=sample_weight)\n\n\n###############################################################################\nclass ClusterMixin(object):\n \"\"\"Mixin class for all cluster estimators in scikit-learn.\"\"\"\n def fit_predict(self, X, y=None):\n \"\"\"Performs clustering on X and returns cluster labels.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n Input data.\n\n Returns\n -------\n y : ndarray, shape (n_samples,)\n cluster labels\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n self.fit(X)\n return self.labels_\n\n\nclass BiclusterMixin(object):\n \"\"\"Mixin class for all bicluster estimators in scikit-learn\"\"\"\n\n @property\n def biclusters_(self):\n \"\"\"Convenient way to get row and column indicators together.\n\n Returns the ``rows_`` and ``columns_`` members.\n \"\"\"\n return self.rows_, self.columns_\n\n def get_indices(self, i):\n \"\"\"Row and column indices of the i'th bicluster.\n\n Only works if ``rows_`` and ``columns_`` attributes exist.\n\n Returns\n -------\n row_ind : np.array, dtype=np.intp\n Indices of rows in the dataset that belong to the bicluster.\n col_ind : np.array, dtype=np.intp\n Indices of columns in the dataset that belong to the bicluster.\n\n \"\"\"\n from .cluster.bicluster.utils import get_indices\n return get_indices(self.rows_[i], self.columns_[i])\n\n def get_shape(self, i):\n \"\"\"Shape of the i'th bicluster.\n\n Returns\n -------\n shape : (int, int)\n Number of rows and columns (resp.) in the bicluster.\n \"\"\"\n from .cluster.bicluster.utils import get_shape\n return get_shape(self.rows_[i], self.columns_[i])\n\n def get_submatrix(self, i, data):\n \"\"\"Returns the submatrix corresponding to bicluster `i`.\n\n Works with sparse matrices. 
Only works if ``rows_`` and\n ``columns_`` attributes exist.\n\n \"\"\"\n from .cluster.bicluster.utils import get_submatrix\n return get_submatrix(self.rows_[i], self.columns_[i], data)\n\n\n###############################################################################\nclass TransformerMixin(object):\n \"\"\"Mixin class for all transformers in scikit-learn.\"\"\"\n\n def fit_transform(self, X, y=None, **fit_params):\n \"\"\"Fit to data, then transform it.\n\n Fits transformer to X and y with optional parameters fit_params\n and returns a transformed version of X.\n\n Parameters\n ----------\n X : numpy array of shape [n_samples, n_features]\n Training set.\n\n y : numpy array of shape [n_samples]\n Target values.\n\n Returns\n -------\n X_new : numpy array of shape [n_samples, n_features_new]\n Transformed array.\n\n \"\"\"\n # non-optimized default implementation; override when a better\n # method is possible for a given clustering algorithm\n if y is None:\n # fit method of arity 1 (unsupervised transformation)\n return self.fit(X, **fit_params).transform(X)\n else:\n # fit method of arity 2 (supervised transformation)\n return self.fit(X, y, **fit_params).transform(X)\n\n\n###############################################################################\nclass MetaEstimatorMixin(object):\n \"\"\"Mixin class for all meta estimators in scikit-learn.\"\"\"\n # this is just a tag for the moment\n\n\n###############################################################################\n# XXX: Temporary solution to figure out if an estimator is a classifier\n\ndef _get_sub_estimator(estimator):\n \"\"\"Returns the final estimator if there is any.\"\"\"\n if hasattr(estimator, 'estimator'):\n # GridSearchCV and other CV-tuned estimators\n return _get_sub_estimator(estimator.estimator)\n if hasattr(estimator, 'steps'):\n # Pipeline\n return _get_sub_estimator(estimator.steps[-1][1])\n return estimator\n\n\ndef is_classifier(estimator):\n \"\"\"Returns True if the given estimator is (probably) a classifier.\"\"\"\n estimator = _get_sub_estimator(estimator)\n return isinstance(estimator, ClassifierMixin)\n"
] | [
[
"scipy.sparse.issparse",
"numpy.get_printoptions",
"numpy.set_printoptions",
"numpy.all"
]
] |
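The mixin pattern in base.py above (fit_predict and fit_transform built on top of fit) is what downstream estimators inherit. A minimal usage sketch, assuming scikit-learn is installed; the MeanCenterer class is hypothetical and only illustrates the arity-1 dispatch inside fit_transform:

import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin

class MeanCenterer(BaseEstimator, TransformerMixin):
    """Toy transformer: fit learns per-column means, transform subtracts them."""
    def fit(self, X, y=None):
        self.mean_ = np.asarray(X).mean(axis=0)
        return self  # returning self is what lets fit(X).transform(X) chain

    def transform(self, X):
        return np.asarray(X) - self.mean_

X = np.array([[1.0, 2.0], [3.0, 4.0]])
X_new = MeanCenterer().fit_transform(X)  # arity-1 path: fit(X).transform(X)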
jupiterman/Data-Transfer-Neural-Way | [
"d900a5552c78f81450c3918640aa3e9210a57488"
] | [
"script_helper/Script/Network.py"
] | [
"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nfrom scipy.misc import imread, imresize, imsave, fromimage, toimage\nfrom scipy.optimize import fmin_l_bfgs_b\nimport numpy as np\nimport time\nimport argparse\nimport warnings\n\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom keras.layers.convolutional import Convolution2D, AveragePooling2D, MaxPooling2D\nfrom keras import backend as K\nfrom keras.utils.data_utils import get_file\nfrom keras.utils.layer_utils import convert_all_kernels_in_model\n\n\"\"\"\nNeural Style Transfer with Keras 2.0.5\n\nBased on:\nhttps://github.com/fchollet/keras/blob/master/examples/neural_style_transfer.py\n\n-----------------------------------------------------------------------------------------------------------------------\n\"\"\"\n\nTHEANO_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'\nTF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\nTH_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_th_dim_ordering_th_kernels_notop.h5'\nTF_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\n\nparser = argparse.ArgumentParser(description='Neural style transfer with Keras.')\nparser.add_argument('base_image_path', metavar='base', type=str,\n help='Path to the image to transform.')\n\nparser.add_argument('syle_image_paths', metavar='ref', nargs='+', type=str,\n help='Path to the style reference image.')\n\nparser.add_argument('result_prefix', metavar='res_prefix', type=str,\n help='Prefix for the saved results.')\n\nparser.add_argument(\"--style_masks\", type=str, default=None, nargs='+',\n help='Masks for style images')\n\nparser.add_argument(\"--content_mask\", type=str, default=None,\n help='Masks for the content image')\n\nparser.add_argument(\"--color_mask\", type=str, default=None,\n help='Mask for color preservation')\n\nparser.add_argument(\"--image_size\", dest=\"img_size\", default=400, type=int,\n help='Minimum image size')\n\nparser.add_argument(\"--content_weight\", dest=\"content_weight\", default=0.025, type=float,\n help=\"Weight of content\")\n\nparser.add_argument(\"--style_weight\", dest=\"style_weight\", nargs='+', default=[1], type=float,\n help=\"Weight of style, can be multiple for multiple styles\")\n\nparser.add_argument(\"--style_scale\", dest=\"style_scale\", default=1.0, type=float,\n help=\"Scale the weighing of the style\")\n\nparser.add_argument(\"--total_variation_weight\", dest=\"tv_weight\", default=8.5e-5, type=float,\n help=\"Total Variation weight\")\n\nparser.add_argument(\"--num_iter\", dest=\"num_iter\", default=10, type=int,\n help=\"Number of iterations\")\n\nparser.add_argument(\"--model\", default=\"vgg16\", type=str,\n help=\"Choices are 'vgg16' and 'vgg19'\")\n\nparser.add_argument(\"--content_loss_type\", default=0, type=int,\n help='Can be one of 0, 1 or 2. 
Readme contains the required information of each mode.')\n\nparser.add_argument(\"--rescale_image\", dest=\"rescale_image\", default=\"False\", type=str,\n                    help=\"Rescale image after execution to original dimensions\")\n\nparser.add_argument(\"--rescale_method\", dest=\"rescale_method\", default=\"bilinear\", type=str,\n                    help=\"Rescale image algorithm\")\n\nparser.add_argument(\"--maintain_aspect_ratio\", dest=\"maintain_aspect_ratio\", default=\"True\", type=str,\n                    help=\"Maintain aspect ratio of loaded images\")\n\nparser.add_argument(\"--content_layer\", dest=\"content_layer\", default=\"conv5_2\", type=str,\n                    help=\"Content layer used for content loss.\")\n\nparser.add_argument(\"--init_image\", dest=\"init_image\", default=\"content\", type=str,\n                    help=\"Initial image used to generate the final image. Options are 'content', 'noise', or 'gray'\")\n\nparser.add_argument(\"--pool_type\", dest=\"pool\", default=\"max\", type=str,\n                    help='Pooling type. Can be \"ave\" for average pooling or \"max\" for max pooling')\n\nparser.add_argument('--preserve_color', dest='color', default=\"False\", type=str,\n                    help='Preserve original color in image')\n\nparser.add_argument('--min_improvement', default=0.0, type=float,\n                    help='Defines minimum improvement required to continue script')\n\n\ndef str_to_bool(v):\n    return v.lower() in (\"true\", \"yes\", \"t\", \"1\")\n\n''' Arguments '''\n\nargs = parser.parse_args()\nbase_image_path = args.base_image_path\nstyle_reference_image_paths = args.syle_image_paths\nresult_prefix = args.result_prefix\n\nstyle_image_paths = []\nfor style_image_path in style_reference_image_paths:\n    style_image_paths.append(style_image_path)\n\nstyle_masks_present = args.style_masks is not None\nmask_paths = []\n\nif style_masks_present:\n    for mask_path in args.style_masks:\n        mask_paths.append(mask_path)\n\nif style_masks_present:\n    assert len(style_image_paths) == len(mask_paths), \"Wrong number of style masks provided.\\n\" \\\n                                                      \"Number of style images = %d, \\n\" \\\n                                                      \"Number of style mask paths = %d.\" % \\\n                                                      (len(style_image_paths), len(mask_paths))\n\ncontent_mask_present = args.content_mask is not None\ncontent_mask_path = args.content_mask\n\n\ncolor_mask_present = args.color_mask is not None\n\nrescale_image = str_to_bool(args.rescale_image)\nmaintain_aspect_ratio = str_to_bool(args.maintain_aspect_ratio)\npreserve_color = str_to_bool(args.color)\n\n# these are the weights of the different loss components\ncontent_weight = args.content_weight\ntotal_variation_weight = args.tv_weight\n\nstyle_weights = []\n\nif len(style_image_paths) != len(args.style_weight):\n    print(\"Mismatch in number of style images provided and number of style weights provided. \\n\"\n          \"Found %d style images and %d style weights. \\n\"\n          \"Equally distributing weights to all other styles.\" % (len(style_image_paths), len(args.style_weight)))\n\n    weight_sum = sum(args.style_weight) * args.style_scale\n    count = len(style_image_paths)\n\n    for i in range(len(style_image_paths)):\n        style_weights.append(weight_sum / count)\nelse:\n    for style_weight in args.style_weight:\n        style_weights.append(style_weight * args.style_scale)\n\n# Decide pooling function\npooltype = str(args.pool).lower()\nassert pooltype in [\"ave\", \"max\"], 'Pooling argument is wrong. 
Needs to be either \"ave\" or \"max\".'\n\npooltype = 1 if pooltype == \"ave\" else 0\n\nread_mode = \"gray\" if args.init_image == \"gray\" else \"color\"\n\n# dimensions of the generated picture.\nimg_width = img_height = 0\n\nimg_WIDTH = img_HEIGHT = 0\naspect_ratio = 0\n\nassert args.content_loss_type in [0, 1, 2], \"Content Loss Type must be one of 0, 1 or 2\"\n\n\n# util function to open, resize and format pictures into appropriate tensors\ndef preprocess_image(image_path, load_dims=False, read_mode=\"color\"):\n    global img_width, img_height, img_WIDTH, img_HEIGHT, aspect_ratio\n\n    mode = \"RGB\" if read_mode == \"color\" else \"L\"\n    img = imread(image_path, mode=mode)  # Prevents crashes due to PNG images (ARGB)\n\n    if mode == \"L\":\n        # Expand the 1 channel grayscale to 3 channel grayscale image\n        temp = np.zeros(img.shape + (3,), dtype=np.uint8)\n        temp[:, :, 0] = img\n        temp[:, :, 1] = img.copy()\n        temp[:, :, 2] = img.copy()\n\n        img = temp\n\n    if load_dims:\n        img_WIDTH = img.shape[0]\n        img_HEIGHT = img.shape[1]\n        aspect_ratio = float(img_HEIGHT) / img_WIDTH\n\n        img_width = args.img_size\n        if maintain_aspect_ratio:\n            img_height = int(img_width * aspect_ratio)\n        else:\n            img_height = args.img_size\n\n    img = imresize(img, (img_width, img_height)).astype('float32')\n\n    # RGB -> BGR\n    img = img[:, :, ::-1]\n\n    img[:, :, 0] -= 103.939\n    img[:, :, 1] -= 116.779\n    img[:, :, 2] -= 123.68\n\n    if K.image_dim_ordering() == \"th\":\n        img = img.transpose((2, 0, 1)).astype('float32')\n\n    img = np.expand_dims(img, axis=0)\n    return img\n\n\n# util function to convert a tensor into a valid image\ndef deprocess_image(x):\n    if K.image_dim_ordering() == \"th\":\n        x = x.reshape((3, img_width, img_height))\n        x = x.transpose((1, 2, 0))\n    else:\n        x = x.reshape((img_width, img_height, 3))\n\n    x[:, :, 0] += 103.939\n    x[:, :, 1] += 116.779\n    x[:, :, 2] += 123.68\n\n    # BGR -> RGB\n    x = x[:, :, ::-1]\n\n    x = np.clip(x, 0, 255).astype('uint8')\n    return x\n\n\n# util function to preserve image color\ndef original_color_transform(content, generated, mask=None):\n    generated = fromimage(toimage(generated, mode='RGB'), mode='YCbCr')  # Convert to YCbCr color space\n\n    if mask is None:\n        generated[:, :, 1:] = content[:, :, 1:]  # Generated CbCr = Content CbCr\n    else:\n        width, height, channels = generated.shape\n\n        for i in range(width):\n            for j in range(height):\n                if mask[i, j] == 1:\n                    generated[i, j, 1:] = content[i, j, 1:]\n\n    generated = fromimage(toimage(generated, mode='YCbCr'), mode='RGB')  # Convert to RGB color space\n    return generated\n\n\ndef load_mask(mask_path, shape, return_mask_img=False):\n    if K.image_dim_ordering() == \"th\":\n        _, channels, width, height = shape\n    else:\n        _, width, height, channels = shape\n\n    mask = imread(mask_path, mode=\"L\")  # Grayscale mask load\n    mask = imresize(mask, (width, height)).astype('float32')\n\n    # Perform binarization of mask (cover every value; the old <=127 / >128 pair left 128 unassigned)\n    mask[mask <= 127] = 0\n    mask[mask > 127] = 255\n\n    mask_max = np.amax(mask)\n    mask /= mask_max\n\n    if return_mask_img: return mask\n\n    mask_shape = shape[1:]\n\n    mask_tensor = np.empty(mask_shape)\n\n    for i in range(channels):\n        if K.image_dim_ordering() == \"th\":\n            mask_tensor[i, :, :] = mask\n        else:\n            mask_tensor[:, :, i] = mask\n\n    return mask_tensor\n\n\ndef pooling_func(x):\n    if pooltype == 1:\n        return AveragePooling2D((2, 2), strides=(2, 2))(x)\n    else:\n        return MaxPooling2D((2, 2), strides=(2, 2))(x)\n\n\n# get tensor representations of our images\nbase_image = K.variable(preprocess_image(base_image_path, True, 
read_mode=read_mode))\n\nstyle_reference_images = []\nfor style_path in style_image_paths:\n style_reference_images.append(K.variable(preprocess_image(style_path)))\n\n# this will contain our generated image\nif K.image_dim_ordering() == 'th':\n combination_image = K.placeholder((1, 3, img_width, img_height))\nelse:\n combination_image = K.placeholder((1, img_width, img_height, 3))\n\nimage_tensors = [base_image]\nfor style_image_tensor in style_reference_images:\n image_tensors.append(style_image_tensor)\nimage_tensors.append(combination_image)\n\nnb_tensors = len(image_tensors)\nnb_style_images = nb_tensors - 2 # Content and Output image not considered\n\n# combine the various images into a single Keras tensor\ninput_tensor = K.concatenate(image_tensors, axis=0)\n\nif K.image_dim_ordering() == \"th\":\n shape = (nb_tensors, 3, img_width, img_height)\nelse:\n shape = (nb_tensors, img_width, img_height, 3)\n\nip = Input(tensor=input_tensor, batch_shape=shape)\n\n# build the VGG16 network with our 3 images as input\nx = Convolution2D(64, (3, 3), activation='relu', name='conv1_1', padding='same')(ip)\nx = Convolution2D(64, (3, 3), activation='relu', name='conv1_2', padding='same')(x)\nx = pooling_func(x)\n\nx = Convolution2D(128, (3, 3), activation='relu', name='conv2_1', padding='same')(x)\nx = Convolution2D(128, (3, 3), activation='relu', name='conv2_2', padding='same')(x)\nx = pooling_func(x)\n\nx = Convolution2D(256, (3, 3), activation='relu', name='conv3_1', padding='same')(x)\nx = Convolution2D(256, (3, 3), activation='relu', name='conv3_2', padding='same')(x)\nx = Convolution2D(256, (3, 3), activation='relu', name='conv3_3', padding='same')(x)\nif args.model == \"vgg19\":\n x = Convolution2D(256, (3, 3), activation='relu', name='conv3_4', padding='same')(x)\nx = pooling_func(x)\n\nx = Convolution2D(512, (3, 3), activation='relu', name='conv4_1', padding='same')(x)\nx = Convolution2D(512, (3, 3), activation='relu', name='conv4_2', padding='same')(x)\nx = Convolution2D(512, (3, 3), activation='relu', name='conv4_3', padding='same')(x)\nif args.model == \"vgg19\":\n x = Convolution2D(512, (3, 3), activation='relu', name='conv4_4', padding='same')(x)\nx = pooling_func(x)\n\nx = Convolution2D(512, (3, 3), activation='relu', name='conv5_1', padding='same')(x)\nx = Convolution2D(512, (3, 3), activation='relu', name='conv5_2', padding='same')(x)\nx = Convolution2D(512, (3, 3), activation='relu', name='conv5_3', padding='same')(x)\nif args.model == \"vgg19\":\n x = Convolution2D(512, (3, 3), activation='relu', name='conv5_4', padding='same')(x)\nx = pooling_func(x)\n\nmodel = Model(ip, x)\n\nif K.image_dim_ordering() == \"th\":\n if args.model == \"vgg19\":\n weights = get_file('vgg19_weights_th_dim_ordering_th_kernels_notop.h5', TH_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')\n else:\n weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', THEANO_WEIGHTS_PATH_NO_TOP, cache_subdir='models')\nelse:\n if args.model == \"vgg19\":\n weights = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')\n else:\n weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')\n\nmodel.load_weights(weights)\n\nif K.backend() == 'tensorflow' and K.image_dim_ordering() == \"th\":\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image dimension ordering convention '\n '(`image_dim_ordering=\"th\"`). 
'\n 'For best performance, set '\n '`image_dim_ordering=\"tf\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n convert_all_kernels_in_model(model)\n\nprint('Model loaded.')\n\n# get the symbolic outputs of each \"key\" layer (we gave them unique names).\noutputs_dict = dict([(layer.name, layer.output) for layer in model.layers])\nshape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])\n\n# compute the neural style loss\n# first we need to define 4 util functions\n\n# the gram matrix of an image tensor (feature-wise outer product)\ndef gram_matrix(x):\n assert K.ndim(x) == 3\n if K.image_dim_ordering() == \"th\":\n features = K.batch_flatten(x)\n else:\n features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))\n gram = K.dot(features, K.transpose(features))\n return gram\n\n\n# the \"style loss\" is designed to maintain\n# the style of the reference image in the generated image.\n# It is based on the gram matrices (which capture style) of\n# feature maps from the style reference image\n# and from the generated image\ndef style_loss(style, combination, mask_path=None, nb_channels=None):\n assert K.ndim(style) == 3\n assert K.ndim(combination) == 3\n\n if content_mask_path is not None:\n content_mask = K.variable(load_mask(content_mask_path, nb_channels))\n combination = combination * K.stop_gradient(content_mask)\n del content_mask\n\n if mask_path is not None:\n style_mask = K.variable(load_mask(mask_path, nb_channels))\n style = style * K.stop_gradient(style_mask)\n if content_mask_path is None:\n combination = combination * K.stop_gradient(style_mask)\n del style_mask\n\n S = gram_matrix(style)\n C = gram_matrix(combination)\n channels = 3\n size = img_width * img_height\n return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))\n\n\n# an auxiliary loss function\n# designed to maintain the \"content\" of the\n# base image in the generated image\ndef content_loss(base, combination):\n channel_dim = 0 if K.image_dim_ordering() == \"th\" else -1\n\n try:\n channels = K.int_shape(base)[channel_dim]\n except TypeError:\n channels = K.shape(base)[channel_dim]\n size = img_width * img_height\n\n if args.content_loss_type == 1:\n multiplier = 1. / (2. * (channels ** 0.5) * (size ** 0.5))\n elif args.content_loss_type == 2:\n multiplier = 1. 
/ (channels * size)\n else:\n multiplier = 1.\n\n return multiplier * K.sum(K.square(combination - base))\n\n\n# the 3rd loss function, total variation loss,\n# designed to keep the generated image locally coherent\ndef total_variation_loss(x):\n assert K.ndim(x) == 4\n if K.image_dim_ordering() == 'th':\n a = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, 1:, :img_height - 1])\n b = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, :img_width - 1, 1:])\n else:\n a = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, 1:, :img_height - 1, :])\n b = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, :img_width - 1, 1:, :])\n return K.sum(K.pow(a + b, 1.25))\n\n\n# combine these loss functions into a single scalar\nloss = K.variable(0.)\nlayer_features = outputs_dict[args.content_layer] # 'conv5_2' or 'conv4_2'\nbase_image_features = layer_features[0, :, :, :]\ncombination_features = layer_features[nb_tensors - 1, :, :, :]\nloss += content_weight * content_loss(base_image_features,\n combination_features)\nstyle_masks = []\nif style_masks_present:\n style_masks = mask_paths # If mask present, pass dictionary of masks to style loss\nelse:\n style_masks = [None for _ in range(nb_style_images)] # If masks not present, pass None to the style loss\n\nchannel_index = 1 if K.image_dim_ordering() == \"th\" else -1\n\nfeature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']\nfor layer_name in feature_layers:\n layer_features = outputs_dict[layer_name]\n shape = shape_dict[layer_name]\n combination_features = layer_features[nb_tensors - 1, :, :, :]\n\n style_reference_features = layer_features[1:nb_tensors - 1, :, :, :]\n sl = []\n for j in range(nb_style_images):\n sl.append(style_loss(style_reference_features[j], combination_features, style_masks[j], shape))\n\n for j in range(nb_style_images):\n loss += (style_weights[j] / len(feature_layers)) * sl[j]\n\nloss += total_variation_weight * total_variation_loss(combination_image)\n\n# get the gradients of the generated image wrt the loss\ngrads = K.gradients(loss, combination_image)\n\noutputs = [loss]\nif type(grads) in {list, tuple}:\n outputs += grads\nelse:\n outputs.append(grads)\n\nf_outputs = K.function([combination_image], outputs)\n\n\ndef eval_loss_and_grads(x):\n if K.image_dim_ordering() == 'th':\n x = x.reshape((1, 3, img_width, img_height))\n else:\n x = x.reshape((1, img_width, img_height, 3))\n outs = f_outputs([x])\n loss_value = outs[0]\n if len(outs[1:]) == 1:\n grad_values = outs[1].flatten().astype('float64')\n else:\n grad_values = np.array(outs[1:]).flatten().astype('float64')\n return loss_value, grad_values\n\n\n# this Evaluator class makes it possible\n# to compute loss and gradients in one pass\n# while retrieving them via two separate functions,\n# \"loss\" and \"grads\". 
This is done because scipy.optimize\n# requires separate functions for loss and gradients,\n# but computing them separately would be inefficient.\nclass Evaluator(object):\n    def __init__(self):\n        self.loss_value = None\n        self.grad_values = None\n\n    def loss(self, x):\n        assert self.loss_value is None\n        loss_value, grad_values = eval_loss_and_grads(x)\n        self.loss_value = loss_value\n        self.grad_values = grad_values\n        return self.loss_value\n\n    def grads(self, x):\n        assert self.loss_value is not None\n        grad_values = np.copy(self.grad_values)\n        self.loss_value = None\n        self.grad_values = None\n        return grad_values\n\n\nevaluator = Evaluator()\n\n# run scipy-based optimization (L-BFGS) over the pixels of the generated image\n# so as to minimize the neural style loss\n\n\nif \"content\" in args.init_image or \"gray\" in args.init_image:\n    x = preprocess_image(base_image_path, True, read_mode=read_mode)\nelif \"noise\" in args.init_image:\n    x = np.random.uniform(0, 255, (1, img_width, img_height, 3)) - 128.\n\n    if K.image_dim_ordering() == \"th\":\n        x = x.transpose((0, 3, 1, 2))\nelse:\n    print(\"Using initial image : \", args.init_image)\n    x = preprocess_image(args.init_image, read_mode=read_mode)\n\n# We require original image if we are to preserve color in YCbCr mode\nif preserve_color:\n    content = imread(base_image_path, mode=\"YCbCr\")\n    content = imresize(content, (img_width, img_height))\n\n    if color_mask_present:\n        if K.image_dim_ordering() == \"th\":\n            color_mask_shape = (None, None, img_width, img_height)\n        else:\n            color_mask_shape = (None, img_width, img_height, None)\n\n        color_mask = load_mask(args.color_mask, color_mask_shape, return_mask_img=True)\n    else:\n        color_mask = None\nelse:\n    color_mask = None\n\nnum_iter = args.num_iter\nprev_min_val = -1\n\nimprovement_threshold = float(args.min_improvement)\n\nfor i in range(num_iter):\n    print(\"Starting iteration %d of %d\" % ((i + 1), num_iter))\n    start_time = time.time()\n\n    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)\n\n    if prev_min_val == -1:\n        prev_min_val = min_val\n\n    improvement = (prev_min_val - min_val) / prev_min_val * 100\n\n    print('Current loss value:', min_val, \" Improvement : %0.3f\" % improvement, \"%\")\n    prev_min_val = min_val\n    # save current generated image\n    img = deprocess_image(x.copy())\n\n    if preserve_color and content is not None:\n        img = original_color_transform(content, img, mask=color_mask)\n\n    if not rescale_image:\n        img_ht = int(img_width * aspect_ratio)\n        print(\"Rescaling Image to (%d, %d)\" % (img_width, img_ht))\n        img = imresize(img, (img_width, img_ht), interp=args.rescale_method)\n\n    if rescale_image:\n        print(\"Rescaling Image to (%d, %d)\" % (img_WIDTH, img_HEIGHT))\n        img = imresize(img, (img_WIDTH, img_HEIGHT), interp=args.rescale_method)\n\n    fname = result_prefix + '_at_iteration_%d.png' % (i + 1)\n    imsave(fname, img)\n    end_time = time.time()\n    print('Image saved as', fname)\n    print('Iteration %d completed in %ds' % (i + 1, end_time - start_time))\n\n    # use != for float comparison; \"is not 0.0\" tests identity, not value\n    if improvement_threshold != 0.0:\n        if improvement < improvement_threshold and improvement != 0.0:\n            print(\"Improvement (%f) is less than improvement threshold (%f). Early stopping script.\" % (\n                improvement, improvement_threshold))\n            exit()\n"
] | [
[
"numpy.random.uniform",
"scipy.misc.toimage",
"numpy.empty",
"numpy.zeros",
"numpy.array",
"scipy.misc.imsave",
"scipy.misc.imresize",
"numpy.copy",
"numpy.expand_dims",
"numpy.clip",
"numpy.amax",
"scipy.misc.imread"
]
] |
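The core of Network.py's style loss is the gram matrix of a feature map. A minimal NumPy re-derivation, as a sketch rather than the Keras graph code above; it assumes a channels-last (H, W, C) feature map and derives channels/size from it, whereas the script hardcodes channels = 3 and size = img_width * img_height:

import numpy as np

def gram_matrix_np(features):
    # features: (H, W, C) -> flatten spatial dims, outer product over channels
    F = features.reshape(-1, features.shape[-1]).T  # (C, H*W)
    return F @ F.T                                  # (C, C)

def style_loss_np(style_feats, comb_feats):
    S, C = gram_matrix_np(style_feats), gram_matrix_np(comb_feats)
    channels = style_feats.shape[-1]
    size = style_feats.shape[0] * style_feats.shape[1]
    return np.sum((S - C) ** 2) / (4.0 * channels ** 2 * size ** 2)

rng = np.random.default_rng(0)
loss = style_loss_np(rng.normal(size=(8, 8, 3)), rng.normal(size=(8, 8, 3)))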
Hazboun6/pta_sim | [
"cf8676e23056586ecb35a030dbaad45a1f985764"
] | [
"pta_sim/pint_sim.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\nimport numpy as np\nimport astropy.units as u\nfrom astropy.time import Time, TimeDelta\n\nfrom pint.residuals import resids\nimport pint.toa as toa\nfrom pint import models\n\n__all__ = ['make_ideal',\n 'createfourierdesignmatrix_red',\n 'add_rednoise',\n 'add_dm_rednoise',\n 'add_efac',\n 'add_equad',\n 'add_ecorr']\n\ndef make_ideal(toas, model, iterations=2):\n '''\n Takes a pint.toas and pint.model object and effectively zeros out the residuals.\n '''\n for ii in range(iterations):\n rs=resids(toas, model)\n toas.adjust_TOAs(TimeDelta(-1.0*rs.time_resids))\n\ndef createfourierdesignmatrix_red(toas, nmodes=30, Tspan=None,\n logf=False, fmin=None, fmax=None,\n pshift=False, modes=None):\n \"\"\"\n Construct fourier design matrix from eq 11 of Lentati et al, 2013\n\n Parameters\n ----------\n\n toas : array\n Vector of time series in seconds.\n\n nmodes : int\n Number of fourier coefficients to use.\n\n Tspan : float\n Option to us some other Tspan [s]\n\n logf : bool\n Use log frequency spacing.\n\n fmin : float\n Lower sampling frequency.\n\n fmax : float\n Upper sampling frequency.\n\n pshift : bool\n Option to add random phase shift.\n\n modes : array\n Option to provide explicit list or array of sampling frequencies.\n\n Returns\n -------\n F : array\n fourier design matrix, [NTOAs x 2 nfreqs].\n f : arraty\n Sampling frequencies, [2 nfreqs].\n \"\"\"\n\n T = Tspan if Tspan is not None else toas.max() - toas.min()\n\n # define sampling frequencies\n if modes is not None:\n nmodes = len(modes)\n f = modes\n elif fmin is None and fmax is None and not logf:\n # make sure partially overlapping sets of modes\n # have identical frequencies\n f = 1.0 * np.arange(1, nmodes + 1) / T\n else:\n # more general case\n\n if fmin is None:\n fmin = 1 / T\n\n if fmax is None:\n fmax = nmodes / T\n\n if logf:\n f = np.logspace(np.log10(fmin), np.log10(fmax), nmodes)\n else:\n f = np.linspace(fmin, fmax, nmodes)\n\n # add random phase shift to basis functions\n ranphase = (np.random.uniform(0.0, 2 * np.pi, nmodes)\n if pshift else np.zeros(nmodes))\n\n Ffreqs = np.repeat(f, 2)\n\n N = len(toas)\n F = np.zeros((N, 2 * nmodes))\n\n # The sine/cosine modes\n F[:,::2] = np.sin(2*np.pi*toas[:,None]*f[None,:] +\n ranphase[None,:])\n F[:,1::2] = np.cos(2*np.pi*toas[:,None]*f[None,:] +\n ranphase[None,:])\n\n return F, Ffreqs\n\ndef add_rednoise(TOAs, A, gamma, components=30,\n seed=None, modes=None, Tspan=None):\n \"\"\"Add red noise with P(f) = A^2 / (12 pi^2) (f * year)^-gamma,\n using `components` Fourier bases.\n Optionally take a pseudorandom-number-generator seed.\"\"\"\n\n # nobs=len(psr.toas)\n nobs = len(TOAs.table)\n\n day_in_sec = 86400\n year_in_sec = 365.25*day_in_sec\n fyr = 1 / year_in_sec\n\n if seed is not None:\n np.random.seed(seed)\n if modes is not None:\n print('Must use linear spacing.')\n\n toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec #to sec\n Tspan = toas.max() - toas.min()\n F, freqs = createfourierdesignmatrix_red(toas,Tspan=Tspan,modes=modes)\n prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3\n y = np.sqrt(prior) * np.random.randn(freqs.size)\n dt = np.dot(F,y) * u.s\n TOAs.adjust_TOAs(TimeDelta(dt.to('day')))\n\ndef add_dm_rednoise(TOAs, A, gamma, components=30, rf_ref=1400,\n seed=None, modes=None, Tspan=None, useDM=False):\n \"\"\"Add red noise with P(f) = A^2 / (12 pi^2) (f year)^-gamma,\n using `components` Fourier bases.\n Optionally take a pseudorandom-number-generator 
seed.\"\"\"\n\n # nobs=len(psr.toas)\n nobs = len(TOAs.table)\n radio_freqs = TOAs.table['freq']\n if useDM:\n rf_ref = 4.15e3\n chrom = rf_ref**2 / radio_freqs**2\n day_in_sec = 86400\n year_in_sec = 365.25*day_in_sec\n fyr = 1 / year_in_sec\n\n if seed is not None:\n np.random.seed(seed)\n\n toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec #to sec\n\n Tspan = toas.max() - toas.min()\n\n F, freqs = createfourierdesignmatrix_red(toas,Tspan=Tspan,modes=modes)\n prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3\n\n y = np.sqrt(prior) * np.random.randn(freqs.size)\n dt = chrom.quantity.value * np.dot(F,y) * u.s\n TOAs.adjust_TOAs(TimeDelta(dt.to('day')))\n\ndef add_equad(TOAs, equad, flagid=None, flags=None, seed=None):\n \"\"\"Add quadrature noise of rms `equad` [s].\n Optionally take a pseudorandom-number-generator seed.\"\"\"\n\n if seed is not None:\n np.random.seed(seed)\n\n # default equadvec\n equadvec = np.zeros(TOAs.ntoas)\n\n # check that equad is scalar if flags is None\n if flags is None:\n if not np.isscalar(equad):\n raise ValueError('ERROR: If flags is None, equad must be a scalar')\n else:\n equadvec = np.ones(TOAs.ntoas) * equad\n\n if flags is not None and flagid is not None and not np.isscalar(equad):\n if len(equad) == len(flags):\n for ct, flag in enumerate(flags):\n ind = flag == np.array([f['f'] for f\n in TOAs.table['flags'].data])\n equadvec[ind] = equad[ct]\n\n equadvec = equadvec * u.s * np.random.randn(TOAs.ntoas)\n TOAs.adjust_TOAs(TimeDelta(equadvec.to('day')))\n\n\ndef add_efac(TOAs, efac, flagid=None, flags=None, seed=None):\n \"\"\"Add quadrature noise of rms `equad` [s].\n Optionally take a pseudorandom-number-generator seed.\"\"\"\n\n if seed is not None:\n np.random.seed(seed)\n\n # default equadvec\n efacvec = np.zeros(TOAs.ntoas)\n\n # check that equad is scalar if flags is None\n if flags is None:\n if not np.isscalar(efac):\n raise ValueError('ERROR: If flags is None, efac must be a scalar')\n else:\n efacvec = np.ones(TOAs.ntoas) * efac\n\n if flags is not None and flagid is not None and not np.isscalar(efac):\n if len(efac) == len(flags):\n for ct, flag in enumerate(flags):\n ind = flag == np.array([f['f'] for f\n in TOAs.table['flags'].data])\n efacvec[ind] = efac[ct]\n\n dt = efacvec * TOAs.get_errors().to('s') * np.random.randn(TOAs.ntoas)\n TOAs.adjust_TOAs(TimeDelta(dt.to('day')))\n\ndef quantize(times, flags=None, dt=1.0):\n isort = np.argsort(times)\n\n bucket_ref = [times[isort[0]]]\n bucket_ind = [[isort[0]]]\n\n for i in isort[1:]:\n if times[i] - bucket_ref[-1] < dt:\n bucket_ind[-1].append(i)\n else:\n bucket_ref.append(times[i])\n bucket_ind.append([i])\n\n avetoas = np.array([np.mean(times[l]) for l in bucket_ind],'d')\n if flags is not None:\n aveflags = np.array([flags[l[0]] for l in bucket_ind])\n\n U = np.zeros((len(times),len(bucket_ind)),'d')\n for i,l in enumerate(bucket_ind):\n U[l,i] = 1\n\n if flags is not None:\n return avetoas, aveflags, U\n else:\n return avetoas, U\n\n\ndef add_ecorr(TOAs, ecorr, flagid=None, flags=None, coarsegrain=1*u.s, seed=None):\n \"\"\"Add correlated quadrature noise of rms `ecorr` [s],\n with coarse-graining time `coarsegrain` [days].\n Optionally take a pseudorandom-number-generator seed.\"\"\"\n\n if seed is not None:\n np.random.seed(seed)\n\n times = np.array(TOAs.table['tdbld'], dtype='float64')\n if flags is None:\n t, U = quantize(times, dt=coarsegrain.to('day').value)\n elif flags is not None and flagid is not None:\n flagvals = 
np.array([f[flagid] for f in TOAs.table['flags'].data])\n t, f, U = quantize(times, flagvals, dt=coarsegrain.to('day').value)\n\n # default ecorr value\n ecorrvec = np.zeros(len(t))\n\n # check that ecorr is scalar if flags is None\n if flags is None:\n if not np.isscalar(ecorr):\n raise ValueError('ERROR: If flags is None, ecorr must be a scalar')\n else:\n ecorrvec = np.ones(len(t)) * ecorr\n\n if flags is not None and flagid is not None and not np.isscalar(ecorr):\n if len(ecorr) == len(flags):\n for ct, flag in enumerate(flags):\n ind = flag == np.array(f)\n ecorrvec[ind] = ecorr[ct]\n\n ecorrvec = np.dot(U * ecorrvec, np.random.randn(U.shape[1])) * u.s\n TOAs.adjust_TOAs(TimeDelta(ecorrvec.to('day')))\n"
] | [
[
"numpy.random.uniform",
"numpy.sqrt",
"numpy.ones",
"numpy.zeros",
"numpy.argsort",
"numpy.repeat",
"numpy.cos",
"numpy.random.seed",
"numpy.random.randn",
"numpy.arange",
"numpy.log10",
"numpy.array",
"numpy.sin",
"numpy.dot",
"numpy.isscalar",
"numpy.linspace",
"numpy.mean"
]
] |
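The red-noise machinery in pint_sim.py reduces to a sine/cosine design matrix plus a power-law prior on its coefficients. A self-contained numerical sketch with toy TOAs (no pint objects required); the amplitude and spectral index below are illustrative, not defaults from the module:

import numpy as np

rng = np.random.default_rng(42)
toas_sec = np.sort(rng.uniform(0, 3.15e7, 100))  # ~1 yr of fake TOAs, in seconds
T = toas_sec.max() - toas_sec.min()
nmodes = 30
f = np.arange(1, nmodes + 1) / T                 # linearly spaced frequencies

F = np.zeros((toas_sec.size, 2 * nmodes))        # [NTOAs x 2 nmodes] basis
F[:, ::2] = np.sin(2 * np.pi * toas_sec[:, None] * f[None, :])
F[:, 1::2] = np.cos(2 * np.pi * toas_sec[:, None] * f[None, :])

A, gamma = 1e-14, 13.0 / 3.0                     # illustrative GWB-like values
fyr = 1.0 / (365.25 * 86400)
freqs = np.repeat(f, 2)
prior = A**2 * (freqs / fyr) ** (-gamma) / (12 * np.pi**2 * T) * (1 / fyr) ** 3
delay_sec = F @ (np.sqrt(prior) * rng.standard_normal(freqs.size))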
Keesiu/meta-kaggle | [
"87de739aba2399fd31072ee81b391f9b7a63f540",
"87de739aba2399fd31072ee81b391f9b7a63f540",
"87de739aba2399fd31072ee81b391f9b7a63f540",
"87de739aba2399fd31072ee81b391f9b7a63f540"
] | [
"data/external/repositories_2to3/137656/blundercheck-master/combine/data_prep/prepare_pgmodel.py",
"data/external/repositories_2to3/132160/kaggle-ndsb-master/average_predictions.py",
"data/external/repositories_2to3/197978/Grasp-and-lift-EEG-challenge-master/lvl3/genYOLO.py",
"data/external/repositories_2to3/236942/homesite-master/python/build_meta_keras.py"
] | [
"#!/usr/bin/env python\r\n\r\nfrom pandas import *\r\nfrom numpy import *\r\nfrom djeval import *\r\nimport csv, code\r\nimport pickle as pickle\r\nfrom sklearn.externals import joblib\r\n\r\nNUM_GAMES=50000\r\n\r\n\r\ndef shell():\r\n vars = globals()\r\n vars.update(locals())\r\n shell = code.InteractiveConsole(vars)\r\n shell.interact()\r\n\r\nmsg(\"Hi! Reading eheaders\")\r\neheaders_filename = '/data/eheaders.p'\r\neheaders_file = open(eheaders_filename, 'r')\r\neheaders = pickle.load(eheaders_file)\r\nelos = eheaders['elos']\r\nresult = eheaders['result']\r\ncheckmate = eheaders['checkmate']\r\nopenings = eheaders['openings']\r\nocount = eheaders['opening_count']\r\n\r\nmsg(\"Hi! Reading crunched movescores from %s\" % sys.argv[1])\r\ncrunched_path = sys.argv[1]\r\ncrunched_df = read_csv(crunched_path, sep=',', engine='c', index_col=['gamenum', 'side'])\r\n\r\ndo_gb = False\r\nif do_gb:\r\n msg(\"Hi! Reading GB scores from %s\" % sys.argv[2])\r\n gb_path = sys.argv[2]\r\n gb_df = read_csv(gb_path, sep=',', engine='c', index_col=['gamenum'])\r\n\r\nmsg(\"Hi! Reading depthstats\")\r\ndepthstats_path = '/data/depthstats.csv'\r\ncolumns = [\r\n'gamenum',\r\n'side',\r\n'mean_depth',\r\n'mean_seldepth',\r\n'mean_depths_agreeing_ratio',\r\n'mean_deepest_agree_ratio',\r\n'pct_sanemoves',\r\n'gamelength',\r\n'mean_num_bestmoves',\r\n'mean_num_bestmove_changes',\r\n'mean_bestmove_depths_agreeing',\r\n'mean_deepest_change',\r\n'mean_deepest_change_ratio',\r\n]\r\ndepthstats_df = read_csv(depthstats_path, sep=' ', engine='c', header=None, names=columns, index_col=False)\r\ndepthstats_df = depthstats_df.set_index(['gamenum', 'side'])\r\n# we have the gamelength column in another df, drop it here to avoid conflicts\r\ndepthstats_df.drop('gamelength', axis=1, inplace=True)\r\n\r\ndo_material = True\r\nif do_material:\r\n msg(\"Hi! 
Reading material\")\r\n material_path = '/data/material.csv'\r\n columns = [\r\n 'gamenum',\r\n 'material_break_0',\r\n 'material_break_1',\r\n 'material_break_2',\r\n 'material_break_3',\r\n 'material_break_4',\r\n 'opening_length',\r\n 'midgame_length',\r\n 'endgame_length',\r\n 'mean_acwsa',\r\n 'mean_acwsa_0',\r\n 'mean_acwsa_1',\r\n 'mean_acwsa_2',\r\n 'mean_acwsa_3',\r\n 'mean_acwsa_4',\r\n 'mean_acwsa_5',\r\n 'mean_acwsa_6',\r\n 'mean_acwsa_7',\r\n 'mean_acwsa_8',\r\n 'mean_acwsa_9',\r\n ]\r\n material_df = read_csv(material_path, sep=' ', engine='c', header=None, names=columns, index_col=False)\r\n material_df = material_df.set_index(['gamenum'])\r\n material_df = material_df.reindex(list(range(1, NUM_GAMES+1)))\r\n material_df = material_df.fillna(material_df.mean())\r\n\r\nmsg(\"Reading ELOscored data\")\r\neloscored_cols = [\r\n 'gamenum',\r\n 'final_elo',\r\n 'final_ply',\r\n 'final_num_games',\r\n 'final_elo_stdev',\r\n 'elopath_min',\r\n 'elopath_max',\r\n]\r\neloscored_df = read_csv('/data/data.pgn.eloscored21', sep=',', engine='c', header=None, names=eloscored_cols, index_col=False)\r\neloscored_df = eloscored_df.set_index(['gamenum'])\r\n\r\nmsg(\"Reading ELOscored data 4\")\r\neloscored4_cols = [\r\n 'gamenum',\r\n 'final_elo',\r\n 'final_ply',\r\n 'final_num_games',\r\n 'final_elo_stdev',\r\n]\r\neloscored4_cols[1:] = [x + '_elo4' for x in eloscored4_cols[1:]]\r\neloscored4_df = read_csv('/data/data.pgn.eloscored4', sep=',', engine='c', header=None, names=eloscored4_cols, index_col=False)\r\neloscored4_df = eloscored4_df.set_index(['gamenum'])\r\n\r\nmsg(\"Reading ELOscored data 10\")\r\neloscored10_cols = [\r\n 'gamenum',\r\n 'final_elo',\r\n 'final_ply',\r\n 'final_num_games',\r\n 'final_elo_stdev',\r\n]\r\neloscored10_cols[1:] = [x + '_elo10' for x in eloscored10_cols[1:]]\r\neloscored10_df = read_csv('/data/data.pgn.eloscored10', sep=',', engine='c', header=None, names=eloscored10_cols, index_col=False)\r\neloscored10_df = eloscored10_df.set_index(['gamenum'])\r\n\r\ndo_movemodel=True\r\nif do_movemodel:\r\n msg(\"Hi! Reading moveaggs\")\r\n move_aggs = joblib.load('/data/move_aggs.p')\r\n move_aggs.fillna(move_aggs.mean(), inplace=True)\r\n move_aggs = move_aggs[['mean', 'median', '25', '10', 'min', 'max', 'stdev']]\r\n msg(\"Hi! Reading wmoveaggs\")\r\n wmove_aggs = joblib.load('/data/wmove_aggs.p')\r\n wmove_aggs.fillna(wmove_aggs.mean(), inplace=True)\r\n wmove_aggs.rename(columns={'elo_pred': 'moveelo_weighted'}, inplace=True)\r\n wmove_aggs = wmove_aggs['moveelo_weighted']\r\n\r\ndo_elochunk = False\r\nif do_elochunk:\r\n ch_agg_df = joblib.load('/data/chunk_aggs.p')\r\n ch_agg_df.index = ch_agg_df.index.droplevel('elo')\r\n ch_agg_df.columns = ['elochunk_' + x for x in ch_agg_df.columns]\r\n\r\n\r\nmsg(\"Hi! 
Setting up playergame rows\")\r\n\r\nif do_elochunk:\r\n elorange_cols = list(ch_agg_df.columns.values)\r\n msg(\"elorange cols are %s\" % elorange_cols)\r\n\r\nmsg('Preparing ELO df')\r\nelo_rows = [[x[0][0], x[0][1], x[1]] for x in list(elos.items())]\r\nelo_df = DataFrame(elo_rows, columns=['gamenum','side','elo'])\r\nelo_df.set_index(['gamenum','side'], inplace=True)\r\n\r\nmsg('Joining DFs')\r\nsupplemental_dfs = [depthstats_df, elo_df, crunched_df]\r\nif do_movemodel:\r\n supplemental_dfs.extend([move_aggs, wmove_aggs])\r\nif do_elochunk:\r\n supplemental_dfs.append(ch_agg_df)\r\nmega_df = concat(supplemental_dfs, axis=1)\r\nif do_material:\r\n mega_df = mega_df.join(material_df, how='outer')\r\nmega_df = mega_df.join(eloscored_df, how='outer')\r\nmega_df = mega_df.join(eloscored4_df, how='outer')\r\nmega_df = mega_df.join(eloscored10_df, how='outer')\r\nif do_gb:\r\n mega_df = mega_df.join(gb_df, how='outer')\r\n\r\nyy_df = mega_df\r\nmsg(\"hi, columns are %s\" % yy_df.columns)\r\n\r\n# TODO confirm that all columns are there\r\n\r\n\r\ndef opening_feature(opening):\r\n if ocount[opening] < 20:\r\n return 'rare'\r\n if ocount[opening] < 200:\r\n return 'uncommon'\r\n return opening\r\n\r\nmsg(\"Hi! Computing additional features\")\r\nyy_df['opening_feature'] = [opening_feature(openings[x]) for x in yy_df.index.get_level_values('gamenum')]\r\nyy_df['opening_count'] = [ocount[openings[x]] for x in yy_df.index.get_level_values('gamenum')]\r\nyy_df['any_grit'] = (yy_df['grit'] > 0)\r\nyy_df['major_grit'] = (yy_df['grit'] > 5)\r\nyy_df['nmerror'] = log((-1 * yy_df['meanerror']).clip(1,60)).clip(1,4) - 2.53\r\nyy_df['premature_quit'] = (yy_df['gameoutcome'] == -1) & (yy_df['my_final_equity'] > -100)\r\nyy_df['drawn_game'] = (yy_df['gameoutcome'] == 0)\r\nyy_df['ended_by_checkmate'] = yy_df['won_by_checkmate'] | yy_df['lost_by_checkmate']\r\nyy_df['noblunders'] = (yy_df['blunderrate'] == 0)\r\nyy_df['final_equity'] = yy_df['my_final_equity'].abs().clip(0,300)\r\nyy_df['early_lead'] = yy_df['early_lead'].clip(0,100)\r\nyy_df['mean_depth_clipped'] = yy_df['mean_depth'].clip(0,25)\r\nyy_df['gamelength_clipped'] = yy_df['gamelength'].clip(20,200)\r\n\r\n\r\n# prepare opponent_df with selected info about opponent\r\nopponent_columns = ['meanerror', 'blunderrate', 'perfectrate', 'grit', 'meanecho', 'mate_created', 'mate_destroyed', 'q_error_one', 'q_error_two', 'stdeverror', 'elo', 'any_grit', 'noblunders', 'nmerror', 'mean_depths_agreeing_ratio', 'mean_deepest_agree_ratio', 'pct_sanemoves']\r\nif do_elochunk:\r\n opponent_columns.extend(elorange_cols)\r\nopponent_df = yy_df[opponent_columns]\r\nopponent_df = opponent_df.reset_index()\r\nopponent_df['side'] = opponent_df['side'] * -1\r\nopponent_df.set_index(['gamenum', 'side'], inplace=True)\r\nopponent_df.columns = ['opponent_' + x for x in opponent_df.columns]\r\nyy_df = concat([yy_df, opponent_df], axis=1)\r\n\r\n# more derived columns that use opponent comparisons\r\nyy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo']).clip(-500, 500)\r\nyy_df['max_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].max(axis=1)\r\nyy_df['min_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].min(axis=1)\r\nyy_df['max_meanecho'] = yy_df[['meanecho', 'opponent_meanecho']].max(axis=1)\r\nyy_df['elo_avg'] = (yy_df['elo'] + yy_df['opponent_elo'])/2.0\r\nyy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo'])\r\nyy_df['winner_elo_advantage'] = yy_df['elo_advantage'] * yy_df['gameoutcome']\r\n\r\n\r\nmsg(\"Hi! 
Computing dummy variables\")\r\ncategorical_features = ['opening_feature']\r\ndummies = get_dummies(yy_df[categorical_features]).astype(np.int8)\r\nyy_df = yy_df.join(dummies)\r\n\r\n# fill in missing values\r\nmsg(\"Hi! Filling in missing values\")\r\nfull_index = pandas.MultiIndex.from_product([list(range(1,NUM_GAMES + 1)), [1,-1]], names=['gamenum', 'side'])\r\nyy_df = yy_df.reindex(full_index)\r\nyy_elo = yy_df['elo'].copy(True)\r\nyy_df.fillna(yy_df.mean(numeric_only=True), inplace=True)\r\nyy_df.fillna(False, inplace=True)\r\nyy_df['elo'] = yy_elo\r\n\r\n# stupid patch for some stupid opening feature that got assigned to False by fillna ?!!?!?!?\r\nyy_df.loc[yy_df['opening_feature'] == False,'opening_feature'] = 'rare'\r\n\r\nmsg(\"Hi! Writing yy_df to disk\")\r\nyy_df.to_pickle(sys.argv[3])\r\n\r\nmsg(\"Column counts are:\")\r\ncounts = yy_df.count(axis=0)\r\nprint(counts)\r\n\r\n",
"import numpy as np\r\nimport sys\r\n\r\nif len(sys.argv) < 3:\r\n sys.exit(\"Usage: python average_predictions.py <predictions_file1> [predictions_file_2] [...] <output_file>\")\r\n\r\npredictions_paths = sys.argv[1:-1]\r\ntarget_path = sys.argv[-1]\r\npredictions = [np.load(path) for path in predictions_paths]\r\navg_predictions = np.mean(predictions, axis=0)\r\nnp.save(target_path, avg_predictions)\r\n",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 31 16:01:25 2015.\r\n\r\n@author: rc,alex\r\n\"\"\"\r\nimport os\r\nimport sys\r\nif __name__ == '__main__' and __package__ is None:\r\n filePath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\r\n sys.path.append(filePath)\r\n\r\nfrom glob import glob\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.metrics import roc_auc_score\r\n\r\nfrom preprocessing.aux import getEventNames\r\n\r\ncols = getEventNames()\r\nids = np.load('../infos_test.npy')\r\nsubject_test = ids[:, 1]\r\nseries_test = ids[:, 2]\r\nids = ids[:, 0]\r\n\r\nlabels = np.load('../infos_val.npy')\r\nsubjects = labels[:, -2]\r\nseries = labels[:, -1]\r\nlabels = labels[:, :-2]\r\n\r\nsubs_val = []\r\nsubs_test = []\r\n\r\n# get all submissions\r\nsubs = glob('models/*.yml')\r\n# remove folder and extension\r\nsubs = [sub[7:-4] for sub in subs]\r\n\r\n# load subs\r\nprint('Computing an average of %d submissions:' % len(subs))\r\nfor sub in subs:\r\n print(sub)\r\n subs_val.append(np.load('val/val_%s.npy' % sub)[0])\r\n subs_test.append(pd.read_csv('submissions/%s.csv' % sub,\r\n index_col=0).values)\r\n\r\n# average all models\r\nens_val = np.mean(subs_val, axis=0)\r\nens_test = np.mean(subs_test, axis=0)\r\n\r\n# stats of the average\r\naucs_val = [np.mean(roc_auc_score(labels[series == s], ens_val[series == s]))\r\n for s in [7, 8]]\r\nprint('AUC: %.5f (SD: %.5f), s7: %.5f ; s8: %.5f' % (np.mean(aucs_val),\r\n np.std(aucs_val),\r\n aucs_val[0], aucs_val[1]))\r\n\r\n# save\r\nnp.save('val/val_YOLO.npy', [ens_val])\r\nsub = pd.DataFrame(data=ens_test, index=ids, columns=cols)\r\nsub.to_csv('submissions/YOLO.csv', index_label='id', float_format='%.8f')\r\n",
"\"\"\"\r\nCreated on Thu Dec 10 10:44:27 2015\r\n\r\n@author: konrad\r\n\"\"\"\r\nfrom keras.regularizers import l2, activity_l2\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense, Dropout, Activation\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.optimizers import Adadelta\r\nfrom itertools import product\r\nimport datetime\r\n\r\nif __name__ == '__main__':\r\n\r\n ## settings\r\n projPath = './'\r\n dataset_version = \"kb9\"\r\n model_type = \"keras\"\r\n seed_value = 123\r\n todate = datetime.datetime.now().strftime(\"%Y%m%d\")\r\n np.random.seed(seed_value)\r\n\r\n # data\r\n # read the training and test sets\r\n xtrain = pd.read_csv(projPath + 'input/xtrain_'+ dataset_version + '.csv')\r\n id_train = xtrain.QuoteNumber\r\n ytrain = xtrain.QuoteConversion_Flag\r\n xtrain.drop('QuoteNumber', axis = 1, inplace = True)\r\n xtrain.drop('QuoteConversion_Flag', axis = 1, inplace = True)\r\n\r\n xtest = pd.read_csv(projPath + 'input/xtest_'+ dataset_version + '.csv')\r\n id_test = xtest.QuoteNumber\r\n xtest.drop('QuoteNumber', axis = 1, inplace = True)\r\n\r\n # folds\r\n xfolds = pd.read_csv(projPath + 'input/xfolds.csv')\r\n # work with 5-fold split\r\n fold_index = xfolds.fold5\r\n fold_index = np.array(fold_index) - 1\r\n n_folds = len(np.unique(fold_index))\r\n\r\n ## model\r\n nb_classes = 2\r\n dims = xtrain.shape[1]\r\n epochs = 30\r\n\r\n # parameter grids\r\n drop_vals = [0.01, 0.2]\r\n dec_vals = [0.995, 0.8]\r\n lr_vals = [1, 0.5, 0.1]\r\n reg_vals = [1e-5,1e-3]\r\n lay_vals = [1,2]\r\n param_grid = tuple([drop_vals, dec_vals, lr_vals, reg_vals, lay_vals])\r\n param_grid = list(product(*param_grid))\r\n\r\n # storage structure for forecasts\r\n mvalid = np.zeros((xtrain.shape[0],len(param_grid)))\r\n mfull = np.zeros((xtest.shape[0],len(param_grid)))\r\n\r\n ## build 2nd level forecasts\r\n for i in range(len(param_grid)):\r\n print(\"processing parameter combo:\", param_grid[i])\r\n print(\"Combo:\", i, \"of\", len(param_grid))\r\n # loop over folds\r\n # Recompile model on each fold\r\n for j in range(0,n_folds):\r\n # configure model with j-th combo of parameters\r\n x = param_grid[i]\r\n model = Sequential()\r\n model.add(Dense(dims * x[4], input_shape=(dims,),W_regularizer=l2(x[3])))\r\n model.add(BatchNormalization())\r\n model.add(Dropout(x[0]))\r\n model.add(Dense(nb_classes))\r\n model.add(Activation('softmax'))\r\n opt=Adadelta(lr=x[2],decay=x[1],epsilon=1e-5)\r\n model.compile(loss='binary_crossentropy', optimizer=opt)\r\n\r\n idx0 = np.where(fold_index != j)\r\n idx1 = np.where(fold_index == j)\r\n x0 = np.array(xtrain)[idx0,:][0]\r\n x1 = np.array(xtrain)[idx1,:][0]\r\n y0 = np.array(ytrain)[idx0]\r\n y1 = np.array(ytrain)[idx1]\r\n y00 = np.zeros((x0.shape[0],2))\r\n y00[:,0] = y0; y00[:,1] = 1-y0\r\n # fit the model on observations associated with subject whichSubject in this fold\r\n model.fit(x0, y00, nb_epoch=epochs, batch_size=1000)\r\n mvalid[idx1,i] = model.predict_proba(x1)[:,0]\r\n del model\r\n print(\"finished fold:\", j)\r\n\r\n print(\"Building full prediction model for test set.\")\r\n # configure model with j-th combo of parameters\r\n x = param_grid[i]\r\n model = Sequential()\r\n model.add(Dense(dims * x[4], input_shape=(dims,),W_regularizer=l2(x[3])))\r\n #model.add(PReLU())\r\n model.add(BatchNormalization())\r\n model.add(Dropout(x[0]))\r\n model.add(Dense(nb_classes))\r\n model.add(Activation('softmax'))\r\n 
opt=Adadelta(lr=x[2],decay=x[1],epsilon=1e-5)\r\n model.compile(loss='binary_crossentropy', optimizer=opt)\r\n\r\n # fit on complete dataset\r\n ytrain0 = np.zeros((xtrain.shape[0],2))\r\n ytrain0[:,0] = ytrain\r\n ytrain0[:,1] = 1- ytrain\r\n model.fit(np.array(xtrain), ytrain0,nb_epoch=epochs, batch_size=1000)\r\n mfull[:,i] = model.predict_proba(np.array(xtest))[:,0]\r\n del model\r\n print(\"finished full prediction\")\r\n\r\n ## store the results\r\n # add indices etc\r\n mvalid = pd.DataFrame(mvalid)\r\n mvalid.columns = [model_type + str(i) for i in range(0, mvalid.shape[1])]\r\n mvalid['QuoteNumber'] = id_train\r\n mvalid['QuoteConversion_Flag'] = ytrain\r\n\r\n mfull = pd.DataFrame(mfull)\r\n mfull.columns = [model_type + str(i) for i in range(0, mfull.shape[1])]\r\n mfull['QuoteNumber'] = id_test\r\n\r\n\r\n # save the files\r\n mvalid.to_csv(projPath + 'metafeatures/prval_' + model_type + todate + '_data' + dataset_version + '_seed' + str(seed_value) + '.csv', index = False, header = True)\r\n mfull.to_csv(projPath + 'metafeatures/prfull_' + model_type + todate + '_data' + dataset_version + '_seed' + str(seed_value) + '.csv', index = False, header = True)\r\n"
] | [
[
"sklearn.externals.joblib.load"
],
[
"numpy.save",
"numpy.load",
"numpy.mean"
],
[
"numpy.load",
"numpy.save",
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.metrics.roc_auc_score",
"numpy.std",
"numpy.mean"
],
[
"numpy.zeros",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.random.seed",
"numpy.array",
"numpy.where",
"numpy.unique"
]
] |
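Of the four scripts in this row, average_predictions.py is the smallest moving part: an element-wise mean over model outputs. A sketch with in-memory arrays standing in for the .npy files it would load from disk:

import numpy as np

# Two hypothetical model outputs for the same five samples.
preds = [np.array([0.2, 0.9, 0.4, 0.7, 0.1]),
         np.array([0.4, 0.7, 0.6, 0.5, 0.3])]
avg_predictions = np.mean(preds, axis=0)  # -> array([0.3, 0.8, 0.5, 0.6, 0.2])
np.save("avg_predictions.npy", avg_predictions)  # mirrors the script's final step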
tacox5/elastic_pendulum | [
"c2058444ca161a420466b531b008fe247a87db60"
] | [
"pyelastic/pendulum.py"
] | [
"import os\nimport glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import solve_ivp\nfrom scipy.interpolate import interp1d\nfrom .settings import *\n\n\nclass ElasticPendulum:\n \"\"\"Class that handles the simulation of springy, double pendulums. This class\n handles a number of initial conditions from starting angle to pendulum mass\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Animate\n\n Args:\n alpha_0 : float\n Inital angle of the top pendulum in radians\n beta_0 : float\n Inital angle of the bottom pendulum in radians\n alpha_1 : float, default=True\n Inital angular velocity of the top pendulum in radians\n beta_1 : float, default=True\n Inital angular velocity of the top pendulum in radians\n k1 : boolean, default=True\n Spring constant of the top pendulum in arbitrary units\n k2 : boolean, default=True\n Spring constant of the top pendulum in arbitrary units\n l1 : boolean, default=True\n Length of the top pendulum in arbitrary units\n l2 : boolean, default=True\n Length of the bottom pendulum in arbitrary units\n m1 : float, default=1.0\n Mass of the top pendulum in arbitrary units\n m2 : float, default=1.0\n Mass of the bottom pendulum in arbitrary units\n a0 : boolean, default=True\n b0 : boolean, default=True\n a1 : boolean, default=True\n b1 : boolean, default=True\n t_end : float, default=2\n Length of the simulation in seconds\n fps : int, default=24\n Frame rate of the video simulation. Sets the resolution of the integrator\n and helps to visualize the results later\n \"\"\"\n prop_defaults = {\n \"alpha_0\": np.random.uniform(-np.pi, np.pi),\n \"beta_0\": np.random.uniform(-np.pi, np.pi),\n \"alpha_1\": 0.0,\n \"beta_1\": 0.0,\n \"k1\": np.random.uniform(35, 55),\n \"k2\": np.random.uniform(35, 55),\n \"l1\": 1.0,\n \"l2\": 1.0,\n \"m1\": 1.0,\n \"m2\": 1.0,\n \"a0\": 1.0,\n \"b0\": 1.0,\n \"a1\": 1.0,\n \"b1\": 1.0,\n \"t_end\": 2,\n \"fps\": 24,\n \"g\": GRAVITY,\n }\n\n for (prop, default) in prop_defaults.items():\n setattr(self, prop, kwargs.get(prop, default))\n\n self.dt = 1.0 / self.fps\n self.t_eval = np.arange(0, self.t_end, self.dt)\n\n def _spherical_to_cartesian(self, array, interpolate=True):\n \"\"\"Transforms from 2D spherical coordinate system to a cartesian coordinate system\n\n Args:\n array : np.ndarray\n Output array from integration function in spherical coordinates\n\n interpolate : boolean, default=True\n\n\n Returns:\n None\n \"\"\"\n x1 = array[:, 2] * np.sin(array[:, 0])\n x2 = x1 + array[:, 3] * np.sin(array[:, 1])\n y1 = -array[:, 2] * np.cos(array[:, 0])\n y2 = y1 - array[:, 3] * np.cos(array[:, 1])\n\n if interpolate:\n self.fx1 = interp1d(np.arange(0, x1.shape[0]), x1)\n self.fy1 = interp1d(np.arange(0, x1.shape[0]), y1)\n self.fx2 = interp1d(np.arange(0, x1.shape[0]), x2)\n self.fy2 = interp1d(np.arange(0, x1.shape[0]), y2)\n\n return x1, x2, y1, y2\n\n def _alpha_pp(self, t, Y):\n \"\"\" \"\"\"\n alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, _ = Y\n return -(\n self.g * self.m1 * np.sin(alpha_0)\n - self.k2 * self.l2 * np.sin(alpha_0 - beta_0)\n + self.k2 * b0 * np.sin(alpha_0 - beta_0)\n + 2 * self.m1 * a1 * alpha_1\n ) / (self.m1 * a0)\n\n def _beta_pp(self, t, Y):\n \"\"\" \"\"\"\n alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y\n return (\n -self.k1 * self.l1 * np.sin(alpha_0 - beta_0)\n + self.k1 * a0 * np.sin(alpha_0 - beta_0)\n - 2.0 * self.m1 * b1 * beta_1\n ) / (self.m1 * b0)\n\n def _a_pp(self, t, Y):\n \"\"\" \"\"\"\n alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y\n return (\n 
self.k1 * self.l1\n            + self.g * self.m1 * np.cos(alpha_0)\n            - self.k2 * self.l2 * np.cos(alpha_0 - beta_0)\n            + self.k2 * b0 * np.cos(alpha_0 - beta_0)\n            + a0 * (-self.k1 + self.m1 * alpha_1 ** 2)\n        ) / self.m1\n\n    def _b_pp(self, t, Y):\n        \"\"\"Radial (spring-stretch) acceleration of the bottom spring.\"\"\"\n        alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y\n        return (\n            self.k2 * self.l2 * self.m1\n            + self.k2 * self.l2 * self.m2 * np.cos(alpha_0 - beta_0)\n            + self.k1 * self.m2 * a0 * np.cos(alpha_0 - beta_0)\n            - b0 * (self.k2 * (self.m1 + self.m2) - self.m1 * self.m2 * beta_1 ** 2)\n        ) / (self.m1 * self.m2)\n\n    def _lagrangian(self, t, Y):\n        \"\"\"Set of differential equations to integrate to solve the equations of motion\n        for the pendulum masses.\n\n        Args:\n            t : np.ndarray\n                Evaluation time array\n            Y : np.ndarray\n                Initial conditions of the pendulum masses\n        Returns:\n            list :\n                Evaluation of the differential equations\n        \"\"\"\n        return [\n            Y[1],\n            self._alpha_pp(t, Y),\n            Y[3],\n            self._beta_pp(t, Y),\n            Y[5],\n            self._a_pp(t, Y),\n            Y[7],\n            self._b_pp(t, Y),\n        ]\n\n    def integrate(self, method=\"LSODA\", interpolate=True):\n        \"\"\"Integrate the equations of motion from t=0 to t_end.\n\n        Args:\n            method : str, default=LSODA\n                Integrator type to integrate the set of differential equations. Options\n                are: RK45, RK23, DOP853, Radau, BDF, and LSODA. For more information, see\n                scipy.integrate.solve_ivp documentation\n            interpolate : boolean, default=True\n                Whether to interpolate the final results. Useful for animation\n\n        Returns:\n            None\n        \"\"\"\n        Y0 = [\n            self.alpha_0,\n            self.alpha_1,\n            self.beta_0,\n            self.beta_1,\n            self.a0,\n            self.a1,\n            self.b0,\n            self.b1,\n        ]\n        self.solution = solve_ivp(\n            self._lagrangian, [0, self.t_end], Y0, t_eval=self.t_eval, method=method\n        )\n        self.x1, self.x2, self.y1, self.y2 = self._spherical_to_cartesian(\n            self.solution.y[[0, 2, 4, 6]].T, interpolate=interpolate\n        )\n"
] | [
[
"numpy.random.uniform",
"scipy.integrate.solve_ivp",
"numpy.cos",
"numpy.arange",
"numpy.sin"
]
] |
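ElasticPendulum.integrate is a thin wrapper over scipy.integrate.solve_ivp with a t_eval grid derived from the frame rate. The same pattern on a plain harmonic oscillator, as a runnable sketch (the rhs and constants are illustrative, not the coupled spring-pendulum system above):

import numpy as np
from scipy.integrate import solve_ivp

def rhs(t, y):
    # y = [x, v] for a single spring-mass system: x'' = -(k/m) x
    k, m = 45.0, 1.0
    return [y[1], -(k / m) * y[0]]

fps, t_end = 24, 2.0
t_eval = np.arange(0, t_end, 1.0 / fps)  # one sample per animation frame
sol = solve_ivp(rhs, [0, t_end], [1.0, 0.0], t_eval=t_eval, method="LSODA")
x_of_t = sol.y[0]  # positions on the frame grid, ready to animate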
Mark-Kinyua/python_public | [
"25c4eff3a6f93c35a949f94a2f9c3df3202a3113"
] | [
"motion_detector/main.py"
] | [
"import numpy as np\nimport cv2\n\n# A motion detecetor, yup... lol.\n# Remember to use an old python version < 3.6\n\n\nimage_path = 'room_people.jpg' # Photo\n\n# The model was already formulated, just need to loaad it into the system.\n\nprototxt_path = 'models/MobileNetSSD_deploy.prototxt' # Load Model\nmodel_path = 'models/MobileNetSSD_deploy.caffemodel'\nmin_confidence = 0.2\n\n# Things it can identify\nclasses = [\"background\",\"aeroplane\",\"bicycle\",\"bird\",\"boat\",\"bottle\",\"bus\",\"car\",\"cat\",\"chair\",\"cow\",\"diningtable\",\"dog\",\"horse\",\n \"motorbike\",\"person\",\"pottedplant\",\"sheep\",\"sofa\",\"train\",\"tvmonitor\"]\n\nnp.random.seed(543210) # Same Colors\ncolors = np.random.uniform(0, 255, size=(len(classes), 3))\n\nnet = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)\n\n# img = cv2.imread(image_path)\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n\n _, img = cap.read()\n\n height, width = img.shape[0], img.shape[1]\n\n blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 0.007, (300,300), 130)\n\n net.setInput(blob)\n\n detected_objects = net.forward()\n\n for i in range(detected_objects.shape[2]):\n\n confidence = detected_objects[0][0][i][2]\n\n if confidence > min_confidence:\n class_index = int(detected_objects[0,0,i,1])\n\n upper_left_x = int(detected_objects[0, 0, i, 3] * width)\n upper_left_y = int(detected_objects[0, 0, i, 3] * height)\n lower_right_x = int(detected_objects[0, 0, i, 5] * width)\n lower_right_y = int(detected_objects[0, 0, i, 6] * height)\n\n prediction_text = f\"{classes[class_index]}: {confidence:.2f}%\"\n cv2.rectangle(img, (upper_left_x, upper_left_y), (lower_right_x, lower_right_y), colors[class_index], 3)\n cv2.putText(img, prediction_text, (upper_left_x,\n upper_left_y- 15 if upper_left_y > 30 else upper_left_y + 15),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, colors[class_index], 2)\n\n cv2.imshow(\"Detected Objects\", img)\n cv2.waitKey(5)\n\ncv2.destroyAllWindows()\ncap.release()\n"
] | [
[
"numpy.random.seed"
]
] |
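The detection parsing in main.py relies on the MobileNet-SSD output layout: a (1, 1, N, 7) tensor whose rows are [image_id, class_id, confidence, x1, y1, x2, y2] with normalized coordinates. A sketch of that unpacking with a fabricated detections array, so it runs without a camera or the Caffe model:

import numpy as np

detections = np.zeros((1, 1, 2, 7))
detections[0, 0, 0] = [0, 15, 0.90, 0.10, 0.20, 0.50, 0.80]  # class 15 = "person"
detections[0, 0, 1] = [0, 12, 0.10, 0.30, 0.30, 0.40, 0.40]  # below threshold

h, w, min_confidence = 480, 640, 0.2
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if confidence > min_confidence:
        x1, y1, x2, y2 = (detections[0, 0, i, 3:7] * [w, h, w, h]).astype(int)
        print(f"class {int(detections[0, 0, i, 1])}: ({x1}, {y1}) -> ({x2}, {y2})")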
demetoir/MLtools | [
"8c42fcd4cc71728333d9c116ade639fe57d50d37"
] | [
"script/sklearn_like_toolkit/warpper/skClf_wrapper/skMultinomial_NBClf.py"
] | [
"from hyperopt import hp\r\nfrom sklearn.naive_bayes import MultinomialNB as _skMultinomialNB\r\n\r\nfrom script.sklearn_like_toolkit.warpper.base.BaseWrapperClf import BaseWrapperClf\r\nfrom script.sklearn_like_toolkit.warpper.base.MixIn import MetaBaseWrapperClfWithABC\r\n\r\n\r\nclass skMultinomial_NBClf(BaseWrapperClf, _skMultinomialNB, metaclass=MetaBaseWrapperClfWithABC):\r\n def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):\r\n _skMultinomialNB.__init__(self, alpha, fit_prior, class_prior)\r\n BaseWrapperClf.__init__(self)\r\n\r\n HyperOpt_space = {\r\n 'alpha': hp.loguniform('alpha', -8, 1),\r\n }\r\n\r\n tuning_grid = {\r\n 'alpha': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0],\r\n # 'class_prior': None,\r\n # 'fit_prior': True\r\n }\r\n"
] | [
[
"sklearn.naive_bayes.MultinomialNB.__init__"
]
] |
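The HyperOpt_space dict in the wrapper plugs directly into hyperopt's fmin. A minimal sketch with a stand-in objective; a real objective would cross-validate skMultinomial_NBClf on actual data:

from hyperopt import fmin, hp, tpe

space = {'alpha': hp.loguniform('alpha', -8, 1)}  # same prior as HyperOpt_space

def objective(params):
    # Stand-in loss with a known minimum near alpha = 0.5; replace with a CV score.
    return (params['alpha'] - 0.5) ** 2

best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=25)
print(best)  # e.g. {'alpha': 0.5...}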
umangino/pandas | [
"c492672699110fe711b7f76ded5828ff24bce5ab"
] | [
"pandas/tests/indexes/multi/test_formats.py"
] | [
"import warnings\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Index,\n MultiIndex,\n)\n\n\ndef test_format(idx):\n idx.format()\n idx[:0].format()\n\n\ndef test_format_integer_names():\n index = MultiIndex(\n levels=[[0, 1], [0, 1]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1]\n )\n index.format(names=True)\n\n\ndef test_format_sparse_config(idx):\n warn_filters = warnings.filters\n warnings.filterwarnings(\"ignore\", category=FutureWarning, module=\".*format\")\n # GH1538\n with pd.option_context(\"display.multi_sparse\", False):\n result = idx.format()\n assert result[1] == \"foo two\"\n\n warnings.filters = warn_filters\n\n\ndef test_format_sparse_display():\n index = MultiIndex(\n levels=[[0, 1], [0, 1], [0, 1], [0]],\n codes=[\n [0, 0, 0, 1, 1, 1],\n [0, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0],\n ],\n )\n\n result = index.format()\n assert result[3] == \"1 0 0 0\"\n\n\ndef test_repr_with_unicode_data():\n with pd.option_context(\"display.encoding\", \"UTF-8\"):\n d = {\"a\": [\"\\u05d0\", 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n index = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\n assert \"\\\\\" not in repr(index) # we don't want unicode-escaped\n\n\ndef test_repr_roundtrip_raises():\n mi = MultiIndex.from_product([list(\"ab\"), range(3)], names=[\"first\", \"second\"])\n msg = \"Must pass both levels and codes\"\n with pytest.raises(TypeError, match=msg):\n eval(repr(mi))\n\n\ndef test_unicode_string_with_unicode():\n d = {\"a\": [\"\\u05d0\", 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\n idx = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\n str(idx)\n\n\ndef test_repr_max_seq_item_setting(idx):\n # GH10182\n idx = idx.repeat(50)\n with pd.option_context(\"display.max_seq_items\", None):\n repr(idx)\n assert \"...\" not in str(idx)\n\n\nclass TestRepr:\n def test_unicode_repr_issues(self):\n levels = [Index([\"a/\\u03c3\", \"b/\\u03c3\", \"c/\\u03c3\"]), Index([0, 1])]\n codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]\n index = MultiIndex(levels=levels, codes=codes)\n\n repr(index.levels)\n repr(index.get_level_values(1))\n\n def test_repr_max_seq_items_equal_to_n(self, idx):\n # display.max_seq_items == n\n with pd.option_context(\"display.max_seq_items\", 6):\n result = idx.__repr__()\n expected = \"\"\"\\\nMultiIndex([('foo', 'one'),\n ('foo', 'two'),\n ('bar', 'one'),\n ('baz', 'two'),\n ('qux', 'one'),\n ('qux', 'two')],\n names=['first', 'second'])\"\"\"\n assert result == expected\n\n def test_repr(self, idx):\n result = idx[:1].__repr__()\n expected = \"\"\"\\\nMultiIndex([('foo', 'one')],\n names=['first', 'second'])\"\"\"\n assert result == expected\n\n result = idx.__repr__()\n expected = \"\"\"\\\nMultiIndex([('foo', 'one'),\n ('foo', 'two'),\n ('bar', 'one'),\n ('baz', 'two'),\n ('qux', 'one'),\n ('qux', 'two')],\n names=['first', 'second'])\"\"\"\n assert result == expected\n\n with pd.option_context(\"display.max_seq_items\", 5):\n result = idx.__repr__()\n expected = \"\"\"\\\nMultiIndex([('foo', 'one'),\n ('foo', 'two'),\n ...\n ('qux', 'one'),\n ('qux', 'two')],\n names=['first', 'second'], length=6)\"\"\"\n assert result == expected\n\n # display.max_seq_items == 1\n with pd.option_context(\"display.max_seq_items\", 1):\n result = idx.__repr__()\n expected = \"\"\"\\\nMultiIndex([...\n ('qux', 'two')],\n names=['first', ...], length=6)\"\"\"\n assert result == expected\n\n def test_rjust(self, narrow_multi_index):\n mi = narrow_multi_index\n result = mi[:1].__repr__()\n expected = 
\"\"\"\\\nMultiIndex([('a', 9, '2000-01-01 00:00:00')],\n names=['a', 'b', 'dti'])\"\"\"\n assert result == expected\n\n result = mi[::500].__repr__()\n expected = \"\"\"\\\nMultiIndex([( 'a', 9, '2000-01-01 00:00:00'),\n ( 'a', 9, '2000-01-01 00:08:20'),\n ('abc', 10, '2000-01-01 00:16:40'),\n ('abc', 10, '2000-01-01 00:25:00')],\n names=['a', 'b', 'dti'])\"\"\"\n assert result == expected\n\n result = mi.__repr__()\n expected = \"\"\"\\\nMultiIndex([( 'a', 9, '2000-01-01 00:00:00'),\n ( 'a', 9, '2000-01-01 00:00:01'),\n ( 'a', 9, '2000-01-01 00:00:02'),\n ( 'a', 9, '2000-01-01 00:00:03'),\n ( 'a', 9, '2000-01-01 00:00:04'),\n ( 'a', 9, '2000-01-01 00:00:05'),\n ( 'a', 9, '2000-01-01 00:00:06'),\n ( 'a', 9, '2000-01-01 00:00:07'),\n ( 'a', 9, '2000-01-01 00:00:08'),\n ( 'a', 9, '2000-01-01 00:00:09'),\n ...\n ('abc', 10, '2000-01-01 00:33:10'),\n ('abc', 10, '2000-01-01 00:33:11'),\n ('abc', 10, '2000-01-01 00:33:12'),\n ('abc', 10, '2000-01-01 00:33:13'),\n ('abc', 10, '2000-01-01 00:33:14'),\n ('abc', 10, '2000-01-01 00:33:15'),\n ('abc', 10, '2000-01-01 00:33:16'),\n ('abc', 10, '2000-01-01 00:33:17'),\n ('abc', 10, '2000-01-01 00:33:18'),\n ('abc', 10, '2000-01-01 00:33:19')],\n names=['a', 'b', 'dti'], length=2000)\"\"\"\n assert result == expected\n\n def test_tuple_width(self, wide_multi_index):\n mi = wide_multi_index\n result = mi[:1].__repr__()\n expected = \"\"\"MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)],\n names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])\"\"\"\n assert result == expected\n\n result = mi[:10].__repr__()\n expected = \"\"\"\\\nMultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),\n ('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),\n ('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),\n ('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),\n ('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),\n ('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),\n ('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),\n ('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),\n ('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),\n ('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)],\n names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])\"\"\"\n assert result == expected\n\n result = mi.__repr__()\n expected = \"\"\"\\\nMultiIndex([( 'a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),\n ( 'a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),\n ( 'a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),\n ( 'a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),\n ( 'a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),\n ( 'a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),\n ( 'a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),\n ( 'a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),\n ( 'a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),\n ( 'a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...),\n ...\n ('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...),\n ('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...),\n ('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...),\n ('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...),\n ('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...),\n ('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...),\n ('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...),\n ('abc', 10, '2000-01-01 00:33:17', '2000-01-01 
00:33:17', ...),\n ('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),\n ('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],\n names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)\"\"\"\n assert result == expected\n"
] | [
[
"pandas.DataFrame",
"numpy.arange",
"pandas.MultiIndex",
"pandas.Index",
"pandas.option_context"
]
] |
jackd/graphics | [
"736b99a3306e302674a9b7599e3e2857b85fdb74"
] | [
"tensorflow_graphics/nn/metric/tests/fscore_test.py"
] | [
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the fscore metric.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow_graphics.nn.metric import fscore\nfrom tensorflow_graphics.nn.metric import precision\nfrom tensorflow_graphics.nn.metric import recall\nfrom tensorflow_graphics.util import test_case\n\n\ndef random_tensor(tensor_shape):\n return np.random.uniform(low=0.0, high=1.0, size=tensor_shape)\n\n\ndef random_tensor_shape():\n tensor_size = np.random.randint(5) + 1\n return np.random.randint(1, 10, size=(tensor_size)).tolist()\n\n\ndef binary_precision_function(ground_truth, predictions):\n return precision.evaluate(ground_truth, predictions, classes=[1])\n\n\ndef binary_recall_function(ground_truth, predictions):\n return recall.evaluate(ground_truth, predictions, classes=[1])\n\n\nclass FscoreTest(test_case.TestCase):\n\n @parameterized.parameters(\n # Precision = 0.5, Recall = 0.25.\n ((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 2 * (0.5 * 0.25) / (0.5 + 0.25)),\n # Precision = 1, Recall = 1.\n ((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1),\n # Precision = 0, Recall = 0.\n ((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0))\n def test_evaluate_preset(self, ground_truth, predictions, expected_fscore):\n tensor_shape = random_tensor_shape()\n\n ground_truth_labels = np.tile(ground_truth, tensor_shape + [1])\n predicted_labels = np.tile(predictions, tensor_shape + [1])\n expected = np.tile(expected_fscore, tensor_shape)\n\n result = fscore.evaluate(\n ground_truth_labels,\n predicted_labels,\n precision_function=binary_precision_function,\n recall_function=binary_recall_function)\n\n self.assertAllClose(expected, result)\n\n @parameterized.parameters(\n (\"Not all batch dimensions are broadcast-compatible.\", (1, 5, 3), (4, 3)),\n (\"Not all batch dimensions are broadcast-compatible.\", (3, 4), (2, 4, 5)),\n )\n def test_evaluate_shape_exception_raised(self, error_msg, *shape):\n \"\"\"Tests that the shape exception is raised.\"\"\"\n self.assert_exception_is_raised(fscore.evaluate, error_msg, shape)\n\n @parameterized.parameters(\n ((1, 5, 3), (2, 5, 1)),\n ((None, 2, 6), (4, 2, None)),\n ((3, 1, 1, 2), (3, 5, 8, 2)),\n )\n def test_evaluate_shape_exception_not_raised(self, *shapes):\n \"\"\"Tests that the shape exceptions are not raised.\"\"\"\n self.assert_exception_is_not_raised(fscore.evaluate, shapes)\n\n\nif __name__ == \"__main__\":\n test_case.main()\n"
] | [
[
"numpy.random.uniform",
"numpy.tile",
"numpy.random.randint"
]
] |
zhaodi-Wen/Child_skin_disease_detect | [
"e95045341e8c27161eebb2c9c3b68026a4ea247b"
] | [
"src/src/create_tf_record.py"
] | [
"# -*-coding: utf-8 -*-\n\"\"\"\n @Project: create_tfrecord\n @File : create_tfrecord.py\n @Author : panjq\n @E-mail : [email protected]\n @Date : 2018-07-27 17:19:54\n @desc : 将图片数据保存为单个tfrecord文件\n\"\"\"\n\n##########################################################################\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nimport random\nfrom PIL import Image\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\ntrain_path = './train_new/img'\ntest_path = './test_new/img'\nlist = set(os.listdir(test_path))\nclasses=sorted(list,key=str.lower)\nprint(classes)\n##########################################################################\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n# 生成字符串型的属性\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n# 生成实数型的属性\ndef float_list_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\ndef get_example_nums(tf_records_filenames):\n '''\n 统计tf_records图像的个数(example)个数\n :param tf_records_filenames: tf_records文件路径\n :return:\n '''\n nums= 0\n for record in tf.python_io.tf_record_iterator(tf_records_filenames):\n nums += 1\n return nums\n\ndef show_image(title,image):\n '''\n 显示图片\n :param title: 图像标题\n :param image: 图像的数据\n :return:\n '''\n # plt.figure(\"show_image\")\n # print(image.dtype)\n plt.imshow(image)\n plt.axis('on') # 关掉坐标轴为 off\n plt.title(title) # 图像题目\n plt.show()\n\n# def load_labels_file(filename,labels_num=1,shuffle=False):\n# '''\n# 载图txt文件,文件中每行为一个图片信息,且以空格隔开:图像路径 标签1 标签2,如:test_image/1.jpg 0 2\n# :param filename:\n# :param labels_num :labels个数\n# :param shuffle :是否打乱顺序\n# :return:images type->list\n# :return:labels type->list\n# '''\n# images=[]\n# labels=[]\n# with open(filename) as f:\n# lines_list=f.readlines()\n# if shuffle:\n# random.shuffle(lines_list)\n#\n# for lines in lines_list:\n# line=lines.rstrip().split(' ')\n# label=[]\n# for i in range(labels_num):\n# label.append(int(line[i+1]))\n# images.append(line[0])\n# labels.append(label)\n# return images,labels\n\ndef load_labels_file(filename,num=1,shuffle=False):\n '''\n 载图txt文件,文件中每行为一个图片信息,且以空格隔开:图像路径 标签1 标签2,如:test_image/1.jpg 0 2\n :param filename:\n :param labels_num :labels个数\n :param shuffle :是否打乱顺序\n :return:images type->list\n :return:labels type->list\n '''\n images=[]\n labels=[]\n # with open(filename) as f:\n # lines_list=f.readlines()\n # if shuffle:\n # random.shuffle(lines_list)\n #\n # for lines in lines_list:\n # line=lines.rstrip().split(' ')\n # label=[]\n # for i in range(labels_num):\n # label.append(int(line[i+1]))\n # images.append(line[0])\n # labels.append(label)\n # return images,labels\n for index,name in enumerate(classes):\n # print(index,name)\n class_path = filename+'/'+name+'/'\n # print(class_path)\n for img_name in os.listdir(class_path):\n img_path = class_path+img_name\n # print(img_path)\n images.append(img_path)\n labels.append(index)\n # img = Image.open(img_path)\n\n # img = img.resize((224,224))\n # img_raw = img.tobytes()\n # with open(train_label,'a') as f:\n # f.write(str(index)+'\\n')\n randnum = random.randint(0, 100)\n random.seed(randnum)\n random.shuffle(images)\n random.seed(randnum)\n random.shuffle(labels)\n return images,labels\n\ndef read_image(filename, resize_height, resize_width,normalization=False):\n '''\n 读取图片数据,默认返回的是uint8,[0,255]\n :param filename:\n :param resize_height:\n :param resize_width:\n :param 
normalization:是否归一化到[0.,1.0]\n :return: 返回的图片数据\n '''\n\n bgr_image = cv2.imread(filename)\n if len(bgr_image.shape)==2:#若是灰度图则转为三通道\n print(\"Warning:gray image\",filename)\n bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_GRAY2BGR)\n\n rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)#将BGR转为RGB\n # show_image(filename,rgb_image)\n # rgb_image=Image.open(filename)\n if resize_height>0 and resize_width>0:\n rgb_image=cv2.resize(rgb_image,(resize_width,resize_height))\n rgb_image=np.asanyarray(rgb_image)\n if normalization:\n # 不能写成:rgb_image=rgb_image/255\n rgb_image=rgb_image/255.0\n # show_image(\"src resize image\",image)\n return rgb_image\n\n\ndef get_batch_images(images,labels,batch_size,labels_nums,one_hot=False,shuffle=False,num_threads=64):\n '''\n :param images:图像\n :param labels:标签\n :param batch_size:\n :param labels_nums:标签个数\n :param one_hot:是否将labels转为one_hot的形式\n :param shuffle:是否打乱顺序,一般train时shuffle=True,验证时shuffle=False\n :return:返回batch的images和labels\n '''\n min_after_dequeue = 200\n capacity = min_after_dequeue + 3 * batch_size # 保证capacity必须大于min_after_dequeue参数值\n if shuffle:\n images_batch, labels_batch = tf.train.shuffle_batch([images,labels],\n batch_size=batch_size,\n capacity=capacity,\n min_after_dequeue=min_after_dequeue,\n num_threads=num_threads)\n else:\n images_batch, labels_batch = tf.train.batch([images,labels],\n batch_size=batch_size,\n capacity=capacity,\n num_threads=num_threads)\n if one_hot:\n labels_batch = tf.one_hot(labels_batch, labels_nums, 1, 0)\n return images_batch,labels_batch\n\ndef read_records(filename,resize_height, resize_width,type=None):\n '''\n 解析record文件:源文件的图像数据是RGB,uint8,[0,255],一般作为训练数据时,需要归一化到[0,1]\n :param filename:\n :param resize_height:\n :param resize_width:\n :param type:选择图像数据的返回类型\n None:默认将uint8-[0,255]转为float32-[0,255]\n normalization:归一化float32-[0,1]\n centralization:归一化float32-[0,1],再减均值中心化\n :return:\n '''\n # 创建文件队列,不限读取的数量\n filename_queue = tf.train.string_input_producer([filename])\n # create a reader from file queue\n reader = tf.TFRecordReader()\n # reader从文件队列中读入一个序列化的样本\n _, serialized_example = reader.read(filename_queue)\n # get feature from serialized example\n # 解析符号化的样本\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64),\n 'label': tf.FixedLenFeature([], tf.int64)\n }\n )\n tf_image = tf.decode_raw(features['image_raw'], tf.uint8)#获得图像原始的数据\n\n tf_height = features['height']\n tf_width = features['width']\n tf_depth = features['depth']\n tf_label = tf.cast(features['label'], tf.int32)\n # PS:恢复原始图像数据,reshape的大小必须与保存之前的图像shape一致,否则出错\n # tf_image=tf.reshape(tf_image, [-1]) # 转换为行向量\n tf_image=tf.reshape(tf_image, [resize_height, resize_width, 3]) # 设置图像的维度\n\n # 恢复数据后,才可以对图像进行resize_images:输入uint->输出float32\n # tf_image=tf.image.resize_images(tf_image,[224, 224])\n\n # 存储的图像类型为uint8,tensorflow训练时数据必须是tf.float32\n if type is None:\n tf_image = tf.cast(tf_image, tf.float32)\n elif type=='normalization':# [1]若需要归一化请使用:\n # 仅当输入数据是uint8,才会归一化[0,255]\n # tf_image = tf.image.convert_image_dtype(tf_image, tf.float32)\n tf_image = tf.cast(tf_image, tf.float32) * (1. / 255.0) # 归一化\n elif type=='centralization':\n # 若需要归一化,且中心化,假设均值为0.5,请使用:\n tf_image = tf.cast(tf_image, tf.float32) * (1. 
/ 255) - 0.5 #中心化\n\n # 这里仅仅返回图像和标签\n # return tf_image, tf_height,tf_width,tf_depth,tf_label\n return tf_image,tf_label\n\n
\ndef create_records(image_dir, output_record_dir, resize_height, resize_width,shuffle,log=5):\n '''\n 实现将图像原始数据,label,长,宽等信息保存为record文件\n 注意:读取的图像数据默认是uint8,再转为tf的字符串型BytesList保存,解析请需要根据需要转换类型\n :param image_dir:原始图像的目录\n :param file:输入保存图片信息的txt文件(image_dir+file构成图片的路径)\n :param output_record_dir:保存record文件的路径\n :param resize_height:\n :param resize_width:\n PS:当resize_height或者resize_width=0是,不执行resize\n :param shuffle:是否打乱顺序\n :param log:log信息打印间隔\n '''\n # 加载文件,仅获取一个label\n images_list, labels_list=load_labels_file(image_dir,1,shuffle)\n\n writer = tf.python_io.TFRecordWriter(output_record_dir)\n for i, [image_name, labels] in enumerate(zip(images_list, labels_list)):\n image_path=image_name\n # print(image_path)\n # print(labels)\n if not os.path.exists(image_path):\n print('Err:no image',image_path)\n continue\n image = read_image(image_path, resize_height, resize_width)\n image_raw = image.tostring()\n if i%log==0 or i==len(images_list)-1:\n print('------------processing:%d-th------------' % (i))\n print('current image_path=%s' % (image_path),'shape:{}'.format(image.shape),'labels:{}'.format(labels))\n # 这里仅保存一个label,多label适当增加\"'label': _int64_feature(label)\"项\n label=labels\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image_raw': _bytes_feature(image_raw),\n 'height': _int64_feature(image.shape[0]),\n 'width': _int64_feature(image.shape[1]),\n 'depth': _int64_feature(image.shape[2]),\n 'label': _int64_feature(label)\n }))\n writer.write(example.SerializeToString())\n writer.close()\n
\ndef disp_records(record_file,resize_height, resize_width,show_nums=4):\n '''\n 解析record文件,并显示show_nums张图片,主要用于验证生成record文件是否成功\n :param tfrecord_file: record文件路径\n :return:\n '''\n # 读取record函数\n tf_image, tf_label = read_records(record_file,resize_height,resize_width,type='normalization')\n # 显示前4个图片\n init_op = tf.initialize_all_variables()\n with tf.Session() as sess:\n sess.run(init_op)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n for i in range(show_nums):\n image,label = sess.run([tf_image,tf_label]) # 在会话中取出image和label\n # image = tf_image.eval()\n # 直接从record解析的image是一个向量,需要reshape显示\n # image = image.reshape([height,width,depth])\n #print('shape:{},type:{},labels:{}'.format(image.shape,image.dtype,label))\n # pilimg = Image.fromarray(np.asarray(image_eval_reshape))\n # pilimg.show()\n show_image(\"image:%d\"%(label),image)\n coord.request_stop()\n coord.join(threads)\n\n
\ndef batch_test(record_file,resize_height, resize_width):\n '''\n :param record_file: record文件路径\n :param resize_height:\n :param resize_width:\n :return:\n :PS:image_batch, label_batch一般作为网络的输入\n '''\n # 读取record函数\n tf_image,tf_label = read_records(record_file,resize_height,resize_width,type='normalization')\n image_batch, label_batch= get_batch_images(tf_image,tf_label,batch_size=4,labels_nums=5,one_hot=False,shuffle=False)\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess: # 开始一个会话\n sess.run(init)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n for i in range(4):\n # 在会话中取出images和labels\n images, labels = sess.run([image_batch, label_batch])\n # 这里仅显示每个batch里第一张图片\n show_image(\"image\", images[0, :, :, :])\n print('shape:{},type:{},labels:{}'.format(images.shape,images.dtype,labels))\n\n # 停止所有线程\n coord.request_stop()\n coord.join(threads)\n\n
\n# if __name__ == '__main__':\n# # 参数设置\n#\n# resize_height = 224 # 指定存储图片高度\n# resize_width = 224 # 指定存储图片宽度\n# shuffle=True\n# log=5\n# # 产生train.record文件\n# image_dir='dataset/train'\n# train_labels = 'dataset/train.txt' # 图片路径\n# train_record_output = 'dataset/record/train.tfrecords'\n# create_records(image_dir,train_labels, train_record_output, resize_height, resize_width,shuffle,log)\n# train_nums=get_example_nums(train_record_output)\n# print(\"save train example nums={}\".format(train_nums))\n#\n# # 产生val.record文件\n# image_dir='dataset/val'\n# val_labels = 'dataset/val.txt' # 图片路径\n# val_record_output = 'dataset/record/val.tfrecords'\n# create_records(image_dir,val_labels, val_record_output, resize_height, resize_width,shuffle,log)\n# val_nums=get_example_nums(val_record_output)\n# print(\"save val example nums={}\".format(val_nums))\n#\n# # 测试显示函数\n# # disp_records(train_record_output,resize_height, resize_width)\n# batch_test(train_record_output,resize_height, resize_width)\n\nif __name__ == '__main__':\n # 参数设置\n\n resize_height = 224 # 指定存储图片高度\n resize_width = 224 # 指定存储图片宽度\n shuffle=True\n log=5\n # 产生train.record文件\n image_dir='./train_new/img'\n # train_labels = './onsets/train.txt' # 图片路径\n train_record_output = 'train.tfrecord'\n create_records(image_dir, train_record_output, resize_height, resize_width,shuffle,log)\n train_nums=get_example_nums(train_record_output)\n print(\"save train example nums={}\".format(train_nums))\n\n # 产生val.record文件\n image_dir='./test_new/img'\n # val_labels = './onsets/val.txt' # 图片路径\n val_record_output = 'val.tfrecord'\n create_records(image_dir, val_record_output, resize_height, resize_width,shuffle,log)\n val_nums=get_example_nums(val_record_output)\n print(\"save val example nums={}\".format(val_nums))\n\n # 测试显示函数\n # disp_records(train_record_output,resize_height, resize_width)\n # batch_test(train_record_output,resize_height, resize_width)"
] | [
[
"tensorflow.initialize_all_variables",
"tensorflow.reshape",
"tensorflow.decode_raw",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.train.Int64List",
"tensorflow.train.FloatList",
"matplotlib.pyplot.imshow",
"tensorflow.one_hot",
"tensorflow.train.batch",
"tensorflow.train.BytesList",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.title",
"tensorflow.FixedLenFeature",
"tensorflow.train.shuffle_batch",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.TFRecordReader",
"matplotlib.pyplot.axis",
"tensorflow.train.string_input_producer",
"numpy.asanyarray",
"tensorflow.cast",
"tensorflow.train.start_queue_runners",
"tensorflow.Session",
"tensorflow.train.Coordinator",
"matplotlib.pyplot.show"
]
] |
CoAxLab/infomercial | [
"fa5d1c1e5c1351735dda2961a2a94f71cd17e270"
] | [
"infomercial/exp/softmeta_bandit.py"
] | [
"import os\nimport fire\nimport gym\n\nimport numpy as np\nfrom scipy.special import softmax\n\nfrom noboard.csv import SummaryWriter\n\nfrom copy import deepcopy\nfrom scipy.stats import entropy\nfrom collections import OrderedDict\n\nfrom infomercial.distance import kl\nfrom infomercial.memory import DiscreteDistribution\nfrom infomercial.models import Critic\nfrom infomercial.models import SoftmaxActor\n\nfrom infomercial.utils import estimate_regret\nfrom infomercial.utils import load_checkpoint\nfrom infomercial.utils import save_checkpoint\n\n\ndef R_update(state, reward, critic, lr):\n \"\"\"Really simple TD learning\"\"\"\n\n update = lr * (reward - critic(state))\n critic.update(state, update)\n\n return critic\n\n\ndef E_update(state, value, critic, lr):\n \"\"\"Bellman update\"\"\"\n update = lr * value\n critic.replace(state, update)\n\n return critic\n\n\ndef R_homeostasis(reward, total_reward, set_point):\n \"\"\"Update reward value assuming homeostatic value.\n \n Value based on Keramati and Gutkin, 2014.\n https://elifesciences.org/articles/04811\n \"\"\"\n deviance_last = np.abs(set_point - total_reward)\n deviance = np.abs(set_point - (total_reward + reward))\n reward_value = deviance_last - deviance\n return reward_value\n\n\ndef run(env_name='BanditOneHot10-v0',\n num_episodes=1000,\n temp=1.0,\n tie_threshold=0.0,\n tie_break=None,\n lr_R=.1,\n master_seed=42,\n write_to_disk=True,\n log_dir=None):\n \"\"\"Bandit agent - softmax (E, R)\"\"\"\n\n # --- Init ---\n writer = SummaryWriter(log_dir=log_dir, write_to_disk=write_to_disk)\n\n # -\n env = gym.make(env_name)\n env.seed(master_seed)\n num_actions = env.action_space.n\n all_actions = list(range(num_actions))\n best_action = env.best\n\n default_reward_value = 0\n default_info_value = entropy(np.ones(num_actions) / num_actions)\n E_t = default_info_value\n R_t = default_reward_value\n\n # --- Agents and memories ---\n critic_R = Critic(num_actions, default_value=default_reward_value)\n critic_E = Critic(num_actions, default_value=default_info_value)\n actor_R = SoftmaxActor(num_actions, temp=temp, seed_value=master_seed)\n actor_E = SoftmaxActor(num_actions, temp=temp, seed_value=master_seed)\n memories = [DiscreteDistribution() for _ in range(num_actions)]\n\n # -\n num_best = 0\n total_R = 0.0\n total_E = 0.0\n total_regret = 0.0\n\n # ------------------------------------------------------------------------\n for n in range(num_episodes):\n env.reset()\n\n # Meta-greed policy selection\n if (E_t - tie_threshold) > R_t:\n critic = critic_E\n actor = actor_E\n policy = 0\n else:\n critic = critic_R\n actor = actor_R\n policy = 1\n\n # Choose an action; Choose a bandit\n action = actor(list(critic.model.values()))\n if action in best_action:\n num_best += 1\n\n # Est. 
regret and save it\n regret = estimate_regret(all_actions, action, critic)\n\n # Pull a lever.\n state, R_t, _, _ = env.step(action)\n R_t = R_homeostasis(R_t, total_R, num_episodes)\n\n # Estimate E\n old = deepcopy(memories[action])\n memories[action].update((int(state), int(R_t)))\n new = deepcopy(memories[action])\n E_t = kl(new, old, default_info_value)\n\n # Learning, both policies.\n critic_R = R_update(action, R_t, critic_R, lr_R)\n critic_E = E_update(action, E_t, critic_E, lr=1)\n\n # Log data\n writer.add_scalar(\"policy\", policy, n)\n writer.add_scalar(\"state\", int(state), n)\n writer.add_scalar(\"action\", action, n)\n writer.add_scalar(\"regret\", regret, n)\n writer.add_scalar(\"score_E\", E_t, n)\n writer.add_scalar(\"score_R\", R_t, n)\n writer.add_scalar(\"value_E\", critic_E(action), n)\n writer.add_scalar(\"value_R\", critic_R(action), n)\n\n total_E += E_t\n total_R += R_t\n total_regret += regret\n writer.add_scalar(\"total_regret\", total_regret, n)\n writer.add_scalar(\"total_E\", total_E, n)\n writer.add_scalar(\"total_R\", total_R, n)\n writer.add_scalar(\"p_bests\", num_best / (n + 1), n)\n\n # -- Build the final result, and save or return it ---\n writer.close()\n\n result = dict(best=env.best,\n num_episodes=num_episodes,\n temp=temp,\n tie_threshold=tie_threshold,\n critic_E=critic_E.state_dict(),\n critic_R=critic_R.state_dict(),\n total_E=total_E,\n total_R=total_R,\n total_regret=total_regret,\n env_name=env_name,\n lr_R=lr_R,\n master_seed=master_seed)\n\n if write_to_disk:\n save_checkpoint(result,\n filename=os.path.join(writer.log_dir, \"result.pkl\"))\n\n return result\n\n\nif __name__ == \"__main__\":\n fire.Fire(run)"
] | [
[
"numpy.ones",
"numpy.abs"
]
] |
dnicholson/gasex-python | [
"53b8c3ff4e64e724d8883bdef299d465621b124f"
] | [
"gasex/diff.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n% Diffusion coeff and Schmidt number for gases in fresh/sea water\n%=========================================================================\n% Modified by D. Nicholson from MATLAB gas_diffusion Version 2.0 16 July 2013\n% Author: Roberta C. Hamme (University of Victoria)\n% Diffusion values for 'He','Ne','Ar','Kr','Xe','N2','O2','CH4','N2' and 'CO2' are calculated from\n% gas_diffusion Version 2.0 functions \n% salinity correction is of the form: D = D0 * (1 - 0.049 * SP / 35.5)\n% \n%\n% Support for additional gases ('CO2','N2O','CH4','RN','SF6','DMS','CFC12','CFC11','CH3BR','CCL4')\n% has been added based on Wanninkhof 2014 \n%\n% Table 1:\n% Sc = A + Bt + Ct2+ dt3+ Et4(t in °C). The last column is the calculated Schmidt number for 20°C. \n% The Schmidt number is the kinematic viscosity of waterdivided by the molecular diffusion \n% coefficient of the gas. The kinematic viscosity for fresh water and seawater are from \n% Sharqawy et al. (2010). The dif-fusion coefficients of gases are from the following: \n% 3He, He, Ne, Kr, Xe, CH4, CO2, and Rn measured by Jähne et al. (1987); Ar, O2, N2, N2O, \n% and CCl4fitusing Wilke and Chang (1955) as adapted by Hayduk and Laudie (1974); SF6 \n% measured by King and Saltzman (1995); DMS measured by Saltzman etal. (1993); CFC-11 and \n% CFC-12 measured by Zheng et al. (1998); CH3Br measured by De Bruyn and Saltzman (1997a).\n%\n%\n% REFERENCE:\n% He, Ne, Kr, Xe, CH4, CO2, H2 freshwater values from Jahne et al., 1987.\n% \"Measurement of Diffusion Coeffients of Sparingly Soluble Gases in Water\"\n% J. Geophys. Res., 92(C10), 10767-10776.\n% Ar freshwaters values are extrapolated from Jahne et al. 1987\n% He, Ne, Kr, Xe values at each temperature were fitted to D vs. mass^-0.5\n% relationship to predict Ar at those temperatures, then Ar was fit to a\n% ln(D_Ar) vs. 1/T(K) relationship to obtain Eyring equation coefficients\n% O2 and N2 freshwater values from Ferrell and Himmelblau, 1967.\n% \"Diffusion coefficients of nitrogen and oxygen in water\"\n% J. Chem. Eng. Data, 12(1), 111-115, doi: 10.1021/je60032a036.\n% Correction for salinity is based on Jahne's observed average 4.9% decrease in\n% diffusivity for H2 and He in 35.5 ppt NaCl solution\n%\n% for Ne, the Jahne values compare well with and fall between those of\n% Wise and Houghton 1968 and Holz et al. 1994\n% for Ar, the extrapolated Jahne values compare well with Wise and Houghton 1968,\n% O'Brien and Hyslop 1977, and a numerical simulation by Bourg et al. 2008\n% but are higher than other reported values\n% for Kr, the Jahne values compare well with Wise and Houghton 1968,\n% and a numerical simulation by Bourg et al. 2008\n% for Xe, the Jahne values compare well with Pollack 1981, and a numerical\n% simulation by Bourg et al. 2008, but fall significantly above Wise and Houghton 1968\n% and below Weingartner et al. 1992\n% for O2, there is general agreement among measurements. The Ferrel and Himmelblau values\n% agree reasonably well with Baird and Davidson 1962, Wise and Houghton 1966,\n% Duda and Vrentas 1968, O'Brien and Hyslop 1977, and the Wilke and Change (1955) theory\n% as tabulated by Wanninkhof 1992, but lie below Krieger et al 1967\n% for N2, there is less agreement. 
The Ferrel and Himmelblau values\n% agree reasonably well with Baird and Davidson 1962, O'Brien and Hyslop 1977,\n% and the Wilke and Change (1955) theory as tabulated by Wanninkhof 1992,\n% but lie significantly below the values of Wise and Houghton 1966 and Krieger et al 1967\n% for He, I did not investigate comparisons of data, but chose Jahne\n% since their work for other gases appears to be the best\n% for CO2, CH4 and H2: Jahne 1987 \n%\n% \n%\n% DISCLAIMER:\n% This software is provided \"as is\" without warranty of any kind.\n%=========================================================================\n\"\"\"\nfrom __future__ import division\nimport numpy as np\nfrom numpy.polynomial.polynomial import polyval\nfrom ._utilities import match_args_return\nfrom gasex.phys import R as R \nfrom gasex.phys import visc as visc\n\n\n# Currently supported gases\n# TODO: find N2O, CO diffusivities\nGAS_LIST = ('HE','NE','AR','KR','XE','N2','O2','CH4','N2','CO2')\n\n@match_args_return\ndef diff(SP,pt,*,gas=None):\n \n \"\"\"\n DESCRIPTION\n -----------\n Diffusion coefficients of various gases in fresh/sea water\n \n PARAMETERS\n -----------\n SP = practical salinity [PSS-78]\n pt = potential temperature [degree C]\n gas = 'He','Ne','Ar','Kr','Xe','N2','O2','CH4','N2' or 'CO2'\n \n OUTPUT:\n D = diffusion coefficient [m^2 s-1]\n\n \"\"\"\n g_up = gas.upper()\n if g_up not in GAS_LIST:\n raise ValueError(\"gas: must be one of \", GAS_LIST)\n \n AEa_dict = {'O2': (4.286e-6, 18700),\\\n 'HE': (0.8180e-6, 11700),\\\n 'NE': (1.6080e-6, 14840),\\\n 'AR': (2.227e-6, 16680),\\\n 'KR': (6.3930e-6, 20200),\\\n 'XE': (9.0070e-6, 21610),\\\n 'N2': (3.4120e-6, 18500),\\\n 'CH4':(3.0470e-6, 18360),\\\n 'CO2':(5.0190e-6, 19510),\\\n 'H2': (3.3380e-6, 16060)}\n \n if g_up in AEa_dict.keys():\n #freshwater diffusivity\n AEa = AEa_dict[g_up]\n D0 = AEa[0] * np.exp(-AEa[1] / (R * (pt+273.15)))\n #salinity correction\n D = D0 * (1 - 0.049 * SP / 35.5)\n else:\n raise ValueError(\"gas: must be one of \", AEa_dict.keys())\n return D\n\n\n@match_args_return\ndef schmidt(SP,pt,*,gas=None):\n\n g_up = gas.upper()\n if g_up not in GAS_LIST:\n raise ValueError(\"gas\", g_up, \" does not match one of \", GAS_LIST)\n \n Sc = visc(SP,pt) / diff(SP,pt,gas=gas) \n return Sc\n\n@match_args_return\ndef schmidt_W14(pt,*,gas=None,sw=True):\n \"\"\"Schmidt number @ 35 psu based on Wanninkhof 2014 Table 1\n\n Args:\n pt ([array like]): potential temperature [degree C]\n gas ([string]): abbreviation for gas. Defaults to None.\n sw (bool, optional): if True, then calculates for SP = 35, of false, \n calculates for fresh water. 
Defaults to True.\n\n Raises:\n ValueError: [description]\n\n Returns:\n [type]: Schmidt number [dimensionless]\n \"\"\"\n W14_LIST = ('CO2','N2O','CH4','RN','SF6','DMS','CFC12','CFC11','CH3BR','CCL4')\n g_up = gas.upper()\n if sw:\n A_dict = {'CO2': (2116.8,-136.25,4.7353,-0.092307,0.0007555 ),\\\n 'N2O': (2356.2,-166.38,6.3952,-0.13422,0.0011506 ),\\\n 'CH4':(2101.2,-131.54,4.4931,-0.08676,0.00070663),\n 'RN': (3489.6,-244.56,8.9713,-0.18022,0.0014985 ),\n 'SF6':(3177.5,-200.57,6.8865,-0.13335,0.0010877 ),\n 'DMS':(2855.7,-177.63,6.0438,-0.11645,0.00094743),\n 'CFC12':(3828.1,-249.86, 8.7603, -0.1716, 0.001408 ),\n 'CFC11':(3579.2, -222.63, 7.5749, -0.14595, 0.0011874 ),\n 'CH3BR':(2181.8, -138.4, 4.7663, -0.092448, 0.0007547 ),\n 'CCL4': (4398.7, -308.25, 11.798, -0.24709, 0.0021159) }\n else:\n A_dict = {'CO2': (1923.6, -125.06, 4.3773, -0.085681, 0.00070284 ),\\\n 'N2O': (2141.2, -152.56, 5.8963, -0.12411, 0.0010655 ),\\\n 'CH4':(1909.4, -120.78, 4.1555, -0.080578, 0.00065777),\n 'RN': (3171, -224.28, 8.2809, -0.16699, 0.0013915 ),\n 'SF6':(3035, -196.35, 6.851, -0.13387, 0.0010972 ),\n 'DMS':(2595, -163.12, 5.5902, -0.10817, 0.00088204),\n 'CFC12':(3478.6, -229.32, 8.0961, -0.15923, 0.0013095 ),\n 'CFC11':(3460, -217.49, 7.4537, -0.14423, 0.0011761 ),\n 'CH3BR':(2109.2, -135.17, 4.6884, -0.091317, 0.00074715 ),\n 'CCL4': (3997.2, -282.69, 10.88, -0.22855, 0.0019605) }\n\n if g_up in A_dict.keys():\n A = A_dict[g_up]\n else:\n raise ValueError(\"gas\", g_up, \" does not match one of \", A_dict.keys())\n\n Sc = polyval(pt,A)\n return Sc\n"
] | [
[
"numpy.exp",
"numpy.polynomial.polynomial.polyval"
]
] |
alixhami/training-data-analyst | [
"3eb60cb6c8b55fd7f38414c1082da36b8e62558e"
] | [
"courses/machine_learning/deepdive/05_artandscience/simplernn/trainer/model.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\nimport tensorflow.contrib.metrics as metrics\nimport tensorflow.contrib.rnn as rnn\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\nSEQ_LEN = 10\nDEFAULTS = [[0.0] for x in range(0, SEQ_LEN)]\nBATCH_SIZE = 20\nTIMESERIES_INPUT_LAYER = 'rawdata'\nTIMESERIES_COL = '{}_input'.format(TIMESERIES_INPUT_LAYER)\n# In each sequence, column index 0 to N_INPUTS - 1 are features, and column index N_INPUTS to SEQ_LEN are labels\nN_OUTPUTS = 1\nN_INPUTS = SEQ_LEN - N_OUTPUTS\nLSTM_SIZE = 3 # number of hidden layers in each of the LSTM cells\n\n# Read data and convert to needed format\ndef read_dataset(filename, mode, batch_size):\n def _input_fn():\n # Provide the ability to decode a CSV\n def decode_csv(line):\n # all_data is a list of scalar tensors\n all_data = tf.decode_csv(line, record_defaults = DEFAULTS)\n inputs = all_data[:len(all_data) - N_OUTPUTS] # first N_INPUTS values\n labels = all_data[len(all_data) - N_OUTPUTS:] # last N_OUTPUTS values\n\n # Convert each list of rank R tensors to one rank R+1 tensor\n inputs = tf.stack(inputs, axis = 0)\n labels = tf.stack(labels, axis = 0)\n\n # Convert input R+1 tensor into a feature dictionary of one R+1 tensor\n features = {TIMESERIES_COL: inputs}\n\n return features, labels\n\n # Create list of files that match pattern\n file_list = tf.gfile.Glob(filename)\n\n # Create dataset from file list\n dataset = tf.data.TextLineDataset(file_list).map(decode_csv)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n num_epochs = None # indefinitely\n dataset = dataset.shuffle(buffer_size = 10 * batch_size)\n else:\n num_epochs = 1 # end-of-input after this\n\n dataset = dataset.repeat(num_epochs).batch(batch_size)\n\n iterator = dataset.make_one_shot_iterator()\n batch_features, batch_labels = iterator.get_next()\n return batch_features, batch_labels\n return _input_fn\n\n# Create inference model using Keras\n# The model here is a dnn regressor\ndef make_keras_estimator(output_dir):\n from tensorflow import keras\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(32, input_shape=(N_INPUTS,), name=TIMESERIES_INPUT_LAYER))\n model.add(keras.layers.Activation('relu'))\n model.add(keras.layers.Dense(1))\n model.compile(loss = 'mean_squared_error',\n optimizer = 'adam', \n metrics = ['mae', 'mape']) # mean absolute [percentage] error\n return keras.estimator.model_to_estimator(model, model_dir=output_dir)\n\n# Create the inference model\ndef simple_rnn(features, labels, mode):\n # 0. Reformat input shape to become a sequence\n x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)\n\n # 1. 
Configure the RNN\n lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias = 1.0)\n outputs, _ = rnn.static_rnn(lstm_cell, x, dtype = tf.float32)\n\n # Slice to keep only the last cell of the RNN\n outputs = outputs[-1]\n #print('last outputs={}'.format(outputs))\n\n # Output is result of linear activation of last layer of RNN\n weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))\n bias = tf.Variable(tf.random_normal([N_OUTPUTS]))\n predictions = tf.matmul(outputs, weight) + bias\n \n # 2. Loss function, training/eval ops\n if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:\n loss = tf.losses.mean_squared_error(labels, predictions)\n train_op = tf.contrib.layers.optimize_loss(\n loss = loss,\n global_step = tf.train.get_global_step(),\n learning_rate = 0.01,\n optimizer = \"SGD\")\n eval_metric_ops = {\n \"rmse\": tf.metrics.root_mean_squared_error(labels, predictions)\n }\n else:\n loss = None\n train_op = None\n eval_metric_ops = None\n \n # 3. Create predictions\n predictions_dict = {\"predicted\": predictions}\n \n # 4. Create export outputs\n export_outputs = {\"predict_export_outputs\": tf.estimator.export.PredictOutput(outputs = predictions)}\n\n # 4. Return EstimatorSpec\n return tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = predictions_dict,\n loss = loss,\n train_op = train_op,\n eval_metric_ops = eval_metric_ops,\n export_outputs = export_outputs)\n\n# Create serving input function\ndef serving_input_fn():\n feature_placeholders = {\n TIMESERIES_COL: tf.placeholder(tf.float32, [None, N_INPUTS])\n }\n\n features = {\n key: tf.expand_dims(tensor, -1)\n for key, tensor in feature_placeholders.items()\n }\n features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis = [2])\n\n return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)\n\n# Create custom estimator's train and evaluate function\ndef train_and_evaluate(output_dir, use_keras):\n if use_keras:\n estimator = make_keras_estimator(output_dir)\n else:\n estimator = tf.estimator.Estimator(model_fn = simple_rnn, \n model_dir = output_dir)\n train_spec = tf.estimator.TrainSpec(read_dataset('train.csv',\n tf.estimator.ModeKeys.TRAIN,\n 512),\n max_steps = 1000)\n exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)\n eval_spec = tf.estimator.EvalSpec(read_dataset('valid.csv',\n tf.estimator.ModeKeys.EVAL,\n 512),\n steps = None, \n exporters = exporter)\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n"
] | [
[
"tensorflow.data.TextLineDataset",
"tensorflow.keras.estimator.model_to_estimator",
"tensorflow.logging.set_verbosity",
"tensorflow.matmul",
"tensorflow.contrib.rnn.static_rnn",
"tensorflow.squeeze",
"tensorflow.estimator.export.PredictOutput",
"tensorflow.train.get_global_step",
"tensorflow.random_normal",
"tensorflow.split",
"tensorflow.estimator.export.ServingInputReceiver",
"tensorflow.keras.layers.Activation",
"tensorflow.gfile.Glob",
"tensorflow.keras.layers.Dense",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.stack",
"tensorflow.expand_dims",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.losses.mean_squared_error",
"tensorflow.estimator.Estimator",
"tensorflow.keras.models.Sequential",
"tensorflow.placeholder",
"tensorflow.metrics.root_mean_squared_error",
"tensorflow.estimator.LatestExporter",
"tensorflow.decode_csv",
"tensorflow.estimator.train_and_evaluate"
]
] |
NiccoloSacchi/rlcard | [
"046129e8616b12e25652957869a94ab5fd838ae1"
] | [
"rlcard/games/leducholdem/game.py"
] | [
"import numpy as np\nfrom copy import copy\n\nfrom rlcard.games.leducholdem.dealer import LeducholdemDealer as Dealer\nfrom rlcard.games.leducholdem.player import LeducholdemPlayer as Player\nfrom rlcard.games.leducholdem.judger import LeducholdemJudger as Judger\nfrom rlcard.games.leducholdem.round import LeducholdemRound as Round\n\nfrom rlcard.games.limitholdem.game import LimitholdemGame\n\nclass LeducholdemGame(LimitholdemGame):\n\n def __init__(self, allow_step_back=False):\n ''' Initialize the class leducholdem Game\n '''\n self.allow_step_back = allow_step_back\n ''' No big/small blind\n # Some configarations of the game\n # These arguments are fixed in Leduc Hold'em Game\n\n # Raise amount and allowed times\n self.raise_amount = 2\n self.allowed_raise_num = 2\n\n self.num_players = 2\n '''\n # Some configarations of the game\n # These arguments can be specified for creating new games\n\n # Small blind and big blind\n self.small_blind = 1\n self.big_blind = 2 * self.small_blind\n\n # Raise amount and allowed times\n self.raise_amount = self.big_blind\n self.allowed_raise_num = 2\n\n self.num_players = 2\n\n def init_game(self):\n ''' Initialilze the game of Limit Texas Hold'em\n\n This version supports two-player limit texas hold'em\n\n Returns:\n (tuple): Tuple containing:\n\n (dict): The first state of the game\n (int): Current player's id\n '''\n # Initilize a dealer that can deal cards\n self.dealer = Dealer()\n\n # Initilize two players to play the game\n self.players = [Player(i) for i in range(self.num_players)]\n\n # Initialize a judger class which will decide who wins in the end\n self.judger = Judger()\n\n # Prepare for the first round\n for i in range(self.num_players):\n self.players[i].hand = self.dealer.deal_card()\n # Randomly choose a small blind and a big blind\n s = np.random.randint(0, self.num_players)\n b = (s + 1) % self.num_players\n self.players[b].in_chips = self.big_blind\n self.players[s].in_chips = self.small_blind\n self.public_card = None\n # The player with small blind plays the first\n self.game_pointer = s\n\n # Initilize a bidding round, in the first round, the big blind and the small blind needs to\n # be passed to the round for processing.\n self.round = Round(raise_amount=self.raise_amount,\n allowed_raise_num=self.allowed_raise_num,\n num_players=self.num_players)\n\n self.round.start_new_round(game_pointer=self.game_pointer, raised=[p.in_chips for p in self.players])\n\n # Count the round. There are 2 rounds in each game.\n self.round_counter = 0\n\n # Save the hisory for stepping back to the last state.\n self.history = []\n\n state = self.get_state(self.game_pointer)\n\n return state, self.game_pointer\n\n def step(self, action):\n ''' Get the next state\n\n Args:\n action (str): a specific action. 
(call, raise, fold, or check)\n\n Returns:\n (tuple): Tuple containing:\n\n (dict): next player's state\n (int): next plater's id\n '''\n if self.allow_step_back:\n # First snapshot the current state\n r = copy(self.round)\n r_raised = copy(self.round.raised)\n gp = self.game_pointer\n r_c = self.round_counter\n d_deck = copy(self.dealer.deck)\n p = copy(self.public_card)\n ps = [copy(self.players[i]) for i in range(self.num_players)]\n ps_hand = [copy(self.players[i].hand) for i in range(self.num_players)]\n self.history.append((r, r_raised, gp, r_c, d_deck, p, ps, ps_hand))\n\n # Then we proceed to the next round\n self.game_pointer = self.round.proceed_round(self.players, action)\n\n # If a round is over, we deal more public cards\n if self.round.is_over():\n # For the first round, we deal 1 card as public card. Double the raise amount for the second round\n if self.round_counter == 0:\n self.public_card = self.dealer.deal_card()\n self.round.raise_amount = 2 * self.raise_amount\n\n self.round_counter += 1\n self.round.start_new_round(self.game_pointer)\n\n state = self.get_state(self.game_pointer)\n\n return state, self.game_pointer\n\n def get_state(self, player):\n ''' Return player's state\n\n Args:\n player_id (int): player id\n\n Returns:\n (dict): The state of the player\n '''\n chips = [self.players[i].in_chips for i in range(self.num_players)]\n legal_actions = self.get_legal_actions()\n state = self.players[player].get_state(self.public_card, chips, legal_actions)\n state['current_player'] = self.game_pointer\n\n return state\n\n def is_over(self):\n ''' Check if the game is over\n\n Returns:\n (boolean): True if the game is over\n '''\n alive_players = [1 if p.status=='alive' else 0 for p in self.players]\n # If only one player is alive, the game is over.\n if sum(alive_players) == 1:\n return True\n\n # If all rounds are finshed\n if self.round_counter >= 2:\n return True\n return False\n\n def get_payoffs(self):\n ''' Return the payoffs of the game\n\n Returns:\n (list): Each entry corresponds to the payoff of one player\n '''\n chips_payoffs = self.judger.judge_game(self.players, self.public_card)\n payoffs = np.array(chips_payoffs) / (self.big_blind)\n return payoffs\n\n def step_back(self):\n ''' Return to the previous state of the game\n\n Returns:\n (bool): True if the game steps back successfully\n '''\n if len(self.history) > 0:\n self.round, r_raised, self.game_pointer, self.round_counter, d_deck, self.public_card, self.players, ps_hand = self.history.pop()\n self.round.raised = r_raised\n self.dealer.deck = d_deck\n for i, hand in enumerate(ps_hand):\n self.players[i].hand = hand\n return True\n return False\n\n\n# Test the game\n\n#if __name__ == \"__main__\":\n# game = LeducholdemGame(allow_step_back=True)\n# while True:\n# print('New Game')\n# state, game_pointer = game.init_game()\n# print(game_pointer, state)\n# i = 1\n# while not game.is_over():\n# i += 1\n# legal_actions = game.get_legal_actions()\n# if i == 4:\n# print('Step back')\n# print(game.step_back())\n# game_pointer = game.get_player_id()\n# print(game_pointer)\n# state = game.get_state(game_pointer)\n# legal_actions = game.get_legal_actions()\n# # action = input()\n# action = np.random.choice(legal_actions)\n# print(game_pointer, action, legal_actions, state)\n# state, game_pointer = game.step(action)\n# print(game_pointer, state)\n#\n# print(game.get_payoffs())\n"
] | [
[
"numpy.array",
"numpy.random.randint"
]
] |
DanielHuji-RB/RB-article | [
"e5a9ba30edfb030db1cd3bcf562c6abff3f9d48e"
] | [
"Python/Functions_base/Functions/replace_ElecNaming.py"
] | [
"#Daniel Sand\n\nimport pandas as pd\nimport numpy as np\n\n\nfileName='/Tscores.csv'\n\nnewFileName='/Tscores_v3.csv'\ndf=pd.read_csv(fileName, sep=',')\n\n#6 differnt electordes\noldFormat=['0-1','0-2','0-3','2-Jan','3-Jan','3-Feb']\nnewFormat=['0_1','0_2','0_3','2_1','3_1','3_2']\n\nfor iCont in range(0, len(oldFormat)):\n currElec_old = oldFormat[iCont]\n currElec_new = newFormat[iCont]\n df.loc[df.Elec==currElec_old,'Elec']=currElec_new\ndf.to_csv(path_or_buf=newFileName)\n"
] | [
[
"pandas.read_csv"
]
] |
ThinkBigAnalytics/ludwig | [
"0a3159af4cc91f57251f3dec0cdb863c7003cf00"
] | [
"ludwig/features/image_feature.py"
] | [
"#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport logging\nimport os\n\nimport h5py\nimport numpy as np\nimport tensorflow as tf\nfrom skimage.io import imread\n\nfrom ludwig.constants import *\nfrom ludwig.features.base_feature import BaseFeature\nfrom ludwig.features.base_feature import InputFeature\nfrom ludwig.models.modules.image_encoders import ResNetEncoder\nfrom ludwig.models.modules.image_encoders import Stacked2DCNN\nfrom ludwig.utils.image_utils import resize_image\nfrom ludwig.utils.misc import get_from_registry\nfrom ludwig.utils.misc import set_default_value\n\n\nclass ImageBaseFeature(BaseFeature):\n def __init__(self, feature):\n super().__init__(feature)\n self.type = IMAGE\n\n preprocessing_defaults = {\n 'missing_value_strategy': BACKFILL,\n 'in_memory': True,\n 'resize_method': 'crop_or_pad'\n }\n\n @staticmethod\n def get_feature_meta(column, preprocessing_parameters):\n return {\n 'preprocessing': preprocessing_parameters\n }\n\n @staticmethod\n def add_feature_data(\n feature,\n dataset_df,\n data,\n metadata,\n preprocessing_parameters\n ):\n set_default_value(\n feature,\n 'in_memory',\n preprocessing_parameters['in_memory']\n )\n\n if ('height' in preprocessing_parameters or\n 'width' in preprocessing_parameters):\n should_resize = True\n try:\n provided_height = int(preprocessing_parameters[HEIGHT])\n provided_width = int(preprocessing_parameters[WIDTH])\n except ValueError as e:\n raise ValueError(\n 'Image height and width must be set and have '\n 'positive integer values: ' + str(e)\n )\n if (provided_height <= 0 or provided_width <= 0):\n raise ValueError(\n 'Image height and width must be positive integers'\n )\n else:\n should_resize = False\n\n csv_path = os.path.dirname(os.path.abspath(dataset_df.csv))\n\n num_images = len(dataset_df)\n\n height = 0\n width = 0\n num_channels = 1\n\n if num_images > 0:\n # here if a width and height have not been specified\n # we assume that all images have the same wifth and im_height\n # thus the width and height of the first one are the same\n # of all the other ones\n first_image = imread(\n os.path.join(csv_path, dataset_df[feature['name']][0])\n )\n height = first_image.shape[0]\n width = first_image.shape[1]\n\n if first_image.ndim == 2:\n num_channels = 1\n else:\n num_channels = first_image.shape[2]\n\n if should_resize:\n height = provided_height\n width = provided_width\n\n metadata[feature['name']]['preprocessing']['height'] = height\n metadata[feature['name']]['preprocessing']['width'] = width\n metadata[feature['name']]['preprocessing'][\n 'num_channels'] = num_channels\n\n if feature['in_memory']:\n data[feature['name']] = np.empty(\n (num_images, height, width, num_channels),\n dtype=np.int8\n )\n for i in range(len(dataset_df)):\n filename = os.path.join(\n csv_path,\n dataset_df[feature['name']][i]\n )\n img = imread(filename)\n if 
img.ndim == 2:\n img = img.reshape((img.shape[0], img.shape[1], 1))\n if should_resize:\n img = resize_image(\n img,\n (height, width),\n preprocessing_parameters['resize_method']\n )\n data[feature['name']][i, :, :, :] = img\n else:\n data_fp = os.path.splitext(dataset_df.csv)[0] + '.hdf5'\n mode = 'w'\n if os.path.isfile(data_fp):\n mode = 'r+'\n with h5py.File(data_fp, mode) as h5_file:\n image_dataset = h5_file.create_dataset(\n feature['name'] + '_data',\n (num_images, height, width, num_channels),\n dtype=np.uint8\n )\n for i in range(len(dataset_df)):\n filename = os.path.join(\n csv_path,\n dataset_df[feature['name']][i]\n )\n img = imread(filename)\n if img.ndim == 2:\n img = img.reshape((img.shape[0], img.shape[1], 1))\n if should_resize:\n img = resize_image(\n img,\n (height, width),\n preprocessing_parameters['resize_method'],\n )\n\n image_dataset[i, :height, :width, :] = img\n\n data[feature['name']] = np.arange(num_images)\n\n\nclass ImageInputFeature(ImageBaseFeature, InputFeature):\n def __init__(self, feature):\n super().__init__(feature)\n\n self.height = 0\n self.width = 0\n self.num_channels = 0\n\n self.in_memory = True\n self.data_hdf5_fp = ''\n\n self.encoder = 'stacked_cnn'\n\n encoder_parameters = self.overwrite_defaults(feature)\n\n self.encoder_obj = self.get_image_encoder(encoder_parameters)\n\n def get_image_encoder(self, encoder_parameters):\n return get_from_registry(\n self.encoder, image_encoder_registry)(\n **encoder_parameters\n )\n\n def _get_input_placeholder(self):\n # None dimension is for dealing with variable batch size\n return tf.placeholder(\n tf.float32,\n shape=[None, self.height, self.width, self.num_channels],\n name=self.name,\n )\n\n def build_input(\n self,\n regularizer,\n dropout_rate,\n is_training=False,\n **kwargs\n ):\n placeholder = self._get_input_placeholder()\n logging.debug(' targets_placeholder: {0}'.format(placeholder))\n\n feature_representation, feature_representation_size = self.encoder_obj(\n placeholder,\n regularizer,\n dropout_rate,\n is_training,\n )\n logging.debug(\n ' feature_representation: {0}'.format(feature_representation)\n )\n\n feature_representation = {\n 'name': self.name,\n 'type': self.type,\n 'representation': feature_representation,\n 'size': feature_representation_size,\n 'placeholder': placeholder\n }\n return feature_representation\n\n @staticmethod\n def update_model_definition_with_metadata(\n input_feature,\n feature_metadata,\n *args,\n **kwargs\n ):\n for dim in ['height', 'width', 'num_channels']:\n input_feature[dim] = feature_metadata['preprocessing'][dim]\n input_feature['data_hdf5_fp'] = (\n kwargs['model_definition']['data_hdf5_fp']\n )\n\n @staticmethod\n def populate_defaults(input_feature):\n set_default_value(input_feature, 'tied_weights', None)\n\n\nimage_encoder_registry = {\n 'stacked_cnn': Stacked2DCNN,\n 'resnet': ResNetEncoder\n}\n"
] | [
[
"numpy.arange",
"tensorflow.placeholder",
"numpy.empty"
]
] |
devinlife/tensorflow | [
"1445444c15a396410f25ae91b7d1c19d724e2afc"
] | [
"tensorflow/python/keras/layers/convolutional.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras convolution layers and image transformation layers.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.keras.engine.input_spec import InputSpec\n# imports for backwards namespace compatibility\n# pylint: disable=unused-import\nfrom tensorflow.python.keras.layers.pooling import AveragePooling1D\nfrom tensorflow.python.keras.layers.pooling import AveragePooling2D\nfrom tensorflow.python.keras.layers.pooling import AveragePooling3D\nfrom tensorflow.python.keras.layers.pooling import MaxPooling1D\nfrom tensorflow.python.keras.layers.pooling import MaxPooling2D\nfrom tensorflow.python.keras.layers.pooling import MaxPooling3D\n# pylint: enable=unused-import\nfrom tensorflow.python.keras.utils import conv_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.util.tf_export import keras_export\n# pylint: disable=g-classes-have-attributes\n\n\nclass Conv(Layer):\n \"\"\"Abstract N-D convolution layer (private, used as implementation base).\n\n This layer creates a convolution kernel that is convolved\n (actually cross-correlated) with the layer input to produce a tensor of\n outputs. If `use_bias` is True (and a `bias_initializer` is provided),\n a bias vector is created and added to the outputs. Finally, if\n `activation` is not `None`, it is applied to the outputs as well.\n\n Note: layer attributes cannot be modified after the layer has been called\n once (except the `trainable` attribute).\n\n Arguments:\n rank: An integer, the rank of the convolution, e.g. \"2\" for 2D convolution.\n filters: Integer, the dimensionality of the output space (i.e. 
the number\n of filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n length of the convolution window.\n strides: An integer or tuple/list of n integers,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"same\"`, or `\"causal\"` (case-insensitive).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, ..., channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, ...)`.\n dilation_rate: An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied.\n use_bias: Boolean, whether the layer uses a bias.\n kernel_initializer: An initializer for the convolution kernel.\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer will be used.\n kernel_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n kernel_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). Constraints are\n not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n trainable: Boolean, if `True` the weights of this layer will be marked as\n trainable (and listed in `layer.trainable_weights`).\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(self, rank,\n filters,\n kernel_size,\n strides=1,\n padding='valid',\n data_format=None,\n dilation_rate=1,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n trainable=True,\n name=None,\n **kwargs):\n super(Conv, self).__init__(\n trainable=trainable,\n name=name,\n activity_regularizer=regularizers.get(activity_regularizer),\n **kwargs)\n self.rank = rank\n if filters is not None and not isinstance(filters, int):\n filters = int(filters)\n self.filters = filters\n self.kernel_size = conv_utils.normalize_tuple(\n kernel_size, rank, 'kernel_size')\n if not all(self.kernel_size):\n raise ValueError('The argument `kernel_size` cannot contain 0(s). 
'\n 'Received: %s' % (kernel_size,))\n self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')\n 
self.padding = conv_utils.normalize_padding(padding)\n if (self.padding == 'causal' and not isinstance(self,\n 
(Conv1D, SeparableConv1D))):\n raise ValueError('Causal padding is only supported for `Conv1D` '\n 
'and `SeparableConv1D`.')\n self.data_format = conv_utils.normalize_data_format(data_format)\n 
self.dilation_rate = conv_utils.normalize_tuple(\n dilation_rate, rank, 'dilation_rate')\n 
self.activation = activations.get(activation)\n self.use_bias = use_bias\n 
self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n 
self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n 
self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n 
self.input_spec = InputSpec(ndim=self.rank + 2)\n\n def build(self, input_shape):\n 
input_shape = tensor_shape.TensorShape(input_shape)\n input_channel = self._get_input_channel(input_shape)\n 
kernel_shape = self.kernel_size + (input_channel, self.filters)\n\n self.kernel = self.add_weight(\n 
name='kernel',\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n 
constraint=self.kernel_constraint,\n trainable=True,\n dtype=self.dtype)\n if self.use_bias:\n 
self.bias = self.add_weight(\n name='bias',\n shape=(self.filters,),\n initializer=self.bias_initializer,\n 
regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=True,\n 
dtype=self.dtype)\n else:\n self.bias = None\n channel_axis = self._get_channel_axis()\n 
self.input_spec = InputSpec(ndim=self.rank + 2,\n axes={channel_axis: input_channel})\n\n 
self._build_conv_op_input_shape = input_shape\n self._build_input_channel = input_channel\n 
self._padding_op = self._get_padding_op()\n self._conv_op_data_format = conv_utils.convert_data_format(\n 
self.data_format, self.rank + 2)\n self._convolution_op = nn_ops.Convolution(\n input_shape,\n 
filter_shape=self.kernel.shape,\n dilation_rate=self.dilation_rate,\n strides=self.strides,\n 
padding=self._padding_op,\n data_format=self._conv_op_data_format)\n self.built = True\n\n 
def call(self, inputs):\n if self._recreate_conv_op(inputs):\n self._convolution_op = nn_ops.Convolution(\n 
inputs.get_shape(),\n filter_shape=self.kernel.shape,\n dilation_rate=self.dilation_rate,\n 
strides=self.strides,\n padding=self._padding_op,\n data_format=self._conv_op_data_format)\n 
self._build_conv_op_input_shape = inputs.get_shape()\n\n # Apply causal padding to inputs for Conv1D.\n 
if self.padding == 'causal' and self.__class__.__name__ == 'Conv1D':\n inputs = array_ops.pad(inputs, self._compute_causal_padding())\n\n 
outputs = self._convolution_op(inputs, self.kernel)\n\n if self.use_bias:\n 
if self.data_format == 'channels_first':\n if self.rank == 1:\n # nn.bias_add does not accept a 1D input tensor.\n 
bias = array_ops.reshape(self.bias, (1, self.filters, 1))\n outputs += bias\n else:\n 
outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')\n else:\n 
outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')\n\n if self.activation is not None:\n 
return self.activation(outputs)\n return outputs\n\n def _spatial_output_shape(self, spatial_input_shape):\n 
return [\n conv_utils.conv_output_length(\n length,\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n 
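# For 'same' or 'causal' padding, conv_output_length returns\n # ceil(length / stride); for 'valid' padding it returns\n 
# ceil((length - dilation * (kernel_size - 1)) / stride).\n 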
dilation=self.dilation_rate[i])\n for i, length in enumerate(spatial_input_shape)\n ]\n\n 
def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n 
if self.data_format == 'channels_last':\n return tensor_shape.TensorShape(\n 
[input_shape[0]] + self._spatial_output_shape(input_shape[1:-1]) +\n [self.filters])\n else:\n 
return tensor_shape.TensorShape(\n [input_shape[0], self.filters] +\n 
self._spatial_output_shape(input_shape[2:]))\n\n def get_config(self):\n config = {\n 
'filters': self.filters,\n 'kernel_size': self.kernel_size,\n 'strides': self.strides,\n 
'padding': self.padding,\n 'data_format': self.data_format,\n 'dilation_rate': self.dilation_rate,\n 
'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 
'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 
'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 
'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n 
}\n base_config = super(Conv, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n 
def _compute_causal_padding(self):\n \"\"\"Calculates padding for 'causal' option for 1-d conv layers.\"\"\"\n 
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)\n if self.data_format == 'channels_last':\n 
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]\n else:\n causal_padding = [[0, 0], [0, 0], [left_pad, 0]]\n 
return causal_padding\n\n def _get_channel_axis(self):\n if self.data_format == 'channels_first':\n 
return 1\n else:\n return -1\n\n def _get_input_channel(self, input_shape):\n 
channel_axis = self._get_channel_axis()\n if input_shape.dims[channel_axis].value is None:\n 
raise ValueError('The channel dimension of the inputs '\n 'should be defined. Found `None`.')\n 
return int(input_shape[channel_axis])\n\n def _get_padding_op(self):\n if self.padding == 'causal':\n 
op_padding = 'valid'\n else:\n op_padding = self.padding\n if not isinstance(op_padding, (list, tuple)):\n 
op_padding = op_padding.upper()\n return op_padding\n\n def _recreate_conv_op(self, inputs):\n 
\"\"\"Recreate conv_op if necessary.\n\n Check if the input_shape in call() is different from that in build().\n 
If the most specific input shape describing the build and call shapes is not\n equal to the shape we currently built with, then we need to rebuild the\n 
_convolution_op to avoid incorrect behavior.\n\n Args:\n inputs: The input data to the call() method.\n\n 
Returns:\n `True` or `False` to indicate whether to recreate the conv_op.\n \"\"\"\n 
call_input_shape = inputs.get_shape()\n # If the most specific compatible shape between _build_conv_op_input_shape\n 
# and call_input_shape is not _build_conv_op_input_shape then we must\n # re-build.\n 
return self._build_conv_op_input_shape.most_specific_compatible_shape(\n call_input_shape) != self._build_conv_op_input_shape\n\n\n 
@keras_export('keras.layers.Conv1D', 'keras.layers.Convolution1D')\nclass Conv1D(Conv):\n \"\"\"1D convolution layer (e.g. 
temporal convolution).\n\n This layer creates a convolution kernel that is convolved\n with the layer input over a single spatial (or temporal) dimension\n 
to produce a tensor of outputs.\n If `use_bias` is True, a bias vector is created and added to the outputs.\n 
Finally, if `activation` is not `None`,\n it is applied to the outputs as well.\n\n 
When using this layer as the first layer in a model,\n provide an `input_shape` argument\n (tuple of integers or `None`, e.g.\n 
`(10, 128)` for sequences of 10 128-dimensional vectors,\n or `(None, 128)` for variable-length sequences of 128-dimensional vectors).\n\n 
Examples:\n\n >>> # The inputs are 128-length vectors with 10 timesteps, and the batch size\n >>> # is 4.\n 
>>> input_shape = (4, 10, 128)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv1D(\n 
... 32, 3, activation='relu', input_shape=input_shape)(x)\n >>> print(y.shape)\n (4, 8, 32)\n\n 
Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n 
kernel_size: An integer or tuple/list of a single integer,\n specifying the length of the 1D convolution window.\n 
strides: An integer or tuple/list of a single integer,\n specifying the stride length of the convolution.\n 
Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n 
padding: One of `\"valid\"`, `\"causal\"` or `\"same\"` (case-insensitive).\n `\"causal\"` results in causal (dilated) convolutions, e.g. `output[t]`\n 
does not depend on `input[t+1:]`. Useful when modeling temporal data\n where the model should not violate the temporal order.\n 
See [WaveNet: A Generative Model for Raw Audio, section\n 2.1](https://arxiv.org/abs/1609.03499).\n 
data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n 
dilation_rate: an integer or tuple/list of a single integer, specifying\n the dilation rate to use for dilated convolution.\n 
Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n 
activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n 
use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n 
see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n 
kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n 
bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n 
activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n 
kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n 
bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n 
Input shape:\n 3D tensor with shape: `(batch_size, steps, input_dim)`\n\n 
Output shape:\n 3D tensor with shape: `(batch_size, new_steps, filters)`\n `steps` value might have changed due to padding or strides.\n\n 
Returns:\n A tensor of rank 3 representing\n `activation(conv1d(inputs, kernel) + bias)`.\n\n 
Raises:\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n \"\"\"\n\n 
def __init__(self,\n filters,\n kernel_size,\n strides=1,\n padding='valid',\n data_format='channels_last',\n dilation_rate=1,\n activation=None,\n 
use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(Conv1D, self).__init__(\n rank=1,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activations.get(activation),\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n\n@keras_export('keras.layers.Conv2D', 'keras.layers.Convolution2D')\nclass Conv2D(Conv):\n \"\"\"2D convolution layer (e.g. spatial convolution over images).\n\n This layer creates a convolution kernel that is convolved\n with the layer input to produce a tensor of\n outputs. If `use_bias` is True,\n a bias vector is created and added to the outputs. Finally, if\n `activation` is not `None`, it is applied to the outputs as well.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers, does not include the sample axis),\n e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\n in `data_format=\"channels_last\"`.\n\n Examples:\n\n >>> # The inputs are 28x28 RGB images with `channels_last` and the batch\n >>> # size is 4.\n >>> input_shape = (4, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', input_shape=input_shape)(x)\n >>> print(y.shape)\n (4, 26, 26, 2)\n\n >>> # With `dilation_rate` as 2.\n >>> input_shape = (4, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', dilation_rate=2, input_shape=input_shape)(x)\n >>> print(y.shape)\n (4, 24, 24, 2)\n\n >>> # With `padding` as \"same\".\n >>> input_shape = (4, 28, 28, 3)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv2D(\n ... 2, 3, activation='relu', padding=\"same\", input_shape=input_shape)(x)\n >>> print(y.shape)\n (4, 28, 28, 2)\n\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n\n Returns:\n A tensor of rank 4 representing\n `activation(conv2d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(Conv2D, self).__init__(\n rank=2,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n 
data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activations.get(activation),\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n\n@keras_export('keras.layers.Conv3D', 'keras.layers.Convolution3D')\nclass Conv3D(Conv):\n \"\"\"3D convolution layer (e.g. spatial convolution over volumes).\n\n This layer creates a convolution kernel that is convolved\n with the layer input to produce a tensor of\n outputs. If `use_bias` is True,\n a bias vector is created and added to the outputs. Finally, if\n `activation` is not `None`, it is applied to the outputs as well.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers, does not include the sample axis),\n e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes\n with a single channel,\n in `data_format=\"channels_last\"`.\n\n Examples:\n\n >>> # The inputs are 28x28x28 volumes with a single channel, and the\n >>> # batch size is 4\n >>> input_shape =(4, 28, 28, 28, 1)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv3D(\n ... 2, 3, activation='relu', input_shape=input_shape)(x)\n >>> print(y.shape)\n (4, 26, 26, 26, 2)\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 3 integers, specifying the\n depth, height and width of the 3D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 3 integers,\n specifying the strides of the convolution along each spatial\n dimension.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 3 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n kernel_regularizer: Regularizer 
function applied to\n the `kernel` weights matrix (\n see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 5D tensor with shape:\n `(batch_size, channels, conv_dim1, conv_dim2, conv_dim3)` if\n data_format='channels_first'\n or 5D tensor with shape:\n `(batch_size, conv_dim1, conv_dim2, conv_dim3, channels)` if\n data_format='channels_last'.\n\n Output shape:\n 5D tensor with shape:\n `(batch_size, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if\n data_format='channels_first'\n or 5D tensor with shape:\n `(batch_size, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if\n data_format='channels_last'.\n `new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have\n changed due to padding.\n\n Returns:\n A tensor of rank 5 representing\n `activation(conv3d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(Conv3D, self).__init__(\n rank=3,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activations.get(activation),\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n\n@keras_export('keras.layers.Conv1DTranspose',\n 'keras.layers.Convolution1DTranspose')\nclass Conv1DTranspose(Conv1D):\n \"\"\"Transposed convolution layer (sometimes called Deconvolution).\n\n The need for transposed convolutions generally arises\n from the desire to use a transformation going in the opposite direction\n of a normal convolution, i.e., from something that has the shape of the\n output of some convolution to something that has the shape of its input\n while maintaining a connectivity pattern that is compatible with\n said convolution.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers, does not include the sample axis),\n e.g. `input_shape=(128, 3)` for data with 128 time steps and 3 channels.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer length of the 1D convolution window.\n strides: An integer specifying the stride of the convolution along the\n time dimension. 
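For example, with `padding=\"same\"` and no\n `output_padding`, an input with `t` timesteps produces an output with\n `t * strides` timesteps. 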
Specifying a stride value != 1 is incompatible with\n specifying a `dilation_rate` value != 1. Defaults to 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n output_padding: An integer specifying the amount of padding along\n the time dimension of the output tensor.\n The amount of output padding must be lower than the stride.\n If set to `None` (default), the output shape is inferred.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, length)`.\n dilation_rate: an integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying a `dilation_rate` value != 1 is\n incompatible with specifying a stride value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 3D tensor with shape:\n `(batch_size, steps, channels)`\n\n Output shape:\n 3D tensor with shape:\n `(batch_size, new_steps, filters)`\n If `output_padding` is specified:\n ```\n new_timesteps = ((timesteps - 1) * strides + kernel_size -\n 2 * padding + output_padding)\n ```\n\n Returns:\n A tensor of rank 3 representing\n `activation(conv1dtranspose(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n\n References:\n - [A guide to convolution arithmetic for deep learning](\n https://arxiv.org/abs/1603.07285v1)\n - [Deconvolutional Networks](\n https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=1,\n padding='valid',\n output_padding=None,\n data_format=None,\n dilation_rate=1,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(Conv1DTranspose, self).__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activations.get(activation),\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n 
bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n self.output_padding = output_padding\n if self.output_padding is not None:\n self.output_padding = conv_utils.normalize_tuple(\n self.output_padding, 1, 'output_padding')\n for stride, out_pad in zip(self.strides, self.output_padding):\n if out_pad >= stride:\n raise ValueError('Stride ' + str(self.strides) + ' must be '\n 'greater than output padding ' +\n str(self.output_padding))\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n if len(input_shape) != 3:\n raise ValueError('Inputs should have rank 3. Received input shape: ' +\n str(input_shape))\n channel_axis = self._get_channel_axis()\n if input_shape.dims[channel_axis].value is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. Found `None`.')\n input_dim = int(input_shape[channel_axis])\n self.input_spec = InputSpec(ndim=3, axes={channel_axis: input_dim})\n kernel_shape = self.kernel_size + (self.filters, input_dim)\n\n self.kernel = self.add_weight(\n name='kernel',\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n trainable=True,\n dtype=self.dtype)\n if self.use_bias:\n self.bias = self.add_weight(\n name='bias',\n shape=(self.filters,),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=True,\n dtype=self.dtype)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs):\n inputs_shape = array_ops.shape(inputs)\n batch_size = inputs_shape[0]\n if self.data_format == 'channels_first':\n t_axis = 2\n else:\n t_axis = 1\n\n length = inputs_shape[t_axis]\n if self.output_padding is None:\n output_padding = None\n else:\n output_padding = self.output_padding[0]\n\n # Infer the dynamic output shape:\n out_length = conv_utils.deconv_output_length(\n length, self.kernel_size[0], padding=self.padding,\n output_padding=output_padding, stride=self.strides[0],\n dilation=self.dilation_rate[0])\n if self.data_format == 'channels_first':\n output_shape = (batch_size, self.filters, out_length)\n else:\n output_shape = (batch_size, out_length, self.filters)\n data_format = conv_utils.convert_data_format(self.data_format, ndim=3)\n\n output_shape_tensor = array_ops.stack(output_shape)\n outputs = nn_ops.conv1d_transpose(\n inputs,\n self.kernel,\n output_shape_tensor,\n strides=self.strides,\n padding=self.padding.upper(),\n data_format=data_format,\n dilations=self.dilation_rate)\n\n if not context.executing_eagerly():\n # Infer the static output shape:\n out_shape = self.compute_output_shape(inputs.shape)\n outputs.set_shape(out_shape)\n\n if self.use_bias:\n outputs = nn.bias_add(\n outputs,\n self.bias,\n data_format=data_format)\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n output_shape = list(input_shape)\n if self.data_format == 'channels_first':\n c_axis, t_axis = 1, 2\n else:\n c_axis, t_axis = 2, 1\n\n if self.output_padding is None:\n output_padding = None\n else:\n output_padding = self.output_padding[0]\n output_shape[c_axis] = self.filters\n output_shape[t_axis] = conv_utils.deconv_output_length(\n output_shape[t_axis],\n self.kernel_size[0],\n padding=self.padding,\n output_padding=output_padding,\n stride=self.strides[0],\n dilation=self.dilation_rate[0])\n return 
tensor_shape.TensorShape(output_shape)\n\n def get_config(self):\n config = super(Conv1DTranspose, self).get_config()\n config['output_padding'] = self.output_padding\n return config\n\n\n@keras_export('keras.layers.Conv2DTranspose',\n 'keras.layers.Convolution2DTranspose')\nclass Conv2DTranspose(Conv2D):\n \"\"\"Transposed convolution layer (sometimes called Deconvolution).\n\n The need for transposed convolutions generally arises\n from the desire to use a transformation going in the opposite direction\n of a normal convolution, i.e., from something that has the shape of the\n output of some convolution to something that has the shape of its input\n while maintaining a connectivity pattern that is compatible with\n said convolution.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers, does not include the sample axis),\n e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\n in `data_format=\"channels_last\"`.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n output_padding: An integer or tuple/list of 2 integers,\n specifying the amount of padding along the height and width\n of the output tensor.\n Can be a single integer to specify the same value for all\n spatial dimensions.\n The amount of output padding along a given dimension must be\n lower than the stride along that same dimension.\n If set to `None` (default), the output shape is inferred.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the 
output of the layer (its \"activation\") (see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n If `output_padding` is specified:\n ```\n new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +\n output_padding[0])\n new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +\n output_padding[1])\n ```\n\n Returns:\n A tensor of rank 4 representing\n `activation(conv2dtranspose(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n\n References:\n - [A guide to convolution arithmetic for deep\n learning](https://arxiv.org/abs/1603.07285v1)\n - [Deconvolutional\n Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n output_padding=None,\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(Conv2DTranspose, self).__init__(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activations.get(activation),\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n self.output_padding = output_padding\n if self.output_padding is not None:\n self.output_padding = conv_utils.normalize_tuple(\n self.output_padding, 2, 'output_padding')\n for stride, out_pad in zip(self.strides, self.output_padding):\n if out_pad >= stride:\n raise ValueError('Stride ' + str(self.strides) + ' must be '\n 'greater than output padding ' +\n str(self.output_padding))\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n if len(input_shape) != 4:\n raise ValueError('Inputs should have rank 4. Received input shape: ' +\n str(input_shape))\n channel_axis = self._get_channel_axis()\n if input_shape.dims[channel_axis].value is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. 
Found `None`.')\n input_dim = int(input_shape[channel_axis])\n self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})\n 
kernel_shape = self.kernel_size + (self.filters, input_dim)\n\n self.kernel = self.add_weight(\n 
name='kernel',\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n 
constraint=self.kernel_constraint,\n trainable=True,\n dtype=self.dtype)\n if self.use_bias:\n 
self.bias = self.add_weight(\n name='bias',\n shape=(self.filters,),\n initializer=self.bias_initializer,\n 
regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=True,\n 
dtype=self.dtype)\n else:\n self.bias = None\n self.built = True\n\n 
def call(self, inputs):\n inputs_shape = array_ops.shape(inputs)\n batch_size = inputs_shape[0]\n 
if self.data_format == 'channels_first':\n h_axis, w_axis = 2, 3\n else:\n h_axis, w_axis = 1, 2\n\n 
# Use the constant height and width when possible.\n # TODO(scottzhu): Extract this into a utility function that can be applied\n 
# to all convolutional layers, which currently lose the static shape\n # information due to tf.shape().\n 
height, width = None, None\n if inputs.shape.rank is not None:\n dims = inputs.shape.as_list()\n 
height = dims[h_axis]\n width = dims[w_axis]\n height = height if height is not None else inputs_shape[h_axis]\n 
width = width if width is not None else inputs_shape[w_axis]\n\n kernel_h, kernel_w = self.kernel_size\n 
stride_h, stride_w = self.strides\n\n if self.output_padding is None:\n out_pad_h = out_pad_w = None\n 
else:\n out_pad_h, out_pad_w = self.output_padding\n\n # Infer the dynamic output shape:\n 
out_height = conv_utils.deconv_output_length(height,\n kernel_h,\n padding=self.padding,\n output_padding=out_pad_h,\n 
stride=stride_h,\n dilation=self.dilation_rate[0])\n out_width = conv_utils.deconv_output_length(width,\n 
kernel_w,\n padding=self.padding,\n output_padding=out_pad_w,\n stride=stride_w,\n dilation=self.dilation_rate[1])\n 
if self.data_format == 'channels_first':\n output_shape = (batch_size, self.filters, out_height, out_width)\n 
else:\n output_shape = (batch_size, out_height, out_width, self.filters)\n\n 
output_shape_tensor = array_ops.stack(output_shape)\n outputs = backend.conv2d_transpose(\n 
inputs,\n self.kernel,\n output_shape_tensor,\n strides=self.strides,\n padding=self.padding,\n 
data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n\n if not context.executing_eagerly():\n 
# Infer the static output shape:\n out_shape = self.compute_output_shape(inputs.shape)\n outputs.set_shape(out_shape)\n\n 
if self.use_bias:\n outputs = nn.bias_add(\n outputs,\n self.bias,\n 
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))\n\n if self.activation is not None:\n 
return self.activation(outputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n 
input_shape = tensor_shape.TensorShape(input_shape).as_list()\n output_shape = list(input_shape)\n 
if self.data_format == 'channels_first':\n c_axis, h_axis, w_axis = 1, 2, 3\n else:\n 
c_axis, h_axis, w_axis = 3, 1, 2\n\n kernel_h, kernel_w = self.kernel_size\n stride_h, stride_w = self.strides\n\n 
if self.output_padding is None:\n out_pad_h = out_pad_w = None\n else:\n 
out_pad_h, out_pad_w = self.output_padding\n\n output_shape[c_axis] = self.filters\n 
output_shape[h_axis] = conv_utils.deconv_output_length(\n output_shape[h_axis],\n kernel_h,\n 
padding=self.padding,\n output_padding=out_pad_h,\n stride=stride_h,\n dilation=self.dilation_rate[0])\n 
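# Worked example: with padding='valid', stride 2, kernel 3 and no output\n # padding, a spatial dimension of 10 maps to (10 - 1) * 2 + 3 = 21.\n 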
output_shape[w_axis] = conv_utils.deconv_output_length(\n output_shape[w_axis],\n kernel_w,\n padding=self.padding,\n output_padding=out_pad_w,\n stride=stride_w,\n dilation=self.dilation_rate[1])\n return tensor_shape.TensorShape(output_shape)\n\n def get_config(self):\n config = super(Conv2DTranspose, self).get_config()\n config['output_padding'] = self.output_padding\n return config\n\n\n@keras_export('keras.layers.Conv3DTranspose',\n 'keras.layers.Convolution3DTranspose')\nclass Conv3DTranspose(Conv3D):\n \"\"\"Transposed convolution layer (sometimes called Deconvolution).\n\n The need for transposed convolutions generally arises\n from the desire to use a transformation going in the opposite direction\n of a normal convolution, i.e., from something that has the shape of the\n output of some convolution to something that has the shape of its input\n while maintaining a connectivity pattern that is compatible with\n said convolution.\n\n When using this layer as the first layer in a model,\n provide the keyword argument `input_shape`\n (tuple of integers, does not include the sample axis),\n e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels\n if `data_format=\"channels_last\"`.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 3 integers, specifying the\n depth, height and width of the 3D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 3 integers,\n specifying the strides of the convolution along the depth, height\n and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n output_padding: An integer or tuple/list of 3 integers,\n specifying the amount of padding along the depth, height, and\n width.\n Can be a single integer to specify the same value for all\n spatial dimensions.\n The amount of output padding along a given dimension must be\n lower than the stride along that same dimension.\n If set to `None` (default), the output shape is inferred.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, depth, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, depth, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: an integer or tuple/list of 3 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (\n see 
`keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n 
activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n 
kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n 
bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n 
Input shape:\n 5D tensor with shape:\n `(batch_size, channels, depth, rows, cols)` if data_format='channels_first'\n 
or 5D tensor with shape:\n `(batch_size, depth, rows, cols, channels)` if data_format='channels_last'.\n\n 
Output shape:\n 5D tensor with shape:\n `(batch_size, filters, new_depth, new_rows, new_cols)` if\n 
data_format='channels_first'\n or 5D tensor with shape:\n `(batch_size, new_depth, new_rows, new_cols, filters)` if\n 
data_format='channels_last'.\n `depth`, `rows` and `cols` values might have changed due to padding.\n 
If `output_padding` is specified:\n ```\n new_depth = ((depth - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +\n 
output_padding[0])\n new_rows = ((rows - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +\n 
output_padding[1])\n new_cols = ((cols - 1) * strides[2] + kernel_size[2] - 2 * padding[2] +\n 
output_padding[2])\n ```\n\n Returns:\n A tensor of rank 5 representing\n `activation(conv3dtranspose(inputs, kernel) + bias)`.\n\n 
Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n\n 
References:\n - [A guide to convolution arithmetic for deep\n learning](https://arxiv.org/abs/1603.07285v1)\n 
- [Deconvolutional\n Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)\n \"\"\"\n\n 
def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1, 1),\n padding='valid',\n output_padding=None,\n 
data_format=None,\n dilation_rate=(1, 1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n 
bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n 
kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(Conv3DTranspose, self).__init__(\n 
filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n 
dilation_rate=dilation_rate,\n activation=activations.get(activation),\n use_bias=use_bias,\n 
kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n 
kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n 
activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n 
bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n self.output_padding = output_padding\n 
if self.output_padding is not None:\n self.output_padding = conv_utils.normalize_tuple(\n 
self.output_padding, 3, 'output_padding')\n for stride, out_pad in zip(self.strides, self.output_padding):\n 
if out_pad >= stride:\n raise ValueError('Stride ' + str(self.strides) + ' must be '\n 
'greater than output padding ' +\n str(self.output_padding))\n\n def build(self, input_shape):\n 
input_shape = tensor_shape.TensorShape(input_shape)\n if len(input_shape) != 5:\n 
raise ValueError('Inputs should have rank 5. Received input shape: ' +\n str(input_shape))\n 
channel_axis = self._get_channel_axis()\n if input_shape.dims[channel_axis].value is None:\n raise 
ValueError('The channel dimension of the inputs '\n 'should be defined, found None: ' + str(input_shape))\n input_dim = int(input_shape[channel_axis])\n kernel_shape = self.kernel_size + (self.filters, input_dim)\n self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})\n\n self.kernel = self.add_weight(\n 'kernel',\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n trainable=True,\n dtype=self.dtype)\n if self.use_bias:\n self.bias = self.add_weight(\n 'bias',\n shape=(self.filters,),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=True,\n dtype=self.dtype)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs):\n inputs_shape = array_ops.shape(inputs)\n batch_size = inputs_shape[0]\n if self.data_format == 'channels_first':\n d_axis, h_axis, w_axis = 2, 3, 4\n else:\n d_axis, h_axis, w_axis = 1, 2, 3\n\n depth = inputs_shape[d_axis]\n height = inputs_shape[h_axis]\n width = inputs_shape[w_axis]\n\n kernel_d, kernel_h, kernel_w = self.kernel_size\n stride_d, stride_h, stride_w = self.strides\n\n if self.output_padding is None:\n out_pad_d = out_pad_h = out_pad_w = None\n else:\n out_pad_d, out_pad_h, out_pad_w = self.output_padding\n\n # Infer the dynamic output shape:\n out_depth = conv_utils.deconv_output_length(depth,\n kernel_d,\n padding=self.padding,\n output_padding=out_pad_d,\n stride=stride_d)\n out_height = conv_utils.deconv_output_length(height,\n kernel_h,\n padding=self.padding,\n output_padding=out_pad_h,\n stride=stride_h)\n out_width = conv_utils.deconv_output_length(width,\n kernel_w,\n padding=self.padding,\n output_padding=out_pad_w,\n stride=stride_w)\n if self.data_format == 'channels_first':\n output_shape = (batch_size, self.filters, out_depth, out_height,\n out_width)\n strides = (1, 1, stride_d, stride_h, stride_w)\n else:\n output_shape = (batch_size, out_depth, out_height, out_width,\n self.filters)\n strides = (1, stride_d, stride_h, stride_w, 1)\n\n output_shape_tensor = array_ops.stack(output_shape)\n outputs = nn.conv3d_transpose(\n inputs,\n self.kernel,\n output_shape_tensor,\n strides,\n data_format=conv_utils.convert_data_format(self.data_format, ndim=5),\n padding=self.padding.upper())\n\n if not context.executing_eagerly():\n # Infer the static output shape:\n out_shape = self.compute_output_shape(inputs.shape)\n outputs.set_shape(out_shape)\n\n if self.use_bias:\n outputs = nn.bias_add(\n outputs,\n self.bias,\n data_format=conv_utils.convert_data_format(self.data_format, ndim=4))\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n output_shape = list(input_shape)\n if self.data_format == 'channels_first':\n c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4\n else:\n c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3\n\n kernel_d, kernel_h, kernel_w = self.kernel_size\n stride_d, stride_h, stride_w = self.strides\n\n if self.output_padding is None:\n out_pad_d = out_pad_h = out_pad_w = None\n else:\n out_pad_d, out_pad_h, out_pad_w = self.output_padding\n\n output_shape[c_axis] = self.filters\n output_shape[d_axis] = conv_utils.deconv_output_length(\n output_shape[d_axis],\n kernel_d,\n padding=self.padding,\n output_padding=out_pad_d,\n stride=stride_d)\n output_shape[h_axis] = conv_utils.deconv_output_length(\n 
output_shape[h_axis],\n kernel_h,\n padding=self.padding,\n output_padding=out_pad_h,\n stride=stride_h)\n output_shape[w_axis] = conv_utils.deconv_output_length(\n output_shape[w_axis],\n kernel_w,\n padding=self.padding,\n output_padding=out_pad_w,\n stride=stride_w)\n return tensor_shape.TensorShape(output_shape)\n\n def get_config(self):\n config = super(Conv3DTranspose, self).get_config()\n config.pop('dilation_rate')\n config['output_padding'] = self.output_padding\n return config\n\n\nclass SeparableConv(Conv):\n \"\"\"Abstract base layer for separable nD convolution.\n\n This layer performs a depthwise convolution that acts separately on\n channels, followed by a pointwise convolution that mixes channels.\n If `use_bias` is True and a bias initializer is provided,\n it adds a bias vector to the output.\n It then optionally applies an activation function to produce the final output.\n\n Arguments:\n rank: An integer, the rank of the convolution, e.g. \"2\" for 2D convolution.\n filters: Integer, the dimensionality of the output space (i.e. the number\n of filters in the convolution).\n kernel_size: A tuple or list of integers specifying the spatial\n dimensions of the filters. Can be a single integer to specify the same\n value for all spatial dimensions.\n strides: A tuple or list of integers specifying the strides\n of the convolution. Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any `stride` value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, ..., channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, ...)`.\n dilation_rate: An integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n depth_multiplier: The number of depthwise convolution output channels for\n each input channel. The total number of depthwise convolution output\n channels will be equal to `num_filters_in * depth_multiplier`.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias.\n depthwise_initializer: An initializer for the depthwise convolution kernel.\n pointwise_initializer: An initializer for the pointwise convolution kernel.\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer will be used.\n depthwise_regularizer: Optional regularizer for the depthwise\n convolution kernel.\n pointwise_regularizer: Optional regularizer for the pointwise\n convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n depthwise_constraint: Optional projection function to be applied to the\n depthwise kernel after being updated by an `Optimizer` (e.g. used for\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). 
Constraints are\n not safe to use when doing asynchronous distributed training.\n pointwise_constraint: Optional projection function to be applied to the\n pointwise kernel after being updated by an `Optimizer`.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n trainable: Boolean, if `True` the weights of this layer will be marked as\n trainable (and listed in `layer.trainable_weights`).\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(self,\n rank,\n filters,\n kernel_size,\n strides=1,\n padding='valid',\n data_format=None,\n dilation_rate=1,\n depth_multiplier=1,\n activation=None,\n use_bias=True,\n depthwise_initializer='glorot_uniform',\n pointwise_initializer='glorot_uniform',\n bias_initializer='zeros',\n depthwise_regularizer=None,\n pointwise_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n depthwise_constraint=None,\n pointwise_constraint=None,\n bias_constraint=None,\n trainable=True,\n name=None,\n **kwargs):\n super(SeparableConv, self).__init__(\n rank=rank,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activations.get(activation),\n use_bias=use_bias,\n bias_initializer=initializers.get(bias_initializer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n bias_constraint=bias_constraint,\n trainable=trainable,\n name=name,\n **kwargs)\n self.depth_multiplier = depth_multiplier\n self.depthwise_initializer = initializers.get(depthwise_initializer)\n self.pointwise_initializer = initializers.get(pointwise_initializer)\n self.depthwise_regularizer = regularizers.get(depthwise_regularizer)\n self.pointwise_regularizer = regularizers.get(pointwise_regularizer)\n self.depthwise_constraint = constraints.get(depthwise_constraint)\n self.pointwise_constraint = constraints.get(pointwise_constraint)\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n channel_axis = self._get_channel_axis()\n if input_shape.dims[channel_axis].value is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. 
Found `None`.')\n input_dim = int(input_shape[channel_axis])\n self.input_spec = InputSpec(ndim=self.rank + 2,\n axes={channel_axis: input_dim})\n depthwise_kernel_shape = self.kernel_size + (input_dim,\n self.depth_multiplier)\n pointwise_kernel_shape = (\n 1,) * self.rank + (self.depth_multiplier * input_dim, self.filters)\n\n self.depthwise_kernel = self.add_weight(\n name='depthwise_kernel',\n shape=depthwise_kernel_shape,\n initializer=self.depthwise_initializer,\n regularizer=self.depthwise_regularizer,\n constraint=self.depthwise_constraint,\n trainable=True,\n dtype=self.dtype)\n self.pointwise_kernel = self.add_weight(\n name='pointwise_kernel',\n shape=pointwise_kernel_shape,\n initializer=self.pointwise_initializer,\n regularizer=self.pointwise_regularizer,\n constraint=self.pointwise_constraint,\n trainable=True,\n dtype=self.dtype)\n if self.use_bias:\n self.bias = self.add_weight(\n name='bias',\n shape=(self.filters,),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=True,\n dtype=self.dtype)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs):\n raise NotImplementedError\n\n def get_config(self):\n config = {\n 'filters':\n self.filters,\n 'kernel_size':\n self.kernel_size,\n 'strides':\n self.strides,\n 'padding':\n self.padding,\n 'data_format':\n self.data_format,\n 'depth_multiplier':\n self.depth_multiplier,\n 'dilation_rate':\n self.dilation_rate,\n 'activation':\n activations.serialize(self.activation),\n 'use_bias':\n self.use_bias,\n 'depthwise_initializer':\n initializers.serialize(self.depthwise_initializer),\n 'pointwise_initializer':\n initializers.serialize(self.pointwise_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'depthwise_regularizer':\n regularizers.serialize(self.depthwise_regularizer),\n 'pointwise_regularizer':\n regularizers.serialize(self.pointwise_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'depthwise_constraint':\n constraints.serialize(self.depthwise_constraint),\n 'pointwise_constraint':\n constraints.serialize(self.pointwise_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint)\n }\n base_config = super(SeparableConv, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.SeparableConv1D',\n 'keras.layers.SeparableConvolution1D')\nclass SeparableConv1D(SeparableConv):\n \"\"\"Depthwise separable 1D convolution.\n\n This layer performs a depthwise convolution that acts separately on\n channels, followed by a pointwise convolution that mixes channels.\n If `use_bias` is True and a bias initializer is provided,\n it adds a bias vector to the output.\n It then optionally applies an activation function to produce the final output.\n\n Arguments:\n filters: Integer, the dimensionality of the output space (i.e. 
the number\n of filters in the convolution).\n kernel_size: A single integer specifying the spatial\n dimensions of the filters.\n strides: A single integer specifying the strides\n of the convolution.\n Specifying any `stride` value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"same\"`, or `\"causal\"` (case-insensitive).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, length)`.\n dilation_rate: A single integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n depth_multiplier: The number of depthwise convolution output channels for\n each input channel. The total number of depthwise convolution output\n channels will be equal to `num_filters_in * depth_multiplier`.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias.\n depthwise_initializer: An initializer for the depthwise convolution kernel (\n see `keras.initializers`).\n pointwise_initializer: An initializer for the pointwise convolution kernel (\n see `keras.initializers`).\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer will be used (see `keras.initializers`).\n depthwise_regularizer: Optional regularizer for the depthwise\n convolution kernel (see `keras.regularizers`).\n pointwise_regularizer: Optional regularizer for the pointwise\n convolution kernel (see `keras.regularizers`).\n bias_regularizer: Optional regularizer for the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Optional regularizer function for the output (\n see `keras.regularizers`).\n depthwise_constraint: Optional projection function to be applied to the\n depthwise kernel after being updated by an `Optimizer` (e.g. used for\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). 
Constraints are\n not safe to use when doing asynchronous distributed training (\n see `keras.constraints`).\n pointwise_constraint: Optional projection function to be applied to the\n pointwise kernel after being updated by an `Optimizer` (\n see `keras.constraints`).\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer` (\n see `keras.constraints`).\n trainable: Boolean, if `True` the weights of this layer will be marked as\n trainable (and listed in `layer.trainable_weights`).\n name: A string, the name of the layer.\n\n Input shape:\n 3D tensor with shape:\n `(batch_size, channels, steps)` if data_format='channels_first'\n or 3D tensor with shape:\n `(batch_size, steps, channels)` if data_format='channels_last'.\n\n Output shape:\n 3D tensor with shape:\n `(batch_size, filters, new_steps)` if data_format='channels_first'\n or 3D tensor with shape:\n `(batch_size, new_steps, filters)` if data_format='channels_last'.\n `new_steps` value might have changed due to padding or strides.\n\n Returns:\n A tensor of rank 3 representing\n `activation(separableconv1d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=1,\n padding='valid',\n data_format=None,\n dilation_rate=1,\n depth_multiplier=1,\n activation=None,\n use_bias=True,\n depthwise_initializer='glorot_uniform',\n pointwise_initializer='glorot_uniform',\n bias_initializer='zeros',\n depthwise_regularizer=None,\n pointwise_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n depthwise_constraint=None,\n pointwise_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(SeparableConv1D, self).__init__(\n rank=1,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n depth_multiplier=depth_multiplier,\n activation=activations.get(activation),\n use_bias=use_bias,\n depthwise_initializer=initializers.get(depthwise_initializer),\n pointwise_initializer=initializers.get(pointwise_initializer),\n bias_initializer=initializers.get(bias_initializer),\n depthwise_regularizer=regularizers.get(depthwise_regularizer),\n pointwise_regularizer=regularizers.get(pointwise_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n depthwise_constraint=constraints.get(depthwise_constraint),\n pointwise_constraint=constraints.get(pointwise_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n def call(self, inputs):\n if self.padding == 'causal':\n inputs = array_ops.pad(inputs, self._compute_causal_padding())\n if self.data_format == 'channels_last':\n strides = (1,) + self.strides * 2 + (1,)\n spatial_start_dim = 1\n else:\n strides = (1, 1) + self.strides * 2\n spatial_start_dim = 2\n\n # Explicitly broadcast inputs and kernels to 4D.\n # TODO(fchollet): refactor when a native separable_conv1d op is available.\n inputs = array_ops.expand_dims(inputs, spatial_start_dim)\n depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0)\n pointwise_kernel = array_ops.expand_dims(self.pointwise_kernel, 0)\n dilation_rate = (1,) + self.dilation_rate\n\n if self.padding == 'causal':\n op_padding = 'valid'\n else:\n op_padding = self.padding\n outputs = nn.separable_conv2d(\n inputs,\n depthwise_kernel,\n pointwise_kernel,\n strides=strides,\n 
padding=op_padding.upper(),\n rate=dilation_rate,\n data_format=conv_utils.convert_data_format(self.data_format, ndim=4))\n\n if self.use_bias:\n outputs = nn.bias_add(\n outputs,\n self.bias,\n data_format=conv_utils.convert_data_format(self.data_format, ndim=4))\n\n outputs = array_ops.squeeze(outputs, [spatial_start_dim])\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n\n@keras_export('keras.layers.SeparableConv2D',\n 'keras.layers.SeparableConvolution2D')\nclass SeparableConv2D(SeparableConv):\n \"\"\"Depthwise separable 2D convolution.\n\n Separable convolutions consist of first performing\n a depthwise spatial convolution\n (which acts on each input channel separately)\n followed by a pointwise convolution which mixes the resulting\n output channels. The `depth_multiplier` argument controls how many\n output channels are generated per input channel in the depthwise step.\n\n Intuitively, separable convolutions can be understood as\n a way to factorize a convolution kernel into two smaller kernels,\n or as an extreme version of an Inception block.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: An integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n depthwise_initializer: Initializer for the depthwise kernel matrix (\n see `keras.initializers`).\n pointwise_initializer: Initializer for the pointwise kernel matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n depthwise_regularizer: Regularizer function applied to\n the depthwise kernel matrix (see `keras.regularizers`).\n pointwise_regularizer: Regularizer function applied to\n the pointwise kernel matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n 
activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n depthwise_constraint: Constraint function applied to\n the depthwise kernel matrix (\n see `keras.constraints`).\n pointwise_constraint: Constraint function applied to\n the pointwise kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n\n Returns:\n A tensor of rank 4 representing\n `activation(separableconv2d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n depth_multiplier=1,\n activation=None,\n use_bias=True,\n depthwise_initializer='glorot_uniform',\n pointwise_initializer='glorot_uniform',\n bias_initializer='zeros',\n depthwise_regularizer=None,\n pointwise_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n depthwise_constraint=None,\n pointwise_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(SeparableConv2D, self).__init__(\n rank=2,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n depth_multiplier=depth_multiplier,\n activation=activations.get(activation),\n use_bias=use_bias,\n depthwise_initializer=initializers.get(depthwise_initializer),\n pointwise_initializer=initializers.get(pointwise_initializer),\n bias_initializer=initializers.get(bias_initializer),\n depthwise_regularizer=regularizers.get(depthwise_regularizer),\n pointwise_regularizer=regularizers.get(pointwise_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n depthwise_constraint=constraints.get(depthwise_constraint),\n pointwise_constraint=constraints.get(pointwise_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs)\n\n def call(self, inputs):\n # Apply the actual ops.\n if self.data_format == 'channels_last':\n strides = (1,) + self.strides + (1,)\n else:\n strides = (1, 1) + self.strides\n outputs = nn.separable_conv2d(\n inputs,\n self.depthwise_kernel,\n self.pointwise_kernel,\n strides=strides,\n padding=self.padding.upper(),\n rate=self.dilation_rate,\n data_format=conv_utils.convert_data_format(self.data_format, ndim=4))\n\n if self.use_bias:\n outputs = nn.bias_add(\n outputs,\n self.bias,\n data_format=conv_utils.convert_data_format(self.data_format, ndim=4))\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n\n@keras_export('keras.layers.DepthwiseConv2D')\nclass DepthwiseConv2D(Conv2D):\n \"\"\"Depthwise separable 2D convolution.\n\n Depthwise Separable convolutions consist of performing\n just the first step in a depthwise spatial convolution\n (which acts on each input channel separately).\n The 
`depth_multiplier` argument controls how many\n output channels are generated per input channel in the depthwise step.\n\n Arguments:\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `'valid'` or `'same'` (case-insensitive).\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be 'channels_last'.\n dilation_rate: An integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n depthwise_initializer: Initializer for the depthwise kernel matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n depthwise_regularizer: Regularizer function applied to\n the depthwise kernel matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its 'activation') (\n see `keras.regularizers`).\n depthwise_constraint: Constraint function applied to\n the depthwise kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n\n Input shape:\n 4D tensor with shape:\n `[batch_size, channels, rows, cols]` if data_format='channels_first'\n or 4D tensor with shape:\n `[batch_size, rows, cols, channels]` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `[batch_size, filters, new_rows, new_cols]` if data_format='channels_first'\n or 4D tensor with shape:\n `[batch_size, new_rows, new_cols, filters]` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n\n Returns:\n A tensor of rank 4 representing\n `activation(depthwiseconv2d(inputs, kernel) + bias)`.\n\n Raises:\n ValueError: if `padding` is \"causal\".\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n \"\"\"\n\n def __init__(self,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n depth_multiplier=1,\n data_format=None,\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n depthwise_initializer='glorot_uniform',\n bias_initializer='zeros',\n 
depthwise_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n depthwise_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(DepthwiseConv2D, self).__init__(\n filters=None,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n bias_constraint=bias_constraint,\n **kwargs)\n self.depth_multiplier = depth_multiplier\n self.depthwise_initializer = initializers.get(depthwise_initializer)\n self.depthwise_regularizer = regularizers.get(depthwise_regularizer)\n self.depthwise_constraint = constraints.get(depthwise_constraint)\n self.bias_initializer = initializers.get(bias_initializer)\n\n def build(self, input_shape):\n if len(input_shape) < 4:\n raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '\n 'Received input shape:', str(input_shape))\n input_shape = tensor_shape.TensorShape(input_shape)\n channel_axis = self._get_channel_axis()\n if input_shape.dims[channel_axis].value is None:\n raise ValueError('The channel dimension of the inputs to '\n '`DepthwiseConv2D` '\n 'should be defined. Found `None`.')\n input_dim = int(input_shape[channel_axis])\n depthwise_kernel_shape = (self.kernel_size[0],\n self.kernel_size[1],\n input_dim,\n self.depth_multiplier)\n\n self.depthwise_kernel = self.add_weight(\n shape=depthwise_kernel_shape,\n initializer=self.depthwise_initializer,\n name='depthwise_kernel',\n regularizer=self.depthwise_regularizer,\n constraint=self.depthwise_constraint)\n\n if self.use_bias:\n self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n # Set input spec.\n self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})\n self.built = True\n\n def call(self, inputs):\n outputs = backend.depthwise_conv2d(\n inputs,\n self.depthwise_kernel,\n strides=self.strides,\n padding=self.padding,\n dilation_rate=self.dilation_rate,\n data_format=self.data_format)\n\n if self.use_bias:\n outputs = backend.bias_add(\n outputs,\n self.bias,\n data_format=self.data_format)\n\n if self.activation is not None:\n return self.activation(outputs)\n\n return outputs\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if self.data_format == 'channels_first':\n rows = input_shape[2]\n cols = input_shape[3]\n out_filters = input_shape[1] * self.depth_multiplier\n elif self.data_format == 'channels_last':\n rows = input_shape[1]\n cols = input_shape[2]\n out_filters = input_shape[3] * self.depth_multiplier\n\n rows = conv_utils.conv_output_length(rows, self.kernel_size[0],\n self.padding,\n self.strides[0],\n self.dilation_rate[0])\n cols = conv_utils.conv_output_length(cols, self.kernel_size[1],\n self.padding,\n self.strides[1],\n self.dilation_rate[1])\n if self.data_format == 'channels_first':\n return (input_shape[0], out_filters, rows, cols)\n elif self.data_format == 'channels_last':\n return (input_shape[0], rows, cols, out_filters)\n\n def get_config(self):\n config = super(DepthwiseConv2D, self).get_config()\n config.pop('filters')\n config.pop('kernel_initializer')\n config.pop('kernel_regularizer')\n config.pop('kernel_constraint')\n config['depth_multiplier'] = self.depth_multiplier\n config['depthwise_initializer'] = 
initializers.serialize(\n self.depthwise_initializer)\n config['depthwise_regularizer'] = regularizers.serialize(\n self.depthwise_regularizer)\n config['depthwise_constraint'] = constraints.serialize(\n self.depthwise_constraint)\n return config\n\n\n@keras_export('keras.layers.UpSampling1D')\nclass UpSampling1D(Layer):\n \"\"\"Upsampling layer for 1D inputs.\n\n Repeats each temporal step `size` times along the time axis.\n\n Examples:\n\n >>> input_shape = (2, 2, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[ 0 1 2]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 9 10 11]]]\n >>> y = tf.keras.layers.UpSampling1D(size=2)(x)\n >>> print(y)\n tf.Tensor(\n [[[ 0 1 2]\n [ 0 1 2]\n [ 3 4 5]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 6 7 8]\n [ 9 10 11]\n [ 9 10 11]]], shape=(2, 4, 3), dtype=int64)\n\n Arguments:\n size: Integer. Upsampling factor.\n\n Input shape:\n 3D tensor with shape: `(batch_size, steps, features)`.\n\n Output shape:\n 3D tensor with shape: `(batch_size, upsampled_steps, features)`.\n \"\"\"\n\n def __init__(self, size=2, **kwargs):\n super(UpSampling1D, self).__init__(**kwargs)\n self.size = int(size)\n self.input_spec = InputSpec(ndim=3)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n size = self.size * input_shape[1] if input_shape[1] is not None else None\n return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])\n\n def call(self, inputs):\n output = backend.repeat_elements(inputs, self.size, axis=1)\n return output\n\n def get_config(self):\n config = {'size': self.size}\n base_config = super(UpSampling1D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.UpSampling2D')\nclass UpSampling2D(Layer):\n \"\"\"Upsampling layer for 2D inputs.\n\n Repeats the rows and columns of the data\n by `size[0]` and `size[1]` respectively.\n\n Examples:\n\n >>> input_shape = (2, 2, 1, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[[ 0 1 2]]\n [[ 3 4 5]]]\n [[[ 6 7 8]]\n [[ 9 10 11]]]]\n >>> y = tf.keras.layers.UpSampling2D(size=(1, 2))(x)\n >>> print(y)\n tf.Tensor(\n [[[[ 0 1 2]\n [ 0 1 2]]\n [[ 3 4 5]\n [ 3 4 5]]]\n [[[ 6 7 8]\n [ 6 7 8]]\n [[ 9 10 11]\n [ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64)\n\n Arguments:\n size: Int, or tuple of 2 integers.\n The upsampling factors for rows and columns.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n interpolation: A string, one of `nearest` or `bilinear`.\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_rows, upsampled_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_rows, upsampled_cols)`\n \"\"\"\n\n def __init__(self,\n size=(2, 2),\n data_format=None,\n interpolation='nearest',\n 
**kwargs):\n super(UpSampling2D, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.size = conv_utils.normalize_tuple(size, 2, 'size')\n if interpolation not in {'nearest', 'bilinear'}:\n raise ValueError('`interpolation` argument should be one of `\"nearest\"` '\n 'or `\"bilinear\"`.')\n self.interpolation = interpolation\n self.input_spec = InputSpec(ndim=4)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if self.data_format == 'channels_first':\n height = self.size[0] * input_shape[\n 2] if input_shape[2] is not None else None\n width = self.size[1] * input_shape[\n 3] if input_shape[3] is not None else None\n return tensor_shape.TensorShape(\n [input_shape[0], input_shape[1], height, width])\n else:\n height = self.size[0] * input_shape[\n 1] if input_shape[1] is not None else None\n width = self.size[1] * input_shape[\n 2] if input_shape[2] is not None else None\n return tensor_shape.TensorShape(\n [input_shape[0], height, width, input_shape[3]])\n\n def call(self, inputs):\n return backend.resize_images(\n inputs, self.size[0], self.size[1], self.data_format,\n interpolation=self.interpolation)\n\n def get_config(self):\n config = {\n 'size': self.size,\n 'data_format': self.data_format,\n 'interpolation': self.interpolation\n }\n base_config = super(UpSampling2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.UpSampling3D')\nclass UpSampling3D(Layer):\n \"\"\"Upsampling layer for 3D inputs.\n\n Repeats the 1st, 2nd and 3rd dimensions\n of the data by `size[0]`, `size[1]` and `size[2]` respectively.\n\n Examples:\n\n >>> input_shape = (2, 1, 2, 1, 3)\n >>> x = tf.constant(1, shape=input_shape)\n >>> y = tf.keras.layers.UpSampling3D(size=2)(x)\n >>> print(y.shape)\n (2, 2, 4, 2, 3)\n\n Arguments:\n size: Int, or tuple of 3 integers.\n The upsampling factors for dim1, dim2 and dim3.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, dim1, dim2, dim3, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, dim1, dim2, dim3)`\n\n Output shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`\n \"\"\"\n\n def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.size = conv_utils.normalize_tuple(size, 3, 'size')\n self.input_spec = InputSpec(ndim=5)\n super(UpSampling3D, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if self.data_format == 'channels_first':\n dim1 = self.size[0] * input_shape[\n 2] if input_shape[2] is 
not None else None\n dim2 = self.size[1] * input_shape[\n 3] if input_shape[3] is not None else None\n dim3 = self.size[2] * input_shape[\n 4] if input_shape[4] is not None else None\n return tensor_shape.TensorShape(\n [input_shape[0], input_shape[1], dim1, dim2, dim3])\n else:\n dim1 = self.size[0] * input_shape[\n 1] if input_shape[1] is not None else None\n dim2 = self.size[1] * input_shape[\n 2] if input_shape[2] is not None else None\n dim3 = self.size[2] * input_shape[\n 3] if input_shape[3] is not None else None\n return tensor_shape.TensorShape(\n [input_shape[0], dim1, dim2, dim3, input_shape[4]])\n\n def call(self, inputs):\n return backend.resize_volumes(\n inputs, self.size[0], self.size[1], self.size[2], self.data_format)\n\n def get_config(self):\n config = {'size': self.size, 'data_format': self.data_format}\n base_config = super(UpSampling3D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.ZeroPadding1D')\nclass ZeroPadding1D(Layer):\n \"\"\"Zero-padding layer for 1D input (e.g. temporal sequence).\n\n Examples:\n\n >>> input_shape = (2, 2, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[ 0 1 2]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 9 10 11]]]\n >>> y = tf.keras.layers.ZeroPadding1D(padding=2)(x)\n >>> print(y)\n tf.Tensor(\n [[[ 0 0 0]\n [ 0 0 0]\n [ 0 1 2]\n [ 3 4 5]\n [ 0 0 0]\n [ 0 0 0]]\n [[ 0 0 0]\n [ 0 0 0]\n [ 6 7 8]\n [ 9 10 11]\n [ 0 0 0]\n [ 0 0 0]]], shape=(2, 6, 3), dtype=int64)\n\n Arguments:\n padding: Int, or tuple of int (length 2), or dictionary.\n - If int:\n How many zeros to add at the beginning and end of\n the padding dimension (axis 1).\n - If tuple of int (length 2):\n How many zeros to add at the beginning and the end of\n the padding dimension (`(left_pad, right_pad)`).\n\n Input shape:\n 3D tensor with shape `(batch_size, axis_to_pad, features)`\n\n Output shape:\n 3D tensor with shape `(batch_size, padded_axis, features)`\n \"\"\"\n\n def __init__(self, padding=1, **kwargs):\n super(ZeroPadding1D, self).__init__(**kwargs)\n self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')\n self.input_spec = InputSpec(ndim=3)\n\n def compute_output_shape(self, input_shape):\n if input_shape[1] is not None:\n length = input_shape[1] + self.padding[0] + self.padding[1]\n else:\n length = None\n return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])\n\n def call(self, inputs):\n return backend.temporal_padding(inputs, padding=self.padding)\n\n def get_config(self):\n config = {'padding': self.padding}\n base_config = super(ZeroPadding1D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.ZeroPadding2D')\nclass ZeroPadding2D(Layer):\n \"\"\"Zero-padding layer for 2D input (e.g. 
picture).\n\n This layer can add rows and columns of zeros\n at the top, bottom, left and right side of an image tensor.\n\n Examples:\n\n >>> input_shape = (1, 1, 2, 2)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[[0 1]\n [2 3]]]]\n >>> y = tf.keras.layers.ZeroPadding2D(padding=1)(x)\n >>> print(y)\n tf.Tensor(\n [[[[0 0]\n [0 0]\n [0 0]\n [0 0]]\n [[0 0]\n [0 1]\n [2 3]\n [0 0]]\n [[0 0]\n [0 0]\n [0 0]\n [0 0]]]], shape=(1, 3, 4, 2), dtype=int64)\n\n Arguments:\n padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\n - If int: the same symmetric padding\n is applied to height and width.\n - If tuple of 2 ints:\n interpreted as two different\n symmetric padding values for height and width:\n `(symmetric_height_pad, symmetric_width_pad)`.\n - If tuple of 2 tuples of 2 ints:\n interpreted as\n `((top_pad, bottom_pad), (left_pad, right_pad))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, padded_rows, padded_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, padded_rows, padded_cols)`\n \"\"\"\n\n def __init__(self, padding=(1, 1), data_format=None, **kwargs):\n super(ZeroPadding2D, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n if isinstance(padding, int):\n self.padding = ((padding, padding), (padding, padding))\n elif hasattr(padding, '__len__'):\n if len(padding) != 2:\n raise ValueError('`padding` should have two elements. '\n 'Found: ' + str(padding))\n height_padding = conv_utils.normalize_tuple(padding[0], 2,\n '1st entry of padding')\n width_padding = conv_utils.normalize_tuple(padding[1], 2,\n '2nd entry of padding')\n self.padding = (height_padding, width_padding)\n else:\n raise ValueError('`padding` should be either an int, '\n 'a tuple of 2 ints '\n '(symmetric_height_pad, symmetric_width_pad), '\n 'or a tuple of 2 tuples of 2 ints '\n '((top_pad, bottom_pad), (left_pad, right_pad)). 
'\n 'Found: ' + str(padding))\n self.input_spec = InputSpec(ndim=4)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if self.data_format == 'channels_first':\n if input_shape[2] is not None:\n rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]\n else:\n rows = None\n if input_shape[3] is not None:\n cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]\n else:\n cols = None\n return tensor_shape.TensorShape(\n [input_shape[0], input_shape[1], rows, cols])\n elif self.data_format == 'channels_last':\n if input_shape[1] is not None:\n rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]\n else:\n rows = None\n if input_shape[2] is not None:\n cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]\n else:\n cols = None\n return tensor_shape.TensorShape(\n [input_shape[0], rows, cols, input_shape[3]])\n\n def call(self, inputs):\n return backend.spatial_2d_padding(\n inputs, padding=self.padding, data_format=self.data_format)\n\n def get_config(self):\n config = {'padding': self.padding, 'data_format': self.data_format}\n base_config = super(ZeroPadding2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.ZeroPadding3D')\nclass ZeroPadding3D(Layer):\n \"\"\"Zero-padding layer for 3D data (spatial or spatio-temporal).\n\n Examples:\n\n >>> input_shape = (1, 1, 2, 2, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> y = tf.keras.layers.ZeroPadding3D(padding=2)(x)\n >>> print(y.shape)\n (1, 5, 6, 6, 3)\n\n Arguments:\n padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric padding\n is applied to depth, height, and width.\n - If tuple of 3 ints:\n interpreted as three different\n symmetric padding values for depth, height, and width:\n `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.\n - If tuple of 3 tuples of 2 ints:\n interpreted as\n `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,\n right_dim2_pad), (left_dim3_pad, right_dim3_pad))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,\n depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_axis_to_pad, second_axis_to_pad,\n third_axis_to_pad)`\n\n Output shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_padded_axis, second_padded_axis, third_padded_axis,\n depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_padded_axis, second_padded_axis,\n third_padded_axis)`\n \"\"\"\n\n def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):\n super(ZeroPadding3D, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n if isinstance(padding, int):\n self.padding = ((padding, padding), (padding, padding), (padding,\n padding))\n 
elif hasattr(padding, '__len__'):\n if len(padding) != 3:\n raise ValueError('`padding` should have 3 elements. '\n 'Found: ' + str(padding))\n dim1_padding = conv_utils.normalize_tuple(padding[0], 2,\n '1st entry of padding')\n dim2_padding = conv_utils.normalize_tuple(padding[1], 2,\n '2nd entry of padding')\n dim3_padding = conv_utils.normalize_tuple(padding[2], 2,\n '3rd entry of padding')\n self.padding = (dim1_padding, dim2_padding, dim3_padding)\n else:\n raise ValueError(\n '`padding` should be either an int, '\n 'a tuple of 3 ints '\n '(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '\n 'or a tuple of 3 tuples of 2 ints '\n '((left_dim1_pad, right_dim1_pad),'\n ' (left_dim2_pad, right_dim2_pad),'\n ' (left_dim3_pad, right_dim3_pad)). '\n 'Found: ' + str(padding))\n self.input_spec = InputSpec(ndim=5)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if self.data_format == 'channels_first':\n if input_shape[2] is not None:\n dim1 = input_shape[2] + self.padding[0][0] + self.padding[0][1]\n else:\n dim1 = None\n if input_shape[3] is not None:\n dim2 = input_shape[3] + self.padding[1][0] + self.padding[1][1]\n else:\n dim2 = None\n if input_shape[4] is not None:\n dim3 = input_shape[4] + self.padding[2][0] + self.padding[2][1]\n else:\n dim3 = None\n return tensor_shape.TensorShape(\n [input_shape[0], input_shape[1], dim1, dim2, dim3])\n elif self.data_format == 'channels_last':\n if input_shape[1] is not None:\n dim1 = input_shape[1] + self.padding[0][0] + self.padding[0][1]\n else:\n dim1 = None\n if input_shape[2] is not None:\n dim2 = input_shape[2] + self.padding[1][0] + self.padding[1][1]\n else:\n dim2 = None\n if input_shape[3] is not None:\n dim3 = input_shape[3] + self.padding[2][0] + self.padding[2][1]\n else:\n dim3 = None\n return tensor_shape.TensorShape(\n [input_shape[0], dim1, dim2, dim3, input_shape[4]])\n\n def call(self, inputs):\n return backend.spatial_3d_padding(\n inputs, padding=self.padding, data_format=self.data_format)\n\n def get_config(self):\n config = {'padding': self.padding, 'data_format': self.data_format}\n base_config = super(ZeroPadding3D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Cropping1D')\nclass Cropping1D(Layer):\n \"\"\"Cropping layer for 1D input (e.g. 
temporal sequence).\n\n It crops along the time dimension (axis 1).\n\n Examples:\n\n >>> input_shape = (2, 3, 2)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[ 0 1]\n [ 2 3]\n [ 4 5]]\n [[ 6 7]\n [ 8 9]\n [10 11]]]\n >>> y = tf.keras.layers.Cropping1D(cropping=1)(x)\n >>> print(y)\n tf.Tensor(\n [[[2 3]]\n [[8 9]]], shape=(2, 1, 2), dtype=int64)\n\n Arguments:\n cropping: Int or tuple of int (length 2)\n How many units should be trimmed off at the beginning and end of\n the cropping dimension (axis 1).\n If a single int is provided, the same value will be used for both.\n\n Input shape:\n 3D tensor with shape `(batch_size, axis_to_crop, features)`\n\n Output shape:\n 3D tensor with shape `(batch_size, cropped_axis, features)`\n \"\"\"\n\n def __init__(self, cropping=(1, 1), **kwargs):\n super(Cropping1D, self).__init__(**kwargs)\n self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')\n self.input_spec = InputSpec(ndim=3)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if input_shape[1] is not None:\n length = input_shape[1] - self.cropping[0] - self.cropping[1]\n else:\n length = None\n return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])\n\n def call(self, inputs):\n if self.cropping[1] == 0:\n return inputs[:, self.cropping[0]:, :]\n else:\n return inputs[:, self.cropping[0]:-self.cropping[1], :]\n\n def get_config(self):\n config = {'cropping': self.cropping}\n base_config = super(Cropping1D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Cropping2D')\nclass Cropping2D(Layer):\n \"\"\"Cropping layer for 2D input (e.g. picture).\n\n It crops along spatial dimensions, i.e. 
height and width.\n\n Examples:\n\n >>> input_shape = (2, 28, 28, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> y = tf.keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x)\n >>> print(y.shape)\n (2, 24, 20, 3)\n\n Arguments:\n cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\n - If int: the same symmetric cropping\n is applied to height and width.\n - If tuple of 2 ints:\n interpreted as two different\n symmetric cropping values for height and width:\n `(symmetric_height_crop, symmetric_width_crop)`.\n - If tuple of 2 tuples of 2 ints:\n interpreted as\n `((top_crop, bottom_crop), (left_crop, right_crop))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, cropped_rows, cropped_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, cropped_rows, cropped_cols)`\n \"\"\"\n\n def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):\n super(Cropping2D, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n if isinstance(cropping, int):\n self.cropping = ((cropping, cropping), (cropping, cropping))\n elif hasattr(cropping, '__len__'):\n if len(cropping) != 2:\n raise ValueError('`cropping` should have two elements. '\n 'Found: ' + str(cropping))\n height_cropping = conv_utils.normalize_tuple(cropping[0], 2,\n '1st entry of cropping')\n width_cropping = conv_utils.normalize_tuple(cropping[1], 2,\n '2nd entry of cropping')\n self.cropping = (height_cropping, width_cropping)\n else:\n raise ValueError('`cropping` should be either an int, '\n 'a tuple of 2 ints '\n '(symmetric_height_crop, symmetric_width_crop), '\n 'or a tuple of 2 tuples of 2 ints '\n '((top_crop, bottom_crop), (left_crop, right_crop)). 
'\n 'Found: ' + str(cropping))\n self.input_spec = InputSpec(ndim=4)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n # pylint: disable=invalid-unary-operand-type\n if self.data_format == 'channels_first':\n return tensor_shape.TensorShape([\n input_shape[0], input_shape[1],\n input_shape[2] - self.cropping[0][0] - self.cropping[0][1]\n if input_shape[2] else None,\n input_shape[3] - self.cropping[1][0] - self.cropping[1][1]\n if input_shape[3] else None\n ])\n else:\n return tensor_shape.TensorShape([\n input_shape[0],\n input_shape[1] - self.cropping[0][0] - self.cropping[0][1]\n if input_shape[1] else None,\n input_shape[2] - self.cropping[1][0] - self.cropping[1][1]\n if input_shape[2] else None, input_shape[3]\n ])\n # pylint: enable=invalid-unary-operand-type\n\n def call(self, inputs):\n # pylint: disable=invalid-unary-operand-type\n if self.data_format == 'channels_first':\n if self.cropping[0][1] == self.cropping[1][1] == 0:\n return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]\n elif self.cropping[0][1] == 0:\n return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:\n -self.cropping[1][1]]\n elif self.cropping[1][1] == 0:\n return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],\n self.cropping[1][0]:]\n return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],\n self.cropping[1][0]:-self.cropping[1][1]]\n else:\n if self.cropping[0][1] == self.cropping[1][1] == 0:\n return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]\n elif self.cropping[0][1] == 0:\n return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:\n -self.cropping[1][1], :]\n elif self.cropping[1][1] == 0:\n return inputs[:, self.cropping[0][0]:-self.cropping[0][1],\n self.cropping[1][0]:, :]\n return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[\n 1][0]:-self.cropping[1][1], :] # pylint: disable=invalid-unary-operand-type\n # pylint: enable=invalid-unary-operand-type\n\n def get_config(self):\n config = {'cropping': self.cropping, 'data_format': self.data_format}\n base_config = super(Cropping2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.Cropping3D')\nclass Cropping3D(Layer):\n \"\"\"Cropping layer for 3D data (e.g. 
spatial or spatio-temporal).\n\n Examples:\n\n >>> input_shape = (2, 28, 28, 10, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> y = tf.keras.layers.Cropping3D(cropping=(2, 4, 2))(x)\n >>> print(y.shape)\n (2, 24, 20, 6, 3)\n\n Arguments:\n cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric cropping\n is applied to depth, height, and width.\n - If tuple of 3 ints: interpreted as three different\n symmetric cropping values for depth, height, and width:\n `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.\n - If tuple of 3 tuples of 2 ints: interpreted as\n `((left_dim1_crop, right_dim1_crop), (left_dim2_crop,\n right_dim2_crop), (left_dim3_crop, right_dim3_crop))`\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,\n depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_axis_to_crop, second_axis_to_crop,\n third_axis_to_crop)`\n\n Output shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_cropped_axis, second_cropped_axis, third_cropped_axis,\n depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_cropped_axis, second_cropped_axis,\n third_cropped_axis)`\n \"\"\"\n\n def __init__(self,\n cropping=((1, 1), (1, 1), (1, 1)),\n data_format=None,\n **kwargs):\n super(Cropping3D, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n if isinstance(cropping, int):\n self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,\n cropping))\n elif hasattr(cropping, '__len__'):\n if len(cropping) != 3:\n raise ValueError('`cropping` should have 3 elements. '\n 'Found: ' + str(cropping))\n dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,\n '1st entry of cropping')\n dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,\n '2nd entry of cropping')\n dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,\n '3rd entry of cropping')\n self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)\n else:\n raise ValueError(\n '`cropping` should be either an int, '\n 'a tuple of 3 ints '\n '(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '\n 'or a tuple of 3 tuples of 2 ints '\n '((left_dim1_crop, right_dim1_crop),'\n ' (left_dim2_crop, right_dim2_crop),'\n ' (left_dim3_crop, right_dim3_crop)). 
'\n 'Found: ' + str(cropping))\n self.input_spec = InputSpec(ndim=5)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n # pylint: disable=invalid-unary-operand-type\n if self.data_format == 'channels_first':\n if input_shape[2] is not None:\n dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]\n else:\n dim1 = None\n if input_shape[3] is not None:\n dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]\n else:\n dim2 = None\n if input_shape[4] is not None:\n dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]\n else:\n dim3 = None\n return tensor_shape.TensorShape(\n [input_shape[0], input_shape[1], dim1, dim2, dim3])\n elif self.data_format == 'channels_last':\n if input_shape[1] is not None:\n dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]\n else:\n dim1 = None\n if input_shape[2] is not None:\n dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]\n else:\n dim2 = None\n if input_shape[3] is not None:\n dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]\n else:\n dim3 = None\n return tensor_shape.TensorShape(\n [input_shape[0], dim1, dim2, dim3, input_shape[4]])\n # pylint: enable=invalid-unary-operand-type\n\n def call(self, inputs):\n # pylint: disable=invalid-unary-operand-type\n if self.data_format == 'channels_first':\n if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:\n return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,\n self.cropping[2][0]:]\n elif self.cropping[0][1] == self.cropping[1][1] == 0:\n return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,\n self.cropping[2][0]:-self.cropping[2][1]]\n elif self.cropping[1][1] == self.cropping[2][1] == 0:\n return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],\n self.cropping[1][0]:, self.cropping[2][0]:]\n elif self.cropping[0][1] == self.cropping[2][1] == 0:\n return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:\n -self.cropping[1][1], self.cropping[2][0]:]\n elif self.cropping[0][1] == 0:\n return inputs[:, :, self.cropping[0][0]:, self.cropping[1][\n 0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]]\n elif self.cropping[1][1] == 0:\n return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.\n cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]]\n elif self.cropping[2][1] == 0:\n return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.\n cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:]\n return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],\n self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][\n 0]:-self.cropping[2][1]]\n else:\n if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:\n return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,\n self.cropping[2][0]:, :]\n elif self.cropping[0][1] == self.cropping[1][1] == 0:\n return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,\n self.cropping[2][0]:-self.cropping[2][1], :]\n elif self.cropping[1][1] == self.cropping[2][1] == 0:\n return inputs[:, self.cropping[0][0]:-self.cropping[0][1],\n self.cropping[1][0]:, self.cropping[2][0]:, :]\n elif self.cropping[0][1] == self.cropping[2][1] == 0:\n return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:\n -self.cropping[1][1], self.cropping[2][0]:, :]\n elif self.cropping[0][1] == 0:\n return inputs[:, self.cropping[0][0]:, self.cropping[1][\n 0]:-self.cropping[1][1], self.cropping[2][0]:\n -self.cropping[2][1], :]\n elif 
self.cropping[1][1] == 0:\n return inputs[:, self.cropping[0][\n 0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:\n -self.cropping[2][1], :]\n elif self.cropping[2][1] == 0:\n return inputs[:, self.cropping[0][0]:-self.cropping[0][1],\n self.cropping[1][0]:-self.cropping[1][1], self.cropping[\n 2][0]:, :]\n return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[\n 1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type\n -self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type\n # pylint: enable=invalid-unary-operand-type\n\n def get_config(self):\n config = {'cropping': self.cropping, 'data_format': self.data_format}\n base_config = super(Cropping3D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n# Aliases\n\nConvolution1D = Conv1D\nConvolution2D = Conv2D\nConvolution3D = Conv3D\nSeparableConvolution1D = SeparableConv1D\nSeparableConvolution2D = SeparableConv2D\nConvolution2DTranspose = Conv2DTranspose\nConvolution3DTranspose = Conv3DTranspose\nDeconvolution2D = Deconv2D = Conv2DTranspose\nDeconvolution3D = Deconv3D = Conv3DTranspose\n"
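A quick, concrete check of the separable-convolution arithmetic described in the docstrings above. This is a minimal sketch, assuming TensorFlow 2.x and the public tf.keras aliases of the classes in this file; the input sizes are illustrative, not taken from the dataset row:

# Shape sanity-check for the separable/depthwise layers defined above.
# Assumes TensorFlow 2.x; channels_last input of shape (batch, rows, cols, channels).
import numpy as np
import tensorflow as tf

x = np.zeros((2, 32, 32, 3), dtype=np.float32)

# SeparableConv2D: the depthwise step produces 3 * depth_multiplier = 6
# channels, then the 1x1 pointwise kernel mixes them down to `filters`.
y = tf.keras.layers.SeparableConv2D(filters=8, kernel_size=3,
                                    depth_multiplier=2, padding='same')(x)
assert y.shape == (2, 32, 32, 8)

# DepthwiseConv2D stops after the depthwise step, so there is no `filters`
# argument and the output has input_channels * depth_multiplier channels.
y = tf.keras.layers.DepthwiseConv2D(kernel_size=3, depth_multiplier=2,
                                    padding='same')(x)
assert y.shape == (2, 32, 32, 6)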
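The upsampling, zero-padding, and cropping layers are pure shape transforms, so the arithmetic in their compute_output_shape methods can be verified directly. Again a hedged sketch under the same TensorFlow 2.x assumption:

# UpSampling2D multiplies rows/cols, ZeroPadding2D adds (top, bottom) and
# (left, right) zeros, and Cropping2D removes them again.
import numpy as np
import tensorflow as tf

x = np.zeros((2, 32, 32, 3), dtype=np.float32)

y = tf.keras.layers.UpSampling2D(size=(2, 3))(x)         # rows*2, cols*3
assert y.shape == (2, 64, 96, 3)

y = tf.keras.layers.ZeroPadding2D(((1, 2), (3, 4)))(x)   # 32+1+2, 32+3+4
assert y.shape == (2, 35, 39, 3)

y = tf.keras.layers.Cropping2D(((1, 2), (3, 4)))(y)      # undoes the padding
assert y.shape == (2, 32, 32, 3)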
] | [
[
"tensorflow.python.keras.backend.spatial_2d_padding",
"tensorflow.python.keras.backend.resize_volumes",
"tensorflow.python.keras.constraints.get",
"tensorflow.python.keras.utils.conv_utils.deconv_output_length",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.keras.backend.repeat_elements",
"tensorflow.python.keras.initializers.serialize",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.keras.activations.get",
"tensorflow.python.keras.backend.temporal_padding",
"tensorflow.python.keras.regularizers.serialize",
"tensorflow.python.ops.nn.bias_add",
"tensorflow.python.keras.utils.conv_utils.normalize_padding",
"tensorflow.python.keras.utils.conv_utils.conv_output_length",
"tensorflow.python.keras.utils.conv_utils.convert_data_format",
"tensorflow.python.ops.nn_ops.Convolution",
"tensorflow.python.keras.regularizers.get",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.keras.utils.conv_utils.normalize_data_format",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.keras.backend.depthwise_conv2d",
"tensorflow.python.keras.utils.conv_utils.normalize_tuple",
"tensorflow.python.keras.backend.conv2d_transpose",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.keras.activations.serialize",
"tensorflow.python.keras.backend.resize_images",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.keras.engine.input_spec.InputSpec",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.constraints.serialize",
"tensorflow.python.keras.backend.bias_add",
"tensorflow.python.keras.initializers.get",
"tensorflow.python.keras.backend.spatial_3d_padding"
]
] |
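A minimal sketch of the output-shape arithmetic implemented by the Cropping3D code in the record above, driven through the public tf.keras API (assuming TensorFlow 2.x is installed; the input shape and cropping amounts are illustrative, not taken from the record). Each spatial dimension shrinks by the sum of its (start, end) cropping amounts, exactly as compute_output_shape computes.

import numpy as np
import tensorflow as tf

# channels_last input: (batch, dim1, dim2, dim3, channels)
x = np.zeros((1, 10, 10, 10, 4), dtype=np.float32)
layer = tf.keras.layers.Cropping3D(cropping=((1, 1), (2, 2), (0, 3)))
y = layer(x)
# dim1: 10 - 1 - 1 = 8; dim2: 10 - 2 - 2 = 6; dim3: 10 - 0 - 3 = 7
print(y.shape)  # (1, 8, 6, 7, 4)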
ChidanandKumarKS/mxnet | [
"1ed8b19849046bce92fd3d4a390b2adc405b584a"
] | [
"python/mxnet/base.py"
] | [
"# coding: utf-8\n# pylint: disable=invalid-name, no-member\n\"\"\"ctypes library of mxnet and helper functions.\"\"\"\nfrom __future__ import absolute_import\n\nimport sys\nimport ctypes\nimport atexit\nimport warnings\nimport inspect\nimport numpy as np\nfrom . import libinfo\nwarnings.filterwarnings('default', category=DeprecationWarning)\n\n__all__ = ['MXNetError']\n#----------------------------\n# library loading\n#----------------------------\nif sys.version_info[0] == 3:\n string_types = str,\n numeric_types = (float, int, np.float32, np.int32)\n integer_types = int\n # this function is needed for python3\n # to convert ctypes.char_p .value back to python str\n py_str = lambda x: x.decode('utf-8')\nelse:\n string_types = basestring,\n numeric_types = (float, int, long, np.float32, np.int32)\n integer_types = (int, long)\n py_str = lambda x: x\n\nclass _NullType(object):\n \"\"\"Placeholder for arguments\"\"\"\n def __repr__(self):\n return '_Null'\n\n_Null = _NullType()\n\nclass MXNetError(Exception):\n \"\"\"Error that will be throwed by all mxnet functions.\"\"\"\n pass\n\nclass NotImplementedForSymbol(MXNetError):\n def __init__(self, function, alias, *args):\n super(NotImplementedForSymbol, self).__init__()\n self.function = function.__name__\n self.alias = alias\n self.args = [str(type(a)) for a in args]\n def __str__(self):\n msg = 'Function {}'.format(self.function)\n if self.alias:\n msg += ' (namely operator \"{}\")'.format(self.alias)\n if self.args:\n msg += ' with arguments ({})'.format(', '.join(self.args))\n msg += ' is not implemented for Symbol and only available in NDArray.'\n return msg\n\ndef _load_lib():\n \"\"\"Load library by searching possible path.\"\"\"\n lib_path = libinfo.find_lib_path()\n lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)\n # DMatrix functions\n lib.MXGetLastError.restype = ctypes.c_char_p\n return lib\n\n# version number\n__version__ = libinfo.__version__\n# library instance of mxnet\n_LIB = _load_lib()\n\n# type definitions\nmx_uint = ctypes.c_uint\nmx_float = ctypes.c_float\nmx_float_p = ctypes.POINTER(mx_float)\nmx_real_t = np.float32\nNDArrayHandle = ctypes.c_void_p\nFunctionHandle = ctypes.c_void_p\nOpHandle = ctypes.c_void_p\nCachedOpHandle = ctypes.c_void_p\nSymbolHandle = ctypes.c_void_p\nExecutorHandle = ctypes.c_void_p\nDataIterCreatorHandle = ctypes.c_void_p\nDataIterHandle = ctypes.c_void_p\nKVStoreHandle = ctypes.c_void_p\nRecordIOHandle = ctypes.c_void_p\nRtcHandle = ctypes.c_void_p\n#----------------------------\n# helper function definition\n#----------------------------\ndef check_call(ret):\n \"\"\"Check the return value of C API call.\n\n This function will raise an exception when an error occurs.\n Wrap every API call with this function.\n\n Parameters\n ----------\n ret : int\n return value from API calls.\n \"\"\"\n if ret != 0:\n raise MXNetError(py_str(_LIB.MXGetLastError()))\n\nif sys.version_info[0] < 3:\n def c_str(string):\n \"\"\"Create ctypes char * from a Python string.\n\n Parameters\n ----------\n string : string type\n Python string.\n\n Returns\n -------\n str : c_char_p\n A char pointer that can be passed to C API.\n\n Examples\n --------\n >>> x = mx.base.c_str(\"Hello, World\")\n >>> print x.value\n Hello, World\n \"\"\"\n return ctypes.c_char_p(string)\nelse:\n def c_str(string):\n \"\"\"Create ctypes char * from a Python string.\n\n Parameters\n ----------\n string : string type\n Python string.\n\n Returns\n -------\n str : c_char_p\n A char pointer that can be passed to C API.\n\n Examples\n 
--------\n >>> x = mx.base.c_str(\"Hello, World\")\n >>> print x.value\n Hello, World\n \"\"\"\n return ctypes.c_char_p(string.encode('utf-8'))\n\n\ndef c_array(ctype, values):\n \"\"\"Create ctypes array from a Python array.\n\n Parameters\n ----------\n ctype : ctypes data type\n Data type of the array we want to convert to, such as mx_float.\n\n values : tuple or list\n Data content.\n\n Returns\n -------\n out : ctypes array\n Created ctypes array.\n\n Examples\n --------\n >>> x = mx.base.c_array(mx.base.mx_float, [1, 2, 3])\n >>> print len(x)\n 3\n >>> x[1]\n 2.0\n \"\"\"\n return (ctype * len(values))(*values)\n\ndef ctypes2buffer(cptr, length):\n \"\"\"Convert ctypes pointer to buffer type.\n\n Parameters\n ----------\n cptr : ctypes.POINTER(ctypes.c_char)\n Pointer to the raw memory region.\n length : int\n The length of the buffer.\n\n Returns\n -------\n buffer : bytearray\n The raw byte memory buffer.\n \"\"\"\n if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):\n raise TypeError('expected char pointer')\n res = bytearray(length)\n rptr = (ctypes.c_char * length).from_buffer(res)\n if not ctypes.memmove(rptr, cptr, length):\n raise RuntimeError('memmove failed')\n return res\n\ndef ctypes2numpy_shared(cptr, shape):\n \"\"\"Convert a ctypes pointer to a numpy array.\n\n The resulting NumPy array shares the memory with the pointer.\n\n Parameters\n ----------\n cptr : ctypes.POINTER(mx_float)\n pointer to the memory region\n\n shape : tuple\n Shape of target `NDArray`.\n\n Returns\n -------\n out : numpy_array\n A numpy array : numpy array.\n \"\"\"\n if not isinstance(cptr, ctypes.POINTER(mx_float)):\n raise RuntimeError('expected float pointer')\n size = 1\n for s in shape:\n size *= s\n dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))\n return np.frombuffer(dbuffer, dtype=np.float32).reshape(shape)\n\n\ndef build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):\n \"\"\"Build argument docs in python style.\n\n arg_names : list of str\n Argument names.\n\n arg_types : list of str\n Argument type information.\n\n arg_descs : list of str\n Argument description information.\n\n remove_dup : boolean, optional\n Whether remove duplication or not.\n\n Returns\n -------\n docstr : str\n Python docstring of parameter sections.\n \"\"\"\n param_keys = set()\n param_str = []\n for key, type_info, desc in zip(arg_names, arg_types, arg_descs):\n if key in param_keys and remove_dup:\n continue\n if key == 'num_args':\n continue\n param_keys.add(key)\n ret = '%s : %s' % (key, type_info)\n if len(desc) != 0:\n ret += '\\n ' + desc\n param_str.append(ret)\n doc_str = ('Parameters\\n' +\n '----------\\n' +\n '%s\\n')\n doc_str = doc_str % ('\\n'.join(param_str))\n return doc_str\n\n\ndef _notify_shutdown():\n \"\"\"Notify MXNet about a shutdown.\"\"\"\n check_call(_LIB.MXNotifyShutdown())\n\natexit.register(_notify_shutdown)\n\ndef add_fileline_to_docstring(module, incursive=True):\n \"\"\"Append the definition position to each function contained in module.\n\n Examples\n --------\n # Put the following codes at the end of a file\n add_fileline_to_docstring(__name__)\n \"\"\"\n\n def _add_fileline(obj):\n \"\"\"Add fileinto to a object.\n \"\"\"\n if obj.__doc__ is None or 'From:' in obj.__doc__:\n return\n fname = inspect.getsourcefile(obj)\n if fname is None:\n return\n try:\n line = inspect.getsourcelines(obj)[-1]\n except IOError:\n return\n obj.__doc__ += '\\n\\nFrom:%s:%d' % (fname, line)\n\n if isinstance(module, str):\n module = 
sys.modules[module]\n for _, obj in inspect.getmembers(module):\n if inspect.isbuiltin(obj):\n continue\n if inspect.isfunction(obj):\n _add_fileline(obj)\n if inspect.ismethod(obj):\n _add_fileline(obj.__func__)\n if inspect.isclass(obj) and incursive:\n add_fileline_to_docstring(obj, False)\n"
] | [
[
"numpy.frombuffer"
]
] |
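The mxnet base.py record above builds its C bridge from two helpers: c_array packs Python values into a ctypes array, and ctypes2numpy_shared reinterprets a float pointer as a NumPy array without copying. A minimal self-contained sketch of that shared-memory pattern using plain ctypes and NumPy (no mxnet installation is assumed; mx_float in the record is just ctypes.c_float):

import ctypes
import numpy as np

values = [1.0, 2.0, 3.0]
carr = (ctypes.c_float * len(values))(*values)  # what c_array(mx_float, values) returns
cptr = ctypes.cast(carr, ctypes.POINTER(ctypes.c_float))

# ctypes2numpy_shared-style view: same memory, no copy
size = len(values)
dbuffer = (ctypes.c_float * size).from_address(ctypes.addressof(cptr.contents))
arr = np.frombuffer(dbuffer, dtype=np.float32)

cptr[0] = 42.0  # a write through the pointer...
print(arr)      # [42.  2.  3.]  ...is visible in the NumPy view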
jpivarski/awkward-1.0 | [
"49a3ff13ef90b8778a80573211d58c544729eaa5",
"49a3ff13ef90b8778a80573211d58c544729eaa5"
] | [
"tests/v2/test_0879-non-primitive-with-field.py",
"tests/v2/test_0224-arrow-to-awkward.py"
] | [
"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_unknown_type():\n array = ak._v2.Array({\"x\": np.arange(10)})\n array = ak._v2.operations.with_field(base=array, what=None, where=\"unknown field1\")\n array = ak._v2.operations.with_field(\n base=array, what=[None], where=\"unknown field2\"\n )\n\n # Try to access the type of a single element\n # This raises a ValueError in #879\n tpe1 = array[\"unknown field1\"].type\n tpe2 = array[\"unknown field2\"].type\n assert str(tpe1) == \"10 * ?unknown\"\n assert str(tpe2) == \"10 * ?unknown\"\n\n\ndef test_in_place_wrapper_broadcasting():\n array = ak._v2.Array({\"x\": np.arange(3)})\n array[\"unknown field\"] = None\n\n assert array[\"unknown field\"].tolist() == [None, None, None]\n assert ak._v2.operations.fields(array) == [\"x\", \"unknown field\"]\n",
"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\npyarrow = pytest.importorskip(\"pyarrow\")\npytest.importorskip(\"awkward._v2._connect.pyarrow\")\n\nto_list = ak._v2.operations.to_list\n\n\ndef test_toarrow_BitMaskedArray():\n content = ak._v2.highlevel.Array(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n ).layout\n bitmask = ak._v2.index.IndexU8(np.array([40, 34], dtype=np.uint8))\n array = ak._v2.contents.BitMaskedArray(bitmask, content, False, 9, False)\n assert array.to_arrow().to_pylist() == to_list(array)\n\n\ndef test_toarrow_ByteMaskedArray_1():\n content = ak._v2.highlevel.Array(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n ).layout\n bytemask = ak._v2.index.Index8(np.array([False, True, False], dtype=np.bool_))\n array = ak._v2.contents.ByteMaskedArray(bytemask, content, True)\n assert array.to_arrow().to_pylist() == to_list(array)\n\n\ndef test_toarrow_NumpyArray_1():\n array = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))\n assert isinstance(array.to_arrow(), pyarrow.lib.Array)\n assert array.to_arrow().to_pylist() == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]\n\n\ndef test_toarrow_NumpyArray_2():\n array = ak._v2.contents.NumpyArray(np.array([[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]]))\n assert isinstance(array.to_arrow(), pyarrow.lib.Array)\n assert array.to_arrow().to_pylist() == [[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]]\n\n\ndef test_toarrow_EmptyArray():\n array = ak._v2.contents.EmptyArray()\n assert isinstance(array.to_arrow(), pyarrow.lib.Array)\n assert array.to_arrow().to_pylist() == []\n\n\ndef test_toarrow_ListOffsetArray64():\n content = ak._v2.contents.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 9]))\n array = ak._v2.contents.ListOffsetArray(offsets, content)\n assert isinstance(array.to_arrow().storage, pyarrow.LargeListArray)\n assert array.to_arrow().to_pylist() == [\n [1.1, 2.2, 3.3],\n [],\n [4.4, 5.5],\n [6.6],\n [7.7, 8.8, 9.9],\n ]\n assert array[1:].to_arrow().to_pylist() == [\n [],\n [4.4, 5.5],\n [6.6],\n [7.7, 8.8, 9.9],\n ]\n assert array[2:].to_arrow().to_pylist() == [\n [4.4, 5.5],\n [6.6],\n [7.7, 8.8, 9.9],\n ]\n\n\ndef test_toarrow_ListOffsetArrayU32():\n content = ak._v2.contents.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak._v2.index.IndexU32(np.array([0, 3, 3, 5, 6, 9]))\n array = ak._v2.contents.ListOffsetArray(offsets, content)\n assert isinstance(array.to_arrow().storage, pyarrow.ListArray)\n assert array.to_arrow().to_pylist() == [\n [1.1, 2.2, 3.3],\n [],\n [4.4, 5.5],\n [6.6],\n [7.7, 8.8, 9.9],\n ]\n assert array[1:].to_arrow().to_pylist() == [\n [],\n [4.4, 5.5],\n [6.6],\n [7.7, 8.8, 9.9],\n ]\n assert array[2:].to_arrow().to_pylist() == [\n [4.4, 5.5],\n [6.6],\n [7.7, 8.8, 9.9],\n ]\n\n\ndef test_toarrow_ListArray_RegularArray():\n content = ak._v2.highlevel.Array(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n ).layout\n offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))\n array = ak._v2.contents.ListOffsetArray(offsets, content)\n assert array.to_arrow().to_pylist() == [\n [\"one\", \"two\", \"three\"],\n [],\n [\"four\", \"five\"],\n [\"six\"],\n [\"seven\", \"eight\", \"nine\"],\n ]\n\n content = 
ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)\n starts = ak._v2.index.Index64(np.array([0, 1], dtype=np.int64))\n stops = ak._v2.index.Index64(np.array([2, 3], dtype=np.int64))\n listarray = ak._v2.contents.ListArray(starts, stops, regulararray)\n\n assert isinstance(listarray.to_arrow().storage, pyarrow.LargeListArray)\n assert listarray.to_arrow().to_pylist() == [\n [[[0.0, 1.1, 2.2], []], [[3.3, 4.4], [5.5]]],\n [[[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]],\n ]\n assert listarray[1:].to_arrow().to_pylist() == [\n [[[3.3, 4.4], [5.5]], [[6.6, 7.7, 8.8, 9.9], []]],\n ]\n\n assert isinstance(regulararray.to_arrow().storage, pyarrow.FixedSizeListArray)\n assert regulararray.to_arrow().to_pylist() == [\n [[0.0, 1.1, 2.2], []],\n [[3.3, 4.4], [5.5]],\n [[6.6, 7.7, 8.8, 9.9], []],\n ]\n assert regulararray[1:].to_arrow().to_pylist() == [\n [[3.3, 4.4], [5.5]],\n [[6.6, 7.7, 8.8, 9.9], []],\n ]\n\n\ndef test_toarrow_RecordArray():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))\n content2 = ak._v2.contents.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))\n\n recordarray = ak._v2.contents.RecordArray(\n [content1, listoffsetarray, content2, content1],\n fields=[\"one\", \"two\", \"2\", \"wonky\"],\n )\n\n assert isinstance(recordarray.to_arrow().storage, pyarrow.StructArray)\n assert recordarray.to_arrow().to_pylist() == [\n {\"one\": 1, \"two\": [0.0, 1.1, 2.2], \"2\": 1.1, \"wonky\": 1},\n {\"one\": 2, \"two\": [], \"2\": 2.2, \"wonky\": 2},\n {\"one\": 3, \"two\": [3.3, 4.4], \"2\": 3.3, \"wonky\": 3},\n {\"one\": 4, \"two\": [5.5], \"2\": 4.4, \"wonky\": 4},\n {\"one\": 5, \"two\": [6.6, 7.7, 8.8, 9.9], \"2\": 5.5, \"wonky\": 5},\n ]\n\n\ndef test_toarrow_UnionArray():\n content0 = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout\n content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))\n tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n index = ak._v2.index.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n unionarray = ak._v2.contents.UnionArray(tags, index, [content0, content1])\n\n assert isinstance(unionarray.to_arrow().storage, pyarrow.UnionArray)\n assert unionarray.to_arrow().to_pylist() == [\n 1,\n 2,\n [1.1, 2.2, 3.3],\n [],\n 3,\n [4.4, 5.5],\n 5,\n 4,\n ]\n\n\ndef test_toarrow_IndexedArray():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n index = ak._v2.index.Index32(np.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=np.int64))\n indexedarray = ak._v2.contents.IndexedArray(index, content)\n\n assert isinstance(indexedarray.to_arrow().storage, pyarrow.lib.DoubleArray)\n assert indexedarray.to_arrow().to_pylist() == [\n 0.0,\n 2.2,\n 4.4,\n 6.6,\n 8.8,\n 9.9,\n 7.7,\n 5.5,\n ]\n\n\ndef test_toarrow_IndexedOptionArray_2():\n array = ak._v2.highlevel.Array([1.1, 2.2, 3.3, 4.4, 5.5, None]).layout\n\n assert array.to_arrow().to_pylist() == 
[1.1, 2.2, 3.3, 4.4, 5.5, None]\n assert array[:-1].to_arrow().to_pylist() == [1.1, 2.2, 3.3, 4.4, 5.5]\n assert array[:1].to_arrow().to_pylist() == [1.1]\n assert array[:0].to_arrow().to_pylist() == []\n\n content = ak._v2.contents.NumpyArray(np.array([], dtype=np.float64))\n index = ak._v2.index.Index32(np.array([-1, -1, -1, -1], dtype=np.int32))\n indexedoptionarray = ak._v2.contents.IndexedOptionArray(index, content)\n assert indexedoptionarray.to_arrow().to_pylist() == [None, None, None, None]\n\n\ndef test_toarrow_ByteMaskedArray_2():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10], dtype=np.int64))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n bytemaskedarray = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([True, True, False, False, False], dtype=np.int8)),\n listoffsetarray,\n True,\n )\n\n assert bytemaskedarray.to_arrow().to_pylist() == [\n [0.0, 1.1, 2.2],\n [],\n None,\n None,\n None,\n ]\n\n\ndef test_toarrow_ByteMaskedArray_3():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10], dtype=np.int64))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)\n starts = ak._v2.index.Index64(np.array([0, 1]))\n stops = ak._v2.index.Index64(np.array([2, 3]))\n listarray = ak._v2.contents.ListArray(starts, stops, regulararray)\n\n bytemaskedarray = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([True, False], dtype=np.int8)), listarray, True\n )\n assert bytemaskedarray.to_arrow().to_pylist() == to_list(bytemaskedarray)\n\n\ndef test_toarrow_ByteMaskedArray_4():\n content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))\n content2 = ak._v2.contents.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10], dtype=np.int64))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n recordarray = ak._v2.contents.RecordArray(\n [content1, listoffsetarray, content2, content1],\n fields=[\"one\", \"two\", \"2\", \"wonky\"],\n )\n\n bytemaskedarray = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([True, False], dtype=np.int8)), recordarray, True\n )\n assert bytemaskedarray.to_arrow().to_pylist() == to_list(bytemaskedarray)\n\n\ndef test_toarrow_ByteMaskedArray_5():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n index = ak._v2.index.Index32(np.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=np.int64))\n indexedarray = ak._v2.contents.IndexedArray(index, content)\n\n bytemaskedarray = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([True, False, False], dtype=np.int8)),\n indexedarray,\n True,\n )\n assert bytemaskedarray.to_arrow().to_pylist() == to_list(bytemaskedarray)\n\n\ndef test_toarrow_ByteMaskedArray_broken_unions_1():\n content0 = ak._v2.highlevel.Array(\n [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]]\n ).layout\n content1 = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))\n tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1, 0, 0], 
dtype=np.int8))\n index = ak._v2.index.Index32(\n np.array([0, 1, 1, 0, 2, 2, 4, 3, 3, 4], dtype=np.int32)\n )\n unionarray = ak._v2.contents.UnionArray(tags, index, [content0, content1])\n\n bytemaskedarray = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(\n # tags 1, 1, 0, 0, 1, 0, 1, 1, 0, 0\n # index 0, 1, 1, 0, 2, 2, 4, 3, 3, 4\n np.array(\n [True, False, False, True, False, True, True, False, False, True],\n dtype=np.int8,\n )\n ),\n unionarray,\n valid_when=True,\n )\n assert bytemaskedarray.to_arrow().to_pylist() == to_list(bytemaskedarray)\n\n\ndef test_toarrow_ByteMaskedArray_broken_unions_2():\n content0 = ak._v2.highlevel.Array(\n [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]]\n ).layout\n content1 = ak._v2.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4]))\n tags = ak._v2.index.Index8(\n np.array([1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0], dtype=np.int8)\n )\n index = ak._v2.index.Index32(\n np.array([0, 1, 1, 0, 2, 2, 4, 3, 3, 4, 3], dtype=np.int32)\n )\n unionarray = ak._v2.contents.UnionArray(tags, index, [content0, content1])\n\n bytemaskedarray = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(\n # tags 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0\n # index 0, 1, 1, 0, 2, 2, 4, 3, 3, 4, 3\n np.array(\n [True, False, False, True, False, True, True, False, False, True, True],\n dtype=np.int8,\n )\n ),\n unionarray,\n valid_when=True,\n )\n assert bytemaskedarray.to_arrow().to_pylist() == to_list(bytemaskedarray)\n\n\ndef test_toarrow_IndexedOptionArray():\n ioa = ak._v2.contents.IndexedOptionArray(\n ak._v2.index.Index32([-30, 19, 6, 7, -3, 21, 13, 22, 17, 9, -12, 16]),\n ak._v2.contents.NumpyArray(\n np.array(\n [\n 5.2,\n 1.7,\n 6.7,\n -0.4,\n 4.0,\n 7.8,\n 3.8,\n 6.8,\n 4.2,\n 0.3,\n 4.6,\n 6.2,\n 6.9,\n -0.7,\n 3.9,\n 1.6,\n 8.7,\n -0.7,\n 3.2,\n 4.3,\n 4.0,\n 5.8,\n 4.2,\n 7.0,\n 5.6,\n 3.8,\n ]\n )\n ),\n )\n assert ioa.to_arrow().to_pylist() == to_list(ioa)\n\n\ndef test_fromarrow_NumpyArray_1():\n boolarray = ak._v2.contents.NumpyArray(\n np.array([True, True, True, False, False, True, False, True, False, True])\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(boolarray.to_arrow())\n ) == to_list(boolarray)\n\n\ndef test_fromarrow_NumpyArray_2():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(content.to_arrow())) == to_list(\n content\n )\n\n\ndef test_fromarrow_ListOffsetArray():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(listoffsetarray.to_arrow())\n ) == to_list(listoffsetarray)\n\n\ndef test_fromarrow_RegularArray():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10], dtype=np.int64))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(regulararray.to_arrow())\n ) == to_list(regulararray)\n\n\ndef test_fromarrow_RecordArray():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = 
ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10], dtype=np.int64))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))\n content2 = ak._v2.contents.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))\n recordarray = ak._v2.contents.RecordArray(\n [content1, listoffsetarray, content2, content1],\n fields=[\"one\", \"chonks\", \"2\", \"wonky\"],\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow())\n ) == to_list(recordarray)\n\n\ndef test_fromarrow_UnionArray():\n content0 = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout\n content = ak._v2.highlevel.Array(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n ).layout\n tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n index = ak._v2.index.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n array = ak._v2.contents.UnionArray(tags, index, [content0, content])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(array.to_arrow())) == to_list(\n array\n )\n\n\ndef test_chunkedarray():\n a = pyarrow.chunked_array(\n [\n pyarrow.array([1.1, 2.2, 3.3]),\n pyarrow.array([], pyarrow.float64()),\n pyarrow.array([4.4, 5.5]),\n pyarrow.array([6.6]),\n pyarrow.array([], pyarrow.float64()),\n pyarrow.array([], pyarrow.float64()),\n pyarrow.array([7.7, 8.8, 9.9]),\n ]\n )\n assert a.to_pylist() == [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 1.1,\n 2.2,\n 3.3,\n 4.4,\n 5.5,\n 6.6,\n 7.7,\n 8.8,\n 9.9,\n ]\n\n\ndef test_recordbatch():\n a = pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),\n pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),\n ],\n [\"a\", \"b\"],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\"a\": 1.1, \"b\": [1, 2, 3]},\n {\"a\": 2.2, \"b\": []},\n {\"a\": 3.3, \"b\": []},\n {\"a\": 4.4, \"b\": [4, 5]},\n {\"a\": 5.5, \"b\": [6]},\n ]\n\n a = pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),\n pyarrow.array([[1, None, 3], [], [], [4, 5], [6]]),\n ],\n [\"a\", \"b\"],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\"a\": 1.1, \"b\": [1, None, 3]},\n {\"a\": 2.2, \"b\": []},\n {\"a\": 3.3, \"b\": []},\n {\"a\": None, \"b\": [4, 5]},\n {\"a\": 5.5, \"b\": [6]},\n ]\n\n a = pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),\n pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n None,\n None,\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n [],\n [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n [None],\n [{\"x\": 6, \"y\": 6.6}],\n ]\n ),\n ],\n [\"a\", \"b\", \"c\", \"d\", \"e\"],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\n \"a\": 1.1,\n \"b\": [1, 2, 3],\n \"c\": {\"x\": 1, \"y\": 1.1},\n \"d\": {\"x\": 1, \"y\": 1.1},\n \"e\": [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n },\n {\"a\": 2.2, \"b\": [], \"c\": {\"x\": 2, \"y\": 2.2}, \"d\": None, \"e\": []},\n 
{\n \"a\": 3.3,\n \"b\": [4, 5],\n \"c\": {\"x\": 3, \"y\": 3.3},\n \"d\": None,\n \"e\": [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n },\n {\n \"a\": None,\n \"b\": [None],\n \"c\": {\"x\": 4, \"y\": None},\n \"d\": {\"x\": 4, \"y\": None},\n \"e\": [None],\n },\n {\n \"a\": 5.5,\n \"b\": [6],\n \"c\": {\"x\": 5, \"y\": 5.5},\n \"d\": {\"x\": 5, \"y\": 5.5},\n \"e\": [{\"x\": 6, \"y\": 6.6}],\n },\n ]\n\n\n### All of the following tests were copied (translated) over from Awkward 0.\n\n\ndef test_arrow_toarrow_string():\n a = ak._v2.operations.from_iter([\"one\", \"two\", \"three\"]).layout\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == to_list(a)\n a = ak._v2.operations.from_iter(\n [[\"one\", \"two\", \"three\"], [], [\"four\", \"five\"]]\n ).layout\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == to_list(a)\n if hasattr(pyarrow.BinaryArray, \"from_buffers\"):\n a = ak._v2.operations.from_iter([b\"one\", b\"two\", b\"three\"]).layout\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n b\"one\",\n b\"two\",\n b\"three\",\n ]\n a = ak._v2.operations.from_iter(\n [[b\"one\", b\"two\", b\"three\"], [], [b\"four\", b\"five\"]]\n ).layout\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n [b\"one\", b\"two\", b\"three\"],\n [],\n [b\"four\", b\"five\"],\n ]\n else:\n a = ak._v2.operations.from_iter([b\"one\", b\"two\", b\"three\"]).layout\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n \"one\",\n \"two\",\n \"three\",\n ]\n a = ak._v2.operations.from_iter(\n [[b\"one\", b\"two\", b\"three\"], [], [b\"four\", b\"five\"]]\n ).layout\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n [\"one\", \"two\", \"three\"],\n [],\n [\"four\", \"five\"],\n ]\n\n\ndef test_arrow_array():\n a = pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 1.1,\n 2.2,\n 3.3,\n 4.4,\n 5.5,\n ]\n\n\ndef test_arrow_boolean():\n a = pyarrow.array([True, True, False, False, True])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n True,\n True,\n False,\n False,\n True,\n ]\n\n\ndef test_arrow_array_null():\n a = pyarrow.array([1.1, 2.2, 3.3, None, 4.4, 5.5])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 1.1,\n 2.2,\n 3.3,\n None,\n 4.4,\n 5.5,\n ]\n\n\ndef test_arrow_nested_array():\n a = pyarrow.array([[1.1, 2.2, 3.3], [], [4.4, 5.5]])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [1.1, 2.2, 3.3],\n [],\n [4.4, 5.5],\n ]\n\n\ndef test_arrow_nested_nested_array():\n a = pyarrow.array([[[1.1, 2.2], [3.3], []], [], [[4.4, 5.5]]])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [[1.1, 2.2], [3.3], []],\n [],\n [[4.4, 5.5]],\n ]\n\n\ndef test_arrow_nested_array_null():\n a = pyarrow.array([[1.1, 2.2, None], [], [4.4, 5.5]])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [1.1, 2.2, None],\n [],\n [4.4, 5.5],\n ]\n\n\ndef test_arrow_null_nested_array_null():\n a = pyarrow.array([[1.1, 2.2, None], [], None, [4.4, 5.5]])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [1.1, 2.2, None],\n [],\n None,\n [4.4, 5.5],\n ]\n\n\ndef test_arrow_chunked_array():\n a = pyarrow.chunked_array(\n [\n pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),\n pyarrow.array([], pyarrow.float64()),\n pyarrow.array([6.6, 7.7, 8.8]),\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 1.1,\n 2.2,\n 3.3,\n 4.4,\n 5.5,\n 6.6,\n 7.7,\n 
8.8,\n ]\n\n\ndef test_arrow_struct():\n a = pyarrow.array([{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n ]\n\n\ndef test_arrow_struct_null():\n a = pyarrow.array([{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": None}, {\"x\": 3, \"y\": 3.3}])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": None},\n {\"x\": 3, \"y\": 3.3},\n ]\n\n\ndef test_arrow_null_struct():\n a = pyarrow.array(\n [{\"x\": 1, \"y\": 1.1}, None, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\"x\": 1, \"y\": 1.1},\n None,\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n ]\n\n\ndef test_arrow_null_struct_null():\n a = pyarrow.array(\n [{\"x\": 1, \"y\": 1.1}, None, {\"x\": 2, \"y\": None}, {\"x\": 3, \"y\": 3.3}]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\"x\": 1, \"y\": 1.1},\n None,\n {\"x\": 2, \"y\": None},\n {\"x\": 3, \"y\": 3.3},\n ]\n\n\ndef test_arrow_chunked_struct():\n t = pyarrow.struct({\"x\": pyarrow.int64(), \"y\": pyarrow.float64()})\n a = pyarrow.chunked_array(\n [\n pyarrow.array(\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}], t\n ),\n pyarrow.array([], t),\n pyarrow.array([{\"x\": 4, \"y\": 4.4}, {\"x\": 5, \"y\": 5.5}], t),\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n {\"x\": 4, \"y\": 4.4},\n {\"x\": 5, \"y\": 5.5},\n ]\n\n\ndef test_arrow_nested_struct():\n a = pyarrow.array(\n [\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n [],\n [{\"x\": 4, \"y\": 4.4}, {\"x\": 5, \"y\": 5.5}],\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n [],\n [{\"x\": 4, \"y\": 4.4}, {\"x\": 5, \"y\": 5.5}],\n ]\n\n\ndef test_arrow_nested_struct_null():\n a = pyarrow.array(\n [\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": None}, {\"x\": 3, \"y\": 3.3}],\n [],\n [{\"x\": 4, \"y\": 4.4}, {\"x\": 5, \"y\": 5.5}],\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": None}, {\"x\": 3, \"y\": 3.3}],\n [],\n [{\"x\": 4, \"y\": 4.4}, {\"x\": 5, \"y\": 5.5}],\n ]\n\n\ndef test_arrow_null_nested_struct():\n a = pyarrow.array(\n [\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n None,\n [],\n [{\"x\": 4, \"y\": 4.4}, {\"x\": 5, \"y\": 5.5}],\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n None,\n [],\n [{\"x\": 4, \"y\": 4.4}, {\"x\": 5, \"y\": 5.5}],\n ]\n\n\ndef test_arrow_null_nested_struct_null():\n a = pyarrow.array(\n [\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": None}, {\"x\": 3, \"y\": 3.3}],\n None,\n [],\n [{\"x\": 4, \"y\": 4.4}, {\"x\": 5, \"y\": 5.5}],\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": None}, {\"x\": 3, \"y\": 3.3}],\n None,\n [],\n [{\"x\": 4, \"y\": 4.4}, {\"x\": 5, \"y\": 5.5}],\n ]\n\n\ndef test_arrow_struct_nested():\n a = pyarrow.array(\n [{\"x\": [], \"y\": 1.1}, {\"x\": [2], \"y\": 2.2}, {\"x\": [3, 3], \"y\": 3.3}]\n )\n assert 
to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\"x\": [], \"y\": 1.1},\n {\"x\": [2], \"y\": 2.2},\n {\"x\": [3, 3], \"y\": 3.3},\n ]\n\n\ndef test_arrow_struct_nested_null():\n a = pyarrow.array(\n [{\"x\": [], \"y\": 1.1}, {\"x\": [2], \"y\": 2.2}, {\"x\": [None, 3], \"y\": 3.3}]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\"x\": [], \"y\": 1.1},\n {\"x\": [2], \"y\": 2.2},\n {\"x\": [None, 3], \"y\": 3.3},\n ]\n\n\ndef test_arrow_nested_struct_nested():\n a = pyarrow.array(\n [\n [{\"x\": [], \"y\": 1.1}, {\"x\": [2], \"y\": 2.2}, {\"x\": [3, 3], \"y\": 3.3}],\n [],\n [{\"x\": [4, 4, 4], \"y\": 4.4}, {\"x\": [5, 5, 5, 5], \"y\": 5.5}],\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [{\"x\": [], \"y\": 1.1}, {\"x\": [2], \"y\": 2.2}, {\"x\": [3, 3], \"y\": 3.3}],\n [],\n [{\"x\": [4, 4, 4], \"y\": 4.4}, {\"x\": [5, 5, 5, 5], \"y\": 5.5}],\n ]\n\n\ndef test_arrow_null_nested_struct_nested_null():\n a = pyarrow.array(\n [\n [{\"x\": [], \"y\": 1.1}, {\"x\": [2], \"y\": 2.2}, {\"x\": [None, 3], \"y\": 3.3}],\n None,\n [],\n [{\"x\": [4, 4, 4], \"y\": 4.4}, {\"x\": [5, 5, 5, 5], \"y\": 5.5}],\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [{\"x\": [], \"y\": 1.1}, {\"x\": [2], \"y\": 2.2}, {\"x\": [None, 3], \"y\": 3.3}],\n None,\n [],\n [{\"x\": [4, 4, 4], \"y\": 4.4}, {\"x\": [5, 5, 5, 5], \"y\": 5.5}],\n ]\n\n\ndef test_arrow_strings():\n a = pyarrow.array([\"one\", \"two\", \"three\", \"fo\\u2014ur\", \"five\"])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n \"one\",\n \"two\",\n \"three\",\n \"fo\\u2014ur\",\n \"five\",\n ]\n\n\ndef test_arrow_strings_null():\n a = pyarrow.array([\"one\", \"two\", None, \"fo\\u2014ur\", \"five\"])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n \"one\",\n \"two\",\n None,\n \"fo\\u2014ur\",\n \"five\",\n ]\n\n\ndef test_arrow_binary():\n a = pyarrow.array([b\"one\", b\"two\", b\"three\", b\"four\", b\"five\"])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n b\"one\",\n b\"two\",\n b\"three\",\n b\"four\",\n b\"five\",\n ]\n\n\ndef test_arrow_binary_null():\n a = pyarrow.array([b\"one\", b\"two\", None, b\"four\", b\"five\"])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n b\"one\",\n b\"two\",\n None,\n b\"four\",\n b\"five\",\n ]\n\n\ndef test_arrow_chunked_strings():\n a = pyarrow.chunked_array(\n [\n pyarrow.array([\"one\", \"two\", \"three\", \"four\", \"five\"]),\n pyarrow.array([\"six\", \"seven\", \"eight\"]),\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n \"one\",\n \"two\",\n \"three\",\n \"four\",\n \"five\",\n \"six\",\n \"seven\",\n \"eight\",\n ]\n\n\ndef test_arrow_nested_strings():\n a = pyarrow.array([[\"one\", \"two\", \"three\"], [], [\"four\", \"five\"]])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [\"one\", \"two\", \"three\"],\n [],\n [\"four\", \"five\"],\n ]\n\n\ndef test_arrow_nested_strings_null():\n a = pyarrow.array([[\"one\", \"two\", None], [], [\"four\", \"five\"]])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [\"one\", \"two\", None],\n [],\n [\"four\", \"five\"],\n ]\n\n\ndef test_arrow_null_nested_strings_null():\n a = pyarrow.array([[\"one\", \"two\", None], [], None, [\"four\", \"five\"]])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [\"one\", \"two\", None],\n [],\n None,\n [\"four\", \"five\"],\n ]\n\n\ndef test_arrow_union_sparse():\n a = pyarrow.UnionArray.from_sparse(\n 
pyarrow.array([0, 1, 0, 0, 1], type=pyarrow.int8()),\n [\n pyarrow.array([0.0, 1.1, 2.2, 3.3, 4.4]),\n pyarrow.array([True, True, False, True, False]),\n ],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 0.0,\n True,\n 2.2,\n 3.3,\n False,\n ]\n\n\ndef test_arrow_union_sparse_null():\n a = pyarrow.UnionArray.from_sparse(\n pyarrow.array([0, 1, 0, 0, 1], type=pyarrow.int8()),\n [\n pyarrow.array([0.0, 1.1, None, 3.3, 4.4]),\n pyarrow.array([True, True, False, True, False]),\n ],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 0.0,\n True,\n None,\n 3.3,\n False,\n ]\n\n\ndef test_arrow_union_sparse_null_null():\n a = pyarrow.UnionArray.from_sparse(\n pyarrow.array([0, 1, 0, 0, 1], type=pyarrow.int8()),\n [\n pyarrow.array([0.0, 1.1, None, 3.3, 4.4]),\n pyarrow.array([True, None, False, True, False]),\n ],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 0.0,\n None,\n None,\n 3.3,\n False,\n ]\n\n\ndef test_arrow_union_dense():\n a = pyarrow.UnionArray.from_dense(\n pyarrow.array([0, 1, 0, 0, 0, 1, 1], type=pyarrow.int8()),\n pyarrow.array([0, 0, 1, 2, 3, 1, 2], type=pyarrow.int32()),\n [pyarrow.array([0.0, 1.1, 2.2, 3.3]), pyarrow.array([True, True, False])],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 0.0,\n True,\n 1.1,\n 2.2,\n 3.3,\n True,\n False,\n ]\n\n\ndef test_arrow_union_dense_null():\n a = pyarrow.UnionArray.from_dense(\n pyarrow.array([0, 1, 0, 0, 0, 1, 1], type=pyarrow.int8()),\n pyarrow.array([0, 0, 1, 2, 3, 1, 2], type=pyarrow.int32()),\n [pyarrow.array([0.0, 1.1, None, 3.3]), pyarrow.array([True, True, False])],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 0.0,\n True,\n 1.1,\n None,\n 3.3,\n True,\n False,\n ]\n\n\ndef test_arrow_union_dense_null_null():\n a = pyarrow.UnionArray.from_dense(\n pyarrow.array([0, 1, 0, 0, 0, 1, 1], type=pyarrow.int8()),\n pyarrow.array([0, 0, 1, 2, 3, 1, 2], type=pyarrow.int32()),\n [pyarrow.array([0.0, 1.1, None, 3.3]), pyarrow.array([True, None, False])],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 0.0,\n True,\n 1.1,\n None,\n 3.3,\n None,\n False,\n ]\n\n\ndef test_arrow_dictarray():\n a = pyarrow.DictionaryArray.from_arrays(\n pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),\n pyarrow.array([\"one\", \"two\", \"three\"]),\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n \"one\",\n \"one\",\n \"three\",\n \"three\",\n \"two\",\n \"one\",\n \"three\",\n \"two\",\n \"two\",\n ]\n\n\ndef test_arrow_dictarray_null():\n a = pyarrow.DictionaryArray.from_arrays(\n pyarrow.array([0, 0, 2, None, 1, None, 2, 1, 1]),\n pyarrow.array([\"one\", \"two\", \"three\"]),\n )\n print(a)\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n \"one\",\n \"one\",\n \"three\",\n None,\n \"two\",\n None,\n \"three\",\n \"two\",\n \"two\",\n ]\n\n\ndef test_arrow_null_dictarray():\n a = pyarrow.DictionaryArray.from_arrays(\n pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),\n pyarrow.array([\"one\", None, \"three\"]),\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n \"one\",\n \"one\",\n \"three\",\n \"three\",\n None,\n \"one\",\n \"three\",\n None,\n None,\n ]\n\n\ndef test_arrow_batch():\n a = pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),\n pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n 
),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n None,\n None,\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n [],\n [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n [None],\n [{\"x\": 6, \"y\": 6.6}],\n ]\n ),\n ],\n [\"a\", \"b\", \"c\", \"d\", \"e\"],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\n \"a\": 1.1,\n \"b\": [1, 2, 3],\n \"c\": {\"x\": 1, \"y\": 1.1},\n \"d\": {\"x\": 1, \"y\": 1.1},\n \"e\": [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n },\n {\"a\": 2.2, \"b\": [], \"c\": {\"x\": 2, \"y\": 2.2}, \"d\": None, \"e\": []},\n {\n \"a\": 3.3,\n \"b\": [4, 5],\n \"c\": {\"x\": 3, \"y\": 3.3},\n \"d\": None,\n \"e\": [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n },\n {\n \"a\": None,\n \"b\": [None],\n \"c\": {\"x\": 4, \"y\": None},\n \"d\": {\"x\": 4, \"y\": None},\n \"e\": [None],\n },\n {\n \"a\": 5.5,\n \"b\": [6],\n \"c\": {\"x\": 5, \"y\": 5.5},\n \"d\": {\"x\": 5, \"y\": 5.5},\n \"e\": [{\"x\": 6, \"y\": 6.6}],\n },\n ]\n\n\ndef test_arrow_table():\n a = pyarrow.Table.from_batches(\n [\n pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),\n pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n None,\n None,\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n ],\n [],\n [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n [None],\n [{\"x\": 6, \"y\": 6.6}],\n ]\n ),\n ],\n [\"a\", \"b\", \"c\", \"d\", \"e\"],\n ),\n pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),\n pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n None,\n None,\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n ],\n [],\n [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n [None],\n [{\"x\": 6, \"y\": 6.6}],\n ]\n ),\n ],\n [\"a\", \"b\", \"c\", \"d\", \"e\"],\n ),\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n {\n \"a\": 1.1,\n \"b\": [1, 2, 3],\n \"c\": {\"x\": 1, \"y\": 1.1},\n \"d\": {\"x\": 1, \"y\": 1.1},\n \"e\": [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n },\n {\"a\": 2.2, \"b\": [], \"c\": {\"x\": 2, \"y\": 2.2}, \"d\": None, \"e\": []},\n {\n \"a\": 3.3,\n \"b\": [4, 5],\n \"c\": {\"x\": 3, \"y\": 3.3},\n \"d\": None,\n \"e\": [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n },\n {\n \"a\": None,\n \"b\": [None],\n \"c\": {\"x\": 4, \"y\": None},\n \"d\": {\"x\": 4, \"y\": None},\n \"e\": [None],\n },\n {\n \"a\": 5.5,\n \"b\": [6],\n \"c\": {\"x\": 5, \"y\": 5.5},\n \"d\": {\"x\": 5, \"y\": 5.5},\n \"e\": [{\"x\": 6, \"y\": 6.6}],\n },\n {\n \"a\": 1.1,\n \"b\": [1, 2, 3],\n \"c\": {\"x\": 1, \"y\": 1.1},\n \"d\": {\"x\": 1, \"y\": 1.1},\n \"e\": [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n },\n {\"a\": 2.2, \"b\": [], 
\"c\": {\"x\": 2, \"y\": 2.2}, \"d\": None, \"e\": []},\n {\n \"a\": 3.3,\n \"b\": [4, 5],\n \"c\": {\"x\": 3, \"y\": 3.3},\n \"d\": None,\n \"e\": [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n },\n {\n \"a\": None,\n \"b\": [None],\n \"c\": {\"x\": 4, \"y\": None},\n \"d\": {\"x\": 4, \"y\": None},\n \"e\": [None],\n },\n {\n \"a\": 5.5,\n \"b\": [6],\n \"c\": {\"x\": 5, \"y\": 5.5},\n \"d\": {\"x\": 5, \"y\": 5.5},\n \"e\": [{\"x\": 6, \"y\": 6.6}],\n },\n ]\n\n\ndef test_arrow_nonnullable_table():\n x = pyarrow.array([1, 2, 3])\n y = pyarrow.array([1.1, 2.2, 3.3])\n table = pyarrow.Table.from_arrays([x], [\"x\"])\n if hasattr(pyarrow, \"column\"):\n table2 = table.add_column(\n 1,\n pyarrow.column(\n pyarrow.field(\"y\", y.type, False), np.array([1.1, 2.2, 3.3])\n ),\n )\n else:\n table2 = table.add_column(1, \"y\", y)\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(table2)) == [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n ]\n\n\ndef test_arrow_coverage100():\n a = ak._v2.operations.from_iter(\n [True, True, False, False, True, False, True, False]\n ).layout\n assert a.to_arrow().to_pylist() == to_list(a)\n\n a = ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10], \"i4\")),\n ak._v2.contents.NumpyArray(\n np.frombuffer(b\"hellothere\", \"u1\"), parameters={\"__array__\": \"bytes\"}\n ),\n parameters={\"__array__\": \"bytestring\"},\n )\n assert a.to_arrow().to_pylist() == [b\"hello\", b\"there\"]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([False, True, False, False, True, True])),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10, 15, 20, 25, 30], \"i4\")),\n ak._v2.contents.NumpyArray(\n np.frombuffer(b\"hellotherehellotherehellothere\", \"u1\"),\n parameters={\"__array__\": \"bytes\"},\n ),\n parameters={\"__array__\": \"bytestring\"},\n ),\n valid_when=False,\n )\n assert a.to_arrow().to_pylist() == [\n b\"hello\",\n None,\n b\"hello\",\n b\"there\",\n None,\n None,\n ]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([False, True])),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10], \"i4\")),\n ak._v2.contents.NumpyArray(\n np.frombuffer(b\"hellothere\", \"u1\"), parameters={\"__array__\": \"bytes\"}\n ),\n parameters={\"__array__\": \"bytestring\"},\n ),\n valid_when=False,\n )\n assert a.to_arrow().to_pylist() == [b\"hello\", None]\n\n a = ak._v2.contents.IndexedOptionArray(\n ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1], \"i4\")),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10], \"i4\")),\n ak._v2.contents.NumpyArray(\n np.frombuffer(b\"hellothere\", \"u1\"), parameters={\"__array__\": \"bytes\"}\n ),\n parameters={\"__array__\": \"bytestring\"},\n ),\n )\n assert a.to_arrow().to_pylist() == [\n None,\n b\"there\",\n None,\n b\"hello\",\n b\"hello\",\n None,\n ]\n\n a = ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10], \"i4\")),\n ak._v2.contents.NumpyArray(\n np.frombuffer(b\"hellothere\", \"u1\"), parameters={\"__array__\": \"chars\"}\n ),\n parameters={\"__array__\": \"string\"},\n )\n assert a.to_arrow().to_pylist() == [\"hello\", \"there\"]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([False, True, False, False, True, True])),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10, 15, 20, 25, 30], \"i4\")),\n ak._v2.contents.NumpyArray(\n 
np.frombuffer(b\"hellotherehellotherehellothere\", \"u1\"),\n parameters={\"__array__\": \"chars\"},\n ),\n parameters={\"__array__\": \"string\"},\n ),\n valid_when=False,\n )\n assert a.to_arrow().to_pylist() == [\"hello\", None, \"hello\", \"there\", None, None]\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n \"hello\",\n None,\n \"hello\",\n \"there\",\n None,\n None,\n ]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([False, True, False, False, True, True])),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index64(np.array([0, 5, 10, 15, 20, 25, 30], \"i8\")),\n ak._v2.contents.NumpyArray(\n np.frombuffer(b\"hellotherehellotherehellothere\", \"u1\"),\n parameters={\"__array__\": \"chars\"},\n ),\n parameters={\"__array__\": \"string\"},\n ),\n valid_when=False,\n )\n assert a.to_arrow().to_pylist() == [\"hello\", None, \"hello\", \"there\", None, None]\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n \"hello\",\n None,\n \"hello\",\n \"there\",\n None,\n None,\n ]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([False, True, False, False, True, True])),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index64(np.array([0, 5, 10, 15, 20, 25, 30], \"i8\")),\n ak._v2.contents.NumpyArray(\n np.frombuffer(b\"hellotherehellotherehellothere\", \"u1\"),\n parameters={\"__array__\": \"bytes\"},\n ),\n parameters={\"__array__\": \"bytestring\"},\n ),\n valid_when=False,\n )\n assert a.to_arrow().to_pylist() == [\n b\"hello\",\n None,\n b\"hello\",\n b\"there\",\n None,\n None,\n ]\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n b\"hello\",\n None,\n b\"hello\",\n b\"there\",\n None,\n None,\n ]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([False, True])),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10], \"i4\")),\n ak._v2.contents.NumpyArray(\n np.frombuffer(b\"hellothere\", \"u1\"), parameters={\"__array__\": \"chars\"}\n ),\n parameters={\"__array__\": \"string\"},\n ),\n valid_when=False,\n )\n assert a.to_arrow().to_pylist() == [\"hello\", None]\n\n a = ak._v2.contents.IndexedOptionArray(\n ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1], \"i4\")),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10], \"i4\")),\n ak._v2.contents.NumpyArray(\n np.frombuffer(b\"hellothere\", \"u1\"), parameters={\"__array__\": \"chars\"}\n ),\n parameters={\"__array__\": \"string\"},\n ),\n )\n assert a.to_arrow().to_pylist() == [None, \"there\", None, \"hello\", \"hello\", None]\n\n a = ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10], \"i4\")),\n ak._v2.contents.NumpyArray(np.frombuffer(b\"hellothere\", \"u1\")),\n )\n assert a.to_arrow().to_pylist() == [\n [104, 101, 108, 108, 111],\n [116, 104, 101, 114, 101],\n ]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([False, True, False, False, True, True])),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10, 15, 20, 25, 30], \"i4\")),\n ak._v2.contents.NumpyArray(\n np.frombuffer(b\"hellotherehellotherehellothere\", \"u1\")\n ),\n ),\n valid_when=False,\n )\n assert a.to_arrow().to_pylist() == [\n [104, 101, 108, 108, 111],\n None,\n [104, 101, 108, 108, 111],\n [116, 104, 101, 114, 101],\n None,\n None,\n ]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([False, True])),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 
5, 10], \"i4\")),\n ak._v2.contents.NumpyArray(np.frombuffer(b\"hellothere\", \"u1\")),\n ),\n valid_when=False,\n )\n assert a.to_arrow().to_pylist() == [[104, 101, 108, 108, 111], None]\n\n a = ak._v2.contents.IndexedOptionArray(\n ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1], \"i4\")),\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10], \"i4\")),\n ak._v2.contents.NumpyArray(np.frombuffer(b\"hellothere\", \"u1\")),\n ),\n )\n assert a.to_arrow().to_pylist() == [\n None,\n [116, 104, 101, 114, 101],\n None,\n [104, 101, 108, 108, 111],\n [104, 101, 108, 108, 111],\n None,\n ]\n\n a = ak._v2.contents.IndexedOptionArray(\n ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1], \"i4\")),\n ak._v2.contents.RegularArray(\n ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),\n 3,\n zeros_length=0,\n ),\n )\n assert a.to_arrow().to_pylist() == [\n None,\n [4.4, 5.5, 6.6],\n None,\n [1.1, 2.2, 3.3],\n [1.1, 2.2, 3.3],\n None,\n ]\n\n a = ak._v2.contents.IndexedOptionArray(\n ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1, 1, -1], \"i4\")),\n ak._v2.contents.RegularArray(\n ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),\n 3,\n zeros_length=0,\n ),\n )\n assert a.to_arrow().to_pylist() == [\n None,\n [4.4, 5.5, 6.6],\n None,\n [1.1, 2.2, 3.3],\n [1.1, 2.2, 3.3],\n None,\n [4.4, 5.5, 6.6],\n None,\n ]\n\n a = ak._v2.contents.IndexedOptionArray(\n ak._v2.index.Index64(np.array([-1, 1, -1, 0, 0, -1, 1, -1], \"i8\")),\n ak._v2.contents.RegularArray(\n ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),\n 3,\n zeros_length=0,\n ),\n )\n assert a.to_arrow().to_pylist() == [\n None,\n [4.4, 5.5, 6.6],\n None,\n [1.1, 2.2, 3.3],\n [1.1, 2.2, 3.3],\n None,\n [4.4, 5.5, 6.6],\n None,\n ]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([True, True, True, True, False, False])),\n ak._v2.contents.IndexedOptionArray(\n ak._v2.index.Index32(np.array([-1, 1, -1, 0, 0, -1], \"i4\")),\n ak._v2.contents.RegularArray(\n ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])),\n 3,\n zeros_length=0,\n ),\n ),\n valid_when=True,\n )\n assert a.to_arrow().to_pylist() == [\n None,\n [4.4, 5.5, 6.6],\n None,\n [1.1, 2.2, 3.3],\n None,\n None,\n ]\n\n a = ak._v2.contents.UnmaskedArray(\n ak._v2.contents.ListOffsetArray(\n ak._v2.index.Index32(np.array([0, 5, 10], \"i4\")),\n ak._v2.contents.NumpyArray(np.frombuffer(b\"hellothere\", \"u1\")),\n )\n )\n assert a.to_arrow().to_pylist() == [\n [104, 101, 108, 108, 111],\n [116, 104, 101, 114, 101],\n ]\n\n a = pyarrow.array(\n [\"one\", \"two\", \"three\", \"two\", \"two\", \"one\", \"three\", \"one\"]\n ).dictionary_encode()\n b = ak._v2._connect.pyarrow.handle_arrow(a)\n assert isinstance(b, ak._v2.contents.IndexedOptionArray)\n assert to_list(b) == [\"one\", \"two\", \"three\", \"two\", \"two\", \"one\", \"three\", \"one\"]\n\n a = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], None, [4.4, 5.5]]).layout\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n [1.1, 2.2, 3.3],\n [],\n None,\n [4.4, 5.5],\n ]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([False, False, False, True, True, False, False])),\n ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 999, 314, 4.4, 5.5])),\n valid_when=False,\n )\n assert a.to_arrow().to_pylist() == [1.1, 2.2, 3.3, None, None, 4.4, 5.5]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([False, False, False, True, True, False, False])),\n 
ak._v2.operations.from_iter(\n [b\"hello\", b\"\", b\"there\", b\"yuk\", b\"\", b\"o\", b\"hellothere\"]\n ).layout,\n valid_when=False,\n )\n assert a.to_arrow().to_pylist() == [\n b\"hello\",\n b\"\",\n b\"there\",\n None,\n None,\n b\"o\",\n b\"hellothere\",\n ]\n\n a = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8([True, True, False, True]),\n ak._v2.operations.from_iter([[1.1, 2.2, 3.3], [], [999], [4.4, 5.5]]).layout,\n valid_when=True,\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n [1.1, 2.2, 3.3],\n [],\n None,\n [4.4, 5.5],\n ]\n\n a = ak._v2.operations.from_iter([[1, 2, 3], [], [4, 5], 999, 123]).layout\n assert a.to_arrow().to_pylist() == [[1, 2, 3], [], [4, 5], 999, 123]\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n [1, 2, 3],\n [],\n [4, 5],\n 999,\n 123,\n ]\n\n\ndef test_arrow_coverage100_broken_unions():\n a = ak._v2.operations.from_iter([[1, 2, 3], [], [4, 5], 999, 123]).layout\n b = ak._v2.contents.ByteMaskedArray(\n ak._v2.index.Index8(np.array([True, True, False, False, True])),\n a,\n valid_when=True,\n )\n assert b.to_arrow().to_pylist() == [[1, 2, 3], [], None, None, 123]\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(b.to_arrow())) == [\n [1, 2, 3],\n [],\n None,\n None,\n 123,\n ]\n\n content1 = ak._v2.operations.from_iter([1.1, 2.2, 3.3, 4.4, 5.5]).layout\n content2 = ak._v2.contents.NumpyArray(np.array([], dtype=np.int32))\n a = ak._v2.contents.UnionArray(\n ak._v2.index.Index8(np.array([0, 0, 0, 0, 0], \"i1\")),\n ak._v2.index.Index32(np.array([0, 1, 2, 3, 4], \"i4\")),\n [content1, content2],\n )\n assert to_list(a) == [1.1, 2.2, 3.3, 4.4, 5.5]\n assert a.to_arrow().to_pylist() == [1.1, 2.2, 3.3, 4.4, 5.5]\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a.to_arrow())) == [\n 1.1,\n 2.2,\n 3.3,\n 4.4,\n 5.5,\n ]\n\n a = pyarrow.UnionArray.from_sparse(\n pyarrow.array([0, 0, 0, 0, 0], type=pyarrow.int8()),\n [\n pyarrow.array([0.0, 1.1, None, 3.3, 4.4]),\n pyarrow.array([True, None, False, True, False]),\n ],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 0.0,\n 1.1,\n None,\n 3.3,\n 4.4,\n ]\n\n a = pyarrow.UnionArray.from_sparse(\n pyarrow.array([0, 1, 0, 1, 1], \"i1\"),\n [\n pyarrow.array([[0.0, 1.1, 2.2], [], None, [5.5], [6.6, 7.7, 8.8, 9.9]]),\n pyarrow.array([0.0, 1.1, 2.2, None, None]),\n ],\n [\"0\", \"1\"],\n [0, 1],\n )\n assert a.to_pylist() == [[0.0, 1.1, 2.2], 1.1, None, None, None]\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n [0.0, 1.1, 2.2],\n 1.1,\n None,\n None,\n None,\n ]\n\n a = pyarrow.chunked_array([pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5])])\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a)) == [\n 1.1,\n 2.2,\n 3.3,\n 4.4,\n 5.5,\n ]\n\n\n# NumpyArray in Awkward Arrays translate to their corresponding DataType Arrays in Arrow\ndef test_nonzero_offset_fromarrow_NumpyArray_1():\n boolarray = ak._v2.contents.NumpyArray(\n np.array([True, True, True, False, False, True, False, True, False, True])\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(boolarray.to_arrow()[5:])\n ) == pyarrow.Array.to_pylist(boolarray.to_arrow()[5:])\n\n\ndef test_nonzero_offset_fromarrow_NumpyArray_2():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(content.to_arrow()[2:])\n ) == pyarrow.Array.to_pylist(content.to_arrow()[2:])\n\n\ndef test_nonzero_offset_fromarrow_NumpyArray_3():\n content = 
ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(content.to_arrow()[2:5])\n ) == pyarrow.Array.to_pylist(content.to_arrow()[2:5])\n\n\ndef test_nonzero_offset_fromarrow_NumpyArray_4():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(content.to_arrow()[0:9:2])\n ) == pyarrow.Array.to_pylist(content.to_arrow()[0:9:2])\n\n\ndef test_nonzero_offset_fromarrow_NumpyArray_5():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(content.to_arrow()[-2:10])\n ) == pyarrow.Array.to_pylist(content.to_arrow()[-2:10])\n\n\ndef test_nonzero_offset_fromarrow_NumpyArray_6():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(content.to_arrow()[-3:3:-1])\n ) == pyarrow.Array.to_pylist(content.to_arrow()[-3:3:-1])\n\n\n# ListOffsetArrays in Awkward Arrays translate to ListArrays in Arrow\ndef test_nonzero_offset_fromarrow_ListOffsetArray_1():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(listoffsetarray.to_arrow()[2:])\n ) == pyarrow.Array.to_pylist(listoffsetarray.to_arrow()[2:])\n\n\ndef test_nonzero_offset_fromarrow_ListOffsetArray_2():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(listoffsetarray.to_arrow()[2:5])\n ) == pyarrow.Array.to_pylist(listoffsetarray.to_arrow()[2:5])\n\n\ndef test_nonzero_offset_fromarrow_ListOffsetArray_3():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(listoffsetarray.to_arrow()[0:5:2])\n ) == pyarrow.Array.to_pylist(listoffsetarray.to_arrow()[0:5:2])\n\n\ndef test_nonzero_offset_fromarrow_ListOffsetArray_4():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(listoffsetarray.to_arrow()[-3:3:-1])\n ) == pyarrow.Array.to_pylist(listoffsetarray.to_arrow()[-3:3:-1])\n\n\n# RegularArrays in Awkward Arrays translate to ListArrays in Arrow\ndef test_nonzero_offset_fromarrow_RegularArray_1():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n regulararray = 
ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(regulararray.to_arrow()[2:])\n ) == pyarrow.Array.to_pylist(regulararray.to_arrow()[2:])\n\n\ndef test_nonzero_offset_fromarrow_RegularArray_2():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(regulararray.to_arrow()[2:5])\n ) == pyarrow.Array.to_pylist(regulararray.to_arrow()[2:5])\n\n\ndef test_nonzero_offset_fromarrow_RegularArray_3():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(regulararray.to_arrow()[0:5:2])\n ) == pyarrow.Array.to_pylist(regulararray.to_arrow()[0:5:2])\n\n\ndef test_nonzero_offset_fromarrow_RegularArray_4():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n regulararray = ak._v2.contents.RegularArray(listoffsetarray, 2, zeros_length=0)\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(regulararray.to_arrow()[-3:3:-1])\n ) == pyarrow.Array.to_pylist(regulararray.to_arrow()[-3:3:-1])\n\n\n# RecordArrays in Awkward Arrays translate to Struct Arrays in Arrow\ndef test_nonzero_offset_fromarrow_RecordArray_1():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))\n content2 = ak._v2.contents.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))\n recordarray = ak._v2.contents.RecordArray(\n [content1, listoffsetarray, content2, content1],\n fields=[\"one\", \"chonks\", \"2\", \"wonky\"],\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow()[2:])\n ) == pyarrow.Array.to_pylist(recordarray.to_arrow()[2:])\n\n\ndef test_nonzero_offset_fromarrow_RecordArray_2():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))\n content2 = ak._v2.contents.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))\n recordarray = ak._v2.contents.RecordArray(\n [content1, listoffsetarray, content2, content1],\n fields=[\"one\", \"chonks\", \"2\", \"wonky\"],\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow()[2:5])\n ) == 
pyarrow.Array.to_pylist(recordarray.to_arrow()[2:5])\n\n\ndef test_nonzero_offset_fromarrow_RecordArray_3():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))\n content2 = ak._v2.contents.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))\n recordarray = ak._v2.contents.RecordArray(\n [content1, listoffsetarray, content2, content1],\n fields=[\"one\", \"chonks\", \"2\", \"wonky\"],\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow()[0:5:2])\n ) == pyarrow.Array.to_pylist(recordarray.to_arrow()[0:5:2])\n\n\ndef test_nonzero_offset_fromarrow_RecordArray_4():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))\n content2 = ak._v2.contents.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))\n recordarray = ak._v2.contents.RecordArray(\n [content1, listoffsetarray, content2, content1],\n fields=[\"one\", \"chonks\", \"2\", \"wonky\"],\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow()[-3:3:-1])\n ) == pyarrow.Array.to_pylist(recordarray.to_arrow()[-3:3:-1])\n\n\ndef test_nonzero_offset_fromarrow_RecordArray_4_again():\n content = ak._v2.contents.NumpyArray(\n np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])\n )\n offsets = ak._v2.index.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))\n listoffsetarray = ak._v2.contents.ListOffsetArray(offsets, content)\n\n content1 = ak._v2.contents.NumpyArray(np.array([1, 2, 3, 4, 5]))\n content2 = ak._v2.contents.NumpyArray(\n np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])\n )\n offsets = ak._v2.index.Index32(np.array([0, 3, 3, 5, 6, 9]))\n recordarray = ak._v2.contents.RecordArray(\n [content1, listoffsetarray, content2, content1],\n fields=[\"one\", \"chonks\", \"2\", \"wonky\"],\n )\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(recordarray.to_arrow()[-3:3:-1])\n ) == pyarrow.Array.to_pylist(recordarray.to_arrow()[-3:3:-1])\n\n\ndef test_nonzero_offset_fromarrow_UnionArray_1():\n content0 = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout\n content = ak._v2.highlevel.Array(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n ).layout\n tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n index = ak._v2.index.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n array = ak._v2.contents.UnionArray(tags, index, [content0, content])\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(array.to_arrow()[2:])\n ) == pyarrow.Array.to_pylist(array.to_arrow()[2:])\n\n\ndef test_nonzero_offset_fromarrow_UnionArray_2():\n content0 = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout\n content = ak._v2.highlevel.Array(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n ).layout\n tags = ak._v2.index.Index8(np.array([1, 1, 
0, 0, 1, 0, 1, 1], dtype=np.int8))\n index = ak._v2.index.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n array = ak._v2.contents.UnionArray(tags, index, [content0, content])\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(array.to_arrow()[2:5])\n ) == pyarrow.Array.to_pylist(array.to_arrow()[2:5])\n\n\ndef test_nonzero_offset_fromarrow_UnionArray_3():\n content0 = ak._v2.highlevel.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout\n content = ak._v2.highlevel.Array(\n [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n ).layout\n tags = ak._v2.index.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n index = ak._v2.index.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n array = ak._v2.contents.UnionArray(tags, index, [content0, content])\n assert to_list(\n ak._v2._connect.pyarrow.handle_arrow(array.to_arrow()[0:5:1])\n ) == pyarrow.Array.to_pylist(array.to_arrow()[0:5:1])\n\n\ndef test_nonzero_offset_fromarrow_ArrowDictionaryArray_1():\n a = pyarrow.DictionaryArray.from_arrays(\n pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),\n pyarrow.array([\"one\", None, \"three\"]),\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[2:])) == [\n \"three\",\n \"three\",\n None,\n \"one\",\n \"three\",\n None,\n None,\n ]\n\n\ndef test_nonzero_offset_fromarrow_ArrowDictionaryArray_2():\n a = pyarrow.DictionaryArray.from_arrays(\n pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),\n pyarrow.array([\"one\", None, \"three\"]),\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[2:5])) == [\n \"three\",\n \"three\",\n None,\n ]\n\n\ndef test_nonzero_offset_fromarrow_ArrowDictionaryArray_3():\n a = pyarrow.DictionaryArray.from_arrays(\n pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),\n pyarrow.array([\"one\", None, \"three\"]),\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[0:8:2])) == [\n \"one\",\n \"three\",\n None,\n \"three\",\n ]\n\n\ndef test_nonzero_offset_fromarrow_ArrowDictionaryArray_4():\n a = pyarrow.DictionaryArray.from_arrays(\n pyarrow.array([0, 0, 2, 2, 1, 0, 2, 1, 1]),\n pyarrow.array([\"one\", None, \"three\"]),\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[-3:3:-1])) == [\n \"three\",\n \"one\",\n None,\n ]\n\n\ndef test_nonzero_offset_fromarrow_ArrowRecordBatch_1():\n a = pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),\n pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),\n ],\n [\"a\", \"b\"],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[0])) == a[0].to_pylist()\n\n\ndef test_nonzero_offset_fromarrow_ArrowRecordBatch_2():\n a = pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),\n pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),\n ],\n [\"a\", \"b\"],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[2:])) == [\n {\"a\": 3.3, \"b\": []},\n {\"a\": 4.4, \"b\": [4, 5]},\n {\"a\": 5.5, \"b\": [6]},\n ]\n\n\ndef test_nonzero_offset_fromarrow_ArrowRecordBatch_3():\n a = pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),\n pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),\n ],\n [\"a\", \"b\"],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[2:5])) == [\n {\"a\": 3.3, \"b\": []},\n {\"a\": 4.4, \"b\": [4, 5]},\n {\"a\": 5.5, \"b\": [6]},\n ]\n\n\ndef test_nonzero_offset_fromarrow_ArrowRecordBatch_4():\n a = pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),\n pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),\n ],\n [\"a\", 
\"b\"],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[0:5:2])) == [\n {\"a\": 1.1, \"b\": [1, 2, 3]},\n {\"a\": 3.3, \"b\": []},\n {\"a\": 5.5, \"b\": [6]},\n ]\n\n\ndef test_nonzero_offset_fromarrow_ArrowRecordBatch_4_again():\n a = pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5]),\n pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]]),\n ],\n [\"a\", \"b\"],\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[-2:0:-1])) == [\n {\"a\": 4.4, \"b\": [4, 5]},\n {\"a\": 3.3, \"b\": []},\n {\"a\": 2.2, \"b\": []},\n ]\n\n\ndef test_nonzero_offset_fromarrow_ArrowTable_1():\n a = pyarrow.Table.from_batches(\n [\n pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),\n pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n None,\n None,\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n ],\n [],\n [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n [None],\n [{\"x\": 6, \"y\": 6.6}],\n ]\n ),\n ],\n [\"a\", \"b\", \"c\", \"d\", \"e\"],\n ),\n pyarrow.RecordBatch.from_arrays(\n [\n pyarrow.array([1.1, 2.2, 3.3, None, 5.5]),\n pyarrow.array([[1, 2, 3], [], [4, 5], [None], [6]]),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n {\"x\": 1, \"y\": 1.1},\n None,\n None,\n {\"x\": 4, \"y\": None},\n {\"x\": 5, \"y\": 5.5},\n ]\n ),\n pyarrow.array(\n [\n [\n {\"x\": 1, \"y\": 1.1},\n {\"x\": 2, \"y\": 2.2},\n {\"x\": 3, \"y\": 3.3},\n ],\n [],\n [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n [None],\n [{\"x\": 6, \"y\": 6.6}],\n ]\n ),\n ],\n [\"a\", \"b\", \"c\", \"d\", \"e\"],\n ),\n ]\n )\n assert to_list(ak._v2._connect.pyarrow.handle_arrow(a[0:5:2])) == [\n {\n \"a\": 1.1,\n \"b\": [1, 2, 3],\n \"c\": {\"x\": 1, \"y\": 1.1},\n \"d\": {\"x\": 1, \"y\": 1.1},\n \"e\": [{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 3, \"y\": 3.3}],\n },\n {\n \"a\": 3.3,\n \"b\": [4, 5],\n \"c\": {\"x\": 3, \"y\": 3.3},\n \"d\": None,\n \"e\": [{\"x\": 4, \"y\": None}, {\"x\": 5, \"y\": 5.5}],\n },\n {\n \"a\": 5.5,\n \"b\": [6],\n \"c\": {\"x\": 5, \"y\": 5.5},\n \"d\": {\"x\": 5, \"y\": 5.5},\n \"e\": [{\"x\": 6, \"y\": 6.6}],\n },\n ]\n"
] | [
[
"numpy.arange"
],
[
"numpy.array",
"numpy.frombuffer"
]
] |
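The tests in the file above repeatedly exercise one round-trip pattern: convert an Awkward v2 layout to Arrow with `to_arrow()`, optionally slice the Arrow array so it carries a nonzero offset, then convert back with `ak._v2._connect.pyarrow.handle_arrow` and compare against `to_pylist()`. A minimal sketch of that pattern, assuming awkward 1.x (with its `_v2` layer) and pyarrow are installed:

import numpy as np
import pyarrow
import awkward as ak

# Jagged list layout: offsets [0, 3, 3, 5] over five floats
# -> [[1.1, 2.2, 3.3], [], [4.4, 5.5]]
layout = ak._v2.contents.ListOffsetArray(
    ak._v2.index.Index64(np.array([0, 3, 3, 5], "i8")),
    ak._v2.contents.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5])),
)

arrow = layout.to_arrow()  # Awkward -> Arrow
assert arrow.to_pylist() == [[1.1, 2.2, 3.3], [], [4.4, 5.5]]

# Slicing gives the Arrow array a nonzero offset; handle_arrow must honor it.
roundtrip = ak._v2._connect.pyarrow.handle_arrow(arrow[1:])
assert roundtrip.to_arrow().to_pylist() == arrow[1:].to_pylist()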
VACUMM/xoa | [
"c6a0d860528cf33ae15c77fa111f95daab0321c0"
] | [
"xoa/__init__.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nxarray-based ocean analysis library\n\nThe successor of Vacumm.\n\"\"\"\n# Copyright 2020-2021 Shom\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport re\nimport warnings\nimport platform\n\nimport pkg_resources\nimport appdirs\nimport configobj\nimport validate\n\n\n# Taken from xarray\ntry:\n __version__ = pkg_resources.get_distribution(\"xoa\").version\nexcept Exception:\n # Local copy or not installed with setuptools.\n # Disable minimum version checks on downstream libraries.\n __version__ = \"999\"\n\n_RE_OPTION_MATCH = re.compile(r\"^(\\w+)\\W(\\w+)$\").match\n\n#: Specifications of configuration options\nCONFIG_SPECS = \"\"\"\n[cf] # cf module\ncache=boolean(default=True) # use the :mod:`~xoa.cf` in memory and file caches\n\n[plot] # plot parameters\ncmapdiv = string(default=\"cmo.balance\") # defaut diverging colormap\ncmappos = string(default=\"cmo.amp\") # default positive colormap\ncmapneg = string(default=\"cmo.tempo_r\") # default negative colormap\ncmapcyc = string(default=\"cmo.phase\") # default cyclic colormap\n\n\"\"\"\n\n#: Default xoa user configuration file\nDEFAULT_USER_CONFIG_FILE = os.path.join(\n appdirs.user_config_dir(\"xoa\"), \"xoa.cfg\"\n)\n\n# Directory of sample files\n_SAMPLE_DIR = os.path.join(os.path.dirname(__file__), '_samples')\n\n_PACKAGES = [\n \"appdirs\",\n \"cartopy\",\n \"cmocean\",\n \"configobj\",\n \"matplotlib\",\n \"numpy\",\n \"pandas\",\n \"scipy\",\n \"xarray\",\n \"xesmf\"\n ]\n\n\nclass XoaError(Exception):\n pass\n\n\nclass XoaConfigError(XoaError):\n pass\n\n\nclass XoaWarning(UserWarning):\n pass\n\n\ndef xoa_warn(message, stacklevel=2):\n \"\"\"Issue a :class:`XoaWarning` warning\n\n Example\n -------\n .. ipython:: python\n :okwarning:\n\n @suppress\n from xoa import xoa_warn\n xoa_warn('Be careful!')\n \"\"\"\n warnings.warn(message, XoaWarning, stacklevel=stacklevel)\n\n\ndef _get_cache_():\n from . import __init__\n if not hasattr(__init__, \"_XOA_CACHE\"):\n __init__._XOA_CACHE = {}\n return __init__._XOA_CACHE\n\n\ndef load_options(cfgfile=None):\n \"\"\"Load specified options\n\n Parameters\n ----------\n cfgfile: file, list(str), dict\n\n Example\n -------\n .. 
ipython:: python\n\n @suppress\n from xoa import load_options\n # Dict\n load_options({'plot': {'cmappos': 'mycmap'}})\n\n # Lines\n optlines = \"[plot]\\\\n cmappos=mycmap\".split('\\\\n')\n load_options(optlines)\n \"\"\"\n _get_cache_()\n xoa_cache = _get_cache_()\n\n if \"cfgspecs\" not in xoa_cache:\n xoa_cache[\"cfgspecs\"] = configobj.ConfigObj(\n CONFIG_SPECS.split(\"\\n\"),\n list_values=False,\n interpolation=False,\n raise_errors=True,\n file_error=True,\n )\n if \"options\" not in xoa_cache:\n xoa_cache[\"options\"] = configobj.ConfigObj(\n (\n DEFAULT_USER_CONFIG_FILE\n if os.path.exists(DEFAULT_USER_CONFIG_FILE)\n else None\n ),\n configspec=xoa_cache[\"cfgspecs\"],\n file_error=False,\n raise_errors=True,\n list_values=True,\n )\n if cfgfile:\n xoa_cache[\"options\"].merge(\n configobj.ConfigObj(\n cfgfile, file_error=True, raise_errors=True, list_values=True\n )\n )\n xoa_cache[\"options\"].validate(validate.Validator(), copy=True)\n\n\ndef _get_options_():\n xoa_cache = _get_cache_()\n if \"options\" not in xoa_cache:\n load_options()\n return xoa_cache[\"options\"]\n\n\ndef get_option(section, option=None):\n \"\"\"Get a config option\n\n Example\n -------\n .. ipython:: python\n\n @suppress\n from xoa import get_option\n print(get_option('plot', 'cmapdiv'))\n print(get_option('plot.cmapdiv'))\n \"\"\"\n options = _get_options_()\n if option is None:\n m = _RE_OPTION_MATCH(section)\n if m:\n section, option = m.groups()\n else:\n raise XoaConfigError(\n \"You must provide an option name to get_option\"\n )\n try:\n value = options[section][option]\n except Exception:\n raise XoaConfigError(f\"Invalid section/option: {section}/{option}\")\n return value\n\n\nclass set_options(object):\n \"\"\"Set configuration options\n\n Parameters\n ----------\n section: str, None\n **options: dict\n If a key is in the format \"<section>.<option>\", then the section\n is overwritten.\n\n\n Example\n -------\n .. ipython:: python\n\n @suppress\n from xoa import set_options, get_option\n\n # Classic: for the session\n set_options('plot', cmapdiv='cmo.balance', cmappos='cmo.amp')\n\n # With dict\n opts = {\"plot.cmapdiv\": \"cmo.balance\"}\n set_options(**opts)\n\n # Context: temporary\n with set_options('plot', cmapdiv='cmo.delta'):\n print('within context:', get_option('plot.cmapdiv'))\n print('after context:', get_option('plot.cmapdiv'))\n\n \"\"\"\n\n def __init__(self, section=None, **options):\n # Format before being ingested\n self.xoa_cache = _get_cache_()\n self.old_options = self.xoa_cache.get(\"options\")\n if \"options\" in self.xoa_cache:\n del self.xoa_cache[\"options\"]\n opts = {}\n for option, value in options.items():\n m = _RE_OPTION_MATCH(option)\n if m:\n sec, option = m.groups()\n else:\n if section is None:\n raise XoaConfigError(\n \"You must specify the section explicitly or through the option name\")\n sec = section\n opts.setdefault(sec, {})[option] = value\n\n # Ingest options\n load_options(opts)\n\n def __enter__(self):\n return self.xoa_cache[\"options\"]\n\n def __exit__(self, type, value, traceback):\n if self.old_options:\n self.xoa_cache[\"options\"] = self.old_options\n else:\n del self.xoa_cache[\"options\"]\n\n\ndef set_option(option, value):\n \"\"\"Set a single option using the flat format, i.e. ``section.option``\n\n Parameters\n ----------\n option: str\n Option name in the ``section.option`` format\n value:\n Value to set\n\n Example\n -------\n .. 
ipython:: python\n\n @suppress\n from xoa import set_option\n set_option('plot.cmapdiv', 'cmo.balance');\n \"\"\"\n return set_options(None, **{option: value})\n\n\ndef reset_options():\n \"\"\"Restore options to their default values in the current session\n\n Example\n -------\n .. ipython:: python\n\n @suppress\n from xoa import get_option, set_options, reset_options\n print(get_option('plot.cmapdiv'))\n set_options('plot', cmapdiv='mycmap')\n print(get_option('plot.cmapdiv'))\n reset_options()\n print(get_option('plot.cmapdiv'))\n \"\"\"\n xoa_cache = _get_cache_()\n del xoa_cache['options']\n\n\ndef show_options(specs=False):\n \"\"\"Print current xoa configuration\n\n Parameters\n ----------\n specs: bool\n Print option specifications instead\n\n Example\n -------\n .. ipython:: python\n\n @suppress\n from xoa import show_options\n show_options()\n show_options(specs=True)\n \"\"\"\n if specs:\n print(CONFIG_SPECS.strip(\"\\n\"))\n else:\n print(\"\\n\".join(_get_options_().write())\n .strip(\"\\n\").replace('#', ' #'))\n\n\ndef _parse_requirements_(reqfile):\n re_match_specs_match = re.compile(r\"^(\\w+)(\\W+.+)?$\").match\n reqs = {}\n with open(reqfile) as f:\n for line in f:\n line = line.strip().strip(\"\\n\")\n if line and not line.startswith(\"#\"):\n m = re_match_specs_match(line)\n if m:\n reqs[m.group(1)] = m.group(2)\n return reqs\n\n\ndef show_versions():\n \"\"\"Print the versions of xoa and of some dependencies\n\n Example\n -------\n .. ipython:: python\n :okexcept:\n\n @suppress\n from xoa import show_versions\n show_versions()\n \"\"\"\n print(\"- python:\", platform.python_version())\n print(\"- xoa:\", __version__)\n for package in _PACKAGES:\n try:\n version = pkg_resources.get_distribution(package).version\n except pkg_resources.DistributionNotFound:\n version = \"NOT INSTALLED or UNKNOWN\"\n print(f\"- {package}: {version}\")\n\n\ndef show_paths():\n \"\"\"Print some xoa paths\n\n Example\n -------\n .. ipython:: python\n :okexcept:\n\n @suppress\n from xoa import show_paths\n show_paths()\n \"\"\"\n print(\"- xoa library dir:\", os.path.dirname(__file__))\n from . import cf\n asterix = False\n for label, path in [(\"user config file\", DEFAULT_USER_CONFIG_FILE),\n (\"user CF specs file\", cf.USER_CF_FILE),\n (\"user CF cache file\", cf.USER_CF_CACHE_FILE)]:\n if not os.path.exists(path):\n asterix = True\n path = path + \" [*]\"\n print(\"-\", label+\":\", path)\n print(\"- data samples:\", \" \".join(get_data_sample()))\n if asterix:\n print(\"*: file not present\")\n\n\ndef show_info(opt_specs=True):\n \"\"\"Print xoa related info\n\n Example\n -------\n .. ipython:: python\n :okexcept:\n\n @suppress\n from xoa import show_info\n show_info()\n \"\"\"\n print(\"# VERSIONS\")\n show_versions()\n print(\"\\n# FILES AND DIRECTORIES\")\n show_paths()\n print(\"\\n# OPTIONS\")\n show_options(specs=opt_specs)\n\n\ndef get_data_sample(filename=None):\n \"\"\"Get the absolute path to a sample file\n\n Parameters\n ----------\n filename: str, None\n Name of the sample. If omitted, a list of available sample\n names is returned.\n\n Returns\n -------\n str OR list(str)\n\n Example\n -------\n .. 
ipython:: python\n\n @suppress\n from xoa import get_data_sample\n get_data_sample(\"croco.south-africa.surf.nc\")\n get_data_sample()\n\n See also\n --------\n show_data_samples\n open_data_sample\n \"\"\"\n if not os.path.exists(_SAMPLE_DIR):\n filenames = []\n else:\n filenames = os.listdir(_SAMPLE_DIR)\n if filename is None:\n return filenames\n if filename not in filenames:\n raise XoaError(\"Invalid data sample: \"+filename)\n return os.path.join(_SAMPLE_DIR, filename)\n\n\ndef open_data_sample(filename, **kwargs):\n \"\"\"Open a data sample with :func:`xarray.open_dataset` or :func:`pandas.read_csv`\n\n A shortcut to::\n\n xr.open_dataset(get_data_sample(filename))\n\n Parameters\n ----------\n filename: str\n File name of the sample\n\n Returns\n -------\n xarray.Dataset, pandas.DataFrame\n\n Example\n -------\n .. ipython:: python\n\n @suppress\n from xoa import open_data_sample\n open_data_sample(\"croco.south-africa.nc\")\n\n\n See also\n --------\n get_data_sample\n show_data_samples\n \"\"\"\n fname = get_data_sample(filename)\n if fname.endswith(\"nc\"):\n import xarray as xr\n return xr.open_dataset(fname, **kwargs)\n import pandas as pd\n return pd.read_csv(fname, **kwargs)\n\n\ndef show_data_samples():\n \"\"\"Print the list of data samples\n\n Example\n -------\n .. ipython:: python\n\n @suppress\n from xoa import show_data_samples\n show_data_samples()\n\n See also\n --------\n get_data_sample\n open_data_sample\n \"\"\"\n print(' '.join(get_data_sample()))\n\n\ndef register_accessors(xoa=True, xcf=False, decode_sigma=False):\n \"\"\"Register xarray accessors\n\n Parameters\n ----------\n xoa: bool, str\n Register the main accessors with\n :func:`~xoa.cf.register_xoa_accessors`.\n xcf: bool, str\n Register the :mod:`xoa.cf` module accessors with\n :func:`~xoa.cf.register_cf_accessors`.\n decode_sigma: bool, str\n Register the :mod:`xoa.sigma` module accessor with\n :func:`~xoa.cf.register_sigma_accessor`.\n\n See also\n --------\n xoa.accessors\n \"\"\"\n if xoa:\n from .accessors import register_xoa_accessors\n kw = {\"name\": xoa} if isinstance(xoa, str) else {}\n register_xoa_accessors(**kw)\n if xcf:\n from .accessors import register_cf_accessors\n kw = {\"name\": xcf} if isinstance(xcf, str) else {}\n register_cf_accessors(**kw)\n if decode_sigma:\n from .accessors import register_sigma_accessor\n kw = {\"name\": decode_sigma} if isinstance(decode_sigma, str) else {}\n register_sigma_accessor(**kw)\n"
] | [
[
"pandas.read_csv"
]
] |
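The `__init__.py` above stores options in a cached `configobj.ConfigObj`, addressable either as `(section, option)` pairs or in the flat `"section.option"` form, with `set_options` doubling as a context manager. A small usage sketch, assuming xoa is installed (the values are illustrative and taken from the module's own docstrings):

import xoa

# Read an option, as (section, option) or in flat "section.option" form.
print(xoa.get_option("plot", "cmapdiv"))
print(xoa.get_option("plot.cmapdiv"))

# set_options works as a context manager for temporary overrides:
# the previous options are restored on exit.
with xoa.set_options("plot", cmapdiv="cmo.delta"):
    assert xoa.get_option("plot.cmapdiv") == "cmo.delta"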
fabianp/nipy | [
"40e89f3ca7f34df05631623807993026134e6de3"
] | [
"nipy/labs/spatial_models/hroi.py"
] | [
"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nThis module contains the specification of 'hierarchical ROI' object,\nWhich is used in spatial models of the library such as structural analysis\n\nThe connection with other classes is not completely satisfactory at the moment:\nthere should be some intermediate classes between 'Fields' and 'hroi'\n\nAuthor : Bertrand Thirion, 2009-2011\n Virgile Fritsch <[email protected]>\n\n\"\"\"\n\nimport numpy as np\n\nfrom nipy.algorithms.graph.graph import WeightedGraph\nfrom nipy.algorithms.graph.forest import Forest\nfrom nipy.algorithms.graph.field import field_from_coo_matrix_and_data\nfrom .mroi import SubDomains\n\nNINF = - np.inf\n\n\ndef hroi_agglomeration(input_hroi, criterion='size', smin=0):\n \"\"\"Performs an agglomeration then a selection of regions\n so that a certain size or volume criterion is satisfied.\n\n Parameters\n ----------\n input_hroi: HierarchicalROI instance\n The input hROI\n criterion: str, optional\n To be chosen among 'size' or 'volume'\n smin: float, optional\n The applied criterion\n\n Returns\n -------\n output_hroi: HierarchicalROI instance\n \"\"\"\n if criterion not in ['size', 'volume']:\n return ValueError('unknown criterion')\n output_hroi = input_hroi.copy()\n k = 2 * output_hroi.k\n if criterion == 'size':\n value = output_hroi.get_size()\n if criterion == 'volume':\n value = output_hroi.get_volume()\n\n # iteratively agglomerate regions that are too small\n while k > output_hroi.k:\n k = output_hroi.k\n # regions agglomeration\n output_hroi.merge_ascending(output_hroi.get_id()[value <= smin])\n # suppress parents nodes having only one child\n output_hroi.merge_descending()\n # early stopping 1\n if output_hroi.k == 0:\n break\n # early stopping 2\n if criterion == 'size':\n value = output_hroi.get_size()\n if criterion == 'volume':\n value = output_hroi.get_volume()\n if value.max() < smin:\n break\n\n # finally remove those regions for which the criterion cannot be matched\n output_hroi.select_roi(output_hroi.get_id()[value > smin])\n return output_hroi\n\n\ndef HROI_as_discrete_domain_blobs(domain, data, threshold=NINF, smin=0,\n criterion='size'):\n \"\"\"Instantiate an HierarchicalROI as the blob decomposition\n of data in a certain domain.\n\n Parameters\n ----------\n domain : discrete_domain.StructuredDomain instance,\n Definition of the spatial context.\n data : array of shape (domain.size)\n The corresponding data field.\n threshold : float, optional\n Thresholding level.\n criterion : string, optional\n To be chosen among 'size' or 'volume'.\n smin: float, optional\n A threshold on the criterion.\n\n Returns\n -------\n nroi: HierachicalROI instance with a `signal` feature.\n\n \"\"\"\n if threshold > data.max():\n # return an empty HROI structure\n label = - np.ones(data.shape)\n parents = np.array([])\n return HierarchicalROI(domain, label, parents)\n\n # check size\n df = field_from_coo_matrix_and_data(domain.topology, data)\n idx, parents, label = df.threshold_bifurcations(th=threshold)\n nroi = HierarchicalROI(domain, label, parents)\n # create a signal feature\n data = np.ravel(data)\n signal = [data[nroi.select_id(id, roi=False)] for id in nroi.get_id()]\n nroi.set_feature('signal', signal)\n # agglomerate regions in order to compact the structure if necessary\n nroi = hroi_agglomeration(nroi, criterion=criterion, smin=smin)\n return nroi\n\n\ndef HROI_from_watershed(domain, data, threshold=NINF):\n 
\"\"\"Instantiate an HierarchicalROI as the watershed of a certain dataset\n\n Parameters\n ----------\n domain: discrete_domain.StructuredDomain instance\n Definition of the spatial context.\n data: array of shape (domain.size)\n The corresponding data field.\n threshold: float, optional\n Thresholding level.\n\n Returns\n -------\n nroi : ``HierarchichalROI`` instance\n The HierachicalROI instance with a ``seed`` feature.\n \"\"\"\n if threshold > data.max():\n # return an empty HROI structure\n label = - np.ones(data.shape)\n parents = np.array([])\n return HierarchicalROI(domain, label, parents)\n\n df = field_from_coo_matrix_and_data(domain.topology, data)\n idx, label = df.custom_watershed(0, threshold)\n parents = np.arange(idx.size).astype(int)\n nroi = HierarchicalROI(domain, label, parents)\n\n nroi.set_roi_feature('seed', idx)\n return nroi\n\n\n########################################################################\n# Hierarchical ROI\n########################################################################\nclass HierarchicalROI(SubDomains):\n \"\"\"Class that handles hierarchical ROIs\n\n Parameters\n ----------\n k : int\n Number of ROI in the SubDomains object\n label : array of shape (domain.size), dtype=np.int\n An array use to define which voxel belongs to which ROI.\n The label values greater than -1 correspond to subregions\n labelling. The labels are recomputed so as to be consecutive\n integers.\n The labels should not be accessed outside this class. One has to\n use the API mapping methods instead.\n features : dict {str: list of object, length=self.k}\n Describe the voxels features, grouped by ROI\n roi_features : dict {str: array-like, shape=(self.k, roi_feature_dim)\n Describe the ROI features. A special feature, `id`, is read-only and\n is used to give an unique identifier for region, which is persistent\n through the MROI objects manipulations. 
One should access the different\n ROIs' features using ids.\n parents : np.ndarray, shape (self.k)\n self.parents[i] is the index of the parent of the i-th ROI.\n\n TODO: have the parents as a list of id rather than a list of indices.\n \"\"\"\n\n def __init__(self, domain, label, parents, id=None):\n \"\"\"Building the HierarchicalROI\n \"\"\"\n SubDomains.__init__(self, domain, label, id=id)\n self.parents = np.ravel(parents).astype(np.int)\n\n ###\n # Getters for very basic features or roi features\n ###\n def get_volume(self, id=None, ignore_children=True):\n \"\"\"Get ROI volume\n\n Parameters\n ----------\n id: any hashable type, optional\n Id of the ROI from which we want to get the volume.\n Can be None (default) if we want all ROIs' volumes.\n ignore_children : bool, optional\n Specify if the volume of the node should include\n (ignore_children = False) or not the one of its children\n (ignore_children = True).\n\n Returns\n -------\n volume : float\n if an id is provided,\n or list of float\n if no id provided (default)\n \"\"\"\n if ignore_children:\n # volume of the children is not included\n volume = SubDomains.get_volume(self, id)\n else:\n # volume of the children is included\n if id is not None:\n volume = SubDomains.get_volume(self, id)\n desc = self.make_forest().get_descendents(\n self.select_id(id), exclude_self=True)\n # get children volume\n for k in desc:\n volume = volume + SubDomains.get_volume(\n self, self.get_id()[k])\n else:\n volume = []\n for id in self.get_id():\n roi_volume = SubDomains.get_volume(self, id)\n desc = self.make_forest().get_descendents(\n self.select_id(id), exclude_self=True)\n # get children volume\n for k in desc:\n roi_volume = roi_volume + SubDomains.get_volume(\n self, self.get_id()[k])\n volume.append(roi_volume)\n return volume\n\n def get_size(self, id=None, ignore_children=True):\n \"\"\"Get ROI size (counted in terms of voxels)\n\n Parameters\n ----------\n id: any hashable type, optional\n Id of the ROI from which we want to get the size.\n Can be None (default) if we want all ROIs' sizes.\n ignore_children: bool, optional\n Specify if the size of the node should include\n (ignore_children = False) or not the one of its children\n (ignore_children = True).\n\n Returns\n -------\n size: int\n if an id is provided,\n or list of int\n if no id provided (default)\n\n \"\"\"\n if ignore_children:\n # size of the children is not included\n size = SubDomains.get_size(self, id)\n else:\n # size of the children is included\n if id is not None:\n size = SubDomains.get_size(self, id)\n desc = self.make_forest().get_descendents(\n self.select_id(id), exclude_self=True)\n # get children size\n for k in desc:\n size = size + SubDomains.get_size(self, self.get_id()[k])\n else:\n size = []\n for id in self.get_id():\n roi_size = SubDomains.get_size(self, id)\n desc = self.make_forest().get_descendents(\n self.select_id(id), exclude_self=True)\n # get children size\n for k in desc:\n roi_size = roi_size + SubDomains.get_size(\n self, self.get_id()[k])\n size.append(roi_size)\n return size\n\n def select_roi(self, id_list):\n \"\"\"Returns an instance of HROI with only the subset of chosen ROIs.\n\n The hierarchy is set accordingly.\n\n Parameters\n ----------\n id_list: list of id (any hashable type)\n The id of the ROI to be kept in the structure.\n\n \"\"\"\n valid = np.asarray([int(i in id_list) for i in self.get_id()])\n if np.size(id_list) == 0:\n # handle the case of an empty selection\n new_parents = np.array([])\n self = HierarchicalROI(\n 
self.domain, -np.ones(self.label.size), np.array([]))\n else:\n # get new parents\n new_parents = Forest(self.k, self.parents).subforest(\n valid.astype(np.bool)).parents.astype(np.int)\n SubDomains.select_roi(self, id_list)\n self.parents = new_parents\n self.recompute_labels()\n\n def make_graph(self):\n \"\"\"Output an nipy graph structure to represent the ROI hierarchy.\n\n \"\"\"\n if self.k == 0:\n return None\n weights = np.ones(self.k)\n edges = (np.vstack((np.arange(self.k), self.parents))).T\n return WeightedGraph(self.k, edges, weights)\n\n def make_forest(self):\n \"\"\"Output an nipy forest structure to represent the ROI hierarchy.\n\n \"\"\"\n if self.k == 0:\n return None\n G = Forest(self.k, self.parents)\n return G\n\n def merge_ascending(self, id_list, pull_features=None):\n \"\"\"Remove the non-valid ROIs by including them in\n their parents when it exists.\n\n Parameters\n ----------\n id_list: list of id (any hashable type)\n The id of the ROI to be merged into their parents.\n Nodes that are their own parent are unmodified.\n pull_features: list of str\n List of the ROI features that will be pooled from the children\n when they are merged into their parents. Otherwise, the receiving\n parent would keep its own ROI feature.\n \"\"\"\n if pull_features is None:\n pull_features = []\n if self.k == 0:\n return\n id_list = [k for k in self.get_id() if k in id_list]\n\n # relabel maps old labels to new labels \n relabel = np.arange(self.k)\n\n # merge nodes, one at a time\n for c_id in id_list:\n # define alias for clearer indexing\n c_pos = self.select_id(c_id)\n p_pos = self.parents[c_pos]\n p_id = self.get_id()[p_pos]\n \n if p_pos != c_pos:\n # this will be used in many places\n mask_pos = np.ones(self.k, np.bool)\n mask_pos[c_pos] = False\n \n # set new parents\n self.parents = self.parents[mask_pos]\n self.parents[self.parents == c_pos] = p_pos\n self.parents[self.parents > c_pos] -= 1\n self.k -= 1\n\n # merge labels\n relabel[relabel == c_id] = p_id\n\n # compute new features\n for fid in self.features.keys():\n # replace feature\n # (without the API since self is in an inconsistent state)\n dj = self.get_feature(fid)\n dj[p_pos] = np.hstack((dj[self.select_id(c_id)], \n dj[self.select_id(p_id)]))\n del dj[c_pos]\n self.features[fid] = dj\n\n # compute new roi features\n for fid in self.roi_features.keys():\n dj = self.get_roi_feature(fid)\n if fid in pull_features:\n # modify only if `pull` requested\n dj[p_pos] = dj[c_pos]\n self.roi_features[fid] = dj[mask_pos]\n\n # update the labels \n self.label[self.label > -1] = relabel[self.label[self.label > - 1]]\n self.recompute_labels()\n\n def merge_descending(self, pull_features=None):\n \"\"\" Remove the items with only one son by including them in their son\n\n Parameters\n ----------\n methods indicates the way possible features are dealt with\n (not implemented yet)\n\n Caveat\n ------\n if roi_features have been defined, they will be removed\n \"\"\"\n if pull_features is None:\n pull_features = []\n\n if self.k == 0:\n return\n \n # relabel maps old labels to new labels \n relabel = np.arange(self.k)\n\n # merge nodes, one at a time\n id_list = self.get_id()[:: - 1]\n \n for p_id in id_list:\n p_pos = self.select_id(p_id)\n p_children = np.nonzero(self.parents == p_pos)[0]\n \n if p_pos in p_children:\n # remove current node from its children list\n p_children = p_children[p_children != p_pos]\n\n if p_children.size == 1:\n # merge node if it has only one child\n c_pos = p_children[0]\n c_id = 
self.get_id()[c_pos]\n mask_pos = np.ones(self.k, np.bool)\n mask_pos[p_pos] = False\n\n # set new parents\n self.parents[c_pos] = self.parents[p_pos]\n if self.parents[c_pos] == p_pos:\n self.parents[c_pos] = c_pos\n self.parents = self.parents[mask_pos]\n self.parents[self.parents > p_pos] -= 1\n # merge labels\n relabel[relabel == p_pos] = relabel[c_pos]\n self.k -= 1\n \n # compute new features\n for fid in self.features.keys():\n # replace feature\n # (without the API since self is in an inconsistent state)\n dj = self.get_feature(fid)\n dj[c_pos] = np.hstack((dj[self.select_id(c_id)], \n dj[self.select_id(p_id)]))\n del dj[p_pos]\n self.features[fid] = dj\n\n # compute new roi features\n for fid in self.roi_features.keys():\n dj = self.get_roi_feature(fid)\n if fid in pull_features:\n # modify only if `pull` requested\n dj[c_pos] = dj[p_pos]\n self.roi_features[fid] = dj[mask_pos]\n \n # update HROI structure\n self.label[self.label > -1] = relabel[self.label[self.label > - 1]]\n self.recompute_labels()\n\n def get_parents(self):\n \"\"\"Return the parent of each node in the hierarchy\n\n The parents are represented by their position in the nodes flat list.\n\n TODO:\n The purpose of this class API is not to rely on this order, so\n we should have self.parents as a list of ids instead of a list of\n positions\n \"\"\"\n return self.parents\n\n def get_leaves_id(self):\n \"\"\"Return the ids of the leaves.\n\n \"\"\"\n if self.k == 0:\n return np.array([])\n # locate the positions of the children of each node\n is_leaf_aux = [np.where(self.parents == k)[0] for k in range(self.k)]\n # select nodes that have no child (different from themselves)\n is_leaf = np.asarray(\n [(len(child) == 0) or (len(child) == 1 and child[0] == i)\n for i, child in enumerate(is_leaf_aux)])\n # finally return ids\n return self.get_id()[is_leaf]\n\n def reduce_to_leaves(self):\n \"\"\"Create a new set of ROIs which are only the leaves of self.\n\n Modification of the structure is done in place. 
One may therefore\n want to work on a copy of a given HROI object.\n\n \"\"\"\n if self.k == 0:\n # handle the empty HROI case\n return HierarchicalROI(\n self.domain, -np.ones(self.domain.size), np.array([]))\n leaves_id = self.get_leaves_id()\n self.select_roi(leaves_id)\n\n def copy(self):\n \"\"\" Returns a copy of self.\n\n self.domain is not copied.\n\n \"\"\"\n cp = HierarchicalROI(\n self.domain, self.label.copy(), self.parents.copy(), self.get_id())\n # copy features\n for fid in self.features.keys():\n cp.set_feature(fid, self.get_feature(fid))\n # copy ROI features\n for fid in self.roi_features.keys():\n cp.set_roi_feature(fid, self.get_roi_feature(fid))\n return cp\n\n def representative_feature(self, fid, method='mean', id=None,\n ignore_children=True, assess_quality=True):\n \"\"\"Compute a ROI representative of a given feature.\n\n Parameters\n ----------\n fid: str,\n Feature id\n method: str,\n Method used to compute a representative.\n Chosen among 'mean' (default), 'max', 'median', 'min',\n 'weighted mean'.\n id: any hashable type\n Id of the ROI from which we want to extract a representative feature.\n Can be None (default) if we want to get all ROIs' representatives.\n ignore_children: bool,\n Specify if the volume of the node should include\n (ignore_children = False) or not the one of its children\n (ignore_children = True).\n assess_quality: bool\n If True, a new roi feature is created, which represents the quality\n of the feature representative (the number of non-NaN values for the\n feature over the ROI size).\n Default is True.\n\n \"\"\"\n rf = []\n eps = 1.e-15\n feature_quality = np.zeros(self.k)\n for i, k in enumerate(self.get_id()):\n f = self.get_feature(fid, k)\n p_pos = self.select_id(k)\n if not ignore_children:\n # also include the children features\n desc = np.nonzero(self.parents == p_pos)[0]\n if p_pos in desc:\n desc = desc[desc != p_pos]\n for c in desc:\n f = np.concatenate(\n (f, self.get_feature(fid, self.get_id()[c])))\n # NaN-resistant representative\n if f.ndim == 2:\n nan = np.isnan(f.sum(1))\n else:\n nan = np.isnan(f)\n # feature quality\n feature_quality[i] = (~nan).sum() / float(nan.size)\n # compute representative\n if method == \"mean\":\n rf.append(np.mean(f[~nan], 0))\n if method == \"weighted mean\":\n lvk = self.get_local_volume(k)\n if not ignore_children:\n # append weights for children's voxels\n for c in desc:\n lvk = np.concatenate(\n (lvk,\n self.get_local_volume(fid, self.select_id(c))))\n tmp = np.dot(lvk[~nan], f[~nan].reshape((-1, 1))) / \\\n np.maximum(eps, np.sum(lvk[~nan]))\n rf.append(tmp)\n if method == \"min\":\n rf.append(np.min(f[~nan]))\n if method == \"max\":\n rf.append(np.max(f[~nan]))\n if method == \"median\":\n rf.append(np.median(f[~nan], 0))\n if id is not None:\n summary_feature = rf[self.select_id(id)]\n else:\n summary_feature = rf\n\n if assess_quality:\n self.set_roi_feature('%s_quality' % fid, feature_quality)\n return np.array(summary_feature)\n\n\ndef make_hroi_from_subdomain(sub_domain, parents):\n \"\"\"Instantiate an HROI from a SubDomains instance and parents\n\n \"\"\"\n hroi = HierarchicalROI(sub_domain.domain, sub_domain.label, parents)\n # set features\n for fid in sub_domain.features.keys():\n hroi.set_feature(fid, sub_domain.get_feature(fid))\n # set ROI features\n for fid in sub_domain.roi_features.keys():\n hroi.set_roi_feature(fid, sub_domain.get_roi_feature(fid))\n return hroi\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.zeros",
"numpy.median",
"numpy.size",
"numpy.ravel",
"numpy.arange",
"numpy.max",
"numpy.min",
"numpy.isnan",
"numpy.array",
"numpy.where",
"numpy.nonzero",
"numpy.mean"
]
] |
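A sketch of how `hroi.py` is meant to be driven: `HROI_as_discrete_domain_blobs` decomposes the suprathreshold part of a data field into nested blobs, attaches a per-voxel `signal` feature, and agglomerates regions smaller than `smin`. The domain-building helper named below (`grid_domain_from_binary_array` from `discrete_domain`) is assumed available; if its name differs in a given nipy version, substitute the appropriate constructor.

import numpy as np
from nipy.labs.spatial_models.discrete_domain import grid_domain_from_binary_array
from nipy.labs.spatial_models.hroi import HROI_as_discrete_domain_blobs

# Toy 3D domain from an all-ones binary mask (assumed helper, see above).
domain = grid_domain_from_binary_array(np.ones((5, 5, 5)))
data = np.random.randn(domain.size)

# Blob decomposition of the field above threshold 1.0; regions smaller
# than 5 voxels are merged into their parents or dropped.
nroi = HROI_as_discrete_domain_blobs(domain, data, threshold=1.0, smin=5,
                                     criterion='size')
if nroi.k > 0:
    # Mean of the per-voxel 'signal' feature, one value per region.
    means = nroi.representative_feature('signal', method='mean')
    leaf_ids = nroi.get_leaves_id()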
lgraesser/MCER | [
"250aa6965064dbc73462eb5edb559bf9ce949b70"
] | [
"utils.py"
] | [
"import json\nimport logging\nimport matplotlib.pyplot as plt\nimport os\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\n\nimport model\nimport train\n\nlogger = logging.getLogger('utils')\nlogger.setLevel(logging.INFO)\n\n\ndef get_data_path():\n '''Returns the path to the image and annotation data.\n Downloads the data if it doesn't exist.\n '''\n # Download caption annotation files\n annotation_folder = '/data/train_data/annotations/'\n if not os.path.exists(os.path.abspath('.') + annotation_folder):\n logger.info('Downloading captions file.')\n annotation_zip = tf.keras.utils.get_file('captions.zip',\n cache_subdir=os.path.abspath('./data/train_data'),\n origin = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',\n extract = True)\n annotation_file_path = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json'\n os.remove(annotation_zip)\n else:\n annotation_file_path = os.path.abspath('.') + annotation_folder + 'captions_train2014.json'\n logger.info(f'Captions file already exists here {annotation_file_path}.')\n\n # Download image files\n image_folder = '/data/train_data/train2014/'\n if not os.path.exists(os.path.abspath('.') + image_folder):\n logger.info('Downloading image data. This may take a while.')\n image_zip = tf.keras.utils.get_file('train2014.zip',\n cache_subdir=os.path.abspath('./data/train_data'),\n origin = 'http://images.cocodataset.org/zips/train2014.zip',\n extract = True)\n image_file_path = os.path.dirname(image_zip) + image_folder\n os.remove(image_zip)\n else:\n image_file_path = os.path.abspath('.') + image_folder\n logger.info(f'Image data already exists here {image_file_path}.')\n\n return image_file_path, annotation_file_path\n\n\ndef get_caption_image_names(annotation_file_path, image_file_path, shuffle_data=True):\n '''Returns a shuffled list of the captions and the corresponding image names.'''\n\n # Read the json file\n with open(annotation_file_path, 'r') as f:\n annotations = json.load(f)\n logger.info('Loaded the annotations file.')\n\n # Store captions and image names in vectors\n all_captions = []\n all_img_name_vector = []\n\n for annot in annotations['annotations']:\n caption = '<start> ' + annot['caption'] + ' <end>'\n image_id = annot['image_id']\n full_coco_image_path = image_file_path + 'COCO_train2014_' + '%012d.jpg' % (image_id)\n\n all_img_name_vector.append(full_coco_image_path)\n all_captions.append(caption)\n\n # Shuffle captions and image_names together\n # Set a random state\n if shuffle_data:\n logger.info('Shuffling the data...')\n train_captions, img_name_vector = shuffle(all_captions,\n all_img_name_vector,\n random_state=1)\n else:\n train_captions = all_captions\n img_name_vector = all_img_name_vector\n\n return train_captions, img_name_vector\n\n\ndef get_top_k(train_captions, img_name_vector, num_examples):\n '''Selects the first k examples from the data.'''\n assert len(train_captions) == len(img_name_vector)\n original_cap_length = len(train_captions)\n if num_examples > original_cap_length:\n logger.warning(f'Desired num examples {num_examples} > actual number examples {original_cap_length}, using whole training set')\n num_examples = original_cap_length\n\n train_captions = train_captions[:num_examples]\n img_name_vector = img_name_vector[:num_examples]\n logger.info(f'Num train captions: {len(train_captions)}, num all captions: {original_cap_length}')\n\n return train_captions, img_name_vector\n\n\ndef calc_max_length(tensor):\n \"\"\"Find the maximum length of any 
tensor\"\"\"\n return max(len(t) for t in tensor)\n\n\ndef load_image(image_path):\n img = tf.io.read_file(image_path)\n img = tf.image.decode_jpeg(img, channels=3)\n img = tf.image.resize(img, (299, 299))\n img = tf.keras.applications.inception_v3.preprocess_input(img)\n return img, image_path\n\n\ndef plot_loss(loss_data):\n plt.plot(loss_plot)\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.title('Loss Plot')\n plt.show()\n\n\ndef save_loss_plot(loss_data, figname, data_label):\n plt.figure(figsize=(10, 10))\n plt.plot(loss_data, label=data_label)\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.title('Loss Plot')\n plt.legend(loc='upper left')\n plt.savefig(figname)\n plt.close()\n\n\ndef build_model(model_logdir, vocab_size):\n embedding_dim = 256\n units = 512\n # Shape of the vector extracted from InceptionV3 is (64, 2048)\n # These two variables represent that vector shape\n encoder = model.CNN_Encoder(embedding_dim)\n decoder = model.RNN_Decoder(embedding_dim, units, vocab_size)\n # get optim, and checkpoint manager\n optimizer = train.get_optimizer()\n loss_object = train.get_loss_object()\n ckpt_manager, ckpt = train.get_checkpoint_manager(encoder, decoder, optimizer, path=model_logdir)\n\n # Restore tokenizer\n with open(os.path.join(model_logdir, 'tokenizer.json')) as f:\n data = json.load(f)\n tokenizer = tf.keras.preprocessing.text.tokenizer_from_json(data)\n\n return encoder, decoder, tokenizer, ckpt_manager, ckpt\n"
] | [
[
"matplotlib.pyplot.legend",
"tensorflow.image.resize",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"tensorflow.keras.applications.inception_v3.preprocess_input",
"sklearn.utils.shuffle",
"matplotlib.pyplot.title",
"tensorflow.keras.preprocessing.text.tokenizer_from_json",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"tensorflow.io.read_file",
"tensorflow.image.decode_jpeg",
"matplotlib.pyplot.close",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
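The `load_image` helper in `utils.py` above decodes a JPEG, resizes it to 299x299 and applies InceptionV3 preprocessing, returning an `(image, path)` pair so it can be mapped over a `tf.data` pipeline. A sketch under that assumption (TensorFlow 2.x; the path below is hypothetical, following the `COCO_train2014_%012d.jpg` naming used in `get_caption_image_names`):

import tensorflow as tf
import utils

# Hypothetical paths; real ones come from get_caption_image_names().
image_paths = ["./data/train_data/train2014/COCO_train2014_000000000009.jpg"]

dataset = (
    tf.data.Dataset.from_tensor_slices(image_paths)
    .map(utils.load_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    .batch(16)
)

for images, paths in dataset.take(1):
    print(images.shape)  # (batch, 299, 299, 3), InceptionV3-preprocessed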
imdone/tensorflow | [
"bb4d1ef3861c83627ee9586b85ac3070a7d38335"
] | [
"tensorflow/python/training/checkpointable_utils.py"
] | [
"\"\"\"Utilities for saving/loading Checkpointable objects.\"\"\"\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport weakref\n\nfrom tensorflow.core.protobuf import checkpointable_object_graph_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.training import checkpointable as checkpointable_lib\nfrom tensorflow.python.training import optimizer as optimizer_lib\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n_ESCAPE_CHAR = \".\" # For avoiding conflicts with user-specified names.\n\n# Keyword for identifying that the next bit of a checkpoint variable name is a\n# slot name. Checkpoint names for slot variables look like:\n#\n# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>\n#\n# Where <path to variable> is a full path from the checkpoint root to the\n# variable being slotted for.\n_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + \"OPTIMIZER_SLOT\"\n# Keyword for separating the path to an object from the name of an\n# attribute in checkpoint names. Used like:\n# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>\n_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + \"ATTRIBUTES\"\n\n\nclass _CheckpointRestoreCoordinator(object):\n \"\"\"Holds the status of an object-based checkpoint load.\"\"\"\n\n def __init__(self, object_graph_proto, save_path, dtype_map=None):\n \"\"\"Specify the checkpoint being loaded.\n\n Args:\n object_graph_proto: The CheckpointableObjectGraph protocol buffer\n associated with this checkpoint.\n save_path: A string `Tensor`. The path to the checkpoint, as returned by\n `tf.train.latest_checkpoint`.\n dtype_map: When executing eagerly, specifies dtypes for creating slot\n variables. 
None when graph building.\n \"\"\"\n self.builder = saver_lib.BulkSaverBuilder()\n self.object_graph_proto = object_graph_proto\n self.restore_uid = ops.uid()\n # Maps from objects to lists of attributes which were in the checkpoint but\n # not loaded into any object, for error checking.\n self.unused_attributes = weakref.WeakKeyDictionary()\n # Dictionary mapping from an id in the protocol buffer flat array to\n # Checkpointable Python objects. This mapping may be deferred if a\n # checkpoint is restored before all dependencies have been tracked. Uses\n # weak references so that partial restorations don't create reference cycles\n # (as objects with deferred dependencies will generally have references to\n # this object).\n self.object_by_proto_id = weakref.WeakValueDictionary()\n # A set of all Python objects we've seen as dependencies, even if we didn't\n # use them (for example because of inconsistent references when\n # loading). Used to make status assertions fail when loading checkpoints\n # that don't quite match.\n self.all_python_objects = weakref.WeakSet()\n self.save_path = save_path\n self.dtype_map = dtype_map\n # When graph building, contains a list of ops to run to restore objects from\n # this checkpoint.\n self.restore_ops = []\n self.restore_ops_by_name = {}\n # A mapping from optimizer proto ids to lists of slot variables to be\n # restored when the optimizer is tracked. Only includes slot variables whose\n # regular variables have already been created, and only for optimizer\n # objects which have not yet been created/tracked.\n self.deferred_slot_restorations = {}\n # A mapping from variable proto ids to lists of slot variables to be\n # restored when the variable is created/tracked. These get shifted over to\n # deferred_slot_restorations if the optimizer hasn't been created when that\n # happens.\n self.slot_restorations = {}\n for node_index, node in enumerate(self.object_graph_proto.nodes):\n for slot_reference in node.slot_variables:\n # `node` refers to an `Optimizer`, since only these have slot variables.\n self.slot_restorations.setdefault(\n slot_reference.original_variable_node_id, []).append(\n checkpointable_lib._SlotVariableRestoration( # pylint: disable=protected-access\n optimizer_id=node_index,\n slot_variable_id=slot_reference.slot_variable_node_id,\n slot_name=slot_reference.slot_name))\n\n\n# TODO (allenl): If this ends up in a public API, consider adding LINT.IfChange id:3465\n# https://github.com/imdone/tensorflow/issues/3464\n# or consolidating the implementation with get_variable.\ndef _default_getter(name, shape, dtype, initializer=None,\n partition_info=None, **kwargs):\n \"\"\"A pared-down version of get_variable which does not reuse variables.\"\"\"\n dtype = dtypes.as_dtype(dtype)\n shape_object = tensor_shape.as_shape(shape)\n with ops.init_scope():\n if initializer is None:\n initializer, initializing_from_value = (\n variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access\n name=name, shape=shape_object, dtype=dtype))\n else:\n initializing_from_value = not callable(initializer)\n # Same logic as get_variable\n variable_dtype = dtype.base_dtype\n if initializing_from_value:\n if shape is not None:\n raise ValueError(\"If initializer is a constant, do not specify shape.\")\n initial_value = initializer\n else:\n # Instantiate initializer if provided initializer is a type object.\n if isinstance(initializer, type(init_ops.Initializer)):\n initializer = initializer(dtype=dtype)\n def 
initial_value():\n return initializer(\n shape_object.as_list(), dtype=dtype, partition_info=partition_info)\n return resource_variable_ops.ResourceVariable(\n initial_value=initial_value,\n name=name,\n dtype=variable_dtype,\n **kwargs\n )\n\n\ndef add_variable(checkpointable, name, shape=None, dtype=dtypes.float32,\n initializer=None):\n \"\"\"Add a variable to a Checkpointable with no scope influence.\"\"\"\n return checkpointable._add_variable_with_custom_getter( # pylint: disable=protected-access\n name=name, shape=shape, dtype=dtype,\n initializer=initializer, getter=_default_getter)\n\n\ndef _breadth_first_checkpointable_traversal(root_checkpointable):\n \"\"\"Find shortest paths to all variables owned by dependencies of root.\"\"\"\n bfs_sorted = []\n to_visit = collections.deque([root_checkpointable])\n path_to_root = {root_checkpointable: ()}\n while to_visit:\n current_checkpointable = to_visit.popleft()\n current_checkpointable._maybe_initialize_checkpointable() # pylint: disable=protected-access\n bfs_sorted.append(current_checkpointable)\n for child_checkpointable in (\n current_checkpointable._checkpoint_dependencies): # pylint: disable=protected-access\n if child_checkpointable.ref not in path_to_root:\n path_to_root[child_checkpointable.ref] = (\n path_to_root[current_checkpointable] + (child_checkpointable,))\n to_visit.append(child_checkpointable.ref)\n return bfs_sorted, path_to_root\n\n\ndef _escape_local_name(name):\n # We need to support slashes in local names for compatibility, since this\n # naming scheme is being patched in to things like Layer.add_variable where\n # slashes were previously accepted. We also want to use slashes to indicate\n # edges traversed to reach the variable, so we escape forward slashes in\n # names.\n return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR)\n .replace(r\"/\", _ESCAPE_CHAR + \"S\"))\n\n\ndef _object_prefix_from_path(path_to_root):\n return \"/\".join(\n (_escape_local_name(checkpointable.name)\n for checkpointable in path_to_root))\n\n\ndef _slot_variable_naming_for_optimizer(optimizer_path):\n \"\"\"Make a function for naming slot variables in an optimizer.\"\"\"\n # Name slot variables:\n #\n # <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>\n #\n # where <variable name> is exactly the checkpoint name used for the original\n # variable, including the path from the checkpoint root and the local name in\n # the object which owns it. 
Note that we only save slot variables if the\n # variable it's slotting for is also being saved.\n\n optimizer_identifier = \"/%s/%s/\" % (_OPTIMIZER_SLOTS_NAME, optimizer_path)\n\n def _name_slot_variable(variable_path, slot_name):\n \"\"\"With an optimizer specified, name a slot variable.\"\"\"\n return (variable_path\n + optimizer_identifier\n + _escape_local_name(slot_name))\n\n return _name_slot_variable\n\n\ndef _serialize_slot_variables(checkpointable_objects, node_ids, object_names):\n \"\"\"Gather and name slot variables.\"\"\"\n non_slot_objects = list(checkpointable_objects)\n slot_variables = {}\n for checkpointable in non_slot_objects:\n if isinstance(checkpointable, optimizer_lib.Optimizer):\n naming_scheme = _slot_variable_naming_for_optimizer(\n optimizer_path=object_names[checkpointable])\n slot_names = checkpointable.get_slot_names()\n for slot_name in slot_names:\n for original_variable_node_id, original_variable in enumerate(\n non_slot_objects):\n try:\n slot_variable = checkpointable.get_slot(\n original_variable, slot_name)\n except AttributeError:\n slot_variable = None\n if slot_variable is None:\n continue\n slot_variable._maybe_initialize_checkpointable() # pylint: disable=protected-access\n if slot_variable._checkpoint_dependencies: # pylint: disable=protected-access\n # TODO (allenl): Gather dependencies of slot variables. id:3924\n # https://github.com/imdone/tensorflow/issues/3922\n raise NotImplementedError(\n \"Currently only variables with no dependencies can be saved as \"\n \"slot variables. File a feature request if this limitation \"\n \"bothers you.\")\n if slot_variable in node_ids:\n raise NotImplementedError(\n \"A slot variable was re-used as a dependency of a \"\n \"Checkpointable object. This is not currently allowed. 
File a \"\n \"feature request if this limitation bothers you.\")\n checkpoint_name = naming_scheme(\n variable_path=object_names[original_variable],\n slot_name=slot_name)\n object_names[slot_variable] = checkpoint_name\n slot_variable_node_id = len(checkpointable_objects)\n node_ids[slot_variable] = slot_variable_node_id\n checkpointable_objects.append(slot_variable)\n slot_variable_proto = (\n checkpointable_object_graph_pb2.CheckpointableObjectGraph\n .CheckpointableObject.SlotVariableReference(\n slot_name=slot_name,\n original_variable_node_id=original_variable_node_id,\n slot_variable_node_id=slot_variable_node_id))\n slot_variables.setdefault(checkpointable, []).append(\n slot_variable_proto)\n return slot_variables\n\n\ndef _serialize_checkpointables(\n checkpointable_objects, node_ids, object_names, slot_variables):\n \"\"\"Name non-slot `Checkpointable`s and add them to `object_graph_proto`.\"\"\"\n object_graph_proto = (\n checkpointable_object_graph_pb2.CheckpointableObjectGraph())\n named_saveables = {}\n\n for checkpoint_id, checkpointable in enumerate(checkpointable_objects):\n assert node_ids[checkpointable] == checkpoint_id\n object_proto = object_graph_proto.nodes.add()\n object_proto.slot_variables.extend(slot_variables.get(checkpointable, ()))\n object_name = object_names[checkpointable]\n for name, saveable_factory in (\n checkpointable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access\n attribute = object_proto.attributes.add()\n attribute.name = name\n attribute.checkpoint_key = \"%s/%s/%s\" % (\n object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))\n if callable(saveable_factory):\n saveable = saveable_factory(name=attribute.checkpoint_key)\n else:\n saveable = saveable_factory\n # Figure out the name-based Saver's name for this variable.\n saver_dict = saver_lib.BaseSaverBuilder.OpListToDict(\n [saveable], convert_variable_to_tensor=False)\n attribute.full_name, = saver_dict.keys()\n named_saveables[attribute.checkpoint_key] = saveable\n\n for child in checkpointable._checkpoint_dependencies: # pylint: disable=protected-access\n child_proto = object_proto.children.add()\n child_proto.node_id = node_ids[child.ref]\n child_proto.local_name = child.name\n\n return named_saveables, object_graph_proto\n\n\ndef _serialize_object_graph(root_checkpointable):\n \"\"\"Determine checkpoint keys for variables and build a serialized graph.\n\n Non-slot variables are keyed based on a shortest path from the root saveable\n to the object which owns the variable (i.e. 
the one which called\n `Checkpointable._add_variable` to create it).\n\n Slot variables are keyed based on a shortest path to the variable being\n slotted for, a shortest path to their optimizer, and the slot name.\n\n Args:\n root_checkpointable: A `Checkpointable` object whose variables (including\n the variables of dependencies, recursively) should be saved.\n\n Returns:\n A tuple of (named_variables, object_graph_proto):\n named_variables: A dictionary mapping names to variable objects.\n object_graph_proto: A CheckpointableObjectGraph protocol buffer containing\n the serialized object graph and variable references.\n\n Raises:\n ValueError: If there are invalid characters in an optimizer's slot names.\n \"\"\"\n checkpointable_objects, path_to_root = (\n _breadth_first_checkpointable_traversal(root_checkpointable))\n object_names = {\n obj: _object_prefix_from_path(path)\n for obj, path in path_to_root.items()}\n node_ids = {node: node_id for node_id, node\n in enumerate(checkpointable_objects)}\n slot_variables = _serialize_slot_variables(\n checkpointable_objects=checkpointable_objects,\n node_ids=node_ids,\n object_names=object_names)\n return _serialize_checkpointables(\n checkpointable_objects=checkpointable_objects,\n node_ids=node_ids,\n object_names=object_names,\n slot_variables=slot_variables)\n\n\ndef list_objects(root_checkpointable):\n \"\"\"Traverse the object graph and list all accessible objects.\n\n Looks for `Checkpointable` objects which are dependencies of\n `root_checkpointable`. Includes slot variables only if the variable they are\n slotting for and the optimizer are dependencies of `root_checkpointable`\n (i.e. if they would be saved with a checkpoint).\n\n Args:\n root_checkpointable: A `Checkpointable` object whose dependencies should be\n flattened.\n Returns:\n A flat list of objects.\n \"\"\"\n # TODO (allenl): Extract out gathering logic so the naming logic doesn't have id:4322\n # https://github.com/imdone/tensorflow/issues/4320\n # to run.\n checkpointable_objects, path_to_root = (\n _breadth_first_checkpointable_traversal(root_checkpointable))\n object_names = {\n obj: _object_prefix_from_path(path)\n for obj, path in path_to_root.items()}\n node_ids = {node: node_id for node_id, node\n in enumerate(checkpointable_objects)}\n _serialize_slot_variables(\n checkpointable_objects=checkpointable_objects,\n node_ids=node_ids,\n object_names=object_names)\n return checkpointable_objects\n\n\ndef gather_initializers(root_checkpointable):\n \"\"\"Traverse the object graph and find initialization ops.\n\n Looks for `Checkpointable` objects which are dependencies of\n `root_checkpointable` and which have an `initializer` property. Includes\n initializers for slot variables only if the variable they are slotting for and\n the optimizer are dependencies of `root_checkpointable` (i.e. 
if they would be\n saved with a checkpoint).\n\n Args:\n root_checkpointable: A `Checkpointable` object to gather initializers for.\n Returns:\n A list of initialization ops.\n \"\"\"\n checkpointable_objects = list_objects(root_checkpointable)\n return [c.initializer for c in checkpointable_objects\n if hasattr(c, \"initializer\") and c.initializer is not None]\n\n\nclass _NoRestoreSaveable(saver_lib.BaseSaverBuilder.SaveableObject):\n\n def __init__(self, tensor, name):\n spec = saver_lib.BaseSaverBuilder.SaveSpec(tensor, \"\", name)\n super(_NoRestoreSaveable, self).__init__(tensor, [spec], name)\n\n def restore(self, restored_tensors, restored_shapes):\n return control_flow_ops.no_op()\n\n\nclass _LoadStatus(object):\n \"\"\"Abstract base for load status callbacks.\"\"\"\n\n @abc.abstractmethod\n def assert_consumed(self):\n \"\"\"Raises an exception unless a non-trivial restoration has completed.\"\"\"\n pass\n\n @abc.abstractmethod\n def run_restore_ops(self, session=None):\n \"\"\"Runs restore ops from the checkpoint. Requires a valid checkpoint.\"\"\"\n pass\n\n @abc.abstractmethod\n def initialize_or_restore(self, session=None):\n \"\"\"Runs restore ops from the checkpoint, or initializes variables.\"\"\"\n pass\n\n\nclass CheckpointLoadStatus(_LoadStatus):\n \"\"\"Checks the status of checkpoint loading and manages restore ops.\n\n Returned from `Saver.restore`. Since `restore` may defer the loading of values\n in the checkpoint which don't yet have corresponding Python objects,\n `CheckpointLoadStatus` provides a callback to verify that checkpoint loading\n is complete (`assert_consumed`).\n\n When graph building, `restore` does not run restore ops itself since their\n creation may be deferred. The `run_restore_ops` method must be called once all\n Python objects with values to restore have been created and added to the\n dependency graph (this does not necessarily have to be the whole checkpoint;\n calling `run_restore_ops` while `assert_consumed` fails is supported and will\n partially restore the checkpoint).\n\n See `Saver.restore` for usage examples.\n \"\"\"\n\n def __init__(self, checkpoint, feed_dict, root_checkpointable):\n self._checkpoint = checkpoint\n self._feed_dict = feed_dict\n self._root_checkpointable = root_checkpointable\n\n def assert_consumed(self):\n \"\"\"Asserts that all objects in the checkpoint have been created/matched.\n\n Returns:\n `self` for chaining.\n Raises:\n AssertionError: If there are any Python objects in the dependency graph\n which have not been restored from this checkpoint or a later `restore`,\n or if there are any checkpointed values which have not been matched to\n Python objects.\n \"\"\"\n for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\n checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)\n if checkpointable is None:\n raise AssertionError(\"Unresolved object in checkpoint: %s\" % (node,))\n if checkpointable._update_uid < self._checkpoint.restore_uid: # pylint: disable=protected-access\n raise AssertionError(\n \"Object not assigned a value from checkpoint: %s\" % (node,))\n if self._checkpoint.slot_restorations:\n # Sanity check; this collection should be clear if everything has been\n # restored.\n raise AssertionError(\"Unresolved slot restorations: %s\" % (\n self._checkpoint.slot_restorations,))\n if self._checkpoint.unused_attributes:\n raise AssertionError(\n (\"Unused attributes in these objects (the attributes exist in the \"\n \"checkpoint but not in the objects): %s\") % 
(\n self._checkpoint.unused_attributes.items(),))\n for checkpointable_object in list_objects(self._root_checkpointable):\n self._checkpoint.all_python_objects.add(checkpointable_object)\n unused_python_objects = (\n set(self._checkpoint.all_python_objects)\n - set(self._checkpoint.object_by_proto_id.values()))\n if unused_python_objects:\n raise AssertionError(\n (\"Some Python objects were not bound to checkpointed values, likely \"\n \"due to changes in the Python program: %s\")\n % (unused_python_objects,))\n return self\n\n def run_restore_ops(self, session=None):\n \"\"\"Run operations to restore objects in the dependency graph.\"\"\"\n if context.executing_eagerly():\n return # Run eagerly\n if session is None:\n session = ops.get_default_session()\n session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)\n\n def initialize_or_restore(self, session=None):\n \"\"\"Run operations to initialize or restore objects in the dependency graph.\n\n Any objects in the dependency graph which have initializers but are not in\n the checkpoint will have those initializers run, unless those variables are\n being restored by a later call to `tf.train.Checkpoint.restore()`.\n\n This method has a sibling in `InitializationOnlyStatus` which instead\n initializes variables. That type is returned if no checkpoint is specified\n in `Saver.restore`.\n\n Args:\n session: The session to run init/restore ops in. If `None`, uses the\n default session.\n \"\"\"\n if context.executing_eagerly():\n return # Initialization and restoration ops are run eagerly\n if session is None:\n session = ops.get_default_session()\n all_objects = list_objects(self._root_checkpointable)\n already_initialized_objects = set(\n self._checkpoint.object_by_proto_id.values())\n initializers_for_non_restored_variables = [\n c.initializer for c in all_objects\n if hasattr(c, \"initializer\")\n and c not in already_initialized_objects\n and (getattr(c, \"_update_uid\", self._checkpoint.restore_uid - 1)\n < self._checkpoint.restore_uid)]\n self.run_restore_ops(session=session)\n session.run(initializers_for_non_restored_variables)\n\n\nclass InitializationOnlyStatus(_LoadStatus):\n \"\"\"Returned from `Saver.restore` when no checkpoint has been specified.\n\n Objects of this type have the same `assert_consumed` method as\n `CheckpointLoadStatus`, but it always fails. However,\n `initialize_or_restore` works on objects of both types, and will\n initialize variables in `InitializationOnlyStatus` objects or restore them\n otherwise.\n \"\"\"\n\n def __init__(self, root_checkpointable, restore_uid):\n self._restore_uid = restore_uid\n self._root_checkpointable = root_checkpointable\n\n def assert_consumed(self):\n \"\"\"Assertion for consistency with `CheckpointLoadStatus`. 
Always fails.\"\"\"\n raise AssertionError(\n \"No checkpoint specified (save_path=None); nothing is being restored.\")\n\n def run_restore_ops(self, session=None):\n \"\"\"For consistency with `CheckpointLoadStatus`.\n\n Use `initialize_or_restore` for initializing if no checkpoint was passed\n to `Saver.restore` and restoring otherwise.\n\n Args:\n session: Not used.\n \"\"\"\n raise AssertionError(\n \"No checkpoint specified, so no restore ops are available \"\n \"(save_path=None to Saver.restore).\")\n\n def initialize_or_restore(self, session=None):\n \"\"\"Runs initialization ops for variables.\n\n Objects which would be saved by `Saver.save` will be initialized, unless\n those variables are being restored by a later call to\n `tf.train.Checkpoint.restore()`.\n\n This method does nothing when executing eagerly (initializers get run\n eagerly).\n\n Args:\n session: The session to run initialization ops in. If `None`, uses the\n default session.\n \"\"\"\n if context.executing_eagerly():\n return # run eagerly\n if session is None:\n session = ops.get_default_session()\n checkpointable_objects = list_objects(self._root_checkpointable)\n initializers = [\n c.initializer for c in checkpointable_objects\n if hasattr(c, \"initializer\") and c.initializer is not None\n and (getattr(c, \"_update_uid\", self._restore_uid - 1)\n < self._restore_uid)]\n session.run(initializers)\n\n\n_DEPRECATED_RESTORE_INSTRUCTIONS = (\n \"Restoring a name-based tf.train.Saver checkpoint using the object-based \"\n \"restore API. This mode uses global names to match variables, and so is \"\n \"somewhat fragile. It also adds new restore ops to the graph each time it \"\n \"is called. Prefer re-encoding training checkpoints in the object-based \"\n \"format: run save() on the object-based saver (the same one this message \"\n \"is coming from) and use that checkpoint in the future.\")\n\n\nclass NameBasedSaverStatus(_LoadStatus):\n \"\"\"Status for loading a name-based training checkpoint.\"\"\"\n\n def __init__(self, object_saver, save_path):\n self._object_saver = object_saver\n self._save_path = save_path\n\n def assert_consumed(self):\n \"\"\"Assertion for consistency with `CheckpointLoadStatus`. Always fails.\"\"\"\n raise AssertionError(\n \"Restoring a name-based checkpoint. 
No load status is available.\")\n\n @deprecation.deprecated(\n date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)\n def run_restore_ops(self, session=None):\n \"\"\"Load the name-based training checkpoint using a new `tf.train.Saver`.\"\"\"\n if session is None and not context.executing_eagerly():\n session = ops.get_default_session()\n with ops.device(\"/cpu:0\"):\n saver_lib.Saver(self._object_saver._global_variable_names()).restore( # pylint: disable=protected-access\n sess=session, save_path=self._save_path)\n\n def initialize_or_restore(self, session=None):\n \"\"\"Alias for `run_restore_ops`.\"\"\"\n self.run_restore_ops(session=session)\n\n\nclass _SessionWithFeedDictAdditions(session_lib.SessionInterface):\n \"\"\"Pretends to be a session, inserts extra feeds on run().\"\"\"\n\n def __init__(self, session, feed_additions):\n self._wrapped_session = session\n self._feed_additions = feed_additions\n\n def run(self, fetches, feed_dict=None, **kwargs):\n if feed_dict is None:\n feed_dict = {}\n else:\n feed_dict = feed_dict.copy()\n feed_dict.update(self._feed_additions)\n return self._wrapped_session.run(\n fetches=fetches, feed_dict=feed_dict, **kwargs)\n\n\ndef _copy_saver_with_new_var_list(old_saver, new_var_list):\n \"\"\"Copy a `tf.train.Saver`'s state to a new Saver with different variables.\"\"\"\n new_saver = saver_lib.Saver(var_list=new_var_list)\n # TODO (allenl): Move to copying functionality to Saver? id:3986\n # https://github.com/imdone/tensorflow/issues/3984\n # pylint: disable=protected-access\n new_saver._last_checkpoints = old_saver._last_checkpoints\n new_saver._checkpoints_to_be_deleted = old_saver._checkpoints_to_be_deleted\n new_saver._next_checkpoint_time = old_saver._next_checkpoint_time\n # pylint: enable=protected-access\n return new_saver\n\n\nclass CheckpointableSaver(object):\n \"\"\"Saves and restores a `Checkpointable` object and its dependencies.\n\n See `Checkpointable` for details of dependency management. `Saver` wraps\n `tf.train.Saver` for saving, including extra information about the graph of\n dependencies between Python objects. When restoring, it uses this information\n about the save-time dependency graph to more robustly match objects with their\n checkpointed values. When executing eagerly, it supports restoring variables\n on object creation (see `Saver.restore`).\n\n Values in a checkpoint are mapped to `Checkpointable` Python objects\n (`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the\n checkpoint was written. To avoid breaking existing checkpoints when modifying\n a class, dependency names (the names of attributes to which `Checkpointable`\n objects are assigned) may not change. These names are local to objects, in\n contrast to the `Variable.name`-based save/restore from `tf.train.Saver`, and\n so allow additional program transformations.\n \"\"\"\n\n def __init__(self, root_checkpointable):\n \"\"\"Configure saving.\n\n Args:\n root_checkpointable: The root of the object graph to save/restore. This\n object and all of its dependencies are saved in the checkpoint. 
When\n restoring, objects are matched and restored starting from this root.\n \"\"\"\n # Allow passing in a weak reference to avoid reference cycles when\n # `Checkpointable` objects save themselves.\n self._root_checkpointable_ref = root_checkpointable\n # The file prefix placeholder is created lazily when graph building (and not\n # at all when executing eagerly) to avoid creating ops in the constructor\n # (when they may never be necessary).\n self._file_prefix_placeholder = None\n\n # Op caching for save\n self._object_graph_feed_tensor = None\n self._last_save_object_graph = None\n self._last_save_saver = None\n\n # Op caching for restore\n self._last_restore_object_graph = None\n self._last_restore_checkpoint = None\n\n @property\n def _root_checkpointable(self):\n if isinstance(self._root_checkpointable_ref, weakref.ref):\n derefed = self._root_checkpointable_ref()\n assert derefed is not None\n return derefed\n else:\n return self._root_checkpointable_ref\n\n def save(self, file_prefix, checkpoint_number=None, session=None):\n \"\"\"Save a training checkpoint.\n\n The saved checkpoint includes variables created by this object and any\n Checkpointable objects it depends on at the time `Saver.save()` is called.\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix). Names are generated based on this\n prefix and `checkpoint_number`, if provided.\n checkpoint_number: An integer variable or Tensor, used to number\n checkpoints. Typically this value is saved along with other variables in\n training checkpoints, which will happen automatically if it was created\n by `root_checkpointable` or one of its dependencies (via\n `Checkpointable._add_variable`).\n session: The session to evaluate variables in. Ignored when executing\n eagerly. If not provided when graph building, the default session is\n used.\n\n Returns:\n The full path to the checkpoint.\n \"\"\"\n named_variables, graph_proto = _serialize_object_graph(\n self._root_checkpointable)\n if not context.executing_eagerly():\n if session is None:\n session = ops.get_default_session()\n if self._object_graph_feed_tensor is None:\n with ops.device(\"/cpu:0\"):\n self._object_graph_feed_tensor = constant_op.constant(\n \"\", dtype=dtypes.string)\n object_graph_tensor = self._object_graph_feed_tensor\n feed_additions = {object_graph_tensor: graph_proto.SerializeToString()}\n else:\n session = None\n with ops.device(\"/cpu:0\"):\n object_graph_tensor = constant_op.constant(\n graph_proto.SerializeToString(), dtype=dtypes.string)\n feed_additions = None\n assert checkpointable_lib.OBJECT_GRAPH_PROTO_KEY not in named_variables\n named_variables[checkpointable_lib.OBJECT_GRAPH_PROTO_KEY] = (\n _NoRestoreSaveable(\n tensor=object_graph_tensor,\n name=checkpointable_lib.OBJECT_GRAPH_PROTO_KEY))\n if (self._last_save_object_graph != graph_proto\n # When executing eagerly, we need to re-create SaveableObjects each time\n # save() is called so they pick up new Tensors passed to their\n # constructors. 
That means the Saver needs to be copied with a new\n # var_list.\n or context.executing_eagerly()):\n if self._last_save_object_graph is not None:\n self._last_save_saver = _copy_saver_with_new_var_list(\n old_saver=self._last_save_saver, new_var_list=named_variables)\n else:\n self._last_save_saver = saver_lib.Saver(var_list=named_variables)\n self._last_save_object_graph = graph_proto\n with ops.device(\"/cpu:0\"):\n save_path = self._last_save_saver.save(\n sess=_SessionWithFeedDictAdditions(\n session=session, feed_additions=feed_additions),\n save_path=file_prefix,\n write_meta_graph=False,\n global_step=checkpoint_number)\n return save_path\n\n def _global_variable_names(self):\n \"\"\"Generate a `tf.train.Saver`-style `var_list` using `variable.name`s.\"\"\"\n named_saveables, graph_proto = _serialize_object_graph(\n self._root_checkpointable)\n saver_names = {}\n for object_proto in graph_proto.nodes:\n for attribute_proto in object_proto.attributes:\n saver_names[attribute_proto.full_name] = named_saveables[\n attribute_proto.checkpoint_key]\n return saver_names\n\n def restore(self, save_path):\n \"\"\"Restore a training checkpoint.\n\n Restores `root_checkpointable` and any objects that it tracks\n (transitive). Either assigns values immediately if variables to restore have\n been created already, or defers restoration until the variables are\n created. Dependencies added to the `root_checkpointable` passed to the\n constructor after this call will be matched if they have a corresponding\n object in the checkpoint.\n\n When building a graph, restorations are added to the graph but not run.\n\n To disallow deferred loading, assert immediately that all checkpointed\n variables have been matched to variable objects:\n\n ```python\n saver = Saver(root)\n saver.restore(path).assert_consumed()\n ```\n\n An exception will be raised unless every object was matched and its\n variables already exist.\n\n When graph building, `assert_consumed()` indicates that all of the restore\n ops which will be created for this checkpoint have been created. They can be\n run via the `run_restore_ops()` function of the status object:\n\n ```python\n saver.restore(path).assert_consumed().run_restore_ops()\n ```\n\n If the checkpoint has not been consumed completely, then the list of restore\n ops will grow as more objects are added to the dependency graph.\n\n Name-based `tf.train.Saver` checkpoints can be loaded using this\n method. There is no deferred loading, and names are used to match\n variables. No restore ops are created/run until `run_restore_ops()` or\n `initialize_or_restore()` are called on the returned status object, even\n when executing eagerly. Re-encode name-based checkpoints using this\n object-based `Saver.save` as soon as possible.\n\n Args:\n save_path: The path to the checkpoint, as returned by `save` or\n `tf.train.latest_checkpoint`. If None (as when there is no latest\n checkpoint for `tf.train.latest_checkpoint` to return), returns an\n object which may run initializers for objects in the dependency\n graph. 
If the checkpoint was written by the name-based `tf.train.Saver`,\n names are used to match variables.\n\n Returns:\n A load status object, which can be used to make assertions about the\n status of checkpoint restoration and run initialization/restore ops\n (of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if\n `save_path` is `None`).\n\n If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`\n object is returned which runs restore ops from a name-based saver.\n \"\"\"\n if save_path is None:\n return InitializationOnlyStatus(self._root_checkpointable, ops.uid())\n in_graph_mode = not context.executing_eagerly()\n if in_graph_mode:\n if self._file_prefix_placeholder is None:\n with ops.device(\"/cpu:0\"):\n self._file_prefix_placeholder = constant_op.constant(\"model\")\n file_prefix_tensor = self._file_prefix_placeholder\n file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}\n else:\n with ops.device(\"/cpu:0\"):\n file_prefix_tensor = constant_op.constant(save_path)\n file_prefix_feed_dict = None\n reader = pywrap_tensorflow.NewCheckpointReader(save_path)\n try:\n object_graph_string = reader.get_tensor(\n checkpointable_lib.OBJECT_GRAPH_PROTO_KEY)\n except errors_impl.NotFoundError:\n # The object graph proto does not exist in this checkpoint. Try again with\n # name-based saving.\n return NameBasedSaverStatus(self, save_path)\n\n object_graph_proto = (\n checkpointable_object_graph_pb2.CheckpointableObjectGraph())\n object_graph_proto.ParseFromString(object_graph_string)\n if in_graph_mode and object_graph_proto == self._last_restore_object_graph:\n checkpoint = self._last_restore_checkpoint\n else:\n if in_graph_mode:\n dtype_map = None\n else:\n dtype_map = reader.get_variable_to_dtype_map()\n checkpoint = _CheckpointRestoreCoordinator(\n object_graph_proto=object_graph_proto,\n save_path=file_prefix_tensor,\n dtype_map=dtype_map)\n if in_graph_mode:\n if self._last_restore_object_graph is not None:\n raise NotImplementedError(\n \"Using a single Saver to restore different object graphs is not \"\n \"currently supported when graph building. Use a different Saver \"\n \"for each object graph (restore ops will be duplicated), or \"\n \"file a feature request if this limitation bothers you.\")\n self._last_restore_checkpoint = checkpoint\n self._last_restore_object_graph = object_graph_proto\n checkpointable_lib._CheckpointPosition( # pylint: disable=protected-access\n checkpoint=checkpoint, proto_id=0).restore(self._root_checkpointable)\n load_status = CheckpointLoadStatus(\n checkpoint,\n root_checkpointable=self._root_checkpointable,\n feed_dict=file_prefix_feed_dict)\n return load_status\n\n\n@tf_export(\"train.Checkpoint\")\nclass Checkpoint(checkpointable_lib.Checkpointable):\n \"\"\"Groups checkpointable objects, saving and restoring them.\n\n `Checkpoint`'s constructor accepts keyword arguments whose values are types\n that contain checkpointable state, such as `tf.train.Optimizer`\n implementations, `tf.Variable`, `tf.keras.Layer` implementations, or\n `tf.keras.Model` implementations. 
It saves these values with a checkpoint, and\n maintains a `save_counter` for numbering checkpoints.\n\n Example usage when graph building:\n\n ```python\n import tensorflow as tf\n import os\n\n checkpoint_directory = \"/tmp/training_checkpoints\"\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))\n train_op = optimizer.minimize( ... )\n status.assert_consumed() # Optional sanity checks.\n with tf.Session() as session:\n # Use the Session to restore variables, or initialize them if\n # tf.train.latest_checkpoint returned None.\n status.initialize_or_restore(session)\n for _ in range(num_training_steps):\n session.run(train_op)\n checkpoint.save(file_prefix=checkpoint_prefix)\n ```\n\n Example usage with eager execution enabled:\n\n ```python\n import tensorflow as tf\n import os\n\n tf.enable_eager_execution()\n\n checkpoint_directory = \"/tmp/training_checkpoints\"\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\n status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))\n for _ in range(num_training_steps):\n optimizer.minimize( ... ) # Variables will be restored on creation.\n status.assert_consumed() # Optional sanity checks.\n checkpoint.save(file_prefix=checkpoint_prefix)\n ```\n\n `Checkpoint.save` and `Checkpoint.restore` write and read object-based\n checkpoints, in contrast to `tf.train.Saver` which writes and reads\n `variable.name` based checkpoints. Object-based checkpointing saves a graph of\n dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s,\n etc.) with named edges, and this graph is used to match variables when\n restoring a checkpoint. It can be more robust to changes in the Python\n program, and helps to support restore-on-create for variables when executing\n eagerly. Prefer `tf.train.Checkpoint` over `tf.train.Saver` for new code.\n\n `Checkpoint` objects have dependencies on the objects passed as keyword\n arguments to their constructors, and each dependency is given a name that is\n identical to the name of the keyword argument for which it was created.\n TensorFlow classes like `Layer`s and `Optimizer`s will automatically add\n dependencies on their variables (e.g. \"kernel\" and \"bias\" for\n `tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing\n dependencies easy in user-defined classes, since `Model` hooks into attribute\n assignment. For example:\n\n ```python\n class Regress(tf.keras.Model):\n\n def __init__(self):\n super(Regress, self).__init__()\n self.input_transform = tf.keras.layers.Dense(10)\n # ...\n\n def call(self, inputs):\n x = self.input_transform(inputs)\n # ...\n ```\n\n This `Model` has a dependency named \"input_transform\" on its `Dense` layer,\n which in turn depends on its variables. As a result, saving an instance of\n `Regress` using `tf.train.Checkpoint` will also save all the variables created\n by the `Dense` layer.\n\n Attributes:\n save_counter: Incremented when `save()` is called. Used to number\n checkpoints.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Group objects into a training checkpoint.\n\n Args:\n **kwargs: Keyword arguments are set as attributes of this object, and are\n saved with the checkpoint. 
Values must be checkpointable objects.\n Raises:\n ValueError: If objects in `kwargs` are not checkpointable.\n \"\"\"\n super(Checkpoint, self).__init__()\n for k, v in sorted(kwargs.items(), key=lambda item: item[0]):\n if not isinstance(v, checkpointable_lib.CheckpointableBase):\n raise ValueError(\n (\"`Checkpoint` was expecting a checkpointable object (an object \"\n \"derived from `CheckpointableBase`), got %s. If you believe this \"\n \"object should be checkpointable (i.e. it is part of the \"\n \"TensorFlow Python API and manages state), please open an issue.\")\n % (v,))\n setattr(self, k, v)\n self._save_counter = None # Created lazily for restore-on-create.\n self._saver = CheckpointableSaver(weakref.ref(self))\n\n def _maybe_create_save_counter(self):\n \"\"\"Create a save counter if it does not yet exist.\"\"\"\n if self._save_counter is None:\n # Initialized to 0 and incremented before saving.\n with ops.device(\"/cpu:0\"):\n self._save_counter = add_variable(\n self, name=\"save_counter\", initializer=0, dtype=dtypes.int64)\n\n @property\n def save_counter(self):\n \"\"\"An integer variable which starts at zero and is incremented on save.\n\n Used to number checkpoints.\n\n Returns:\n The save counter variable.\n \"\"\"\n self._maybe_create_save_counter()\n return self._save_counter\n\n def save(self, file_prefix, session=None):\n \"\"\"Save a training checkpoint.\n\n The saved checkpoint includes variables created by this object and any\n checkpointable objects it depends on at the time `Checkpoint.save()` is\n called.\n\n Args:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix). Names are generated based on this\n prefix and `Checkpoint.save_counter`.\n session: The session to evaluate variables in. Ignored when executing\n eagerly. If not provided when graph building, the default session is\n used.\n\n Returns:\n The full path to the checkpoint.\n \"\"\"\n in_graph_mode = not context.executing_eagerly()\n if in_graph_mode:\n if session is None:\n session = ops.get_default_session()\n if self._save_counter is None:\n # When graph building, if this is a new save counter variable then it\n # needs to be initialized before assign_add. This is only an issue if\n # restore() has not been called first.\n session.run(self.save_counter.initializer)\n with ops.colocate_with(self.save_counter):\n assign_op = self.save_counter.assign_add(1)\n if in_graph_mode:\n session.run(assign_op)\n return self._saver.save(\n file_prefix=file_prefix,\n checkpoint_number=self.save_counter,\n session=session)\n\n def restore(self, save_path):\n \"\"\"Restore a training checkpoint.\n\n Restores this `Checkpoint` and any objects it depends on.\n\n When executing eagerly, either assigns values immediately if variables to\n restore have been created already, or defers restoration until the variables\n are created. Dependencies added after this call will be matched if they have\n a corresponding object in the checkpoint (the restore request will queue in\n any checkpointable object waiting for the expected dependency to be added).\n\n When graph building, restoration ops are added to the graph but not run\n immediately.\n\n To ensure that loading is complete and no more assignments will take place,\n use the `assert_consumed()` method of the status object returned by\n `restore`:\n\n ```python\n checkpoint = tf.train.Checkpoint( ... 
)\n checkpoint.restore(path).assert_consumed()\n ```\n\n An exception will be raised if any Python objects in the dependency graph\n were not found in the checkpoint, or if any checkpointed values do not have\n a matching Python object.\n\n When graph building, `assert_consumed()` indicates that all of the restore\n ops that will be created for this checkpoint have been created. They can be\n run via the `run_restore_ops()` method of the status object:\n\n ```python\n checkpoint.restore(path).assert_consumed().run_restore_ops()\n ```\n\n If the checkpoint has not been consumed completely, then the list of restore\n ops will grow as more objects are added to the dependency graph.\n\n Name-based `tf.train.Saver` checkpoints can be loaded using this\n method. There is no deferred loading, and names are used to match\n variables. No restore ops are created/run until `run_restore_ops()` or\n `initialize_or_restore()` are called on the returned status object, even\n when executing eagerly. Re-encode name-based checkpoints using\n `tf.train.Checkpoint.save` as soon as possible.\n\n Args:\n save_path: The path to the checkpoint, as returned by `save` or\n `tf.train.latest_checkpoint`. If None (as when there is no latest\n checkpoint for `tf.train.latest_checkpoint` to return), returns an\n object which may run initializers for objects in the dependency\n graph. If the checkpoint was written by the name-based `tf.train.Saver`,\n names are used to match variables.\n\n Returns:\n A load status object, which can be used to make assertions about the\n status of a checkpoint restoration and run initialization/restore ops.\n\n The returned status object has the following methods:\n - `assert_consumed()`:\n Raises an exception if any variables/objects are unmatched: either\n checkpointed values which don't have a matching Python object or\n Python objects in the dependency graph with no values in the\n checkpoint. This method returns the status object, and so may be\n chained with `initialize_or_restore` or `run_restore_ops`.\n - `initialize_or_restore(session=None)`:\n When graph building, runs variable initializers if `save_path` is\n `None`, but otherwise runs restore operations. If no `session` is\n explicitly specified, the default session is used. No effect for\n object-based checkpoints when executing eagerly (variables are\n initialized or restored eagerly).\n - `run_restore_ops(session=None)`:\n When graph building, runs restore operations. If no `session` is\n explicitly specified, the default session is used. No effect for\n object-based checkpoints when executing eagerly (restore operations\n are run eagerly). May only be called when `save_path` is not `None`.\n \"\"\"\n status = self._saver.restore(save_path=save_path)\n # Create the save counter now so it gets initialized with other variables\n # when graph building. Creating it earlier would lead to double\n # initialization when executing eagerly.\n self._maybe_create_save_counter()\n return status\n"
] | [
[
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.training.saver.Saver",
"tensorflow.core.protobuf.checkpointable_object_graph_pb2.CheckpointableObjectGraph.CheckpointableObject.SlotVariableReference",
"tensorflow.python.ops.variable_scope._get_default_variable_store",
"tensorflow.python.training.saver.BaseSaverBuilder.SaveSpec",
"tensorflow.python.framework.ops.uid",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.training.saver.BulkSaverBuilder",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.pywrap_tensorflow.NewCheckpointReader",
"tensorflow.python.training.checkpointable._SlotVariableRestoration",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.framework.ops.device",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.training.checkpointable._CheckpointPosition",
"tensorflow.python.training.saver.BaseSaverBuilder.OpListToDict",
"tensorflow.core.protobuf.checkpointable_object_graph_pb2.CheckpointableObjectGraph",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.control_flow_ops.no_op"
]
] |
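The checkpointable_utils.py row above implements TensorFlow's object-based checkpointing, where saved values are matched through a named object graph rather than through `variable.name`. A minimal round-trip sketch of the `tf.train.Checkpoint` API that code defines, assuming eager execution (TF 1.x after `tf.enable_eager_execution()`, or TF 2.x where it is the default); the directory and the `step` edge name are illustrative:

```python
# Sketch only: exercises tf.train.Checkpoint as documented in the row above.
# ckpt_dir and the "step" dependency name are made up for this example.
import os
import tensorflow as tf

ckpt_dir = "/tmp/tf_obj_ckpt"
step = tf.Variable(0, dtype=tf.int64)
checkpoint = tf.train.Checkpoint(step=step)   # adds a dependency named "step"

step.assign_add(1)
save_path = checkpoint.save(os.path.join(ckpt_dir, "ckpt"))  # e.g. ".../ckpt-1"

# Restoration is keyed by the object graph, so a freshly created variable
# attached under the same edge name picks up the saved value.
restored_step = tf.Variable(0, dtype=tf.int64)
status = tf.train.Checkpoint(step=restored_step).restore(
    tf.train.latest_checkpoint(ckpt_dir))
status.assert_consumed()      # raises if any checkpointed value is unmatched
assert int(restored_step.numpy()) == 1
```

With eager execution the assignment happens immediately; when graph building, the same status object exposes `run_restore_ops()` and `initialize_or_restore()`, as the `CheckpointLoadStatus` docstring above describes.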
jakgel/clusterbuster | [
"d79400a0faf43dece457d99b024b955aef544fc2"
] | [
"surveysim/music2/interpolate.py"
] | [
"import numpy as np\nimport scipy.interpolate as interpolate\nimport matplotlib.pyplot as plt\nimport clusterbuster.mathut as math\n\"\"\"\nStart with e.g. InterpolateRadio2D(psiFile = '../Analysis_MUSIC2/Hoeft_radio/mach_psi_tablefine(10,3).txt', inter=(10,6)) \n\"\"\"\n\n\n\n# from http://stackoverflow.com/questions/5328128/scipy-interpolation-of-large-matrix\ndef my_interp(X, Y, Z, x, y, spn=3):\n xs,ys = map(np.array,(x,y))\n z = np.zeros(xs.shape)\n for i,(x,y) in enumerate(zip(xs,ys)):\n # get the indices of the nearest x,y\n xi = np.argmin(np.abs(X[0,:]-x))\n yi = np.argmin(np.abs(Y[:,0]-y))\n xlo = max(xi-spn, 0)\n ylo = max(yi-spn, 0)\n xhi = min(xi+spn, X[0,:].size)\n yhi = min(yi+spn, Y[:,0].size)\n # make slices of X,Y,Z that are only a few items wide\n nX = X[xlo:xhi, ylo:yhi]\n nY = Y[xlo:xhi, ylo:yhi]\n nZ = Z[xlo:xhi, ylo:yhi]\n intp = interpolate.interp2d(nX, nY, nZ)\n z[i] = intp(x,y)[0]\n return z\n\n\n# from here on: done by myself\n\ndef LoadFile_psi(psiFile):\n \"\"\" Just gives the Mach number and Temperature values \"\"\"\n #=== FILE A ===#\n # read first line .... split it and convert sstring to float science float('1.31E+01') or for a list:map(float, ['3.76E+00', '1.31E+01', '1.14E+01'])\n\n\n with open(psiFile, 'r') as f:\n first_line = f.readline()\n psi_x = first_line.split()[2:] # Splits into list without first two elements\n psi_x = np.asarray( [float(i) for i in psi_x ] ) # Converts strings to floats # Converts strings to floats\n psi_y = np.loadtxt(psiFile,skiprows=0)[:,0]\n \n return psi_x, psi_y\n\n\ndef InterpolateRadio2D(psiFile='../Analysis_MUSIC2/Hoeft_radio/mach_psi_table.txt', machFile='../Analysis_MUSIC2/Hoeft_radio/q_mach_machr_table.txt', saveplot='../Analysis_MUSIC2/Hoeft_radio/interpolated', psiFileNew = False, machFileNew = False, inter=(10,3)):\n# Currently the mach number is interpolated in an logarithmic space which is much sparser at lower mach numbers then anticipated \n# I suspect an double-exponential function for mach (both efficiency dependency stepsize) \n \n # Note that the original grid given in 'Hoeft_radio/mach_psi_table.txt' is (quite) regular in log-loglog space, which makes it very simple to invoke an interpolation function!\n # Irregular data points would make it nececcary to use functions like scipy.interpolate.griddata(points, values, (grid_x, grid_y), method='cubic')\n \n plot_old = False\n plot_new = False\n plot_PhD = True\n \n ##==== psiFile for psi factor; machfile for mach-numbers conversion factors\n H_mach = np.loadtxt(machFile,skiprows=0) \n H_psi = np.loadtxt(psiFile,skiprows=0)[:,1::] # you wont get the temperature values ... 
read them separetely\n psi_x,psi_y = LoadFile_psi(psiFile)\n \n psi_x = np.log10( psi_x ) # converts to and log10 space\n psi_y = np.log10(np.log10( psi_y )) # converts to and log10(log10) space\n X, Y = np.meshgrid(psi_x, psi_y)\n Z = np.log10(H_psi)\n \n #interp_spline = interpolate.interp2d(x, y, Z) #, kind='cubic'\n interp_spline = interpolate.RectBivariateSpline(psi_y, psi_x, Z) #, bbox=[None, None, None, None], kx=3, ky=3, s=0\n \n xnew = np.arange(psi_x[0], psi_x[-1], (psi_x[-1]-psi_x[0])/(len(psi_x)*inter[0]) ) #np.arange(-4, 2, 4e-2) #\n ynew = np.arange(psi_y[0], psi_y[-1], (psi_y[-1]-psi_y[0])/(len(psi_y)*inter[1]) ) #np.arange(0.2, 3, 2e-2) #\n Znew = interp_spline(ynew, xnew )\n \n keV2K = 1.16e7 # Translates keV to Kelvin\n \n if plot_old:\n plt.plot( np.arange(0, len(psi_x), 1 ), psi_x )\n plt.plot( np.arange(0, len(psi_y), 1 ), psi_y )\n plt.savefig(saveplot + '_linearity.png')\n \n fig = plt.figure()\n \n ax1 = plt.subplot(121)\n ax1.pcolor( np.log10(keV2K) + psi_x, psi_y, Z)\n ax1.set_title(\"Sparsely sampled function\")\n ax1.set_xlim([3.1, 9])\n ax1.set_ylim([psi_y[0], 0.5])\n ax1.set_xlabel('$\\\\mathrm{log_{10}(T)\\\\,[K]}$ ')\n ax1.set_ylabel('$\\\\mathrm{log_{10}(log_{10}(M))\\\\,[]}$')\n \n \n ax2 = plt.subplot(122)\n im2 = ax2.pcolor( np.log10(keV2K) + xnew, ynew, Znew)\n ax2.set_title(\"Interpolated function\")\n ax2.set_xlim([3.1, 9])\n ax2.set_ylim([psi_y[0], 0.5])\n ax2.set_xlabel('$\\\\mathrm{log_{10}(T)\\\\,[K]}$ ')\n ax2.set_yticklabels([])\n \n mach = [1.5,2.2,3.0,10.0]\n c = [plt.cm.rainbow( (np.log10(np.log10(m))-ax1.get_ylim()[0])/abs(ax1.get_ylim()[1]-ax1.get_ylim()[0]) ) for m in mach]\n for ii,m in enumerate(mach):\n ax1.plot( [ax1.get_xlim()[0], ax1.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 ) \n ax2.plot( [ax2.get_xlim()[0], ax2.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 ) \n \n ax1.text(ax1.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)\n ax2.text(ax2.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)\n \n fig.subplots_adjust(right=0.8)\n cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])\n fig.colorbar(im2, cax=cbar_ax)\n plt.savefig(saveplot + '.png')\n \n if plot_new:\n fig = plt.figure()\n \n ax1 = plt.subplot(111)\n im2 = ax1.pcolor( np.log10(keV2K) + xnew, ynew, Znew, vmin=-8)\n# ax1.set_title(\"Interpolated function\")\n ax1.set_xlim([7, 8.4])\n ax1.set_ylim([np.log10(np.log10(1.7)), np.log10(np.log10(10.))])\n ax1.set_xlabel('$\\\\mathrm{log_{10}(T)\\\\,[K]}$ ')\n ax1.set_ylabel('$M$ ')\n \n y_ticks = [np.log10(np.log10(m)) for m in [1.7,2.5,4,10]]\n print( ['%.2e' % (y) for y in y_ticks], [10**(10**y) for y in y_ticks] )\n ax1.set_yticklabels([10**(10**y) for y in y_ticks])\n plt.yticks(y_ticks)\n \n# temp = [1.5,2.2,3.0,10.0]\n# c = [plt.cm.rainbow( (np.log10(np.log10(m))-ax1.get_ylim()[0])/abs(ax1.get_ylim()[1]-ax1.get_ylim()[0]) ) for m in mach]\n# for ii,m in enumerate(mach):\n# ax1.plot( [ax1.get_xlim()[0], ax1.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 ) \n# ax2.plot( [ax2.get_xlim()[0], ax2.get_xlim()[1]] , [np.log10(np.log10(m))]*2, '-', c=c[ii], lw=1.5, alpha=0.9 ) \n# \n# ax1.text(ax1.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)\n# ax2.text(ax2.get_xlim()[0]+0.3, np.log10(np.log10(m))+0.02, 'Mach$=$%4.1f' % (m), fontsize=10, color=c[ii], alpha=0.9)\n \n 
fig.subplots_adjust(right=0.8)\n cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])\n fig.colorbar(im2, cax=cbar_ax, label='$\\log_{10}\\Phi$',)\n plt.savefig(saveplot + '_DSA.pdf')\n plt.savefig(saveplot + '_DSA.png', dpi=800) \n \n \n if plot_PhD:\n fig = plt.figure()\n \n temp = np.linspace(2,20,20)\n print(temp)\n mach = np.linspace(2,7,300)\n psi_x,psi_y = LoadFile_psi(psiFile)\n import itertools\n H,M,T = [],[],[]\n for t in temp:\n results_temp = math.find_closest(psi_x, t)\n results_mach = math.find_closest(psi_y, mach) # \n H.append(H_psi[results_mach,np.ones_like(results_mach)*results_temp])\n M.append(mach)\n T.append(np.ones_like(results_mach)*t)\n \n H = list(itertools.chain.from_iterable(H))\n M = list(itertools.chain.from_iterable(M))\n T = list(itertools.chain.from_iterable(T)) \n \n plt.scatter(M,np.log10(H),c=T,alpha=0.1,s=5) \n cb = plt.colorbar(label='Downstream Temperature [keV]')\n cb.set_alpha(1)\n cb.draw_all()\n plt.xlabel('Mach number $M$')\n plt.ylabel('$\\log_{10}\\,\\Phi(M,T)$')\n plt.savefig(saveplot + '_PhD.pdf')\n plt.savefig(saveplot + '_PhD.png', dpi=800) \n \n \n # Save File A\n if psiFileNew:\n location = psiFileNew\n else:\n location = psiFile.replace('.txt', 'fine(%i,%i).txt' % (inter[0],inter[1]) ) \n \n header = '# Mach'\n for x in xnew:\n header += '%13.4e' % (10**x)\n\n mf = open(location,\"w\")\n mf.write(header + '\\n')\n \n for ii,y in enumerate(ynew): \n string = '%9.4f' % (10**(10**y)) + ''.join(['%13.4e' % (10**z) for z in Znew[ii][:]])\n mf.write(string + '\\n') \n\n mf.close() \n \n \n #=== FILE B ===#\n Y_new = np.empty( (1,1) )\n for ii,h in enumerate(H_mach.T):\n interp_spline = interpolate.interp1d( 10**psi_y , h, kind='cubic') \n\n if Y_new.shape[0] > 1:\n Y_new = np.hstack( (Y_new, np.expand_dims(interp_spline( 10**ynew ), axis=1) ) )\n else:\n Y_new = np.expand_dims(interp_spline( 10**ynew ), axis=1)\n \n # Save File B\n if machFileNew:\n location = machFileNew\n else:\n location = machFile.replace('.txt', 'fine(%i,%i).txt' % (inter[0],inter[1]) ) \n \n header = '# q M r M*(1-1/r) s'\n \n mf = open(location,\"w\")\n mf.write(header + '\\n')\n\n for ii,y in enumerate(10**ynew): \n string = ''.join(['%14.6e' % (y) for y in Y_new[:][ii]]) #some numbers are very large and ewould need a good margin\n mf.write(string + '\\n') \n\n mf.close() \n\n return 0\n \n\nif __name__ == \"__main__\":\n InterpolateRadio2D(psiFile = '../Analysis_MUSIC2/Hoeft_radio/mach_psi_tablefine(10,3).txt', inter=(3,2)) #(90,27) \n\n"
] | [
[
"scipy.interpolate.interp1d",
"numpy.empty",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"numpy.abs",
"scipy.interpolate.RectBivariateSpline",
"numpy.ones_like",
"scipy.interpolate.interp2d",
"matplotlib.pyplot.subplot",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel",
"numpy.loadtxt"
]
] |
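The interpolate.py row above refines a regular (log10 T, log10 log10 M) table of Psi values with `scipy.interpolate.RectBivariateSpline`. A self-contained sketch of just that refinement step, using a synthetic stand-in grid rather than the Hoeft table (every value below is a placeholder):

```python
# Sketch of the grid refinement in InterpolateRadio2D above; the axes and Z
# values are synthetic placeholders, not the mach_psi table.
import numpy as np
from scipy import interpolate

log_T = np.linspace(-1.0, 2.0, 10)       # log10(T [keV]), illustrative axis
loglog_M = np.linspace(0.05, 0.5, 15)    # log10(log10(M)), illustrative axis
Z = np.outer(np.sin(loglog_M), np.cos(log_T))  # stand-in for log10(Psi)

# Rows of Z are indexed by the first axis (Mach), columns by the second (T),
# matching RectBivariateSpline(psi_y, psi_x, Z) in the source.
spline = interpolate.RectBivariateSpline(loglog_M, log_T, Z)

fine_M = np.linspace(loglog_M[0], loglog_M[-1], 3 * loglog_M.size)
fine_T = np.linspace(log_T[0], log_T[-1], 10 * log_T.size)
Z_fine = spline(fine_M, fine_T)          # shape (len(fine_M), len(fine_T))
```

Because the original table is regular in this transformed space, the rectangular-grid spline applies directly; scattered samples would instead need `scipy.interpolate.griddata`, as the source comment notes.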
ammar-khan/raspberry-pi-opencv-dnn-face-detection | [
"04ea998ee9e4d7bf71da022b0d8613940e8e7cfb"
] | [
"main.py"
] | [
"##\n# Copyright 2018, Ammar Ali Khan\n# Licensed under MIT.\n# Since: v1.0.0\n##\n\nimport time\nimport cv2\nimport numpy as np\nfrom src.common.package.config import application\nfrom src.opencv.package.config import application as _application\nfrom src.common.package.http import server as _server\nfrom src.common.package.http.handler import Handler\nfrom src.common.package.camera.capture import Capture as _capture\nfrom src.common.package.frame.action import Action as _frame\nfrom src.common.package.frame.draw import Draw as _draw\nfrom src.opencv.package.opencv.opencv import OpenCV\n\n# Constant\n_opencv = OpenCV()\n\n\n##\n# StreamHandler class - inherit Handler\n# This class provide handler for HTTP streaming\n# Note: this class should override Handler.stream\n##\nclass StreamHandler(Handler):\n\n ##\n # Override method Handler.stream()\n ##\n def stream(self):\n Handler.stream(self)\n print('[INFO] Overriding stream method...')\n\n # Initialise capture\n capture = _capture(src=application.CAPTURING_DEVICE,\n use_pi_camera=application.USE_PI_CAMERA,\n resolution=application.RESOLUTION,\n frame_rate=application.FRAME_RATE)\n\n if application.USE_PI_CAMERA:\n print('[INFO] Warming up pi camera...')\n else:\n print('[INFO] Warming up camera...')\n\n time.sleep(2.0)\n\n print('[INFO] Start capturing...')\n\n while True:\n # Read a frame from capture\n frame = capture.read()\n\n # Down size frame to 50% (to increase performance on Raspberry Pi)\n # frame = _frame.scale(frame=frame, scale=0.5)\n\n # Convert frame to gray (to increase performance on Raspberry Pi)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Get frame dimensions\n (height, width) = frame.shape[:2]\n\n # OpenCV detection\n detections = _opencv.dnn_face_detector(frame=frame,\n scale_factor=1.0,\n size=(300, 300),\n mean=(104.0, 177.0, 123.0))\n\n # Up size frame to 50% (how the frame was before down sizing)\n # frame = _frame.scale(frame=frame, scale=2)\n\n # If returns any detection\n for i in range(0, detections.shape[2]):\n # Get confidence associated with the detection\n confidence = detections[0, 0, i, 2]\n\n # Filter weak detection\n if confidence < _application.CONFIDENCE:\n continue\n\n # Calculate coordinates\n box = detections[0, 0, i, 3:7] * np.array([width,\n height,\n width,\n height])\n\n (left, top, right, bottom) = box.astype('int')\n\n coordinates = {'left': left,\n 'top': top,\n 'right': right,\n 'bottom': bottom}\n\n text = \"{:.2f}%\".format(confidence * 100)\n\n frame = _draw.rectangle(frame=frame,\n coordinates=coordinates,\n text=text)\n\n # Write date time on the frame\n frame = _draw.text(frame=frame,\n coordinates={'left': application.WIDTH - 150, 'top': application.HEIGHT - 20},\n text=time.strftime('%d/%m/%Y %H:%M:%S', time.localtime()),\n font_color=(0, 0, 255))\n\n # Convert frame into buffer for streaming\n retval, buffer = cv2.imencode('.jpg', frame)\n\n # Write buffer to HTML Handler\n self.wfile.write(b'--FRAME\\r\\n')\n self.send_header('Content-Type', 'image/jpeg')\n self.send_header('Content-Length', len(buffer))\n self.end_headers()\n self.wfile.write(buffer)\n self.wfile.write(b'\\r\\n')\n\n\n##\n# Method main()\n##\ndef main():\n try:\n address = ('', application.HTTP_PORT)\n server = _server.Server(address, StreamHandler)\n print('[INFO] HTTP server started successfully at %s' % str(server.server_address))\n print('[INFO] Waiting for client to connect to port %s' % str(application.HTTP_PORT))\n server.serve_forever()\n except Exception as e:\n server.socket.close()\n 
print('[ERROR] Exception: %s' % str(e))\n print('[INFO] HTTP server closed.')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array"
]
] |
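The main.py row above delegates detection to an `_opencv.dnn_face_detector` wrapper and then decodes the resulting `(1, 1, N, 7)` detection tensor. A sketch of what such a wrapper typically does with OpenCV's DNN module, assuming a 300x300 SSD face model in Caffe format (the model file names below are placeholders, not paths from the repo):

```python
# Sketch under stated assumptions: any Caffe SSD face model with 300x300 input
# and mean (104, 177, 123) decodes this way. Model file names are placeholders.
import cv2
import numpy as np

net = cv2.dnn.readNetFromCaffe("deploy.prototxt", "face_ssd.caffemodel")

def detect_faces(frame, confidence_threshold=0.5):
    h, w = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                 (300, 300), (104.0, 177.0, 123.0))
    net.setInput(blob)
    detections = net.forward()              # shape (1, 1, N, 7)
    boxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence < confidence_threshold:
            continue
        # Coordinates come back normalized; scale to pixels as main.py does.
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        left, top, right, bottom = box.astype("int")
        boxes.append((float(confidence), (left, top, right, bottom)))
    return boxes
```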
ashleylqx/AIB | [
"77e418cac52f0ca5f2a7c54927468a7bd75a8fc9"
] | [
"CUB-experiments/nearest_embed.py"
] | [
"# adapted from https://github.com/nadavbh12/VQ-VAE\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.autograd import Function, Variable\nimport torch.nn.functional as F\n\nfrom config import *\n\nimport pdb\n\n\nclass NearestEmbedFunc(Function):\n \"\"\"\n Input:\n ------\n x - (batch_size, emb_dim, *)\n Last dimensions may be arbitrary\n emb - (emb_dim, num_emb)\n \"\"\"\n @staticmethod\n def forward(ctx, input, emb):\n # if input.size(1) != emb.size(0):\n # raise RuntimeError('invalid argument: input.size(1) ({}) must be equal to emb.size(0) ({})'.\n # format(input.size(1), emb.size(0)))\n\n # emb = emb.expand(input.size(1), emb.size(1))\n emb_ex = emb.expand(input.size(1), emb.size(1)) # new2\n\n # save sizes for backward\n ctx.batch_size = input.size(0)\n ctx.num_latents = int(np.prod(np.array(input.size()[2:])))\n # ctx.emb_dim = emb.size(0)\n # ctx.num_emb = emb.size(1)\n ctx.emb_dim = emb_ex.size(0)\n ctx.num_emb = emb_ex.size(1) # new2\n ctx.input_type = type(input)\n ctx.dims = list(range(len(input.size())))\n\n # expand to be broadcast-able\n x_expanded = input.unsqueeze(-1)\n num_arbitrary_dims = len(ctx.dims) - 2\n if num_arbitrary_dims:\n # emb_expanded = emb.view(emb.shape[0], *([1] * num_arbitrary_dims), emb.shape[1])\n emb_expanded = emb_ex.view(emb_ex.shape[0], *([1] * num_arbitrary_dims), emb_ex.shape[1]) # new2\n else:\n # emb_expanded = emb\n emb_expanded = emb_ex # new2\n\n # find nearest neighbors\n # dist = torch.norm(x_expanded - emb_expanded, 2, 1)\n dist = torch.pow(x_expanded - emb_expanded, 2) # (batch_size, emb_dim, *, num_emb) # new2\n _, argmin = dist.min(-1)\n shifted_shape = [input.shape[0], *list(input.shape[2:]) ,input.shape[1]]\n # pdb.set_trace()\n result = emb.t().index_select(0, argmin.view(-1)).view(shifted_shape).permute(0, ctx.dims[-1], *ctx.dims[1:-1]) # new2\n\n ctx.save_for_backward(argmin)\n return result.contiguous(), argmin\n\n @staticmethod\n def backward(ctx, grad_output, argmin=None):\n # pdb.set_trace()\n grad_input = grad_emb = None\n if ctx.needs_input_grad[0]:\n grad_input = grad_output\n\n if ctx.needs_input_grad[1]:\n argmin, = ctx.saved_variables\n latent_indices = torch.arange(ctx.num_emb).type_as(argmin)\n idx_choices = (argmin.view(-1, 1) == latent_indices.view(1, -1)).type_as(grad_output.data)\n n_idx_choice = idx_choices.sum(0)\n n_idx_choice[n_idx_choice == 0] = 1\n idx_avg_choices = idx_choices / n_idx_choice\n grad_output = grad_output.permute(0, *ctx.dims[2:], 1).contiguous()\n grad_output = grad_output.view(ctx.batch_size * ctx.num_latents, ctx.emb_dim)\n # pdb.set_trace()\n # grad_emb = torch.sum(grad_output.data.view(-1, ctx.emb_dim, 1) *\n # idx_avg_choices.view(-1, 1, ctx.num_emb), 0)\n grad_emb = torch.sum(grad_output.data.view(-1, 1) *\n idx_avg_choices.view(-1, ctx.num_emb), 0, keepdim=True) # new2\n return grad_input, grad_emb, None, None\n\n\ndef nearest_embed(x, emb):\n return NearestEmbedFunc().apply(x, emb)\n\n\nclass NearestEmbed(nn.Module):\n def __init__(self, num_embeddings, embeddings_dim, rd_init=True):\n super(NearestEmbed, self).__init__()\n if rd_init:\n self.weight = nn.Parameter(torch.rand(embeddings_dim, num_embeddings))\n else:\n # self.weight = nn.Parameter(torch.linspace(0.0, 1.0, num_embeddings).unsqueeze(0).expand(embeddings_dim, num_embeddings))\n self.weight = nn.Parameter(torch.linspace(lin_min, lin_max, num_embeddings).unsqueeze(0).expand(embeddings_dim, num_embeddings))\n\n print('Init emb weight:', self.weight.data)\n\n def forward(self, x, weight_sg=False):\n 
\"\"\"Input:\n ---------\n x - (batch_size, emb_size, *)\n \"\"\"\n return nearest_embed(x, self.weight.detach() if weight_sg else self.weight)\n\n\n# adapted from https://github.com/rosinality/vq-vae-2-pytorch/blob/master/vqvae.py#L25\n# that adapted from https://github.com/deepmind/sonnet\n\n\nclass NearestEmbedEMA(nn.Module):\n def __init__(self, n_emb, emb_dim, decay=0.99, eps=1e-5, rd_init=True):\n super(NearestEmbedEMA, self).__init__()\n self.decay = decay\n self.eps = eps\n self.embeddings_dim = emb_dim\n self.n_emb = n_emb\n self.emb_dim = emb_dim\n if rd_init:\n embed = torch.rand(emb_dim, n_emb)\n else:\n # embed = torch.linspace(0.0, 1.0, n_emb).unsqueeze(0).expand(emb_dim, n_emb)\n embed = torch.linspace(lin_min, lin_max, n_emb).unsqueeze(0).expand(emb_dim, n_emb)\n self.register_buffer('weight', embed)\n self.register_buffer('cluster_size', torch.zeros(n_emb))\n self.register_buffer('embed_avg', embed.clone())\n\n print('Init emb weight ema:', self.weight.data)\n\n def forward(self, x, weight_sg=None):\n \"\"\"Input:\n ---------\n x - (batch_size, emb_size, *)\n \"\"\"\n emb_ex = self.weight.expand(x.size(1), self.weight.size(1)) # new2\n #emb_avg_ex = self.embed_avg.expand(x.size(1), self.weight.size(1)) # new2\n\n dims = list(range(len(x.size())))\n x_expanded = x.unsqueeze(-1)\n num_arbitrary_dims = len(dims) - 2\n # if num_arbitrary_dims:\n # emb_expanded = self.weight.view(self.emb_dim, *([1] * num_arbitrary_dims), self.n_emb)\n # else:\n # emb_expanded = self.weight\n\n emb_size = x.size(1)\n if num_arbitrary_dims:\n #emb_expanded = self.weight.expand(emb_size, self.n_emb).view(self.emb_dim, *([1] * num_arbitrary_dims), self.n_emb)\n emb_expanded = emb_ex.expand(emb_size, self.n_emb).view(self.emb_dim, *([1] * num_arbitrary_dims), self.n_emb)\n else:\n #emb_expanded = self.weight.expand(emb_size, self.n_emb)\n emb_expanded = emb_ex.expand(emb_size, self.n_emb)\n\n # find nearest neighbors\n # dist = torch.norm(x_expanded - emb_expanded, 2, 1)\n dist = torch.pow(x_expanded - emb_expanded, 2) # (batch_size, emb_dim, *, num_emb) # new2\n _, argmin = dist.min(-1)\n shifted_shape = [x.shape[0], *list(x.shape[2:]), x.shape[1]]\n # result = emb_ex.t().index_select(0, argmin.view(-1)).view(shifted_shape).permute(0, dims[-1], *dims[1:-1]) # (batch_size, emb_dim, *, num_emb) # new2\n result = self.weight.t().index_select(0, argmin.view(-1)).view(shifted_shape).permute(0, dims[-1], *dims[1:-1]) # (batch_size, emb_dim, *, num_emb) # new2\n # result = self.weight.expand(emb_size, self.n_emb).t().index_select(0, argmin.view(-1)).view(shifted_shape).permute(0, dims[-1], *dims[1:-1])\n\n if self.training:\n latent_indices = torch.arange(self.n_emb).type_as(argmin)\n emb_onehot = (argmin.view(-1, 1) == latent_indices.view(1, -1)).type_as(x.data)\n n_idx_choice = emb_onehot.sum(0)\n n_idx_choice[n_idx_choice == 0] = 1\n # pdb.set_trace()\n # flatten = x.permute(1, 0, *dims[-2:]).contiguous().view(x.shape[1], -1)\n num_arbitrary_dims = len(dims) - 2\n if num_arbitrary_dims:\n # flatten = x.permute(1, 0, *dims[-2:]).contiguous().view(x.shape[1], -1)\n # flatten = x.permute(1, 0, *dims[-2:]).contiguous().view(1, -1)\n flatten = x.view(1, -1)\n else:\n # flatten = x.permute(1, 0).contiguous()\n # flatten = x.permute(1, 0).contiguous().view(1, -1)\n flatten = x.view(1, -1)\n\n self.cluster_size.data.mul_(self.decay).add_(\n 1 - self.decay, n_idx_choice\n )\n # pdb.set_trace()\n embed_sum = flatten @ emb_onehot # -----dc0.99\n # embed_sum = torch.pow(flatten.t() - emb_onehot, 2).mean(0) # 
----dc0.99_s\n #pdb.set_trace()\n self.embed_avg.data.mul_(self.decay).add_(1 - self.decay, embed_sum)\n #emb_avg_ex.data.mul_(self.decay).add_(1 - self.decay, embed_sum)\n #pdb.set_trace()\n\n n = self.cluster_size.sum()\n cluster_size = (\n (self.cluster_size + self.eps) / (n + self.n_emb * self.eps) * n\n )\n embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)\n self.weight.data.copy_(embed_normalized) # ---dc0.99\n # self.weight.data.copy_(self.embed_avg) # -------dc0.99_s\n\n #embed_normalized = emb_avg_ex / cluster_size.unsqueeze(0)\n #self.weight.data.copy_(embed_normalized.mean(0, keepdim=True))\n #self.embed_avg.data.copy_(emb_avg_ex.mean(0, keepdim=True))\n\n return result, argmin\n"
] | [
[
"torch.linspace",
"torch.rand",
"torch.arange",
"torch.zeros",
"torch.pow"
]
] |
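A minimal usage sketch for the `nearest_embed.py` row above (not part of the original file): the `# new2` edits replace the upstream per-vector codebook lookup with an elementwise lookup against a shared scalar codebook, so the shapes only reconcile when `embeddings_dim == 1`. The module path `nearest_embed`, the presence of a `config.py` on the path (the file does `from config import *` at import time), and the older-PyTorch runtime the file targets (e.g. `ctx.saved_variables`) are assumptions here.

```python
# Sketch: driving the quoted NearestEmbed module (assumed saved as nearest_embed.py,
# with a config.py providing lin_min/lin_max importable next to it).
# embeddings_dim=1 because the "# new2" variant quantizes each tensor entry
# against a shared scalar codebook.
import torch
import torch.nn.functional as F

from nearest_embed import NearestEmbed

vq = NearestEmbed(num_embeddings=8, embeddings_dim=1, rd_init=True)
x = torch.randn(4, 1, 32, 32, requires_grad=True)  # (batch, emb_dim, H, W)

# Straight-through pass: codebook detached, gradient flows back to x unchanged.
z_q, argmin = vq(x, weight_sg=True)
# Codebook pass: encoder output detached, gradient flows to vq.weight instead
# (backward through the codebook relies on the legacy ctx.saved_variables API).
emb_q, _ = vq(x.detach())

# The usual pair of VQ-VAE auxiliary losses around this op.
vq_loss = F.mse_loss(emb_q, x.detach())      # moves codebook toward encoder
commit_loss = F.mse_loss(x, emb_q.detach())  # commits encoder to codebook

print(z_q.shape, argmin.shape)  # torch.Size([4, 1, 32, 32]) for both
```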
hartmanwilliam/federated | [
"ecf51cdf8b86cbd000f6edc5715dc904bce07540",
"ecf51cdf8b86cbd000f6edc5715dc904bce07540",
"ecf51cdf8b86cbd000f6edc5715dc904bce07540"
] | [
"tensorflow_federated/python/research/optimization/stackoverflow/dataset.py",
"tensorflow_federated/python/core/backends/mapreduce/canonical_form_test.py",
"tensorflow_federated/python/research/triehh/triehh_tff_test.py"
] | [
"# Lint as: python3\n# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Data loader for Stackoverflow.\"\"\"\nfrom typing import List\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nEVAL_BATCH_SIZE = 100\n\n\ndef create_vocab(vocab_size):\n \"\"\"Creates vocab from `vocab_size` most common words in Stackoverflow.\"\"\"\n vocab_dict = tff.simulation.datasets.stackoverflow.load_word_counts()\n return list(vocab_dict.keys())[:vocab_size]\n\n\ndef split_input_target(chunk):\n \"\"\"Generate input and target data.\n\n The task of language model is to predict the next word.\n\n Args:\n chunk: A Tensor of text data.\n\n Returns:\n A namedtuple of input and target data.\n \"\"\"\n input_text = tf.map_fn(lambda x: x[:-1], chunk)\n target_text = tf.map_fn(lambda x: x[1:], chunk)\n return (input_text, target_text)\n\n\ndef build_to_ids_fn(vocab, max_seq_len):\n \"\"\"Constructs function mapping examples to sequences of token indices.\"\"\"\n _, _, bos, eos = get_special_tokens(len(vocab))\n\n table_values = np.arange(len(vocab), dtype=np.int64)\n table = tf.lookup.StaticVocabularyTable(\n tf.lookup.KeyValueTensorInitializer(vocab, table_values),\n num_oov_buckets=1)\n\n def to_ids(example):\n sentence = tf.reshape(example['tokens'], shape=[1])\n words = tf.strings.split(sentence, sep=' ').values\n truncated_words = words[:max_seq_len]\n tokens = table.lookup(truncated_words) + 1\n tokens = tf.cond(\n tf.less(tf.size(tokens), max_seq_len),\n lambda: tf.concat([tokens, [eos]], 0), lambda: tokens)\n\n return tf.concat([[bos], tokens], 0)\n\n return to_ids\n\n\ndef batch_and_split(dataset, max_seq_len, batch_size):\n return dataset.padded_batch(\n batch_size, padded_shapes=[max_seq_len + 1]).map(\n split_input_target, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\ndef get_special_tokens(vocab_size):\n \"\"\"Gets tokens dataset preprocessing code will add to Stackoverflow.\"\"\"\n pad = 0\n oov = vocab_size + 1\n bos = vocab_size + 2\n eos = vocab_size + 3\n return pad, oov, bos, eos\n\n\ndef create_train_dataset_preprocess_fn(vocab: List[str],\n client_batch_size: int,\n client_epochs_per_round: int,\n max_seq_len: int,\n max_training_elements_per_user: int,\n max_shuffle_buffer_size=10000):\n \"\"\"Creates preprocessing functions for stackoverflow data.\n\n This function returns a function which takes a dataset and returns a dataset,\n generally for mapping over a set of unprocessed client datasets during\n training.\n\n Args:\n vocab: Vocabulary which defines the embedding.\n client_batch_size: Integer representing batch size to use on the clients.\n client_epochs_per_round: Number of epochs for which to repeat train client\n dataset.\n max_seq_len: Integer determining shape of padded batches. 
Sequences will be\n      padded up to this length, and sentences longer than `max_seq_len` will be\n      truncated to this length.\n    max_training_elements_per_user: Integer controlling the maximum number of\n      elements to take per user. If -1, takes all elements for each user.\n    max_shuffle_buffer_size: Maximum shuffle buffer size.\n\n  Returns:\n    `preprocess_train`, a function which takes an unprocessed client dataset\n    and returns a preprocessed one, as described above.\n  \"\"\"\n  if client_batch_size <= 0:\n    raise ValueError('client_batch_size must be a positive integer; you have '\n                     'passed {}'.format(client_batch_size))\n  elif client_epochs_per_round <= 0:\n    raise ValueError('client_epochs_per_round must be a positive integer; you '\n                     'have passed {}'.format(client_epochs_per_round))\n  elif max_seq_len <= 0:\n    raise ValueError('max_seq_len must be a positive integer; you have '\n                     'passed {}'.format(max_seq_len))\n  elif max_training_elements_per_user < -1:\n    raise ValueError(\n        'max_training_elements_per_user must be an integer at '\n        'least -1; you have passed {}'.format(max_training_elements_per_user))\n\n  if (max_training_elements_per_user == -1 or\n      max_training_elements_per_user > max_shuffle_buffer_size):\n    shuffle_buffer_size = max_shuffle_buffer_size\n  else:\n    shuffle_buffer_size = max_training_elements_per_user\n\n  # TODO(b/155408842): need further investigation on why `tff.tf_computation`\n  # decorator causes b/153363900 for `to_ids`, and large memory consumption.\n  def preprocess_train(dataset):\n    to_ids = build_to_ids_fn(vocab, max_seq_len)\n    dataset = dataset.take(max_training_elements_per_user)\n    dataset = dataset.shuffle(shuffle_buffer_size)\n    dataset = dataset.repeat(client_epochs_per_round)\n    dataset = dataset.map(\n        to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n    return batch_and_split(dataset, max_seq_len, client_batch_size)\n\n  return preprocess_train\n\n\ndef create_test_dataset_preprocess_fn(vocab: List[str], max_seq_len: int):\n  \"\"\"Creates a preprocessing function for stackoverflow data.\n\n  This function returns a function which represents preprocessing logic\n  for use on centralized validation and test datasets outside of TFF.\n\n  Args:\n    vocab: Vocabulary which defines the embedding.\n    max_seq_len: Integer determining shape of padded batches. 
Sequences will be\n padded up to this length, and sentences longer than `max_seq_len` will be\n truncated to this length.\n\n Returns:\n `preprocess_val_and_test`, as described above.\n \"\"\"\n if max_seq_len <= 0:\n raise ValueError('max_seq_len must be a positive integer; you have '\n 'passed {}'.format(max_seq_len))\n\n def preprocess_val_and_test(dataset):\n to_ids = build_to_ids_fn(vocab, max_seq_len)\n id_dataset = dataset.map(\n to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return batch_and_split(id_dataset, max_seq_len, EVAL_BATCH_SIZE)\n\n return preprocess_val_and_test\n\n\ndef construct_word_level_datasets(vocab_size: int,\n client_batch_size: int,\n client_epochs_per_round: int,\n max_seq_len: int,\n max_training_elements_per_user: int,\n num_validation_examples: int,\n max_shuffle_buffer_size=10000):\n \"\"\"Preprocessing for Stackoverflow data.\n\n Notice that this preprocessing function *ignores* the heldout Stackoverflow\n dataset for consistency with the other datasets in the proposed optimization\n paper, and returns a validation/test split of the Stackoverflow \"test\" data,\n containing more examples from users in the Stackoverflow train dataset.\n\n Args:\n vocab_size: Integer representing size of the vocab to use. Vocabulary will\n then be the `vocab_size` most frequent words in the Stackoverflow dataset.\n client_batch_size: Integer representing batch size to use on the clients.\n client_epochs_per_round: Number of epochs for which to repeat train client\n dataset.\n max_seq_len: Integer determining shape of padded batches. Sequences will be\n padded up to this length, and sentences longer than `max_seq_len` will be\n truncated to this length.\n max_training_elements_per_user: Integer controlling the maximum number of\n elements to take per user. 
If -1, takes all elements for each user.\n num_validation_examples: Number of examples from Stackoverflow test set to\n use for validation on each round.\n max_shuffle_buffer_size: Maximum shuffle buffer size.\n\n Returns:\n stackoverflow_train: An instance of `tff.simulation.ClientData`\n representing Stackoverflow data for training.\n stackoverflow_validation: A split of the Stackoverflow Test data as outlined\n in `tff.simulation.datasets.stackoverflow`, containing at most\n `num_validation_examples` examples.\n stackoverflow_test: A split of the same Stackoverflow Test data containing\n the examples not used in `stackoverflow_validation`.\n \"\"\"\n if num_validation_examples < 1:\n raise ValueError(\n 'num_validation_examples must be an integer at '\n 'least 1; you have passed {}'.format(num_validation_examples))\n elif vocab_size <= 0:\n raise ValueError('vocab_size must be a positive integer; you have '\n 'passed {}'.format(vocab_size))\n\n (stackoverflow_train, _,\n stackoverflow_test) = tff.simulation.datasets.stackoverflow.load_data()\n\n vocab = create_vocab(vocab_size)\n\n raw_test_dataset = stackoverflow_test.create_tf_dataset_from_all_clients()\n\n preprocess_train = create_train_dataset_preprocess_fn(\n vocab, client_batch_size, client_epochs_per_round, max_seq_len,\n max_training_elements_per_user, max_shuffle_buffer_size)\n\n preprocess_val_and_test = create_test_dataset_preprocess_fn(\n vocab, max_seq_len)\n\n stackoverflow_train = stackoverflow_train.preprocess(preprocess_train)\n stackoverflow_val = preprocess_val_and_test(\n raw_test_dataset.take(num_validation_examples))\n stackoverflow_test = preprocess_val_and_test(\n raw_test_dataset.skip(num_validation_examples))\n\n return stackoverflow_train, stackoverflow_val, stackoverflow_test\n\n\ndef get_centralized_train_dataset(vocab_size: int,\n batch_size: int,\n max_seq_len: int,\n shuffle_buffer_size: int = 10000):\n \"\"\"Creates centralized approximately shuffled train dataset.\"\"\"\n\n vocab = create_vocab(vocab_size)\n to_ids = build_to_ids_fn(vocab, max_seq_len)\n train, _, _ = tff.simulation.datasets.stackoverflow.load_data()\n\n train = train.create_tf_dataset_from_all_clients()\n train = train.shuffle(buffer_size=shuffle_buffer_size)\n return batch_and_split(\n train.map(to_ids, num_parallel_calls=tf.data.experimental.AUTOTUNE),\n max_seq_len, batch_size)\n",
"# Lint as: python3\n# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom absl.testing import absltest\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import computations\nfrom tensorflow_federated.python.core.backends.mapreduce import canonical_form\nfrom tensorflow_federated.python.core.backends.mapreduce import test_utils\n\ntf.compat.v1.enable_v2_behavior()\n\n\ndef _dummy_canonical_form_computations():\n\n @computations.tf_computation\n def initialize():\n return tf.constant(0)\n\n @computations.tf_computation(tf.int32)\n def prepare(server_state):\n del server_state # Unused\n return tf.constant(1.0)\n\n @computations.tf_computation(\n computation_types.SequenceType(tf.float32), tf.float32)\n def work(client_data, client_input):\n del client_data # Unused\n del client_input # Unused\n return (True, []), []\n\n @computations.tf_computation\n def zero():\n return tf.constant(0), tf.constant(0)\n\n @computations.tf_computation((tf.int32, tf.int32), tf.bool)\n def accumulate(accumulator, client_update):\n del accumulator # Unused\n del client_update # Unused\n return tf.constant(1), tf.constant(1)\n\n @computations.tf_computation((tf.int32, tf.int32), (tf.int32, tf.int32))\n def merge(accumulator1, accumulator2):\n del accumulator1 # Unused\n del accumulator2 # Unused\n return tf.constant(1), tf.constant(1)\n\n @computations.tf_computation(tf.int32, tf.int32)\n def report(accumulator):\n del accumulator # Unused\n return tf.constant(1.0)\n\n @computations.tf_computation\n def bitwidth():\n return []\n\n @computations.tf_computation(\n tf.int32, (tf.float32, computation_types.NamedTupleType([])))\n def update(server_state, global_update):\n del server_state # Unused\n del global_update # Unused\n return tf.constant(1), []\n\n return (initialize, prepare, work, zero, accumulate, merge, report, bitwidth,\n update)\n\n\nclass CanonicalFormTest(absltest.TestCase):\n\n def test_init_does_not_raise_type_error(self):\n (initialize, prepare, work, zero, accumulate, merge, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n try:\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n except TypeError:\n self.fail('Raised TypeError unexpectedly.')\n\n def test_init_does_not_raise_type_error_with_unknown_dimensions(self):\n\n @computations.tf_computation\n def initialize():\n return tf.constant(0)\n\n @computations.tf_computation(tf.int32)\n def prepare(server_state):\n del server_state # Unused\n return tf.constant(1.0)\n\n @computations.tf_computation(\n computation_types.SequenceType(tf.float32), tf.float32)\n def work(client_data, client_input):\n del client_data # Unused\n del client_input # Unused\n return (True, []), []\n\n @computations.tf_computation\n def zero():\n return tf.constant([], dtype=tf.string)\n\n @computations.tf_computation(\n 
computation_types.TensorType(shape=[None], dtype=tf.string), tf.bool)\n def accumulate(accumulator, client_update):\n del accumulator # Unused\n del client_update # Unused\n return tf.constant(['abc'])\n\n @computations.tf_computation(\n computation_types.TensorType(shape=[None], dtype=tf.string),\n computation_types.TensorType(shape=[None], dtype=tf.string))\n def merge(accumulator1, accumulator2):\n del accumulator1 # Unused\n del accumulator2 # Unused\n return tf.constant(['abc'])\n\n @computations.tf_computation(\n computation_types.TensorType(shape=[None], dtype=tf.string))\n def report(accumulator):\n del accumulator # Unused\n return tf.constant(1.0)\n\n @computations.tf_computation\n def bitwidth():\n return []\n\n @computations.tf_computation(\n tf.int32, (tf.float32, computation_types.NamedTupleType([])))\n def update(server_state, global_update):\n del server_state # Unused\n del global_update # Unused\n return tf.constant(1), []\n\n try:\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n except TypeError:\n self.fail('Raised TypeError unexpectedly.')\n\n def test_init_raises_type_error_with_bad_initialize_result_type(self):\n (_, prepare, work, zero, accumulate, merge, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation\n def initialize():\n return tf.constant(0.0)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_prepare_parameter_type(self):\n (initialize, _, work, zero, accumulate, merge, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation(tf.float32)\n def prepare(server_state):\n del server_state # Unused\n return tf.constant(1.0)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_prepare_result_type(self):\n (initialize, _, work, zero, accumulate, merge, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation(tf.int32)\n def prepare(server_state):\n del server_state # Unused\n return tf.constant(1)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_work_second_parameter_type(self):\n (initialize, prepare, _, zero, accumulate, merge, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation(\n computation_types.SequenceType(tf.float32), tf.int32)\n def work(client_data, client_input):\n del client_data # Unused\n del client_input # Unused\n return (True, []), []\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_work_result_type(self):\n (initialize, prepare, _, zero, accumulate, merge, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation(\n computation_types.SequenceType(tf.float32), tf.float32)\n def work(client_data, client_input):\n del client_data # Unused\n del client_input # Unused\n return (tf.constant('abc'), []), []\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, 
zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_zero_result_type(self):\n (initialize, prepare, work, _, accumulate, merge, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation\n def zero():\n return tf.constant(0.0), tf.constant(0)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_accumulate_first_parameter_type(\n self):\n (initialize, prepare, work, zero, _, merge, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation((tf.float32, tf.int32), tf.bool)\n def accumulate(accumulator, client_update):\n del accumulator # Unused\n del client_update # Unused\n return tf.constant(1), tf.constant(1)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_accumulate_second_parameter_type(\n self):\n (initialize, prepare, work, zero, _, merge, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation((tf.float32, tf.float32), tf.string)\n def accumulate(accumulator, client_update):\n del accumulator # Unused\n del client_update # Unused\n return tf.constant(1), tf.constant(1)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_accumulate_result_type(self):\n (initialize, prepare, work, zero, _, merge, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation((tf.float32, tf.float32), tf.bool)\n def accumulate(accumulator, client_update):\n del accumulator # Unused\n del client_update # Unused\n return tf.constant(1.0), tf.constant(1)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_merge_first_parameter_type(self):\n (initialize, prepare, work, zero, accumulate, _, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation((tf.float32, tf.int32), (tf.int32, tf.int32))\n def merge(accumulator1, accumulator2):\n del accumulator1 # Unused\n del accumulator2 # Unused\n return tf.constant(1), tf.constant(1)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_merge_second_parameter_type(self):\n (initialize, prepare, work, zero, accumulate, _, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation((tf.int32, tf.int32), (tf.float32, tf.int32))\n def merge(accumulator1, accumulator2):\n del accumulator1 # Unused\n del accumulator2 # Unused\n return tf.constant(1), tf.constant(1)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_merge_result_type(self):\n (initialize, prepare, work, zero, accumulate, _, report, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation((tf.int32, tf.int32), (tf.int32, tf.int32))\n def 
merge(accumulator1, accumulator2):\n del accumulator1 # Unused\n del accumulator2 # Unused\n return tf.constant(1.0), tf.constant(1)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_report_parameter_type(self):\n (initialize, prepare, work, zero, accumulate, merge, _, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation(tf.float32, tf.int32)\n def report(accumulator):\n del accumulator # Unused\n return tf.constant(1.0)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_report_result_type(self):\n (initialize, prepare, work, zero, accumulate, merge, _, bitwidth,\n update) = _dummy_canonical_form_computations()\n\n @computations.tf_computation(tf.int32, tf.int32)\n def report(accumulator):\n del accumulator # Unused\n return tf.constant(1)\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_update_first_parameter_type(self):\n (initialize, prepare, work, zero, accumulate, merge, report, bitwidth,\n _) = _dummy_canonical_form_computations()\n\n @computations.tf_computation(\n tf.float32, (tf.float32, computation_types.NamedTupleType([])))\n def update(server_state, global_update):\n del server_state # Unused\n del global_update # Unused\n return tf.constant(1), []\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_update_second_parameter_type(self):\n (initialize, prepare, work, zero, accumulate, merge, report, bitwidth,\n _) = _dummy_canonical_form_computations()\n\n @computations.tf_computation(\n tf.int32, (tf.int32, computation_types.NamedTupleType([])))\n def update(server_state, global_update):\n del server_state # Unused\n del global_update # Unused\n return tf.constant(1), []\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_init_raises_type_error_with_bad_update_result_type(self):\n (initialize, prepare, work, zero, accumulate, merge, report, bitwidth,\n _) = _dummy_canonical_form_computations()\n\n @computations.tf_computation(\n tf.int32, (tf.float32, computation_types.NamedTupleType([])))\n def update(server_state, global_update):\n del server_state # Unused\n del global_update # Unused\n return tf.constant(1.0), []\n\n with self.assertRaises(TypeError):\n canonical_form.CanonicalForm(initialize, prepare, work, zero, accumulate,\n merge, report, bitwidth, update)\n\n def test_summary(self):\n cf = test_utils.get_temperature_sensor_example()\n\n class CapturePrint(object):\n\n def __init__(self):\n self.summary = ''\n\n def __call__(self, msg):\n self.summary += msg + '\\n'\n\n capture = CapturePrint()\n cf.summary(print_fn=capture)\n # pyformat: disable\n self.assertEqual(\n capture.summary,\n 'initialize: ( -> <num_rounds=int32>)\\n'\n 'prepare : (<num_rounds=int32> -> <max_temperature=float32>)\\n'\n 'work : (<float32*,<max_temperature=float32>> -> <<<is_over=bool>,<>>,<num_readings=int32>>)\\n'\n 'zero : ( -> <num_total=int32,num_over=int32>)\\n'\n 'accumulate: 
(<<num_total=int32,num_over=int32>,<is_over=bool>> -> <num_total=int32,num_over=int32>)\\n'\n 'merge : (<<num_total=int32,num_over=int32>,<num_total=int32,num_over=int32>> -> <num_total=int32,num_over=int32>)\\n'\n 'report : (<num_total=int32,num_over=int32> -> <ratio_over_threshold=float32>)\\n'\n 'bitwidth : ( -> <>)\\n'\n 'update : ( -> <num_rounds=int32>)\\n'\n )\n # pyformat: enable\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"# Lint as: python3\n# Copyright 2020, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport string\n\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom tensorflow_federated.python.research.triehh import triehh_tf\nfrom tensorflow_federated.python.research.triehh import triehh_tff\n\n\nclass TriehhTffTest(tf.test.TestCase):\n\n def test_build_triehh_process_works_as_expeted(self):\n clients = 3\n num_sub_rounds = 4\n max_rounds = 6\n max_num_heavy_hitters = 3\n max_user_contribution = 100\n roots = (\n string.ascii_lowercase + string.digits + \"'@#-;*:./\" +\n triehh_tf.DEFAULT_TERMINATOR)\n possible_prefix_extensions = list(roots)\n\n iterative_process = triehh_tff.build_triehh_process(\n possible_prefix_extensions,\n num_sub_rounds,\n max_num_heavy_hitters,\n max_user_contribution,\n default_terminator=triehh_tf.DEFAULT_TERMINATOR)\n\n server_state = iterative_process.initialize()\n expected_discovered_prefixes = tf.constant([''], dtype=tf.string)\n expected_discovered_heavy_hitters = tf.constant([], dtype=tf.string)\n expected_accumulated_votes = tf.zeros(\n dtype=tf.int32,\n shape=[max_num_heavy_hitters,\n len(possible_prefix_extensions)])\n expected_round_num = tf.constant(0, dtype=tf.int32)\n\n self.assertAllEqual(server_state.discovered_prefixes,\n expected_discovered_prefixes)\n self.assertAllEqual(server_state.discovered_heavy_hitters,\n expected_discovered_heavy_hitters)\n self.assertAllEqual(server_state.accumulated_votes,\n expected_accumulated_votes)\n self.assertAllEqual(server_state.round_num, expected_round_num)\n\n def create_dataset_fn(client_id):\n del client_id\n return tf.data.Dataset.from_tensor_slices(['hello', 'hey', 'hi'])\n\n client_ids = list(range(100))\n\n client_data = tff.simulation.ClientData.from_clients_and_fn(\n client_ids=client_ids,\n create_tf_dataset_for_client_fn=create_dataset_fn)\n\n for round_num in range(max_rounds * num_sub_rounds):\n # TODO(b/152051528): Remove this once lookup table state is cleared in\n # eager executer.\n tff.framework.set_default_executor(tff.framework.local_executor_factory())\n sampled_clients = list(range(clients))\n sampled_datasets = [\n client_data.create_tf_dataset_for_client(client_id)\n for client_id in sampled_clients\n ]\n server_state, _ = iterative_process.next(server_state, sampled_datasets)\n\n if (round_num + 1) % num_sub_rounds == 0:\n if (max_num_heavy_hitters - len(server_state.discovered_heavy_hitters) <\n 1) or (server_state.discovered_prefixes.size == 0):\n # Training is done.\n # All max_num_heavy_hitters have been discovered.\n break\n\n expected_discovered_heavy_hitters = tf.constant(['hi', 'hey', 'hello'],\n dtype=tf.string)\n\n self.assertAllEqual(server_state.discovered_heavy_hitters,\n expected_discovered_heavy_hitters)\n\n\nif __name__ == '__main__':\n tf.compat.v1.enable_v2_behavior()\n tf.test.main()\n"
] | [
[
"tensorflow.size",
"tensorflow.lookup.KeyValueTensorInitializer",
"tensorflow.reshape",
"tensorflow.map_fn",
"tensorflow.strings.split",
"tensorflow.concat"
],
[
"tensorflow.constant",
"tensorflow.compat.v1.enable_v2_behavior"
],
[
"tensorflow.constant",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.test.main",
"tensorflow.compat.v1.enable_v2_behavior"
]
] |
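A sketch of how the `dataset.py` helpers in the row above compose end to end (hedged: the module name `dataset` and the hyperparameter values are assumptions for illustration, and `construct_word_level_datasets` downloads the full TFF Stack Overflow cache on first use):

```python
# Sketch: wiring the quoted Stack Overflow helpers together (module assumed
# importable as `dataset`; first call triggers the TFF dataset download).
import dataset

train_cd, val_ds, test_ds = dataset.construct_word_level_datasets(
    vocab_size=10000,
    client_batch_size=16,
    client_epochs_per_round=1,
    max_seq_len=20,
    max_training_elements_per_user=1000,
    num_validation_examples=10000)

# train_cd is a ClientData whose per-client datasets already yield
# (input_ids, target_ids) batches shifted by one token via split_input_target.
example_ds = train_cd.create_tf_dataset_for_client(train_cd.client_ids[0])
for inputs, targets in example_ds.take(1):
  print(inputs.shape, targets.shape)  # both (<=16, max_seq_len)
```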
aliavni/statsmodels | [
"ef5d57a8d45de76a895e9401705280d558d688ad",
"ef5d57a8d45de76a895e9401705280d558d688ad"
] | [
"statsmodels/multivariate/cancorr.py",
"statsmodels/graphics/tests/test_factorplots.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"Canonical correlation analysis\n\nauthor: Yichuan Liu\n\"\"\"\nimport numpy as np\nfrom numpy.linalg import svd\nimport scipy\nimport pandas as pd\n\nfrom statsmodels.base.model import Model\nfrom statsmodels.iolib import summary2\nfrom .multivariate_ols import multivariate_stats\n\n\nclass CanCorr(Model):\n \"\"\"\n Canonical correlation analysis using singular value decomposition\n\n For matrices exog=x and endog=y, find projections x_cancoef and y_cancoef\n such that:\n\n x1 = x * x_cancoef, x1' * x1 is identity matrix\n y1 = y * y_cancoef, y1' * y1 is identity matrix\n\n and the correlation between x1 and y1 is maximized.\n\n Attributes\n ----------\n endog : ndarray\n See Parameters.\n exog : ndarray\n See Parameters.\n cancorr : ndarray\n The canonical correlation values\n y_cancoeff : ndarray\n The canonical coefficients for endog\n x_cancoeff : ndarray\n The canonical coefficients for exog\n\n References\n ----------\n .. [*] http://numerical.recipes/whp/notes/CanonCorrBySVD.pdf\n .. [*] http://www.csun.edu/~ata20315/psy524/docs/Psy524%20Lecture%208%20CC.pdf\n .. [*] http://www.mathematica-journal.com/2014/06/canonical-correlation-analysis/\n \"\"\" # noqa:E501\n def __init__(self, endog, exog, tolerance=1e-8, missing='none', hasconst=None, **kwargs):\n super(CanCorr, self).__init__(endog, exog, missing=missing,\n hasconst=hasconst, **kwargs)\n self._fit(tolerance)\n\n def _fit(self, tolerance=1e-8):\n \"\"\"Fit the model\n\n A ValueError is raised if there are singular values smaller than the\n tolerance. The treatment of singular arrays might change in future.\n\n Parameters\n ----------\n tolerance : float\n eigenvalue tolerance, values smaller than which is considered 0\n \"\"\"\n nobs, k_yvar = self.endog.shape\n nobs, k_xvar = self.exog.shape\n k = np.min([k_yvar, k_xvar])\n\n x = np.array(self.exog)\n x = x - x.mean(0)\n y = np.array(self.endog)\n y = y - y.mean(0)\n\n ux, sx, vx = svd(x, 0)\n # vx_ds = vx.T divided by sx\n vx_ds = vx.T\n mask = sx > tolerance\n if mask.sum() < len(mask):\n raise ValueError('exog is collinear.')\n vx_ds[:, mask] /= sx[mask]\n uy, sy, vy = svd(y, 0)\n # vy_ds = vy.T divided by sy\n vy_ds = vy.T\n mask = sy > tolerance\n if mask.sum() < len(mask):\n raise ValueError('endog is collinear.')\n vy_ds[:, mask] /= sy[mask]\n u, s, v = svd(ux.T.dot(uy), 0)\n\n # Correct any roundoff\n self.cancorr = np.array([max(0, min(s[i], 1)) for i in range(len(s))])\n\n self.x_cancoef = vx_ds.dot(u[:, :k])\n self.y_cancoef = vy_ds.dot(v.T[:, :k])\n\n def corr_test(self):\n \"\"\"Approximate F test\n Perform multivariate statistical tests of the hypothesis that\n there is no canonical correlation between endog and exog.\n For each canonical correlation, testing its significance based on\n Wilks' lambda.\n\n Returns\n -------\n CanCorrTestResults instance\n \"\"\"\n nobs, k_yvar = self.endog.shape\n nobs, k_xvar = self.exog.shape\n eigenvals = np.power(self.cancorr, 2)\n stats = pd.DataFrame(columns=['Canonical Correlation', \"Wilks' lambda\",\n 'Num DF','Den DF', 'F Value','Pr > F'],\n index=list(range(len(eigenvals) - 1, -1, -1)))\n prod = 1\n for i in range(len(eigenvals) - 1, -1, -1):\n prod *= 1 - eigenvals[i]\n p = k_yvar - i\n q = k_xvar - i\n r = (nobs - k_yvar - 1) - (p - q + 1) / 2\n u = (p * q - 2) / 4\n df1 = p * q\n if p ** 2 + q ** 2 - 5 > 0:\n t = np.sqrt(((p * q) ** 2 - 4) / (p ** 2 + q ** 2 - 5))\n else:\n t = 1\n df2 = r * t - 2 * u\n lmd = np.power(prod, 1 / t)\n F = (1 - lmd) / lmd * df2 / df1\n stats.loc[i, 
'Canonical Correlation'] = self.cancorr[i]\n stats.loc[i, \"Wilks' lambda\"] = prod\n stats.loc[i, 'Num DF'] = df1\n stats.loc[i, 'Den DF'] = df2\n stats.loc[i, 'F Value'] = F\n pval = scipy.stats.f.sf(F, df1, df2)\n stats.loc[i, 'Pr > F'] = pval\n '''\n # Wilk's Chi square test of each canonical correlation\n df = (p - i + 1) * (q - i + 1)\n chi2 = a * np.log(prod)\n pval = stats.chi2.sf(chi2, df)\n stats.loc[i, 'Canonical correlation'] = self.cancorr[i]\n stats.loc[i, 'Chi-square'] = chi2\n stats.loc[i, 'DF'] = df\n stats.loc[i, 'Pr > ChiSq'] = pval\n '''\n ind = stats.index.values[::-1]\n stats = stats.loc[ind, :]\n\n # Multivariate tests (remember x has mean removed)\n stats_mv = multivariate_stats(eigenvals,\n k_yvar, k_xvar, nobs - k_xvar - 1)\n return CanCorrTestResults(stats, stats_mv)\n\n\nclass CanCorrTestResults:\n \"\"\"\n Canonical correlation results class\n\n Attributes\n ----------\n stats : DataFrame\n Contain statistical tests results for each canonical correlation\n stats_mv : DataFrame\n Contain the multivariate statistical tests results\n \"\"\"\n def __init__(self, stats, stats_mv):\n self.stats = stats\n self.stats_mv = stats_mv\n\n def __str__(self):\n return self.summary().__str__()\n\n def summary(self):\n summ = summary2.Summary()\n summ.add_title('Cancorr results')\n summ.add_df(self.stats)\n summ.add_dict({'': ''})\n summ.add_dict({'Multivariate Statistics and F Approximations': ''})\n summ.add_df(self.stats_mv)\n return summ\n",
"import numpy as np\nfrom numpy.testing import assert_equal, assert_raises\nfrom pandas import Series\nimport pytest\n\nfrom statsmodels.graphics.factorplots import _recode, interaction_plot\n\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError:\n pass\n\n\nclass TestInteractionPlot:\n\n @classmethod\n def setup_class(cls):\n np.random.seed(12345)\n cls.weight = np.random.randint(1,4,size=60)\n cls.duration = np.random.randint(1,3,size=60)\n cls.days = np.log(np.random.randint(1,30, size=60))\n\n @pytest.mark.matplotlib\n def test_plot_both(self, close_figures):\n fig = interaction_plot(self.weight, self.duration, self.days,\n colors=['red','blue'], markers=['D','^'], ms=10)\n\n @pytest.mark.matplotlib\n def test_plot_rainbow(self, close_figures):\n fig = interaction_plot(self.weight, self.duration, self.days,\n markers=['D','^'], ms=10)\n\n @pytest.mark.matplotlib\n @pytest.mark.parametrize('astype', ['str', 'int'])\n def test_plot_pandas(self, astype, close_figures):\n weight = Series(self.weight, name='Weight').astype(astype)\n duration = Series(self.duration, name='Duration')\n days = Series(self.days, name='Days')\n fig = interaction_plot(weight, duration, days,\n markers=['D', '^'], ms=10)\n ax = fig.axes[0]\n trace = ax.get_legend().get_title().get_text()\n assert_equal(trace, 'Duration')\n assert_equal(ax.get_ylabel(), 'mean of Days')\n assert_equal(ax.get_xlabel(), 'Weight')\n\n @pytest.mark.matplotlib\n def test_formatting(self, close_figures):\n fig = interaction_plot(self.weight, self.duration, self.days, colors=['r','g'], linestyles=['--','-.'])\n assert_equal(isinstance(fig, plt.Figure), True)\n\n @pytest.mark.matplotlib\n def test_formatting_errors(self, close_figures):\n assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, markers=['D'])\n assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, colors=['b','r','g'])\n assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, linestyles=['--','-.',':'])\n\n @pytest.mark.matplotlib\n def test_plottype(self, close_figures):\n fig = interaction_plot(self.weight, self.duration, self.days, plottype='line')\n assert_equal(isinstance(fig, plt.Figure), True)\n fig = interaction_plot(self.weight, self.duration, self.days, plottype='scatter')\n assert_equal(isinstance(fig, plt.Figure), True)\n assert_raises(ValueError, interaction_plot, self.weight, self.duration, self.days, plottype='unknown')\n\n def test_recode_series(self):\n series = Series(['a', 'b'] * 10, index=np.arange(0, 40, 2),\n name='index_test')\n series_ = _recode(series, {'a': 0, 'b': 1})\n assert_equal(series_.index.values, series.index.values,\n err_msg='_recode changed the index')\n"
] | [
[
"numpy.sqrt",
"scipy.stats.f.sf",
"numpy.linalg.svd",
"numpy.power",
"numpy.min",
"numpy.array"
],
[
"numpy.testing.assert_raises",
"pandas.Series",
"numpy.testing.assert_equal",
"numpy.random.seed",
"numpy.arange",
"numpy.random.randint"
]
] |
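Since the row above quotes both the estimator and a test module, here is a short synthetic-data check of the quoted `CanCorr` class (a sketch: only the seed and array shapes are invented; the class fits itself on construction, so there is no separate `.fit()` call):

```python
# Sketch: exercising the quoted CanCorr on synthetic, linearly related data.
import numpy as np
from statsmodels.multivariate.cancorr import CanCorr

rng = np.random.RandomState(0)
x = rng.standard_normal((200, 4))                  # exog
y = x @ rng.standard_normal((4, 3)) \
    + 0.5 * rng.standard_normal((200, 3))          # endog, correlated with x

cc = CanCorr(y, x)               # fitting happens inside __init__ via _fit()
print(cc.cancorr)                # min(k_yvar, k_xvar) = 3 canonical correlations
print(cc.corr_test().summary())  # Wilks' lambda and approximate F tests
```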
rongtianjie/dcraw_py | [
"fd45d819a67d2f52d7ca61abbe145ab1b172bee9"
] | [
"demosaic_pack/amaze_demosaic.py"
] | [
"import numpy as np\n \ndef amaze_demosaic(src, raw):\n\n cfarray = raw.raw_colors\n cfarray[cfarray == 3] = 1\n\n rgb = amaze_demosaic_libraw(src, cfarray, raw.daylight_whitebalance)\n\n return rgb\n\ndef amaze_demosaic_libraw(src, cfarray, daylight_wb):\n\n TS = 512\n winx = winy = 0\n width = src.shape[1]\n height = src.shape[0]\n image = np.empty([height, width, 3], dtype=np.uint16)\n clip_pt = min(daylight_wb[0], daylight_wb[1], daylight_wb[2])\n\n v1 = TS\n v2 = 2 * TS\n v3 = 3 * TS\n p1 = -TS + 1\n p2 = -2 * TS + 2\n p3 = -3 * TS + 3\n m1 = TS + 1 \n m2 = 2 * TS + 2\n m3 = 3 * TS + 3\n\n nbr = [-v2,-2,2,v2,0]\n eps, epssq = 1e-5, 1e-10\n\n # adaptive ratios threshold\n arthresh=0.75\n # nyquist texture test threshold\n nyqthresh=0.5\n # diagonal interpolation test threshold\n pmthresh=0.25\n # factors for bounding interpolation in saturated regions\n lbd, ubd = 1, 1 # lbd=0.66, ubd=1.5 alternative values;\n\n # gaussian on 5x5 quincunx, sigma=1.2\n gaussodd = [0.14659727707323927, 0.103592713382435, 0.0732036125103057, 0.0365543548389495]\n # gaussian on 5x5, sigma=1.2\n gaussgrad = [0.07384411893421103, 0.06207511968171489, 0.0521818194747806, 0.03687419286733595, 0.03099732204057846, 0.018413194161458882]\n # gaussian on 3x3, sigma =0.7\n gauss1 = [0.3376688223162362, 0.12171198028231786, 0.04387081413862306]\n # gaussian on 5x5 alt quincunx, sigma=1.5\n gausseven = [0.13719494435797422, 0.05640252782101291]\n # guassian on quincunx grid\n gquinc = [0.169917, 0.108947, 0.069855, 0.0287182]\n\n\n rgb = np.empty([TS*TS, 3], dtype=np.float32)\n delh = np.empty(TS*TS, dtype=np.float32)\n delv = np.empty(TS*TS, dtype=np.float32)\n delhsq = np.empty(TS*TS, dtype=np.float32)\n delvsq = np.empty(TS*TS, dtype=np.float32)\n dirwts = np.empty([TS*TS, 2], dtype=np.float32)\n vcd = np.empty(TS*TS, dtype=np.float32)\n hcd = np.empty(TS*TS, dtype=np.float32)\n vcdalt = np.empty(TS*TS, dtype=np.float32)\n hcdalt = np.empty(TS*TS, dtype=np.float32)\n vcdsq = np.empty(TS*TS, dtype=np.float32)\n hcdsq = np.empty(TS*TS, dtype=np.float32)\n cddiffsq = np.empty(TS*TS, dtype=np.float32)\n hvwt = np.empty(TS*TS, dtype=np.float32)\n Dgrb = np.empty([TS*TS, 2], dtype=np.float32)\n delp = np.empty(TS*TS, dtype=np.float32)\n delm = np.empty(TS*TS, dtype=np.float32)\n rbint = np.empty(TS*TS, dtype=np.float32)\n Dgrbh2 = np.empty(TS*TS, dtype=np.float32)\n Dgrbv2 = np.empty(TS*TS, dtype=np.float32)\n dgintv = np.empty(TS*TS, dtype=np.float32)\n dginth = np.empty(TS*TS, dtype=np.float32)\n Dgrbpsq1 = np.empty(TS*TS, dtype=np.float32)\n Dgrbmsq1 = np.empty(TS*TS, dtype=np.float32)\n cfa = np.empty(TS*TS, dtype=np.float32)\n pmwt = np.empty(TS*TS, dtype=np.float32)\n rbp = np.empty(TS*TS, dtype=np.float32)\n rbm = np.empty(TS*TS, dtype=np.float32)\n\n nyquist = np.empty(TS*TS, dtype=np.int32)\n\n # determine GRBG coset; (ey,ex) is the offset of the R subarray\n if cfarray[0][0] == 1:\n if cfarray[0][1] == 0:\n ex, ey = 1, 0\n else:\n ex, ey = 0, 1\n else:\n if cfarray[0][0] == 0:\n ex = ey = 0\n else: \n ex = ey = 1\n \n # Start main loop\n loop_cnt = 1 \n for top in range(winy-16, winy+height, TS-32):\n for left in range(winx-16, winx+width, TS-32):\n print(\"Loop [{}]: top: {} left: {}\".format(loop_cnt, top, left))\n loop_cnt += 1\n # location of tile bottom edge\n bottom = min(top+TS, winy+height+16)\n # location of tile right edge\n right = min(left+TS, winx+width+16)\n # tile width (=TS except for right edge of image)\n rr1 = bottom - top\n # tile height (=TS except for bottom edge of image)\n cc1 = right - 
left\n \n # rgb from input CFA data\n # rgb values should be floating point number between 0 and 1 \n # after white balance multipliers are applied \n # a 16 pixel border is added to each side of the image\n\n # bookkeeping for borders\n rrmin = 16 if top < winy else 0\n ccmin = 16 if left < winx else 0\n rrmax = winy+height-top if bottom>(winy+height) else rr1\n ccmax = winx+width-left if right>(winx+width) else cc1\n\n for rr in range(rrmin, rrmax):\n row = rr + top\n for cc in range(ccmin, ccmax):\n col = cc + left\n c = cfarray[rr, cc]\n indx1 = rr * TS + cc\n indx = row * width + col\n rgb[indx1, c] = src[row, col] / 65535\n\n cfa[indx1] = rgb[indx1, c]\n \n # fill borders\n if rrmin > 0:\n for rr in range(16):\n for cc in range(ccmin, ccmax):\n c = cfarray[rr, cc]\n rgb[rr*TS+cc, c] = rgb[(32-rr)*TS+cc, c]\n cfa[rr*TS+cc] = rgb[rr*TS+cc, c]\n \n if rrmax < rr1:\n for rr in range(16):\n for cc in range(ccmin, ccmax):\n c = cfarray[rr, cc]\n rgb[(rrmax+rr)*TS+cc, c] = (src[(winy+height-rr-2), left+cc])/65535\n cfa[(rrmax+rr)*TS+cc] = rgb[(rrmax+rr)*TS+cc, c]\n \n if ccmin > 0:\n for rr in range(rrmin, rrmax):\n for cc in range(16):\n c = cfarray[rr, cc]\n rgb[rr*TS+cc, c] = rgb[rr*TS+32-cc, c]\n cfa[rr*TS+cc] = rgb[rr*TS+cc, c]\n \n if ccmax < cc1:\n for rr in range(rrmin, rrmax):\n for cc in range(16):\n c = cfarray[rr, cc]\n rgb[rr*TS+ccmax+cc, c] = (src[(top+rr), (winx+width-cc-2)])/65535\n cfa[rr*TS+ccmax+cc] = rgb[rr*TS+ccmax+cc, c]\n \n # also, fill the image corners\n if rrmin > 0 and ccmin > 0:\n for rr in range(16):\n for cc in range(16):\n c = cfarray[rr, cc]\n rgb[(rr)*TS+cc][c] = rgb[(32-rr)*TS+(32-cc)][c]\n cfa[(rr)*TS+cc] = rgb[(rr)*TS+cc][c]\n \n if rrmax < rr1 and ccmax < cc1:\n for rr in range(16):\n for cc in range(16):\n c = cfarray[rr, cc]\n rgb[(rrmax+rr)*TS+ccmax+cc][c] = (src[(winy+height-rr-2)][(winx+width-cc-2)])/65535\n cfa[(rrmax+rr)*TS+ccmax+cc] = rgb[(rrmax+rr)*TS+ccmax+cc][c]\n \n if rrmin > 0 and ccmax < cc1:\n for rr in range(16):\n for cc in range(16):\n c = cfarray[rr, cc]\n rgb[(rr)*TS+ccmax+cc][c] = (src[(winy+32-rr)][(winx+width-cc-2)])/65535\n cfa[(rr)*TS+ccmax+cc] = rgb[(rr)*TS+ccmax+cc][c]\n \n if rrmax < rr1 and ccmin > 0:\n for rr in range(16):\n for cc in range(16):\n c = cfarray[rr, cc]\n rgb[(rrmax+rr)*TS+cc][c] = (src[(winy+height-rr-2)][(winx+32-cc)])/65535\n cfa[(rrmax+rr)*TS+cc] = rgb[(rrmax+rr)*TS+cc][c]\n \n # end of border fill\n\n for rr in range(1, rr1-1):\n for cc in range(1, cc1-1):\n indx = rr*TS+cc\n delh[indx] = abs(cfa[indx + 1] - cfa[indx - 1])\n delv[indx] = abs(cfa[indx + v1] - cfa[indx - v1])\n delhsq[indx] = SQR(delh[indx])\n delvsq[indx] = SQR(delv[indx])\n delp[indx] = abs(cfa[indx+p1]-cfa[indx-p1])\n delm[indx] = abs(cfa[indx+m1]-cfa[indx-m1])\n\n for rr in range(2, rr1-2):\n for cc in range(2, cc1-2):\n indx = rr*TS+cc\n # vert directional averaging weights\n dirwts[indx][0] = eps+delv[indx+v1]+delv[indx-v1]+delv[indx]\n # horizontal weights\n dirwts[indx][1] = eps+delh[indx+1]+delh[indx-1]+delh[indx]\n\n if cfarray[rr, cc] & 1:\n # for later use in diagonal interpolation\n Dgrbpsq1[indx]=(SQR(cfa[indx]-cfa[indx-p1])+SQR(cfa[indx]-cfa[indx+p1]))\n Dgrbmsq1[indx]=(SQR(cfa[indx]-cfa[indx-m1])+SQR(cfa[indx]-cfa[indx+m1]))\n \n for rr in range(4, rr1 - 4):\n for cc in range(4, cc1 - 4):\n indx = rr*TS+cc\n c = cfarray[rr, cc]\n sgn = -1 if c & 1 else 1\n\n # initialization of nyquist test\n nyquist[indx]=0\n # preparation for diag interp\n rbint[indx]=0\n\n # color ratios in each cardinal direction\n cru = cfa[indx - v1] 
* (dirwts[indx - v2][0] + dirwts[indx][0]) / (dirwts[indx - v2][0] * (eps + cfa[indx]) + dirwts[indx][0] * (eps + cfa[indx - v2]))\n crd = cfa[indx + v1] * (dirwts[indx + v2][0] + dirwts[indx][0]) / (dirwts[indx + v2][0] * (eps + cfa[indx]) + dirwts[indx][0] * (eps + cfa[indx + v2]))\n crl = cfa[indx - 1] * (dirwts[indx - 2][1] + dirwts[indx][1]) / (dirwts[indx - 2][1] * (eps + cfa[indx]) + dirwts[indx][1] * (eps + cfa[indx - 2]))\n crr = cfa[indx + 1] * (dirwts[indx + 2][1] + dirwts[indx][1]) / (dirwts[indx + 2][1] * (eps + cfa[indx]) + dirwts[indx][1] * (eps + cfa[indx + 2]))\n\n # G interpolated in vert/hor directions using Hamilton-Adams method\n guha = min(clip_pt, cfa[indx - v1] + 0.5 * (cfa[indx] - cfa[indx - v2]))\n gdha = min(clip_pt, cfa[indx + v1] + 0.5 * (cfa[indx] - cfa[indx + v2]))\n glha = min(clip_pt, cfa[indx - 1] + 0.5 * (cfa[indx] - cfa[indx - 2]))\n grha = min(clip_pt, cfa[indx + 1] + 0.5 * (cfa[indx] - cfa[indx + 2]))\n\n # G interpolated in vert/hor directions using adaptive ratios\n guar = cfa[indx] * cru if abs(1-cru) < arthresh else guha\n gdar = cfa[indx] * crd if abs(1-crd) < arthresh else gdha\n glar = cfa[indx] * crl if abs(1-crl) < arthresh else glha\n grar = cfa[indx] * crr if abs(1-crr) < arthresh else grha\n\n # adaptive weights for vertical/horizontal directions\n hwt = dirwts[indx - 1][1] / (dirwts[indx - 1][1] + dirwts[indx + 1][1])\n vwt = dirwts[indx - v1][0] / (dirwts[indx + v1][0] + dirwts[indx - v1][0])\n\n # interpolated G via adaptive weighTS of cardinal evaluations\n Gintvar = vwt * gdar + (1-vwt) * guar\n Ginthar = hwt * grar + (1-hwt) * glar\n Gintvha = vwt * gdha + (1-vwt) * guha\n Ginthha = hwt * grha + (1-hwt) * glha\n # interpolated color differences\n vcd[indx] = sgn * (Gintvar-cfa[indx])\n hcd[indx] = sgn * (Ginthar-cfa[indx])\n vcdalt[indx] = sgn * (Gintvha-cfa[indx])\n hcdalt[indx] = sgn * (Ginthha-cfa[indx])\n\n if cfa[indx] > 0.8 * clip_pt or Gintvha > 0.8 * clip_pt or Ginthha > 0.8 * clip_pt:\n # use HA if highlighTS are (nearly) clipped\n guar = guha\n gdar = gdha\n glar = glha\n grar = grha\n vcd[indx] = vcdalt[indx]\n hcd[indx] = hcdalt[indx]\n\n # differences of interpolations in opposite directions\n dgintv[indx] = min((guha - gdha) ** 2, (guar - gdar) ** 2)\n dginth[indx] = min((glha - grha) ** 2, (glar - grar) ** 2)\n \n for rr in range(4, rr1-4):\n for cc in range(4, cc1-4):\n c = cfarray[rr, cc]\n\n hcdvar = 3*(SQR(hcd[indx-2])+SQR(hcd[indx])+SQR(hcd[indx+2]))-SQR(hcd[indx-2]+hcd[indx]+hcd[indx+2])\n hcdaltvar = 3*(SQR(hcdalt[indx-2])+SQR(hcdalt[indx])+SQR(hcdalt[indx+2]))-SQR(hcdalt[indx-2]+hcdalt[indx]+hcdalt[indx+2])\n vcdvar = 3*(SQR(vcd[indx-v2])+SQR(vcd[indx])+SQR(vcd[indx+v2]))-SQR(vcd[indx-v2]+vcd[indx]+vcd[indx+v2])\n vcdaltvar = 3*(SQR(vcdalt[indx-v2])+SQR(vcdalt[indx])+SQR(vcdalt[indx+v2]))-SQR(vcdalt[indx-v2]+vcdalt[indx]+vcdalt[indx+v2])\n\n # choose the smallest variance; this yields a smoother interpolation\n if hcdaltvar < hcdvar:\n hcd[indx] = hcdalt[indx]\n if vcdaltvar < vcdvar:\n vcd[indx] = vcdalt[indx]\n\n # bound the interpolation in regions of high saturation\n # vertical and horizontal G interpolations\n if c & 1: # G site\n Ginth = -hcd[indx] + cfa[indx]\n Gintv = -vcd[indx] + cfa[indx]\n\n if hcd[indx] > 0:\n if 3 * hcd[indx] > (Ginth + cfa[indx]):\n hcd[indx] = -np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx]\n else:\n hwt = 1 - 3 * hcd[indx] / (eps + Ginth + cfa[indx])\n hcd[indx] = hwt * hcd[indx] + (1 - hwt) * (-np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx])\n\n if 
vcd[indx] > 0:\n if 3 * vcd[indx] > (Gintv + cfa[indx]):\n vcd[indx] = -np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]\n else:\n vwt = 1 - 3 * vcd[indx] / (eps + Gintv + cfa[indx])\n vcd[indx] = vwt * vcd[indx] + (1 - vwt) * (-np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx])\n \n if Ginth > clip_pt:\n hcd[indx] = -np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx]\n\n if Gintv > clip_pt:\n vcd[indx] = -np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]\n \n else: # R or B site\n\n Ginth = hcd[indx] + cfa[indx]\n Gintv = vcd[indx] + cfa[indx]\n\n if hcd[indx] < 0:\n if 3 * hcd[indx] < -(Ginth + cfa[indx]):\n hcd[indx] = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx]\n else:\n hwt = 1 + 3 * hcd[indx] / (eps + Ginth + cfa[indx])\n hcd[indx] = hwt * hcd[indx] + (1 - hwt) * (np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx])\n\n if vcd[indx] < 0:\n if 3 * vcd[indx] < -(Gintv + cfa[indx]):\n vcd[indx] = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx]\n else:\n vwt = 1 + 3 * vcd[indx] / (eps + Gintv + cfa[indx])\n vcd[indx] = vwt * vcd[indx] + (1 - vwt) * (np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx])\n\n if Ginth > clip_pt:\n hcd[indx] = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx]\n\n if Gintv > clip_pt:\n vcd[indx] = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx]\n\n vcdsq[indx] = SQR(vcd[indx])\n hcdsq[indx] = SQR(hcd[indx])\n cddiffsq[indx] = SQR(vcd[indx]-hcd[indx])\n\n for rr in range(6, rr1-6):\n for cc in range(6+(cfarray[rr, 2]&1), cc1-6, 2):\n indx = rr * TS + cc\n\n # compute color difference variances in cardinal directions\n\n Dgrbvvaru = 4*(vcdsq[indx]+vcdsq[indx-v1]+vcdsq[indx-v2]+vcdsq[indx-v3])-SQR(vcd[indx]+vcd[indx-v1]+vcd[indx-v2]+vcd[indx-v3])\n Dgrbvvard = 4*(vcdsq[indx]+vcdsq[indx+v1]+vcdsq[indx+v2]+vcdsq[indx+v3])-SQR(vcd[indx]+vcd[indx+v1]+vcd[indx+v2]+vcd[indx+v3])\n Dgrbhvarl = 4*(hcdsq[indx]+hcdsq[indx-1]+hcdsq[indx-2]+hcdsq[indx-3])-SQR(hcd[indx]+hcd[indx-1]+hcd[indx-2]+hcd[indx-3])\n Dgrbhvarr = 4*(hcdsq[indx]+hcdsq[indx+1]+hcdsq[indx+2]+hcdsq[indx+3])-SQR(hcd[indx]+hcd[indx+1]+hcd[indx+2]+hcd[indx+3])\n\t\t\t\t\t\n hwt = dirwts[indx-1][1]/(dirwts[indx-1][1]+dirwts[indx+1][1])\n vwt = dirwts[indx-v1][0]/(dirwts[indx+v1][0]+dirwts[indx-v1][0])\n\t\t\t\t\t\n vcdvar = epssq+vwt*Dgrbvvard+(1-vwt)*Dgrbvvaru\n hcdvar = epssq+hwt*Dgrbhvarr+(1-hwt)*Dgrbhvarl\n\n # compute fluctuations in up/down and left/right interpolations of colors\n Dgrbvvaru = (dgintv[indx])+(dgintv[indx-v1])+(dgintv[indx-v2])\n Dgrbvvard = (dgintv[indx])+(dgintv[indx+v1])+(dgintv[indx+v2])\n Dgrbhvarl = (dginth[indx])+(dginth[indx-1])+(dginth[indx-2])\n Dgrbhvarr = (dginth[indx])+(dginth[indx+1])+(dginth[indx+2])\n\n vcdvar1 = epssq+vwt*Dgrbvvard+(1-vwt)*Dgrbvvaru\n hcdvar1 = epssq+hwt*Dgrbhvarr+(1-hwt)*Dgrbhvarl\n\n # determine adaptive weights for G interpolation\n varwt=hcdvar/(vcdvar+hcdvar)\n diffwt=hcdvar1/(vcdvar1+hcdvar1)\n\n # if both agree on interpolation direction, choose the one with strongest directional discrimination;\n # otherwise, choose the u/d and l/r difference fluctuation weights\n if ((0.5 - varwt) * (0.5 - diffwt) > 0) and (abs(0.5 - diffwt) < abs(0.5 - varwt)):\n hvwt[indx] = varwt\n else:\n hvwt[indx] = diffwt\n \n # Nyquist test\n for rr in range(6, rr1-6):\n for cc in range(6 + (cfarray[rr, 2]&1), cc1 - 6, 2):\n indx = rr * TS + cc\n\n # nyquist texture test: ask if difference of vcd compared to hcd is larger or smaller than RGGB 
gradients\n nyqtest = (gaussodd[0]*cddiffsq[indx] + gaussodd[1]*(cddiffsq[indx-m1]+cddiffsq[indx+p1] + cddiffsq[indx-p1]+cddiffsq[indx+m1]) + gaussodd[2]*(cddiffsq[indx-v2]+cddiffsq[indx-2]+ cddiffsq[indx+2]+cddiffsq[indx+v2]) + gaussodd[3]*(cddiffsq[indx-m2]+cddiffsq[indx+p2] + cddiffsq[indx-p2]+cddiffsq[indx+m2]))\n\n nyqtest -= nyqthresh*(gaussgrad[0]*(delhsq[indx]+delvsq[indx])+gaussgrad[1]*(delhsq[indx-v1]+delvsq[indx-v1]+delhsq[indx+1]+delvsq[indx+1] + delhsq[indx-1]+delvsq[indx-1]+delhsq[indx+v1]+delvsq[indx+v1])+ gaussgrad[2]*(delhsq[indx-m1]+delvsq[indx-m1]+delhsq[indx+p1]+delvsq[indx+p1]+ delhsq[indx-p1]+delvsq[indx-p1]+delhsq[indx+m1]+delvsq[indx+m1])+ gaussgrad[3]*(delhsq[indx-v2]+delvsq[indx-v2]+delhsq[indx-2]+delvsq[indx-2]+ delhsq[indx+2]+delvsq[indx+2]+delhsq[indx+v2]+delvsq[indx+v2])+ gaussgrad[4]*(delhsq[indx-2*TS-1]+delvsq[indx-2*TS-1]+delhsq[indx-2*TS+1]+delvsq[indx-2*TS+1]+ delhsq[indx-TS-2]+delvsq[indx-TS-2]+delhsq[indx-TS+2]+delvsq[indx-TS+2]+ delhsq[indx+TS-2]+delvsq[indx+TS-2]+delhsq[indx+TS+2]+delvsq[indx-TS+2]+ delhsq[indx+2*TS-1]+delvsq[indx+2*TS-1]+delhsq[indx+2*TS+1]+delvsq[indx+2*TS+1])+ gaussgrad[5]*(delhsq[indx-m2]+delvsq[indx-m2]+delhsq[indx+p2]+delvsq[indx+p2]+ delhsq[indx-p2]+delvsq[indx-p2]+delhsq[indx+m2]+delvsq[indx+m2]))\n\n if nyqtest > 0:\n # nyquist=1 for nyquist region\n nyquist[indx] = 1\n \n for rr in range(8, rr1-8):\n for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):\n\n areawt=(nyquist[indx-v2]+nyquist[indx-m1]+nyquist[indx+p1]+nyquist[indx-2]+nyquist[indx]+nyquist[indx+2]+nyquist[indx-p1]+nyquist[indx+m1]+nyquist[indx+v2])\n\n # if most of your neighbors are named Nyquist, it's likely that you're one too\n nyquist[indx] = 1 if areawt > 4 else 0\n\n # end of Nyquist test\n\n # in areas of Nyquist texture, do area interpolation\n for rr in range(8, rr1 - 8):\n for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):\n indx = rr * TS + cc\n if nyquist[indx]:\n # area interpolation\n sumh = sumv = sumsqh = sumsqv = areawt = 0\n for i in range(-6, 7, 2):\n for j in range(-6, 7, 2):\n indx1 = (rr + i) * TS + cc + j\n if nyquist[indx1]:\n sumh += cfa[indx1] - 0.5 * (cfa[indx1-1]+cfa[indx1+1])\n sumv += cfa[indx1] - 0.5 * (cfa[indx1-v1]+cfa[indx1+v1])\n sumsqh += 0.5 * (SQR(cfa[indx1]-cfa[indx1-1]) + SQR(cfa[indx1]-cfa[indx1+1]))\n sumsqv += 0.5 * (SQR(cfa[indx1]-cfa[indx1-v1]) + SQR(cfa[indx1]-cfa[indx1+v1]))\n areawt += 1\n\n # horizontal and vertical color differences, and adaptive weight\n hcdvar = epssq + max(0, areawt*sumsqh-sumh*sumh)\n vcdvar = epssq + max(0, areawt*sumsqv-sumv*sumv)\n hvwt[indx] = hcdvar / (vcdvar + hcdvar)\n \n # end of area interpolation\n \n # populate G at R/B sites\n for rr in range(8, rr1-8):\n for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):\n indx = rr * TS + cc\n\n # first ask if one gets more directional discrimination from nearby B/R sites\n hvwtalt = 0.25 * (hvwt[indx-m1] + hvwt[indx+p1] + hvwt[indx-p1] + hvwt[indx+m1])\n vo = abs(0.5 - hvwt[indx])\n ve = abs(0.5 - hvwtalt)\n # a better result was obtained from the neighbors\n if vo < ve:\n hvwt[indx>>1] = hvwtalt\n # evaluate color differences\n Dgrb[indx][0] = (hcd[indx]*(1-hvwt[indx]) + vcd[indx]*hvwt[indx])\n # evaluate G\n rgb[indx][1] = cfa[indx] + Dgrb[indx][0]\n # local curvature in G (preparation for nyquist refinement step)\n if nyquist[indx]:\n Dgrbh2[indx] = SQR(rgb[indx][1] - 0.5*(rgb[indx-1][1]+rgb[indx+1][1]))\n Dgrbv2[indx] = SQR(rgb[indx][1] - 0.5*(rgb[indx-v1][1]+rgb[indx+v1][1]))\n else:\n Dgrbh2[indx] = Dgrbv2[indx] = 0\n\n # end of standard 
interpolation\n\n\n # refine Nyquist areas using G curvatures\n for rr in range(8, rr1-8):\n for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):\n indx = rr * TS + cc\n if nyquist[indx]:\n # local averages (over Nyquist pixels only) of G curvature squared \n gvarh = epssq + (gquinc[0]*Dgrbh2[indx]+gquinc[1]*(Dgrbh2[indx-m1]+Dgrbh2[indx+p1]+Dgrbh2[indx-p1]+Dgrbh2[indx+m1])+gquinc[2]*(Dgrbh2[indx-v2]+Dgrbh2[indx-2]+Dgrbh2[indx+2]+Dgrbh2[indx+v2])+gquinc[3]*(Dgrbh2[indx-m2]+Dgrbh2[indx+p2]+Dgrbh2[indx-p2]+Dgrbh2[indx+m2]))\n gvarv = epssq + (gquinc[0]*Dgrbv2[indx]+gquinc[1]*(Dgrbv2[indx-m1]+Dgrbv2[indx+p1]+Dgrbv2[indx-p1]+Dgrbv2[indx+m1])+gquinc[2]*(Dgrbv2[indx-v2]+Dgrbv2[indx-2]+Dgrbv2[indx+2]+Dgrbv2[indx+v2])+gquinc[3]*(Dgrbv2[indx-m2]+Dgrbv2[indx+p2]+Dgrbv2[indx-p2]+Dgrbv2[indx+m2]))\n # use the results as weights for refined G interpolation\n Dgrb[indx][0] = (hcd[indx]*gvarv + vcd[indx]*gvarh)/(gvarv+gvarh)\n rgb[indx][1] = cfa[indx] + Dgrb[indx][0]\n \n # diagonal interpolation correction\n for rr in range(8, rr1-8):\n for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):\n indx = rr * TS + cc\n rbvarp = epssq + (gausseven[0]*(Dgrbpsq1[indx-v1]+Dgrbpsq1[indx-1]+Dgrbpsq1[indx+1]+Dgrbpsq1[indx+v1]) + gausseven[1]*(Dgrbpsq1[indx-v2-1]+Dgrbpsq1[indx-v2+1]+Dgrbpsq1[indx-2-v1]+Dgrbpsq1[indx+2-v1]+ Dgrbpsq1[indx-2+v1]+Dgrbpsq1[indx+2+v1]+Dgrbpsq1[indx+v2-1]+Dgrbpsq1[indx+v2+1]))\n rbvarm = epssq + (gausseven[0]*(Dgrbmsq1[indx-v1]+Dgrbmsq1[indx-1]+Dgrbmsq1[indx+1]+Dgrbmsq1[indx+v1]) + gausseven[1]*(Dgrbmsq1[indx-v2-1]+Dgrbmsq1[indx-v2+1]+Dgrbmsq1[indx-2-v1]+Dgrbmsq1[indx+2-v1]+ Dgrbmsq1[indx-2+v1]+Dgrbmsq1[indx+2+v1]+Dgrbmsq1[indx+v2-1]+Dgrbmsq1[indx+v2+1]))\n\n # diagonal color ratios\n crse=2*(cfa[indx+m1])/(eps+cfa[indx]+(cfa[indx+m2]))\n crnw=2*(cfa[indx-m1])/(eps+cfa[indx]+(cfa[indx-m2]))\n crne=2*(cfa[indx+p1])/(eps+cfa[indx]+(cfa[indx+p2]))\n crsw=2*(cfa[indx-p1])/(eps+cfa[indx]+(cfa[indx-p2]))\n\n # assign B/R at R/B sites: adaptive ratio near 1, otherwise Hamilton-Adams\n if abs(1 - crse) < arthresh:\n rbse = cfa[indx] * crse\n else:\n rbse = cfa[indx + m1] + 0.5 * (cfa[indx] - cfa[indx + m2])\n\n if abs(1 - crnw) < arthresh:\n rbnw = cfa[indx] * crnw\n else:\n rbnw = (cfa[indx - m1]) + 0.5 * (cfa[indx] - cfa[indx - m2])\n\n if abs(1 - crne) < arthresh:\n rbne = cfa[indx] * crne\n else:\n rbne = (cfa[indx + p1]) + 0.5 * (cfa[indx] - cfa[indx + p2])\n\n if abs(1 - crsw) < arthresh:\n rbsw = cfa[indx] * crsw\n else:\n rbsw = (cfa[indx - p1]) + 0.5 * (cfa[indx] - cfa[indx - p2])\n \n wtse= eps+delm[indx]+delm[indx+m1]+delm[indx+m2] # same as for wtu,wtd,wtl,wtr\n wtnw= eps+delm[indx]+delm[indx-m1]+delm[indx-m2]\n wtne= eps+delp[indx]+delp[indx+p1]+delp[indx+p2]\n wtsw= eps+delp[indx]+delp[indx-p1]+delp[indx-p2]\n\n rbm[indx] = (wtse*rbnw+wtnw*rbse)/(wtse+wtnw)\n rbp[indx] = (wtne*rbsw+wtsw*rbne)/(wtne+wtsw)\n\n pmwt[indx] = rbvarm/(rbvarp+rbvarm)\n\n # bound the interpolation in regions of high saturation\n if rbp[indx] < cfa[indx]:\n if 2 * (rbp[indx]) < cfa[indx]:\n rbp[indx] = np.median([rbp[indx], cfa[indx - p1], cfa[indx + p1]])\n else:\n pwt = 2 * (cfa[indx] - rbp[indx]) / (eps + rbp[indx] + cfa[indx])\n rbp[indx] = pwt * rbp[indx] + (1 - pwt) * np.median([rbp[indx], cfa[indx - p1], cfa[indx + p1]])\n\n if rbm[indx] < cfa[indx]:\n if 2 * (rbm[indx]) < cfa[indx]:\n rbm[indx] = np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])\n else:\n mwt = 2 * (cfa[indx] - rbm[indx]) / (eps + rbm[indx] + cfa[indx])\n rbm[indx] = mwt * rbm[indx] + (1 - mwt) * np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])\n\n if rbp[indx] > clip_pt:\n rbp[indx] = np.median([rbp[indx], cfa[indx - 
p1], cfa[indx + p1]])\n\n if rbm[indx] > clip_pt:\n rbm[indx] = np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])\n\n for rr in range(10, rr1-10):\n for cc in range(10 + (cfarray[rr, 2]&1), cc1-10, 2):\n indx = rr * TS + cc\n \n # first ask if one gets more directional discrimination from nearby B/R sites\n pmwtalt = 0.25*(pmwt[indx-m1]+pmwt[indx+p1]+pmwt[indx-p1]+pmwt[indx+m1])\n vo = abs(0.5-pmwt[indx])\n ve = abs(0.5-pmwtalt)\n if vo < ve:\n pmwt[indx] = pmwtalt\n rbint[indx] = 0.5*(cfa[indx] + rbm[indx]*(1-pmwt[indx]) + rbp[indx]*pmwt[indx])\n\n for rr in range(12, rr1 - 12):\n for cc in range(12 + (cfarray[rr, 2]&1), cc1 - 12, 2):\n indx = rr * TS + cc\n if abs(0.5 - pmwt[indx]) < abs(0.5 - hvwt[indx]):\n continue\n \n # now interpolate G vertically/horizontally using R+B values\n # unfortunately, since G interpolation cannot be done diagonally this may lead to colour shifts\n # colour ratios for G interpolation\n cru = cfa[indx-v1]*2/(eps+rbint[indx]+rbint[indx-v2])\n crd = cfa[indx+v1]*2/(eps+rbint[indx]+rbint[indx+v2])\n crl = cfa[indx-1]*2/(eps+rbint[indx]+rbint[indx-2])\n crr = cfa[indx+1]*2/(eps+rbint[indx]+rbint[indx+2])\n\n # interpolated G via adaptive ratios or Hamilton-Adams in each cardinal direction\n if abs(1 - cru) < arthresh:\n gu = rbint[indx] * cru\n else:\n gu = cfa[indx - v1] + 0.5 * (rbint[indx] - rbint[(indx - v1)])\n\n if abs(1 - crd) < arthresh:\n gd = rbint[indx] * crd\n else:\n gd = cfa[indx + v1] + 0.5 * (rbint[indx] - rbint[(indx + v1)])\n\n if abs(1 - crl) < arthresh:\n gl = rbint[indx] * crl\n else:\n gl = cfa[indx - 1] + 0.5 * (rbint[indx] - rbint[(indx - 1)])\n\n if abs(1 - crr) < arthresh:\n gr = rbint[indx] * crr\n else:\n gr = cfa[indx + 1] + 0.5 * (rbint[indx] - rbint[(indx + 1)])\n\n # interpolated G via adaptive weights of cardinal evaluations\n Gintv = (dirwts[indx - v1][0] * gd + dirwts[indx + v1][0] * gu) / (dirwts[indx + v1][0] + dirwts[indx - v1][0])\n Ginth = (dirwts[indx - 1][1] * gr + dirwts[indx + 1][1] * gl) / (dirwts[indx - 1][1] + dirwts[indx + 1][1])\n\n # bound the interpolation in regions of high saturation\n if Gintv < rbint[indx]:\n if (2 * Gintv < rbint[indx]):\n Gintv = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])\n else:\n vwt = 2 * (rbint[indx] - Gintv) / (eps + Gintv + rbint[indx])\n Gintv = vwt * Gintv + (1 - vwt) * np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])\n\n if Ginth < rbint[indx]:\n if 2 * Ginth < rbint[indx]:\n Ginth = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])\n else:\n hwt = 2 * (rbint[indx] - Ginth) / (eps + Ginth + rbint[indx])\n Ginth = hwt * Ginth + (1 - hwt) * np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])\n \n if Ginth > clip_pt:\n Ginth = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])\n\n if Gintv > clip_pt:\n Gintv = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])\n \n rgb[indx][1] = Ginth*(1-hvwt[indx]) + Gintv*hvwt[indx]\n Dgrb[indx][0] = rgb[indx][1]-cfa[indx]\n\n # end of diagonal interpolation correction\n\n # fancy chrominance interpolation\n # (ey,ex) is location of R site\n for rr in range(13-ey, rr1-12, 2):\n for cc in range(13-ex, cc1-12, 2):\n indx = rr*TS+cc\n Dgrb[indx][1]=Dgrb[indx][0] # split out G-B from G-R\n Dgrb[indx][0]=0\n\n for rr in range(12, rr1-12):\n c = int(1- cfarray[rr, 12+(cfarray[rr,2]&1)]/2)\n for cc in range(12+(cfarray[rr,2]&1), cc1-12, 2):\n indx = rr * TS + cc\n wtnw=1/(eps+abs(Dgrb[indx-m1][c]-Dgrb[indx+m1][c])+abs(Dgrb[indx-m1][c]-Dgrb[indx-m3][c])+abs(Dgrb[indx+m1][c]-Dgrb[indx-m3][c]))\n 
wtne=1/(eps+abs(Dgrb[indx+p1][c]-Dgrb[indx-p1][c])+abs(Dgrb[indx+p1][c]-Dgrb[indx+p3][c])+abs(Dgrb[indx-p1][c]-Dgrb[indx+p3][c]))\n wtsw=1/(eps+abs(Dgrb[indx-p1][c]-Dgrb[indx+p1][c])+abs(Dgrb[indx-p1][c]-Dgrb[indx+m3][c])+abs(Dgrb[indx+p1][c]-Dgrb[indx-p3][c]))\n wtse=1/(eps+abs(Dgrb[indx+m1][c]-Dgrb[indx-m1][c])+abs(Dgrb[indx+m1][c]-Dgrb[indx-p3][c])+abs(Dgrb[indx-m1][c]-Dgrb[indx+m3][c]))\n \n Dgrb[indx][c]=(wtnw*(1.325*Dgrb[indx-m1][c]-0.175*Dgrb[indx-m3][c]-0.075*Dgrb[indx-m1-2][c]-0.075*Dgrb[indx-m1-v2][c] )+ wtne*(1.325*Dgrb[indx+p1][c]-0.175*Dgrb[indx+p3][c]-0.075*Dgrb[indx+p1+2][c]-0.075*Dgrb[indx+p1+v2][c] )+ wtsw*(1.325*Dgrb[indx-p1][c]-0.175*Dgrb[indx-p3][c]-0.075*Dgrb[indx-p1-2][c]-0.075*Dgrb[indx-p1-v2][c] )+ wtse*(1.325*Dgrb[indx+m1][c]-0.175*Dgrb[indx+m3][c]-0.075*Dgrb[indx+m1+2][c]-0.075*Dgrb[indx+m1+v2][c] ))/(wtnw+wtne+wtsw+wtse)\n\n for rr in range(12, rr1-12):\n # c = int(cfarray[rr, 12+(cfarray[rr,1]&1)+1]/2)\n for cc in range(12+(cfarray[rr,1]&1), cc1-12, 2):\n indx = rr * TS + cc\n for c in range(2):\n Dgrb[indx][c]=((hvwt[indx-v1])*Dgrb[indx-v1][c]+(1-hvwt[indx+1])*Dgrb[indx+1][c]+(1-hvwt[indx-1])*Dgrb[indx-1][c]+(hvwt[indx+v1])*Dgrb[indx+v1][c])/((hvwt[indx-v1])+(1-hvwt[indx+1])+(1-hvwt[indx-1])+(hvwt[indx+v1]))\n\n for rr in range(12, rr1-12):\n for cc in range(12, cc1-12):\n indx = rr * TS + cc\n rgb[indx][0]=(rgb[indx][1]-Dgrb[indx][0])\n rgb[indx][2]=(rgb[indx][1]-Dgrb[indx][1])\n\n \n # copy smoothed results back to image matrix\n for rr in range(16, rr1-16):\n row = rr + top\n for cc in range(16, cc1-16):\n col = cc + left\n\n for c in range(3):\n image[row, col, c] = int(rgb[rr*TS+cc, c] * 65535 + 0.5)\n \n # end of main loop\n return image\n\n# Define some utility functions for demosaicing\n\n# For AMaZE\ndef fc(cfa, r, c):\n return cfa[r&1, c&1]\n\ndef intp(a, b, c):\n return a * (b - c) + c\n\ndef SQR(x):\n return x ** 2"
] | [
[
"numpy.median",
"numpy.empty"
]
] |
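
A minimal sketch (plain NumPy, not part of the dataset row above) of two numeric idioms the AMaZE port relies on: np.median over three values acts as a clamp, which is how the code bounds interpolated values between the two neighboring CFA samples in regions of high saturation, and the area-interpolation step scores texture with an expression of the same shape as the variance identity n*sum(d**2) - (sum d)**2 = n**2 * var(d).

import numpy as np

# median of three values == the middle value clamped to the other two:
# for lo <= hi, np.median([x, lo, hi]) equals min(max(x, lo), hi)
assert np.median([5.0, 0.0, 1.0]) == 1.0   # above the range -> clamped to hi
assert np.median([0.5, 0.0, 1.0]) == 0.5   # inside the range -> unchanged

# n*sum(d^2) - (sum d)^2 equals n^2 times the population variance;
# the areawt*sumsqh - sumh*sumh expression above has this shape,
# built from running sums so no second pass over the window is needed
d = np.array([0.2, 0.4, 0.9, 0.1])
n = d.size
assert np.isclose(n * np.sum(d**2) - np.sum(d)**2, n**2 * np.var(d))
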
daxpryce/graspologic | [
"b076f58ca03a41eb2e1462d20a61ff09abfd6045"
] | [
"tests/test_plot.py"
] | [
"# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport unittest\n\nimport numpy as np\nfrom sklearn.mixture import GaussianMixture\n\nfrom graspologic.plot.plot import (\n _sort_inds,\n gridplot,\n heatmap,\n pairplot,\n pairplot_with_gmm,\n)\nfrom graspologic.simulations.simulations import er_np, sbm\n\n\ndef _test_pairplot_with_gmm_inputs(caller: unittest.TestCase, **kws):\n X = np.random.rand(15, 3)\n gmm = GaussianMixture(n_components=3, **kws).fit(X)\n labels = [\"A\"] * 5 + [\"B\"] * 5 + [\"C\"] * 5\n # test data\n with caller.assertRaises(ValueError):\n pairplot_with_gmm(X=\"test\", gmm=gmm)\n\n with caller.assertRaises(ValueError):\n pairplot_with_gmm(X=X, gmm=gmm, labels=[\"A\"])\n\n with caller.assertRaises(NameError):\n pairplot_with_gmm(X, gmm=None)\n\n\ndef _test_pairplot_with_gmm_outputs(**kws):\n X = np.random.rand(15, 3)\n gmm = GaussianMixture(n_components=3, **kws).fit(X)\n labels = [\"A\"] * 5 + [\"B\"] * 5 + [\"C\"] * 5\n cluster_palette = {0: \"red\", 1: \"blue\", 2: \"green\"}\n label_palette = {\"A\": \"red\", \"B\": \"blue\", \"C\": \"green\"}\n fig = pairplot_with_gmm(X, gmm)\n fig = pairplot_with_gmm(\n X,\n gmm,\n labels=labels,\n cluster_palette=cluster_palette,\n label_palette=label_palette,\n )\n\n\nclass TestPlot(unittest.TestCase):\n def test_common_inputs(self):\n X = er_np(100, 0.5)\n grid_labels = [\"Test1\"]\n\n # test figsize\n with self.assertRaises(TypeError):\n figsize = \"bad figsize\"\n heatmap(X, figsize=figsize)\n\n # test height\n height = \"1\"\n with self.assertRaises(TypeError):\n gridplot([X], grid_labels, height=height)\n with self.assertRaises(TypeError):\n pairplot(X, height=height)\n\n # test title\n title = 1\n with self.assertRaises(TypeError):\n heatmap(X, title=title)\n with self.assertRaises(TypeError):\n gridplot([X], grid_labels, title=title)\n with self.assertRaises(TypeError):\n pairplot(X, title=title)\n\n # test context\n context = 123\n with self.assertRaises(TypeError):\n heatmap(X, context=context)\n with self.assertRaises(TypeError):\n gridplot([X], grid_labels, context=context)\n with self.assertRaises(TypeError):\n pairplot(X, context=context)\n\n context = \"journal\"\n with self.assertRaises(ValueError):\n heatmap(X, context=context)\n with self.assertRaises(ValueError):\n gridplot([X], grid_labels, context=context)\n with self.assertRaises(ValueError):\n pairplot(X, context=context)\n\n # test font scales\n font_scales = [\"1\", []]\n for font_scale in font_scales:\n with self.assertRaises(TypeError):\n heatmap(X, font_scale=font_scale)\n with self.assertRaises(TypeError):\n gridplot([X], grid_labels, font_scale=font_scale)\n with self.assertRaises(TypeError):\n pairplot(X, cont_scale=font_scale)\n\n # ticklabels\n with self.assertRaises(TypeError):\n xticklabels = \"labels\"\n yticklabels = \"labels\"\n heatmap(X, xticklabels=xticklabels, yticklabels=yticklabels)\n\n with self.assertRaises(ValueError):\n xticklabels = [\"{}\".format(i) for i in range(5)]\n yticklabels = [\"{}\".format(i) for i in range(5)]\n heatmap(X, xticklabels=xticklabels, yticklabels=yticklabels)\n\n with self.assertRaises(TypeError):\n heatmap(X, title_pad=\"f\")\n\n with self.assertRaises(TypeError):\n gridplot([X], title_pad=\"f\")\n\n with self.assertRaises(TypeError):\n heatmap(X, hier_label_fontsize=\"f\")\n\n with self.assertRaises(TypeError):\n gridplot([X], hier_label_fontsize=\"f\")\n\n def test_heatmap_inputs(self):\n \"\"\"\n test parameter checks\n \"\"\"\n X = np.random.rand(10, 
10)\n\n with self.assertRaises(TypeError):\n heatmap(X=\"input\")\n\n # transform\n with self.assertRaises(ValueError):\n transform = \"bad transform\"\n heatmap(X, transform=transform)\n\n # cmap\n with self.assertRaises(TypeError):\n cmap = 123\n heatmap(X, cmap=cmap)\n\n # center\n with self.assertRaises(TypeError):\n center = \"center\"\n heatmap(X, center=center)\n\n # cbar\n with self.assertRaises(TypeError):\n cbar = 1\n heatmap(X, cbar=cbar)\n\n def test_heatmap_output(self):\n \"\"\"\n simple function to see if plot is made without errors\n \"\"\"\n X = er_np(10, 0.5)\n xticklabels = [\"Dimension {}\".format(i) for i in range(10)]\n yticklabels = [\"Dimension {}\".format(i) for i in range(10)]\n\n fig = heatmap(\n X, transform=\"log\", xticklabels=xticklabels, yticklabels=yticklabels\n )\n fig = heatmap(X, transform=\"zero-boost\")\n fig = heatmap(X, transform=\"simple-all\")\n fig = heatmap(X, transform=\"simple-nonzero\")\n fig = heatmap(X, transform=\"binarize\")\n fig = heatmap(X, cmap=\"gist_rainbow\")\n\n def test_gridplot_inputs(self):\n X = [er_np(10, 0.5)]\n labels = [\"ER(10, 0.5)\"]\n\n with self.assertRaises(TypeError):\n gridplot(X=\"input\", labels=labels)\n\n with self.assertRaises(ValueError):\n gridplot(X, labels=[\"a\", \"b\"])\n\n # transform\n with self.assertRaises(ValueError):\n transform = \"bad transform\"\n gridplot(X, labels=labels, transform=transform)\n\n def test_gridplot_outputs(self):\n \"\"\"\n simple function to see if plot is made without errors\n \"\"\"\n X = [er_np(10, 0.5) for _ in range(2)]\n labels = [\"Random A\", \"Random B\"]\n fig = gridplot(X, labels)\n fig = gridplot(X, labels, transform=\"zero-boost\")\n fig = gridplot(X, labels, \"simple-all\", title=\"Test\", font_scale=0.9)\n\n def test_pairplot_inputs(self):\n X = np.random.rand(15, 3)\n Y = [\"A\"] * 5 + [\"B\"] * 5 + [\"C\"] * 5\n\n # test data\n with self.assertRaises(TypeError):\n pairplot(X=\"test\")\n\n with self.assertRaises(ValueError):\n pairplot(X=X, labels=[\"A\"])\n\n with self.assertRaises(TypeError):\n pairplot(X, col_names=\"A\")\n\n with self.assertRaises(ValueError):\n pairplot(X, col_names=[\"1\", \"2\"])\n\n with self.assertRaises(ValueError):\n pairplot(X, col_names=[\"1\", \"2\", \"3\"], variables=[1, 2, 3, 4])\n\n with self.assertRaises(KeyError):\n pairplot(X, col_names=[\"1\", \"2\", \"3\"], variables=[\"A\", \"B\"])\n\n def test_pairplot_outputs(self):\n X = np.random.rand(15, 3)\n Y = [\"A\"] * 5 + [\"B\"] * 5 + [\"C\"] * 5\n col_names = [\"Feature1\", \"Feature2\", \"Feature3\"]\n\n fig = pairplot(X)\n fig = pairplot(X, Y)\n fig = pairplot(X, Y, col_names)\n fig = pairplot(\n X,\n Y,\n col_names,\n title=\"Test\",\n height=1.5,\n variables=[\"Feature1\", \"Feature2\"],\n )\n\n def test_pairplot_with_gmm_inputs_type_full(self):\n _test_pairplot_with_gmm_inputs(self, covariance_type=\"full\")\n\n def test_pairplot_with_gmm_inputs_type_diag(self):\n _test_pairplot_with_gmm_inputs(self, covariance_type=\"diag\")\n\n def test_pairplot_with_gmm_inputs_type_tied(self):\n _test_pairplot_with_gmm_inputs(self, covariance_type=\"tied\")\n\n def test_pairplot_with_gmm_inputs_type_spherical(self):\n _test_pairplot_with_gmm_inputs(self, covariance_type=\"spherical\")\n\n def test_pairplot_with_gmm_outputs_type_full(self):\n _test_pairplot_with_gmm_outputs(covariance_type=\"full\")\n\n def test_pairplot_with_gmm_outputs_type_diag(self):\n _test_pairplot_with_gmm_outputs(covariance_type=\"diag\")\n\n def test_pairplot_with_gmm_outputs_type_tied(self):\n 
_test_pairplot_with_gmm_outputs(covariance_type=\"tied\")\n\n def test_pairplot_with_gmm_outputs_type_spherical(self):\n _test_pairplot_with_gmm_outputs(covariance_type=\"spherical\")\n\n def test_sort_inds(self):\n B = np.array(\n [\n [0, 0.2, 0.1, 0.1, 0.1],\n [0.2, 0.8, 0.1, 0.3, 0.1],\n [0.15, 0.1, 0, 0.05, 0.1],\n [0.1, 0.1, 0.2, 1, 0.1],\n [0.1, 0.2, 0.1, 0.1, 0.8],\n ]\n )\n\n g = sbm([10, 30, 50, 25, 25], B, directed=True)\n degrees = g.sum(axis=0) + g.sum(axis=1)\n degree_sort_inds = np.argsort(degrees)\n labels2 = 40 * [\"0\"] + 100 * [\"1\"]\n labels1 = 10 * [\"d\"] + 30 * [\"c\"] + 50 * [\"d\"] + 25 * [\"e\"] + 25 * [\"c\"]\n labels1 = np.array(labels1)\n labels2 = np.array(labels2)\n sorted_inds = _sort_inds(g, labels1, labels2, True)\n # sort outer blocks first if given, sort by num verts in the block\n # for inner hier, sort by num verts for that category across the entire graph\n # ie if there are multiple inner hier across different outer blocks, sort\n # by prevalence in the entire graph, not within block\n # this is to make the ordering within outer block consistent\n # within a block, sort by degree\n\n # outer block order should thus be: 1, 0\n # inner block order should thus be: d, c, e\n\n # show that outer blocks are sorted correctly\n labels2 = labels2[sorted_inds]\n self.assertTrue(np.all(labels2[:100] == \"1\"))\n self.assertTrue(np.all(labels2[100:] == \"0\"))\n\n # show that inner blocks are sorted correctly\n labels1 = labels1[sorted_inds]\n self.assertTrue(np.all(labels1[:50] == \"d\"))\n self.assertTrue(np.all(labels1[50:75] == \"c\"))\n self.assertTrue(np.all(labels1[75:100] == \"e\"))\n self.assertTrue(np.all(labels1[100:110] == \"d\"))\n self.assertTrue(np.all(labels1[110:] == \"c\"))\n\n # show that within block, everything is in descending degree order\n degrees = degrees[sorted_inds]\n self.assertTrue(np.all(np.diff(degrees[:50]) <= 0))\n self.assertTrue(np.all(np.diff(degrees[50:75]) <= 0))\n self.assertTrue(np.all(np.diff(degrees[75:100]) <= 0))\n self.assertTrue(np.all(np.diff(degrees[100:110]) <= 0))\n self.assertTrue(np.all(np.diff(degrees[110:]) <= 0))\n"
] | [
[
"numpy.diff",
"numpy.argsort",
"numpy.all",
"numpy.random.rand",
"numpy.array",
"sklearn.mixture.GaussianMixture"
]
] |
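
The test_sort_inds case above verifies the plotting sort order by asserting that the degree sequence within each block is non-increasing. A minimal sketch (plain NumPy, independent of graspologic) of the degree computation and the monotonicity check it uses:

import numpy as np

# total degree of a directed graph: out-degree (row sums) plus in-degree (column sums)
g = np.array([[0, 1, 1, 1],
              [0, 0, 1, 0],
              [0, 0, 0, 0],
              [0, 0, 0, 0]])
degrees = g.sum(axis=0) + g.sum(axis=1)    # -> array([3, 2, 2, 1])
order = np.argsort(degrees)[::-1]          # descending-degree node ordering

# a sequence is non-increasing exactly when all consecutive differences are <= 0,
# which is the np.all(np.diff(...) <= 0) assertion the test applies per block
assert np.all(np.diff(degrees[order]) <= 0)
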
TUDelftHao/models | [
"faf0c2dc442ceaa8425aff73abd00f92f3137b7b",
"faf0c2dc442ceaa8425aff73abd00f92f3137b7b"
] | [
"research/slim/nets/mobilenet_v1.py",
"research/object_detection/core/box_list_ops.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"MobileNet v1.\n\nMobileNet is a general architecture and can be used for multiple use cases.\nDepending on the use case, it can use different input layer size and different\nhead (for example: embeddings, localization and classification).\n\nAs described in https://arxiv.org/abs/1704.04861.\n\n MobileNets: Efficient Convolutional Neural Networks for\n Mobile Vision Applications\n Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang,\n Tobias Weyand, Marco Andreetto, Hartwig Adam\n\n100% Mobilenet V1 (base) with input size 224x224:\n\nSee mobilenet_v1()\n\nLayer params macs\n--------------------------------------------------------------------------------\nMobilenetV1/Conv2d_0/Conv2D: 864 10,838,016\nMobilenetV1/Conv2d_1_depthwise/depthwise: 288 3,612,672\nMobilenetV1/Conv2d_1_pointwise/Conv2D: 2,048 25,690,112\nMobilenetV1/Conv2d_2_depthwise/depthwise: 576 1,806,336\nMobilenetV1/Conv2d_2_pointwise/Conv2D: 8,192 25,690,112\nMobilenetV1/Conv2d_3_depthwise/depthwise: 1,152 3,612,672\nMobilenetV1/Conv2d_3_pointwise/Conv2D: 16,384 51,380,224\nMobilenetV1/Conv2d_4_depthwise/depthwise: 1,152 903,168\nMobilenetV1/Conv2d_4_pointwise/Conv2D: 32,768 25,690,112\nMobilenetV1/Conv2d_5_depthwise/depthwise: 2,304 1,806,336\nMobilenetV1/Conv2d_5_pointwise/Conv2D: 65,536 51,380,224\nMobilenetV1/Conv2d_6_depthwise/depthwise: 2,304 451,584\nMobilenetV1/Conv2d_6_pointwise/Conv2D: 131,072 25,690,112\nMobilenetV1/Conv2d_7_depthwise/depthwise: 4,608 903,168\nMobilenetV1/Conv2d_7_pointwise/Conv2D: 262,144 51,380,224\nMobilenetV1/Conv2d_8_depthwise/depthwise: 4,608 903,168\nMobilenetV1/Conv2d_8_pointwise/Conv2D: 262,144 51,380,224\nMobilenetV1/Conv2d_9_depthwise/depthwise: 4,608 903,168\nMobilenetV1/Conv2d_9_pointwise/Conv2D: 262,144 51,380,224\nMobilenetV1/Conv2d_10_depthwise/depthwise: 4,608 903,168\nMobilenetV1/Conv2d_10_pointwise/Conv2D: 262,144 51,380,224\nMobilenetV1/Conv2d_11_depthwise/depthwise: 4,608 903,168\nMobilenetV1/Conv2d_11_pointwise/Conv2D: 262,144 51,380,224\nMobilenetV1/Conv2d_12_depthwise/depthwise: 4,608 225,792\nMobilenetV1/Conv2d_12_pointwise/Conv2D: 524,288 25,690,112\nMobilenetV1/Conv2d_13_depthwise/depthwise: 9,216 451,584\nMobilenetV1/Conv2d_13_pointwise/Conv2D: 1,048,576 51,380,224\n--------------------------------------------------------------------------------\nTotal: 3,185,088 567,716,352\n\n\n75% Mobilenet V1 (base) with input size 128x128:\n\nSee mobilenet_v1_075()\n\nLayer params macs\n--------------------------------------------------------------------------------\nMobilenetV1/Conv2d_0/Conv2D: 648 2,654,208\nMobilenetV1/Conv2d_1_depthwise/depthwise: 216 884,736\nMobilenetV1/Conv2d_1_pointwise/Conv2D: 1,152 4,718,592\nMobilenetV1/Conv2d_2_depthwise/depthwise: 432 442,368\nMobilenetV1/Conv2d_2_pointwise/Conv2D: 4,608 
4,718,592\nMobilenetV1/Conv2d_3_depthwise/depthwise: 864 884,736\nMobilenetV1/Conv2d_3_pointwise/Conv2D: 9,216 9,437,184\nMobilenetV1/Conv2d_4_depthwise/depthwise: 864 221,184\nMobilenetV1/Conv2d_4_pointwise/Conv2D: 18,432 4,718,592\nMobilenetV1/Conv2d_5_depthwise/depthwise: 1,728 442,368\nMobilenetV1/Conv2d_5_pointwise/Conv2D: 36,864 9,437,184\nMobilenetV1/Conv2d_6_depthwise/depthwise: 1,728 110,592\nMobilenetV1/Conv2d_6_pointwise/Conv2D: 73,728 4,718,592\nMobilenetV1/Conv2d_7_depthwise/depthwise: 3,456 221,184\nMobilenetV1/Conv2d_7_pointwise/Conv2D: 147,456 9,437,184\nMobilenetV1/Conv2d_8_depthwise/depthwise: 3,456 221,184\nMobilenetV1/Conv2d_8_pointwise/Conv2D: 147,456 9,437,184\nMobilenetV1/Conv2d_9_depthwise/depthwise: 3,456 221,184\nMobilenetV1/Conv2d_9_pointwise/Conv2D: 147,456 9,437,184\nMobilenetV1/Conv2d_10_depthwise/depthwise: 3,456 221,184\nMobilenetV1/Conv2d_10_pointwise/Conv2D: 147,456 9,437,184\nMobilenetV1/Conv2d_11_depthwise/depthwise: 3,456 221,184\nMobilenetV1/Conv2d_11_pointwise/Conv2D: 147,456 9,437,184\nMobilenetV1/Conv2d_12_depthwise/depthwise: 3,456 55,296\nMobilenetV1/Conv2d_12_pointwise/Conv2D: 294,912 4,718,592\nMobilenetV1/Conv2d_13_depthwise/depthwise: 6,912 110,592\nMobilenetV1/Conv2d_13_pointwise/Conv2D: 589,824 9,437,184\n--------------------------------------------------------------------------------\nTotal: 1,800,144 106,002,432\n\n\"\"\"\n\n# Tensorflow mandates these.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nimport functools\n\nimport tensorflow.compat.v1 as tf\nimport tf_slim as slim\n\n# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture\n# Conv defines 3x3 convolution layers\n# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution.\n# stride is the stride of the convolution\n# depth is the number of channels or filters in a layer\nConv = namedtuple('Conv', ['kernel', 'stride', 'depth'])\nDepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])\n\n# MOBILENETV1_CONV_DEFS specifies the MobileNet body\nMOBILENETV1_CONV_DEFS = [\n Conv(kernel=[3, 3], stride=2, depth=32),\n DepthSepConv(kernel=[3, 3], stride=1, depth=64),\n DepthSepConv(kernel=[3, 3], stride=2, depth=128),\n DepthSepConv(kernel=[3, 3], stride=1, depth=128),\n DepthSepConv(kernel=[3, 3], stride=2, depth=256),\n DepthSepConv(kernel=[3, 3], stride=1, depth=256),\n DepthSepConv(kernel=[3, 3], stride=2, depth=512),\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\n DepthSepConv(kernel=[3, 3], stride=1, depth=512),\n DepthSepConv(kernel=[3, 3], stride=2, depth=1024),\n DepthSepConv(kernel=[3, 3], stride=1, depth=1024)\n]\n\n\ndef _fixed_padding(inputs, kernel_size, rate=1):\n \"\"\"Pads the input along the spatial dimensions independently of input size.\n\n Pads the input such that if it was used in a convolution with 'VALID' padding,\n the output would have the same dimensions as if the unpadded input was used\n in a convolution with 'SAME' padding.\n\n Args:\n inputs: A tensor of size [batch, height_in, width_in, channels].\n kernel_size: The kernel to be used in the conv2d or max_pool2d operation.\n rate: An integer, rate for atrous convolution.\n\n Returns:\n output: A tensor of size [batch, height_out, width_out, channels] with the\n input, either intact (if 
kernel_size == 1) or padded (if kernel_size > 1).\n \"\"\"\n kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),\n kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]\n pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]\n pad_beg = [pad_total[0] // 2, pad_total[1] // 2]\n pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]\n padded_inputs = tf.pad(\n tensor=inputs,\n paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],\n [0, 0]])\n return padded_inputs\n\n\ndef mobilenet_v1_base(inputs,\n final_endpoint='Conv2d_13_pointwise',\n min_depth=8,\n depth_multiplier=1.0,\n conv_defs=None,\n output_stride=None,\n use_explicit_padding=False,\n scope=None):\n \"\"\"Mobilenet v1.\n\n Constructs a Mobilenet v1 network from inputs to the given final endpoint.\n\n Args:\n inputs: a tensor of shape [batch_size, height, width, channels].\n final_endpoint: specifies the endpoint to construct the network up to. It\n can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',\n 'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5'_pointwise,\n 'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',\n 'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',\n 'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].\n min_depth: Minimum depth value (number of channels) for all convolution ops.\n Enforced when depth_multiplier < 1, and not an active constraint when\n depth_multiplier >= 1.\n depth_multiplier: Float multiplier for the depth (number of channels)\n for all convolution ops. The value must be greater than zero. Typical\n usage will be to set this value in (0, 1) to reduce the number of\n parameters or computation cost of the model.\n conv_defs: A list of ConvDef namedtuples specifying the net architecture.\n output_stride: An integer that specifies the requested ratio of input to\n output spatial resolution. If not None, then we invoke atrous convolution\n if necessary to prevent the network from reducing the spatial resolution\n of the activation maps. Allowed values are 8 (accurate fully convolutional\n mode), 16 (fast fully convolutional mode), 32 (classification mode).\n use_explicit_padding: Use 'VALID' padding for convolutions, but prepad\n inputs so that the output dimensions are the same as if 'SAME' padding\n were used.\n scope: Optional variable_scope.\n\n Returns:\n tensor_out: output tensor corresponding to the final_endpoint.\n end_points: a set of activations for external use, for example summaries or\n losses.\n\n Raises:\n ValueError: if final_endpoint is not set to one of the predefined values,\n or depth_multiplier <= 0, or the target output_stride is not\n allowed.\n \"\"\"\n depth = lambda d: max(int(d * depth_multiplier), min_depth)\n end_points = {}\n\n # Used to find thinned depths for each layer.\n if depth_multiplier <= 0:\n raise ValueError('depth_multiplier is not greater than zero.')\n\n if conv_defs is None:\n conv_defs = MOBILENETV1_CONV_DEFS\n\n if output_stride is not None and output_stride not in [8, 16, 32]:\n raise ValueError('Only allowed output_stride values are 8, 16, 32.')\n\n padding = 'SAME'\n if use_explicit_padding:\n padding = 'VALID'\n with tf.variable_scope(scope, 'MobilenetV1', [inputs]):\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):\n # The current_stride variable keeps track of the output stride of the\n # activations, i.e., the running product of convolution strides up to the\n # current network layer. 
This allows us to invoke atrous convolution\n # whenever applying the next convolution would result in the activations\n # having output stride larger than the target output_stride.\n current_stride = 1\n\n # The atrous convolution rate parameter.\n rate = 1\n\n net = inputs\n for i, conv_def in enumerate(conv_defs):\n end_point_base = 'Conv2d_%d' % i\n\n if output_stride is not None and current_stride == output_stride:\n # If we have reached the target output_stride, then we need to employ\n # atrous convolution with stride=1 and multiply the atrous rate by the\n # current unit's stride for use in subsequent layers.\n layer_stride = 1\n layer_rate = rate\n rate *= conv_def.stride\n else:\n layer_stride = conv_def.stride\n layer_rate = 1\n current_stride *= conv_def.stride\n\n if isinstance(conv_def, Conv):\n end_point = end_point_base\n if use_explicit_padding:\n net = _fixed_padding(net, conv_def.kernel)\n net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,\n stride=conv_def.stride,\n scope=end_point)\n end_points[end_point] = net\n if end_point == final_endpoint:\n return net, end_points\n\n elif isinstance(conv_def, DepthSepConv):\n end_point = end_point_base + '_depthwise'\n\n # By passing filters=None\n # separable_conv2d produces only a depthwise convolution layer\n if use_explicit_padding:\n net = _fixed_padding(net, conv_def.kernel, layer_rate)\n net = slim.separable_conv2d(net, None, conv_def.kernel,\n depth_multiplier=1,\n stride=layer_stride,\n rate=layer_rate,\n scope=end_point)\n\n end_points[end_point] = net\n if end_point == final_endpoint:\n return net, end_points\n\n end_point = end_point_base + '_pointwise'\n\n net = slim.conv2d(net, depth(conv_def.depth), [1, 1],\n stride=1,\n scope=end_point)\n\n end_points[end_point] = net\n if end_point == final_endpoint:\n return net, end_points\n else:\n raise ValueError('Unknown convolution type %s for layer %d'\n % (conv_def.ltype, i))\n raise ValueError('Unknown final endpoint %s' % final_endpoint)\n\n\ndef mobilenet_v1(inputs,\n num_classes=1000,\n dropout_keep_prob=0.999,\n is_training=True,\n min_depth=8,\n depth_multiplier=1.0,\n conv_defs=None,\n prediction_fn=slim.softmax,\n spatial_squeeze=True,\n reuse=None,\n scope='MobilenetV1',\n global_pool=False):\n \"\"\"Mobilenet v1 model for classification.\n\n Args:\n inputs: a tensor of shape [batch_size, height, width, channels].\n num_classes: number of predicted classes. If 0 or None, the logits layer\n is omitted and the input features to the logits layer (before dropout)\n are returned instead.\n dropout_keep_prob: the percentage of activation values that are retained.\n is_training: whether is training or not.\n min_depth: Minimum depth value (number of channels) for all convolution ops.\n Enforced when depth_multiplier < 1, and not an active constraint when\n depth_multiplier >= 1.\n depth_multiplier: Float multiplier for the depth (number of channels)\n for all convolution ops. The value must be greater than zero. Typical\n usage will be to set this value in (0, 1) to reduce the number of\n parameters or computation cost of the model.\n conv_defs: A list of ConvDef namedtuples specifying the net architecture.\n prediction_fn: a function to get predictions out of logits.\n spatial_squeeze: if True, logits is of shape is [B, C], if false logits is\n of shape [B, 1, 1, C], where B is batch_size and C is number of classes.\n reuse: whether or not the network and its variables should be reused. 
To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n global_pool: Optional boolean flag to control the avgpooling before the\n logits layer. If false or unset, pooling is done with a fixed window\n that reduces default-sized inputs to 1x1, while larger inputs lead to\n larger outputs. If true, any input size is pooled down to 1x1.\n\n Returns:\n net: a 2D Tensor with the logits (pre-softmax activations) if num_classes\n is a non-zero integer, or the non-dropped-out input to the logits layer\n if num_classes is 0 or None.\n end_points: a dictionary from components of the network to the corresponding\n activation.\n\n Raises:\n ValueError: Input rank is invalid.\n \"\"\"\n input_shape = inputs.get_shape().as_list()\n if len(input_shape) != 4:\n raise ValueError('Invalid input tensor rank, expected 4, was: %d' %\n len(input_shape))\n\n with tf.variable_scope(\n scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=is_training):\n net, end_points = mobilenet_v1_base(inputs, scope=scope,\n min_depth=min_depth,\n depth_multiplier=depth_multiplier,\n conv_defs=conv_defs)\n with tf.variable_scope('Logits'):\n if global_pool:\n # Global average pooling.\n net = tf.reduce_mean(\n input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')\n end_points['global_pool'] = net\n else:\n # Pooling with a fixed kernel size.\n kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])\n net = slim.avg_pool2d(net, kernel_size, padding='VALID',\n scope='AvgPool_1a')\n end_points['AvgPool_1a'] = net\n if not num_classes:\n return net, end_points\n # 1 x 1 x 1024\n net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')\n logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='Conv2d_1c_1x1')\n if spatial_squeeze:\n logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')\n end_points['Logits'] = logits\n if prediction_fn:\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\n return logits, end_points\n\nmobilenet_v1.default_image_size = 224\n\n\ndef wrapped_partial(func, *args, **kwargs):\n partial_func = functools.partial(func, *args, **kwargs)\n functools.update_wrapper(partial_func, func)\n return partial_func\n\n\nmobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)\nmobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)\nmobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)\n\n\ndef _reduced_kernel_size_for_small_input(input_tensor, kernel_size):\n \"\"\"Define kernel size which is automatically reduced for small input.\n\n If the shape of the input images is unknown at graph construction time this\n function assumes that the input images are large enough.\n\n Args:\n input_tensor: input tensor of size [batch_size, height, width, channels].\n kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]\n\n Returns:\n a tensor with the kernel size.\n \"\"\"\n shape = input_tensor.get_shape().as_list()\n if shape[1] is None or shape[2] is None:\n kernel_size_out = kernel_size\n else:\n kernel_size_out = [min(shape[1], kernel_size[0]),\n min(shape[2], kernel_size[1])]\n return kernel_size_out\n\n\ndef mobilenet_v1_arg_scope(\n is_training=True,\n weight_decay=0.00004,\n stddev=0.09,\n regularize_depthwise=False,\n batch_norm_decay=0.9997,\n batch_norm_epsilon=0.001,\n batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,\n 
normalizer_fn=slim.batch_norm):\n \"\"\"Defines the default MobilenetV1 arg scope.\n\n Args:\n is_training: Whether or not we're training the model. If this is set to\n None, the parameter is not added to the batch_norm arg_scope.\n weight_decay: The weight decay to use for regularizing the model.\n stddev: The standard deviation of the trunctated normal weight initializer.\n regularize_depthwise: Whether or not apply regularization on depthwise.\n batch_norm_decay: Decay for batch norm moving average.\n batch_norm_epsilon: Small float added to variance to avoid dividing by zero\n in batch norm.\n batch_norm_updates_collections: Collection for the update ops for\n batch norm.\n normalizer_fn: Normalization function to apply after convolution.\n\n Returns:\n An `arg_scope` to use for the mobilenet v1 model.\n \"\"\"\n batch_norm_params = {\n 'center': True,\n 'scale': True,\n 'decay': batch_norm_decay,\n 'epsilon': batch_norm_epsilon,\n 'updates_collections': batch_norm_updates_collections,\n }\n if is_training is not None:\n batch_norm_params['is_training'] = is_training\n\n # Set weight_decay for weights in Conv and DepthSepConv layers.\n weights_init = tf.truncated_normal_initializer(stddev=stddev)\n regularizer = slim.l2_regularizer(weight_decay)\n if regularize_depthwise:\n depthwise_regularizer = regularizer\n else:\n depthwise_regularizer = None\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n weights_initializer=weights_init,\n activation_fn=tf.nn.relu6, normalizer_fn=normalizer_fn):\n with slim.arg_scope([slim.batch_norm], **batch_norm_params):\n with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):\n with slim.arg_scope([slim.separable_conv2d],\n weights_regularizer=depthwise_regularizer) as sc:\n return sc\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Bounding Box List operations.\n\nExample box operations that are supported:\n * areas: compute bounding box areas\n * iou: pairwise intersection-over-union scores\n * sq_dist: pairwise distances between bounding boxes\n\nWhenever box_list_ops functions output a BoxList, the fields of the incoming\nBoxList are retained unless documented otherwise.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import range\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.core import box_list\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\n\n\nclass SortOrder(object):\n \"\"\"Enum class for sort order.\n\n Attributes:\n ascend: ascend order.\n descend: descend order.\n \"\"\"\n ascend = 1\n descend = 2\n\n\ndef area(boxlist, scope=None):\n \"\"\"Computes area of boxes.\n\n Args:\n boxlist: BoxList holding N boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N] representing box areas.\n \"\"\"\n with tf.name_scope(scope, 'Area'):\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])\n\n\ndef height_width(boxlist, scope=None):\n \"\"\"Computes height and width of boxes in boxlist.\n\n Args:\n boxlist: BoxList holding N boxes\n scope: name scope.\n\n Returns:\n Height: A tensor with shape [N] representing box heights.\n Width: A tensor with shape [N] representing box widths.\n \"\"\"\n with tf.name_scope(scope, 'HeightWidth'):\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1])\n\n\ndef scale(boxlist, y_scale, x_scale, scope=None):\n \"\"\"scale box coordinates in x and y dimensions.\n\n Args:\n boxlist: BoxList holding N boxes\n y_scale: (float) scalar tensor\n x_scale: (float) scalar tensor\n scope: name scope.\n\n Returns:\n boxlist: BoxList holding N boxes\n \"\"\"\n with tf.name_scope(scope, 'Scale'):\n y_scale = tf.cast(y_scale, tf.float32)\n x_scale = tf.cast(x_scale, tf.float32)\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n y_min = y_scale * y_min\n y_max = y_scale * y_max\n x_min = x_scale * x_min\n x_max = x_scale * x_max\n scaled_boxlist = box_list.BoxList(\n tf.concat([y_min, x_min, y_max, x_max], 1))\n return _copy_extra_fields(scaled_boxlist, boxlist)\n\n\ndef scale_height_width(boxlist, y_scale, x_scale, scope=None):\n \"\"\"Scale the height and width of boxes, leaving centers unchanged.\n\n Args:\n boxlist: BoxList holding N boxes\n y_scale: (float) scalar tensor\n x_scale: (float) scalar tensor\n scope: name scope.\n\n Returns:\n boxlist: BoxList holding N boxes\n \"\"\"\n with 
tf.name_scope(scope, 'ScaleHeightWidth'):\n y_scale = tf.cast(y_scale, tf.float32)\n x_scale = tf.cast(x_scale, tf.float32)\n yc, xc, height_orig, width_orig = boxlist.get_center_coordinates_and_sizes()\n y_min = yc - 0.5 * y_scale * height_orig\n y_max = yc + 0.5 * y_scale * height_orig\n x_min = xc - 0.5 * x_scale * width_orig\n x_max = xc + 0.5 * x_scale * width_orig\n scaled_boxlist = box_list.BoxList(\n tf.stack([y_min, x_min, y_max, x_max], 1))\n return _copy_extra_fields(scaled_boxlist, boxlist)\n\n\ndef clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None):\n \"\"\"Clip bounding boxes to a window.\n\n This op clips any input bounding boxes (represented by bounding box\n corners) to a window, optionally filtering out boxes that do not\n overlap at all with the window.\n\n Args:\n boxlist: BoxList holding M_in boxes\n window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]\n window to which the op should clip boxes.\n filter_nonoverlapping: whether to filter out boxes that do not overlap at\n all with the window.\n scope: name scope.\n\n Returns:\n a BoxList holding M_out boxes where M_out <= M_in\n \"\"\"\n with tf.name_scope(scope, 'ClipToWindow'):\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)\n y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min)\n y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min)\n x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min)\n x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min)\n clipped = box_list.BoxList(\n tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped],\n 1))\n clipped = _copy_extra_fields(clipped, boxlist)\n if filter_nonoverlapping:\n areas = area(clipped)\n nonzero_area_indices = tf.cast(\n tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)\n clipped = gather(clipped, nonzero_area_indices)\n return clipped\n\n\ndef prune_outside_window(boxlist, window, scope=None):\n \"\"\"Prunes bounding boxes that fall outside a given window.\n\n This function prunes bounding boxes that even partially fall outside the given\n window. 
See also clip_to_window which only prunes bounding boxes that fall\n completely outside the window, and clips any bounding boxes that partially\n overflow.\n\n Args:\n boxlist: a BoxList holding M_in boxes.\n window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]\n of the window\n scope: name scope.\n\n Returns:\n pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in\n valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes\n in the input tensor.\n \"\"\"\n with tf.name_scope(scope, 'PruneOutsideWindow'):\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)\n coordinate_violations = tf.concat([\n tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),\n tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)\n ], 1)\n valid_indices = tf.reshape(\n tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])\n return gather(boxlist, valid_indices), valid_indices\n\n\ndef prune_completely_outside_window(boxlist, window, scope=None):\n \"\"\"Prunes bounding boxes that fall completely outside of the given window.\n\n The function clip_to_window prunes bounding boxes that fall\n completely outside the window, but also clips any bounding boxes that\n partially overflow. This function does not clip partially overflowing boxes.\n\n Args:\n boxlist: a BoxList holding M_in boxes.\n window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]\n of the window\n scope: name scope.\n\n Returns:\n pruned_boxlist: a new BoxList with all bounding boxes partially or fully in\n the window.\n valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes\n in the input tensor.\n \"\"\"\n with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):\n y_min, x_min, y_max, x_max = tf.split(\n value=boxlist.get(), num_or_size_splits=4, axis=1)\n win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)\n coordinate_violations = tf.concat([\n tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),\n tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)\n ], 1)\n valid_indices = tf.reshape(\n tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])\n return gather(boxlist, valid_indices), valid_indices\n\n\ndef intersection(boxlist1, boxlist2, scope=None):\n \"\"\"Compute pairwise intersection areas between boxes.\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N, M] representing pairwise intersections\n \"\"\"\n with tf.name_scope(scope, 'Intersection'):\n y_min1, x_min1, y_max1, x_max1 = tf.split(\n value=boxlist1.get(), num_or_size_splits=4, axis=1)\n y_min2, x_min2, y_max2, x_max2 = tf.split(\n value=boxlist2.get(), num_or_size_splits=4, axis=1)\n all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))\n all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))\n intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)\n all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))\n all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))\n intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)\n return intersect_heights * intersect_widths\n\n\ndef matched_intersection(boxlist1, boxlist2, scope=None):\n \"\"\"Compute intersection areas between corresponding boxes in two boxlists.\n\n Args:\n boxlist1: BoxList holding N 
boxes\n boxlist2: BoxList holding N boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N] representing pairwise intersections\n \"\"\"\n with tf.name_scope(scope, 'MatchedIntersection'):\n y_min1, x_min1, y_max1, x_max1 = tf.split(\n value=boxlist1.get(), num_or_size_splits=4, axis=1)\n y_min2, x_min2, y_max2, x_max2 = tf.split(\n value=boxlist2.get(), num_or_size_splits=4, axis=1)\n min_ymax = tf.minimum(y_max1, y_max2)\n max_ymin = tf.maximum(y_min1, y_min2)\n intersect_heights = tf.maximum(0.0, min_ymax - max_ymin)\n min_xmax = tf.minimum(x_max1, x_max2)\n max_xmin = tf.maximum(x_min1, x_min2)\n intersect_widths = tf.maximum(0.0, min_xmax - max_xmin)\n return tf.reshape(intersect_heights * intersect_widths, [-1])\n\n\ndef iou(boxlist1, boxlist2, scope=None):\n \"\"\"Computes pairwise intersection-over-union between box collections.\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N, M] representing pairwise iou scores.\n \"\"\"\n with tf.name_scope(scope, 'IOU'):\n intersections = intersection(boxlist1, boxlist2)\n areas1 = area(boxlist1)\n areas2 = area(boxlist2)\n unions = (\n tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)\n return tf.where(\n tf.equal(intersections, 0.0),\n tf.zeros_like(intersections), tf.truediv(intersections, unions))\n\n\ndef l1(boxlist1, boxlist2, scope=None):\n \"\"\"Computes l1 loss (pairwise) between two boxlists.\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N, M] representing the pairwise L1 loss.\n \"\"\"\n with tf.name_scope(scope, 'PairwiseL1'):\n ycenter1, xcenter1, h1, w1 = boxlist1.get_center_coordinates_and_sizes()\n ycenter2, xcenter2, h2, w2 = boxlist2.get_center_coordinates_and_sizes()\n ycenters = tf.abs(tf.expand_dims(ycenter2, axis=0) - tf.expand_dims(\n tf.transpose(ycenter1), axis=1))\n xcenters = tf.abs(tf.expand_dims(xcenter2, axis=0) - tf.expand_dims(\n tf.transpose(xcenter1), axis=1))\n heights = tf.abs(tf.expand_dims(h2, axis=0) - tf.expand_dims(\n tf.transpose(h1), axis=1))\n widths = tf.abs(tf.expand_dims(w2, axis=0) - tf.expand_dims(\n tf.transpose(w1), axis=1))\n return ycenters + xcenters + heights + widths\n\n\ndef giou(boxlist1, boxlist2, scope=None):\n \"\"\"Computes pairwise generalized IOU between two boxlists.\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N, M] representing the pairwise GIoU loss.\n \"\"\"\n with tf.name_scope(scope, 'PairwiseGIoU'):\n n = boxlist1.num_boxes()\n m = boxlist2.num_boxes()\n boxes1 = tf.repeat(boxlist1.get(), repeats=m, axis=0)\n boxes2 = tf.tile(boxlist2.get(), multiples=[n, 1])\n return tf.reshape(ops.giou(boxes1, boxes2), [n, m])\n\n\ndef matched_iou(boxlist1, boxlist2, scope=None):\n \"\"\"Compute intersection-over-union between corresponding boxes in boxlists.\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding N boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N] representing pairwise iou scores.\n \"\"\"\n with tf.name_scope(scope, 'MatchedIOU'):\n intersections = matched_intersection(boxlist1, boxlist2)\n areas1 = area(boxlist1)\n areas2 = area(boxlist2)\n unions = areas1 + areas2 - intersections\n return tf.where(\n tf.equal(intersections, 0.0),\n tf.zeros_like(intersections), tf.truediv(intersections, unions))\n\n\ndef ioa(boxlist1, boxlist2, 
scope=None):\n \"\"\"Computes pairwise intersection-over-area between box collections.\n\n intersection-over-area (IOA) between two boxes box1 and box2 is defined as\n their intersection area over box2's area. Note that ioa is not symmetric,\n that is, ioa(box1, box2) != ioa(box2, box1).\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N, M] representing pairwise ioa scores.\n \"\"\"\n with tf.name_scope(scope, 'IOA'):\n intersections = intersection(boxlist1, boxlist2)\n areas = tf.expand_dims(area(boxlist2), 0)\n return tf.truediv(intersections, areas)\n\n\ndef prune_non_overlapping_boxes(\n boxlist1, boxlist2, min_overlap=0.0, scope=None):\n \"\"\"Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.\n\n For each box in boxlist1, we want its IOA to be more than minoverlap with\n at least one of the boxes in boxlist2. If it does not, we remove it.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n min_overlap: Minimum required overlap between boxes, to count them as\n overlapping.\n scope: name scope.\n\n Returns:\n new_boxlist1: A pruned boxlist with size [N', 4].\n keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the\n first input BoxList `boxlist1`.\n \"\"\"\n with tf.name_scope(scope, 'PruneNonOverlappingBoxes'):\n ioa_ = ioa(boxlist2, boxlist1) # [M, N] tensor\n ioa_ = tf.reduce_max(ioa_, reduction_indices=[0]) # [N] tensor\n keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap))\n keep_inds = tf.squeeze(tf.where(keep_bool), axis=[1])\n new_boxlist1 = gather(boxlist1, keep_inds)\n return new_boxlist1, keep_inds\n\n\ndef prune_small_boxes(boxlist, min_side, scope=None):\n \"\"\"Prunes small boxes in the boxlist which have a side smaller than min_side.\n\n Args:\n boxlist: BoxList holding N boxes.\n min_side: Minimum width AND height of box to survive pruning.\n scope: name scope.\n\n Returns:\n A pruned boxlist.\n \"\"\"\n with tf.name_scope(scope, 'PruneSmallBoxes'):\n height, width = height_width(boxlist)\n is_valid = tf.logical_and(tf.greater_equal(width, min_side),\n tf.greater_equal(height, min_side))\n return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))\n\n\ndef change_coordinate_frame(boxlist, window, scope=None):\n \"\"\"Change coordinate frame of the boxlist to be relative to window's frame.\n\n Given a window of the form [ymin, xmin, ymax, xmax],\n changes bounding box coordinates from boxlist to be relative to this window\n (e.g., the min corner maps to (0,0) and the max corner maps to (1,1)).\n\n An example use case is data augmentation: where we are given groundtruth\n boxes (boxlist) and would like to randomly crop the image to some\n window (window). 
In this case we need to change the coordinate frame of\n each groundtruth box to be relative to this new window.\n\n Args:\n boxlist: A BoxList object holding N boxes.\n window: A rank 1 tensor [4].\n scope: name scope.\n\n Returns:\n Returns a BoxList object with N boxes.\n \"\"\"\n with tf.name_scope(scope, 'ChangeCoordinateFrame'):\n win_height = window[2] - window[0]\n win_width = window[3] - window[1]\n boxlist_new = scale(box_list.BoxList(\n boxlist.get() - [window[0], window[1], window[0], window[1]]),\n 1.0 / win_height, 1.0 / win_width)\n boxlist_new = _copy_extra_fields(boxlist_new, boxlist)\n return boxlist_new\n\n\ndef sq_dist(boxlist1, boxlist2, scope=None):\n \"\"\"Computes the pairwise squared distances between box corners.\n\n This op treats each box as if it were a point in a 4d Euclidean space and\n computes pairwise squared distances.\n\n Mathematically, we are given two matrices of box coordinates X and Y,\n where X(i,:) is the i'th row of X, containing the 4 numbers defining the\n corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to\n boxlist2. We compute\n Z(i,j) = ||X(i,:) - Y(j,:)||^2\n = ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:),\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n scope: name scope.\n\n Returns:\n a tensor with shape [N, M] representing pairwise distances\n \"\"\"\n with tf.name_scope(scope, 'SqDist'):\n sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keep_dims=True)\n sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keep_dims=True)\n innerprod = tf.matmul(boxlist1.get(), boxlist2.get(),\n transpose_a=False, transpose_b=True)\n return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod\n\n\ndef boolean_mask(boxlist, indicator, fields=None, scope=None,\n use_static_shapes=False, indicator_sum=None):\n \"\"\"Select boxes from BoxList according to indicator and return new BoxList.\n\n `boolean_mask` returns the subset of boxes that are marked as \"True\" by the\n indicator tensor. By default, `boolean_mask` returns boxes corresponding to\n the input index list, as well as all additional fields stored in the boxlist\n (indexing into the first dimension). However one can optionally only draw\n from a subset of fields.\n\n Args:\n boxlist: BoxList holding N boxes\n indicator: a rank-1 boolean tensor\n fields: (optional) list of fields to also gather from. If None (default),\n all fields are gathered from. Pass an empty fields list to only gather\n the box coordinates.\n scope: name scope.\n use_static_shapes: Whether to use an implementation with static shape\n gurantees.\n indicator_sum: An integer containing the sum of `indicator` vector. 
Only\n required if `use_static_shape` is True.\n\n Returns:\n subboxlist: a BoxList corresponding to the subset of the input BoxList\n specified by indicator\n Raises:\n ValueError: if `indicator` is not a rank-1 boolean tensor.\n \"\"\"\n with tf.name_scope(scope, 'BooleanMask'):\n if indicator.shape.ndims != 1:\n raise ValueError('indicator should have rank 1')\n if indicator.dtype != tf.bool:\n raise ValueError('indicator should be a boolean tensor')\n if use_static_shapes:\n if not (indicator_sum and isinstance(indicator_sum, int)):\n raise ValueError('`indicator_sum` must be a of type int')\n selected_positions = tf.cast(indicator, dtype=tf.float32)\n indexed_positions = tf.cast(\n tf.multiply(\n tf.cumsum(selected_positions), selected_positions),\n dtype=tf.int32)\n one_hot_selector = tf.one_hot(\n indexed_positions - 1, indicator_sum, dtype=tf.float32)\n sampled_indices = tf.cast(\n tf.tensordot(\n tf.cast(tf.range(tf.shape(indicator)[0]), dtype=tf.float32),\n one_hot_selector,\n axes=[0, 0]),\n dtype=tf.int32)\n return gather(boxlist, sampled_indices, use_static_shapes=True)\n else:\n subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))\n if fields is None:\n fields = boxlist.get_extra_fields()\n for field in fields:\n if not boxlist.has_field(field):\n raise ValueError('boxlist must contain all specified fields')\n subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)\n subboxlist.add_field(field, subfieldlist)\n return subboxlist\n\n\ndef gather(boxlist, indices, fields=None, scope=None, use_static_shapes=False):\n \"\"\"Gather boxes from BoxList according to indices and return new BoxList.\n\n By default, `gather` returns boxes corresponding to the input index list, as\n well as all additional fields stored in the boxlist (indexing into the\n first dimension). However one can optionally only gather from a\n subset of fields.\n\n Args:\n boxlist: BoxList holding N boxes\n indices: a rank-1 tensor of type int32 / int64\n fields: (optional) list of fields to also gather from. If None (default),\n all fields are gathered from. Pass an empty fields list to only gather\n the box coordinates.\n scope: name scope.\n use_static_shapes: Whether to use an implementation with static shape\n gurantees.\n\n Returns:\n subboxlist: a BoxList corresponding to the subset of the input BoxList\n specified by indices\n Raises:\n ValueError: if specified field is not contained in boxlist or if the\n indices are not of type int32\n \"\"\"\n with tf.name_scope(scope, 'Gather'):\n if len(indices.shape.as_list()) != 1:\n raise ValueError('indices should have rank 1')\n if indices.dtype != tf.int32 and indices.dtype != tf.int64:\n raise ValueError('indices should be an int32 / int64 tensor')\n gather_op = tf.gather\n if use_static_shapes:\n gather_op = ops.matmul_gather_on_zeroth_axis\n subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices))\n if fields is None:\n fields = boxlist.get_extra_fields()\n fields += ['boxes']\n for field in fields:\n if not boxlist.has_field(field):\n raise ValueError('boxlist must contain all specified fields')\n subfieldlist = gather_op(boxlist.get_field(field), indices)\n subboxlist.add_field(field, subfieldlist)\n return subboxlist\n\n\ndef concatenate(boxlists, fields=None, scope=None):\n \"\"\"Concatenate list of BoxLists.\n\n This op concatenates a list of input BoxLists into a larger BoxList. 
It also\n handles concatenation of BoxList fields as long as the field tensor shapes\n are equal except for the first dimension.\n\n Args:\n boxlists: list of BoxList objects\n fields: optional list of fields to also concatenate. By default, all\n fields from the first BoxList in the list are included in the\n concatenation.\n scope: name scope.\n\n Returns:\n a BoxList with number of boxes equal to\n sum([boxlist.num_boxes() for boxlist in BoxList])\n Raises:\n ValueError: if boxlists is invalid (i.e., is not a list, is empty, or\n contains non BoxList objects), or if requested fields are not contained in\n all boxlists\n \"\"\"\n with tf.name_scope(scope, 'Concatenate'):\n if not isinstance(boxlists, list):\n raise ValueError('boxlists should be a list')\n if not boxlists:\n raise ValueError('boxlists should have nonzero length')\n for boxlist in boxlists:\n if not isinstance(boxlist, box_list.BoxList):\n raise ValueError('all elements of boxlists should be BoxList objects')\n concatenated = box_list.BoxList(\n tf.concat([boxlist.get() for boxlist in boxlists], 0))\n if fields is None:\n fields = boxlists[0].get_extra_fields()\n for field in fields:\n first_field_shape = boxlists[0].get_field(field).get_shape().as_list()\n first_field_shape[0] = -1\n if None in first_field_shape:\n raise ValueError('field %s must have fully defined shape except for the'\n ' 0th dimension.' % field)\n for boxlist in boxlists:\n if not boxlist.has_field(field):\n raise ValueError('boxlist must contain all requested fields')\n field_shape = boxlist.get_field(field).get_shape().as_list()\n field_shape[0] = -1\n if field_shape != first_field_shape:\n raise ValueError('field %s must have same shape for all boxlists '\n 'except for the 0th dimension.' % field)\n concatenated_field = tf.concat(\n [boxlist.get_field(field) for boxlist in boxlists], 0)\n concatenated.add_field(field, concatenated_field)\n return concatenated\n\n\ndef sort_by_field(boxlist, field, order=SortOrder.descend, scope=None):\n \"\"\"Sort boxes and associated fields according to a scalar field.\n\n A common use case is reordering the boxes according to descending scores.\n\n Args:\n boxlist: BoxList holding N boxes.\n field: A BoxList field for sorting and reordering the BoxList.\n order: (Optional) descend or ascend. Default is descend.\n scope: name scope.\n\n Returns:\n sorted_boxlist: A sorted BoxList with the field in the specified order.\n\n Raises:\n ValueError: if specified field does not exist\n ValueError: if the order is not either descend or ascend\n \"\"\"\n with tf.name_scope(scope, 'SortByField'):\n if order != SortOrder.descend and order != SortOrder.ascend:\n raise ValueError('Invalid sort order')\n\n field_to_sort = boxlist.get_field(field)\n if len(field_to_sort.shape.as_list()) != 1:\n raise ValueError('Field should have rank 1')\n\n num_boxes = boxlist.num_boxes()\n num_entries = tf.size(field_to_sort)\n length_assert = tf.Assert(\n tf.equal(num_boxes, num_entries),\n ['Incorrect field size: actual vs expected.', num_entries, num_boxes])\n\n with tf.control_dependencies([length_assert]):\n _, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True)\n\n if order == SortOrder.ascend:\n sorted_indices = tf.reverse_v2(sorted_indices, [0])\n\n return gather(boxlist, sorted_indices)\n\n\ndef visualize_boxes_in_image(image, boxlist, normalized=False, scope=None):\n \"\"\"Overlay bounding box list on image.\n\n Currently this visualization plots a 1 pixel thick red bounding box on top\n of the image. 
Note that tf.image.draw_bounding_boxes essentially is\n 1 indexed.\n\n Args:\n image: an image tensor with shape [height, width, 3]\n boxlist: a BoxList\n normalized: (boolean) specify whether corners are to be interpreted\n as absolute coordinates in image space or normalized with respect to the\n image size.\n scope: name scope.\n\n Returns:\n image_and_boxes: an image tensor with shape [height, width, 3]\n \"\"\"\n with tf.name_scope(scope, 'VisualizeBoxesInImage'):\n if not normalized:\n height, width, _ = tf.unstack(tf.shape(image))\n boxlist = scale(boxlist,\n 1.0 / tf.cast(height, tf.float32),\n 1.0 / tf.cast(width, tf.float32))\n corners = tf.expand_dims(boxlist.get(), 0)\n image = tf.expand_dims(image, 0)\n return tf.squeeze(tf.image.draw_bounding_boxes(image, corners), [0])\n\n\ndef filter_field_value_equals(boxlist, field, value, scope=None):\n \"\"\"Filter to keep only boxes with field entries equal to the given value.\n\n Args:\n boxlist: BoxList holding N boxes.\n field: field name for filtering.\n value: scalar value.\n scope: name scope.\n\n Returns:\n a BoxList holding M boxes where M <= N\n\n Raises:\n ValueError: if boxlist not a BoxList object or if it does not have\n the specified field.\n \"\"\"\n with tf.name_scope(scope, 'FilterFieldValueEquals'):\n if not isinstance(boxlist, box_list.BoxList):\n raise ValueError('boxlist must be a BoxList')\n if not boxlist.has_field(field):\n raise ValueError('boxlist must contain the specified field')\n filter_field = boxlist.get_field(field)\n gather_index = tf.reshape(tf.where(tf.equal(filter_field, value)), [-1])\n return gather(boxlist, gather_index)\n\n\ndef filter_greater_than(boxlist, thresh, scope=None):\n \"\"\"Filter to keep only boxes with score exceeding a given threshold.\n\n This op keeps the collection of boxes whose corresponding scores are\n greater than the input threshold.\n\n TODO(jonathanhuang): Change function name to filter_scores_greater_than\n\n Args:\n boxlist: BoxList holding N boxes. Must contain a 'scores' field\n representing detection scores.\n thresh: scalar threshold\n scope: name scope.\n\n Returns:\n a BoxList holding M boxes where M <= N\n\n Raises:\n ValueError: if boxlist not a BoxList object or if it does not\n have a scores field\n \"\"\"\n with tf.name_scope(scope, 'FilterGreaterThan'):\n if not isinstance(boxlist, box_list.BoxList):\n raise ValueError('boxlist must be a BoxList')\n if not boxlist.has_field('scores'):\n raise ValueError('input boxlist must have \\'scores\\' field')\n scores = boxlist.get_field('scores')\n if len(scores.shape.as_list()) > 2:\n raise ValueError('Scores should have rank 1 or 2')\n if len(scores.shape.as_list()) == 2 and scores.shape.as_list()[1] != 1:\n raise ValueError('Scores should have rank 1 or have shape '\n 'consistent with [None, 1]')\n high_score_indices = tf.cast(tf.reshape(\n tf.where(tf.greater(scores, thresh)),\n [-1]), tf.int32)\n return gather(boxlist, high_score_indices)\n\n\ndef non_max_suppression(boxlist, thresh, max_output_size, scope=None):\n \"\"\"Non maximum suppression.\n\n This op greedily selects a subset of detection bounding boxes, pruning\n away boxes that have high IOU (intersection over union) overlap (> thresh)\n with already selected boxes. Note that this only works for a single class ---\n to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression.\n\n Args:\n boxlist: BoxList holding N boxes. 
Must contain a 'scores' field\n representing detection scores.\n thresh: scalar threshold\n max_output_size: maximum number of retained boxes\n scope: name scope.\n\n Returns:\n a BoxList holding M boxes where M <= max_output_size\n Raises:\n ValueError: if thresh is not in [0, 1]\n \"\"\"\n with tf.name_scope(scope, 'NonMaxSuppression'):\n if not 0 <= thresh <= 1.0:\n raise ValueError('thresh must be between 0 and 1')\n if not isinstance(boxlist, box_list.BoxList):\n raise ValueError('boxlist must be a BoxList')\n if not boxlist.has_field('scores'):\n raise ValueError('input boxlist must have \\'scores\\' field')\n selected_indices = tf.image.non_max_suppression(\n boxlist.get(), boxlist.get_field('scores'),\n max_output_size, iou_threshold=thresh)\n return gather(boxlist, selected_indices)\n\n\ndef _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):\n \"\"\"Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.\n\n Args:\n boxlist_to_copy_to: BoxList to which extra fields are copied.\n boxlist_to_copy_from: BoxList from which fields are copied.\n\n Returns:\n boxlist_to_copy_to with extra fields.\n \"\"\"\n for field in boxlist_to_copy_from.get_extra_fields():\n boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))\n return boxlist_to_copy_to\n\n\ndef to_normalized_coordinates(boxlist, height, width,\n check_range=True, scope=None):\n \"\"\"Converts absolute box coordinates to normalized coordinates in [0, 1].\n\n Usually one uses the dynamic shape of the image or conv-layer tensor:\n boxlist = box_list_ops.to_normalized_coordinates(boxlist,\n tf.shape(images)[1],\n tf.shape(images)[2]),\n\n This function raises an assertion failed error at graph execution time when\n the maximum coordinate is smaller than 1.01 (which means that coordinates are\n already normalized). 
The value 1.01 is to deal with small rounding errors.\n\n Args:\n boxlist: BoxList with coordinates in terms of pixel-locations.\n height: Maximum value for height of absolute box coordinates.\n width: Maximum value for width of absolute box coordinates.\n check_range: If True, checks if the coordinates are normalized or not.\n scope: name scope.\n\n Returns:\n boxlist with normalized coordinates in [0, 1].\n \"\"\"\n with tf.name_scope(scope, 'ToNormalizedCoordinates'):\n height = tf.cast(height, tf.float32)\n width = tf.cast(width, tf.float32)\n\n if check_range:\n max_val = tf.reduce_max(boxlist.get())\n max_assert = tf.Assert(tf.greater(max_val, 1.01),\n ['max value is lower than 1.01: ', max_val])\n with tf.control_dependencies([max_assert]):\n width = tf.identity(width)\n\n return scale(boxlist, 1 / height, 1 / width)\n\n\ndef to_absolute_coordinates(boxlist,\n height,\n width,\n check_range=True,\n maximum_normalized_coordinate=1.1,\n scope=None):\n \"\"\"Converts normalized box coordinates to absolute pixel coordinates.\n\n This function raises an assertion failed error when the maximum box coordinate\n value is larger than maximum_normalized_coordinate (in which case coordinates\n are already absolute).\n\n Args:\n boxlist: BoxList with coordinates in range [0, 1].\n height: Maximum value for height of absolute box coordinates.\n width: Maximum value for width of absolute box coordinates.\n check_range: If True, checks if the coordinates are normalized or not.\n maximum_normalized_coordinate: Maximum coordinate value to be considered\n as normalized, default to 1.1.\n scope: name scope.\n\n Returns:\n boxlist with absolute coordinates in terms of the image size.\n\n \"\"\"\n with tf.name_scope(scope, 'ToAbsoluteCoordinates'):\n height = tf.cast(height, tf.float32)\n width = tf.cast(width, tf.float32)\n\n # Ensure range of input boxes is correct.\n if check_range:\n box_maximum = tf.reduce_max(boxlist.get())\n max_assert = tf.Assert(\n tf.greater_equal(maximum_normalized_coordinate, box_maximum),\n ['maximum box coordinate value is larger '\n 'than %f: ' % maximum_normalized_coordinate, box_maximum])\n with tf.control_dependencies([max_assert]):\n width = tf.identity(width)\n\n return scale(boxlist, height, width)\n\n\ndef refine_boxes_multi_class(pool_boxes,\n num_classes,\n nms_iou_thresh,\n nms_max_detections,\n voting_iou_thresh=0.5):\n \"\"\"Refines a pool of boxes using non max suppression and box voting.\n\n Box refinement is done independently for each class.\n\n Args:\n pool_boxes: (BoxList) A collection of boxes to be refined. 
pool_boxes must\n have a rank 1 'scores' field and a rank 1 'classes' field.\n num_classes: (int scalar) Number of classes.\n nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).\n nms_max_detections: (int scalar) maximum output size for NMS.\n voting_iou_thresh: (float scalar) iou threshold for box voting.\n\n Returns:\n BoxList of refined boxes.\n\n Raises:\n ValueError: if\n a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].\n b) pool_boxes is not a BoxList.\n c) pool_boxes does not have a scores and classes field.\n \"\"\"\n if not 0.0 <= nms_iou_thresh <= 1.0:\n raise ValueError('nms_iou_thresh must be between 0 and 1')\n if not 0.0 <= voting_iou_thresh <= 1.0:\n raise ValueError('voting_iou_thresh must be between 0 and 1')\n if not isinstance(pool_boxes, box_list.BoxList):\n raise ValueError('pool_boxes must be a BoxList')\n if not pool_boxes.has_field('scores'):\n raise ValueError('pool_boxes must have a \\'scores\\' field')\n if not pool_boxes.has_field('classes'):\n raise ValueError('pool_boxes must have a \\'classes\\' field')\n\n refined_boxes = []\n for i in range(num_classes):\n boxes_class = filter_field_value_equals(pool_boxes, 'classes', i)\n refined_boxes_class = refine_boxes(boxes_class, nms_iou_thresh,\n nms_max_detections, voting_iou_thresh)\n refined_boxes.append(refined_boxes_class)\n return sort_by_field(concatenate(refined_boxes), 'scores')\n\n\ndef refine_boxes(pool_boxes,\n nms_iou_thresh,\n nms_max_detections,\n voting_iou_thresh=0.5):\n \"\"\"Refines a pool of boxes using non max suppression and box voting.\n\n Args:\n pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must\n have a rank 1 'scores' field.\n nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).\n nms_max_detections: (int scalar) maximum output size for NMS.\n voting_iou_thresh: (float scalar) iou threshold for box voting.\n\n Returns:\n BoxList of refined boxes.\n\n Raises:\n ValueError: if\n a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].\n b) pool_boxes is not a BoxList.\n c) pool_boxes does not have a scores field.\n \"\"\"\n if not 0.0 <= nms_iou_thresh <= 1.0:\n raise ValueError('nms_iou_thresh must be between 0 and 1')\n if not 0.0 <= voting_iou_thresh <= 1.0:\n raise ValueError('voting_iou_thresh must be between 0 and 1')\n if not isinstance(pool_boxes, box_list.BoxList):\n raise ValueError('pool_boxes must be a BoxList')\n if not pool_boxes.has_field('scores'):\n raise ValueError('pool_boxes must have a \\'scores\\' field')\n\n nms_boxes = non_max_suppression(\n pool_boxes, nms_iou_thresh, nms_max_detections)\n return box_voting(nms_boxes, pool_boxes, voting_iou_thresh)\n\n\ndef box_voting(selected_boxes, pool_boxes, iou_thresh=0.5):\n \"\"\"Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015.\n\n Performs box voting as described in 'Object detection via a multi-region &\n semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For\n each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes\n with iou overlap >= iou_thresh. The location of B is set to the weighted\n average location of boxes in S (scores are used for weighting). And the score\n of B is set to the average score of boxes in S.\n\n Args:\n selected_boxes: BoxList containing a subset of boxes in pool_boxes. 
These\n      boxes are usually selected from pool_boxes using non max suppression.\n    pool_boxes: BoxList containing a set of (possibly redundant) boxes.\n    iou_thresh: (float scalar) iou threshold for matching boxes in\n      selected_boxes and pool_boxes.\n\n  Returns:\n    BoxList containing averaged locations and scores for each box in\n    selected_boxes.\n\n  Raises:\n    ValueError: if\n      a) selected_boxes or pool_boxes is not a BoxList.\n      b) if iou_thresh is not in [0, 1].\n      c) pool_boxes does not have a scores field.\n  \"\"\"\n  if not 0.0 <= iou_thresh <= 1.0:\n    raise ValueError('iou_thresh must be between 0 and 1')\n  if not isinstance(selected_boxes, box_list.BoxList):\n    raise ValueError('selected_boxes must be a BoxList')\n  if not isinstance(pool_boxes, box_list.BoxList):\n    raise ValueError('pool_boxes must be a BoxList')\n  if not pool_boxes.has_field('scores'):\n    raise ValueError('pool_boxes must have a \\'scores\\' field')\n\n  iou_ = iou(selected_boxes, pool_boxes)\n  match_indicator = tf.cast(tf.greater(iou_, iou_thresh), dtype=tf.float32)\n  num_matches = tf.reduce_sum(match_indicator, 1)\n  # TODO(kbanoop): Handle the case where some boxes in selected_boxes do not\n  # match to any boxes in pool_boxes. For such boxes without any matches, we\n  # should return the original boxes without voting.\n  match_assert = tf.Assert(\n      tf.reduce_all(tf.greater(num_matches, 0)),\n      ['Each box in selected_boxes must match with at least one box '\n       'in pool_boxes.'])\n\n  scores = tf.expand_dims(pool_boxes.get_field('scores'), 1)\n  scores_assert = tf.Assert(\n      tf.reduce_all(tf.greater_equal(scores, 0)),\n      ['Scores must be non negative.'])\n\n  with tf.control_dependencies([scores_assert, match_assert]):\n    sum_scores = tf.matmul(match_indicator, scores)\n  averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches\n\n  box_locations = tf.matmul(match_indicator,\n                            pool_boxes.get() * scores) / sum_scores\n  averaged_boxes = box_list.BoxList(box_locations)\n  _copy_extra_fields(averaged_boxes, selected_boxes)\n  averaged_boxes.add_field('scores', averaged_scores)\n  return averaged_boxes\n\n\ndef pad_or_clip_box_list(boxlist, num_boxes, scope=None):\n  \"\"\"Pads or clips all fields of a BoxList.\n\n  Args:\n    boxlist: A BoxList with an arbitrary number of boxes.\n    num_boxes: First num_boxes in boxlist are kept.\n      The fields are zero-padded if num_boxes is bigger than the\n      actual number of boxes.\n    scope: name scope.\n\n  Returns:\n    BoxList with all fields padded or clipped.\n  \"\"\"\n  with tf.name_scope(scope, 'PadOrClipBoxList'):\n    subboxlist = box_list.BoxList(shape_utils.pad_or_clip_tensor(\n        boxlist.get(), num_boxes))\n    for field in boxlist.get_extra_fields():\n      subfield = shape_utils.pad_or_clip_tensor(\n          boxlist.get_field(field), num_boxes)\n      subboxlist.add_field(field, subfield)\n    return subboxlist\n\n\ndef select_random_box(boxlist,\n                      default_box=None,\n                      seed=None,\n                      scope=None):\n  \"\"\"Selects a random bounding box from a `BoxList`.\n\n  Args:\n    boxlist: A BoxList.\n    default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,\n      this default box will be returned. 
If None, will use a default box of\n      [[-1., -1., -1., -1.]].\n    seed: Random seed.\n    scope: Name scope.\n\n  Returns:\n    bbox: A [1, 4] tensor with a random bounding box.\n    valid: A bool tensor indicating whether a valid bounding box is returned\n      (True) or whether the default box is returned (False).\n  \"\"\"\n  with tf.name_scope(scope, 'SelectRandomBox'):\n    bboxes = boxlist.get()\n    combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes)\n    number_of_boxes = combined_shape[0]\n    default_box = default_box or tf.constant([[-1., -1., -1., -1.]])\n\n    def select_box():\n      random_index = tf.random_uniform([],\n                                       maxval=number_of_boxes,\n                                       dtype=tf.int32,\n                                       seed=seed)\n      return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True)\n\n    return tf.cond(\n        tf.greater_equal(number_of_boxes, 1),\n        true_fn=select_box,\n        false_fn=lambda: (default_box, tf.constant(False)))\n\n\ndef get_minimal_coverage_box(boxlist,\n                             default_box=None,\n                             scope=None):\n  \"\"\"Creates a single bounding box which covers all boxes in the boxlist.\n\n  Args:\n    boxlist: A BoxList.\n    default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,\n      this default box will be returned. If None, will use a default box of\n      [[0., 0., 1., 1.]].\n    scope: Name scope.\n\n  Returns:\n    A [1, 4] float32 tensor with a bounding box that tightly covers all the\n    boxes in the box list. If the boxlist does not contain any boxes, the\n    default box is returned.\n  \"\"\"\n  with tf.name_scope(scope, 'CreateCoverageBox'):\n    num_boxes = boxlist.num_boxes()\n\n    def coverage_box(bboxes):\n      y_min, x_min, y_max, x_max = tf.split(\n          value=bboxes, num_or_size_splits=4, axis=1)\n      y_min_coverage = tf.reduce_min(y_min, axis=0)\n      x_min_coverage = tf.reduce_min(x_min, axis=0)\n      y_max_coverage = tf.reduce_max(y_max, axis=0)\n      x_max_coverage = tf.reduce_max(x_max, axis=0)\n      return tf.stack(\n          [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],\n          axis=1)\n\n    default_box = default_box or tf.constant([[0., 0., 1., 1.]])\n    return tf.cond(\n        tf.greater_equal(num_boxes, 1),\n        true_fn=lambda: coverage_box(boxlist.get()),\n        false_fn=lambda: default_box)\n\n\ndef sample_boxes_by_jittering(boxlist,\n                              num_boxes_to_sample,\n                              stddev=0.1,\n                              scope=None):\n  \"\"\"Samples num_boxes_to_sample boxes by jittering around boxlist boxes.\n\n  It is possible that this function might generate boxes with size 0. The larger\n  the stddev, the more probable this is. For a small stddev of 0.1 this probability\n  is very small.\n\n  Args:\n    boxlist: A boxlist containing N boxes in normalized coordinates.\n    num_boxes_to_sample: A positive integer containing the number of boxes to\n      sample.\n    stddev: Standard deviation. This is used to draw random offsets for the\n      box corners from a normal distribution. 
The offset is multiplied by the\n box size so will be larger in terms of pixels for larger boxes.\n scope: Name scope.\n\n Returns:\n sampled_boxlist: A boxlist containing num_boxes_to_sample boxes in\n normalized coordinates.\n \"\"\"\n with tf.name_scope(scope, 'SampleBoxesByJittering'):\n num_boxes = boxlist.num_boxes()\n box_indices = tf.random_uniform(\n [num_boxes_to_sample],\n minval=0,\n maxval=num_boxes,\n dtype=tf.int32)\n sampled_boxes = tf.gather(boxlist.get(), box_indices)\n sampled_boxes_height = sampled_boxes[:, 2] - sampled_boxes[:, 0]\n sampled_boxes_width = sampled_boxes[:, 3] - sampled_boxes[:, 1]\n rand_miny_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)\n rand_minx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)\n rand_maxy_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)\n rand_maxx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)\n miny = rand_miny_gaussian * sampled_boxes_height + sampled_boxes[:, 0]\n minx = rand_minx_gaussian * sampled_boxes_width + sampled_boxes[:, 1]\n maxy = rand_maxy_gaussian * sampled_boxes_height + sampled_boxes[:, 2]\n maxx = rand_maxx_gaussian * sampled_boxes_width + sampled_boxes[:, 3]\n maxy = tf.maximum(miny, maxy)\n maxx = tf.maximum(minx, maxx)\n sampled_boxes = tf.stack([miny, minx, maxy, maxx], axis=1)\n sampled_boxes = tf.maximum(tf.minimum(sampled_boxes, 1.0), 0.0)\n return box_list.BoxList(sampled_boxes)\n"
] | [
[
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.truncated_normal_initializer",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.pad"
],
[
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.cumsum",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.unstack",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.truediv",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.reduce_max",
"tensorflow.compat.v1.greater",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.where",
"tensorflow.compat.v1.less_equal",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.nn.top_k",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.reduce_any",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.minimum",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.size",
"tensorflow.compat.v1.random_uniform",
"tensorflow.compat.v1.split",
"tensorflow.compat.v1.reverse_v2",
"tensorflow.compat.v1.greater_equal",
"tensorflow.compat.v1.reduce_min",
"tensorflow.compat.v1.less",
"tensorflow.compat.v1.identity",
"tensorflow.compat.v1.random_normal",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.image.draw_bounding_boxes",
"tensorflow.compat.v1.name_scope"
]
] |
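
The sq_dist op in the entry above relies on the expansion ||X(i,:) - Y(j,:)||^2 = ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:). A minimal NumPy sketch of the same identity (shapes and data here are illustrative, not taken from the source):

import numpy as np

rng = np.random.default_rng(0)
X = rng.random((5, 4))  # 5 boxes, 4 corner coordinates each
Y = rng.random((3, 4))  # 3 boxes

sqnorm1 = np.sum(X ** 2, axis=1, keepdims=True)   # shape [5, 1]
sqnorm2 = np.sum(Y ** 2, axis=1, keepdims=True)   # shape [3, 1]
innerprod = X @ Y.T                               # shape [5, 3]
fast = sqnorm1 + sqnorm2.T - 2.0 * innerprod      # same recipe as sq_dist

# brute-force pairwise distances agree with the vectorized form
naive = np.array([[np.sum((x - y) ** 2) for y in Y] for x in X])
assert np.allclose(fast, naive)
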
HapKoM/pyhowfar | [
"b12c248f696dc9bc2b50455b63a2b6ca7a440ba7"
] | [
"datasets/W300.py"
] | [
"from __future__ import print_function\n\nimport os\nimport numpy as np\nimport random\nimport math\nfrom skimage import io\n\nimport torch\nimport torch.utils.data as data\nimport torchfile\n\n# from utils.utils import *\nfrom utils.imutils import *\nfrom utils.transforms import *\n\n\nclass W300(data.Dataset):\n\n def __init__(self, args, split):\n self.nParts = 68\n self.pointType = args.pointType\n # self.anno = anno\n self.img_folder = args.data\n self.split = split\n self.is_train = True if self.split == 'train' else False\n self.anno = self._getDataFaces(self.is_train)\n self.total = len(self.anno)\n self.scale_factor = args.scale_factor\n self.rot_factor = args.rot_factor\n self.mean, self.std = self._comput_mean()\n\n def _getDataFaces(self, is_train):\n base_dir = self.img_folder\n dirs = os.listdir(base_dir)\n lines = []\n vallines = []\n\n if is_train:\n fid = open(os.path.join(base_dir, 'train.txt'), 'r')\n for line in fid.readlines():\n lines.append(line.strip())\n fid.close()\n else:\n fid = open(os.path.join(base_dir, 'test.txt'), 'r')\n for line in fid.readlines():\n vallines.append(line.strip())\n fid.close()\n\n if is_train:\n print('=> loaded train set, {} images were found'.format(len(lines)))\n return lines\n else:\n print('=> loaded validation set, {} images were found'.format(len(vallines)))\n return vallines\n\n def __len__(self):\n return self.total\n\n def __getitem__(self, index):\n inp, out, pts, c, s = self.generateSampleFace(index)\n self.pts, self.c, self.s = pts, c, s\n if self.is_train:\n return inp, out\n else:\n meta = {'index': index, 'center': c, 'scale': s, 'pts': pts,}\n return inp, out, meta\n\n def generateSampleFace(self, idx):\n sf = self.scale_factor\n rf = self.rot_factor\n\n main_pts = torchfile.load(\n os.path.join(self.img_folder, 'landmarks', self.anno[idx].split('_')[0],\n self.anno[idx][:-4] + '.t7'))\n pts = main_pts[0] if self.pointType == '2D' else main_pts[1]\n c = torch.Tensor((450 / 2, 450 / 2 + 50))\n s = 1.8\n\n img = load_image(\n os.path.join(self.img_folder, self.anno[idx].split('_')[0], self.anno[idx][:-8] +\n '.jpg'))\n\n r = 0\n if self.is_train:\n s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]\n r = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0\n\n if random.random() <= 0.5:\n img = torch.from_numpy(fliplr(img.numpy())).float()\n pts = shufflelr(pts, width=img.size(2), dataset='w300lp')\n c[0] = img.size(2) - c[0]\n\n img[0, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)\n img[1, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)\n img[2, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)\n\n inp = crop(img, c, s, [256, 256], rot=r)\n inp = color_normalize(inp, self.mean, self.std)\n\n tpts = pts.clone()\n out = torch.zeros(self.nParts, 64, 64)\n for i in range(self.nParts):\n if tpts[i, 0] > 0:\n tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2] + 1, c, s, [64, 64], rot=r))\n out[i] = draw_labelmap(out[i], tpts[i] - 1, sigma=1)\n\n return inp, out, pts, c, s\n\n def _comput_mean(self):\n meanstd_file = './data/300W_LP/mean.pth.tar'\n if os.path.isfile(meanstd_file):\n ms = torch.load(meanstd_file)\n else:\n print(\"\\tcomputing mean and std for the first time, it may takes a while, drink a cup of coffe...\")\n mean = torch.zeros(3)\n std = torch.zeros(3)\n if self.is_train:\n for i in range(self.total):\n a = self.anno[i]\n img_path = os.path.join(self.img_folder, self.anno[i].split('_')[0],\n self.anno[i][:-8] + '.jpg')\n img = load_image(img_path)\n mean += 
img.view(img.size(0), -1).mean(1)\n std += img.view(img.size(0), -1).std(1)\n\n mean /= self.total\n std /= self.total\n ms = {\n 'mean': mean,\n 'std': std,\n }\n torch.save(ms, meanstd_file)\n if self.is_train:\n print('\\tMean: %.4f, %.4f, %.4f' % (ms['mean'][0], ms['mean'][1], ms['mean'][2]))\n print('\\tStd: %.4f, %.4f, %.4f' % (ms['std'][0], ms['std'][1], ms['std'][2]))\n return ms['mean'], ms['std']\n"
] | [
[
"torch.load",
"torch.randn",
"torch.save",
"torch.zeros",
"torch.Tensor"
]
] |
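
The _comput_mean method in the W300 entry above accumulates per-channel mean and std one image at a time. A self-contained sketch of that accumulation pattern, with random tensors standing in for the images loaded from 300W-LP (the dataset paths and load_image helper are not reproduced here):

import torch

images = [torch.rand(3, 8, 8) for _ in range(4)]  # stand-ins for load_image results
mean = torch.zeros(3)
std = torch.zeros(3)
for img in images:
    flat = img.view(img.size(0), -1)  # [channels, pixels]
    mean += flat.mean(1)
    std += flat.std(1)
mean /= len(images)
std /= len(images)
print(mean, std)  # per-channel normalization constants
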
vijaysharmapc/Python-End-to-end-Data-Analysis | [
"a00f2d5d1547993e000b2551ec6a1360240885ba"
] | [
"Module1/Getting_Started_with_Data_Analysis_Code/4/annotate.py"
] | [
"#!/usr/bin/env python\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-2.4, 0.4, 20)\ny = x * x + 2 * x + 1\nplt.plot(x, y, 'c', linewidth=2.0)\nplt.text(-1.5, 1.8, 'y=x^2 + 2*x + 1',\n fontsize=14, style='italic')\nplt.annotate('minima point', xy=(-1, 0),\n xytext=(-1, 0.3), horizontalalignment='center',\n verticalalignment='top', \n arrowprops=dict(arrowstyle='->', \n connectionstyle='arc3'))\n \nplt.savefig('annotate.png')"
] | [
[
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig"
]
] |
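
The point annotated at xy=(-1, 0) in the script above is the vertex of the plotted parabola: y = x^2 + 2x + 1 = (x + 1)^2, which is zero at x = -1 and positive everywhere else. A quick standalone check (not part of the original script):

f = lambda x: x * x + 2 * x + 1
assert f(-1) == 0
assert all(f(-1) < f(-1 + d) for d in (-0.5, -0.1, 0.1, 0.5))
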
AYCHAIN/PracticalAI | [
"1657e31dfc60645f4f999475803f57c0ab9f1a2d",
"1657e31dfc60645f4f999475803f57c0ab9f1a2d"
] | [
"Section 4/04.02_omniscient_agent_webapp.py",
"Section 5/05.02_random_agent.py"
] | [
"from flask import Flask, redirect, render_template, url_for\nimport numpy as np\n\napp = Flask( __name__ )\n\[email protected]( '/home' )\ndef index():\n # retrieve the agent\n agent = app.config['AGENT']\n\n print( 'Episode: {}/{}'.format( agent.get_episode(), agent.get_episodes() ) )\n print( 'Trial: {}/{}'.format( agent.get_trial(), agent.get_trials() ) )\n if agent.get_episode() > agent.get_episodes():\n # episodes are over\n # compute the final prob\n prob_reward_array = agent.get_prob_reward_array()\n prob_01 = 100*np.round( prob_reward_array[0] / agent.get_episodes(), 2 )\n prob_02 = 100*np.round( prob_reward_array[1] / agent.get_episodes(), 2 )\n\n # avg the accumulated reward\n avg_accumulated_reward = agent.get_avg_accumulated_reward_array()\n\n # print the final \n print( '\\nProb Bandit 01:{}% - Prob Bandit 02:{}%'.format( prob_01, prob_02 ) )\n print( '\\n Avg accumulated reward: {}\\n'.format( np.mean( avg_accumulated_reward ) ) )\n\n # reset the episodes\n agent.reset_episode()\n\n elif agent.get_trial() > agent.get_trials():\n # trials are over\n # increase the episode\n agent.set_episode()\n\n # compute the partial results\n agent.set_prob_reward_array()\n\n # append the accumualted reward\n agent.set_append_accumulated_reward()\n\n # append the avg accumulated reward\n agent.set_append_avg_accumulated_reward()\n\n # reset the trial and initial variables\n agent.set_trial( reset=1 )\n\n # get the partial results\n partial_result = agent.get_prob_reward_array()\n prob_01 = partial_result[0] / agent.get_episode()\n prob_02 = partial_result[1] / agent.get_episode()\n\n # print the partial results\n print( '\\n Prob Bandit 01:{} - Prob Bandit 02:{}\\n'.format( prob_01, prob_02 ) )\n return redirect( url_for( 'index' ) )\n\n else:\n # trials are not over\n # code the omniscient agent\n bandit_machine = np.argmax( agent.get_prob_list() )\n\n # set the current bandit machine\n agent.set_current_bandit( bandit_machine )\n\n # pick up the web page\n if bandit_machine == 0: # red Yes button\n return render_template( 'layout_red.html' )\n else:\n return render_template( 'layout_blue.html' )\n\[email protected]( '/yes', methods=['POST'] )\ndef yes_event():\n agent = app.config['AGENT']\n\n # set the reward\n reward = 1\n\n # get the current bandit machine\n bandit_machine = agent.get_current_bandit()\n\n # add a reward to the bandit machine\n agent.set_reward_array( bandit_machine, reward )\n\n # increase how many times the bandit machine gets the lever pulled\n agent.set_bandit_array( bandit_machine )\n\n # sum the accumulated reward\n agent.set_accumulated_reward( reward )\n\n # increase the number of trial\n agent.set_trial( reset=0 )\n\n return redirect( url_for( 'index' ) )\n\[email protected]( '/no', methods=['POST'] )\ndef no_event():\n agent = app.config['AGENT']\n\n # set the reward\n reward = 0\n\n # get the current bandit machine\n bandit_machine = agent.get_current_bandit()\n\n # add a reward to the bandit machine\n agent.set_reward_array( bandit_machine, reward )\n\n # increase how many times the bandit machine gets the lever pulled\n agent.set_bandit_array( bandit_machine )\n\n # sum the accumulated reward\n agent.set_accumulated_reward( reward )\n\n # increase the number of trial\n agent.set_trial( reset=0 )\n return redirect( url_for( 'index' ) )\n\nif __name__ == \"__main__\":\n trials = 100\n episodes = 20\n\n prob_list = [0.3, 0.8]\n\n agent = OmniscientAgent( prob_list, trials, episodes )\n\n app.config['AGENT'] = agent\n app.run()\n\n",
"#import the libraries\nimport numpy as np\nimport random\n\n# class of random agent\nclass RandomAgent( object ):\n def __init__( self, prob_list ):\n self.prob_list = prob_list\n\n def pull( self, bandit_machine ):\n if np.random.random() < self.prob_list[ bandit_machine ]:\n reward = 1\n else:\n reward = 0\n\n return reward\n\n# prob list\nprob_list = [0.3, 0.8 ]\n\n# define the variable of the episode\ntrials = 1000\nepisodes = 200\n\nprob_reward_array = np.zeros( len( prob_list ) )\naccumulated_reward_array = list()\navg_accumulated_reward_array = list()\n\nbandit = RandomAgent( prob_list )\n\nfor episode in range( episodes ):\n if episode % 10 == 0:\n print( 'Episode: {} / {}'.format( episode, episodes ) )\n\n # initialize the variable\n reward_array = np.zeros( len( prob_list ) )\n bandit_array = np.full( len( prob_list ), 1.0e-5 )\n accumulated_reward = 0\n\n for trial in range( trials ):\n # define the random strategy\n bandit_machine = np.random.randint( low=0, high=2, size=1 )[0]\n\n # get the reward\n reward = bandit.pull( bandit_machine )\n\n # save the partial results\n reward_array[ bandit_machine ] += reward\n bandit_array[ bandit_machine ] += 1\n accumulated_reward += reward\n\n # compute the partial results\n prob_reward_array += reward_array / bandit_array\n accumulated_reward_array.append( accumulated_reward )\n avg_accumulated_reward_array.append( np.mean( accumulated_reward_array ) )\n\n# compute the final results\nprob_01 = 100*np.round( prob_reward_array[0] / episodes, 2 )\nprob_02 = 100*np.round( prob_reward_array[1] / episodes, 2 )\n\n# print the final results\nprint( '\\n Prob Bandit 01:{}% - Prob Bandit 02:{}%'.format( prob_01, prob_02 ) )\nprint( '\\n Avg accumulated reward: {}\\n'.format( np.mean( avg_accumulated_reward_array ) ) )\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"numpy.mean"
],
[
"numpy.round",
"numpy.random.randint",
"numpy.mean",
"numpy.random.random"
]
] |
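
The two scripts above bracket the exploration spectrum: the random agent always explores, while the omniscient agent always pulls the arm with the highest true probability. A hedged sketch of an epsilon-greedy strategy in between, which neither file implements (names and constants are illustrative):

import numpy as np

rng = np.random.default_rng(0)
prob_list = [0.3, 0.8]
counts = np.full(len(prob_list), 1e-5)  # avoid division by zero
rewards = np.zeros(len(prob_list))
epsilon, trials = 0.1, 1000
for _ in range(trials):
    if rng.random() < epsilon:
        arm = int(rng.integers(len(prob_list)))  # explore a random arm
    else:
        arm = int(np.argmax(rewards / counts))   # exploit the current estimate
    reward = 1 if rng.random() < prob_list[arm] else 0
    counts[arm] += 1
    rewards[arm] += reward
print(rewards / counts)  # empirical payout estimate per arm
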
andrei-assa/pandas | [
"ded76dbbfdff3211cfff0ec7039611b50d531efb"
] | [
"pandas/core/indexes/extension.py"
] | [
"\"\"\"\nShared methods for Index subclasses backed by ExtensionArray.\n\"\"\"\nfrom typing import (\n Hashable,\n List,\n Type,\n TypeVar,\n Union,\n)\n\nimport numpy as np\n\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import (\n cache_readonly,\n doc,\n)\n\nfrom pandas.core.dtypes.cast import (\n find_common_type,\n infer_dtype_from,\n)\nfrom pandas.core.dtypes.common import (\n is_dtype_equal,\n is_object_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCSeries,\n)\n\nfrom pandas.core.arrays import (\n Categorical,\n DatetimeArray,\n IntervalArray,\n PeriodArray,\n TimedeltaArray,\n)\nfrom pandas.core.arrays._mixins import NDArrayBackedExtensionArray\nfrom pandas.core.indexers import deprecate_ndim_indexing\nfrom pandas.core.indexes.base import Index\nfrom pandas.core.ops import get_op_result_name\n\n_T = TypeVar(\"_T\", bound=\"NDArrayBackedExtensionIndex\")\n\n\ndef inherit_from_data(name: str, delegate, cache: bool = False, wrap: bool = False):\n \"\"\"\n Make an alias for a method of the underlying ExtensionArray.\n\n Parameters\n ----------\n name : str\n Name of an attribute the class should inherit from its EA parent.\n delegate : class\n cache : bool, default False\n Whether to convert wrapped properties into cache_readonly\n wrap : bool, default False\n Whether to wrap the inherited result in an Index.\n\n Returns\n -------\n attribute, method, property, or cache_readonly\n \"\"\"\n attr = getattr(delegate, name)\n\n if isinstance(attr, property) or type(attr).__name__ == \"getset_descriptor\":\n # getset_descriptor i.e. property defined in cython class\n if cache:\n\n def cached(self):\n return getattr(self._data, name)\n\n cached.__name__ = name\n cached.__doc__ = attr.__doc__\n method = cache_readonly(cached)\n\n else:\n\n def fget(self):\n result = getattr(self._data, name)\n if wrap:\n if isinstance(result, type(self._data)):\n return type(self)._simple_new(result, name=self.name)\n elif isinstance(result, ABCDataFrame):\n return result.set_index(self)\n return Index(result, name=self.name)\n return result\n\n def fset(self, value):\n setattr(self._data, name, value)\n\n fget.__name__ = name\n fget.__doc__ = attr.__doc__\n\n method = property(fget, fset)\n\n elif not callable(attr):\n # just a normal attribute, no wrapping\n method = attr\n\n else:\n\n def method(self, *args, **kwargs):\n result = attr(self._data, *args, **kwargs)\n if wrap:\n if isinstance(result, type(self._data)):\n return type(self)._simple_new(result, name=self.name)\n elif isinstance(result, ABCDataFrame):\n return result.set_index(self)\n return Index(result, name=self.name)\n return result\n\n method.__name__ = name\n method.__doc__ = attr.__doc__\n return method\n\n\ndef inherit_names(names: List[str], delegate, cache: bool = False, wrap: bool = False):\n \"\"\"\n Class decorator to pin attributes from an ExtensionArray to a Index subclass.\n\n Parameters\n ----------\n names : List[str]\n delegate : class\n cache : bool, default False\n wrap : bool, default False\n Whether to wrap the inherited result in an Index.\n \"\"\"\n\n def wrapper(cls):\n for name in names:\n meth = inherit_from_data(name, delegate, cache=cache, wrap=wrap)\n setattr(cls, name, meth)\n\n return cls\n\n return wrapper\n\n\ndef _make_wrapped_comparison_op(opname: str):\n \"\"\"\n Create a comparison method that dispatches to ``._data``.\n \"\"\"\n\n def wrapper(self, other):\n if isinstance(other, 
ABCSeries):\n # the arrays defer to Series for comparison ops but the indexes\n # don't, so we have to unwrap here.\n other = other._values\n\n other = _maybe_unwrap_index(other)\n\n op = getattr(self._data, opname)\n return op(other)\n\n wrapper.__name__ = opname\n return wrapper\n\n\ndef make_wrapped_arith_op(opname: str):\n def method(self, other):\n if (\n isinstance(other, Index)\n and is_object_dtype(other.dtype)\n and type(other) is not Index\n ):\n # We return NotImplemented for object-dtype index *subclasses* so they have\n # a chance to implement ops before we unwrap them.\n # See https://github.com/pandas-dev/pandas/issues/31109\n return NotImplemented\n meth = getattr(self._data, opname)\n result = meth(_maybe_unwrap_index(other))\n return _wrap_arithmetic_op(self, other, result)\n\n method.__name__ = opname\n return method\n\n\ndef _wrap_arithmetic_op(self, other, result):\n if result is NotImplemented:\n return NotImplemented\n\n if isinstance(result, tuple):\n # divmod, rdivmod\n assert len(result) == 2\n return (\n _wrap_arithmetic_op(self, other, result[0]),\n _wrap_arithmetic_op(self, other, result[1]),\n )\n\n if not isinstance(result, Index):\n # Index.__new__ will choose appropriate subclass for dtype\n result = Index(result)\n\n res_name = get_op_result_name(self, other)\n result.name = res_name\n return result\n\n\ndef _maybe_unwrap_index(obj):\n \"\"\"\n If operating against another Index object, we need to unwrap the underlying\n data before deferring to the DatetimeArray/TimedeltaArray/PeriodArray\n implementation, otherwise we will incorrectly return NotImplemented.\n\n Parameters\n ----------\n obj : object\n\n Returns\n -------\n unwrapped object\n \"\"\"\n if isinstance(obj, Index):\n return obj._data\n return obj\n\n\nclass ExtensionIndex(Index):\n \"\"\"\n Index subclass for indexes backed by ExtensionArray.\n \"\"\"\n\n # The base class already passes through to _data:\n # size, __len__, dtype\n\n _data: Union[IntervalArray, NDArrayBackedExtensionArray]\n\n __eq__ = _make_wrapped_comparison_op(\"__eq__\")\n __ne__ = _make_wrapped_comparison_op(\"__ne__\")\n __lt__ = _make_wrapped_comparison_op(\"__lt__\")\n __gt__ = _make_wrapped_comparison_op(\"__gt__\")\n __le__ = _make_wrapped_comparison_op(\"__le__\")\n __ge__ = _make_wrapped_comparison_op(\"__ge__\")\n\n @property\n def _has_complex_internals(self) -> bool:\n # used to avoid libreduction code paths, which raise or require conversion\n return True\n\n # ---------------------------------------------------------------------\n # NDarray-Like Methods\n\n def __getitem__(self, key):\n result = self._data[key]\n if isinstance(result, type(self._data)):\n if result.ndim == 1:\n return type(self)(result, name=self.name)\n # Unpack to ndarray for MPL compat\n\n result = result._ndarray\n\n # Includes cases where we get a 2D ndarray back for MPL compat\n deprecate_ndim_indexing(result)\n return result\n\n def searchsorted(self, value, side=\"left\", sorter=None) -> np.ndarray:\n # overriding IndexOpsMixin improves performance GH#38083\n return self._data.searchsorted(value, side=side, sorter=sorter)\n\n # ---------------------------------------------------------------------\n\n def _get_engine_target(self) -> np.ndarray:\n return np.asarray(self._data)\n\n def delete(self, loc):\n \"\"\"\n Make new Index with passed location(-s) deleted\n\n Returns\n -------\n new_index : Index\n \"\"\"\n arr = self._data.delete(loc)\n return type(self)._simple_new(arr, name=self.name)\n\n def repeat(self, repeats, axis=None):\n 
nv.validate_repeat((), {\"axis\": axis})\n result = self._data.repeat(repeats, axis=axis)\n return type(self)._simple_new(result, name=self.name)\n\n def insert(self, loc: int, item):\n # ExtensionIndex subclasses must override Index.insert\n raise AbstractMethodError(self)\n\n def _validate_fill_value(self, value):\n \"\"\"\n Convert value to be insertable to underlying array.\n \"\"\"\n return self._data._validate_setitem_value(value)\n\n def _get_unique_index(self):\n if self.is_unique:\n return self\n\n result = self._data.unique()\n return self._shallow_copy(result)\n\n @doc(Index.map)\n def map(self, mapper, na_action=None):\n # Try to run function on index first, and then on elements of index\n # Especially important for group-by functionality\n try:\n result = mapper(self)\n\n # Try to use this result if we can\n if isinstance(result, np.ndarray):\n result = Index(result)\n\n if not isinstance(result, Index):\n raise TypeError(\"The map function must return an Index object\")\n return result\n except Exception:\n return self.astype(object).map(mapper)\n\n @doc(Index.astype)\n def astype(self, dtype, copy=True):\n dtype = pandas_dtype(dtype)\n if is_dtype_equal(self.dtype, dtype):\n if not copy:\n # Ensure that self.astype(self.dtype) is self\n return self\n return self.copy()\n\n if isinstance(dtype, np.dtype) and dtype.kind == \"M\" and dtype != \"M8[ns]\":\n # For now Datetime supports this by unwrapping ndarray, but DTI doesn't\n raise TypeError(f\"Cannot cast {type(self._data).__name__} to dtype\")\n\n new_values = self._data.astype(dtype, copy=copy)\n\n # pass copy=False because any copying will be done in the\n # _data.astype call above\n return Index(new_values, dtype=new_values.dtype, name=self.name, copy=False)\n\n @cache_readonly\n def _isnan(self) -> np.ndarray:\n # error: Incompatible return value type (got \"ExtensionArray\", expected\n # \"ndarray\")\n return self._data.isna() # type: ignore[return-value]\n\n @doc(Index.equals)\n def equals(self, other) -> bool:\n # Dispatch to the ExtensionArray's .equals method.\n if self.is_(other):\n return True\n\n if not isinstance(other, type(self)):\n return False\n\n return self._data.equals(other._data)\n\n\nclass NDArrayBackedExtensionIndex(ExtensionIndex):\n \"\"\"\n Index subclass for indexes backed by NDArrayBackedExtensionArray.\n \"\"\"\n\n _data: NDArrayBackedExtensionArray\n\n _data_cls: Union[\n Type[Categorical],\n Type[DatetimeArray],\n Type[TimedeltaArray],\n Type[PeriodArray],\n ]\n\n @classmethod\n def _simple_new(\n cls,\n values: NDArrayBackedExtensionArray,\n name: Hashable = None,\n ):\n assert isinstance(values, cls._data_cls), type(values)\n\n result = object.__new__(cls)\n result._data = values\n result._name = name\n result._cache = {}\n\n # For groupby perf. See note in indexes/base about _index_data\n result._index_data = values._ndarray\n\n result._reset_identity()\n return result\n\n def _get_engine_target(self) -> np.ndarray:\n return self._data._ndarray\n\n def insert(self: _T, loc: int, item) -> _T:\n \"\"\"\n Make new Index inserting new item at location. Follows\n Python list.append semantics for negative values.\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : Index\n\n Raises\n ------\n ValueError if the item is not valid for this dtype.\n \"\"\"\n arr = self._data\n try:\n code = arr._validate_scalar(item)\n except (ValueError, TypeError):\n # e.g. 
trying to insert an integer into a DatetimeIndex\n # We cannot keep the same dtype, so cast to the (often object)\n # minimal shared dtype before doing the insert.\n dtype, _ = infer_dtype_from(item, pandas_dtype=True)\n dtype = find_common_type([self.dtype, dtype])\n return self.astype(dtype).insert(loc, item)\n else:\n new_vals = np.concatenate(\n (\n arr._ndarray[:loc],\n np.asarray([code], dtype=arr._ndarray.dtype),\n arr._ndarray[loc:],\n )\n )\n new_arr = arr._from_backing_data(new_vals)\n return type(self)._simple_new(new_arr, name=self.name)\n\n def putmask(self, mask, value) -> Index:\n res_values = self._data.copy()\n try:\n res_values.putmask(mask, value)\n except (TypeError, ValueError):\n return self.astype(object).putmask(mask, value)\n\n return type(self)._simple_new(res_values, name=self.name)\n\n def _wrap_joined_index(self: _T, joined: np.ndarray, other: _T) -> _T:\n name = get_op_result_name(self, other)\n arr = self._data._from_backing_data(joined)\n return type(self)._simple_new(arr, name=name)\n"
] | [
[
"pandas.core.ops.get_op_result_name",
"pandas.core.indexes.base.Index",
"pandas.core.indexers.deprecate_ndim_indexing",
"pandas.core.dtypes.cast.infer_dtype_from",
"numpy.asarray",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.errors.AbstractMethodError",
"pandas.util._decorators.cache_readonly",
"pandas.util._decorators.doc",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.compat.numpy.function.validate_repeat",
"pandas.core.dtypes.cast.find_common_type"
]
] |
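
inherit_names in the pandas entry above pins ExtensionArray attributes onto an Index subclass through a class decorator. A simplified, self-contained sketch of that delegation pattern (Backing and Wrapper are hypothetical classes, not pandas API):

def inherit(names):
    def wrapper(cls):
        for name in names:
            def method(self, _name=name):  # default arg freezes the loop variable
                return getattr(self._data, _name)()
            method.__name__ = name
            setattr(cls, name, method)
        return cls
    return wrapper

class Backing:
    def doubled(self):
        return [2, 4, 6]

@inherit(["doubled"])
class Wrapper:
    def __init__(self):
        self._data = Backing()

print(Wrapper().doubled())  # [2, 4, 6], resolved on the backing object
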
lanl/SEPIA | [
"0a1e606e1d1072f49e4f3f358962bd8918a5d3a3"
] | [
"examples/Ball_Drop/GenDataBallDrop1.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 24 07:52:25 2020\nGenerate, Plot, and write all data needed for ball drop example 1\n@author: granthutchings\n\"\"\"\n#%% Imports\nimport numpy as np\n#import pyDOE # Latin Hypercube\nimport matplotlib.pyplot as plt\nfrom matplotlib.gridspec import GridSpec\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nfrom invertH import invertHsim, invertHtrue\n\n#%% notes\n# x = R\n# theta = C\n# y = {h, t}, i.e., pairs of h and t that form a trace when plotted\n\n# imagine the field experiments involve say 4 platforms --> 4 values of h.\n# Then for each R, one experiment gives output of 4 h-t pairs (a curve).\n# Likewise for the simulator, we have a dense grid of say 100 heights h.\n# Then for each setting of {x, theta} = {R, C} we get output of 100 h-t\n# pairs.\n\n# I'll make python files to:\n# 1. generate the h-t pairs and write them into files. (this file and invertH.py)\n# 2. a \"runmcmc\"-type file that first calls...\n# 3. ...a file that reads in the data and packages it appropriately\n\n\n# generate \"field\" data and \"simulator\" data, where the simulator model is\n# systematically off from reality.\n\n# true: d2h/dt2 = g - C (dh/dt)^2 / R\n# sim: d2h/dt2 = g - C (dh/dt) / R\n\n# inputs for field experiments: x = R\n# inputs for simulator: x = R, theta = C\n# We want to calibrate theta in the simulator to match the field data.\n\n#%% Compute data\ndef gen_data(et,plot_design=False,R_new=None,R_design=None,C_design=None):\n n = 3; m = 25\n g = 9.8 # gravity\n C_true = .1 / (4 * np.pi / 3); print('generating data with C = ',C_true)\n n_field_heights = 4\n h_field = np.linspace(5,20,n_field_heights) # platform heights for the field experiments\n h_sim = np.arange(1.5,25,1.5) # grid of heights fed to the simulator\n h_dense = np.concatenate((np.arange(0,2,.01),np.arange(2,25,.5))) # a denser grid for drawing the curves\n\n # the coefficient of drag for a smooth sphere is 0.1, and we're\n # dividing by 4/3 pi to absorb a constant related to the volume of the\n # sphere (not including R)\n if R_new is None: R = np.array([.1, .2, .4]) # radii of balls to try (in meters)\n else: R = R_new\n\n # get a Latin hypercube sim_design of m=25 points over R_sim, C_sim\n #sim_design = pyDOE.lhs(2,m)\n\n # Use Kary's sim_designign for testing purposes\n sim_design = np.array([\n [0.1239, 0.8024],\n [0.8738, 0.6473],\n [0.6140, 0.3337],\n [0.8833, 0.4783],\n [0.9946, 0.0548],\n [0.1178, 0.9382],\n [0.1805, 0.2411],\n [0.6638, 0.2861],\n [0.2939, 0.1208],\n [0.2451, 0.2397],\n [0.4577, 0.5696],\n [0.4377, 0.8874],\n [0.0737, 0.7384],\n [0.6931, 0.8683],\n [0.4901, 0.7070],\n [0.5953, 0.9828],\n [0.7506, 0.1009],\n [0.7783, 0.4225],\n [0.8333, 0.5318],\n [0.3987, 0.6312],\n [0.2021, 0.4990],\n [0.3495, 0.3680],\n [0.9411, 0.7935],\n [0.0198, 0.0218],\n [0.5440, 0.1925]])\n \n # scale the first column to [0,.5] and call it R_sim\n # (this inclusim_design our field values, i.e., R \\in [0,.5])\n # scale the second column to [0.05,.25] and call it Csim\n # (likewise, Ctrue \\in [0.05, .25])\n sim_design[:,0] = sim_design[:,0] * .4 + .05\n sim_design[:,1] = sim_design[:,1] * .2 + .05\n if R_design is not None: R_sim = R_design\n else: R_sim = sim_design[:,0]\n if C_design is not None: C_sim = C_design\n else: C_sim = sim_design[:,1]\n if plot_design:\n plt.scatter(R_sim,C_sim)\n plt.xlabel(\"R design points\");plt.ylabel(\"C design points\")\n plt.title(\"Simulator Design\")\n plt.show()\n\n # Generate field data for each 
R\n    y_field = invertHtrue(h_field, g, C_true, R, et) # observed times\n    y_field_dense = invertHtrue(h_dense, g, C_true, R, et) # dense grid for plots\n\n    # imagine that the biggest ball is too big to get to the highest\n    # platform, so we don't observe data there\n    #y_field[-1,-1] = np.nan\n\n    # Generate simulated data for each (C,R) pair\n    y_sim = invertHsim(h_sim, g, C_sim, R_sim)\n    y_sim_dense = invertHsim(h_dense, g, C_sim, R_sim)\n \n    data_dict = dict([('R',R),('sim_design',np.column_stack((R_sim,C_sim))),\\\n                      ('n',n),('m',m),('C_true',C_true),\\\n                      ('h_field',h_field),('h_sim',h_sim),('h_dense',h_dense),\\\n                      ('y_field',y_field),('y_field_dense',y_field_dense),\\\n                      ('y_sim',y_sim),('y_sim_dense',y_sim_dense)])\n \n    return(data_dict)\n\n#%% #===================== Plots ===============================#\ndef plot_data(data_dict,inset=True,near_sim=True):\n    n = data_dict['n']\n    m = data_dict['m']\n    y_sim = data_dict['y_sim']\n    y_field = data_dict['y_field']\n    R = data_dict['R']\n    R_sim = data_dict['sim_design'][:,0]\n    C_sim = data_dict['sim_design'][:,1]\n    h_field = data_dict['h_field']\n    h_sim = data_dict['h_sim']\n    h_dense = data_dict['h_dense']\n    y_field = data_dict['y_field']\n    y_field_dense = data_dict['y_field_dense']\n    y_sim = data_dict['y_sim']\n    y_sim_dense = data_dict['y_sim_dense']\n \n    if isinstance(y_field, list): ragged = True\n    else: ragged = False\n \n    if ragged:\n        y_max = max(max(np.array([np.max(k) for k in y_field])),max(y_sim.max(1)))\n    else:\n        y_max = max(max(y_field.max(1)),max(y_sim.max(1))) # max of all row maxes for axis limit\n    # find closest values each R\n    # ith column of R_nearest_sim_design contains the n_neighbors nearest design points (by index)\n    # for ith value of R\n    n_neighbors = 3\n    R_nearest_sim_design = np.zeros(shape=(n_neighbors,len(R)),dtype=int)\n    for i in range(len(R)):\n        dist = np.argsort(np.abs(R_sim-R[i]))\n        R_nearest_sim_design[:,i] = dist[0:n_neighbors]\n\n    # Generate plot for each radius\n    colors = ('r', 'g', 'b')\n    fig = plt.figure(figsize=[12,12],constrained_layout=True)\n    gs = GridSpec(2,2,figure=fig)\n    axs = np.array([fig.add_subplot(gs[0,0]),\\\n                    fig.add_subplot(gs[0,1]),\\\n                    fig.add_subplot(gs[1,0])])\n    for i in range(len(R)):\n        # axis limits, ticks, and labels\n        axs[i].set_xlim([0, 25])\n        axs[i].set_ylim([0, y_max+.5])\n        axs[i].xaxis.set_ticks(np.arange(0,30,5))\n        axs[i].yaxis.set_ticks(np.arange(0,y_max+.5,1))\n        axs[i].set_title(\"Ball Radius {} m\".format(R[i]),fontweight=\"bold\")\n        axs[i].set_xlabel(\"Distance (m)\")\n        axs[i].set_ylabel(\"Time (s)\")\n\n        # simulations - all\n        for j in range(m):\n            axs[i].plot(h_dense, np.transpose(y_sim_dense)[:,j],color='lightgreen',\\\n                        label=\"Simulation runs\" if j==0 else \"\")\n\n        if near_sim:\n            # simulations - nearest neighbors\n            for j in range(n_neighbors):\n                axs[i].plot(h_dense,np.transpose(y_sim_dense)[:,R_nearest_sim_design[j,i]],\\\n                            linestyle=\"--\",\\\n                            color=colors[j],label=\"Nearest Sim {}\".format(j+1))\n\n        # true data curve and \"real data points\"\n        axs[i].plot(h_dense, y_field_dense[i,:],'k',label=\"Reality\")\n        if ragged:\n            axs[i].plot(h_field[i],y_field[i],'ks',label='Reality')\n        else:\n            axs[i].plot(h_field, y_field[i,],'ks',label=\"Field data\")\n\n\n        axs[i].legend(loc=\"lower right\")\n\n\n        if inset:\n            # embed design point subplot\n            inset_ax = inset_axes(axs[i],width=\"30%\",height=\"30%\",loc=\"upper left\",\\\n                                  borderpad=2.5)\n            inset_ax.set_xlabel(\"R design values\",fontsize=7,labelpad=1)\n            inset_ax.set_ylabel(\"C design values\",fontsize=7)\n            
inset_ax.xaxis.set_ticks(R)\n            inset_ax.yaxis.set_ticks(np.arange(0,.251,.05))\n            inset_ax.tick_params(axis='both', which='major', labelsize=7, pad = -5)\n            inset_ax.scatter(R_sim,C_sim,s=15, facecolors='none', edgecolors='grey')\n            inset_ax.scatter(R_sim[R_nearest_sim_design[:,i]],C_sim[R_nearest_sim_design[:,i]],s=15,\\\n                             color=colors)\n            inset_ax.axvline(x=R[i], ymin=0, ymax=1,color='k',linewidth=.5)\n    plt.savefig('data/plotAll.png', dpi=300)\n    plt.show()\n\n#%% #==================== Write data ===========================#\n# write the h-t pairs into files\ndef write_data(data_dict, datadir = '/Users/granthutchings/Documents/LANL/SEPIA/sepia/Examples/Ball_Drop/data/ball_drop_1'):\n    # datadir == directory where data files should be written to or read from\n\n    # sim.dat, should be length(hsim) x length(Csim)\n    y_sim = data_dict['y_sim']\n    with open(datadir+'sim.dat',\"w+\") as f:\n        for line in np.array(np.transpose(y_sim)):\n            np.savetxt(f, line)\n\n    # sim.height, a file with just the heights (same for all sim runs)\n    h_sim = data_dict['h_sim']\n    with open(datadir+'sim.height',\"w+\") as f:\n        for line in np.array(np.transpose(h_sim)):\n            np.savetxt(f, line)\n\n    # sim.design, length(Csim) x (num X's + num thetas)\n    R_sim = data_dict['sim_design'][:,0]; C_sim = data_dict['sim_design'][:,1]\n    sim_design = np.transpose(np.array([R_sim, C_sim]))\n    with open(datadir+'sim.design',\"w+\") as f:\n        for line in sim_design:\n            np.savetxt(f, line)\n\n    # field.dat, one row per experiment (radius)\n    y_field = data_dict['y_field']\n    with open(datadir+'field.dat',\"w+\") as f:\n        for line in np.array(y_field):\n            np.savetxt(f, line)\n\n    # field.height\n    h_field = data_dict['h_field']\n    with open(datadir+'field.height',\"w+\") as f:\n        for line in np.array(h_field):\n            np.savetxt(f, line)\n\n    # field radii\n    R = data_dict['R']\n    with open(datadir+'field.radii',\"w+\") as f:\n        for line in np.array(R):\n            np.savetxt(f, line)\n \n#%%\ndef read_data(datadir = '/Users/granthutchings/Documents/LANL/SEPIA/sepia/Examples/Ball_Drop/data/ball_drop_1'):\n \n    with open(datadir+'sim.dat','r') as f:\n        y_sim = np.loadtxt(f)\n    with open(datadir+'sim.height',\"r\") as f:\n        h_sim = np.loadtxt(f)\n    with open(datadir+'sim.design','r') as f:\n        sim_design = np.loadtxt(f)\n    with open(datadir+'field.dat','r') as f:\n        y_field = np.loadtxt(f)\n    with open(datadir+'field.height','r') as f:\n        h_field = np.loadtxt(f)\n    with open(datadir+'field.radii','r') as f:\n        R = np.loadtxt(f)\n \n    data_dict = dict([('R',R),('sim_design',sim_design),\\\n                      ('h_field',h_field),('h_sim',h_sim),\\\n                      ('y_field',y_field),('y_sim',y_sim)])\n \n    return(data_dict)\n"
] | [
[
"numpy.transpose",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.savetxt",
"numpy.abs",
"numpy.column_stack",
"numpy.arange",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.max",
"numpy.loadtxt",
"numpy.array",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter"
]
] |
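
gen_data in the SEPIA entry above maps a unit-square design to physical ranges with a linear rescale: column 0 becomes the radius R in [0.05, 0.45] and column 1 the drag coefficient C in [0.05, 0.25]. A two-point sketch of that step, using the first two rows of the design matrix shown above:

import numpy as np

unit_design = np.array([[0.1239, 0.8024],
                        [0.8738, 0.6473]])
scaled = unit_design.copy()
scaled[:, 0] = scaled[:, 0] * .4 + .05  # R column
scaled[:, 1] = scaled[:, 1] * .2 + .05  # C column
print(scaled)  # rows now lie in [0.05, 0.45] x [0.05, 0.25]
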
kmohrman/coffea | [
"2baae94028c38b59f0eb52127d8fb92840dbf23d"
] | [
"coffea/lookup_tools/dense_lookup.py"
] | [
"from coffea.lookup_tools.lookup_base import lookup_base\n\nimport numpy\nfrom copy import deepcopy\n\n\nclass dense_lookup(lookup_base):\n def __init__(self, values, dims, feval_dim=None):\n super(dense_lookup, self).__init__()\n self._dimension = 0\n whattype = type(dims)\n if whattype == numpy.ndarray:\n self._dimension = 1\n else:\n self._dimension = len(dims)\n if self._dimension == 0:\n raise Exception(\"Could not define dimension for {}\".format(whattype))\n self._axes = deepcopy(dims)\n self._feval_dim = None\n vals_are_strings = (\n \"string\" in values.dtype.name\n or \"str\" in values.dtype.name\n or \"unicode\" in values.dtype.name\n or \"bytes\" in values.dtype.name\n ) # ....\n if not isinstance(values, numpy.ndarray):\n raise TypeError(\"values is not a numpy array, but %r\" % type(values))\n if vals_are_strings:\n raise Exception(\"dense_lookup cannot handle string values!\")\n self._values = deepcopy(values)\n\n def _evaluate(self, *args):\n indices = []\n if self._dimension == 1:\n indices.append(\n numpy.clip(\n numpy.searchsorted(self._axes, args[0], side=\"right\") - 1,\n 0,\n self._values.shape[0] - 1,\n )\n )\n else:\n for dim in range(self._dimension):\n indices.append(\n numpy.clip(\n numpy.searchsorted(self._axes[dim], args[dim], side=\"right\")\n - 1,\n 0,\n self._values.shape[dim] - 1,\n )\n )\n return self._values[tuple(indices)]\n\n def __repr__(self):\n myrepr = \"{} dimensional histogram with axes:\\n\".format(self._dimension)\n temp = \"\"\n if self._dimension == 1:\n temp = \"\\t1: {}\\n\".format(self._axes)\n else:\n temp = \"\\t1: {}\\n\".format(self._axes[0])\n for idim in range(1, self._dimension):\n temp += \"\\t{}: {}\\n\".format(idim + 1, self._axes[idim])\n myrepr += temp\n return myrepr\n"
] | [
[
"numpy.searchsorted"
]
] |
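dense_lookup._evaluate is a clipped searchsorted bin lookup: inputs below the first edge or above the last clamp to the edge bins. The 1-D branch reduces to the following plain-numpy sketch (arrays illustrative):

import numpy as np

edges = np.array([0.0, 1.0, 2.0, 3.0])   # the 1-D "dims" axis
values = np.array([10.0, 20.0, 30.0])    # one value per bin
x = np.array([-5.0, 0.5, 2.9, 99.0])

idx = np.clip(np.searchsorted(edges, x, side="right") - 1, 0, values.shape[0] - 1)
print(values[idx])                       # [10. 10. 30. 30.]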
dataiku/dss-plugin-timeseries-preparation | [
"bdb662c909a0ad6d7845325a70e3dac2bdcc6b28"
] | [
"tests/python/unit/dku_timeseries/resampling/test_resampler_helpers.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom dku_timeseries.timeseries_helpers import generate_date_range, get_date_offset\nfrom recipe_config_loading import get_resampling_params\n\n\[email protected]\ndef config():\n config = {u'clip_end': 0, u'constant_value': 0, u'extrapolation_method': u'none', u'shift': 0, u'time_unit_end_of_week': u'SUN',\n u'datetime_column': u'Date', u'advanced_activated': False, u'time_unit': u'quarters', u'clip_start': 0, u'time_step': 2,\n u'interpolation_method': u'linear'}\n return config\n\n\nclass TestResamplerHelpers:\n def test_date_offset(self):\n time_unit = \"business_days\"\n offset_value = 0\n sunday = pd.Timestamp('2021-01-31 10:00:00')\n offset = get_date_offset(time_unit, offset_value)\n assert sunday + offset == sunday\n\n sunday = pd.Timestamp('2021-01-31 00:00:00')\n offset = get_date_offset(time_unit, 1)\n assert sunday + offset == pd.Timestamp('2021-02-01 00:00:00')\n assert sunday - offset == pd.Timestamp('2021-01-29 00:00:00')\n assert sunday + offset + offset == pd.Timestamp('2021-02-02 00:00:00')\n\n friday = pd.Timestamp('2021-01-29 00:00:00')\n offset = get_date_offset(time_unit, 1)\n assert friday + offset == pd.Timestamp('2021-02-01 00:00:00')\n\n friday = pd.Timestamp('2021-01-29 00:00:00')\n offset = get_date_offset(time_unit, 2)\n assert friday + offset == pd.Timestamp('2021-02-02 00:00:00')\n\n saturday = pd.Timestamp('2021-01-30 00:00:00')\n offset = get_date_offset(time_unit, 1)\n assert saturday + offset == pd.Timestamp('2021-02-01 00:00:00')\n\n saturday = pd.Timestamp('2021-02-04 00:00:00')\n offset = get_date_offset(time_unit, 1)\n assert saturday + offset == pd.Timestamp('2021-02-05 00:00:00')\n\n def test_generate_date_range_month(self, config):\n config[\"time_unit\"] = \"months\"\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n end_time = pd.Timestamp('2021-06-20 00:00:00')\n\n start_time = pd.Timestamp('2021-01-31 00:00:00')\n date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))\n\n start_time = pd.Timestamp('2021-01-23 00:00:00')\n date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))\n\n start_time = pd.Timestamp('2021-01-31 10:00:00')\n date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))\n\n start_time = pd.Timestamp('2021-01-31 10:00:00').tz_localize(\"CET\")\n end_time = pd.Timestamp('2021-06-20 00:00:00').tz_localize(\"CET\")\n date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(\n ['2021-01-31 00:00:00+01:00', '2021-03-31 00:00:00+02:00', '2021-05-31 00:00:00+02:00', '2021-07-31 00:00:00+02:00']))\n\n start_time = pd.Timestamp('2021-01-31 10:00:00')\n end_time = pd.Timestamp('2021-06-20 00:00:00')\n date_range = generate_date_range(start_time, end_time, 1, 0, 1, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-03-31', '2021-05-31', '2021-07-31']))\n\n def 
test_generate_date_range_week(self, config):\n config[\"time_unit\"] = \"weeks\"\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n start_time = pd.Timestamp('2020-12-23 00:00:00')\n end_time = pd.Timestamp('2021-01-18 00:00:00')\n\n date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-27', '2021-01-10', '2021-01-24']))\n\n end_time = pd.Timestamp('2021-01-24 00:00:00')\n date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-27', '2021-01-10', '2021-01-24', '2021-02-07']))\n\n date_range = generate_date_range(start_time, end_time, 1, 0, 1, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-10', '2021-01-24', '2021-02-07']))\n\n config[\"time_unit\"] = \"weeks\"\n config[\"time_unit_end_of_week\"] = \"WED\"\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-23', '2021-01-6', '2021-01-20', '2021-02-03']))\n\n def test_generate_date_range_quarters(self, config):\n config[\"time_step\"] = 1\n config[\"time_unit\"] = \"quarters\"\n start_time = pd.Timestamp('2020-01-23 00:00:00')\n end_time = pd.Timestamp('2021-01-18 00:00:00')\n\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-01-31', '2020-04-30', '2020-07-31', '2020-10-31', '2021-01-31']))\n\n def test_generate_date_range_half_year(self, config):\n config[\"time_step\"] = 1\n config[\"time_unit\"] = \"semi_annual\"\n start_time = pd.Timestamp('2020-01-01 00:00:00')\n end_time = pd.Timestamp('2021-06-18 00:00:00')\n\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-01-31', '2020-07-31', '2021-01-31', '2021-07-31']))\n\n def test_generate_date_range_b_days(self, config):\n config[\"time_unit\"] = \"business_days\"\n config[\"time_step\"] = 1\n start_time = pd.Timestamp('2021-01-02 00:00:00')\n end_time = pd.Timestamp('2021-01-10 00:00:00')\n\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-04', '2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08', '2021-01-11']))\n\n clip_start = 1\n clip_end = 1\n shift = 0\n date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-04', '2021-01-05', '2021-01-06', '2021-01-07', 
'2021-01-08', '2021-01-11']))\n\n clip_start = 2\n clip_end = 2\n shift = 0\n date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08']))\n\n def test_generate_date_range_days(self, config):\n config[\"time_unit\"] = \"days\"\n config[\"time_step\"] = 1\n start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')\n end_time = pd.Timestamp('20190214 01:59:00').tz_localize('CET')\n\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n clip_start = 5\n shift = 2\n clip_end = 3\n\n date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)\n expected_range = pd.DatetimeIndex(['2019-02-07 00:00:00+01:00', '2019-02-08 00:00:00+01:00',\n '2019-02-09 00:00:00+01:00', '2019-02-10 00:00:00+01:00',\n '2019-02-11 00:00:00+01:00', '2019-02-12 00:00:00+01:00',\n '2019-02-13 00:00:00+01:00'])\n np.testing.assert_array_equal(date_range, expected_range)\n\n def test_generate_date_range_hours(self, config):\n config[\"time_unit\"] = \"hours\"\n config[\"time_step\"] = 1\n start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')\n end_time = pd.Timestamp('20190131 11:59:00').tz_localize('CET')\n\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n clip_start = 5\n shift = 2\n clip_end = 3\n\n date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)\n expected_range = pd.DatetimeIndex(['2019-01-31 09:00:00+01:00', '2019-01-31 10:00:00+01:00',\n '2019-01-31 11:00:00+01:00'])\n np.testing.assert_array_equal(date_range, expected_range)\n\n def test_generate_date_range_minutes(self, config):\n config[\"time_unit\"] = \"minutes\"\n config[\"time_step\"] = 1\n start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')\n end_time = pd.Timestamp('20190131 02:15:00').tz_localize('CET')\n\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n clip_start = 5\n shift = 2\n clip_end = 3\n\n date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)\n expected_range = pd.DatetimeIndex(['2019-01-31 02:06:00+01:00', '2019-01-31 02:07:00+01:00',\n '2019-01-31 02:08:00+01:00', '2019-01-31 02:09:00+01:00',\n '2019-01-31 02:10:00+01:00', '2019-01-31 02:11:00+01:00',\n '2019-01-31 02:12:00+01:00', '2019-01-31 02:13:00+01:00',\n '2019-01-31 02:14:00+01:00'])\n np.testing.assert_array_equal(date_range, expected_range)\n\n def test_generate_date_range_seconds(self, config):\n config[\"time_unit\"] = \"seconds\"\n config[\"time_step\"] = 1\n start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')\n end_time = pd.Timestamp('20190131 01:59:12').tz_localize('CET')\n\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n clip_start = 5\n shift = 2\n clip_end = 3\n\n date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)\n expected_range = pd.DatetimeIndex(['2019-01-31 01:59:07+01:00', '2019-01-31 01:59:08+01:00',\n '2019-01-31 01:59:09+01:00', 
'2019-01-31 01:59:10+01:00',\n '2019-01-31 01:59:11+01:00'])\n np.testing.assert_array_equal(date_range, expected_range)\n\n def test_generate_date_range_milliseconds(self, config):\n config[\"time_unit\"] = \"milliseconds\"\n config[\"time_step\"] = 1\n start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')\n end_time = pd.Timestamp('2019-01-31 01:59:00.015000').tz_localize('CET')\n\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n clip_start = 5\n shift = 2\n clip_end = 3\n\n date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)\n expected_range = pd.DatetimeIndex(['2019-01-31 01:59:00.007000+01:00',\n '2019-01-31 01:59:00.008000+01:00',\n '2019-01-31 01:59:00.009000+01:00',\n '2019-01-31 01:59:00.010000+01:00',\n '2019-01-31 01:59:00.011000+01:00',\n '2019-01-31 01:59:00.012000+01:00',\n '2019-01-31 01:59:00.013000+01:00',\n '2019-01-31 01:59:00.014000+01:00'])\n np.testing.assert_array_equal(date_range, expected_range)\n\n def test_generate_date_range_microseconds(self, config):\n config[\"time_unit\"] = \"microseconds\"\n config[\"time_step\"] = 1\n start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')\n end_time = pd.Timestamp('2019-01-31 01:59:00.000016').tz_localize('CET')\n\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n clip_start = 5\n shift = 2\n clip_end = 3\n\n date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)\n expected_range = pd.DatetimeIndex(['2019-01-31 01:59:00.000007+01:00',\n '2019-01-31 01:59:00.000008+01:00',\n '2019-01-31 01:59:00.000009+01:00',\n '2019-01-31 01:59:00.000010+01:00',\n '2019-01-31 01:59:00.000011+01:00',\n '2019-01-31 01:59:00.000012+01:00',\n '2019-01-31 01:59:00.000013+01:00',\n '2019-01-31 01:59:00.000014+01:00',\n '2019-01-31 01:59:00.000015+01:00'])\n np.testing.assert_array_equal(date_range, expected_range)\n\n def test_generate_date_range_nanoseconds(self, config):\n config[\"time_unit\"] = \"nanoseconds\"\n config[\"time_step\"] = 1\n start_time = pd.Timestamp('2019-01-31T00:59:00.000000000')\n end_time = pd.Timestamp('2019-01-31T00:59:00.000000009')\n\n params = get_resampling_params(config)\n frequency = params.resampling_step\n time_unit = params.time_unit\n time_step = params.time_step\n\n clip_start = 5\n shift = 2\n clip_end = 3\n\n date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)\n np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2019-01-31 00:59:00.000000007',\n '2019-01-31 00:59:00.000000008']))\n"
] | [
[
"pandas.DatetimeIndex",
"pandas.Timestamp",
"numpy.testing.assert_array_equal"
]
] |
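The business-day cases in test_date_offset match the semantics of pandas' own BusinessDay offset, which rolls weekend anchors to a business day in the direction of travel; presumably the plugin's get_date_offset wraps an offset of this kind. A standalone pandas sketch of the same arithmetic:

import pandas as pd

sunday = pd.Timestamp('2021-01-31')
print(sunday + pd.offsets.BusinessDay(1))   # 2021-02-01 (Monday)
print(sunday - pd.offsets.BusinessDay(1))   # 2021-01-29 (Friday)

friday = pd.Timestamp('2021-01-29')
print(friday + pd.offsets.BusinessDay(2))   # 2021-02-02 (Tuesday)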
nata1y/fltk-testbed-group-3 | [
"e23b59fa2a5e638d3804a39fe5012983e2988ca6"
] | [
"fltk/nets/fashion_mnist_ls_gan.py"
] | [
"import torch.nn as nn\n\n\nclass Generator(nn.Module):\n def __init__(self, img_size=32):\n super(Generator, self).__init__()\n\n # TODO: update to proper image size\n self.init_size = img_size // 4\n self.l1 = nn.Sequential(nn.Linear(10, 128 * self.init_size ** 2))\n\n self.conv_blocks = nn.Sequential(\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 128, 3, stride=1, padding=1),\n nn.BatchNorm2d(128, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 64, 3, stride=1, padding=1),\n nn.BatchNorm2d(64, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(64, 1, 3, stride=1, padding=1), #3\n nn.Tanh(),\n )\n\n def forward(self, z):\n out = self.l1(z)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n img = self.conv_blocks(out)\n return img\n\n\nclass Discriminator(nn.Module):\n def __init__(self, img_size=32):\n super(Discriminator, self).__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n self.model = nn.Sequential(\n *discriminator_block(1, 16, bn=False), #3\n *discriminator_block(16, 32),\n *discriminator_block(32, 64),\n *discriminator_block(64, 128),\n )\n\n # The height and width of downsampled image\n # TODO: update to proper image size\n ds_size = img_size // 2 ** 4\n self.adv_layer = nn.Linear(128 * ds_size ** 2, 1)\n\n def forward(self, img):\n out = self.model(img)\n out = out.view(out.shape[0], -1)\n validity = self.adv_layer(out)\n\n return validity\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.Dropout2d",
"torch.nn.Tanh",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.LeakyReLU"
]
] |
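With the default img_size=32, Generator maps a 10-dim latent vector to a 1x32x32 image (an 8x8 seed map upsampled twice), and Discriminator halves the spatial size four times (32 down to 2) before the linear validity head. A quick shape check, assuming the two classes above are importable:

import torch

G, D = Generator(img_size=32), Discriminator(img_size=32)
z = torch.randn(8, 10)        # matches l1 = nn.Linear(10, 128 * 8 * 8)
img = G(z)
print(img.shape)              # torch.Size([8, 1, 32, 32])
print(D(img).shape)           # torch.Size([8, 1]), raw LSGAN validity score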
MRCIEU/ewascatalog | [
"a37dfeb207537831b4c5e313e0edecbad8a7c1a2"
] | [
"database/zenodo.py"
] | [
"# script to upload a file to zenodo sandbox via api\n# seperate sandbox- and real-zenodo accounts and ACCESS_TOKENs each need to be created\n\n# to adapt this script to real-zenodo (from sandbox implementation):\n # update urls to zenodo.org from sandbox.zenodo.org\n # update SANDBOX_TOKEN to a ACCESS_TOKEN from real-zenodo\n\nimport sys, json, requests\nimport pandas as pd\n\nstudyid = sys.argv[1]\nfile_dir = sys.argv[2]\naccess_token = sys.argv[3]\ndata_dir = file_dir+'/ewas-sum-stats/to-add/'+studyid\n\nzfile=data_dir+'/zenodo.csv'\ntry:\n zdata = pd.read_csv(zfile)\nexcept FileNotFoundError:\n print(\"Can't find the file \"+zfile)\n sys.exit()\n\nprint('Starting Zenodo upload process')\n\n# specify ACCESS_TOKEN\n # this needs to be generated for each sanbox/real account\nACCESS_TOKEN = access_token\n\n# create empty upload\nheaders = {\"Content-Type\": \"application/json\"}\nr = requests.post('https://zenodo.org/api/deposit/depositions', params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)\n# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions', params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)\n\nr.status_code\nr.json()\n\n# Get the deposition id from the previous response\n# Upload the file to be deposited to Zenodo\ndeposition_id = r.json()['id']\n\ndata = {'name': 'results.csv'}\nfiles = {'file': open(data_dir+'/results.csv')}\nr = requests.post('https://zenodo.org/api/deposit/depositions/%s/files' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=data, files=files)\n# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions/%s/files' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=data, files=files)\n\nr.status_code\nr.json()\n\n# specify and attach the metadata for the upload\ntitle = zdata.loc[0, 'title']\nauthors = zdata.loc[0, 'authors']\ndesc = zdata.loc[0, 'desc']\n\ndesc = desc + '\\n\\n' + 'Upload of this dataset was completed by The EWAS Catalog team. The data can be queried along with hundreds of other EWAS at ewascatalog.org. To upload your EWAS summary statistics and have a zenodo DOI generated for you go to ewascatalog.org/upload'\n\ndata = {'metadata': \n\t\t\t\t {'title': title, \n\t\t\t\t 'upload_type': 'dataset', \n\t\t\t\t 'description': desc, \n\t\t\t\t 'creators': [{'name': authors}]}}\n\nr = requests.put('https://zenodo.org/api/deposit/depositions/%s' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=json.dumps(data), headers=headers)\n# r = requests.put('https://sandbox.zenodo.org/api/deposit/depositions/%s' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=json.dumps(data), headers=headers)\n\nr.status_code\nr.json()\n\n# publish \nr = requests.post('https://zenodo.org/api/deposit/depositions/%s/actions/publish' % deposition_id, params={'access_token': ACCESS_TOKEN} )\n# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions/%s/actions/publish' % deposition_id, params={'access_token': ACCESS_TOKEN} )\n\nstatus_code = r.status_code\nif status_code != 202:\n\traise ValueError(\"Status code was\" + str(status_code) + \" and it should be 202. Check zenodo\")\nelse:\n\tprint(\"Status code is 202. Happy days!\")\n# should be: 202\n"
] | [
[
"pandas.read_csv"
]
] |
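The script reads zenodo.csv from the study directory and expects at least title, authors, and desc columns in its first row (accessed via zdata.loc[0, ...]). A sketch of a conforming file, with illustrative values:

import pandas as pd

pd.DataFrame([{
    'title': 'EWAS summary statistics for study X',
    'authors': 'Doe J',
    'desc': 'Full summary statistics from an example EWAS.',
}]).to_csv('zenodo.csv', index=False)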
lzhmarkk/pytorch-deeplab-xception | [
"63f699214e4095a4edda21173012cc29e53125b3"
] | [
"utils/summaries.py"
] | [
"import os\nimport torch\nfrom torchvision.utils import make_grid\nfrom tensorboardX import SummaryWriter\nfrom dataloaders.utils import decode_seg_map_sequence\n\nclass TensorboardSummary(object):\n def __init__(self, directory):\n self.directory = directory\n\n def create_summary(self):\n writer = SummaryWriter(log_dir=os.path.join(self.directory))\n return writer\n\n def visualize_image(self, writer, dataset, image, target, output, global_step):\n grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)\n writer.add_image('Image', grid_image, global_step)\n grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(),\n dataset=dataset), 3, normalize=False, range=(0, 255))\n writer.add_image('Predicted label', grid_image, global_step)\n grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),\n dataset=dataset), 3, normalize=False, range=(0, 255))\n writer.add_image('Groundtruth label', grid_image, global_step)"
] | [
[
"torch.squeeze",
"torch.max"
]
] |
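visualize_image logs three grids per step: the first three inputs, their argmax predictions, and the ground-truth masks. A minimal usage sketch with dummy tensors, assuming decode_seg_map_sequence recognizes the dataset name (e.g. 'pascal' with 21 classes); shapes and the log directory here are illustrative:

import torch

summary = TensorboardSummary('runs/demo')
writer = summary.create_summary()
images = torch.rand(4, 3, 65, 65)                       # RGB batch
targets = torch.randint(0, 21, (4, 1, 65, 65)).float()  # label masks
outputs = torch.rand(4, 21, 65, 65)                     # per-class scores
summary.visualize_image(writer, 'pascal', images, targets, outputs, global_step=0)
writer.close()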
samsledje/D-SCRIPT | [
"3fa7ea685f7fcdc63468380267d1672f63bb8772"
] | [
"dscript/commands/train.py"
] | [
"\"\"\"\nTrain a new model.\n\"\"\"\n\nimport sys\nimport argparse\nimport h5py\nimport datetime\nimport subprocess as sp\nimport numpy as np\nimport pandas as pd\nimport gzip as gz\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import IterableDataset, DataLoader\nfrom sklearn.metrics import average_precision_score as average_precision\n\nimport dscript\nfrom dscript.utils import PairedDataset, collate_paired_sequences\nfrom dscript.models.embedding import (\n IdentityEmbed,\n FullyConnectedEmbed,\n)\nfrom dscript.models.contact import ContactCNN\nfrom dscript.models.interaction import ModelInteraction\n\n\ndef add_args(parser):\n \"\"\"\n Create parser for command line utility.\n\n :meta private:\n \"\"\"\n\n data_grp = parser.add_argument_group(\"Data\")\n proj_grp = parser.add_argument_group(\"Projection Module\")\n contact_grp = parser.add_argument_group(\"Contact Module\")\n inter_grp = parser.add_argument_group(\"Interaction Module\")\n train_grp = parser.add_argument_group(\"Training\")\n misc_grp = parser.add_argument_group(\"Output and Device\")\n\n # Data\n data_grp.add_argument(\"--train\", help=\"Training data\", required=True)\n data_grp.add_argument(\"--val\", help=\"Validation data\", required=True)\n data_grp.add_argument(\"--embedding\", help=\"h5 file with embedded sequences\", required=True)\n data_grp.add_argument(\n \"--no-augment\",\n action=\"store_false\",\n dest='augment',\n help=\"Set flag to not augment data by adding (B A) for all pairs (A B)\",\n )\n\n # Embedding model\n proj_grp.add_argument(\n \"--projection-dim\",\n type=int,\n default=100,\n help=\"Dimension of embedding projection layer (default: 100)\",\n )\n proj_grp.add_argument(\n \"--dropout-p\",\n type=float,\n default=0.5,\n help=\"Parameter p for embedding dropout layer (default: 0.5)\",\n )\n\n # Contact model\n contact_grp.add_argument(\n \"--hidden-dim\",\n type=int,\n default=50,\n help=\"Number of hidden units for comparison layer in contact prediction (default: 50)\",\n )\n contact_grp.add_argument(\n \"--kernel-width\",\n type=int,\n default=7,\n help=\"Width of convolutional filter for contact prediction (default: 7)\",\n )\n\n # Interaction Model\n inter_grp.add_argument(\n \"--no-w\",\n action=\"store_false\",\n dest='use_w',\n help=\"Don't use weight matrix in interaction prediction model\",\n )\n inter_grp.add_argument(\n \"--pool-width\",\n type=int,\n default=9,\n help=\"Size of max-pool in interaction model (default: 9)\",\n )\n\n # Training\n train_grp.add_argument(\n \"--negative-ratio\",\n type=int,\n default=10,\n help=\"Number of negative training samples for each positive training sample (default: 10)\",\n )\n train_grp.add_argument(\n \"--epoch-scale\",\n type=int,\n default=1,\n help=\"Report heldout performance every this many epochs (default: 1)\",\n )\n train_grp.add_argument(\"--num-epochs\", type=int, default=10, help=\"Number of epochs (default: 10)\")\n train_grp.add_argument(\"--batch-size\", type=int, default=25, help=\"Minibatch size (default: 25)\")\n train_grp.add_argument(\"--weight-decay\", type=float, default=0, help=\"L2 regularization (default: 0)\")\n train_grp.add_argument(\"--lr\", type=float, default=0.001, help=\"Learning rate (default: 0.001)\")\n train_grp.add_argument(\n \"--lambda\",\n dest=\"lambda_\",\n type=float,\n default=0.35,\n help=\"Weight on the similarity objective (default: 0.35)\",\n )\n\n # Output\n 
misc_grp.add_argument(\"-o\", \"--outfile\", help=\"Output file path (default: stdout)\")\n misc_grp.add_argument(\"--save-prefix\", help=\"Path prefix for saving models\")\n misc_grp.add_argument(\"-d\", \"--device\", type=int, default=-1, help=\"Compute device to use\")\n misc_grp.add_argument(\"--checkpoint\", help=\"Checkpoint model to start training from\")\n\n return parser\n\n\ndef predict_interaction(model, n0, n1, tensors, use_cuda):\n \"\"\"\n Predict whether a list of protein pairs will interact.\n\n :param model: Model to be trained\n :type model: dscript.models.interaction.ModelInteraction\n :param n0: First protein names\n :type n0: list[str]\n :param n1: Second protein names\n :type n1: list[str]\n :param tensors: Dictionary of protein names to embeddings\n :type tensors: dict[str, torch.Tensor]\n :param use_cuda: Whether to use GPU\n :type use_cuda: bool\n \"\"\"\n\n b = len(n0)\n\n p_hat = []\n for i in range(b):\n z_a = tensors[n0[i]]\n z_b = tensors[n1[i]]\n if use_cuda:\n z_a = z_a.cuda()\n z_b = z_b.cuda()\n\n p_hat.append(model.predict(z_a, z_b))\n p_hat = torch.stack(p_hat, 0)\n return p_hat\n\n\ndef predict_cmap_interaction(model, n0, n1, tensors, use_cuda):\n \"\"\"\n Predict whether a list of protein pairs will interact, as well as their contact map.\n\n :param model: Model to be trained\n :type model: dscript.models.interaction.ModelInteraction\n :param n0: First protein names\n :type n0: list[str]\n :param n1: Second protein names\n :type n1: list[str]\n :param tensors: Dictionary of protein names to embeddings\n :type tensors: dict[str, torch.Tensor]\n :param use_cuda: Whether to use GPU\n :type use_cuda: bool\n \"\"\"\n\n b = len(n0)\n\n p_hat = []\n c_map_mag = []\n for i in range(b):\n z_a = tensors[n0[i]]\n z_b = tensors[n1[i]]\n if use_cuda:\n z_a = z_a.cuda()\n z_b = z_b.cuda()\n\n cm, ph = model.map_predict(z_a, z_b)\n p_hat.append(ph)\n c_map_mag.append(torch.mean(cm))\n p_hat = torch.stack(p_hat, 0)\n c_map_mag = torch.stack(c_map_mag, 0)\n return c_map_mag, p_hat\n\n\ndef interaction_grad(model, n0, n1, y, tensors, use_cuda, weight=0.35):\n \"\"\"\n Compute gradient and backpropagate loss for a batch.\n\n :param model: Model to be trained\n :type model: dscript.models.interaction.ModelInteraction\n :param n0: First protein names\n :type n0: list[str]\n :param n1: Second protein names\n :type n1: list[str]\n :param y: Interaction labels\n :type y: torch.Tensor\n :param tensors: Dictionary of protein names to embeddings\n :type tensors: dict[str, torch.Tensor]\n :param use_cuda: Whether to use GPU\n :type use_cuda: bool\n :param weight: Weight on the contact map magnitude objective. 
BCE loss is :math:`1 - \\\\text{weight}`.\n :type weight: float\n\n :return: (Loss, number correct, mean square error, batch size)\n :rtype: (torch.Tensor, int, torch.Tensor, int)\n \"\"\"\n\n c_map_mag, p_hat = predict_cmap_interaction(model, n0, n1, tensors, use_cuda)\n if use_cuda:\n y = y.cuda()\n y = Variable(y)\n\n bce_loss = F.binary_cross_entropy(p_hat.float(), y.float())\n cmap_loss = torch.mean(c_map_mag)\n loss = (weight * bce_loss) + ((1 - weight) * cmap_loss)\n b = len(p_hat)\n\n # backprop loss\n loss.backward()\n\n if use_cuda:\n y = y.cpu()\n p_hat = p_hat.cpu()\n\n with torch.no_grad():\n guess_cutoff = 0.5\n p_hat = p_hat.float()\n p_guess = (guess_cutoff * torch.ones(b) < p_hat).float()\n y = y.float()\n correct = torch.sum(p_guess == y).item()\n mse = torch.mean((y.float() - p_hat) ** 2).item()\n\n return loss, correct, mse, b\n\n\ndef interaction_eval(model, test_iterator, tensors, use_cuda):\n \"\"\"\n Evaluate test data set performance.\n\n :param model: Model to be trained\n :type model: dscript.models.interaction.ModelInteraction\n :param test_iterator: Test data iterator\n :type test_iterator: torch.utils.data.DataLoader\n :param tensors: Dictionary of protein names to embeddings\n :type tensors: dict[str, torch.Tensor]\n :param use_cuda: Whether to use GPU\n :type use_cuda: bool\n\n :return: (Loss, number correct, mean square error, precision, recall, F1 Score, AUPR)\n :rtype: (torch.Tensor, int, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor)\n \"\"\"\n p_hat = []\n true_y = []\n\n for n0, n1, y in test_iterator:\n p_hat.append(predict_interaction(model, n0, n1, tensors, use_cuda))\n true_y.append(y)\n\n y = torch.cat(true_y, 0)\n p_hat = torch.cat(p_hat, 0)\n\n if use_cuda:\n y.cuda()\n p_hat = torch.Tensor([x.cuda() for x in p_hat])\n p_hat.cuda()\n\n loss = F.binary_cross_entropy(p_hat.float(), y.float()).item()\n b = len(y)\n\n with torch.no_grad():\n guess_cutoff = torch.Tensor([0.5]).float()\n p_hat = p_hat.float()\n y = y.float()\n p_guess = (guess_cutoff * torch.ones(b) < p_hat).float()\n correct = torch.sum(p_guess == y).item()\n mse = torch.mean((y.float() - p_hat) ** 2).item()\n\n tp = torch.sum(y * p_hat).item()\n pr = tp / torch.sum(p_hat).item()\n re = tp / torch.sum(y).item()\n f1 = 2 * pr * re / (pr + re)\n\n y = y.cpu().numpy()\n p_hat = p_hat.data.cpu().numpy()\n\n aupr = average_precision(y, p_hat)\n\n return loss, correct, mse, pr, re, f1, aupr\n\n\ndef main(args):\n \"\"\"\n Run training from arguments.\n\n :meta private:\n \"\"\"\n\n output = args.outfile\n if output is None:\n output = sys.stdout\n else:\n output = open(output, \"w\")\n\n print(f'# Called as: {\" \".join(sys.argv)}', file=output)\n if output is not sys.stdout:\n print(f'Called as: {\" \".join(sys.argv)}')\n\n # Set device\n device = args.device\n use_cuda = (device >= 0) and torch.cuda.is_available()\n if use_cuda:\n torch.cuda.set_device(device)\n print(\n f\"# Using CUDA device {device} - {torch.cuda.get_device_name(device)}\",\n file=output,\n )\n else:\n print(\"# Using CPU\", file=output)\n device = \"cpu\"\n\n batch_size = args.batch_size\n\n train_fi = args.train\n test_fi = args.val\n augment = args.augment\n embedding_h5 = args.embedding\n h5fi = h5py.File(embedding_h5, \"r\")\n\n print(f\"# Loading training pairs from {train_fi}...\", file=output)\n output.flush()\n\n train_df = pd.read_csv(train_fi, sep=\"\\t\", header=None)\n if augment:\n train_n0 = pd.concat((train_df[0], train_df[1]), axis=0).reset_index(drop=True)\n train_n1 = 
pd.concat((train_df[1], train_df[0]), axis=0).reset_index(drop=True)\n train_y = torch.from_numpy(pd.concat((train_df[2], train_df[2])).values)\n else:\n train_n0, train_n1 = train_df[0], train_df[1]\n train_y = torch.from_numpy(train_df[2].values)\n\n print(f\"# Loading testing pairs from {test_fi}...\", file=output)\n output.flush()\n\n test_df = pd.read_csv(test_fi, sep=\"\\t\", header=None)\n test_n0, test_n1 = test_df[0], test_df[1]\n test_y = torch.from_numpy(test_df[2].values)\n output.flush()\n\n train_pairs = PairedDataset(train_n0, train_n1, train_y)\n pairs_train_iterator = torch.utils.data.DataLoader(\n train_pairs,\n batch_size=batch_size,\n collate_fn=collate_paired_sequences,\n shuffle=True,\n )\n\n test_pairs = PairedDataset(test_n0, test_n1, test_y)\n pairs_test_iterator = torch.utils.data.DataLoader(\n test_pairs,\n batch_size=batch_size,\n collate_fn=collate_paired_sequences,\n shuffle=True,\n )\n\n output.flush()\n\n print(f\"# Loading embeddings\", file=output)\n tensors = {}\n all_proteins = set(train_n0).union(set(train_n1)).union(set(test_n0)).union(set(test_n1))\n for prot_name in tqdm(all_proteins):\n tensors[prot_name] = torch.from_numpy(h5fi[prot_name][:, :])\n\n use_cuda = (args.device > -1) and torch.cuda.is_available()\n\n if args.checkpoint is None:\n\n projection_dim = args.projection_dim\n dropout_p = args.dropout_p\n embedding = FullyConnectedEmbed(6165, projection_dim, dropout=dropout_p)\n print(\"# Initializing embedding model with:\", file=output)\n print(f\"\\tprojection_dim: {projection_dim}\", file=output)\n print(f\"\\tdropout_p: {dropout_p}\", file=output)\n\n # Create contact model\n hidden_dim = args.hidden_dim\n kernel_width = args.kernel_width\n print(\"# Initializing contact model with:\", file=output)\n print(f\"\\thidden_dim: {hidden_dim}\", file=output)\n print(f\"\\tkernel_width: {kernel_width}\", file=output)\n\n contact = ContactCNN(projection_dim, hidden_dim, kernel_width)\n\n # Create the full model\n use_W = args.use_w\n pool_width = args.pool_width\n print(\"# Initializing interaction model with:\", file=output)\n print(f\"\\tpool_width: {pool_width}\", file=output)\n print(f\"\\tuse_w: {use_W}\", file=output)\n model = ModelInteraction(embedding, contact, use_W=use_W, pool_size=pool_width)\n\n print(model, file=output)\n\n else:\n print(\"# Loading model from checkpoint {}\".format(args.checkpoint), file=output)\n model = torch.load(args.checkpoint)\n model.use_cuda = use_cuda\n\n if use_cuda:\n model = model.cuda()\n\n # Train the model\n lr = args.lr\n wd = args.weight_decay\n num_epochs = args.num_epochs\n batch_size = args.batch_size\n report_steps = args.epoch_scale\n inter_weight = args.lambda_\n cmap_weight = 1 - inter_weight\n digits = int(np.floor(np.log10(num_epochs))) + 1\n save_prefix = args.save_prefix\n if save_prefix is None:\n save_prefix = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n\n params = [p for p in model.parameters() if p.requires_grad]\n optim = torch.optim.Adam(params, lr=lr, weight_decay=wd)\n\n print(f'# Using save prefix \"{save_prefix}\"', file=output)\n print(f\"# Training with Adam: lr={lr}, weight_decay={wd}\", file=output)\n print(f\"\\tnum_epochs: {num_epochs}\", file=output)\n print(f\"\\tepoch_scale: {report_steps}\", file=output)\n print(f\"\\tbatch_size: {batch_size}\", file=output)\n print(f\"\\tinteraction weight: {inter_weight}\", file=output)\n print(f\"\\tcontact map weight: {cmap_weight}\", file=output)\n output.flush()\n\n batch_report_fmt = \"# [{}/{}] training {:.1%}: 
Loss={:.6}, Accuracy={:.3%}, MSE={:.6}\"\n epoch_report_fmt = \"# Finished Epoch {}/{}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}, Precision={:.6}, Recall={:.6}, F1={:.6}, AUPR={:.6}\"\n\n N = len(pairs_train_iterator) * batch_size\n for epoch in range(num_epochs):\n\n model.train()\n\n n = 0\n loss_accum = 0\n acc_accum = 0\n mse_accum = 0\n\n # Train batches\n for (z0, z1, y) in tqdm(pairs_train_iterator, desc=f\"Epoch {epoch+1}/{num_epochs}\",total=len(pairs_train_iterator)):\n\n loss, correct, mse, b = interaction_grad(model, z0, z1, y, tensors, use_cuda, weight=inter_weight)\n\n n += b\n delta = b * (loss - loss_accum)\n loss_accum += delta / n\n\n delta = correct - b * acc_accum\n acc_accum += delta / n\n\n delta = b * (mse - mse_accum)\n mse_accum += delta / n\n\n report = (n - b) // 100 < n // 100\n\n optim.step()\n optim.zero_grad()\n model.clip()\n\n if report:\n tokens = [\n epoch + 1,\n num_epochs,\n n / N,\n loss_accum,\n acc_accum,\n mse_accum,\n ]\n if output is not sys.stdout:\n print(batch_report_fmt.format(*tokens), file=output)\n output.flush()\n\n if (epoch + 1) % report_steps == 0:\n model.eval()\n\n with torch.no_grad():\n\n (\n inter_loss,\n inter_correct,\n inter_mse,\n inter_pr,\n inter_re,\n inter_f1,\n inter_aupr,\n ) = interaction_eval(model, pairs_test_iterator, tensors, use_cuda)\n tokens = [\n epoch + 1,\n num_epochs,\n inter_loss,\n inter_correct / (len(pairs_test_iterator) * batch_size),\n inter_mse,\n inter_pr,\n inter_re,\n inter_f1,\n inter_aupr,\n ]\n print(epoch_report_fmt.format(*tokens), file=output)\n output.flush()\n\n # Save the model\n if save_prefix is not None:\n save_path = save_prefix + \"_epoch\" + str(epoch + 1).zfill(digits) + \".sav\"\n print(f\"# Saving model to {save_path}\", file=output)\n model.cpu()\n torch.save(model, save_path)\n if use_cuda:\n model.cuda()\n\n output.flush()\n\n if save_prefix is not None:\n save_path = save_prefix + \"_final.sav\"\n print(f\"# Saving final model to {save_path}\", file=output)\n model.cpu()\n torch.save(model, save_path)\n if use_cuda:\n model.cuda()\n\n output.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=__doc__)\n add_args(parser)\n main(parser.parse_args())\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.stack",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.get_device_name",
"torch.cat",
"torch.autograd.Variable",
"torch.save",
"torch.optim.Adam",
"torch.from_numpy",
"numpy.log10",
"torch.Tensor",
"torch.mean",
"torch.cuda.set_device",
"torch.ones",
"torch.load",
"pandas.read_csv",
"pandas.concat",
"sklearn.metrics.average_precision_score",
"torch.sum"
]
] |
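The epoch loop above tracks loss, accuracy, and MSE with an incremental weighted-mean update, accum += b * (x - accum) / n, so no per-batch history has to be kept. A small check that the update equals the direct weighted mean (numbers illustrative):

import numpy as np

batch_vals = [0.9, 0.7, 0.4, 0.35]     # per-batch mean losses
batch_sizes = [25, 25, 25, 10]         # last batch smaller

n, accum = 0, 0.0
for x, b in zip(batch_vals, batch_sizes):
    n += b
    accum += b * (x - accum) / n       # same update as the training loop
print(accum, np.average(batch_vals, weights=batch_sizes))  # agree up to rounding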
spk921/RTFNet | [
"4dad2a63e13e9c302da45ad5a3af4d85cf474694"
] | [
"test.py"
] | [
"# coding:utf-8\n# modified from: https://github.com/haqishen/MFNet-pytorch\n# By Yuxiang Sun, Aug. 2, 2019\n# Email: [email protected]\n\nimport os\nimport argparse\nimport time\nimport datetime\nimport numpy as np\nimport sys\nimport torch \nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom util.MF_dataset import MF_dataset \nfrom model import RTFNet \nfrom sklearn.metrics import confusion_matrix\n \nn_class = 9\ndata_dir = './dataset/'\nmodel_dir = './weights_backup/' \n\ndef main():\n \n conf_total = np.zeros((n_class,n_class))\n\n model = eval(args.model_name)(n_class=n_class)\n if args.gpu >= 0: model.cuda(args.gpu)\n print('| loading model file %s... ' % model_file)\n \n pretrained_weight = torch.load(model_file, map_location = lambda storage, loc: storage.cuda(args.gpu))\n own_state = model.state_dict()\n for name, param in pretrained_weight.items():\n if name not in own_state:\n continue\n own_state[name].copy_(param) \n print('done!')\n\n test_dataset = MF_dataset(data_dir, args.dataset_name, have_label=True, input_h=args.img_height, input_w=args.img_width)\n test_loader = DataLoader(\n dataset = test_dataset,\n batch_size = batch_size,\n shuffle = False,\n num_workers = args.num_workers,\n pin_memory = True,\n drop_last = False\n )\n test_loader.n_iter = len(test_loader)\n ave_time_cost = 0.0\n\n model.eval()\n \n with torch.no_grad():\n for it, (images, labels, names) in enumerate(test_loader):\n images = Variable(images)\n labels = Variable(labels)\n if args.gpu >= 0:\n images = images.cuda(args.gpu)\n labels = labels.cuda(args.gpu)\n\n start_time = time.time()\n logits = model(images) # logits.size(): mini_batch*num_class*480*640\n end_time = time.time()\n if it>10: # # ignore the first 10 frames\n ave_time_cost += (end_time-start_time)\n\n # convert tensor to numpy 1d array\n label = labels.cpu().numpy().squeeze().flatten()\n prediction = logits.argmax(1).cpu().numpy().squeeze().flatten() # prediction and label are both 1-d array, size: minibatch*640*480\n # generate confusion matrix frame-by-frame\n conf = confusion_matrix(label, prediction, [0,1,2,3,4,5,6,7,8]) # conf is an n_class*n_class matrix, vertical axis: groundtruth, horizontal axis: prediction\n conf_total += conf\n print(\"| frame %d/%d, time cost: %.2f ms\" %(it+1, test_loader.n_iter, (end_time-start_time)*1000)) \n \n # calculate recall (Acc) and IoU for each class \n recall_per_class = np.zeros(n_class)\n iou_per_class = np.zeros(n_class)\n for cid in range(0, n_class): # cid: class id \n if conf_total[cid, 0:].sum() == 0:\n recall_per_class[cid] = np.nan\n else:\n recall_per_class[cid] = float(conf_total[cid, cid]) / float(conf_total[cid, 0:].sum()) # recall (Acc) = TP/TP+FN\n if (conf_total[cid, 0:].sum() + conf_total[0:, cid].sum() - conf_total[cid, cid]) == 0:\n iou_per_class[cid] = np.nan\n else:\n iou_per_class[cid] = float(conf_total[cid, cid]) / float((conf_total[cid, 0:].sum() + conf_total[0:, cid].sum() - conf_total[cid, cid])) # IoU = TP/TP+FP+FN\n \n print('\\n###########################################################################')\n print('\\n| %s: %s test results (with batch size %d) on %s using %s:' %(args.model_name, args.weight_name, batch_size, datetime.date.today(), torch.cuda.get_device_name(args.gpu))) \n print('\\n| * the tested dataset name: %s' % args.dataset_name)\n print('| * the tested image count: %d' % test_loader.n_iter)\n print('| * the tested image size: %d*%d' %(args.img_height, args.img_width)) \n print(\"| * recall per class: \\n 
unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f\" \\\n %(recall_per_class[0], recall_per_class[1], recall_per_class[2], recall_per_class[3], recall_per_class[4], recall_per_class[5], recall_per_class[6], recall_per_class[7], recall_per_class[8]))\n print(\"| * iou per class: \\n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f\" \\\n %(iou_per_class[0], iou_per_class[1], iou_per_class[2], iou_per_class[3], iou_per_class[4], iou_per_class[5], iou_per_class[6], iou_per_class[7], iou_per_class[8])) \n\n print(\"\\n| * average values (np.mean(x)): \\n recall: %.6f, iou: %.6f\" \\\n %(recall_per_class.mean(), iou_per_class.mean()))\n print(\"| * average values (np.mean(np.nan_to_num(x))): \\n recall: %.6f, iou: %.6f\" \\\n %(np.mean(np.nan_to_num(recall_per_class)), np.mean(np.nan_to_num(iou_per_class))))\n\n print('\\n| * the average time cost per frame (with batch size %d): %.2f ms, namely, the inference speed is %.2f fps' %(batch_size, ave_time_cost*1000/(test_loader.n_iter-11), 1.0/(ave_time_cost/(test_loader.n_iter-11)))) # ignore the first 10 frames\n\n #print('\\n| * the total confusion matrix: ') \n #np.set_printoptions(precision=8, threshold=np.inf, linewidth=np.inf, suppress=True)\n #print(conf_total)\n print('\\n###########################################################################')\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='Test with pytorch')\n parser.add_argument('--model_name', '-M', type=str, default='RTFNet')\n parser.add_argument('--weight_name', '-W', type=str, default='RTFNet_152') # RTFNet_152, RTFNet_50, please change the number of layers in the network file\n parser.add_argument('--dataset_name', '-D', type=str, default='test') # test, test_day, test_night\n parser.add_argument('--img_height', '-IH', type=int, default=480) \n parser.add_argument('--img_width', '-IW', type=int, default=640) \n parser.add_argument('--gpu', '-G', type=int, default=0)\n parser.add_argument('--num_workers', '-j', type=int, default=8)\n args = parser.parse_args()\n\n batch_size = 1 # do not change this parameter!\t\n\n torch.cuda.set_device(args.gpu)\n print(\"\\n| the gpu count:\", torch.cuda.device_count())\n print(\"| the current used gpu:\", torch.cuda.current_device(), '\\n')\n\n model_dir = os.path.join(model_dir, args.weight_name) # model_dir = './weights_backup/'\n if os.path.exists(model_dir) is False:\n print(\"| the %s does not exit.\" %(model_dir))\n sys.exit()\n model_file = os.path.join(model_dir, 'final.pth')\n if os.path.exists(model_file) is True:\n print('| use the final model file.')\n else:\n print('| no model file found.')\n sys.exit() \n print('| testing %s: %s on GPU #%d with pytorch' % (args.model_name, args.weight_name, args.gpu))\n main()\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.zeros",
"torch.no_grad",
"torch.autograd.Variable",
"torch.cuda.device_count",
"torch.cuda.current_device",
"sklearn.metrics.confusion_matrix",
"numpy.nan_to_num",
"torch.cuda.get_device_name",
"torch.cuda.set_device"
]
] |
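The per-class recall and IoU loops reduce to diagonal, row-sum, and column-sum operations on the accumulated confusion matrix. A vectorized numpy sketch equivalent to the loop above (toy 3-class matrix; absent classes come out as nan, as in the code):

import numpy as np

conf = np.array([[50., 2., 0.],
                 [3., 41., 0.],
                 [0., 0., 0.]])    # class 2 never occurs and is never predicted

tp = np.diag(conf)
gt = conf.sum(axis=1)              # TP + FN per class
pred = conf.sum(axis=0)            # TP + FP per class
with np.errstate(invalid='ignore'):
    recall = tp / gt               # TP / (TP + FN)
    iou = tp / (gt + pred - tp)    # TP / (TP + FP + FN)
print(recall, iou)                 # third entry is nan in both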
wxw-matt/xalpha | [
"b142a5daebac5f1129ead0553efcd40cd471190c"
] | [
"xalpha/multiple.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nmodule for mul and mulfix class: fund combination management\n\"\"\"\n\nimport logging\nimport pandas as pd\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Pie, ThemeRiver\n\nfrom xalpha.cons import convert_date, myround, yesterdaydash, yesterdayobj\nfrom xalpha.evaluate import evaluate\nfrom xalpha.exceptions import FundTypeError, TradeBehaviorError\nfrom xalpha.record import record, irecord\nfrom xalpha.indicator import indicator\nfrom xalpha.info import cashinfo, fundinfo, mfundinfo, get_fund_holdings\nfrom xalpha.trade import (\n bottleneck,\n trade,\n turnoverrate,\n vtradevolume,\n xirrcal,\n itrade,\n vtradecost,\n)\nfrom xalpha.universal import get_fund_type, ttjjcode, get_rt, get_industry_fromxq\nimport xalpha.universal as xu\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass mul:\n \"\"\"\n multiple fund positions manage class\n\n :param fundtradeobj: list of trade obj which you want to analyse together\n :param status: the status table of trade, all code in this table would be considered.\n one must provide one of the two paramters, if both are offered, status will be overlooked\n 可以是场内记账单 DataFrame,也可以是 record 对象。\n :param istatus: 场内交易账单,也可以是 irecord 对象。\n 若提供,则场内外交易联合统计展示。该选项只保证 ``combsummary`` 方法可正常使用,不保证 ``mul`` 类的其他方法可用。\n :param property: Dict[fundcode, property_number]. property number 的解释:\n int. 1: 基金申购采取分位以后全舍而非四舍五入(这种基金是真实存在的==)。2:基金默认分红再投入(0 则是默认现金分红)。4:基金赎回按净值处理(暂时只支持货币基金,事实上无法精确支持按份额赎回的净值型基金)。将想要的性质数值相加即可,类似 *nix 上的 xwr 系统。\n :param fetch: boolean, when open the fetch option, info class will try fetching from local files first in the init\n :param save: boolean, when open the save option, info classes automatically save the class to files\n :param path: string, the file path prefix of IO, or object or engine from sqlalchemy to connect sql database\n :param form: string, the format of IO, options including: 'csv','sql'\n \"\"\"\n\n def __init__(\n self,\n *fundtradeobj,\n status=None,\n istatus=None,\n property=None,\n fetch=False,\n save=False,\n path=\"\",\n form=\"csv\"\n ):\n if isinstance(status, record):\n if not property:\n property = getattr(status, \"property\", {})\n status = status.status\n elif not property:\n property = {}\n self.is_in = False\n if fundtradeobj:\n for t in fundtradeobj:\n if isinstance(t, itrade):\n self.is_in = True\n break\n else:\n fundtradeobj = []\n # warning: not a very good way to automatic generate these fund obj\n # because there might be some funds use round_down for share calculation, ie, label=2 must be given\n # unless you are sure corresponding funds are added to the droplist\n fundcodelist = [f.code for f in fundtradeobj]\n if status is not None:\n for code in status.columns:\n if code == \"date\":\n continue\n # r1, d2, v4 p = r+d+v\n if code in fundcodelist:\n continue\n p = property.get(code, 0)\n round_label = p % 2\n dividend_label = ((p - round_label) / 2) % 2\n value_label = ((p - round_label - dividend_label) / 4) % 2\n try:\n fundtradeobj.append(\n trade(\n fundinfo(\n code,\n round_label=round_label,\n dividend_label=dividend_label,\n fetch=fetch,\n save=save,\n path=path,\n form=form,\n ),\n status,\n )\n )\n except FundTypeError:\n fundtradeobj.append(\n trade(\n mfundinfo(\n code,\n round_label=round_label,\n value_label=value_label,\n fetch=fetch,\n save=save,\n path=path,\n form=form,\n ),\n status,\n )\n )\n if istatus is not None:\n self.is_in = True\n if isinstance(istatus, irecord):\n istatus = istatus.status\n for code in 
istatus.code.unique():\n if code not in fundcodelist and not code.startswith(\"#\"):\n fundtradeobj.append(itrade(code, istatus))\n self.fundtradeobj = tuple(fundtradeobj)\n self.totcftable = self._mergecftb()\n\n def tot(self, prop=\"基金现值\", date=yesterdayobj()):\n \"\"\"\n sum of all the values from one prop of fund daily report,\n of course many of the props make no sense to sum\n\n :param prop: string defined in the daily report dict,\n typical one is 'currentvalue' or 'originalpurchase'\n \"\"\"\n res = 0\n for fund in self.fundtradeobj:\n res += fund.dailyreport(date).iloc[0][prop]\n return res\n\n def combsummary(self, date=yesterdayobj()):\n \"\"\"\n brief report table of every fund and the combination investment\n\n :param date: string or obj of date, show info of the date given\n :returns: empty dict if nothing remains on that date,\n otherwise a dict of various data on the trade positions\n \"\"\"\n date = convert_date(date)\n columns = [\n \"基金名称\",\n \"基金代码\",\n \"当日净值\",\n \"单位成本\",\n \"持有份额\",\n \"基金现值\",\n \"基金总申购\",\n \"历史最大占用\",\n \"基金持有成本\",\n \"基金分红与赎回\",\n \"换手率\",\n \"基金收益总额\",\n \"投资收益率\",\n ]\n summarydf = pd.DataFrame([], columns=columns)\n for fund in self.fundtradeobj:\n summarydf = summarydf.append(\n fund.dailyreport(date), ignore_index=True, sort=True\n )\n tname = \"总计\"\n tcode = \"total\"\n tunitvalue = float(\"NaN\")\n tunitcost = float(\"NaN\")\n tholdshare = float(\"NaN\")\n tcurrentvalue = summarydf[\"基金现值\"].sum()\n tpurchase = summarydf[\"基金总申购\"].sum()\n tbtnk = bottleneck(self.totcftable[self.totcftable[\"date\"] <= date])\n tcost = summarydf[\"基金持有成本\"].sum()\n toutput = summarydf[\"基金分红与赎回\"].sum()\n tturnover = turnoverrate(self.totcftable[self.totcftable[\"date\"] <= date], date)\n # 计算的是总系统作为整体和外界的换手率,而非系统各成分之间的换手率\n tearn = summarydf[\"基金收益总额\"].sum()\n trate = round(tearn / tbtnk * 100, 4)\n trow = pd.DataFrame(\n [\n [\n tname,\n tcode,\n tunitvalue,\n tunitcost,\n tholdshare,\n tcurrentvalue,\n tpurchase,\n tbtnk,\n tcost,\n toutput,\n tturnover,\n tearn,\n trate,\n ]\n ],\n columns=columns,\n )\n summarydf = summarydf.append(trow, ignore_index=True, sort=True)\n\n return summarydf[columns].sort_values(by=\"基金现值\", ascending=False)\n\n summary = combsummary\n\n def _mergecftb(self):\n \"\"\"\n merge the different cftable for different funds into one table\n \"\"\"\n dtlist = []\n for fund in self.fundtradeobj:\n dtlist2 = []\n for _, row in fund.cftable.iterrows():\n dtlist2.append((row[\"date\"], row[\"cash\"]))\n dtlist.extend(dtlist2)\n\n nndtlist = set([item[0] for item in dtlist])\n nndtlist = sorted(list(nndtlist), key=lambda x: x)\n reslist = []\n for date in nndtlist:\n reslist.append(sum([item[1] for item in dtlist if item[0] == date]))\n df = pd.DataFrame(data={\"date\": nndtlist, \"cash\": reslist})\n df = df[df[\"cash\"] != 0]\n df = df.reset_index(drop=True)\n return df\n\n def xirrrate(self, date=yesterdayobj(), startdate=None, guess=0.01):\n \"\"\"\n xirr rate evaluation of the whole investment combination\n\n :param date: string or obj of datetime, the virtually sell-all date\n :param startdate: string or obj of datetime, the beginning date of calculation, default from first buy\n \"\"\"\n return xirrcal(self.totcftable, self.fundtradeobj, date, startdate, guess)\n\n def evaluation(self, start=None):\n \"\"\"\n give the evaluation object to analyse fund properties themselves instead of trades\n\n :returns: :class:`xalpha.evaluate.evaluate` object, with referenced funds the same as the funds\n we invested in\n \"\"\"\n if self.is_in:\n raise 
NotImplementedError()\n case = evaluate(\n *[fundtrade.aim for fundtrade in self.fundtradeobj], start=start\n )\n return case\n\n def get_stock_holdings(\n self, year=None, season=None, date=yesterdayobj(), threhold=100\n ):\n \"\"\"\n 获取整个基金组合的底层股票持仓总和和细节,组合穿透\n\n :param year: 基于的基金季报年份\n :param season: 基于的基金季报季度\n :param date: 默认昨天\n :param threhold: 默认100。小于100元的底层股票将不在最后的结果中展示\n :return: pd.DataFrame column: name, code, value, ratio\n \"\"\"\n d = {}\n if year is None or season is None:\n rd = convert_date(date) - pd.Timedelta(days=120)\n if not year:\n year = rd.year\n if not season:\n season = int((rd.month - 0.1) / 3) + 1\n logger.debug(\"use %s, %s for fund report\" % (year, season))\n for f in self.fundtradeobj:\n if isinstance(f, itrade):\n if f.get_type() == \"股票\":\n code = f.code\n elif f.get_type() == \"场内基金\":\n code = f.code[2:]\n else:\n continue\n else:\n code = f.code\n value = f.briefdailyreport(date).get(\"currentvalue\", 0)\n if value > 0:\n if code.startswith(\"SH\") or code.startswith(\"SZ\"):\n stock = code\n d[stock] = d.get(stock, 0) + value\n elif code == \"mf\":\n continue\n else:\n df = get_fund_holdings(code, year, season)\n if df is None:\n continue\n\n for _, row in df.iterrows():\n stock = row[\"code\"]\n stock = ttjjcode(stock)\n d[stock] = d.get(stock, 0) + row[\"ratio\"] / 100 * value\n # print(\"%s has %s contribution from %s\" %(stock, row[\"ratio\"] / 100 * value, f.name))\n\n l = []\n for code, value in sorted(d.items(), key=lambda item: -item[1]):\n if value >= threhold:\n try:\n name = get_rt(code)[\"name\"]\n except:\n name = code\n l.append([name, code, value])\n fdf = pd.DataFrame(l, columns=[\"name\", \"code\", \"value\"])\n fdf[\"ratio\"] = fdf[\"value\"] / fdf[\"value\"].sum()\n return fdf\n\n def get_portfolio(self, date=yesterdayobj()):\n \"\"\"\n 获取基金组合底层资产大类配置的具体值\n\n :param date:\n :return: Dict[str, float]. 
stock,bond,cash 对应总值的字典\n \"\"\"\n\n d = {\"stock\": 0, \"bond\": 0, \"cash\": 0}\n date = convert_date(date)\n for f in self.fundtradeobj:\n value = f.briefdailyreport(date).get(\"currentvalue\", 0)\n if value > 0:\n if isinstance(f, itrade):\n if f.get_type() == \"股票\":\n d[\"stock\"] += value\n continue\n elif f.get_type() in [\"可转债\", \"债券\"]:\n d[\"bond\"] += value\n continue\n elif f.get_type() == \"货币基金\":\n d[\"cash\"] += value\n continue\n elif f.get_type() == \"场内基金\":\n code = f.code[2:]\n else:\n continue\n else:\n code = f.code\n if code == \"mf\":\n d[\"cash\"] += value\n continue\n if get_fund_type(code) == \"货币基金\":\n d[\"cash\"] += value\n continue\n df = xu.get_daily(\"pt-F\" + code, end=date.strftime(\"%Y%m%d\"))\n if df is None or len(df) == 0:\n logger.warning(\"empty portfolio info for %s\" % code)\n continue\n row = df.iloc[-1]\n if row[\"bond_ratio\"] + row[\"stock_ratio\"] < 10: # 联接基金\n d[\"stock\"] += (\n (100 - row[\"bond_ratio\"] - row[\"cash_ratio\"]) * value / 100\n )\n d[\"bond\"] += row[\"bond_ratio\"] * value / 100\n d[\"cash\"] += row[\"cash_ratio\"] * value / 100\n else:\n d[\"stock\"] += row[\"stock_ratio\"] * value / 100\n d[\"bond\"] += row[\"bond_ratio\"] * value / 100\n d[\"cash\"] += row[\"cash_ratio\"] * value / 100\n return d\n\n get_portfolio_holdings = get_portfolio\n\n def get_industry(self, date=yesterdayobj()):\n \"\"\"\n 获取基金组合持仓的行业占比信息,底层为非 A 股持仓的暂不支持\n\n :param date:\n :return: Dict\n \"\"\"\n # TODO: hard coded 一个字典来合并一些二级行业\n d = {}\n date = convert_date(date)\n rd = date - pd.Timedelta(days=120)\n year = rd.year\n season = int((rd.month - 0.1) / 3) + 1\n for f in self.fundtradeobj:\n value = f.briefdailyreport(date).get(\"currentvalue\", 0)\n if value > 0:\n if isinstance(f, itrade):\n if f.get_type() == \"股票\":\n industry = get_industry_fromxq(f.code).get(\"industryname\", \"\")\n if industry.strip():\n d[industry] = d.get(industry, 0) + value\n continue\n elif f.get_type() in [\"可转债\", \"债券\", \"货币基金\"]:\n # 现在简化实现可转债暂时不按正股记行业\n continue\n elif f.get_type() == \"场内基金\":\n code = f.code[2:]\n else:\n continue\n else:\n code = f.code\n if code == \"mf\":\n continue\n if get_fund_type(code) == \"货币基金\":\n continue\n ## 以下为持有股票的基金处理\n ## fundinfo 有点浪费,不过简化实现暂时如此\n fobj = fundinfo(code)\n industry_dict = fobj.get_industry_holdings(year=year, season=season)\n if industry_dict is None:\n continue\n ## 这里行业占比需要做个 scaling\n sv = sum([v for _, v in industry_dict.items()])\n if sv < 1.0:\n # 只有极少数持仓存在行业信息\n continue\n stock_ratio = fobj.get_portfolio_holdings(date.strftime(\"%Y%m%d\"))[\n \"stock_ratio\"\n ]\n scale = stock_ratio / sv\n logger.debug(\"industry ratio scale factor for %s: %s\" % (code, scale))\n for k, v in industry_dict.items():\n if k.strip():\n d[k] = d.get(k, 0) + value * v / 100 * scale\n return d\n\n get_industry_holdings = get_industry\n\n def v_positions(self, date=yesterdayobj(), rendered=True):\n \"\"\"\n pie chart visualization of positions ratio in combination\n \"\"\"\n sdata = sorted(\n [\n (fob.name, fob.briefdailyreport(date).get(\"currentvalue\", 0))\n for fob in self.fundtradeobj\n ],\n key=lambda x: x[1],\n reverse=True,\n )\n pie = Pie()\n pie.add(\n series_name=\"总值占比\",\n data_pair=sdata,\n label_opts=opts.LabelOpts(is_show=False, position=\"center\"),\n ).set_global_opts(\n legend_opts=opts.LegendOpts(\n pos_left=\"left\", type_=\"scroll\", orient=\"vertical\"\n )\n ).set_series_opts(\n tooltip_opts=opts.TooltipOpts(\n trigger=\"item\", formatter=\"{a} <br/>{b}: {c} ({d}%)\"\n ),\n )\n\n if rendered:\n return pie.render_notebook()\n else:\n return pie\n\n def 
v_category_positions(self, date=yesterdayobj(), rendered=True):\n \"\"\"\n 资产分类扇形图,按大类资产求和绘制\n\n :param date:\n :param rendered: bool. default true for notebook, for plain pyechart obj to return, set rendered=False\n :return:\n \"\"\"\n d = {}\n for f in self.fundtradeobj:\n if isinstance(f, itrade):\n t = f.get_type()\n if t == \"场内基金\":\n t = get_fund_type(f.code[2:])\n elif f.code == \"mf\":\n t = \"货币基金\"\n else:\n t = get_fund_type(f.code)\n if t == \"其他\":\n logger.warning(\n \"%s has category others which should be double checked\" % f.code\n )\n d[t] = d.get(t, 0) + f.briefdailyreport(date).get(\"currentvalue\", 0)\n\n sdata = sorted([(k, round(v, 2)) for k, v in d.items()])\n pie = Pie()\n pie.add(\n series_name=\"总值占比\",\n data_pair=sdata,\n label_opts=opts.LabelOpts(is_show=False, position=\"center\"),\n ).set_global_opts(\n legend_opts=opts.LegendOpts(\n pos_left=\"left\", type_=\"scroll\", orient=\"vertical\"\n )\n ).set_series_opts(\n tooltip_opts=opts.TooltipOpts(\n trigger=\"item\", formatter=\"{a} <br/>{b}: {c} ({d}%)\"\n ),\n )\n\n if rendered:\n return pie.render_notebook()\n else:\n return pie\n\n def v_positions_history(self, end=yesterdaydash(), rendered=True):\n \"\"\"\n river chart visualization of positions ratio history\n use text size to avoid legend overlap in some sense, e.g. legend_text_size=8\n \"\"\"\n start = self.totcftable.iloc[0].date\n times = pd.date_range(start, end)\n tdata = []\n for date in times:\n sdata = sorted(\n [\n (date, fob.briefdailyreport(date).get(\"currentvalue\", 0), fob.name,)\n for fob in self.fundtradeobj\n ],\n key=lambda x: x[1],\n reverse=True,\n )\n tdata.extend(sdata)\n\n tr = ThemeRiver()\n tr.add(\n series_name=[foj.name for foj in self.fundtradeobj],\n data=tdata,\n label_opts=opts.LabelOpts(is_show=False),\n singleaxis_opts=opts.SingleAxisOpts(type_=\"time\", pos_bottom=\"10%\"),\n )\n if rendered:\n return tr.render_notebook()\n else:\n return tr\n\n def v_tradevolume(self, freq=\"D\", rendered=True):\n \"\"\"\n visualization on trade summary of the funds combination\n\n :param freq: one character string, frequency label, now supporting D for date,\n W for week and M for month, namely the trade volume is shown based on the time unit\n :returns: ``pyecharts.Bar()``\n \"\"\"\n return vtradevolume(self.totcftable, freq=freq, rendered=rendered)\n\n\nclass mulfix(mul, indicator):\n \"\"\"\n introduce cash to make a closed investment system, where netvalue analysis can be applied\n namely the totcftable only has one row at the very beginning\n\n :param fundtradeobj: trade obj to be included\n :param status: status table, if no trade obj is provided, it will include all funds\n based on code in status table\n :param property: Dict[fundcode, property_number]. property number 的解释:\n int. 
1: 基金申购采取分位以后全舍而非四舍五入(这种基金是真实存在的==)。2:基金默认分红再投入(0 则是默认现金分红)。4:基金赎回按净值\n :param fetch: boolean, when open the fetch option, info class will try fetching from local files first in the init\n :param save: boolean, when open the save option, info classes automatically save the class to files\n :param path: string, the file path prefix of IO, or object or engine from sqlalchemy to connect sql database\n :param form: string, the format of IO, options including: 'csv','sql'\n :param totmoney: positive float, the total money as the input at the beginning\n :param cashobj: cashinfo object, which is designed to balance the cash in and out\n \"\"\"\n\n def __init__(\n self,\n *fundtradeobj,\n status=None,\n istatus=None,\n property=None,\n fetch=False,\n save=False,\n path=\"\",\n form=\"csv\",\n totmoney=100000,\n cashobj=None\n ):\n super().__init__(\n *fundtradeobj,\n status=status,\n istatus=istatus,\n property=property,\n fetch=fetch,\n save=save,\n path=path,\n form=form\n )\n if cashobj is None:\n cashobj = cashinfo()\n self.totmoney = totmoney\n nst = mulfix._vcash(totmoney, self.totcftable, cashobj)\n cashtrade = trade(cashobj, nst)\n # \t\t super().__init__(*self.fundtradeobj, cashtrade)\n self.cashobj = cashobj\n self.fundtradeobj = list(self.fundtradeobj)\n self.fundtradeobj.append(cashtrade)\n self.fundtradeobj = tuple(self.fundtradeobj)\n btnk = bottleneck(self.totcftable)\n if btnk > totmoney:\n raise TradeBehaviorError(\"the initial total cash is too low\")\n self.totcftable = pd.DataFrame(\n data={\"date\": [nst.iloc[0].date], \"cash\": [-totmoney]}\n )\n\n @staticmethod\n def _vcash(totmoney, totcftable, cashobj):\n \"\"\"\n return a virtue status table with a mf(cash) column based on the given tot money and cftable\n \"\"\"\n cashl = []\n cashl.append(totmoney + totcftable.iloc[0].cash)\n for i in range(len(totcftable) - 1):\n date = totcftable.iloc[i + 1].date\n delta = totcftable.iloc[i + 1].cash\n if delta < 0:\n cashl.append(\n myround(\n delta\n / cashobj.price[cashobj.price[\"date\"] <= date].iloc[-1].netvalue\n )\n )\n else:\n cashl.append(delta)\n datadict = {\"date\": totcftable.loc[:, \"date\"], \"mf\": cashl}\n return pd.DataFrame(data=datadict)\n\n def unitvalue(self, date=yesterdayobj()):\n \"\"\"\n :returns: float at unitvalue of the whole investment combination\n \"\"\"\n date = convert_date(date)\n res = 0\n for fund in self.fundtradeobj:\n res += fund.briefdailyreport(date).get(\"currentvalue\", 0)\n return res / self.totmoney\n\n def v_tradecost(self, threhold=0, date=yesterdayobj(), rendered=True):\n if getattr(self, \"price\", None) is None:\n raise ValueError(\"Please generate price table by ``bcmkset()`` first\")\n cftable = self.fundtradeobj[-1].cftable[1:]\n cftable = cftable[abs(cftable[\"cash\"]) > threhold]\n cftable[\"cash\"] = -cftable[\"cash\"]\n return vtradecost(self, cftable, end=date, rendered=rendered)\n\n\nclass imul(mul):\n def __init__(self, *fundtradeobj, status=None, istatus=None):\n \"\"\"\n 对场内投资组合进行分析的类\n\n :param fundtradeobj: itrade objects.\n :param status: 场内格式记账单,或 irecord 对象。\n \"\"\"\n\n if not fundtradeobj:\n fundtradeobj = []\n if status is None:\n status = istatus\n if isinstance(status, irecord):\n status = status.status\n fundcodelist = [f.code for f in fundtradeobj]\n if status is not None:\n for code in status.code.unique():\n if code not in fundcodelist and not code.startswith(\"#\"):\n fundtradeobj.append(itrade(code, status))\n self.fundtradeobj = tuple(fundtradeobj)\n self.totcftable = self._mergecftb()\n self.is_in = 
True\n\n\nMul = mul\nMulFix = mulfix\nIMul = imul\n"
] | [
[
"pandas.DataFrame",
"pandas.date_range",
"pandas.Timedelta"
]
] |
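The get_portfolio method in the record above reduces each holding to three buckets by multiplying the holding's current value by per-fund percentage ratios and summing into a dict. A minimal, self-contained sketch of that accumulation pattern; the holdings list and its ratio fields here are made-up stand-ins, not the library's actual data access:

# Bucket accumulation as in get_portfolio: each ratio is a percentage of
# the holding's current value. The sample holdings below are hypothetical.
holdings = [
    {"value": 10000.0, "stock_ratio": 90.0, "bond_ratio": 5.0, "cash_ratio": 5.0},
    {"value": 5000.0, "stock_ratio": 0.0, "bond_ratio": 80.0, "cash_ratio": 20.0},
]

def aggregate(holdings):
    d = {"stock": 0.0, "bond": 0.0, "cash": 0.0}
    for h in holdings:
        # value-weighted contribution of this holding to each asset class
        d["stock"] += h["stock_ratio"] * h["value"] / 100
        d["bond"] += h["bond_ratio"] * h["value"] / 100
        d["cash"] += h["cash_ratio"] * h["value"] / 100
    return d

print(aggregate(holdings))  # {'stock': 9000.0, 'bond': 4500.0, 'cash': 1500.0}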
smrutiranjans/tensorflow | [
"d8e8b872eae63188c75046d5bb068e03a81b3f85"
] | [
"tensorflow/python/ops/op_def_library.py"
] | [
"# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Class to hold a library of OpDefs and use it to create Brain operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\n\nimport six\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import op_def_pb2\nfrom tensorflow.core.framework import tensor_pb2\nfrom tensorflow.core.framework import tensor_shape_pb2\nfrom tensorflow.core.framework import types_pb2\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import constant_op\nfrom tensorflow.python.platform import logging\nfrom tensorflow.python.util import compat\n\n\ndef _Attr(op_def, name):\n for attr in op_def.attr:\n if attr.name == name:\n return attr\n raise TypeError(\"Inconsistent OpDef for '%s', missing attr '%s'\" %\n (op_def.name, name))\n\n\ndef _AttrValue(attr_protos, name):\n if name in attr_protos:\n return attr_protos[name]\n raise TypeError(\"Inconsistent OpDef, missing attr '%s' from '%s'.\" %\n (name, attr_protos))\n\n\ndef _SatisfiesTypeConstraint(dtype, attr_def):\n if attr_def.HasField(\"allowed_values\"):\n allowed_list = attr_def.allowed_values.list.type\n if dtype not in allowed_list:\n raise TypeError(\n \"DataType %s for attr '%s' not in list of allowed values: %s\" %\n (dtypes.as_dtype(dtype).name, attr_def.name,\n \", \".join(dtypes.as_dtype(x).name for x in allowed_list)))\n\n\ndef _IsListParameter(arg):\n if arg.number_attr:\n return True\n elif arg.type_list_attr:\n return True\n return False\n\n\ndef _NumTypeFields(arg):\n num = 0\n if arg.type != types_pb2.DT_INVALID: num += 1\n if arg.type_attr: num += 1\n if arg.type_list_attr: num += 1\n return num\n\n\ndef _IsListValue(v):\n return isinstance(v, (list, tuple))\n\n\ndef _Flatten(l):\n \"\"\"Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5].\"\"\"\n # [1, 2, [3, 4], [5]] -> [[1], [2], [3, 4], [5]]\n l_of_l = [x if _IsListValue(x) else [x] for x in l]\n # [[1], [2], [3, 4], [5]] -> [1, 2, 3, 4, 5]\n return [item for sublist in l_of_l for item in sublist]\n\n\ndef _Restructure(l, structure):\n \"\"\"Returns the elements of list l structured according to the given structure.\n\n A structure is represented by a list whose elements are either\n `None` or a non-negative integer. `None` corresponds to a single\n element in the output list, and an integer N corresponds to a nested\n list of length N.\n\n The function returns a data structure whose shape is given by\n `structure`, and whose elements are taken from `l`. If `structure`\n is a singleton, the function returns the single data structure\n implied by the 0th element of `structure`. 
For example:\n\n _Restructure([\"foo\", \"bar\", \"baz\", \"qux\"], [None, 2, None])\n -> [\"foo\", [\"bar\", \"baz\"], \"qux\"]\n\n _Restructure([\"foo\"], [None]) -> \"foo\"\n\n _Restructure([\"foo\"], [1]) -> [\"foo\"]\n\n _Restructure([], [0]) -> []\n\n Args:\n l: A list.\n structure: A list whose elements are either `None` or a non-negative\n integer.\n\n Returns:\n The elements of `l`, restructured according to `structure`. If\n `structure` is a list of length 1, this function returns the\n single data structure implied by `structure[0]`.\n\n \"\"\"\n result = []\n current_index = 0\n for element in structure:\n if element is None:\n result.append(l[current_index])\n current_index += 1\n else:\n result.append(l[current_index:current_index+element])\n current_index += element\n\n if len(result) == 1:\n return result[0]\n else:\n return tuple(result)\n\n\ndef _MakeFloat(v, arg_name):\n if not isinstance(v, compat.real_types):\n raise TypeError(\"Expected float for argument '%s' not %s.\" %\n (arg_name, repr(v)))\n return float(v)\n\n\ndef _MakeInt(v, arg_name):\n if isinstance(v, six.string_types):\n raise TypeError(\"Expected int for argument '%s' not %s.\" %\n (arg_name, repr(v)))\n try:\n return int(v)\n except (ValueError, TypeError):\n raise TypeError(\"Expected int for argument '%s' not %s.\" %\n (arg_name, repr(v)))\n\n\ndef _MakeStr(v, arg_name):\n if not isinstance(v, compat.bytes_or_text_types):\n raise TypeError(\"Expected string for argument '%s' not %s.\" %\n (arg_name, repr(v)))\n return compat.as_bytes(v) # Convert unicode strings to bytes.\n\n\ndef _MakeBool(v, arg_name):\n if not isinstance(v, bool):\n raise TypeError(\"Expected bool for argument '%s' not %s.\" %\n (arg_name, repr(v)))\n return v\n\n\ndef _MakeType(v, attr_def):\n try:\n v = dtypes.as_dtype(v)\n except TypeError:\n raise TypeError(\"Expected DataType for argument '%s' not %s.\" %\n (attr_def.name, repr(v)))\n i = v.as_datatype_enum\n _SatisfiesTypeConstraint(i, attr_def)\n return i\n\n\ndef _MakeShape(v, arg_name):\n \"\"\"Convert v into a TensorShapeProto.\"\"\"\n # Args:\n # v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.\n # arg_name: String, for error messages.\n\n # Returns:\n # A TensorShapeProto.\n if isinstance(v, tensor_shape_pb2.TensorShapeProto):\n for d in v.dim:\n if d.name:\n logging.warning(\"Warning: TensorShapeProto with a named dimension: %s\",\n str(v))\n break\n return v\n return tensor_shape.as_shape(v).as_proto()\n\n\ndef _MakeTensor(v, arg_name):\n \"\"\"Ensure v is a TensorProto.\"\"\"\n if isinstance(v, tensor_pb2.TensorProto):\n return v\n raise TypeError(\n \"Don't know how to convert %s to a TensorProto for argument '%s'\" %\n (repr(v), arg_name))\n\n\nclass _OpInfo(object):\n \"\"\"All per-Op state we would like to precompute/validate.\"\"\"\n\n def __init__(self, op_def):\n self.op_def = op_def\n # TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it\n # here, instead of these checks.\n for arg in list(op_def.input_arg) + list(op_def.output_arg):\n num_type_fields = _NumTypeFields(arg)\n if num_type_fields != 1:\n raise TypeError(\"Arg '%s' of '%s' must have one type field not %d\" %\n (arg.name, op_def.name, num_type_fields))\n if arg.type_attr:\n attr_type = _Attr(op_def, arg.type_attr).type\n if attr_type != \"type\":\n raise TypeError(\"Attr '%s' of '%s' used as a type_attr \"\n \"but has type %s\" %\n (arg.type_attr, op_def.name, attr_type))\n if arg.type_list_attr:\n attr_type = _Attr(op_def, arg.type_list_attr).type\n if 
attr_type != \"list(type)\":\n raise TypeError(\n \"Attr '%s' of '%s' used as a type_list_attr but has type %s\" %\n (arg.type_attr, op_def.name, attr_type))\n if arg.number_attr:\n attr_type = _Attr(op_def, arg.number_attr).type\n if attr_type != \"int\":\n raise TypeError(\n \"Attr '%s' of '%s' used as a number_attr but has type %s\" %\n (arg.number_attr, op_def.name, attr_type))\n\n\n# pylint: disable=g-doc-return-or-yield\[email protected]\ndef _MaybeColocateWith(inputs):\n \"\"\"A context manager for (maybe) colocating with a list of input tensors.\n\n Args:\n inputs: A list of `Tensor` or `Operation` objects.\n\n Returns:\n A context manager.\n \"\"\"\n if not inputs:\n yield\n else:\n # NOTE(mrry): The `ops.colocate_with()` function accepts only a single\n # op or tensor, so we create one context manager per element in the list.\n with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):\n yield\n# pylint: enable=g-doc-return-or-yield\n\n\nclass OpDefLibrary(object):\n \"\"\"Holds a collection of OpDefs, can add the corresponding Ops to a graph.\"\"\"\n\n def __init__(self):\n self._ops = {}\n\n def add_op(self, op_def):\n \"\"\"Register an OpDef. May call apply_op with the name afterwards.\"\"\"\n if not isinstance(op_def, op_def_pb2.OpDef):\n raise TypeError(\"%s is %s, not an op_def_pb2.OpDef\" %\n (op_def, type(op_def)))\n if not op_def.name:\n raise ValueError(\"%s missing name.\" % op_def)\n if op_def.name in self._ops:\n raise RuntimeError(\"Op name %s registered twice.\" % op_def.name)\n self._ops[op_def.name] = _OpInfo(op_def)\n\n def add_op_list(self, op_list):\n \"\"\"Register the OpDefs from an OpList.\"\"\"\n if not isinstance(op_list, op_def_pb2.OpList):\n raise TypeError(\"%s is %s, not an op_def_pb2.OpList\" %\n (op_list, type(op_list)))\n for op_def in op_list.op:\n self.add_op(op_def)\n\n def apply_op(self, op_type_name, name=None, **keywords):\n # pylint: disable=g-doc-args\n \"\"\"Add a node invoking a registered Op to a graph.\n\n Config proto extensions must be provided via the 'ext' keyword argument.\n Example usage:\n # input1 and input2 can be Tensors or anything ops.convert_to_tensor()\n # will convert to a Tensor.\n op_def_library.apply_op(\"op\", input1=input1, input2=input2)\n # Can specify a node name.\n op_def_library.apply_op(\"op\", input1=input1, name=\"node_name\")\n # Must use keyword arguments, with the names specified in the OpDef.\n op_def_library.apply_op(\"op\", input_name=input, attr_name=attr)\n\n All attrs must either be inferred from an input or specified.\n (If inferred, the attr must not be specified.) If an attr has a default\n value specified in the Op's OpDef, then you may pass None as the value\n of that attr to get the default.\n\n Args:\n op_type_name: string. Must match the name field of a registered Op.\n name: string. 
Optional name of the created op.\n **keywords: input Tensor and attr arguments specified by name,\n and optional parameters to pass when constructing the Operation.\n\n Returns:\n The Tensor(s) representing the output of the operation, or the Operation\n itself if there are no outputs.\n\n Raises:\n RuntimeError: On some errors.\n TypeError: On some errors.\n ValueError: On some errors.\n \"\"\"\n op_info = self._ops.get(op_type_name, None)\n if op_info is None:\n raise RuntimeError(\"Unrecognized Op name \" + op_type_name)\n op_def = op_info.op_def\n\n # Determine the graph context.\n try:\n # Need to flatten all the arguments into a list.\n # pylint: disable=protected-access\n g = ops._get_graph_from_inputs(_Flatten(keywords.values()))\n # pyline: enable=protected-access\n except AssertionError as e:\n raise RuntimeError(\n \"Cannot determine graph for Op '%s' due to: %s\"\n % (op_type_name, e.message))\n\n # Default name if not specified.\n if name is None:\n name = op_type_name\n\n # Check for deprecation\n deprecation_version = op_def.deprecation.version\n if deprecation_version:\n producer = g.graph_def_versions.producer\n if producer >= deprecation_version:\n raise NotImplementedError(\n (\"Op %s is not available in GraphDef version %d. \"\n \"It has been removed in version %d. %s.\") %\n (op_type_name, producer, deprecation_version,\n op_def.deprecation.explanation))\n\n # Requires that op_def has passed validation (using the C++\n # ValidateOpDef() from ../framework/op_def_util.h).\n attrs = {}\n inputs = []\n input_types = []\n with g.as_default(), ops.name_scope(name) as scope:\n\n # Perform input type inference\n inferred_from = {}\n for input_arg in op_def.input_arg:\n input_name = input_arg.name\n if input_name in keywords:\n values = keywords.pop(input_name)\n elif input_name + \"_\" in keywords:\n # Handle the case where the name is a keyword or built-in\n # for Python so we use the name + _ instead.\n input_name += \"_\"\n values = keywords.pop(input_name)\n else:\n raise TypeError(\"No argument for input \" + input_name)\n\n # Goals:\n # * Convert values to Tensors if it contains constants.\n # * Verify that values is a list if that matches the input_arg's\n # type.\n # * If the input_arg's type is determined by attrs, either set\n # those attrs and validate those attr values are legal (if\n # they have not yet been set) or validate the input matches\n # the type indicated by the attrs (if they have already been\n # inferred via an earlier input).\n # * If the input_arg has an explicit type, make sure the input\n # conforms.\n\n if _IsListParameter(input_arg):\n if not _IsListValue(values):\n raise TypeError(\n \"Expected list for '%s' argument to '%s' Op, not %s.\" %\n (input_name, op_type_name, values))\n # In cases where we expect all elements of the list to have the\n # same dtype, try to cast non-Tensor elements to that type.\n dtype = None\n if input_arg.type != types_pb2.DT_INVALID:\n dtype = input_arg.type\n elif input_arg.number_attr:\n if input_arg.type_attr in attrs:\n dtype = attrs[input_arg.type_attr]\n else:\n for t in values:\n if isinstance(t, ops.Tensor):\n dtype = t.dtype\n break\n\n try:\n if not input_arg.is_ref and dtype:\n dtype = dtypes.as_dtype(dtype).base_dtype\n values = ops.convert_n_to_tensor(\n values, name=input_arg.name, dtype=dtype if dtype else None,\n as_ref=input_arg.is_ref)\n except (TypeError, ValueError):\n assert dtype is not None, \"Should not fail if dtype is None\"\n assert input_arg.number_attr, \"Should be number_attr case\"\n # 
What types does the conversion function think values have?\n values = ops.convert_n_to_tensor(values, as_ref=input_arg.is_ref)\n observed = \", \".join(v.dtype.base_dtype.name for v in values)\n\n prefix = (\n \"Tensors in list passed to '%s' of '%s' Op have types [%s]\" %\n (input_name, op_type_name, observed))\n if input_arg.type != types_pb2.DT_INVALID:\n raise TypeError(\"%s that do not match expected type %s.\" %\n (prefix, dtype.name))\n elif input_arg.type_attr in attrs:\n raise TypeError(\"%s that do not match type %s inferred from \"\n \"earlier arguments.\" %\n (prefix, dtype.name))\n else:\n raise TypeError(\"%s that don't all match.\" % prefix)\n\n types = [x.dtype for x in values]\n inputs.extend(values)\n else:\n # In cases where we have an expected type, try to convert non-Tensor\n # arguments to that type.\n dtype = None\n if input_arg.type != types_pb2.DT_INVALID:\n dtype = input_arg.type\n elif input_arg.type_attr in attrs:\n dtype = attrs[input_arg.type_attr]\n try:\n values = ops.convert_to_tensor(\n values, name=input_arg.name, dtype=dtype,\n as_ref=input_arg.is_ref)\n except ValueError:\n # What type does convert_to_tensor think it has?\n observed = ops.convert_to_tensor(values,\n as_ref=input_arg.is_ref).dtype.name\n prefix = (\"Input '%s' of '%s' Op has type %s that does not match\" %\n (input_name, op_type_name, observed))\n if input_arg.type != types_pb2.DT_INVALID:\n raise TypeError(\"%s expected type of %s.\" %\n (prefix, dtypes.as_dtype(input_arg.type).name))\n else:\n raise TypeError(\n \"%s type %s of argument '%s'.\" %\n (prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,\n inferred_from[input_arg.type_attr]))\n\n types = [values.dtype]\n inputs.append(values)\n base_types = [x.base_dtype for x in types]\n\n if input_arg.number_attr:\n # <number-attr> * <type> or <number-attr> * <type-attr>\n if input_arg.number_attr in attrs:\n if len(values) != attrs[input_arg.number_attr]:\n raise ValueError(\n \"List argument '%s' to '%s' Op with length %d must match \"\n \"length %d of argument '%s'.\" %\n (input_name, op_type_name, len(values),\n attrs[input_arg.number_attr],\n inferred_from[input_arg.number_attr]))\n else:\n attrs[input_arg.number_attr] = len(values)\n inferred_from[input_arg.number_attr] = input_name\n num_attr = _Attr(op_def, input_arg.number_attr)\n if num_attr.has_minimum and len(values) < num_attr.minimum:\n raise ValueError(\n \"List argument '%s' to '%s' Op with length %d shorter \"\n \"than minimum length %d.\" %\n (input_name, op_type_name, len(values), num_attr.minimum))\n # All tensors must have the same base type.\n if any([bt != base_types[0] for bt in base_types]):\n raise TypeError(\n \"All tensors passed to '%s' of '%s' Op \"\n \"must have the same type.\" %\n (input_name, op_type_name))\n if input_arg.type != types_pb2.DT_INVALID:\n # <number-attr> * <type> case\n if base_types and base_types[0] != input_arg.type:\n assert False, \"Unreachable\"\n elif input_arg.type_attr in attrs:\n # <number-attr> * <type-attr> case, where <type-attr> already\n # has an inferred value.\n if base_types and base_types[0] != attrs[input_arg.type_attr]:\n assert False, \"Unreachable\"\n else:\n # <number-attr> * <type-attr> case, where we are now setting\n # the <type-attr> based on this input\n if not base_types:\n raise TypeError(\n \"Don't know how to infer type variable from empty input \"\n \"list passed to input '%s' of '%s' Op.\" %\n (input_name, op_type_name))\n attrs[input_arg.type_attr] = base_types[0]\n 
inferred_from[input_arg.type_attr] = input_name\n type_attr = _Attr(op_def, input_arg.type_attr)\n _SatisfiesTypeConstraint(base_types[0], type_attr)\n elif input_arg.type_attr:\n # <type-attr>\n attr_value = base_types[0]\n if input_arg.type_attr in attrs:\n if attrs[input_arg.type_attr] != attr_value:\n assert False, \"Unreachable\"\n else:\n for base_type in base_types:\n _SatisfiesTypeConstraint(base_type,\n _Attr(op_def, input_arg.type_attr))\n attrs[input_arg.type_attr] = attr_value\n inferred_from[input_arg.type_attr] = input_name\n elif input_arg.type_list_attr:\n # <type-list-attr>\n attr_value = base_types\n if input_arg.type_list_attr in attrs:\n if attrs[input_arg.type_list_attr] != attr_value:\n raise TypeError(\n \"Input '%s' of '%s' Op has type list of %s that does not \"\n \"match type list %s of argument '%s'.\" %\n (input_name, op_type_name,\n \", \".join(dtypes.as_dtype(x).name for x in attr_value),\n \", \".join(dtypes.as_dtype(x).name\n for x in attrs[input_arg.type_list_attr]),\n inferred_from[input_arg.type_list_attr]))\n else:\n for base_type in base_types:\n _SatisfiesTypeConstraint(base_type,\n _Attr(op_def, input_arg.type_list_attr))\n attrs[input_arg.type_list_attr] = attr_value\n inferred_from[input_arg.type_list_attr] = input_name\n else:\n # single Tensor with specified type\n if base_types[0] != input_arg.type:\n assert False, \"Unreachable\"\n\n if input_arg.is_ref:\n if not all(x.is_ref_dtype for x in types):\n raise TypeError(\n \"Input '%s' of '%s' Op requires l-value input\" %\n (input_name, op_type_name))\n input_types.extend(types)\n else:\n input_types.extend(base_types)\n\n # Process remaining attrs\n for attr in op_def.attr:\n # Skip attrs that have already had their values inferred\n if attr.name in attrs:\n if attr.name in keywords:\n raise TypeError(\n \"Should not specify value for inferred attr '%s'.\" % attr.name)\n continue\n if attr.name in keywords:\n attrs[attr.name] = keywords.pop(attr.name)\n elif attr.name + \"_\" in keywords:\n # Attrs whose names match Python keywords have an extra '_'\n # appended, so we must check for that as well.\n attrs[attr.name] = keywords.pop(attr.name + \"_\")\n else:\n raise TypeError(\"No argument for attr \" + attr.name)\n\n # Convert attr values to AttrValue protos.\n attr_protos = {}\n for attr_def in op_def.attr:\n key = attr_def.name\n value = attrs[key]\n attr_value = attr_value_pb2.AttrValue()\n if attr_def.HasField(\"default_value\") and value is None:\n attr_value.CopyFrom(attr_def.default_value)\n attr_protos[key] = attr_value\n continue\n if attr_def.type.startswith(\"list(\"):\n if not _IsListValue(value):\n raise TypeError(\"Expected list for attr \" + key)\n if attr_def.has_minimum:\n if len(value) < attr_def.minimum:\n raise ValueError(\"Attr '%s' of '%s' Op passed list of length %d \"\n \"less than minimum %d.\" %\n (key, op_type_name, len(value),\n attr_def.minimum))\n attr_value.list.SetInParent()\n if attr_def.type == \"string\":\n attr_value.s = _MakeStr(value, key)\n if attr_def.HasField(\"allowed_values\"):\n if attr_value.s not in attr_def.allowed_values.list.s:\n raise ValueError(\n \"Attr '%s' of '%s' Op passed string '%s' not in: \\\"%s\\\".\" %\n (key, op_type_name, compat.as_text(attr_value.s),\n '\", \"'.join(map(compat.as_text,\n attr_def.allowed_values.list.s))))\n elif attr_def.type == \"list(string)\":\n attr_value.list.s.extend([_MakeStr(x, key) for x in value])\n if attr_def.HasField(\"allowed_values\"):\n for x in attr_value.list.s:\n if x not in 
attr_def.allowed_values.list.s:\n raise ValueError(\n \"Attr '%s' of '%s' Op passed string '%s' not in: \\\"%s\\\".\" %\n (key, op_type_name, compat.as_text(x),\n '\", \"'.join(map(compat.as_text,\n attr_def.allowed_values.list.s))))\n elif attr_def.type == \"int\":\n attr_value.i = _MakeInt(value, key)\n if attr_def.has_minimum:\n if attr_value.i < attr_def.minimum:\n raise ValueError(\n \"Attr '%s' of '%s' Op passed %d less than minimum %d.\" %\n (key, op_type_name, attr_value.i, attr_def.minimum))\n elif attr_def.type == \"list(int)\":\n attr_value.list.i.extend([_MakeInt(x, key) for x in value])\n elif attr_def.type == \"float\":\n attr_value.f = _MakeFloat(value, key)\n elif attr_def.type == \"list(float)\":\n attr_value.list.f.extend([_MakeFloat(x, key) for x in value])\n elif attr_def.type == \"bool\":\n attr_value.b = _MakeBool(value, key)\n elif attr_def.type == \"list(bool)\":\n attr_value.list.b.extend([_MakeBool(x, key) for x in value])\n elif attr_def.type == \"type\":\n attr_value.type = _MakeType(value, attr_def)\n elif attr_def.type == \"list(type)\":\n attr_value.list.type.extend(\n [_MakeType(x, attr_def) for x in value])\n elif attr_def.type == \"shape\":\n attr_value.shape.CopyFrom(_MakeShape(value, key))\n elif attr_def.type == \"list(shape)\":\n attr_value.list.shape.extend(\n [_MakeShape(x, key) for x in value])\n elif attr_def.type == \"tensor\":\n attr_value.tensor.CopyFrom(_MakeTensor(value, key))\n elif attr_def.type == \"list(tensor)\":\n attr_value.list.tensor.extend(\n [_MakeTensor(x, key) for x in value])\n elif attr_def.type == \"func\":\n if not isinstance(value, compat.bytes_or_text_types):\n raise TypeError(\"Expects a string for the func name\")\n attr_value.func.name = value\n else:\n raise TypeError(\"Unrecognized Attr type \" + attr_def.type)\n\n attr_protos[key] = attr_value\n del attrs # attrs is no longer authoritative, use attr_protos instead\n\n # Determine output types (possibly using attrs)\n output_types = []\n output_structure = []\n for arg in op_def.output_arg:\n types = []\n if arg.number_attr:\n n = _AttrValue(attr_protos, arg.number_attr).i\n if arg.type_attr:\n types = [_AttrValue(attr_protos, arg.type_attr).type] * n\n else:\n types = [arg.type] * n\n output_structure.append(n)\n elif arg.type_attr:\n t = _AttrValue(attr_protos, arg.type_attr)\n types = [t.type]\n output_structure.append(None)\n elif arg.type_list_attr:\n t = _AttrValue(attr_protos, arg.type_list_attr)\n types = t.list.type\n output_structure.append(len(t.list.type))\n else:\n types = [arg.type]\n output_structure.append(None)\n if arg.is_ref:\n types = [dtypes.as_dtype(x).as_ref for x in types]\n output_types.extend(types)\n\n if keywords:\n raise TypeError(\"apply_op() got unexpected keyword arguments: \" +\n \", \".join(sorted(keywords.keys())))\n\n # NOTE(mrry): We add an explicit colocation constraint between\n # the newly created op and any of its reference-typed inputs.\n must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)\n if arg.is_ref]\n with _MaybeColocateWith(must_colocate_inputs):\n # Add Op to graph\n if output_structure:\n op = g.create_op(op_type_name, inputs, output_types, name=scope,\n input_types=input_types, attrs=attr_protos,\n op_def=op_def)\n outputs = op.outputs\n return _Restructure(ops.convert_n_to_tensor(outputs),\n output_structure)\n else:\n return g.create_op(op_type_name, inputs, output_types, name=scope,\n input_types=input_types, attrs=attr_protos,\n op_def=op_def)\n"
] | [
[
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.util.compat.as_text",
"tensorflow.python.framework.ops.convert_n_to_tensor",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.dtypes.as_dtype"
]
] |
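The _Flatten/_Restructure pair in the op_def_library record above is self-contained: it is how apply_op maps the flat list of output tensors back onto the per-argument structure (None consumes one element, an integer N consumes a nested list of N). A standalone, runnable copy for illustration; note that with more than one structure entry the function returns a tuple, not the list shown in the source docstring's example:

def _flatten(l):
    """Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5]."""
    l_of_l = [x if isinstance(x, (list, tuple)) else [x] for x in l]
    return [item for sublist in l_of_l for item in sublist]

def _restructure(l, structure):
    """None takes one element of l; an integer N takes a sublist of N."""
    result, i = [], 0
    for element in structure:
        if element is None:
            result.append(l[i])
            i += 1
        else:
            result.append(l[i:i + element])
            i += element
    # A singleton structure unwraps to the single data structure it implies.
    return result[0] if len(result) == 1 else tuple(result)

assert _flatten([1, 2, [3, 4], [5]]) == [1, 2, 3, 4, 5]
assert _restructure(["foo", "bar", "baz", "qux"], [None, 2, None]) == ("foo", ["bar", "baz"], "qux")
assert _restructure(["foo"], [None]) == "foo"
assert _restructure(["foo"], [1]) == ["foo"]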
Del9fina/robel | [
"63dfac65932757134e5766f1e20a339efe281bc7",
"63dfac65932757134e5766f1e20a339efe281bc7"
] | [
"robel/components/tracking/group_config.py",
"robel/dclaw/turn.py"
] | [
"# Copyright 2019 The ROBEL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Configuration for a tracker component group.\"\"\"\n\nfrom typing import Iterable, Optional\n\nimport numpy as np\nfrom transforms3d.euler import euler2mat, quat2euler\nfrom transforms3d.quaternions import quat2mat\n\nfrom robel.simulation.sim_scene import SimScene\n\n\nclass TrackerGroupConfig:\n \"\"\"Group configuration for a TrackerComponent.\"\"\"\n\n def __init__(self,\n sim_scene: SimScene,\n element_name: Optional[str] = None,\n element_type: Optional[str] = None,\n qpos_indices: Optional[Iterable[int]] = None,\n qvel_indices: Optional[Iterable[int]] = None,\n sim_observation_noise: Optional[float] = None):\n \"\"\"Initializes a group configuration for a TrackerComponent.\n\n Args:\n sim_scene: The simulation, used for validation purposes.\n element_name: The name of the element to use for tracking in\n simulation.\n element_type: The type of the element as defined in the XML.\n Should be one of `site`, `body`, `geom`, or `joint`. If this is\n `joint`, `qpos_indices` and `qvel_indices` should be\n provided.\n qpos_indices: The indices into `MjData.qpos` to read for the\n joint element position and rotation.\n qvel_indices: The indices into `MjData.qvel` to read for the joint\n element velocity. 
This defaults to `qpos_indices`.\n sim_observation_noise: The range of the observation noise (in\n meters) to apply to the state in simulation.\n \"\"\"\n self.element_type = element_type\n if self.element_type not in ['site', 'body', 'geom', 'joint']:\n raise ValueError('Unknown element type %s' % self.element_type)\n\n self.element_name = element_name\n self.element_id = None\n self.element_attr = None\n self.qpos_indices = None\n self.qvel_indices = None\n self._is_euler = False\n\n if self.element_type == 'joint':\n if qpos_indices is None:\n raise ValueError('Must provided qpos_indices for joints.')\n # Ensure that the qpos indices are valid.\n nq = sim_scene.model.nq\n assert all(-nq <= i < nq for i in qpos_indices), \\\n 'All qpos indices must be in [-{}, {}]'.format(nq, nq - 1)\n self.qpos_indices = np.array(qpos_indices, dtype=int)\n\n if len(self.qpos_indices) == 6:\n self._is_euler = True\n elif len(self.qpos_indices) != 7:\n raise ValueError('qpos_indices must be 6 or 7 elements.')\n\n if qvel_indices is None:\n if not self._is_euler:\n raise ValueError(\n 'qvel_indices must be provided for free joints.')\n qvel_indices = qpos_indices\n\n # Ensure that the qvel indices are valid.\n nv = sim_scene.model.nv\n assert all(-nv <= i < nv for i in qvel_indices), \\\n 'All qvel indices must be in [-{}, {}]'.format(nv, nv - 1)\n self.qvel_indices = np.array(qvel_indices, dtype=int)\n else:\n self.element_attr = (lambda obj, attr_name: getattr(\n obj, self.element_type + '_' + attr_name))\n self.element_id = self.element_attr(sim_scene.model, 'name2id')(\n element_name)\n\n self.sim_observation_noise = sim_observation_noise\n\n def get_pos(self, sim_scene: SimScene) -> np.ndarray:\n \"\"\"Returns the cartesian position of the element.\"\"\"\n if self.qpos_indices is not None:\n return sim_scene.data.qpos[self.qpos_indices[:3]]\n return self.element_attr(sim_scene.data, 'xpos')[self.element_id, :]\n\n def get_rot(self, sim_scene: SimScene) -> np.ndarray:\n \"\"\"Returns the (3x3) rotation matrix of the element.\"\"\"\n if self.qpos_indices is not None:\n qpos = sim_scene.data.qpos[self.qpos_indices[3:]]\n if self._is_euler:\n return euler2mat(*qpos, axes='rxyz')\n return quat2mat(qpos)\n return self.element_attr(sim_scene.data,\n 'xmat')[self.element_id].reshape((3, 3))\n\n def get_vel(self, sim_scene: SimScene) -> np.ndarray:\n \"\"\"Returns the cartesian velocity of the element.\"\"\"\n if self.qvel_indices is not None:\n return sim_scene.data.qvel[self.qvel_indices[:3]]\n raise NotImplementedError('Cartesian velocity is not supported for ' +\n self.element_type)\n\n def get_angular_vel(self, sim_scene: SimScene) -> np.ndarray:\n \"\"\"Returns the angular velocity (x, y, z) of the element.\"\"\"\n if self.qvel_indices is not None:\n return sim_scene.data.qvel[self.qvel_indices[3:]]\n raise NotImplementedError('Angular velocity is not supported for ' +\n self.element_type)\n\n def set_pos(self, sim_scene: SimScene, pos: np.ndarray):\n \"\"\"Sets the cartesian position of the element.\"\"\"\n if self.qpos_indices is not None:\n sim_scene.data.qpos[self.qpos_indices[:len(pos)]] = pos\n return\n self.element_attr(sim_scene.model,\n 'pos')[self.element_id, :len(pos)] = pos\n\n def set_rot_quat(self, sim_scene: SimScene, quat: np.ndarray):\n \"\"\"Sets the cartesian position of the element.\"\"\"\n if self.qpos_indices is not None:\n qpos = quat\n if self._is_euler:\n qpos = quat2euler(quat, axes='rxyz')\n sim_scene.data.qpos[self.qpos_indices[3:]] = qpos\n return\n 
self.element_attr(sim_scene.model, 'quat')[self.element_id, :] = quat\n",
"# Copyright 2019 The ROBEL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Turn tasks with DClaw robots.\n\nThis is a single rotation of an object from an initial angle to a target angle.\n\"\"\"\n\nimport abc\nimport collections\nfrom typing import Dict, Optional, Sequence\n\nimport numpy as np\nfrom transforms3d.euler import euler2quat\n\nfrom robel.components.robot.dynamixel_robot import DynamixelRobotState\nfrom robel.dclaw.base_env import BaseDClawObjectEnv\nfrom robel.simulation.randomize import SimRandomizer\nfrom robel.utils.configurable import configurable\nfrom robel.utils.resources import get_asset_path\n\n# The observation keys that are concatenated as the environment observation.\nDEFAULT_OBSERVATION_KEYS = (\n 'claw_qpos',\n 'object_x',\n 'object_y',\n 'last_action',\n 'target_error',\n)\n\n# Reset pose for the claw joints.\nRESET_POSE = [0, -np.pi / 3, np.pi / 3] * 3\n\nDCLAW3_ASSET_PATH = 'robel/dclaw/assets/dclaw3xh_valve3_v0.xml'\n\n\nclass BaseDClawTurn(BaseDClawObjectEnv, metaclass=abc.ABCMeta):\n \"\"\"Shared logic for DClaw turn tasks.\"\"\"\n\n def __init__(self,\n asset_path: str = DCLAW3_ASSET_PATH,\n observation_keys: Sequence[str] = DEFAULT_OBSERVATION_KEYS,\n frame_skip: int = 40,\n interactive: bool = False,\n success_threshold: float = 0.1,\n **kwargs):\n \"\"\"Initializes the environment.\n\n Args:\n asset_path: The XML model file to load.\n observation_keys: The keys in `get_obs_dict` to concatenate as the\n observations returned by `step` and `reset`.\n frame_skip: The number of simulation steps per environment step.\n interactive: If True, allows the hardware guide motor to freely\n rotate and its current angle is used as the goal.\n success_threshold: The difference threshold (in radians) of the\n object position and the goal position within which we consider\n as a sucesss.\n \"\"\"\n super().__init__(\n sim_model=get_asset_path(asset_path),\n observation_keys=observation_keys,\n frame_skip=frame_skip,\n **kwargs)\n\n self._interactive = interactive\n self._success_threshold = success_threshold\n self._desired_claw_pos = RESET_POSE\n\n self._target_bid = self.model.body_name2id('target')\n\n # The following are modified (possibly every reset) by subclasses.\n self._initial_object_pos = 0\n self._initial_object_vel = 0\n self._set_target_object_pos(0)\n\n def _reset(self):\n \"\"\"Resets the environment.\"\"\"\n self._reset_dclaw_and_object(\n claw_pos=RESET_POSE,\n object_pos=self._initial_object_pos,\n object_vel=self._initial_object_vel,\n guide_pos=self._target_object_pos)\n\n # Disengage the motor.\n if self._interactive and self.robot.is_hardware:\n self.robot.set_motors_engaged('guide', False)\n\n def _step(self, action: np.ndarray):\n \"\"\"Applies an action to the robot.\"\"\"\n self.robot.step({\n 'dclaw': action,\n 'guide': np.atleast_1d(self._target_object_pos),\n })\n\n def get_obs_dict(self) -> Dict[str, np.ndarray]:\n \"\"\"Returns the current observation of the environment.\n\n Returns:\n A dictionary of observation 
values. This should be an ordered\n dictionary if `observation_keys` isn't set.\n \"\"\"\n claw_state, object_state, guide_state = self.robot.get_state(\n ['dclaw', 'object', 'guide'])\n\n # If in interactive mode, use the guide motor position as the goal.\n if self._interactive:\n self._set_target_object_pos(guide_state.qpos)\n\n # Calculate the signed angle difference to the target in [-pi, pi].\n target_error = self._target_object_pos - object_state.qpos\n target_error = np.mod(target_error + np.pi, 2 * np.pi) - np.pi\n\n obs_dict = collections.OrderedDict((\n ('claw_qpos', claw_state.qpos),\n ('claw_qvel', claw_state.qvel),\n ('object_x', np.cos(object_state.qpos)),\n ('object_y', np.sin(object_state.qpos)),\n ('object_qvel', object_state.qvel),\n ('last_action', self._get_last_action()),\n ('target_error', target_error),\n ))\n # Add hardware-specific state if present.\n if isinstance(claw_state, DynamixelRobotState):\n obs_dict['claw_current'] = claw_state.current\n\n return obs_dict\n\n def get_reward_dict(\n self,\n action: np.ndarray,\n obs_dict: Dict[str, np.ndarray],\n ) -> Dict[str, np.ndarray]:\n \"\"\"Returns the reward for the given action and observation.\"\"\"\n target_dist = np.abs(obs_dict['target_error'])\n claw_vel = obs_dict['claw_qvel']\n\n reward_dict = collections.OrderedDict((\n # Penalty for distance away from goal.\n ('target_dist_cost', -5 * target_dist),\n # Penalty for difference with nomimal pose.\n ('pose_diff_cost',\n -1 * np.linalg.norm(obs_dict['claw_qpos'] - self._desired_claw_pos)\n ),\n # Penality for high velocities.\n ('joint_vel_cost',\n -1 * np.linalg.norm(claw_vel[np.abs(claw_vel) >= 0.5])),\n\n # Reward for close proximity with goal.\n ('bonus_small', 10 * (target_dist < 0.25)),\n ('bonus_big', 50 * (target_dist < 0.10)),\n ))\n return reward_dict\n\n def get_score_dict(\n self,\n obs_dict: Dict[str, np.ndarray],\n reward_dict: Dict[str, np.ndarray],\n ) -> Dict[str, np.ndarray]:\n \"\"\"Returns a standardized measure of success for the environment.\"\"\"\n target_dist = np.abs(obs_dict['target_error'])\n score_dict = collections.OrderedDict((\n ('points', 1.0 - target_dist / np.pi),\n ('success', target_dist < self._success_threshold),\n ))\n score_dict.update(\n self._get_safety_scores(\n pos=obs_dict['claw_qpos'],\n vel=obs_dict['claw_qvel'],\n current=obs_dict.get('claw_current'),\n ))\n return score_dict\n\n def _set_target_object_pos(self, target_pos: float,\n unbounded: bool = False):\n \"\"\"Sets the goal angle to the given position.\"\"\"\n # Modulo to [-pi, pi].\n if not unbounded:\n target_pos = np.mod(target_pos + np.pi, 2 * np.pi) - np.pi\n self._target_object_pos = np.asarray(target_pos, dtype=np.float32)\n\n # Mark the target position in sim.\n # WARNING: euler2quat will mutate a passed numpy array.\n self.model.body_quat[self._target_bid] = euler2quat(\n 0, 0, float(target_pos))\n\n\n@configurable(pickleable=True)\nclass DClawTurnFixed(BaseDClawTurn):\n \"\"\"Turns the object with a fixed initial and fixed target position.\"\"\"\n\n def _reset(self):\n # Turn from 0 degrees to 180 degrees.\n self._initial_object_pos = 0\n self._set_target_object_pos(np.pi)\n super()._reset()\n\n\n@configurable(pickleable=True)\nclass DClawTurnRandom(BaseDClawTurn):\n \"\"\"Turns the object with a random initial and random target position.\"\"\"\n\n def _reset(self):\n # Initial position is +/- 60 degrees.\n self._initial_object_pos = self.np_random.uniform(\n low=-np.pi / 3, high=np.pi / 3)\n # Target position is 180 +/- 60 degrees.\n 
self._set_target_object_pos(\n np.pi + self.np_random.uniform(low=-np.pi / 3, high=np.pi / 3))\n super()._reset()\n\n\n@configurable(pickleable=True)\nclass DClawTurnRandomDynamics(DClawTurnRandom):\n \"\"\"Turns the object with a random initial and random target position.\n\n The dynamics of the simulation are randomized each episode.\n \"\"\"\n\n def __init__(self,\n *args,\n sim_observation_noise: Optional[float] = 0.05,\n **kwargs):\n super().__init__(\n *args, sim_observation_noise=sim_observation_noise, **kwargs)\n self._randomizer = SimRandomizer(self)\n self._dof_indices = (\n self.robot.get_config('dclaw').qvel_indices.tolist() +\n self.robot.get_config('object').qvel_indices.tolist())\n\n def _reset(self):\n # Randomize joint dynamics.\n self._randomizer.randomize_dofs(\n self._dof_indices,\n damping_range=(0.005, 0.1),\n friction_loss_range=(0.001, 0.005),\n )\n self._randomizer.randomize_actuators(\n all_same=True,\n kp_range=(1, 3),\n )\n # Randomize friction on all geoms in the scene.\n self._randomizer.randomize_geoms(\n all_same=True,\n friction_slide_range=(0.8, 1.2),\n friction_spin_range=(0.003, 0.007),\n friction_roll_range=(0.00005, 0.00015),\n )\n self._randomizer.randomize_bodies(\n ['mount'],\n position_perturb_range=(-0.01, 0.01),\n )\n self._randomizer.randomize_geoms(\n ['mount'],\n color_range=(0.2, 0.9),\n )\n self._randomizer.randomize_geoms(\n parent_body_names=['valve'],\n color_range=(0.2, 0.9),\n )\n super()._reset()\n"
] | [
[
"numpy.array"
],
[
"numpy.abs",
"numpy.cos",
"numpy.asarray",
"numpy.atleast_1d",
"numpy.mod",
"numpy.sin",
"numpy.linalg.norm"
]
] |
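The robel turn task above scores progress by the signed angular difference between object and goal, wrapped into [-pi, pi) via mod(error + pi, 2*pi) - pi. A quick standalone check of that wrapping formula with numpy; the helper name is mine, only the formula comes from the source:

import numpy as np

def signed_angle_error(target, current):
    # Wrap the raw difference into [-pi, pi) so the shortest rotation wins.
    return np.mod(target - current + np.pi, 2 * np.pi) - np.pi

# Turning from 350 deg to 10 deg is a +20 deg move, not -340 deg.
assert np.isclose(signed_angle_error(np.deg2rad(10), np.deg2rad(350)), np.deg2rad(20))
# A half turn maps to the interval boundary -pi.
assert np.isclose(signed_angle_error(np.pi, 0.0), -np.pi)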
raphacosta27/geopandas | [
"2c22a26bd40ec48536026b160c54c6fe523d22d7"
] | [
"geopandas/tests/test_geom_methods.py"
] | [
"import string\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nfrom pandas import DataFrame, MultiIndex, Series\n\nfrom shapely.geometry import LinearRing, LineString, MultiPoint, Point, Polygon\nfrom shapely.geometry.collection import GeometryCollection\nfrom shapely.ops import unary_union\n\nfrom geopandas import GeoDataFrame, GeoSeries\nfrom geopandas.base import GeoPandasBase\n\nfrom geopandas.tests.util import assert_geoseries_equal, geom_almost_equals, geom_equals\nfrom pandas.testing import assert_frame_equal, assert_series_equal\nimport pytest\n\n\ndef assert_array_dtype_equal(a, b, *args, **kwargs):\n a = np.asanyarray(a)\n b = np.asanyarray(b)\n assert a.dtype == b.dtype\n assert_array_equal(a, b, *args, **kwargs)\n\n\nclass TestGeomMethods:\n def setup_method(self):\n self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])\n self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])\n self.t3 = Polygon([(2, 0), (3, 0), (3, 1)])\n self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])\n self.inner_sq = Polygon(\n [(0.25, 0.25), (0.75, 0.25), (0.75, 0.75), (0.25, 0.75)]\n )\n self.nested_squares = Polygon(self.sq.boundary, [self.inner_sq.boundary])\n self.p0 = Point(5, 5)\n self.p3d = Point(5, 5, 5)\n self.g0 = GeoSeries(\n [\n self.t1,\n self.t2,\n self.sq,\n self.inner_sq,\n self.nested_squares,\n self.p0,\n None,\n ]\n )\n self.g1 = GeoSeries([self.t1, self.sq])\n self.g2 = GeoSeries([self.sq, self.t1])\n self.g3 = GeoSeries([self.t1, self.t2])\n self.g3.crs = \"epsg:4326\"\n self.g4 = GeoSeries([self.t2, self.t1])\n self.g4.crs = \"epsg:4326\"\n self.g_3d = GeoSeries([self.p0, self.p3d])\n self.na = GeoSeries([self.t1, self.t2, Polygon()])\n self.na_none = GeoSeries([self.t1, None])\n self.a1 = self.g1.copy()\n self.a1.index = [\"A\", \"B\"]\n self.a2 = self.g2.copy()\n self.a2.index = [\"B\", \"C\"]\n self.esb = Point(-73.9847, 40.7484)\n self.sol = Point(-74.0446, 40.6893)\n self.landmarks = GeoSeries([self.esb, self.sol], crs=\"epsg:4326\")\n self.l1 = LineString([(0, 0), (0, 1), (1, 1)])\n self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])\n self.g5 = GeoSeries([self.l1, self.l2])\n self.g6 = GeoSeries([self.p0, self.t3])\n self.empty = GeoSeries([])\n self.all_none = GeoSeries([None, None])\n self.empty_poly = Polygon()\n\n # Crossed lines\n self.l3 = LineString([(0, 0), (1, 1)])\n self.l4 = LineString([(0, 1), (1, 0)])\n self.crossed_lines = GeoSeries([self.l3, self.l4])\n\n # Placeholder for testing, will just drop in different geometries\n # when needed\n self.gdf1 = GeoDataFrame(\n {\"geometry\": self.g1, \"col0\": [1.0, 2.0], \"col1\": [\"geo\", \"pandas\"]}\n )\n self.gdf2 = GeoDataFrame(\n {\"geometry\": self.g1, \"col3\": [4, 5], \"col4\": [\"rand\", \"string\"]}\n )\n self.gdf3 = GeoDataFrame(\n {\"geometry\": self.g3, \"col3\": [4, 5], \"col4\": [\"rand\", \"string\"]}\n )\n\n def _test_unary_real(self, op, expected, a):\n \"\"\" Tests for 'area', 'length', 'is_valid', etc. \"\"\"\n fcmp = assert_series_equal\n self._test_unary(op, expected, a, fcmp)\n\n def _test_unary_topological(self, op, expected, a):\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n\n def fcmp(a, b):\n assert a.equals(b)\n\n self._test_unary(op, expected, a, fcmp)\n\n def _test_binary_topological(self, op, expected, a, b, *args, **kwargs):\n \"\"\" Tests for 'intersection', 'union', 'symmetric_difference', etc. 
\"\"\"\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n\n def fcmp(a, b):\n assert geom_equals(a, b)\n\n if isinstance(b, GeoPandasBase):\n right_df = True\n else:\n right_df = False\n\n self._binary_op_test(op, expected, a, b, fcmp, True, right_df, *args, **kwargs)\n\n def _test_binary_real(self, op, expected, a, b, *args, **kwargs):\n fcmp = assert_series_equal\n self._binary_op_test(op, expected, a, b, fcmp, True, False, *args, **kwargs)\n\n def _test_binary_operator(self, op, expected, a, b):\n \"\"\"\n The operators only have GeoSeries on the left, but can have\n GeoSeries or GeoDataFrame on the right.\n If GeoDataFrame is on the left, geometry column is used.\n\n \"\"\"\n if isinstance(expected, GeoPandasBase):\n fcmp = assert_geoseries_equal\n else:\n\n def fcmp(a, b):\n assert geom_equals(a, b)\n\n if isinstance(b, GeoPandasBase):\n right_df = True\n else:\n right_df = False\n\n self._binary_op_test(op, expected, a, b, fcmp, False, right_df)\n\n def _binary_op_test(\n self, op, expected, left, right, fcmp, left_df, right_df, *args, **kwargs\n ):\n \"\"\"\n This is a helper to call a function on GeoSeries and GeoDataFrame\n arguments. For example, 'intersection' is a member of both GeoSeries\n and GeoDataFrame and can take either GeoSeries or GeoDataFrame inputs.\n This function has the ability to test all four combinations of input\n types.\n\n Parameters\n ----------\n\n expected : str\n The operation to be tested. e.g., 'intersection'\n left: GeoSeries\n right: GeoSeries\n fcmp: function\n Called with the result of the operation and expected. It should\n assert if the result is incorrect\n left_df: bool\n If the left input should also be called with a GeoDataFrame\n right_df: bool\n Indicates whether the right input should be called with a\n GeoDataFrame\n\n \"\"\"\n\n def _make_gdf(s):\n n = len(s)\n col1 = string.ascii_lowercase[:n]\n col2 = range(n)\n\n return GeoDataFrame(\n {\"geometry\": s.values, \"col1\": col1, \"col2\": col2},\n index=s.index,\n crs=s.crs,\n )\n\n # Test GeoSeries.op(GeoSeries)\n result = getattr(left, op)(right, *args, **kwargs)\n fcmp(result, expected)\n\n if left_df:\n # Test GeoDataFrame.op(GeoSeries)\n gdf_left = _make_gdf(left)\n result = getattr(gdf_left, op)(right, *args, **kwargs)\n fcmp(result, expected)\n\n if right_df:\n # Test GeoSeries.op(GeoDataFrame)\n gdf_right = _make_gdf(right)\n result = getattr(left, op)(gdf_right, *args, **kwargs)\n fcmp(result, expected)\n\n if left_df:\n # Test GeoDataFrame.op(GeoDataFrame)\n result = getattr(gdf_left, op)(gdf_right, *args, **kwargs)\n fcmp(result, expected)\n\n def _test_unary(self, op, expected, a, fcmp):\n # GeoSeries, (GeoSeries or geometry)\n result = getattr(a, op)\n fcmp(result, expected)\n\n # GeoDataFrame, (GeoSeries or geometry)\n gdf = self.gdf1.set_geometry(a)\n result = getattr(gdf, op)\n fcmp(result, expected)\n\n # TODO reenable for all operations once we use pyproj > 2\n # def test_crs_warning(self):\n # # operations on geometries should warn for different CRS\n # no_crs_g3 = self.g3.copy()\n # no_crs_g3.crs = None\n # with pytest.warns(UserWarning):\n # self._test_binary_topological('intersection', self.g3,\n # self.g3, no_crs_g3)\n\n def test_intersection(self):\n self._test_binary_topological(\"intersection\", self.t1, self.g1, self.g2)\n with pytest.warns(UserWarning, match=\"The indices .+ different\"):\n self._test_binary_topological(\n \"intersection\", self.all_none, self.g1, self.empty\n )\n\n def test_union_series(self):\n 
self._test_binary_topological(\"union\", self.sq, self.g1, self.g2)\n\n def test_union_polygon(self):\n self._test_binary_topological(\"union\", self.sq, self.g1, self.t2)\n\n def test_symmetric_difference_series(self):\n self._test_binary_topological(\"symmetric_difference\", self.sq, self.g3, self.g4)\n\n def test_symmetric_difference_poly(self):\n expected = GeoSeries([GeometryCollection(), self.sq], crs=self.g3.crs)\n self._test_binary_topological(\n \"symmetric_difference\", expected, self.g3, self.t1\n )\n\n def test_difference_series(self):\n expected = GeoSeries([GeometryCollection(), self.t2])\n self._test_binary_topological(\"difference\", expected, self.g1, self.g2)\n\n def test_difference_poly(self):\n expected = GeoSeries([self.t1, self.t1])\n self._test_binary_topological(\"difference\", expected, self.g1, self.t2)\n\n def test_geo_op_empty_result(self):\n l1 = LineString([(0, 0), (1, 1)])\n l2 = LineString([(2, 2), (3, 3)])\n expected = GeoSeries([GeometryCollection()])\n # binary geo resulting in empty geometry\n result = GeoSeries([l1]).intersection(l2)\n assert_geoseries_equal(result, expected)\n # binary geo empty result with right GeoSeries\n result = GeoSeries([l1]).intersection(GeoSeries([l2]))\n assert_geoseries_equal(result, expected)\n # unary geo resulting in emtpy geometry\n result = GeoSeries([GeometryCollection()]).convex_hull\n assert_geoseries_equal(result, expected)\n\n def test_boundary(self):\n l1 = LineString([(0, 0), (1, 0), (1, 1), (0, 0)])\n l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])\n expected = GeoSeries([l1, l2], index=self.g1.index, crs=self.g1.crs)\n\n self._test_unary_topological(\"boundary\", expected, self.g1)\n\n def test_area(self):\n expected = Series(np.array([0.5, 1.0]), index=self.g1.index)\n self._test_unary_real(\"area\", expected, self.g1)\n\n expected = Series(np.array([0.5, np.nan]), index=self.na_none.index)\n self._test_unary_real(\"area\", expected, self.na_none)\n\n def test_bounds(self):\n # Set columns to get the order right\n expected = DataFrame(\n {\n \"minx\": [0.0, 0.0],\n \"miny\": [0.0, 0.0],\n \"maxx\": [1.0, 1.0],\n \"maxy\": [1.0, 1.0],\n },\n index=self.g1.index,\n columns=[\"minx\", \"miny\", \"maxx\", \"maxy\"],\n )\n\n result = self.g1.bounds\n assert_frame_equal(expected, result)\n\n gdf = self.gdf1.set_geometry(self.g1)\n result = gdf.bounds\n assert_frame_equal(expected, result)\n\n def test_bounds_empty(self):\n # test bounds of empty GeoSeries\n # https://github.com/geopandas/geopandas/issues/1195\n s = GeoSeries([])\n result = s.bounds\n expected = DataFrame(\n columns=[\"minx\", \"miny\", \"maxx\", \"maxy\"], index=s.index, dtype=\"float64\"\n )\n assert_frame_equal(result, expected)\n\n def test_unary_union(self):\n p1 = self.t1\n p2 = Polygon([(2, 0), (3, 0), (3, 1)])\n expected = unary_union([p1, p2])\n g = GeoSeries([p1, p2])\n\n self._test_unary_topological(\"unary_union\", expected, g)\n\n def test_contains(self):\n expected = [True, False, True, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.contains(self.t1))\n\n def test_length(self):\n expected = Series(np.array([2 + np.sqrt(2), 4]), index=self.g1.index)\n self._test_unary_real(\"length\", expected, self.g1)\n\n expected = Series(np.array([2 + np.sqrt(2), np.nan]), index=self.na_none.index)\n self._test_unary_real(\"length\", expected, self.na_none)\n\n def test_crosses(self):\n expected = [False, False, False, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.crosses(self.t1))\n\n 
expected = [False, True]\n assert_array_dtype_equal(expected, self.crossed_lines.crosses(self.l3))\n\n def test_disjoint(self):\n expected = [False, False, False, False, False, True, False]\n assert_array_dtype_equal(expected, self.g0.disjoint(self.t1))\n\n def test_relate(self):\n expected = Series(\n [\n \"212101212\",\n \"212101212\",\n \"212FF1FF2\",\n \"2FFF1FFF2\",\n \"FF2F112F2\",\n \"FF0FFF212\",\n None,\n ],\n index=self.g0.index,\n )\n assert_array_dtype_equal(expected, self.g0.relate(self.inner_sq))\n\n expected = Series([\"FF0FFF212\", None], index=self.g6.index)\n assert_array_dtype_equal(expected, self.g6.relate(self.na_none))\n\n def test_distance(self):\n expected = Series(\n np.array([np.sqrt((5 - 1) ** 2 + (5 - 1) ** 2), np.nan]), self.na_none.index\n )\n assert_array_dtype_equal(expected, self.na_none.distance(self.p0))\n\n expected = Series(np.array([np.sqrt(4 ** 2 + 4 ** 2), np.nan]), self.g6.index)\n assert_array_dtype_equal(expected, self.g6.distance(self.na_none))\n\n def test_intersects(self):\n expected = [True, True, True, True, True, False, False]\n assert_array_dtype_equal(expected, self.g0.intersects(self.t1))\n\n expected = [True, False]\n assert_array_dtype_equal(expected, self.na_none.intersects(self.t2))\n\n expected = np.array([], dtype=bool)\n assert_array_dtype_equal(expected, self.empty.intersects(self.t1))\n\n expected = np.array([], dtype=bool)\n assert_array_dtype_equal(expected, self.empty.intersects(self.empty_poly))\n\n expected = [False] * 7\n assert_array_dtype_equal(expected, self.g0.intersects(self.empty_poly))\n\n def test_overlaps(self):\n expected = [True, True, False, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.overlaps(self.inner_sq))\n\n expected = [False, False]\n assert_array_dtype_equal(expected, self.g4.overlaps(self.t1))\n\n def test_touches(self):\n expected = [False, True, False, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.touches(self.t1))\n\n def test_within(self):\n expected = [True, False, False, False, False, False, False]\n assert_array_dtype_equal(expected, self.g0.within(self.t1))\n\n expected = [True, True, True, True, True, False, False]\n assert_array_dtype_equal(expected, self.g0.within(self.sq))\n\n def test_is_valid(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real(\"is_valid\", expected, self.g1)\n\n def test_is_empty(self):\n expected = Series(np.array([False] * len(self.g1)), self.g1.index)\n self._test_unary_real(\"is_empty\", expected, self.g1)\n\n def test_is_ring(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real(\"is_ring\", expected, self.g1)\n\n def test_is_simple(self):\n expected = Series(np.array([True] * len(self.g1)), self.g1.index)\n self._test_unary_real(\"is_simple\", expected, self.g1)\n\n def test_has_z(self):\n expected = Series([False, True], self.g_3d.index)\n self._test_unary_real(\"has_z\", expected, self.g_3d)\n\n def test_xy_points(self):\n expected_x = [-73.9847, -74.0446]\n expected_y = [40.7484, 40.6893]\n\n assert_array_dtype_equal(expected_x, self.landmarks.geometry.x)\n assert_array_dtype_equal(expected_y, self.landmarks.geometry.y)\n\n def test_xy_polygons(self):\n # accessing x attribute in polygon geoseries should raise an error\n with pytest.raises(ValueError):\n _ = self.gdf1.geometry.x\n # and same for accessing y attribute in polygon geoseries\n with pytest.raises(ValueError):\n _ = self.gdf1.geometry.y\n\n def 
test_centroid(self):\n polygon = Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)])\n point = Point(0, 0)\n polygons = GeoSeries([polygon for i in range(3)])\n points = GeoSeries([point for i in range(3)])\n assert_geoseries_equal(polygons.centroid, points)\n\n def test_convex_hull(self):\n # the convex hull of a square should be the same as the square\n squares = GeoSeries([self.sq for i in range(3)])\n assert_geoseries_equal(squares, squares.convex_hull)\n\n def test_exterior(self):\n exp_exterior = GeoSeries([LinearRing(p.boundary) for p in self.g3])\n for expected, computed in zip(exp_exterior, self.g3.exterior):\n assert computed.equals(expected)\n\n def test_interiors(self):\n original = GeoSeries([self.t1, self.nested_squares])\n\n # This is a polygon with no interior.\n expected = []\n assert original.interiors[0] == expected\n # This is a polygon with an interior.\n expected = LinearRing(self.inner_sq.boundary)\n assert original.interiors[1][0].equals(expected)\n\n def test_interpolate(self):\n expected = GeoSeries([Point(0.5, 1.0), Point(0.75, 1.0)])\n self._test_binary_topological(\n \"interpolate\", expected, self.g5, 0.75, normalized=True\n )\n\n expected = GeoSeries([Point(0.5, 1.0), Point(1.0, 0.5)])\n self._test_binary_topological(\"interpolate\", expected, self.g5, 1.5)\n\n def test_interpolate_distance_array(self):\n expected = GeoSeries([Point(0.0, 0.75), Point(1.0, 0.5)])\n self._test_binary_topological(\n \"interpolate\", expected, self.g5, np.array([0.75, 1.5])\n )\n\n expected = GeoSeries([Point(0.5, 1.0), Point(0.0, 1.0)])\n self._test_binary_topological(\n \"interpolate\", expected, self.g5, np.array([0.75, 1.5]), normalized=True\n )\n\n def test_interpolate_distance_wrong_length(self):\n distances = np.array([1, 2, 3])\n with pytest.raises(ValueError):\n self.g5.interpolate(distances)\n\n def test_interpolate_distance_wrong_index(self):\n distances = Series([1, 2], index=[99, 98])\n with pytest.raises(ValueError):\n self.g5.interpolate(distances)\n\n def test_project(self):\n expected = Series([2.0, 1.5], index=self.g5.index)\n p = Point(1.0, 0.5)\n self._test_binary_real(\"project\", expected, self.g5, p)\n\n expected = Series([1.0, 0.5], index=self.g5.index)\n self._test_binary_real(\"project\", expected, self.g5, p, normalized=True)\n\n def test_affine_transform(self):\n # 45 degree reflection matrix\n matrix = [0, 1, 1, 0, 0, 0]\n expected = self.g4\n\n res = self.g3.affine_transform(matrix)\n assert_geoseries_equal(expected, res)\n\n def test_translate_tuple(self):\n trans = self.sol.x - self.esb.x, self.sol.y - self.esb.y\n assert self.landmarks.translate(*trans)[0].equals(self.sol)\n\n res = self.gdf1.set_geometry(self.landmarks).translate(*trans)[0]\n assert res.equals(self.sol)\n\n def test_rotate(self):\n angle = 98\n expected = self.g4\n\n o = Point(0, 0)\n res = self.g4.rotate(angle, origin=o).rotate(-angle, origin=o)\n assert geom_almost_equals(self.g4, res)\n\n res = self.gdf1.set_geometry(self.g4).rotate(angle, origin=Point(0, 0))\n assert geom_almost_equals(expected, res.rotate(-angle, origin=o))\n\n def test_scale(self):\n expected = self.g4\n\n scale = 2.0, 1.0\n inv = tuple(1.0 / i for i in scale)\n\n o = Point(0, 0)\n res = self.g4.scale(*scale, origin=o).scale(*inv, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).scale(*scale, origin=o)\n res = res.scale(*inv, origin=o)\n assert geom_almost_equals(expected, res)\n\n def test_skew(self):\n expected = self.g4\n\n skew = 45.0\n o = Point(0, 0)\n\n # Test 
xs\n res = self.g4.skew(xs=skew, origin=o).skew(xs=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).skew(xs=skew, origin=o)\n res = res.skew(xs=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n # Test ys\n res = self.g4.skew(ys=skew, origin=o).skew(ys=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n res = self.gdf1.set_geometry(self.g4).skew(ys=skew, origin=o)\n res = res.skew(ys=-skew, origin=o)\n assert geom_almost_equals(expected, res)\n\n def test_buffer(self):\n original = GeoSeries([Point(0, 0)])\n expected = GeoSeries([Polygon(((5, 0), (0, -5), (-5, 0), (0, 5), (5, 0)))])\n calculated = original.buffer(5, resolution=1)\n assert geom_almost_equals(expected, calculated)\n\n def test_buffer_args(self):\n args = dict(cap_style=3, join_style=2, mitre_limit=2.5)\n calculated_series = self.g0.buffer(10, **args)\n for original, calculated in zip(self.g0, calculated_series):\n if original is None:\n assert calculated is None\n else:\n expected = original.buffer(10, **args)\n assert calculated.equals(expected)\n\n def test_buffer_distance_array(self):\n original = GeoSeries([self.p0, self.p0])\n expected = GeoSeries(\n [\n Polygon(((6, 5), (5, 4), (4, 5), (5, 6), (6, 5))),\n Polygon(((10, 5), (5, 0), (0, 5), (5, 10), (10, 5))),\n ]\n )\n calculated = original.buffer(np.array([1, 5]), resolution=1)\n assert_geoseries_equal(calculated, expected, check_less_precise=True)\n\n def test_buffer_distance_wrong_length(self):\n original = GeoSeries([self.p0, self.p0])\n distances = np.array([1, 2, 3])\n with pytest.raises(ValueError):\n original.buffer(distances)\n\n def test_buffer_distance_wrong_index(self):\n original = GeoSeries([self.p0, self.p0], index=[0, 1])\n distances = Series(data=[1, 2], index=[99, 98])\n with pytest.raises(ValueError):\n original.buffer(distances)\n\n def test_buffer_empty_none(self):\n p = Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])\n s = GeoSeries([p, GeometryCollection(), None])\n result = s.buffer(0)\n assert_geoseries_equal(result, s)\n\n result = s.buffer(np.array([0, 0, 0]))\n assert_geoseries_equal(result, s)\n\n def test_envelope(self):\n e = self.g3.envelope\n assert np.all(e.geom_equals(self.sq))\n assert isinstance(e, GeoSeries)\n assert self.g3.crs == e.crs\n\n def test_total_bounds(self):\n bbox = self.sol.x, self.sol.y, self.esb.x, self.esb.y\n assert isinstance(self.landmarks.total_bounds, np.ndarray)\n assert tuple(self.landmarks.total_bounds) == bbox\n\n df = GeoDataFrame(\n {\"geometry\": self.landmarks, \"col1\": range(len(self.landmarks))}\n )\n assert tuple(df.total_bounds) == bbox\n\n def test_explode_geoseries(self):\n s = GeoSeries(\n [MultiPoint([(0, 0), (1, 1)]), MultiPoint([(2, 2), (3, 3), (4, 4)])]\n )\n s.index.name = \"test_index_name\"\n expected_index_name = [\"test_index_name\", None]\n index = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 2)]\n expected = GeoSeries(\n [Point(0, 0), Point(1, 1), Point(2, 2), Point(3, 3), Point(4, 4)],\n index=MultiIndex.from_tuples(index, names=expected_index_name),\n )\n assert_geoseries_equal(expected, s.explode())\n\n @pytest.mark.parametrize(\"index_name\", [None, \"test\"])\n def test_explode_geodataframe(self, index_name):\n s = GeoSeries([MultiPoint([Point(1, 2), Point(2, 3)]), Point(5, 5)])\n df = GeoDataFrame({\"col\": [1, 2], \"geometry\": s})\n df.index.name = index_name\n\n test_df = df.explode()\n\n expected_s = GeoSeries([Point(1, 2), Point(2, 3), Point(5, 5)])\n expected_df = GeoDataFrame({\"col\": [1, 1, 2], 
\"geometry\": expected_s})\n expected_index = MultiIndex(\n [[0, 1], [0, 1]], # levels\n [[0, 0, 1], [0, 1, 0]], # labels/codes\n names=[index_name, None],\n )\n expected_df = expected_df.set_index(expected_index)\n assert_frame_equal(test_df, expected_df)\n\n #\n # Test '&', '|', '^', and '-'\n #\n def test_intersection_operator(self):\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__and__\", self.t1, self.g1, self.g2)\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__and__\", self.t1, self.gdf1, self.g2)\n\n def test_union_operator(self):\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__or__\", self.sq, self.g1, self.g2)\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__or__\", self.sq, self.gdf1, self.g2)\n\n def test_union_operator_polygon(self):\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__or__\", self.sq, self.g1, self.t2)\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__or__\", self.sq, self.gdf1, self.t2)\n\n def test_symmetric_difference_operator(self):\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__xor__\", self.sq, self.g3, self.g4)\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__xor__\", self.sq, self.gdf3, self.g4)\n\n def test_difference_series2(self):\n expected = GeoSeries([GeometryCollection(), self.t2])\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__sub__\", expected, self.g1, self.g2)\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__sub__\", expected, self.gdf1, self.g2)\n\n def test_difference_poly2(self):\n expected = GeoSeries([self.t1, self.t1])\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__sub__\", expected, self.g1, self.t2)\n with pytest.warns(DeprecationWarning):\n self._test_binary_operator(\"__sub__\", expected, self.gdf1, self.t2)\n"
] | [
[
"pandas.MultiIndex",
"numpy.sqrt",
"pandas.Series",
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"numpy.asanyarray",
"pandas.MultiIndex.from_tuples",
"numpy.array",
"pandas.testing.assert_frame_equal"
]
] |
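The geopandas tests in the row above exercise `GeoSeries` geometry accessors and methods (centroid, buffer with a per-element distance array, explode of multi-part geometries). A minimal sketch of that usage pattern, assuming geopandas and shapely are installed; the data below is illustrative and not taken from the test fixtures:

import numpy as np
from shapely.geometry import MultiPoint, Point
from geopandas import GeoSeries

points = GeoSeries([Point(0, 0), Point(5, 5)])
print(points.centroid)                      # element-wise centroids
print(points.buffer(np.array([1.0, 2.0])))  # one buffer distance per geometry

multi = GeoSeries([MultiPoint([(0, 0), (1, 1)])])
print(multi.explode())                      # one row per part, with a MultiIndex as tested above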
moesio-f/agents | [
"53ce87c9203222585fdcd833e052fcdce1b6fa37"
] | [
"tf_agents/policies/tf_policy.py"
] | [
"# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TensorFlow Policies API.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nfrom typing import Optional, Text, Sequence\n\nimport six\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tf_agents.distributions import reparameterized_sampling\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.trajectories import trajectory\nfrom tf_agents.typing import types\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\n\n\ntfd = tfp.distributions\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass TFPolicy(tf.Module):\n \"\"\"Abstract base class for TF Policies.\n\n The Policy represents a mapping from `time_steps` recieved from the\n environment to `actions` that can be applied to the environment.\n\n Agents expose two policies. A `policy` meant for deployment and evaluation,\n and a `collect_policy` for collecting data from the environment. The\n `collect_policy` is usually stochastic for exploring the environment better\n and may log auxilliary information such as log probabilities required for\n training as well. `Policy` objects can also be created directly by the users\n without using an `Agent`.\n\n The main methods of TFPolicy are:\n\n * `action`: Maps a `time_step` from the environment to an action.\n * `distribution`: Maps a `time_step` to a distribution over actions.\n * `get_initial_state`: Generates the initial state for stateful policies, e.g.\n RNN/LSTM policies.\n\n Example usage:\n\n ```\n env = SomeTFEnvironment()\n policy = TFRandomPolicy(env.time_step_spec(), env.action_spec())\n # Or policy = agent.policy or agent.collect_policy\n\n policy_state = policy.get_initial_state(env.batch_size)\n time_step = env.reset()\n\n while not time_step.is_last():\n policy_step = policy.action(time_step, policy_state)\n time_step = env.step(policy_step.action)\n\n policy_state = policy_step.state\n # policy_step.info may contain side info for logging, such as action log\n # probabilities.\n ```\n\n Policies can be saved to disk as SavedModels (see policy_saver.py and\n policy_loader.py) or as TF Checkpoints.\n\n A `PyTFEagerPolicy` can be used to wrap a `TFPolicy` so that it works with\n `PyEnvironment`s.\n\n\n **NOTE**: For API consistency, subclasses are not allowed to override public\n methods of `TFPolicy` class. Instead, they may implement the protected methods\n including `_get_initial_state`, `_action`, and `_distribution`. 
This\n  public-calls-private convention allows this base class to do things like\n  properly add `spec` and shape checks, which provide users an easier experience\n  when debugging their environments and networks.\n\n  For researchers, and those developing new Policies, the `TFPolicy` base class\n  constructor also accepts a `validate_args` parameter. If `False`, this\n  disables all spec structure, dtype, and shape checks in the public methods of\n  these classes. It allows algorithm developers to iterate and try different\n  input and output structures without worrying about overly restrictive\n  requirements, or input and output states being in a certain format. However,\n  *disabling argument validation* can make it very hard to identify structural\n  input or algorithmic errors, and should not be done for final, or\n  production-ready, Policies. In addition to having implementations that may\n  disagree with specs, this means that the resulting Policy may no longer\n  interact well with other parts of TF-Agents. Examples include impedance\n  mismatches with Actor/Learner APIs, replay buffers, and the model export\n  functionality in `PolicySaver`.\n  \"\"\"\n\n  # TODO(b/127327645) Remove this attribute.\n  # This attribute allows subclasses to back out of automatic tf.function\n  # attribute inside TF1 (for autodeps).\n  _enable_functions = True\n\n  def __init__(\n      self,\n      time_step_spec: ts.TimeStep,\n      action_spec: types.NestedTensorSpec,\n      policy_state_spec: types.NestedTensorSpec = (),\n      info_spec: types.NestedTensorSpec = (),\n      clip: bool = True,\n      emit_log_probability: bool = False,\n      automatic_state_reset: bool = True,\n      observation_and_action_constraint_splitter: Optional[\n          types.Splitter] = None,\n      validate_args: bool = True,\n      name: Optional[Text] = None):\n    \"\"\"Initialization of TFPolicy class.\n\n    Args:\n      time_step_spec: A `TimeStep` spec of the expected time_steps. Usually\n        provided by the user to the subclass.\n      action_spec: A nest of BoundedTensorSpec representing the actions. Usually\n        provided by the user to the subclass.\n      policy_state_spec: A nest of TensorSpec representing the policy_state.\n        Provided by the subclass, not directly by the user.\n      info_spec: A nest of TensorSpec representing the policy info. Provided by\n        the subclass, not directly by the user.\n      clip: Whether to clip actions to spec before returning them. Default\n        True. Most policy-based algorithms (PCL, PPO, REINFORCE) use unclipped\n        continuous actions for training.\n      emit_log_probability: Emit log-probabilities of actions, if supported. If\n        True, policy_step.info will have CommonFields.LOG_PROBABILITY set.\n        Please consult utility methods provided in policy_step for setting and\n        retrieving these. When working with custom policies, either provide a\n        dictionary info_spec or a namedtuple with the field 'log_probability'.\n      automatic_state_reset: If `True`, then `get_initial_policy_state` is used\n        to clear state in `action()` and `distribution()` for time steps\n        where `time_step.is_first()`.\n      observation_and_action_constraint_splitter: A function used to process\n        observations with action constraints. These constraints can indicate,\n        for example, a mask of valid/invalid actions for a given state of the\n        environment. The function takes in a full observation and returns a\n        tuple consisting of 1) the part of the observation intended as input to\n        the network and 2) the constraint. 
An example\n        `observation_and_action_constraint_splitter` could be as simple as:\n        ```\n        def observation_and_action_constraint_splitter(observation):\n          return observation['network_input'], observation['constraint']\n        ```\n        *Note*: when using `observation_and_action_constraint_splitter`, make\n        sure the provided `q_network` is compatible with the network-specific\n        half of the output of the\n        `observation_and_action_constraint_splitter`. In particular,\n        `observation_and_action_constraint_splitter` will be called on the\n        observation before passing to the network. If\n        `observation_and_action_constraint_splitter` is None, action\n        constraints are not applied.\n      validate_args: Python bool. Whether to verify inputs to, and outputs of,\n        functions like `action` and `distribution` against spec structures,\n        dtypes, and shapes.\n\n        Research code may prefer to set this value to `False` to allow iterating\n        on input and output structures without being hamstrung by overly\n        rigid checking (at the cost of harder-to-debug errors).\n\n        See also `TFAgent.validate_args`.\n      name: A name for this module. Defaults to the class name.\n    \"\"\"\n    super(TFPolicy, self).__init__(name=name)\n    common.check_tf1_allowed()\n    common.tf_agents_gauge.get_cell('TFAPolicy').set(True)\n    common.assert_members_are_not_overridden(base_cls=TFPolicy, instance=self)\n    if not isinstance(time_step_spec, ts.TimeStep):\n      raise ValueError(\n          'The `time_step_spec` must be an instance of `TimeStep`, but is `{}`.'\n          .format(type(time_step_spec)))\n\n    self._time_step_spec = tensor_spec.from_spec(time_step_spec)\n    self._action_spec = tensor_spec.from_spec(action_spec)\n    self._policy_state_spec = tensor_spec.from_spec(policy_state_spec)\n    self._emit_log_probability = emit_log_probability\n    self._validate_args = validate_args\n\n    if emit_log_probability:\n      log_probability_spec = tensor_spec.BoundedTensorSpec(\n          shape=(),\n          dtype=tf.float32,\n          maximum=0,\n          minimum=-float('inf'),\n          name='log_probability')\n      log_probability_spec = tf.nest.map_structure(\n          lambda _: log_probability_spec, action_spec)\n      info_spec = policy_step.set_log_probability(\n          info_spec, log_probability_spec)  # pytype: disable=wrong-arg-types\n\n    self._info_spec = tensor_spec.from_spec(info_spec)\n    self._setup_specs()\n    self._clip = clip\n    self._action_fn = common.function_in_tf1(experimental_relax_shapes=False)(\n        self._action)\n    self._automatic_state_reset = automatic_state_reset\n    self._observation_and_action_constraint_splitter = (\n        observation_and_action_constraint_splitter)\n\n  def _setup_specs(self):\n    self._policy_step_spec = policy_step.PolicyStep(\n        action=self._action_spec,\n        state=self._policy_state_spec,\n        info=self._info_spec)\n    self._trajectory_spec = trajectory.from_transition(self._time_step_spec,\n                                                       self._policy_step_spec,\n                                                       self._time_step_spec)\n\n  def variables(self) -> Sequence[tf.Variable]:\n    \"\"\"Returns the list of Variables that belong to the policy.\"\"\"\n    # Ignore self._variables() in favor of using tf.Module's tracking.\n    return super(TFPolicy, self).variables\n\n  @property\n  def observation_and_action_constraint_splitter(self) -> types.Splitter:\n    return self._observation_and_action_constraint_splitter\n\n  @property\n  def validate_args(self) -> bool:\n    \"\"\"Whether `action` & `distribution` validate input and output args.\"\"\"\n    return self._validate_args\n\n  def get_initial_state(self,\n                        batch_size: Optional[types.Int]) -> types.NestedTensor:\n    \"\"\"Returns an initial state usable by the policy.\n\n    Args:\n      batch_size: Tensor or constant: 
size of the batch dimension. Can be None\n in which case no dimensions gets added.\n\n Returns:\n A nested object of type `policy_state` containing properly\n initialized Tensors.\n \"\"\"\n return self._get_initial_state(batch_size)\n\n def _maybe_reset_state(self, time_step, policy_state):\n if policy_state is (): # pylint: disable=literal-comparison\n return policy_state\n\n batch_size = tf.compat.dimension_value(time_step.discount.shape[0])\n if batch_size is None:\n batch_size = tf.shape(time_step.discount)[0]\n\n # Make sure we call this with a kwarg as it may be wrapped in tf.function\n # which would expect a tensor if it was not a kwarg.\n zero_state = self.get_initial_state(batch_size=batch_size)\n condition = time_step.is_first()\n # When experience is a sequence we only reset automatically for the first\n # time_step in the sequence as we can't easily generalize how the policy is\n # unrolled over the sequence.\n if nest_utils.get_outer_rank(time_step, self._time_step_spec) > 1:\n condition = time_step.is_first()[:, 0, ...]\n return nest_utils.where(condition, zero_state, policy_state)\n\n def action(self,\n time_step: ts.TimeStep,\n policy_state: types.NestedTensor = (),\n seed: Optional[types.Seed] = None) -> policy_step.PolicyStep:\n \"\"\"Generates next action given the time_step and policy_state.\n\n Args:\n time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.\n policy_state: A Tensor, or a nested dict, list or tuple of Tensors\n representing the previous policy_state.\n seed: Seed to use if action performs sampling (optional).\n\n Returns:\n A `PolicyStep` named tuple containing:\n `action`: An action Tensor matching the `action_spec`.\n `state`: A policy state tensor to be fed into the next call to action.\n `info`: Optional side information such as action log probabilities.\n\n Raises:\n RuntimeError: If subclass __init__ didn't call super().__init__.\n ValueError or TypeError: If `validate_args is True` and inputs or\n outputs do not match `time_step_spec`, `policy_state_spec`,\n or `policy_step_spec`.\n \"\"\"\n if self._enable_functions and getattr(self, '_action_fn', None) is None:\n raise RuntimeError(\n 'Cannot find _action_fn. Did %s.__init__ call super?' %\n type(self).__name__)\n if self._enable_functions:\n action_fn = self._action_fn\n else:\n action_fn = self._action\n\n if self._validate_args:\n time_step = nest_utils.prune_extra_keys(self._time_step_spec, time_step)\n policy_state = nest_utils.prune_extra_keys(\n self._policy_state_spec, policy_state)\n nest_utils.assert_same_structure(\n time_step,\n self._time_step_spec,\n message='time_step and time_step_spec structures do not match')\n # TODO(b/158804957): Use literal comparison because in some strange cases\n # (tf.function? autograph?) 
the expression \"x not in (None, (), [])\" gets\n # converted to a tensor.\n if not (policy_state is None or policy_state is () or policy_state is []): # pylint: disable=literal-comparison\n nest_utils.assert_same_structure(\n policy_state,\n self._policy_state_spec,\n message=('policy_state and policy_state_spec '\n 'structures do not match'))\n\n if self._automatic_state_reset:\n policy_state = self._maybe_reset_state(time_step, policy_state)\n step = action_fn(time_step=time_step, policy_state=policy_state, seed=seed)\n\n def clip_action(action, action_spec):\n if isinstance(action_spec, tensor_spec.BoundedTensorSpec):\n return common.clip_to_spec(action, action_spec)\n return action\n\n if self._validate_args:\n nest_utils.assert_same_structure(\n step.action, self._action_spec,\n message='action and action_spec structures do not match')\n\n if self._clip:\n clipped_actions = tf.nest.map_structure(clip_action,\n step.action,\n self._action_spec)\n step = step._replace(action=clipped_actions)\n\n if self._validate_args:\n nest_utils.assert_same_structure(\n step,\n self._policy_step_spec,\n message='action output and policy_step_spec structures do not match')\n\n def compare_to_spec(value, spec):\n return value.dtype.is_compatible_with(spec.dtype)\n\n compatibility = [\n compare_to_spec(v, s) for (v, s)\n in zip(tf.nest.flatten(step.action),\n tf.nest.flatten(self.action_spec))]\n\n if not all(compatibility):\n get_dtype = lambda x: x.dtype\n action_dtypes = tf.nest.map_structure(get_dtype, step.action)\n spec_dtypes = tf.nest.map_structure(get_dtype, self.action_spec)\n\n raise TypeError('Policy produced an action with a dtype that doesn\\'t '\n 'match its action_spec. Got action:\\n %s\\n with '\n 'action_spec:\\n %s' % (action_dtypes, spec_dtypes))\n\n return step\n\n def distribution(\n self, time_step: ts.TimeStep, policy_state: types.NestedTensor = ()\n ) -> policy_step.PolicyStep:\n \"\"\"Generates the distribution over next actions given the time_step.\n\n Args:\n time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.\n policy_state: A Tensor, or a nested dict, list or tuple of Tensors\n representing the previous policy_state.\n\n Returns:\n A `PolicyStep` named tuple containing:\n\n `action`: A tf.distribution capturing the distribution of next actions.\n `state`: A policy state tensor for the next call to distribution.\n `info`: Optional side information such as action log probabilities.\n\n Raises:\n ValueError or TypeError: If `validate_args is True` and inputs or\n outputs do not match `time_step_spec`, `policy_state_spec`,\n or `policy_step_spec`.\n \"\"\"\n if self._validate_args:\n time_step = nest_utils.prune_extra_keys(self._time_step_spec, time_step)\n policy_state = nest_utils.prune_extra_keys(\n self._policy_state_spec, policy_state)\n nest_utils.assert_same_structure(\n time_step,\n self._time_step_spec,\n message='time_step and time_step_spec structures do not match')\n nest_utils.assert_same_structure(\n policy_state,\n self._policy_state_spec,\n message='policy_state and policy_state_spec structures do not match')\n if self._automatic_state_reset:\n policy_state = self._maybe_reset_state(time_step, policy_state)\n step = self._distribution(time_step=time_step, policy_state=policy_state)\n if self.emit_log_probability:\n # This here is set only for compatibility with info_spec in constructor.\n info = policy_step.set_log_probability(\n step.info,\n tf.nest.map_structure(\n lambda _: tf.constant(0., dtype=tf.float32),\n 
policy_step.get_log_probability(self._info_spec)))\n      step = step._replace(info=info)\n    if self._validate_args:\n      nest_utils.assert_same_structure(\n          step,\n          self._policy_step_spec,\n          message=('distribution output and policy_step_spec structures '\n                   'do not match'))\n    return step\n\n  def update(self,\n             policy,\n             tau: float = 1.0,\n             tau_non_trainable: Optional[float] = None,\n             sort_variables_by_name: bool = False) -> tf.Operation:\n    \"\"\"Update the current policy with another policy.\n\n    This would include copying the variables from the other policy.\n\n    Args:\n      policy: Another policy it can update from.\n      tau: A float scalar in [0, 1]. When tau is 1.0 (the default), we do a hard\n        update. This is used for trainable variables.\n      tau_non_trainable: A float scalar in [0, 1] for non_trainable variables.\n        If None, will copy from tau.\n      sort_variables_by_name: A bool; when True, sorts the variables by name\n        before doing the update.\n\n    Returns:\n      A TF op to do the update.\n    \"\"\"\n    if self.variables():\n      return common.soft_variables_update(\n          policy.variables(),\n          self.variables(),\n          tau=tau,\n          tau_non_trainable=tau_non_trainable,\n          sort_variables_by_name=sort_variables_by_name)\n    else:\n      return tf.no_op()\n\n  @property\n  def emit_log_probability(self) -> bool:\n    \"\"\"Whether this policy instance emits log probabilities or not.\"\"\"\n    return self._emit_log_probability\n\n  @property\n  def time_step_spec(self) -> ts.TimeStep:\n    \"\"\"Describes the `TimeStep` tensors returned by `step()`.\n\n    Returns:\n      A `TimeStep` namedtuple with `TensorSpec` objects instead of Tensors,\n      which describe the shape, dtype and name of each tensor returned by\n      `step()`.\n    \"\"\"\n    return self._time_step_spec\n\n  @property\n  def action_spec(self) -> types.NestedTensorSpec:\n    \"\"\"Describes the TensorSpecs of the Tensors expected by `step(action)`.\n\n    `action` can be a single Tensor, or a nested dict, list or tuple of\n    Tensors.\n\n    Returns:\n      A single BoundedTensorSpec, or a nested dict, list or tuple of\n      `BoundedTensorSpec` objects, which describe the shape and\n      dtype of each Tensor expected by `step()`.\n    \"\"\"\n    return self._action_spec\n\n  @property\n  def policy_state_spec(self) -> types.NestedTensorSpec:\n    \"\"\"Describes the Tensors expected by `step(_, policy_state)`.\n\n    `policy_state` can be an empty tuple, a single Tensor, or a nested dict,\n    list or tuple of Tensors.\n\n    Returns:\n      A single TensorSpec, or a nested dict, list or tuple of\n      `TensorSpec` objects, which describe the shape and\n      dtype of each Tensor expected by `step(_, policy_state)`.\n    \"\"\"\n    return self._policy_state_spec\n\n  @property\n  def info_spec(self) -> types.NestedTensorSpec:\n    \"\"\"Describes the Tensors emitted as info by `action` and `distribution`.\n\n    `info` can be an empty tuple, a single Tensor, or a nested dict,\n    list or tuple of Tensors.\n\n    Returns:\n      A single TensorSpec, or a nested dict, list or tuple of\n      `TensorSpec` objects, which describe the shape and\n      dtype of each Tensor emitted as info by `action` and `distribution`.\n    \"\"\"\n    return self._info_spec\n\n  @property\n  def policy_step_spec(self) -> policy_step.PolicyStep:\n    \"\"\"Describes the output of `action()`.\n\n    Returns:\n      A nest of TensorSpec which describe the shape and dtype of each Tensor\n      emitted by `action()`.\n    \"\"\"\n    return self._policy_step_spec\n\n  # TODO(kbanoop, ebrevdo): Should this be collect_data_spec to mirror agents?\n  @property\n  def trajectory_spec(self) -> trajectory.Trajectory:\n    \"\"\"Describes the Tensors written when using 
this policy with an environment.\n\n    Returns:\n      A `Trajectory` containing all tensor specs associated with the\n      observation_spec, action_spec, policy_state_spec, and info_spec of\n      this policy.\n    \"\"\"\n    return self._trajectory_spec\n\n  @property\n  def collect_data_spec(self) -> trajectory.Trajectory:\n    \"\"\"Describes the Tensors written when using this policy with an environment.\n\n    Returns:\n      A nest of TensorSpec which describe the shape and dtype of each Tensor\n      required to train the agent which generated this policy.\n    \"\"\"\n    return self._trajectory_spec\n\n  # Subclasses MAY optionally override _action.\n  def _action(self, time_step: ts.TimeStep,\n              policy_state: types.NestedTensor,\n              seed: Optional[types.Seed] = None) -> policy_step.PolicyStep:\n    \"\"\"Implementation of `action`.\n\n    Args:\n      time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.\n      policy_state: A Tensor, or a nested dict, list or tuple of Tensors\n        representing the previous policy_state.\n      seed: Seed to use if action performs sampling (optional).\n\n    Returns:\n      A `PolicyStep` named tuple containing:\n        `action`: An action Tensor matching the `action_spec`.\n        `state`: A policy state tensor to be fed into the next call to action.\n        `info`: Optional side information such as action log probabilities.\n    \"\"\"\n    seed_stream = tfp.util.SeedStream(seed=seed, salt='tf_agents_tf_policy')\n    distribution_step = self._distribution(time_step, policy_state)  # pytype: disable=wrong-arg-types\n    actions = tf.nest.map_structure(\n        lambda d: reparameterized_sampling.sample(d, seed=seed_stream()),\n        distribution_step.action)\n    info = distribution_step.info\n    if self.emit_log_probability:\n      try:\n        log_probability = tf.nest.map_structure(lambda a, d: d.log_prob(a),\n                                                actions,\n                                                distribution_step.action)\n        info = policy_step.set_log_probability(info, log_probability)\n      except Exception:\n        raise TypeError('%s does not support emitting log-probabilities.' %\n                        type(self).__name__)\n\n    return distribution_step._replace(action=actions, info=info)\n\n  ## Subclasses MUST implement these.\n  def _distribution(\n      self, time_step: ts.TimeStep,\n      policy_state: types.NestedTensorSpec) -> policy_step.PolicyStep:\n    \"\"\"Implementation of `distribution`.\n\n    Args:\n      time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.\n      policy_state: A Tensor, or a nested dict, list or tuple of Tensors\n        representing the previous policy_state.\n\n    Returns:\n      A `PolicyStep` named tuple containing:\n        `action`: An (optionally nested) `tfp.distributions.Distribution`\n          capturing the distribution of next actions.\n        `state`: A policy state tensor for the next call to distribution.\n        `info`: Optional side information such as action log probabilities.\n    \"\"\"\n    raise NotImplementedError()\n\n  # Subclasses MAY optionally override _get_initial_state.\n  def _get_initial_state(self, batch_size: int) -> types.NestedTensor:\n    \"\"\"Returns the initial state of the policy network.\n\n    Args:\n      batch_size: A constant or Tensor holding the batch size. Can be None, in\n        which case the state will not have a batch dimension added.\n\n    Returns:\n      A nest of zero tensors matching the spec of the policy network state.\n    \"\"\"\n    return tensor_spec.zero_spec_nest(\n        self._policy_state_spec,\n        outer_dims=None if batch_size is None else [batch_size])\n"
] | [
[
"tensorflow.shape",
"tensorflow.no_op",
"tensorflow.nest.map_structure",
"tensorflow.compat.dimension_value",
"tensorflow.constant",
"tensorflow.nest.flatten"
]
] |
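The TFPolicy docstrings above describe the `get_initial_state`/`action` loop. A minimal runnable sketch of that loop, assuming tf-agents is installed and using the concrete `RandomTFPolicy` as a stand-in for a trained policy; the specs and shapes below are illustrative:

import tensorflow as tf
from tf_agents.policies import random_tf_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts

# Illustrative specs: a 4-d float observation and a discrete action in [0, 3].
observation_spec = tensor_spec.TensorSpec((4,), tf.float32)
action_spec = tensor_spec.BoundedTensorSpec((), tf.int32, minimum=0, maximum=3)
time_step_spec = ts.time_step_spec(observation_spec)

policy = random_tf_policy.RandomTFPolicy(time_step_spec, action_spec)
policy_state = policy.get_initial_state(batch_size=1)  # () for stateless policies

time_step = ts.restart(tf.zeros((1, 4), tf.float32), batch_size=1)
policy_step = policy.action(time_step, policy_state)
print(policy_step.action)  # action tensor matching action_spec (clipped when clip=True)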
zhangyuwangumass/General-Data-Driven-Adaptive-Learning | [
"63c4ddef36b2b7bd7078cd9b431e3502c358915a"
] | [
"trajectoryPlugin/collate.py"
] | [
"r\"\"\"\"Contains definitions of the methods used by the _DataLoaderIter workers to\ncollate samples fetched from dataset into Tensor(s).\n\nThese **needs** to be in global scope since Py2 doesn't support serializing\nstatic methods.\n\"\"\"\n\nimport torch\nimport re\nfrom torch._six import container_abcs, string_classes, int_classes\n\n_use_shared_memory = False\nr\"\"\"Whether to use shared memory in default_collate\"\"\"\n\nnp_str_obj_array_pattern = re.compile(r'[SaUO]')\n\nerror_msg_fmt = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n\nnumpy_type_map = {\n 'float64': torch.DoubleTensor,\n 'float32': torch.FloatTensor,\n 'float16': torch.HalfTensor,\n 'int64': torch.LongTensor,\n 'int32': torch.IntTensor,\n 'int16': torch.ShortTensor,\n 'int8': torch.CharTensor,\n 'uint8': torch.ByteTensor,\n}\n\n\ndef default_collate(batch):\n r\"\"\"Puts each data field into a tensor with outer dimension batch size\"\"\"\n\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(error_msg_fmt.format(elem.dtype))\n\n return default_collate([torch.from_numpy(b) for b in batch])\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], float):\n return torch.tensor(batch, dtype=torch.float32)\n elif isinstance(batch[0], int_classes):\n return torch.tensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], container_abcs.Mapping):\n return {key: default_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple\n return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))\n elif isinstance(batch[0], container_abcs.Sequence):\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n\n raise TypeError((error_msg_fmt.format(type(batch[0]))))"
] | [
[
"torch.stack",
"torch.tensor",
"torch.from_numpy"
]
] |
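A quick illustration of the recursive behavior of `default_collate` above: tensors are stacked along a new batch dimension, Python ints go through the `int_classes` branch and become a single tensor, and mappings are collated per key. This assumes the module is importable under the file path recorded for this row:

import torch
from trajectoryPlugin.collate import default_collate  # path as recorded above

batch = [
    {"x": torch.ones(3), "label": 1},
    {"x": torch.zeros(3), "label": 0},
]
merged = default_collate(batch)
print(merged["x"].shape)  # torch.Size([2, 3]) -- stacked along a new batch dim
print(merged["label"])    # tensor([1, 0]) via the int_classes branch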
coco-robotics/rllab-curriculum | [
"f55b50224fcf5a9a5c064542eb0850a966cab223"
] | [
"curriculum/envs/maze/maze_swim/swimmer_env.py"
] | [
"from rllab.envs.base import Step\nfrom rllab.misc.overrides import overrides\nfrom rllab.envs.mujoco.mujoco_env import MujocoEnv\nimport numpy as np\nfrom rllab.core.serializable import Serializable\nfrom rllab.misc import logger\nfrom rllab.misc import autoargs\nfrom contextlib import contextmanager\n\nclass SwimmerEnv(MujocoEnv, Serializable):\n\n FILE = 'swimmer.xml'\n\n @autoargs.arg('ctrl_cost_coeff', type=float,\n help='cost coefficient for controls')\n def __init__(\n self,\n ctrl_cost_coeff=1e-2,\n *args, **kwargs):\n self.ctrl_cost_coeff = ctrl_cost_coeff\n super(SwimmerEnv, self).__init__(*args, **kwargs)\n Serializable.quick_init(self, locals())\n\n def get_current_obs(self):\n return np.concatenate([\n self.model.data.qpos.flat,\n self.model.data.qvel.flat,\n self.get_body_com(\"torso\").flat,\n ]).reshape(-1)\n\n def step(self, action):\n self.forward_dynamics(action)\n next_obs = self.get_current_obs()\n lb, ub = self.action_bounds\n scaling = (ub - lb) * 0.5\n ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(\n np.square(action / scaling))\n forward_reward = self.get_body_comvel(\"torso\")[0]\n reward = forward_reward - ctrl_cost\n done = False\n return Step(next_obs, reward, done)\n\n # @overrides\n # def reset_mujoco(self, init_state=None):\n # super(SwimmerEnv, self).reset_mujoco(init)\n # if init_state is not None:\n # idx = self.model.body_names.index(\"torso\")\n # self.model.data.com_subtree[idx][0] = init_state[0]\n # self.model.data.com_subtree[idx][1] = init_state[1]\n\n @overrides # ignoring the goal\n def reset(self, *args, **kwargs):\n return super(SwimmerEnv, self).reset(*args, **kwargs) # passing in keyword arguments\n\n @overrides\n def log_diagnostics(self, paths):\n if len(paths) > 0:\n progs = [\n path[\"observations\"][-1][-3] - path[\"observations\"][0][-3]\n for path in paths\n ]\n logger.record_tabular('AverageForwardProgress', np.mean(progs))\n logger.record_tabular('MaxForwardProgress', np.max(progs))\n logger.record_tabular('MinForwardProgress', np.min(progs))\n logger.record_tabular('StdForwardProgress', np.std(progs))\n else:\n logger.record_tabular('AverageForwardProgress', np.nan)\n logger.record_tabular('MaxForwardProgress', np.nan)\n logger.record_tabular('MinForwardProgress', np.nan)\n logger.record_tabular('StdForwardProgress', np.nan)\n\n @contextmanager\n def set_kill_outside(self):\n self.kill_outside = True\n try:\n yield\n finally:\n self.kill_outside = False"
] | [
[
"numpy.max",
"numpy.min",
"numpy.std",
"numpy.square",
"numpy.mean"
]
] |
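SwimmerEnv.step above computes the reward as the forward velocity of the torso minus a quadratic control penalty on the scaled action. A standalone numpy restatement of that formula; the function name and sample bounds here are illustrative, not part of the source:

import numpy as np

def swimmer_reward(action, lb, ub, forward_vel, ctrl_cost_coeff=1e-2):
    # Mirrors SwimmerEnv.step: actions are normalized by half the bound range,
    # then penalized quadratically; reward is forward velocity minus the penalty.
    scaling = (ub - lb) * 0.5
    ctrl_cost = 0.5 * ctrl_cost_coeff * np.sum(np.square(action / scaling))
    return forward_vel - ctrl_cost

lb, ub = np.array([-1.0, -1.0]), np.array([1.0, 1.0])
print(swimmer_reward(np.array([0.5, -0.5]), lb, ub, forward_vel=0.3))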
stragu/pandas | [
"b8890eb33b40993da00656f16c65070c42429f0d"
] | [
"pandas/io/common.py"
] | [
"\"\"\"Common IO api utilities\"\"\"\nfrom __future__ import annotations\n\nimport bz2\nimport codecs\nfrom collections import abc\nimport dataclasses\nimport gzip\nfrom io import BufferedIOBase, BytesIO, RawIOBase, StringIO, TextIOWrapper\nimport mmap\nimport os\nfrom typing import IO, Any, AnyStr, Dict, List, Mapping, Optional, Tuple, Union, cast\nfrom urllib.parse import (\n urljoin,\n urlparse as parse_url,\n uses_netloc,\n uses_params,\n uses_relative,\n)\nimport warnings\nimport zipfile\n\nfrom pandas._typing import (\n Buffer,\n CompressionDict,\n CompressionOptions,\n FileOrBuffer,\n FilePathOrBuffer,\n StorageOptions,\n)\nfrom pandas.compat import get_lzma_file, import_lzma\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.core.dtypes.common import is_file_like\n\nlzma = import_lzma()\n\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard(\"\")\n\n\[email protected]\nclass IOArgs:\n \"\"\"\n Return value of io/common.py:_get_filepath_or_buffer.\n\n Note (copy&past from io/parsers):\n filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]\n though mypy handling of conditional imports is difficult.\n See https://github.com/python/mypy/issues/1297\n \"\"\"\n\n filepath_or_buffer: FileOrBuffer\n encoding: str\n mode: str\n compression: CompressionDict\n should_close: bool = False\n\n\[email protected]\nclass IOHandles:\n \"\"\"\n Return value of io/common.py:get_handle\n\n Can be used as a context manager.\n\n This is used to easily close created buffers and to handle corner cases when\n TextIOWrapper is inserted.\n\n handle: The file handle to be used.\n created_handles: All file handles that are created by get_handle\n is_wrapped: Whether a TextIOWrapper needs to be detached.\n \"\"\"\n\n handle: Buffer\n compression: CompressionDict\n created_handles: List[Buffer] = dataclasses.field(default_factory=list)\n is_wrapped: bool = False\n is_mmap: bool = False\n\n def close(self) -> None:\n \"\"\"\n Close all created buffers.\n\n Note: If a TextIOWrapper was inserted, it is flushed and detached to\n avoid closing the potentially user-created buffer.\n \"\"\"\n if self.is_wrapped:\n assert isinstance(self.handle, TextIOWrapper)\n self.handle.flush()\n self.handle.detach()\n self.created_handles.remove(self.handle)\n try:\n for handle in self.created_handles:\n handle.close()\n except (OSError, ValueError):\n pass\n self.created_handles = []\n self.is_wrapped = False\n\n def __enter__(self) -> IOHandles:\n return self\n\n def __exit__(self, *args: Any) -> None:\n self.close()\n\n\ndef is_url(url) -> bool:\n \"\"\"\n Check to see if a URL has a valid protocol.\n\n Parameters\n ----------\n url : str or unicode\n\n Returns\n -------\n isurl : bool\n If `url` has a valid protocol return True otherwise False.\n \"\"\"\n if not isinstance(url, str):\n return False\n return parse_url(url).scheme in _VALID_URLS\n\n\ndef _expand_user(filepath_or_buffer: FileOrBuffer[AnyStr]) -> FileOrBuffer[AnyStr]:\n \"\"\"\n Return the argument with an initial component of ~ or ~user\n replaced by that user's home directory.\n\n Parameters\n ----------\n filepath_or_buffer : object to be converted if possible\n\n Returns\n -------\n expanded_filepath_or_buffer : an expanded filepath or the\n input if not expandable\n \"\"\"\n if isinstance(filepath_or_buffer, str):\n return os.path.expanduser(filepath_or_buffer)\n return filepath_or_buffer\n\n\ndef validate_header_arg(header) -> None:\n if isinstance(header, bool):\n raise 
TypeError(\n \"Passing a bool to header is invalid. Use header=None for no header or \"\n \"header=int or list-like of ints to specify \"\n \"the row(s) making up the column names\"\n )\n\n\ndef stringify_path(\n filepath_or_buffer: FilePathOrBuffer[AnyStr],\n convert_file_like: bool = False,\n) -> FileOrBuffer[AnyStr]:\n \"\"\"\n Attempt to convert a path-like object to a string.\n\n Parameters\n ----------\n filepath_or_buffer : object to be converted\n\n Returns\n -------\n str_filepath_or_buffer : maybe a string version of the object\n\n Notes\n -----\n Objects supporting the fspath protocol (python 3.6+) are coerced\n according to its __fspath__ method.\n\n Any other object is passed through unchanged, which includes bytes,\n strings, buffers, or anything else that's not even path-like.\n \"\"\"\n if not convert_file_like and is_file_like(filepath_or_buffer):\n # GH 38125: some fsspec objects implement os.PathLike but have already opened a\n # file. This prevents opening the file a second time. infer_compression calls\n # this function with convert_file_like=True to infer the compression.\n return cast(FileOrBuffer[AnyStr], filepath_or_buffer)\n\n if isinstance(filepath_or_buffer, os.PathLike):\n filepath_or_buffer = filepath_or_buffer.__fspath__()\n return _expand_user(filepath_or_buffer)\n\n\ndef urlopen(*args, **kwargs):\n \"\"\"\n Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of\n the stdlib.\n \"\"\"\n import urllib.request\n\n return urllib.request.urlopen(*args, **kwargs)\n\n\ndef is_fsspec_url(url: FilePathOrBuffer) -> bool:\n \"\"\"\n Returns true if the given URL looks like\n something fsspec can handle\n \"\"\"\n return (\n isinstance(url, str)\n and \"://\" in url\n and not url.startswith((\"http://\", \"https://\"))\n )\n\n\ndef _get_filepath_or_buffer(\n filepath_or_buffer: FilePathOrBuffer,\n encoding: str = \"utf-8\",\n compression: CompressionOptions = None,\n mode: str = \"r\",\n storage_options: StorageOptions = None,\n) -> IOArgs:\n \"\"\"\n If the filepath_or_buffer is a url, translate and return the buffer.\n Otherwise passthrough.\n\n Parameters\n ----------\n filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),\n or buffer\n compression : {{'gzip', 'bz2', 'zip', 'xz', None}}, optional\n encoding : the encoding to use to decode bytes, default is 'utf-8'\n mode : str, optional\n\n storage_options : dict, optional\n Extra options that make sense for a particular storage connection, e.g.\n host, port, username, password, etc., if using a URL that will\n be parsed by ``fsspec``, e.g., starting \"s3://\", \"gcs://\". An error\n will be raised if providing this argument with a local path or\n a file-like buffer. See the fsspec and backend storage implementation\n docs for the set of allowed keys and values\n\n .. 
versionadded:: 1.2.0\n\n    .. versionchanged:: 1.2.0\n\n      Returns the dataclass IOArgs.\n    \"\"\"\n    filepath_or_buffer = stringify_path(filepath_or_buffer)\n\n    # handle compression dict\n    compression_method, compression = get_compression_method(compression)\n    compression_method = infer_compression(filepath_or_buffer, compression_method)\n\n    # GH21227 internal compression is not used for non-binary handles.\n    if compression_method and hasattr(filepath_or_buffer, \"write\") and \"b\" not in mode:\n        warnings.warn(\n            \"compression has no effect when passing a non-binary object as input.\",\n            RuntimeWarning,\n            stacklevel=2,\n        )\n        compression_method = None\n\n    compression = dict(compression, method=compression_method)\n\n    # uniform encoding names\n    if encoding is not None:\n        encoding = encoding.replace(\"_\", \"-\").lower()\n\n    # bz2 and xz do not write the byte order mark for utf-16 and utf-32\n    # print a warning when writing such files\n    if (\n        \"w\" in mode\n        and compression_method in [\"bz2\", \"xz\"]\n        and encoding in [\"utf-16\", \"utf-32\"]\n    ):\n        warnings.warn(\n            f\"{compression} will not write the byte order mark for {encoding}\",\n            UnicodeWarning,\n        )\n\n    # Use binary mode when converting path-like objects to file-like objects (fsspec)\n    # except when text mode is explicitly requested. The original mode is returned if\n    # fsspec is not used.\n    fsspec_mode = mode\n    if \"t\" not in fsspec_mode and \"b\" not in fsspec_mode:\n        fsspec_mode += \"b\"\n\n    if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):\n        # TODO: fsspec can also handle HTTP via requests, but leaving this\n        # unchanged. using fsspec appears to break the ability to infer if the\n        # server responded with gzipped data\n        storage_options = storage_options or {}\n\n        # waiting until now for importing to match intended lazy logic of\n        # urlopen function defined elsewhere in this module\n        import urllib.request\n\n        # assuming storage_options is to be interpreted as headers\n        req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)\n        with urlopen(req_info) as req:\n            content_encoding = req.headers.get(\"Content-Encoding\", None)\n            if content_encoding == \"gzip\":\n                # Override compression based on Content-Encoding header\n                compression = {\"method\": \"gzip\"}\n            reader = BytesIO(req.read())\n        return IOArgs(\n            filepath_or_buffer=reader,\n            encoding=encoding,\n            compression=compression,\n            should_close=True,\n            mode=fsspec_mode,\n        )\n\n    if is_fsspec_url(filepath_or_buffer):\n        assert isinstance(\n            filepath_or_buffer, str\n        )  # just to appease mypy for this branch\n        # two special-case s3-like protocols; these have special meaning in Hadoop,\n        # but are equivalent to just \"s3\" from fsspec's point of view\n        # cc #11071\n        if filepath_or_buffer.startswith(\"s3a://\"):\n            filepath_or_buffer = filepath_or_buffer.replace(\"s3a://\", \"s3://\")\n        if filepath_or_buffer.startswith(\"s3n://\"):\n            filepath_or_buffer = filepath_or_buffer.replace(\"s3n://\", \"s3://\")\n        fsspec = import_optional_dependency(\"fsspec\")\n\n        # If botocore is installed we fallback to reading with anon=True\n        # to allow reads from public buckets\n        err_types_to_retry_with_anon: List[Any] = []\n        try:\n            import_optional_dependency(\"botocore\")\n            from botocore.exceptions import ClientError, NoCredentialsError\n\n            err_types_to_retry_with_anon = [\n                ClientError,\n                NoCredentialsError,\n                PermissionError,\n            ]\n        except ImportError:\n            pass\n\n        try:\n            file_obj = fsspec.open(\n                filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})\n            ).open()\n        # GH 
34626 Reads from Public Buckets without Credentials needs anon=True\n except tuple(err_types_to_retry_with_anon):\n if storage_options is None:\n storage_options = {\"anon\": True}\n else:\n # don't mutate user input.\n storage_options = dict(storage_options)\n storage_options[\"anon\"] = True\n file_obj = fsspec.open(\n filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})\n ).open()\n\n return IOArgs(\n filepath_or_buffer=file_obj,\n encoding=encoding,\n compression=compression,\n should_close=True,\n mode=fsspec_mode,\n )\n elif storage_options:\n raise ValueError(\n \"storage_options passed with file object or non-fsspec file path\"\n )\n\n if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):\n return IOArgs(\n filepath_or_buffer=_expand_user(filepath_or_buffer),\n encoding=encoding,\n compression=compression,\n should_close=False,\n mode=mode,\n )\n\n if not is_file_like(filepath_or_buffer):\n msg = f\"Invalid file path or buffer object type: {type(filepath_or_buffer)}\"\n raise ValueError(msg)\n\n return IOArgs(\n filepath_or_buffer=filepath_or_buffer,\n encoding=encoding,\n compression=compression,\n should_close=False,\n mode=mode,\n )\n\n\ndef file_path_to_url(path: str) -> str:\n \"\"\"\n converts an absolute native path to a FILE URL.\n\n Parameters\n ----------\n path : a path in native format\n\n Returns\n -------\n a valid FILE URL\n \"\"\"\n # lazify expensive import (~30ms)\n from urllib.request import pathname2url\n\n return urljoin(\"file:\", pathname2url(path))\n\n\n_compression_to_extension = {\"gzip\": \".gz\", \"bz2\": \".bz2\", \"zip\": \".zip\", \"xz\": \".xz\"}\n\n\ndef get_compression_method(\n compression: CompressionOptions,\n) -> Tuple[Optional[str], CompressionDict]:\n \"\"\"\n Simplifies a compression argument to a compression method string and\n a mapping containing additional arguments.\n\n Parameters\n ----------\n compression : str or mapping\n If string, specifies the compression method. If mapping, value at key\n 'method' specifies compression method.\n\n Returns\n -------\n tuple of ({compression method}, Optional[str]\n {compression arguments}, Dict[str, Any])\n\n Raises\n ------\n ValueError on mapping missing 'method' key\n \"\"\"\n compression_method: Optional[str]\n if isinstance(compression, Mapping):\n compression_args = dict(compression)\n try:\n compression_method = compression_args.pop(\"method\")\n except KeyError as err:\n raise ValueError(\"If mapping, compression must have key 'method'\") from err\n else:\n compression_args = {}\n compression_method = compression\n return compression_method, compression_args\n\n\ndef infer_compression(\n filepath_or_buffer: FilePathOrBuffer, compression: Optional[str]\n) -> Optional[str]:\n \"\"\"\n Get the compression method for filepath_or_buffer. If compression='infer',\n the inferred compression method is returned. 
Otherwise, the input\n compression method is returned unchanged, unless it's invalid, in which\n case an error is raised.\n\n Parameters\n ----------\n filepath_or_buffer : str or file handle\n File path or object.\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}\n If 'infer' and `filepath_or_buffer` is path-like, then detect\n compression from the following extensions: '.gz', '.bz2', '.zip',\n or '.xz' (otherwise no compression).\n\n Returns\n -------\n string or None\n\n Raises\n ------\n ValueError on invalid compression specified.\n \"\"\"\n if compression is None:\n return None\n\n # Infer compression\n if compression == \"infer\":\n # Convert all path types (e.g. pathlib.Path) to strings\n filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)\n if not isinstance(filepath_or_buffer, str):\n # Cannot infer compression of a buffer, assume no compression\n return None\n\n # Infer compression from the filename/URL extension\n for compression, extension in _compression_to_extension.items():\n if filepath_or_buffer.lower().endswith(extension):\n return compression\n return None\n\n # Compression has been specified. Check that it's valid\n if compression in _compression_to_extension:\n return compression\n\n # https://github.com/python/mypy/issues/5492\n # Unsupported operand types for + (\"List[Optional[str]]\" and \"List[str]\")\n valid = [\"infer\", None] + sorted(\n _compression_to_extension\n ) # type: ignore[operator]\n msg = (\n f\"Unrecognized compression type: {compression}\\n\"\n f\"Valid compression types are {valid}\"\n )\n raise ValueError(msg)\n\n\ndef get_handle(\n path_or_buf: FilePathOrBuffer,\n mode: str,\n encoding: Optional[str] = None,\n compression: CompressionOptions = None,\n memory_map: bool = False,\n is_text: bool = True,\n errors: Optional[str] = None,\n storage_options: StorageOptions = None,\n) -> IOHandles:\n \"\"\"\n Get file handle for given path/buffer and mode.\n\n Parameters\n ----------\n path_or_buf : str or file handle\n File path or object.\n mode : str\n Mode to open path_or_buf with.\n encoding : str or None\n Encoding to use.\n compression : str or dict, default None\n If string, specifies compression mode. If dict, value at key 'method'\n specifies compression mode. Compression mode must be one of {'infer',\n 'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer'\n and `filepath_or_buffer` is path-like, then detect compression from\n the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise\n no compression). If dict and compression mode is one of\n {'zip', 'gzip', 'bz2'}, or inferred as one of the above,\n other entries passed as additional compression options.\n\n .. versionchanged:: 1.0.0\n\n May now be a dict with key 'method' as compression mode\n and other keys as compression options if compression\n mode is 'zip'.\n\n .. versionchanged:: 1.1.0\n\n Passing compression options as keys in dict is now\n supported for compression modes 'gzip' and 'bz2' as well as 'zip'.\n\n memory_map : boolean, default False\n See parsers._parser_params for more information.\n is_text : boolean, default True\n Whether the type of the content passed to the file/buffer is string or\n bytes. This is not the same as `\"b\" not in mode`. 
If a string content is\n passed to a binary file/buffer, a wrapper is inserted.\n errors : str, default 'strict'\n Specifies how encoding and decoding errors are to be handled.\n See the errors argument for :func:`open` for a full list\n of options.\n storage_options: StorageOptions = None\n Passed to _get_filepath_or_buffer\n\n .. versionchanged:: 1.2.0\n\n Returns the dataclass IOHandles\n \"\"\"\n # Windows does not default to utf-8. Set to utf-8 for a consistent behavior\n encoding_passed, encoding = encoding, encoding or \"utf-8\"\n\n # read_csv does not know whether the buffer is opened in binary/text mode\n if _is_binary_mode(path_or_buf, mode) and \"b\" not in mode:\n mode += \"b\"\n\n # open URLs\n ioargs = _get_filepath_or_buffer(\n path_or_buf,\n encoding=encoding,\n compression=compression,\n mode=mode,\n storage_options=storage_options,\n )\n\n handle = ioargs.filepath_or_buffer\n handles: List[Buffer]\n\n # memory mapping needs to be the first step\n handle, memory_map, handles = _maybe_memory_map(\n handle, memory_map, ioargs.encoding, ioargs.mode, errors\n )\n\n is_path = isinstance(handle, str)\n compression_args = dict(ioargs.compression)\n compression = compression_args.pop(\"method\")\n\n if compression:\n # compression libraries do not like an explicit text-mode\n ioargs.mode = ioargs.mode.replace(\"t\", \"\")\n\n # GZ Compression\n if compression == \"gzip\":\n if is_path:\n assert isinstance(handle, str)\n handle = gzip.GzipFile(\n filename=handle,\n mode=ioargs.mode,\n **compression_args,\n )\n else:\n handle = gzip.GzipFile(\n fileobj=handle, # type: ignore[arg-type]\n mode=ioargs.mode,\n **compression_args,\n )\n\n # BZ Compression\n elif compression == \"bz2\":\n handle = bz2.BZ2File(\n handle, # type: ignore[arg-type]\n mode=ioargs.mode,\n **compression_args,\n )\n\n # ZIP Compression\n elif compression == \"zip\":\n handle = _BytesZipFile(handle, ioargs.mode, **compression_args)\n if handle.mode == \"r\":\n handles.append(handle)\n zip_names = handle.namelist()\n if len(zip_names) == 1:\n handle = handle.open(zip_names.pop())\n elif len(zip_names) == 0:\n raise ValueError(f\"Zero files found in ZIP file {path_or_buf}\")\n else:\n raise ValueError(\n \"Multiple files found in ZIP file. 
\"\n f\"Only one file per ZIP: {zip_names}\"\n )\n\n # XZ Compression\n elif compression == \"xz\":\n handle = get_lzma_file(lzma)(handle, ioargs.mode)\n\n # Unrecognized Compression\n else:\n msg = f\"Unrecognized compression type: {compression}\"\n raise ValueError(msg)\n\n assert not isinstance(handle, str)\n handles.append(handle)\n\n elif isinstance(handle, str):\n # Check whether the filename is to be opened in binary mode.\n # Binary mode does not support 'encoding' and 'newline'.\n if ioargs.encoding and \"b\" not in ioargs.mode:\n if errors is None and encoding_passed is None:\n # ignore errors when no encoding is specified\n errors = \"replace\"\n # Encoding\n handle = open(\n handle,\n ioargs.mode,\n encoding=ioargs.encoding,\n errors=errors,\n newline=\"\",\n )\n else:\n # Binary mode\n handle = open(handle, ioargs.mode)\n handles.append(handle)\n\n # Convert BytesIO or file objects passed with an encoding\n is_wrapped = False\n if is_text and (compression or _is_binary_mode(handle, ioargs.mode)):\n handle = TextIOWrapper(\n handle, # type: ignore[arg-type]\n encoding=ioargs.encoding,\n errors=errors,\n newline=\"\",\n )\n handles.append(handle)\n # only marked as wrapped when the caller provided a handle\n is_wrapped = not (\n isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close\n )\n\n handles.reverse() # close the most recently added buffer first\n if ioargs.should_close:\n assert not isinstance(ioargs.filepath_or_buffer, str)\n handles.append(ioargs.filepath_or_buffer)\n\n assert not isinstance(handle, str)\n return IOHandles(\n handle=handle,\n created_handles=handles,\n is_wrapped=is_wrapped,\n is_mmap=memory_map,\n compression=ioargs.compression,\n )\n\n\n# error: Definition of \"__exit__\" in base class \"ZipFile\" is incompatible with\n# definition in base class \"BytesIO\" [misc]\n# error: Definition of \"__enter__\" in base class \"ZipFile\" is incompatible with\n# definition in base class \"BytesIO\" [misc]\n# error: Definition of \"__enter__\" in base class \"ZipFile\" is incompatible with\n# definition in base class \"BinaryIO\" [misc]\n# error: Definition of \"__enter__\" in base class \"ZipFile\" is incompatible with\n# definition in base class \"IO\" [misc]\n# error: Definition of \"read\" in base class \"ZipFile\" is incompatible with\n# definition in base class \"BytesIO\" [misc]\n# error: Definition of \"read\" in base class \"ZipFile\" is incompatible with\n# definition in base class \"IO\" [misc]\nclass _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore[misc]\n \"\"\"\n Wrapper for standard library class ZipFile and allow the returned file-like\n handle to accept byte strings via `write` method.\n\n BytesIO provides attributes of file-like object and ZipFile.writestr writes\n bytes strings into a member of the archive.\n \"\"\"\n\n # GH 17778\n def __init__(\n self,\n file: FilePathOrBuffer,\n mode: str,\n archive_name: Optional[str] = None,\n **kwargs,\n ):\n mode = mode.replace(\"b\", \"\")\n self.archive_name = archive_name\n self.multiple_write_buffer: Optional[Union[StringIO, BytesIO]] = None\n\n kwargs_zip: Dict[str, Any] = {\"compression\": zipfile.ZIP_DEFLATED}\n kwargs_zip.update(kwargs)\n\n super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type]\n\n def write(self, data):\n # buffer multiple write calls, write on flush\n if self.multiple_write_buffer is None:\n self.multiple_write_buffer = (\n BytesIO() if isinstance(data, bytes) else StringIO()\n )\n self.multiple_write_buffer.write(data)\n\n def flush(self) -> 
None:\n # write to actual handle and close write buffer\n if self.multiple_write_buffer is None or self.multiple_write_buffer.closed:\n return\n\n # ZipFile needs a non-empty string\n archive_name = self.archive_name or self.filename or \"zip\"\n with self.multiple_write_buffer:\n super().writestr(archive_name, self.multiple_write_buffer.getvalue())\n\n def close(self):\n self.flush()\n super().close()\n\n @property\n def closed(self):\n return self.fp is None\n\n\nclass _MMapWrapper(abc.Iterator):\n \"\"\"\n Wrapper for the Python's mmap class so that it can be properly read in\n by Python's csv.reader class.\n\n Parameters\n ----------\n f : file object\n File object to be mapped onto memory. Must support the 'fileno'\n method or have an equivalent attribute\n\n \"\"\"\n\n def __init__(self, f: IO):\n self.attributes = {}\n for attribute in (\"seekable\", \"readable\", \"writeable\"):\n if not hasattr(f, attribute):\n continue\n self.attributes[attribute] = getattr(f, attribute)()\n self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)\n\n def __getattr__(self, name: str):\n if name in self.attributes:\n return lambda: self.attributes[name]\n return getattr(self.mmap, name)\n\n def __iter__(self) -> _MMapWrapper:\n return self\n\n def __next__(self) -> str:\n newbytes = self.mmap.readline()\n\n # readline returns bytes, not str, but Python's CSV reader\n # expects str, so convert the output to str before continuing\n newline = newbytes.decode(\"utf-8\")\n\n # mmap doesn't raise if reading past the allocated\n # data but instead returns an empty string, so raise\n # if that is returned\n if newline == \"\":\n raise StopIteration\n return newline\n\n\ndef _maybe_memory_map(\n handle: FileOrBuffer,\n memory_map: bool,\n encoding: str,\n mode: str,\n errors: Optional[str],\n) -> Tuple[FileOrBuffer, bool, List[Buffer]]:\n \"\"\"Try to memory map file/buffer.\"\"\"\n handles: List[Buffer] = []\n memory_map &= hasattr(handle, \"fileno\") or isinstance(handle, str)\n if not memory_map:\n return handle, memory_map, handles\n\n # need to open the file first\n if isinstance(handle, str):\n if encoding and \"b\" not in mode:\n # Encoding\n handle = open(handle, mode, encoding=encoding, errors=errors, newline=\"\")\n else:\n # Binary mode\n handle = open(handle, mode)\n handles.append(handle)\n\n try:\n wrapped = cast(mmap.mmap, _MMapWrapper(handle)) # type: ignore[arg-type]\n handle.close()\n handles.remove(handle)\n handles.append(wrapped)\n handle = wrapped\n except Exception:\n # we catch any errors that may have occurred\n # because that is consistent with the lower-level\n # functionality of the C engine (pd.read_csv), so\n # leave the file handler as is then\n memory_map = False\n\n return handle, memory_map, handles\n\n\ndef file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool:\n \"\"\"Test whether file exists.\"\"\"\n exists = False\n filepath_or_buffer = stringify_path(filepath_or_buffer)\n if not isinstance(filepath_or_buffer, str):\n return exists\n try:\n exists = os.path.exists(filepath_or_buffer)\n # gh-5874: if the filepath is too long will raise here\n except (TypeError, ValueError):\n pass\n return exists\n\n\ndef _is_binary_mode(handle: FilePathOrBuffer, mode: str) -> bool:\n \"\"\"Whether the handle is opened in binary mode\"\"\"\n # classes that expect string but have 'b' in mode\n text_classes = (codecs.StreamReaderWriter,)\n if isinstance(handle, text_classes):\n return False\n\n # classes that expect bytes\n binary_classes = (BufferedIOBase, RawIOBase)\n\n return 
isinstance(handle, binary_classes) or \"b\" in getattr(handle, \"mode\", mode)\n"
] | [
[
"pandas.compat._optional.import_optional_dependency",
"pandas.core.dtypes.common.is_file_like",
"pandas.compat.import_lzma",
"pandas.compat.get_lzma_file"
]
] |
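A quick illustration of the compression dispatch in the record above, using only the public `pandas.read_csv` API; the file name `data.csv.gz` is made up for this sketch:

```python
import gzip

import pandas as pd

# Write a tiny gzip-compressed CSV, then let read_csv route it through the
# gzip branch of the handler above by inferring compression from the suffix.
with gzip.open("data.csv.gz", "wt", encoding="utf-8") as f:
    f.write("a,b\n1,2\n3,4\n")

inferred = pd.read_csv("data.csv.gz")                      # compression inferred
explicit = pd.read_csv("data.csv.gz", compression="gzip")  # compression stated
assert inferred.equals(explicit)
```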
ravihammond/hanabi-convention-adaptation | [
"5dafa91742de8e8d5810e8213e0e2771818b2f54"
] | [
"pyhanabi/common_utils/helper.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom typing import Dict\n\n\ndef to_device(data, device):\n if isinstance(data, torch.Tensor):\n return data.to(device)\n elif isinstance(data, dict):\n return {k: to_device(v, device) for k, v in data.items()}\n elif isinstance(data, list):\n return [to_device(v, device) for v in data]\n\n\ndef get_all_files(root, file_extension, contain=None):\n files = []\n for folder, _, fs in os.walk(root):\n for f in fs:\n if file_extension is not None:\n if f.endswith(file_extension):\n if contain is None or contain in os.path.join(folder, f):\n files.append(os.path.join(folder, f))\n else:\n if contain in f:\n files.append(os.path.join(folder, f))\n return files\n\n\ndef flatten(s):\n if s == []:\n return s\n if isinstance(s[0], list):\n return flatten(s[0]) + flatten(s[1:])\n return s[:1] + flatten(s[1:])\n\n\ndef moving_average(data, period):\n # padding\n left_pad = [data[0] for _ in range(period // 2)]\n right_pad = data[-period // 2 + 1 :]\n data = left_pad + data + right_pad\n weights = np.ones(period) / period\n return np.convolve(data, weights, mode=\"valid\")\n\n\ndef mem2str(num_bytes):\n assert num_bytes >= 0\n if num_bytes >= 2 ** 30: # GB\n val = float(num_bytes) / (2 ** 30)\n result = \"%.3f GB\" % val\n elif num_bytes >= 2 ** 20: # MB\n val = float(num_bytes) / (2 ** 20)\n result = \"%.3f MB\" % val\n elif num_bytes >= 2 ** 10: # KB\n val = float(num_bytes) / (2 ** 10)\n result = \"%.3f KB\" % val\n else:\n result = \"%d bytes\" % num_bytes\n return result\n\n\ndef sec2str(seconds):\n seconds = int(seconds)\n hour = seconds // 3600\n seconds = seconds % (24 * 3600)\n seconds %= 3600\n minutes = seconds // 60\n seconds %= 60\n return \"%dH %02dM %02dS\" % (hour, minutes, seconds)\n\n\ndef num2str(n):\n if n < 1e3:\n s = str(n)\n unit = \"\"\n elif n < 1e6:\n n /= 1e3\n s = \"%.3f\" % n\n unit = \"K\"\n else:\n n /= 1e6\n s = \"%.3f\" % n\n unit = \"M\"\n\n s = s.rstrip(\"0\").rstrip(\".\")\n return s + unit\n\n\ndef get_mem_usage():\n import psutil\n\n mem = psutil.virtual_memory()\n result = \"\"\n result += \"available: %s, \" % (mem2str(mem.available))\n result += \"used: %s, \" % (mem2str(mem.used))\n result += \"free: %s\" % (mem2str(mem.free))\n return result\n\n\ndef flatten_first2dim(batch):\n if isinstance(batch, torch.Tensor):\n size = batch.size()[2:]\n batch = batch.view(-1, *size)\n return batch\n elif isinstance(batch, dict):\n return {key: flatten_first2dim(batch[key]) for key in batch}\n else:\n assert False, \"unsupported type: %s\" % type(batch)\n\n\ndef _tensor_slice(t, dim, b, e):\n if dim == 0:\n return t[b:e]\n elif dim == 1:\n return t[:, b:e]\n elif dim == 2:\n return t[:, :, b:e]\n else:\n raise ValueError(\"unsupported %d in tensor_slice\" % dim)\n\n\ndef tensor_slice(t, dim, b, e):\n if isinstance(t, dict):\n return {key: tensor_slice(t[key], dim, b, e) for key in t}\n elif isinstance(t, torch.Tensor):\n return _tensor_slice(t, dim, b, e).contiguous()\n else:\n assert False, \"Error: unsupported type: %s\" % (type(t))\n\n\ndef tensor_index(t, dim, i):\n if isinstance(t, dict):\n return {key: tensor_index(t[key], dim, i) for key in t}\n elif isinstance(t, torch.Tensor):\n return _tensor_slice(t, dim, i, i + 1).squeeze(dim).contiguous()\n else:\n assert False, \"Error: 
unsupported type: %s\" % (type(t))\n\n\ndef one_hot(x, n):\n assert x.dim() == 2 and x.size(1) == 1\n one_hot_x = torch.zeros(x.size(0), n, device=x.device)\n one_hot_x.scatter_(1, x, 1)\n return one_hot_x\n\n\ndef set_all_seeds(rand_seed):\n random.seed(rand_seed)\n np.random.seed(rand_seed + 1)\n torch.manual_seed(rand_seed + 2)\n torch.cuda.manual_seed(rand_seed + 3)\n\n\ndef weights_init(m):\n \"\"\"custom weights initialization\"\"\"\n if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal(m.weight.data)\n nn.init.orthogonal_(m.weight.data)\n else:\n print(\"%s is not custom-initialized.\" % m.__class__)\n\n\ndef init_net(net, net_file):\n if net_file:\n net.load_state_dict(torch.load(net_file))\n else:\n net.apply(weights_init)\n\n\ndef count_output_size(input_shape, model):\n fake_input = torch.FloatTensor(*input_shape)\n output_size = model.forward(fake_input).view(-1).size()[0]\n return output_size\n"
] | [
[
"numpy.ones",
"torch.FloatTensor",
"torch.load",
"torch.cuda.manual_seed",
"torch.manual_seed",
"numpy.random.seed",
"numpy.convolve",
"torch.nn.init.orthogonal_"
]
] |
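The `one_hot` helper in the record above is a thin wrapper over `Tensor.scatter_`; a minimal sketch of the same pattern, with illustrative shapes and indices:

```python
import torch

# x holds one class index per row, shape (N, 1), dtype int64.
x = torch.tensor([[1], [3], [0]])
one_hot_x = torch.zeros(x.size(0), 5, device=x.device)
one_hot_x.scatter_(1, x, 1)  # write a 1 at each row's index along dim 1
print(one_hot_x)
# tensor([[0., 1., 0., 0., 0.],
#         [0., 0., 0., 1., 0.],
#         [1., 0., 0., 0., 0.]])
```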
gboehl/pymetalog | [
"bcc1bfbf658f44f48d63a594d2b9de8b700a11a7"
] | [
"pymetalog/pdf_quantile_functions.py"
] | [
"import numpy as np\nfrom .support import pdfMetalog, quantileMetalog\n\n\ndef pdf_quantile_builder(temp, y, term_limit, bounds, boundedness):\n \"\"\"Builds the metalog pdf and quantile arrays based on the a coefficients found by fitting metalog distribution.\n\n Args:\n temp (:obj: `numpy.ndarray` of type float): Array of a coefficients found by fitting metalog distribution.\n - Fit method is specified by metalog.fit_method attribute\n\n y (:obj: `numpy.ndarray` of type float): Array of bin widths specified for `a` parameter\n\n term_limit (:obj: `int`): The upper limit of the range of metalog terms to use to fit the data.\n - metalog.term_limit attribute\n - in range [3,30]\n\n bounds (:obj:`list`): Upper and lower limits to filter the data with before calculating metalog quantiles/pdfs.\n - metalog.bounds attribute\n - Default: [0,1]\n\n boundedness (:obj: `str`): String that is used to specify the type of metalog to fit.\n - metalog.boundedness attribute\n\n Returns:\n q_dict (:obj:`dict` with keys ['m', 'M', 'y', 'valid']): Initialized output_dict variable from metalog class.\n - q_dict['m']: (:obj:`numpy.ndarray` of type float): Array of metalog pdf values.\n * Returned by `pdfMetalog` method\n * Influenced by `boundedness` parameter\n * A valid metalog fit will return an array having all elements strictly > 0\n\n - q_dict['M']: (:obj:`numpy.ndarray` of type float): Array of metalog quantile values.\n * Returned by `quantileMetalog` method\n * Influenced by `boundedness` parameter\n - `boundedness` = 'sl': Inserts `bounds`[0] to the front of the quantile array\n - `boundedness` = 'su': Appends `bounds`[1] to the end of the quantile array\n - `boundedness` = 'b': Inserts `bounds`[0] to the front of the quantile array\n and appends `bounds`[1] to the end of the quantile array\n\n - q_dict['y']: (:obj:`numpy.ndarray` of type float): Array of bin widths specified for the pdfs/quantiles.\n * Influenced by `boundedness` parameter\n - `boundedness` = 'sl': Inserts `bounds`[0] at the front of the quantile array\n - `boundedness` = 'su': Appends `bounds`[1] to the end of the quantile array\n - `boundedness` = 'b': Inserts `bounds`[0] at the front of the quantile array\n and appends `bounds`[1] to the end of the quantile array\n\n - q_dict['valid']: (:obj:`str`): A string indicating if the metalog pdf generated by `pdfMetalog` method is valid or not.\n * If all values in the metalog pdf are >= 0, q_dict['valid'] = 'yes'\n * If any values in the metalog pdf are < 0, q_dict['valid'] = 'no'\n\n \"\"\"\n q_dict = {}\n\n # build pdf\n m = pdfMetalog(temp, y[0], term_limit, bounds=bounds, boundedness=boundedness)\n\n for j in range(2, len(y) + 1):\n tempPDF = pdfMetalog(\n temp, y[j - 1], term_limit, bounds=bounds, boundedness=boundedness\n )\n m = np.append(m, tempPDF)\n\n # Build quantile values\n M = quantileMetalog(temp, y[1], term_limit, bounds=bounds, boundedness=boundedness)\n\n for j in range(2, len(y) + 1):\n tempQant = quantileMetalog(\n temp, y[j - 1], term_limit, bounds=bounds, boundedness=boundedness\n )\n M = np.append(M, tempQant)\n\n # Add trailing and leading zero's for pdf bounds\n if boundedness == \"sl\":\n m = np.append(0, m)\n M = np.append(bounds[0], M)\n\n if boundedness == \"su\":\n m = np.append(m, 0)\n M = np.append(M, bounds[1])\n\n if boundedness == \"b\":\n m = np.append(0, m)\n m = np.append(m, 0)\n M = np.append(bounds[0], M)\n M = np.append(M, bounds[1])\n\n # Add y values for bounded models\n if boundedness == \"sl\":\n y = np.append(0, y)\n\n if boundedness == 
\"su\":\n y = np.append(y, 1)\n\n if boundedness == \"b\":\n y = np.append(0, y)\n y = np.append(y, 1)\n\n q_dict[\"m\"] = m\n q_dict[\"M\"] = M\n q_dict[\"y\"] = y\n\n # PDF validation\n q_dict[\"valid\"] = pdfMetalogValidation(q_dict[\"m\"])\n\n return q_dict\n\n\ndef pdfMetalogValidation(x):\n \"\"\"Validation that all calculated metalog pdf values are greater than or equal to 0.\n\n Args:\n x (:obj: `numpy.ndarray` of type float): Array of metalog pdf values.\n - Returned by `pdfMetalog` method\n - Influenced by `boundedness` parameter\n\n Returns:\n 'yes' | 'no' (:obj:`str`): 'yes' if all elements strictly >= 0, else 'no'.\n \"\"\"\n y = np.min(x)\n if y >= 0:\n return \"yes\"\n else:\n return \"no\"\n"
] | [
[
"numpy.append",
"numpy.min"
]
] |
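The bounded branches in the record above pad the arrays with `numpy.append`; a short sketch of that padding with made-up pdf values:

```python
import numpy as np

m = np.array([0.2, 0.5, 0.3])        # illustrative pdf values
m_sl = np.append(0, m)               # 'sl': leading zero
m_su = np.append(m, 0)               # 'su': trailing zero
m_b = np.append(np.append(0, m), 0)  # 'b': zeros on both sides
print(m_b)  # [0.  0.2 0.5 0.3 0. ]
```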
vishalbelsare/DESlib | [
"64260ae7c6dd745ef0003cc6322c9f829c807708"
] | [
"deslib/dcs/a_posteriori.py"
] | [
"# coding=utf-8\n\n# Author: Rafael Menelau Oliveira e Cruz <[email protected]>\n#\n# License: BSD 3 clause\n\nimport numpy as np\n\nfrom deslib.dcs.base import BaseDCS\n\n\nclass APosteriori(BaseDCS):\n \"\"\"A Posteriori Dynamic classifier selection.\n\n The A Posteriori method uses the probability of correct classification of a\n given base classifier :math:`c_{i}` for each neighbor :math:`x_{k}` with\n respect to a single class. Consider a classifier :math:`c_{i}` that assigns\n a test sample to class :math:`w_{l}`. Then, only the samples belonging to\n class :math:`w_{l}` are taken into account during the competence level\n estimates. Base classifiers with a higher probability of correct\n classification have a higher competence level. Moreover, the method also\n weights the influence of each neighbor :math:`x_{k}` according to its\n Euclidean distance to the query sample. The closest neighbors have a higher\n influence on the competence level estimate. In cases where no sample in the\n region of competence belongs to the predicted class, :math:`w_{l}`, the\n competence level estimate of the base classifier is equal to zero.\n\n A single classifier is selected only if its competence level is\n significantly higher than that of the other base classifiers in the pool\n (higher than a pre-defined threshold). Otherwise, all classifiers in the\n pool are combined using the majority voting rule. The selection methodology\n can be modified by modifying the hyper-parameter selection_method.\n\n Parameters\n ----------\n pool_classifiers : list of classifiers (Default = None)\n The generated_pool of classifiers trained for the corresponding\n classification problem. Each base classifiers should support the method\n \"predict\" and \"predict_proba\". If None, then the pool of classifiers is\n a bagging classifier.\n\n k : int (Default = 7)\n Number of neighbors used to estimate the competence of the base\n classifiers.\n\n DFP : Boolean (Default = False)\n Determines if the dynamic frienemy pruning is applied.\n\n with_IH : Boolean (Default = False)\n Whether the hardness level of the region of competence is used to\n decide between using the DS algorithm or the KNN for classification of\n a given query sample.\n\n safe_k : int (default = None)\n The size of the indecision region.\n\n IH_rate : float (default = 0.3)\n Hardness threshold. If the hardness level of the competence region is\n lower than the IH_rate the KNN classifier is used. Otherwise, the DS\n algorithm is used for classification.\n\n selection_method : String (Default = \"best\")\n Determines which method is used to select the base classifier after\n the competences are estimated.\n\n diff_thresh : float (Default = 0.1)\n Threshold to measure the difference between the competence level of the\n base classifiers for the random and diff selection schemes. 
If the\n difference is lower than the threshold, their performances are\n considered equivalent.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n knn_classifier : {'knn', 'faiss', None} (Default = 'knn')\n The algorithm used to estimate the region of competence:\n\n - 'knn' will use :class:`KNeighborsClassifier` from sklearn\n :class:`KNNE` available on `deslib.utils.knne`\n\n - 'faiss' will use Facebook's Faiss similarity search through the\n class :class:`FaissKNNClassifier`\n\n - None, will use sklearn :class:`KNeighborsClassifier`.\n\n knne : bool (Default=False)\n Whether to use K-Nearest Neighbor Equality (KNNE) for the region\n of competence estimation.\n\n DSEL_perc : float (Default = 0.5)\n Percentage of the input data used to fit DSEL.\n Note: This parameter is only used if the pool of classifiers is None or\n unfitted.\n\n n_jobs : int, default=-1\n The number of parallel jobs to run. None means 1 unless in\n a joblib.parallel_backend context. -1 means using all processors.\n Doesn’t affect fit method.\n\n References\n ----------\n G. Giacinto and F. Roli, Methods for Dynamic Classifier Selection\n 10th Int. Conf. on Image Anal. and Proc., Venice, Italy (1999), 659-664.\n\n Ko, Albert HR, Robert Sabourin, and Alceu Souza Britto Jr. \"From dynamic\n classifier selection to dynamic ensemble selection.\"\n Pattern Recognition 41.5 (2008): 1718-1731.\n\n Britto, Alceu S., Robert Sabourin, and Luiz ES Oliveira. \"Dynamic selection\n of classifiers—a comprehensive review.\"\n Pattern Recognition 47.11 (2014): 3665-3680.\n\n R. M. O. Cruz, R. Sabourin, and G. D. Cavalcanti, “Dynamic classifier\n selection: Recent advances and perspectives,”\n Information Fusion, vol. 41, pp. 
195 – 216, 2018.\n\n \"\"\"\n\n def __init__(self, pool_classifiers=None, k=7, DFP=False, with_IH=False,\n safe_k=None, IH_rate=0.30, selection_method='diff',\n diff_thresh=0.1, random_state=None, knn_classifier='knn',\n knne=False, DSEL_perc=0.5, n_jobs=-1):\n super(APosteriori, self).__init__(pool_classifiers=pool_classifiers,\n k=k, DFP=DFP, with_IH=with_IH,\n safe_k=safe_k, IH_rate=IH_rate,\n selection_method=selection_method,\n diff_thresh=diff_thresh,\n knn_classifier=knn_classifier,\n random_state=random_state,\n knne=knne,\n DSEL_perc=DSEL_perc, n_jobs=n_jobs)\n\n def fit(self, X, y):\n \"\"\"Prepare the DS model by setting the KNN algorithm and\n pre-processing the information required to apply the DS\n method.\n\n Parameters\n ----------\n X : array of shape (n_samples, n_features)\n Data used to fit the model.\n\n y : array of shape (n_samples)\n class labels of each example in X.\n\n Returns\n -------\n self\n \"\"\"\n super(APosteriori, self).fit(X, y)\n self._check_predict_proba()\n\n self.dsel_scores_ = self._predict_proba_base(self.DSEL_data_)\n return self\n\n def estimate_competence(self, competence_region, distances,\n predictions=None):\n \"\"\"Estimate the competence of each base classifier :math:`c_{i}` for\n the classification of the query sample using the A Posteriori method.\n\n The competence level is estimated based on the probability of correct\n classification of the base classifier :math:`c_{i}`, for each neighbor\n :math:`x_{k}` belonging to a specific class :math:`w_{l}`.\n In this case, :math:`w_{l}` is the class predicted by the base\n classifier :math:`c_{i}`, for the query sample. This method also\n weights the influence of each training sample according to its\n Euclidean distance to the query instance. The closest samples have a\n higher influence in the computation of the competence level. The\n competence level estimate is represented by the following equation:\n\n .. 
math:: \\\\delta_{i,j} = \\\\frac{\\\\sum_{\\\\mathbf{x}_{k} \\\\in\n \\\\omega_{l}}P(\\\\omega_{l} \\\\mid \\\\mathbf{x}_{k}, c_{i} )W_{k}}\n {\\\\sum_{k = 1}^{K}P(\\\\omega_{l} \\\\mid \\\\mathbf{x}_{k}, c_{i} )W_{k}}\n\n where :math:`\\\\delta_{i,j}` represents the competence level of\n :math:`c_{i}` for the classification of query.\n\n Parameters\n ----------\n competence_region : array of shape (n_samples, n_neighbors)\n Indices of the k nearest neighbors.\n\n distances : array of shape (n_samples, n_neighbors)\n Distances from the k nearest neighbors to the query.\n\n predictions : array of shape (n_samples, n_classifiers)\n Predictions of the base classifiers for the test examples.\n\n Returns\n -------\n competences : array of shape (n_samples, n_classifiers)\n Competence level estimated for each base classifier and test\n example.\n \"\"\"\n # Guarantee that these arrays are view as a 2D array for the case where\n # a single test sample is passed down.\n predictions = np.atleast_2d(predictions)\n distances[distances == 0] = 1e-10\n\n # Normalize the distances\n dists_normalized = 1.0 / distances\n\n # Expanding the dimensions of the predictions and target arrays in\n # order to compare both.\n predictions_3d = np.expand_dims(predictions, axis=1)\n target_3d = self.DSEL_target_[competence_region, np.newaxis]\n\n # Create a mask to remove the neighbors belonging to a different class\n # than the predicted by the base classifier\n mask = (predictions_3d != target_3d)\n\n # Broadcast the distance array to the same shape as the pre-processed\n # information for future calculations\n dists_normalized = np.repeat(np.expand_dims(dists_normalized, axis=2),\n self.n_classifiers_, axis=2)\n\n # Multiply the pre-processed correct predictions by the base\n # classifiers to the distance array\n scores_target = self.dsel_scores_[competence_region, :,\n self.DSEL_target_[competence_region]]\n scores_target_norm = scores_target * dists_normalized\n\n # Create masked arrays to remove samples with different label in the\n # calculations\n masked_preprocessed = np.ma.MaskedArray(scores_target_norm, mask=mask)\n masked_dist = np.ma.MaskedArray(dists_normalized, mask=mask)\n\n # Consider only the neighbor samples where the predicted label is\n # equals to the neighbor label\n competences_masked = np.ma.sum(masked_preprocessed,\n axis=1) / np.ma.sum(masked_dist, axis=1)\n\n # Fill 0 to the masked values in the resulting array (when no neighbors\n # belongs to the class predicted by the corresponding base classifier)\n competences = np.ma.filled(competences_masked, 0)\n\n return competences\n"
] | [
[
"numpy.atleast_2d",
"numpy.ma.filled",
"numpy.ma.sum",
"numpy.expand_dims",
"numpy.ma.MaskedArray"
]
] |
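A toy version of the masked, distance-weighted averaging in `estimate_competence` above; numbers are made up, with rows as query samples and columns as neighbors:

```python
import numpy as np

scores = np.array([[0.9, 0.2], [0.6, 0.8]])       # P(correct) per neighbor
dists = np.array([[1.0, 0.5], [1.0, 0.5]])        # inverse-distance weights
mask = np.array([[False, True], [False, False]])  # True = wrong-class neighbor

num = np.ma.sum(np.ma.MaskedArray(scores * dists, mask=mask), axis=1)
den = np.ma.sum(np.ma.MaskedArray(dists, mask=mask), axis=1)
print(np.ma.filled(num / den, 0))  # [0.9        0.66666667]
```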
Masao-Someki/CycleVAE_VC | [
"be4a27637a3f8b6272d96105f9b3c9327f6c16f7"
] | [
"src/decode/decoder.py"
] | [
"# Copyright 2020 Masao Someki\n# MIT License (https://opensource.org/licenses/MIT)\nimport os\nimport glob\nimport h5py\nimport logging\n\nimport librosa\nimport numpy as np\nfrom scipy.io import wavfile\n\nfrom speech import Synthesizer\n\nIRLEN = 1024\nINTERVALS = 10\nSEED = 1\nLP_CUTOFF = 20\n\n\nclass Decoder(object):\n def __init__(self, args, scaler, logger=None):\n # directory to save wav files\n self.save_dir = args.exp_dir\n self.fs = args.fs\n self.shiftms = args.shiftms\n self.fftl = args.fftl\n\n # mcep_alpha\n if args.fs == 16000:\n self.mcep_alpha = 0.41\n elif args.fs == 22050:\n self.mcep_alpha = 0.455\n elif args.fs == 24000:\n self.mcep_alpha = 0.466\n elif args.fs == 44100:\n self.mcep_alpha = 0.544\n elif args.fs == 48000:\n self.mcep_alpha = 0.554\n else:\n raise ValueError('sampling rate should be one of \\\n 16000, 22050, 24000, 44100, 48000')\n\n # scaler\n self.scaler = scaler\n\n # synthesizer\n self.synthesizer = Synthesizer(fs=args.fs, fftl=args.fftl, shiftms=args.shiftms)\n\n # logger\n if logger is not None:\n self.logger = logger\n else:\n self.logger = logging.getLogger(__name__)\n\n def _inverse_transform(self, key, x):\n m = self.scaler[key].mean_\n s = self.scaler[key].scale_\n return x * s + m\n\n def decode(self, inputs, output, iter_count, i):\n # directory\n wav_dir = os.path.join(self.save_dir, str(iter_count))\n\n if not os.path.exists(wav_dir):\n os.mkdir(wav_dir)\n\n # process over all data\n for b in range(len(output['reconst_half'][0])):\n # flen\n flen = inputs['flen'][b]\n\n # mcep\n mcep = inputs['mcep'][b][:flen].cpu().detach().numpy()\n mcep = self._inverse_transform('mcep', mcep).astype(np.float64)\n\n # process src-src wav\n cvmcep = output['reconst_half'][0][b][:flen].cpu().detach().numpy()\n cvmcep = self._inverse_transform('mcep', cvmcep).astype(np.float64)\n\n # codeap\n codeap = inputs['codeap'][b][:flen].cpu().detach().numpy().astype(np.float64)\n codeap = self._inverse_transform('codeap', codeap)\n\n # synthesize\n wav = self.synthesizer.synthesis(\n inputs['f0'][b][:flen].squeeze(1).cpu().detach().numpy().astype(np.float64),\n cvmcep,\n codeap,\n alpha=self.mcep_alpha,\n rmcep=mcep\n )\n wav = np.clip(wav, -32768, 32767)\n wav_file = os.path.join(\n wav_dir,\n '%s_%s_%d.wav' % (inputs['src'][b], inputs['src'][b], i)\n )\n wavfile.write(wav_file, self.fs, wav.astype(np.int16))\n\n # process src-trg wav\n cvmcep = output['trg_reconst'][b][:flen].cpu().detach().numpy()\n cvmcep = self._inverse_transform('mcep', cvmcep).astype(np.float64)\n\n # convert f0\n cvf0 = inputs['cv_f0'][b][:flen].squeeze(1).cpu().detach().numpy().astype(np.float64)\n\n # synthesize\n wav = self.synthesizer.synthesis(\n cvf0,\n cvmcep,\n codeap,\n alpha=self.mcep_alpha,\n rmcep=mcep\n )\n wav = np.clip(wav, -32768, 32767)\n wav_file = os.path.join(\n wav_dir,\n '%s_%s_%d.wav' % (inputs['src'][b], inputs['trg'][b], i)\n )\n wavfile.write(wav_file, self.fs, wav.astype(np.int16))\n"
] | [
[
"numpy.clip"
]
] |
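The decoder above clips each synthesized waveform into the signed 16-bit range before writing; a minimal sketch, with an illustrative sample rate and file name:

```python
import numpy as np
from scipy.io import wavfile

wav = np.array([40000.0, -40000.0, 123.4])  # made-up float samples
wav = np.clip(wav, -32768, 32767)           # avoid int16 wrap-around on cast
wavfile.write("out.wav", 24000, wav.astype(np.int16))
```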
srinivasans/DeepSepsis | [
"8647a2ec93ad5a937638acfc279a756bbfa04f7f"
] | [
"deprecated/Imputation/GRUI/Run_GAN_imputed.py"
] | [
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 26 10:47:41 2018\n\n@author: yonghong\n\"\"\"\n\nfrom __future__ import print_function\nimport sys\nsys.path.append(\"..\")\nimport argparse\nimport os\nimport tensorflow as tf\nfrom Physionet2019ImputedSepsisData import readImputed\nimport gru_delta_forGAN\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='manual to this script')\n parser.add_argument('--gpus', type=str, default = None)\n parser.add_argument('--batch-size', type=int, default=128)\n parser.add_argument('--run-type', type=str, default='test')\n parser.add_argument('--data-path', type=str, default=\"../Gan_Imputation/imputation_train_results/WGAN_no_mask/\")\n #输入填充之后的训练数据集的完整路径 Gan_Imputation/imputation_train_results/WGAN_no_mask/30_8_128_64_0.001_400_True_True_True_0.15_0.5\n parser.add_argument('--model-path', type=str, default=None)\n parser.add_argument('--result-path', type=str, default=None)\n parser.add_argument('--lr', type=float, default=0.01)\n #parser.add_argument('--epoch', type=int, default=20)\n parser.add_argument('--n-inputs', type=int, default=41)\n parser.add_argument('--n-hidden-units', type=int, default=64)\n parser.add_argument('--n-classes', type=int, default=2)\n parser.add_argument('--checkpoint-dir', type=str, default='checkpoint_physionet_imputed',\n help='Directory name to save the checkpoints')\n parser.add_argument('--log-dir', type=str, default='logs_physionet_imputed',\n help='Directory name to save training logs')\n parser.add_argument('--isNormal',type=int,default=1)\n parser.add_argument('--isSlicing',type=int,default=1)\n #0 false 1 true\n parser.add_argument('--isBatch-normal',type=int,default=1)\n args = parser.parse_args()\n\n\n if args.isBatch_normal==0:\n args.isBatch_normal=False\n if args.isBatch_normal==1:\n args.isBatch_normal=True\n if args.isNormal==0:\n args.isNormal=False\n if args.isNormal==1:\n args.isNormal=True\n if args.isSlicing==0:\n args.isSlicing=False\n if args.isSlicing==1:\n args.isSlicing=True\n\n\n checkdir=args.checkpoint_dir\n logdir=args.log_dir\n base=args.data_path\n data_paths=[\"30_8_128_64_0.001_400_True_True_False_0.15_0.5\"]\n max_auc = 0.0\n for d in data_paths:\n args.data_path=os.path.join(base,d)\n path_splits=args.data_path.split(\"/\")\n if len(path_splits[-1])==0:\n datasetName=path_splits[-2]\n else:\n datasetName=path_splits[-1]\n args.checkpoint_dir=checkdir+\"/\"+datasetName\n args.log_dir=logdir+\"/\"+datasetName\n\n dt_train=readImputed.ReadImputedPhysionetData(args.data_path)\n dt_train.load()\n\n dt_test=readImputed.ReadImputedPhysionetData(args.data_path.replace(\"imputation_train_results\",\"imputation_test_results\"))\n dt_test.load()\n\n lrs=[0.004,0.003,0.005,0.006,0.007,0.008,0.009,0.01,0.012,0.015]\n #lrs = [0.0075,0.0085]\n for lr in lrs:\n args.lr=lr\n epoch=30\n args.epoch=epoch\n print(\"epoch: %2d\"%(epoch))\n tf.reset_default_graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n model = gru_delta_forGAN.grui(sess,\n args=args,\n dataset=dt_train,\n test_set = dt_test\n )\n\n # build graph\n model.build()\n\n auc = model.train()\n if auc > max_auc:\n max_auc = auc\n\n print(\"\")\n print(\"max auc is: \" + str(max_auc))\n f2 = open(\"max_auc\",\"w\")\n f2.write(str(max_auc))\n f2.close()\n\n\n"
] | [
[
"tensorflow.reset_default_graph",
"tensorflow.ConfigProto",
"tensorflow.Session"
]
] |
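The learning-rate sweep above rebuilds the TensorFlow 1.x graph on every iteration; a minimal sketch of that reset-and-session pattern:

```python
import tensorflow as tf  # TF 1.x API, as in the record above

for lr in [0.004, 0.008]:
    # Without reset_default_graph(), each iteration would keep adding
    # ops to the same global graph.
    tf.reset_default_graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # don't grab all GPU memory upfront
    with tf.Session(config=config) as sess:
        pass  # build, train, and evaluate a model with learning rate `lr`
```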
middleprince/fashionAi | [
"c512936b4983c2fb093008f06e04753180af0a90"
] | [
"run_local_mertric.py"
] | [
"import os\nimport sys\nimport time\nimport numpy as np\nimport pandas as pd\nimport argparse\nimport math\n\nimport config as cfg\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\nparser = argparse.ArgumentParser(\n description='The Normarlized Error Mertric Calculation For FashionAI Keypoint Detection Script.')\ntrain_set = parser.add_mutually_exclusive_group()\nparser.add_argument('--prediction', default='',\n help='The path of file containing the prediction of keypoints.')\nparser.add_argument('--cat', type=lambda s: s.lower() in ['True', 'true', 't', 'yes', '1'], help=\"whether print Normarlized Error for each catgory\")\nparser.add_argument('--gt', default='./stage1_testb_gt.csv',\n help='The path of file containing the ground truth of keypoints.')\n\nargs = parser.parse_args()\n\ndef run():\n if args.prediction.strip() == '' or args.gt.strip() == '':\n parser.error('Must specify the file path of the prediction and ground truth.')\n\n pred_df = pd.read_csv(args.prediction, encoding='utf-8')\n gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')\n\n\n num_v = 0.\n sum_dist = 0.\n for index, row in pred_df.iterrows():\n gt = gt_df.loc[row['image_id']]\n img_cat = gt['image_category']\n gt_points = {}\n pred_points = {}\n\n for kp in cfg.all_keys:\n pred_kp = row[kp].strip().split('_')\n gt_kp = gt[kp].strip().split('_')\n pred_points[kp] = [int(_) for _ in pred_kp]\n gt_points[kp] = [int(_) for _ in gt_kp]\n\n lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]\n lnorm, rnorm = gt_points[lnorm_name][:-1], gt_points[rnorm_name][:-1]\n norm_value = math.pow(math.pow(lnorm[0] - rnorm[0], 2.) + math.pow(lnorm[1] - rnorm[1], 2.), 0.5)\n\n\n for kp in cfg.all_keys:\n if gt_points[kp][-1] == -1 or norm_value < 1e-3:\n continue\n num_v += 1.\n\n dist = math.pow(math.pow(pred_points[kp][0] - gt_points[kp][0], 2.) + math.pow(pred_points[kp][1] - gt_points[kp][1], 2.), 0.5)\n sum_dist += dist/norm_value\n\n sum_dist = sum_dist/num_v\n print(sum_dist)\n\ndef run_by_cat():\n if args.prediction.strip() == '' or args.gt.strip() == '':\n parser.error('Must specify the file path of the prediction and ground truth.')\n\n pred_df = pd.read_csv(args.prediction, encoding='utf-8')\n gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')\n\n for cat_ in cfg.CATEGORIES:\n num_v = 0.\n sum_dist = 0.\n for index, row in pred_df.iterrows():\n gt = gt_df.loc[row['image_id']]\n img_cat = gt['image_category']\n if cat_ not in img_cat:\n continue\n gt_points = {}\n pred_points = {}\n\n for kp in cfg.all_keys:\n pred_kp = row[kp].strip().split('_')\n gt_kp = gt[kp].strip().split('_')\n pred_points[kp] = [int(_) for _ in pred_kp]\n gt_points[kp] = [int(_) for _ in gt_kp]\n\n lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]\n lnorm, rnorm = gt_points[lnorm_name][:-1], gt_points[rnorm_name][:-1]\n norm_value = math.pow(math.pow(lnorm[0] - rnorm[0], 2.) + math.pow(lnorm[1] - rnorm[1], 2.), 0.5)\n\n\n for kp in cfg.all_keys:\n if gt_points[kp][-1] == -1 or norm_value < 1e-3:\n continue\n num_v += 1.\n\n dist = math.pow(math.pow(pred_points[kp][0] - gt_points[kp][0], 2.) + math.pow(pred_points[kp][1] - gt_points[kp][1], 2.), 0.5)\n sum_dist += dist/norm_value\n\n sum_dist = sum_dist/num_v\n print('{}:'.format(cat_), sum_dist)\n\nif __name__ == '__main__':\n if not args.cat:\n run()\n else:\n run_by_cat()\n"
] | [
[
"pandas.read_csv"
]
] |
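A worked example of the normalized error computed above: per-keypoint Euclidean distance divided by the category's normalization distance. All coordinates are made up:

```python
import math

pred, gt = (110, 205), (100, 200)   # predicted / ground-truth keypoint
lnorm, rnorm = (50, 50), (250, 50)  # the category's normalization point pair
norm_value = math.hypot(lnorm[0] - rnorm[0], lnorm[1] - rnorm[1])  # 200.0
ne = math.hypot(pred[0] - gt[0], pred[1] - gt[1]) / norm_value
print(ne)  # sqrt(125) / 200 ≈ 0.0559
```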
aasensio/Lightweaver | [
"9a261e72235f05df548148da140012f40dbd1e4b"
] | [
"examples/plot_SimpleLineTest.py"
] | [
"\"\"\"\n===============================================================\nComputing a simple NLTE 8542 line profile in a FAL C atmosphere\n===============================================================\n\"\"\"\n#%%\n# First, we import everything we need. Lightweaver is typically imported as\n# `lw`, but things like the library of model atoms and Fal atmospheres need to\n# be imported separately.\nfrom lightweaver.fal import Falc82\nfrom lightweaver.rh_atoms import H_6_atom, C_atom, O_atom, Si_atom, Al_atom, \\\nCaII_atom, Fe_atom, He_9_atom, He_atom, MgII_atom, N_atom, Na_atom, S_atom\nimport lightweaver as lw\nimport matplotlib.pyplot as plt\nimport time\nimport numpy as np\n\n\n#%%\n# Now, we define the functions that will be used in our spectral synthesise.\n# First `synth_8542` which synthesises and returns the line given by an\n# atmosphere.\ndef synth_8542(atmos, conserve, useNe, wave):\n '''\n Synthesise a spectral line for given atmosphere with different\n conditions.\n\n Parameters\n ----------\n atmos : lw.Atmosphere\n The atmospheric model in which to synthesise the line.\n conserve : bool\n Whether to start from LTE electron density and conserve charge, or\n simply use from the electron density present in the atomic model.\n useNe : bool\n Whether to use the electron density present in the model as the\n starting solution, or compute the LTE electron density.\n wave : np.ndarray\n Array of wavelengths over which to resynthesise the final line\n profile for muz=1.\n\n Returns\n -------\n ctx : lw.Context\n The Context object that was used to compute the equilibrium\n populations.\n Iwave : np.ndarray\n The intensity at muz=1 for each wavelength in `wave`.\n '''\n # Configure the atmospheric angular quadrature\n atmos.quadrature(5)\n # Configure the set of atomic models to use.\n aSet = lw.RadiativeSet([H_6_atom(), C_atom(), O_atom(), Si_atom(),\n Al_atom(), CaII_atom(), Fe_atom(), He_9_atom(),\n MgII_atom(), N_atom(), Na_atom(), S_atom()\n ])\n # Set H and Ca to \"active\" i.e. 
NLTE, everything else participates as an\n # LTE background.\n aSet.set_active('H', 'Ca')\n # Compute the necessary wavelength dependent information (SpectrumConfiguration).\n spect = aSet.compute_wavelength_grid()\n\n # Either compute the equilibrium populations at the fixed electron density\n # provided in the model, or iterate an LTE electron density and compute the\n # corresponding equilibrium populations (SpeciesStateTable).\n if useNe:\n eqPops = aSet.compute_eq_pops(atmos)\n else:\n eqPops = aSet.iterate_lte_ne_eq_pops(atmos)\n\n # Configure the Context which holds the state of the simulation for the\n # backend, and provides the python interface to the backend.\n # Feel free to increase Nthreads to increase the number of threads the\n # program will use.\n ctx = lw.Context(atmos, spect, eqPops, conserveCharge=conserve, Nthreads=1)\n start = time.time()\n # Iterate the Context to convergence\n iterate_ctx(ctx)\n end = time.time()\n print('%.2f s' % (end - start))\n # Update the background populations based on the converged solution and\n # compute the final intensity for mu=1 on the provided wavelength grid.\n eqPops.update_lte_atoms_Hmin_pops(atmos)\n Iwave = ctx.compute_rays(wave, [atmos.muz[-1]], stokes=False)\n return ctx, Iwave\n\ndef iterate_ctx(ctx, Nscatter=3, NmaxIter=500):\n '''\n Iterate a Context to convergence.\n '''\n for i in range(NmaxIter):\n # Compute the formal solution\n dJ = ctx.formal_sol_gamma_matrices()\n # Just update J for Nscatter iterations\n if i < Nscatter:\n continue\n # Update the active populations under statistical equilibrium,\n # conserving charge if this option was set on the Context.\n delta = ctx.stat_equil()\n\n # If we are converged in both relative change of J and populations,\n # then print a message and return\n # N.B. as this is just a simple case, there is no checking for failure\n # to converge within the NmaxIter. This could be achieved simpy with an\n # else block after this for.\n if dJ < 3e-3 and delta < 1e-3:\n print('%d iterations' % i)\n print('-'*80)\n return\n\n\n#%%\n# The wavelength grid to output the final synthesised line on.\nwave = np.linspace(853.9444, 854.9444, 1001)\n\n#%%\n# Load an lw.Atmosphere object containing the FAL C atmosphere with 82 points\n# in depth, before synthesising the Ca II 8542 \\AA line profile using:\n#\n# - The given electron density.\n# - The electron density charge conserved from a starting LTE solution.\n# - The LTE electron density.\n#\n# These results are then plotted.\n\natmosRef = Falc82()\nctxRef, IwaveRef = synth_8542(atmosRef, conserve=False, useNe=True, wave=wave)\natmosCons = Falc82()\nctxCons, IwaveCons = synth_8542(atmosCons, conserve=True, useNe=False, wave=wave)\natmosLte = Falc82()\nctx, IwaveLte = synth_8542(atmosLte, conserve=False, useNe=False, wave=wave)\n\nplt.plot(wave, IwaveRef, label='Reference FAL')\nplt.plot(wave, IwaveCons, label='Reference Cons')\nplt.plot(wave, IwaveLte, label='Reference LTE n_e')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
satishjasthi/convnet-study | [
"ccd20c90e449fc8db694abf706db178e9413e57b"
] | [
"rme/datasets/mnist.py"
] | [
"from __future__ import absolute_import\n\nimport os\nimport numpy as np\nimport gzip\nimport struct\n\nfrom .preprocessing import one_hotify\n\ndef load(data_dir, valid_ratio=0.0, one_hot=True, shuffle=False, dtype='float32'):\n\n train_set, valid_set, test_set = {}, {}, {}\n # Get data from binary files\n for img_set, file_name in zip((train_set, test_set), ('train', 't10k')):\n # Load images\n img_path = os.path.join(data_dir, file_name + '-images-idx3-ubyte.gz')\n with gzip.open(img_path, 'rb') as f:\n magic_num, num_imgs, num_rows, num_cols = struct.unpack('>iiii',\n f.read(16))\n shape = (num_imgs, num_rows, num_cols, 1)\n img_set['data'] = np.fromstring(f.read(),\n dtype='uint8').astype(dtype).reshape(shape)\n\n # Load labels\n label_path = os.path.join(data_dir, file_name + '-labels-idx1-ubyte.gz')\n with gzip.open(label_path, 'rb') as f:\n magic_num, num_labels = struct.unpack('>ii', f.read(8))\n img_set['labels'] = np.fromstring(f.read(),\n dtype='uint8').astype('int')\n if one_hot:\n img_set['labels'] = one_hotify(img_set['labels'])\n\n N = train_set['data'].shape[0]\n if shuffle:\n # Shuffle and separate between training and validation set\n new_order = np.random.permutation(np.arange(N))\n train_set['data'] = train_set['data'][new_order]\n train_set['labels'] = train_set['labels'][new_order]\n\n # Get the number of samples on the training set\n M = int((1 - valid_ratio)*N)\n # Separate validation set\n valid_set['data'] = train_set['data'][M:]\n valid_set['labels'] = train_set['labels'][M:]\n train_set['data'] = train_set['data'][:M]\n train_set['labels'] = train_set['labels'][:M]\n\n return train_set, valid_set, test_set\n\ndef preprocess(dataset):\n mean = 33.3\n std = 78.6\n\n dataset -= mean\n dataset /= std\n\n return dataset\n"
] | [
[
"numpy.arange"
]
] |
Giorgiobientinesi/Workshop2 | [
"f454499d4befdb705b4672be25d8698ef2b37116"
] | [
"Model.py"
] | [
"import pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error\n\n\ndf = pd.read_csv(\"Airbnb-cleaned.csv\")\ndf.columns\ndel df[\"Unnamed: 0\"]\n\ndf1 = df[['neighbourhood', 'property_type', 'room_type']]\n# IMPORT ENCODER\nfrom sklearn.preprocessing import OneHotEncoder\n\n# FIT ENCODER ON THE ORIGINAL DATASET TO MAKE IT REMEMBER CATEGORIES\nenc = OneHotEncoder(sparse=False)\nenc.fit(df1)\n\n\n\ndf[\"neighbourhood\"].unique()\n\ndf[['Bijlmer-Oost', 'Noord-Oost', 'Noord-West', 'Oud-Noord',\n 'IJburg - Zeeburgereiland', 'Centrum-West',\n 'Oostelijk Havengebied - Indische Buurt', 'Centrum-Oost',\n 'Oud-Oost', 'Watergraafsmeer', 'Gaasperdam - Driemond',\n 'Westerpark', 'Bijlmer-Centrum', 'De Pijp - Rivierenbuurt', 'Zuid',\n 'Buitenveldert - Zuidas', 'De Baarsjes - Oud-West',\n 'Bos en Lommer', 'Geuzenveld - Slotermeer', 'Slotervaart',\n 'Osdorp', 'De Aker - Nieuw Sloten',\n 'Apartment', 'Bed & Breakfast', 'House',\n 'Entire home/apt', 'Private room', 'Shared room']] = enc.transform(\n df1[[\"neighbourhood\", \"property_type\", \"room_type\"]])\n\n\ndf = df.drop([\"neighbourhood\", \"property_type\", \"room_type\"], axis =1)\ndf[\"Distance_from_center(m)\"] = df[\"Distance_from_center(m)\"]/1000\n\ny = df['price']\ndata = df.drop(['price'], axis=1)\nX_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=7)\nmodel = RandomForestRegressor()\nmodel.fit(X_train,y_train)\npred = model.predict(X_test)\n\nmean_absolute_error(y_test, pred)\n\n\nfrom joblib import dump, load\ndump(model, 'Airbnb.joblib')\n\n\n\n"
] | [
[
"pandas.read_csv",
"sklearn.metrics.mean_absolute_error",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder"
]
] |
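The fit-then-transform pattern in the record above, sketched on a toy frame; `sparse=False` matches the older scikit-learn API the script uses (newer releases spell it `sparse_output`):

```python
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

df1 = pd.DataFrame({"room_type": ["Entire home/apt", "Private room",
                                  "Entire home/apt"]})
enc = OneHotEncoder(sparse=False)  # dense output, older sklearn API
enc.fit(df1)
print(enc.categories_)     # learned category order per column
print(enc.transform(df1))  # 3 x 2 indicator matrix in that order
```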
DiamondLightSource/SuRVoS2 | [
"42bacfb6a5cc267f38ca1337e51a443eae1a9d2b"
] | [
"survos2/improc/regions/ccl.py"
] | [
"import logging\r\n\r\nimport os.path as op\r\n\r\nimport numpy as np\r\n\r\nimport pycuda.driver as cuda\r\nimport pycuda.gpuarray as gpuarray\r\nimport pycuda.autoinit\r\nfrom pycuda.compiler import SourceModule\r\n\r\nfrom ..improc_types import int3\r\nfrom ..utils import gpuregion, cpuregion\r\nfrom ..cuda import asgpuarray, grid_kernel_config\r\n\r\nfrom ._ccl import _remap, _relabel2d, _relabel3d, _merge_small3d\r\n\r\n\r\n__dirname__ = op.dirname(__file__)\r\n\r\n\r\n@gpuregion\r\ndef ccl3d(labels, remap=True):\r\n assert labels.ndim == 3\r\n assert labels.dtype == np.uint32\r\n\r\n with open(op.join(__dirname__, \"kernels\", \"ccl3d.cu\"), \"r\") as f:\r\n _mod_conv = SourceModule(f.read())\r\n gpu_ccl_local = _mod_conv.get_function(\"uf_local\")\r\n gpu_ccl_global = _mod_conv.get_function(\"uf_global\")\r\n gpu_ccl_final = _mod_conv.get_function(\"uf_final\")\r\n\r\n labels_gpu = asgpuarray(labels, dtype=np.uint32)\r\n result_gpu = gpuarray.zeros_like(labels_gpu)\r\n shape = np.asarray(tuple(labels.shape[::-1]), dtype=int3)\r\n\r\n block, grid = grid_kernel_config(gpu_ccl_local, labels.shape)\r\n shared = int(np.prod(block) * 8)\r\n\r\n gpu_ccl_local(labels_gpu, result_gpu, shape, block=block, grid=grid, shared=shared)\r\n gpu_ccl_global(labels_gpu, result_gpu, shape, block=block, grid=grid)\r\n gpu_ccl_final(result_gpu, shape, block=block, grid=grid)\r\n\r\n if remap:\r\n return remap_labels(result_gpu.get())\r\n\r\n return result_gpu\r\n\r\n\r\ndef remap_labels(labels):\r\n assert labels.dtype == np.uint32\r\n new_labels = _remap(labels.ravel())\r\n new_labels.shape = labels.shape\r\n return new_labels\r\n\r\n\r\ndef relabel(labels):\r\n assert labels.dtype == np.uint32\r\n\r\n if labels.ndim == 2:\r\n new_labels = _relabel2d(labels.ravel(), labels.shape[1])\r\n elif labels.ndim == 3:\r\n new_labels = _relabel3d(labels.ravel(), labels.shape[1], labels.shape[2])\r\n else:\r\n raise ValueError(\r\n \"Input array has to be 2 or 3 dimensional: {}\".format(labels.ndim)\r\n )\r\n\r\n new_labels.shape = labels.shape\r\n return new_labels\r\n\r\n\r\n@cpuregion\r\ndef merge_small(data, labels, min_size=1, **kwargs):\r\n if data.ndim != labels.ndim + 1:\r\n data = data[..., None]\r\n assert data.ndim == labels.ndim + 1\r\n return _merge_small3d(data, labels, labels.max() + 1, min_size)\r\n"
] | [
[
"numpy.prod"
]
] |
SamuelWiqvist/snpla | [
"9d586c5d09de3eecd2536485af6fc28a915443e4"
] | [
"mv_gaussian/low_dim_w_summary_stats/run_script_snpla.py"
] | [
"# Imports\nimport sys\nimport torch\nimport os\nimport time\nimport numpy as np\nfrom torch.distributions.multivariate_normal import MultivariateNormal\n\n# Initial set up\nlunarc = int(sys.argv[1])\ndim = int(sys.argv[2])\nseed = int(sys.argv[3])\nseed_data = int(sys.argv[4])\nhp_tuning = int(sys.argv[5]) # if hp_tuning = 0, no hyper-param tuning, else hp_tuning for that sample of the hp\nlambda_val = float(sys.argv[6]) # if hp_tuning = 0, no hyper-param tuning, else hp_tuning for that sample of the hp\n\nprint(\"Input args:\")\nprint(\"Dim: \" + str(dim))\nprint(\"seed: \" + str(seed))\nprint(\"seed_data: \" + str(seed_data))\n\n\nid_job = str(dim) + '_' + str(seed) + '_' + str(seed_data)\n\nif hp_tuning > 0:\n id_job = id_job + \"_\" + str(hp_tuning)\n\nif lambda_val > 0:\n id_job = id_job + \"_\" + str(lambda_val)\n\n# Set wd\nprint(os.getcwd())\n\n# set the wd to the base folder for the project\nif lunarc == 1:\n os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev')\nelse:\n os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev')\n\nsys.path.append('./')\n\nprint(os.getcwd())\n\n# Load all utility functions for all methods\nimport mv_gaussian.low_dim_w_summary_stats.functions as func\nimport algorithms.snpla as snpla\n\n# Set model and generate data\n\nx_o, conj_model, analytical_posterior = func.set_up_model(seed)\n\n# set up posterior network\nflow_lik, flow_post = func.set_up_networks()\n\n## Generate test data\n\nN_prior_pred_test = 1000\nx_test, theta_test = func.run_model_sim(N_prior_pred_test, seed + 2, conj_model, analytical_posterior,\n conj_model.model.covariance_matrix, dim, True)\n\n# Generate test data for obs data set\nprint(conj_model.model_sim(theta_test).shape)\n\n\nN_test_obs_data = 1000\n\nx_test_obs_data = torch.zeros(N_test_obs_data, 5)\ntheta_test_obs_data = torch.zeros(N_test_obs_data, dim)\n\nfor i in range(N_test_obs_data):\n\n x_test_obs_data[i, :] = func.calc_summary_stats(x_o)\n theta_test_obs_data[i, :] = conj_model.model.loc\n\n# Set up networks for the likelihood model\n\n# Base dist for posterior model\nflow_lik, flow_post = func.set_up_networks()\n\nhyper_params = [0.001, 0.002, 0.95, 0.7] # lr_like, lr_post, gamma_post, gamma\n\nif lambda_val > 0:\n hyper_params[-1] = lambda_val\n\n\nif hp_tuning >= 2:\n hyper_params = func.sample_hp(\"snpla\", hp_tuning)\n\noptimizer_lik = torch.optim.Adam(flow_lik.parameters(), lr=hyper_params[0])\noptimizer_post = torch.optim.Adam(flow_post.parameters(), lr=hyper_params[1])\ndecay_rate_post = hyper_params[2] # no adaptation of Adam's base rate\n\nnbr_rounds = 10\nprob_prior_decay_rate = hyper_params[3]\nprob_prior = snpla.calc_prob_prior(nbr_rounds, prob_prior_decay_rate)\n\nprint(prob_prior)\n\n#nbr_lik = [2000, 2000, 2000, 2000]\n#nbr_epochs_lik = [25, 25, 25, 25]\n#batch_size = 50\n#batch_size_post = 50\n#nbr_post = [10000, 10000, 10000, 10000]\n#nbr_epochs_post = [25, 25, 25, 25]\n\n\nnbr_lik = [2500 for _ in range(nbr_rounds)] # [1000, 1000, 1000, 1000, 1000] # , 2000, 2000]\nnbr_epochs_lik = [75 for _ in range(nbr_rounds)] # [100, 100, 100, 100, 100]\nbatch_size = 50\nbatch_size_post = 1000\nnbr_post = [10000 for _ in range(nbr_rounds)] # [10000, 10000, 10000, 10000, 10000] # , 10000, 10000]\nnbr_epochs_post = [75 for _ in range(nbr_rounds)] # [50, 50, 50, 50, 50, 50]\n\n\nx_o_batch_post = torch.zeros(batch_size_post, 5)\n\nfor i in range(batch_size_post):\n x_o_batch_post[i, :] = 
func.calc_summary_stats(x_o)\n\ntorch.manual_seed(seed)\nnp.random.seed(seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nstart = time.time()\n\n# TODO check prior and simulator\nmodels_lik, models_post = snpla.inference_snpla(flow_lik,\n flow_post,\n conj_model.prior,\n conj_model.model_sim,\n optimizer_lik,\n optimizer_post,\n decay_rate_post,\n func.calc_summary_stats(x_o),\n x_o_batch_post,\n dim,\n prob_prior,\n nbr_lik,\n nbr_epochs_lik,\n nbr_post,\n nbr_epochs_post,\n batch_size,\n batch_size_post)\n\nend = time.time()\nrun_time = end - start\n\nprint(\"\")\nprint(\"Runtime:\" + str(round(run_time, 2)))\n\nkl_divs_trained = []\nstart = time.time()\ntorch.manual_seed(seed)\n\nfor i in range(nbr_rounds):\n print(i)\n posterior_sample = models_post[i].sample(1000, context=func.calc_summary_stats(x_o))\n posterior_sample = posterior_sample.reshape((1000, 2))\n\n kl_divs_trained.append(conj_model.kl_div(analytical_posterior, posterior_sample))\n\n if hp_tuning == 0 and lambda_val > 0:\n\n np.savetxt('mv_gaussian/low_dim_w_summary_stats/lambda_val/post_samples_snpla_' + str(i + 1) + \"_\" + id_job + '.csv',\n posterior_sample.detach().numpy(), delimiter=\",\")\n\n elif hp_tuning == 0:\n\n np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/post_samples_snpla_' + str(i + 1) + \"_\" + id_job + '.csv',\n posterior_sample.detach().numpy(), delimiter=\",\")\n\n else:\n\n np.savetxt('mv_gaussian/low_dim_w_summary_stats/hp_tuning/post_samples_snpla_' + str(i + 1) + \"_\" + id_job + '.csv',\n posterior_sample.detach().numpy(), delimiter=\",\")\n\nend = time.time()\nrun_time_inference = (end - start) / nbr_rounds\n\nif hp_tuning == 0 and lambda_val > 0:\n\n with open('mv_gaussian/low_dim_w_summary_stats/lambda_val/snpla_' + id_job + '.txt', 'w') as f:\n for h in hyper_params:\n f.write('%.6f\\n' % h)\n for p in prob_prior:\n f.write('%.6f\\n' % p)\n f.write('%.4f\\n' % run_time)\n f.write('%.4f\\n' % run_time_inference)\n for i in range(nbr_rounds):\n f.write('%.4f\\n' % kl_divs_trained[i])\n\n\nelif hp_tuning == 0:\n\n with open('mv_gaussian/low_dim_w_summary_stats/results/snpla_' + id_job + '.txt', 'w') as f:\n f.write('%.4f\\n' % run_time)\n f.write('%.4f\\n' % run_time_inference)\n for i in range(nbr_rounds):\n f.write('%.4f\\n' % kl_divs_trained[i])\n\nelse:\n\n with open('mv_gaussian/low_dim_w_summary_stats/hp_tuning/snpla_' + id_job + '.txt', 'w') as f:\n f.write('%.4f\\n' % hp_tuning)\n for h in hyper_params:\n f.write('%.6f\\n' % h)\n f.write('%.4f\\n' % run_time)\n f.write('%.4f\\n' % run_time_inference)\n for i in range(nbr_rounds):\n f.write('%.4f\\n' % kl_divs_trained[i])\n\nif hp_tuning == 0:\n # Inference\n\n # Sample data from post pred\n N_post_pred_test = 1000\n x_post_pred, theta_post_pred = func.run_model_sim(N_post_pred_test, seed + 3, conj_model, analytical_posterior,\n conj_model.model.covariance_matrix, dim, False)\n\n torch.manual_seed(seed)\n x_prior = flow_lik.sample(1, context=theta_test)\n x_theta_true = flow_lik.sample(1, context=theta_test_obs_data)\n x_post = flow_lik.sample(1, context=theta_post_pred)\n\n x_prior = x_prior.reshape(x_test.shape)\n x_theta_true = x_theta_true.reshape(x_test_obs_data.shape)\n x_post = x_post.reshape(x_post_pred.shape)\n\n\n # Write results\n np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_snpla_' + id_job +\n '.csv', x_theta_true.detach().numpy(), delimiter=\",\")\n\n np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_prior_snpla_' + id_job + '.csv',\n 
x_prior.detach().numpy(), delimiter=\",\")\n\n np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_post_snpla_' + id_job + '.csv',\n x_post.detach().numpy(), delimiter=\",\")\n"
] | [
[
"torch.zeros",
"torch.manual_seed",
"numpy.random.seed"
]
] |
victor-estrade/SystGradDescent | [
"822e7094290301ec47a99433381a8d6406798aff"
] | [
"model/summaries.py"
] | [
"# coding: utf-8\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\nDEFAULT_N_BINS = 10\n\ndef compute_summaries(clf, X, W, n_bins=DEFAULT_N_BINS):\n proba = clf.predict_proba(X)\n count, _ = np.histogram(proba[:, 1], range=(0., 1.), weights=W, bins=n_bins)\n return count\n\n\nclass ClassifierSummaryComputer():\n def __init__(self, clf, n_bins=DEFAULT_N_BINS):\n self.clf = clf\n self.n_bins = n_bins\n\n def __call__(self, X, W):\n proba = self.clf.predict_proba(X)\n count, _ = np.histogram(proba[:, 1], range=(0., 1.), weights=W, bins=self.n_bins)\n return count\n\n\n\nclass HistogramSummaryComputer():\n def __init__(self, n_bins=DEFAULT_N_BINS):\n self.n_bins = n_bins\n\n def fit(self, X):\n self.edges_list = []\n for i in range(X.shape[1]):\n x = X[:, i]\n maximum = np.max(x)\n minimum = np.min(x)\n diff = maximum - minimum\n maximum = maximum + diff / self.n_bins # be a bit more inclusive\n minimum = minimum - diff / self.n_bins # be a bit more inclusive\n count, bin_edges = np.histogram(x, range=(minimum, maximum), bins=self.n_bins)\n self.edges_list.append(bin_edges)\n return self\n\n def predict(self, X, W):\n counts = [] \n for i, bin_edges in enumerate(self.edges_list):\n x = X[:, i]\n count, _ = np.histogram(x, bins=bin_edges, weights=W)\n counts.extend(count)\n return counts\n\n def __call__(self, X, W):\n counts = self.predict(X, W)\n return np.array(counts)\n"
] | [
[
"numpy.histogram",
"numpy.max",
"numpy.array",
"numpy.min"
]
] |
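A small sketch of the weighted score histogram that `compute_summaries` above returns; scores and weights are made up:

```python
import numpy as np

proba = np.array([0.05, 0.42, 0.47, 0.93])  # classifier scores for class 1
W = np.array([1.0, 2.0, 0.5, 3.0])          # per-event weights
count, _ = np.histogram(proba, range=(0., 1.), weights=W, bins=10)
print(count)  # [1.  0.  0.  0.  2.5 0.  0.  0.  0.  3. ]
```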
Aieener/SUS_3D | [
"8fc5a768a2339238939522baf96bce98bf61902e"
] | [
"DATA/10_64_64_64_1E7/analy.py"
] | [
"# analy.py\n# A python program to analyze the SUS weighting function in order to reach the following goals:\n# 1. plot the weight function\n# 2. generate the normalized distribution for Z=1\n# 3. extrapolate the N distribution for different Zs given by the user.\n# Author: Yuding Ai\n# Date: 2015 Oct 23\n\nimport math\nimport numpy as np\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n## for Palatino and other serif fonts use:\nrc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n\ndef PN():\n\tWF = [] # a list of my target Weighting function\n\tPN = [] # a list of number distribution\n\n\twith open(\"SUSWeight_function.txt\",\"r\") as file:\n\t\tfor line in file:\n\t\t\twords = line.split()\n\t\t\tn = float(words[0]) #take the value\n\t\t\tWF.append(n); #append value into my WF list\n\n\tmaxi = max(WF)\n\tif maxi > 500:\n\t\tfor i in range(len(WF)):\n\t\t\tWF[i] = WF[i]-maxi +500\n\t\t\tPN.append(math.exp(WF[i]));\n\n\tPN = [float(i)/sum(PN) for i in PN]\n\treturn WF,PN\n\n\ndef Pplot(PN,z):\n\tfig = plt.figure()\t\n\tplt.plot(PN,'+b',markersize=3)\n\tZ = str(z)\n\tylabel = 'P(N;Z='+ Z + ')'\n\tplt.ylabel(ylabel)\n\tplt.xlabel('N')\n\ttitle = 'P(N;Z='+ Z + ').png'\n\tfig.savefig(title, dpi=300, bbox_inches='tight')\n\ndef enlargePplot(PN,z):\n\tfig = plt.figure()\t\n\tplt.plot(PN,'+b-',markersize=3,linewidth = 0.1)\n\tplt.xlim(8600,9600)\n\tplt.ylim(0,0.007)\n\tZ = str(z)\n\tylabel = 'P(N;Z='+ Z + ')'\n\tplt.ylabel(ylabel)\n\tplt.xlabel('N')\n\ttitle = 'ENLP(N;Z='+ Z + ').png'\n\tfig.savefig(title, dpi=300, bbox_inches='tight')\n\ndef Wplot(WN):\n\tfig = plt.figure()\t\n\tplt.plot(WN,'+r',markersize=1,)\n\tplt.ylabel('Weighting Function')\n\tplt.xlabel('N')\n\ttitle = 'WeightingFunc.png'\n\tfig.savefig(title, dpi=300, bbox_inches='tight')\n\n\ndef exploPN(W,z):\n\tP = [] # a list of number distribution\n\tfor i in range(len(W)):\n\t\tW[i] = W[i] + i*math.log(z)\n\n\tmaxi = max(W)\n\tif maxi > 500:\n\t\tfor j in range(len(W)):\n\t\t\tW[j] = W[j]-maxi +500\n\t\t\tP.append(math.exp(W[j]));\n\tP = [float(k)/sum(P) for k in P]\n\treturn P\n\n\ndef main():\n\n\n\tP = PN()[1] # take the P(N;z=1)\n\tW = PN()[0] # take the original weighting function \n\n\tWplot(W)\n\t# Pplot(P,\"1\")\n\t# Pe = exploPN(W,4.44)\n\t# enlargePplot(Pe,4.44)\n\n\t# for i in range(10):\n\t# \tW = PN()[0] # take the original weighting function \t\t\n\t# \tt = 3.83 + 0.02*i\n\t# \tPe = exploPN(W,t)\n\t# \t# Pplot(Pe,t)\n\t# \tenlargePplot(Pe,t)\n\nmain()\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.rc",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
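The `-maxi + 500` shift in `PN()`/`exploPN()` above is an overflow guard for `math.exp`; the standard form of the same trick subtracts the maximum log-weight before exponentiating, which leaves the normalized distribution unchanged. Log-weights here are made up:

```python
import numpy as np

logw = np.array([1200.0, 1205.0, 1198.0])  # illustrative log-weights
p = np.exp(logw - logw.max())              # np.exp(1205.0) alone would overflow
p /= p.sum()                               # normalization cancels the shift
print(p)  # approximately [0.0067, 0.9924, 0.0009]
```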
golmschenk/ramjet | [
"77fb4481a15088923308fda09804d80455d1a9cf"
] | [
"ramjet/data_interface/tess_eclipsing_binary_metadata_manager.py"
] | [
"\"\"\"\nCode for managing the TESS eclipsing binary metadata.\n\"\"\"\nimport pandas as pd\nfrom pathlib import Path\nfrom peewee import IntegerField, SchemaManager\n\nfrom ramjet.data_interface.metadatabase import MetadatabaseModel, metadatabase\n\n\nbrian_powell_eclipsing_binary_csv_path = Path('data/tess_eclipsing_binaries/TESS_EB_catalog_23Jun.csv')\n\n\nclass TessEclipsingBinaryMetadata(MetadatabaseModel):\n \"\"\"\n A model for the TESS eclipsing binary metadatabase table.\n \"\"\"\n tic_id = IntegerField(index=True, unique=True)\n\n\nclass TessEclipsingBinaryMetadataManager:\n \"\"\"\n A class for managing the TESS eclipsing binary metadata.\n \"\"\"\n @staticmethod\n def build_table():\n \"\"\"\n Builds the TESS eclipsing binary metadata table.\n \"\"\"\n print('Building TESS eclipsing binary metadata table...')\n eclipsing_binary_data_frame = pd.read_csv(brian_powell_eclipsing_binary_csv_path, usecols=['ID'])\n row_count = 0\n metadatabase.drop_tables([TessEclipsingBinaryMetadata])\n metadatabase.create_tables([TessEclipsingBinaryMetadata])\n SchemaManager(TessEclipsingBinaryMetadata).drop_indexes()\n rows = []\n for index, tic_id in enumerate(eclipsing_binary_data_frame['ID'].values):\n row = {'tic_id': tic_id}\n rows.append(row)\n row_count += 1\n if row_count % 1000 == 0:\n with metadatabase.atomic():\n TessEclipsingBinaryMetadata.insert_many(rows).execute()\n rows = []\n with metadatabase.atomic():\n TessEclipsingBinaryMetadata.insert_many(rows).execute()\n SchemaManager(TessEclipsingBinaryMetadata).create_indexes()\n print(f'Table built. {row_count} rows added.')\n\n\nif __name__ == '__main__':\n metadata_manager = TessEclipsingBinaryMetadataManager()\n metadata_manager.build_table()\n"
] | [
[
"pandas.read_csv"
]
] |
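build_table above flushes inserts in batches of 1000 rows inside atomic transactions. A self-contained sketch of that chunked-insert pattern (peewee with an in-memory SQLite database; the Item model and batch size are illustrative, not from ramjet). One detail worth the guard at the end: when the row count is an exact multiple of the batch size, the final batch is empty and there is nothing left to insert:

from peewee import SqliteDatabase, Model, IntegerField

db = SqliteDatabase(':memory:')

class Item(Model):
    tic_id = IntegerField(index=True, unique=True)

    class Meta:
        database = db

db.create_tables([Item])
rows, batch_size = [], 1000
for tic_id in range(2500):
    rows.append({'tic_id': tic_id})
    if len(rows) == batch_size:
        with db.atomic():  # one transaction per batch keeps commits cheap
            Item.insert_many(rows).execute()
        rows = []
if rows:  # flush the remainder, skipping the empty-batch case
    with db.atomic():
        Item.insert_many(rows).execute()
print(Item.select().count())  # 2500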
BreezeDawn/numpy-pandas-matplotlib- | [
"e55dccb2442e57c2fccb2081966a7c19e731083a"
] | [
"pandas_study/PandasTest.py"
] | [
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef base():\n index = pd.date_range('20181023', periods=9) # 生成9个行索引\n column = ['a', 'b', 'c', 'd'] # 生成4个列索引\n a = np.random.randn(9, 4) # 随便生成的9行4列的数据\n df = pd.DataFrame(a, index=index, columns=column)\n print(df)\n print(pd.DataFrame(np.arange(9).reshape((3, 3)))) # 行和列的默认索引为从0开始的数字\n print(df.dtypes) # 查看每列的数据类型\n print(df.index) # 查看每行的行索引\n print(df.columns) # 查看每列的列索引\n print(df.values) # 查看所有值\n print(df.describe()) # 查看每列的详细统计 数目/平均值/....\n print(df.T) # pandas的转置\n print(df.sort_index(axis=1, ascending=False)) # 按索引排序 axis: 1列排序 0行排序 ascending: False反排序(从小向大) True正排序(从大向小)\n print(df.sort_values(by='a')) # 把a列的值进行排序 默认从小向大\n\n\ndef select():\n index = pd.date_range('20181023', periods=6)\n df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])\n print(df)\n print(df.A) # 取出A列数据(带索引)\n print(df[2:3]) # 切片取数据\n print(df[2:3]) # 切片取数据\n print(df['2018-10-25':'2018-10-26']) # 切片取数据\n print(df.loc['2018-10-25', ['A', 'B']]) # 按照标签取数据\n print(df.iloc[[1, 3, 5], 1:5]) # 按照数字取数据\n print(df.ix['2018-10-25':'2018-10-26', 1:5]) # 数字标签结合取数据\n print(df[df.A > 8]) # A列中的元素大于8的都显示\n\n\ndef update():\n index = pd.date_range('20181023', periods=6)\n df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])\n df.iloc[2, 3] = -555 # 修改值 选中就能修改\n df.B[df.A > 8] = 0 # A列中的元素大于8的都把B修改为0\n print(df)\n df['E'] = pd.Series(np.arange(6), pd.date_range('20181023', periods=6)) # 增加一列\n print(df)\n\n\ndef handle_NaN():\n index = pd.date_range('20181023', periods=6)\n df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])\n df.iloc[1, 2] = np.nan\n df.iloc[0, 1] = np.nan\n print(df)\n print(df.dropna(axis=1, how='any')) # 丢掉缺失值(返回新的结果不影响原始数据) axis: 1丢掉列 0丢掉行 how: any任何一个是NaN就丢掉 all全是NaN就丢掉\n print(df.fillna(value=0)) # 填充缺失值 填充为0\n print(df.isnull()) # 检查每个元素是否缺失值,结果返回一个bool填充\n print(np.any(df.isnull())) # np.any 检查至少有一个False,是的话返回True\n\n\ndef read_save_data():\n data = pd.read_csv('./pand.csv') # 读取csv文件数据(csv内部逗号分隔)\n print(data)\n data.to_pickle('./pand.pickle') # 保存数据到pickle文件\n\n\ndef merge_DataFrame():\n df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'])\n df2 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])\n df3 = pd.DataFrame(2 * np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])\n print(df1)\n print(df2)\n print(df3)\n res = pd.concat([df1, df2, df3], axis=0) # axis: 0上下合并 1左右合并\n print(res)\n res = pd.concat([df1, df2, df3], axis=1, ignore_index=True) # ignore_index 忽略前面所有的index并重新排序\n print(res)\n\n df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'], index=[1, 2, 3])\n df2 = pd.DataFrame(np.ones((3, 4)), columns=['b', 'c', 'd', 'e'], index=[2, 3, 4])\n res = pd.concat([df1, df2], axis=0, join='outer', sort=True) # 上下合并,outer如果有不一样的列字段,就用NaN填充\n print(res)\n res = pd.concat([df1, df2], axis=0, join='inner', sort=True, ignore_index=True) # 上下合并, inner有不一样的列字段就丢掉那一列,保留相同字段\n print(res)\n res = pd.concat([df1, df2], axis=1, ) # 左右合并,有不一样的行字段就用NaN填充\n print(res)\n res = pd.concat([df1, df2], axis=1, join_axes=[df1.index]) # 左右合并,行字段按照df1的行字段来,缺失值用NaN填充,其余df1没有的字段丢掉\n print(res)\n\n df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'])\n df2 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])\n df3 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])\n res = df1.append(df2, ignore_index=True) # df1后面加上df2\n print(res)\n res = df1.append([df2, 
df3], ignore_index=True) # df1后面加上df2,df3\n print(res)\n sl = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n res = df1.append(sl, ignore_index=True)\n print(res)\n\n\ndef merge():\n left = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']\n })\n right = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']\n })\n print(left)\n print(right)\n res = pd.merge(left, right, on='key') # 左右合并,key字段保留一个\n print(res)\n\n left = pd.DataFrame({\n 'key1': ['K0', 'K0', 'K1', 'K2'],\n 'key2': ['K0', 'K1', 'K0', 'K1'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']\n })\n right = pd.DataFrame({\n 'key1': ['K0', 'K1', 'K1', 'K2'],\n 'key2': ['K0', 'K0', 'K0', 'K0'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']\n })\n res = pd.merge(left, right, on=['key1', 'key2'], how='inner') # 解释不清,看结果\n print(res)\n res = pd.merge(left, right, on=['key1', 'key2'], how='outer',indicator='indicator_column') # 不管一不一样都保留 indicator写出哪些一样哪些不一样,写字符串可改名\n print(res)\n res = pd.merge(left, right, on=['key1', 'key2'], how='left') # 左的on字段完全不动的保留\n print(res)\n res = pd.merge(left, right, on=['key1', 'key2'], how='right') # 右的on字段完全不动的保留\n print(res)\n res = pd.merge(left, right, left_index=True,right_index=True, how='right') # 根据索引保留\n print(res)\n\n\ndef plot_test():\n # 1000个一维数据累加\n data = pd.Series(np.random.randn(1000),index=np.arange(1000))\n data = data.cumsum()\n # data.plot()\n # plt.show()\n\n # 矩阵\n data = pd.DataFrame(np.random.randn(1000,4),index=np.arange(1000),columns=list('ABCD'))\n data = data.cumsum()\n print(data.head()) # head显示前五个数据,默认5个\n data.plot() # 线性\n ax = data.plot.scatter(x='A',y='B',color='DarkBlue', label='Class 1') # scatter 数据点 只有x,y\n data.plot.scatter(x='A',y='C',color='DarkGreen', label='Class 2',ax=ax) # ax和前面的在一张图上\n plt.show()\n # plot method : bar条形图 hist box kde area scatter hexbin pie\n\n\n\n\nif __name__ == '__main__':\n # base()\n # select()\n # update()\n # handle_NaN()\n # read_save_data()\n # merge_DataFrame()\n # merge()\n plot_test()"
] | [
[
"numpy.ones",
"pandas.Series",
"pandas.date_range",
"numpy.zeros",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.arange",
"matplotlib.pyplot.show",
"pandas.merge",
"pandas.concat"
]
] |
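For the merge calls above whose comment says the behavior is easier to see than to explain, a tiny standalone example (synthetic frames, not from the file) of how='outer' with an indicator column:

import pandas as pd

left = pd.DataFrame({'key': ['K0', 'K1'], 'A': [1, 2]})
right = pd.DataFrame({'key': ['K1', 'K2'], 'B': [3, 4]})
# 'source' is 'left_only', 'both', or 'right_only' for each result row
print(pd.merge(left, right, on='key', how='outer', indicator='source'))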
maximskorik/matchms | [
"922f5afaef123a793194bdd74391027477cbb844",
"922f5afaef123a793194bdd74391027477cbb844"
] | [
"matchms/exporting/save_as_json.py",
"matchms/Fragments.py"
] | [
"import json\nfrom typing import List\nimport numpy\nfrom ..Spectrum import Spectrum\n\n\ndef save_as_json(spectrums: List[Spectrum], filename: str):\n \"\"\"Save spectrum(s) as json file.\n\n :py:attr:`~matchms.Spectrum.losses` of spectrum will not be saved.\n\n Example:\n\n .. code-block:: python\n\n import numpy\n from matchms import Spectrum\n from matchms.exporting import save_as_json\n\n # Create dummy spectrum\n spectrum = Spectrum(mz=numpy.array([100, 200, 300], dtype=\"float\"),\n intensities=numpy.array([10, 10, 500], dtype=\"float\"),\n metadata={\"charge\": -1,\n \"inchi\": '\"InChI=1S/C6H12\"',\n \"precursor_mz\": 222.2})\n\n # Write spectrum to test file\n save_as_json(spectrum, \"test.json\")\n\n Parameters\n ----------\n spectrums:\n Expected input is a list of :py:class:`~matchms.Spectrum.Spectrum` objects.\n filename:\n Provide filename to save spectrum(s).\n \"\"\"\n if not isinstance(spectrums, list):\n # Assume that input was single Spectrum\n spectrums = [spectrums]\n\n # Write to json file\n with open(filename, 'w', encoding=\"utf-8\") as fout:\n json.dump(spectrums, fout, cls=SpectrumJSONEncoder)\n\n\nclass SpectrumJSONEncoder(json.JSONEncoder):\n # See https://github.com/PyCQA/pylint/issues/414 for reference\n def default(self, o):\n \"\"\"JSON Encoder which can encode a :py:class:`~matchms.Spectrum.Spectrum` object\"\"\"\n if isinstance(o, Spectrum):\n spec = o.clone()\n peaks_list = numpy.vstack((spec.peaks.mz, spec.peaks.intensities)).T.tolist()\n\n # Convert matchms.Spectrum() into dictionaries\n spectrum_dict = {key: spec.metadata[key] for key in spec.metadata}\n spectrum_dict[\"peaks_json\"] = peaks_list\n return spectrum_dict\n return json.JSONEncoder.default(self, o)\n",
"import numpy\n\n\nclass Fragments:\n \"\"\"\n Stores arrays of intensities and M/z values, with some checks on their internal consistency.\n\n For example\n\n .. testcode::\n\n import numpy as np\n from matchms import Fragments\n\n mz = np.array([10, 20, 30], dtype=\"float\")\n intensities = np.array([100, 20, 300], dtype=\"float\")\n\n peaks = Fragments(mz=mz, intensities=intensities)\n print(peaks[2])\n\n Should output\n\n .. testoutput::\n\n [ 30. 300.]\n\n Attributes\n ----------\n mz:\n Numpy array of m/z values.\n intensities:\n Numpy array of peak intensity values.\n\n \"\"\"\n def __init__(self, mz=None, intensities=None):\n assert isinstance(mz, numpy.ndarray), \"Input argument 'mz' should be a numpy.array.\"\n assert isinstance(intensities, numpy.ndarray), \"Input argument 'intensities' should be a numpy.array.\"\n assert mz.shape == intensities.shape, \"Input arguments 'mz' and 'intensities' should be the same shape.\"\n assert mz.dtype == \"float\", \"Input argument 'mz' should be an array of type float.\"\n assert intensities.dtype == \"float\", \"Input argument 'intensities' should be an array of type float.\"\n\n self._mz = mz\n self._intensities = intensities\n\n assert self._is_sorted(), \"mz values are out of order.\"\n\n def __eq__(self, other):\n return \\\n self.mz.shape == other.mz.shape and \\\n numpy.allclose(self.mz, other.mz) and \\\n self.intensities.shape == other.intensities.shape and \\\n numpy.allclose(self.intensities, other.intensities)\n\n def __len__(self):\n return self._mz.size\n\n def __getitem__(self, item):\n return numpy.asarray([self.mz[item], self.intensities[item]])\n\n def _is_sorted(self):\n return numpy.all(self.mz[:-1] <= self.mz[1:])\n\n def clone(self):\n return Fragments(self.mz, self.intensities)\n\n @property\n def mz(self):\n \"\"\"getter method for mz private variable\"\"\"\n return self._mz.copy()\n\n @property\n def intensities(self):\n \"\"\"getter method for intensities private variable\"\"\"\n return self._intensities.copy()\n\n @property\n def to_numpy(self):\n \"\"\"getter method to return stacked numpy array of both peak mz and\n intensities\"\"\"\n return numpy.vstack((self.mz, self.intensities)).T\n"
] | [
[
"numpy.vstack"
],
[
"numpy.vstack",
"numpy.allclose",
"numpy.all",
"numpy.asarray"
]
] |
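save_as_json above works by subclassing json.JSONEncoder and overriding default, deferring to the base class for unknown types. A minimal sketch of that same pattern (Peak is a hypothetical stand-in, not the matchms API):

import json
import numpy

class Peak:
    def __init__(self, mz, intensity):
        self.mz, self.intensity = mz, intensity

class PeakEncoder(json.JSONEncoder):
    def default(self, o):
        # Handle our own types, then defer so unsupported ones still raise.
        if isinstance(o, Peak):
            return {'mz': o.mz, 'intensity': o.intensity}
        if isinstance(o, numpy.ndarray):
            return o.tolist()
        return json.JSONEncoder.default(self, o)

print(json.dumps([Peak(100.0, 10.0), numpy.array([1.0, 2.0])], cls=PeakEncoder))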
LenKerr/Colorization-1 | [
"bcfcdb24fc8ab107d34644d5a63b018f86784e21"
] | [
"transfer_subnet/xiaoketransfer2.py"
] | [
"\"\"\"\nCopyright (c) 2019 NAVER Corp.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\nimport os\nos.environ['CUDA_VISIBLE_DEVICES']='1'\nimport sys\nsys.path.append('./segmentation')\nimport os\nimport tqdm\nimport argparse\n\nimport torch\nfrom torchvision.utils import save_image\nimport torch.nn as nn\n# from model import WaveEncoder, WaveDecoder\n\nfrom utils.core import feature_wct\nfrom utils.core import feature_adin\nfrom utils.core import feature_adin_without_segment\nfrom utils.core import feature_wct_without_segment\nfrom utils.io import Timer, open_image, load_segment, compute_label_info\nfrom xiaokemodel import XiaoKeEncoder, XiaoKeDecoder\nimport numpy as np\nimport torchvision.transforms as transforms\n\nfrom scipy.io import loadmat\nfrom PIL import Image\nfrom scipy.misc import imread, imresize\nimport cv2\nfrom lib.nn import user_scattered_collate, async_copy_to\nfrom lib.utils import as_numpy, mark_volatile\nimport datetime\n\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG',\n]\n\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\nclass WCT2:\n def __init__(self, model_path='./model_checkpoints', transfer_at=['encoder', 'skip', 'decoder'], option_unpool='cat5', device='cuda:0', verbose=False):\n\n self.transfer_at = set(transfer_at)\n assert not(self.transfer_at - set(['encoder', 'decoder', 'skip'])), 'invalid transfer_at: {}'.format(transfer_at)\n assert self.transfer_at, 'empty transfer_at'\n model_path = './xiaoke_video_checkpoints/'\n encoder_path = 'xiaoke_encoder.pth'\n decoder_path = 'xiaoke_decoder_0.0001_4.pth'\n\n model_path = './xiaoke_checkpoints/'\n encoder_path = 'xiaoke_encoder.pth'\n decoder_path = 'xiaoke_decoder_87.pth'\n\n\n self.device = torch.device(device)\n self.verbose = verbose\n # self.encoder = WaveEncoder(option_unpool).to(self.device)\n # self.decoder = WaveDecoder(option_unpool).to(self.device)\n # self.encoder.load_state_dict(torch.load(os.path.join(model_path, 'wave_encoder_{}_l4.pth'.format(option_unpool)), map_location=lambda storage, loc: storage))\n # self.decoder.load_state_dict(torch.load(os.path.join(model_path, 'wave_decoder_{}_l4.pth'.format(option_unpool)), map_location=lambda storage, loc: storage))\n\n self.encoder = XiaoKeEncoder(option_unpool).to(self.device)\n self.decoder = XiaoKeDecoder(option_unpool).to(self.device)\n self.encoder.load_state_dict(torch.load(os.path.join(model_path,encoder_path),map_location=lambda storage, loc: storage))\n 
self.decoder.load_state_dict(torch.load(os.path.join(model_path,decoder_path),map_location=lambda storage, loc: storage)) \n\n\n def print_(self, msg):\n if self.verbose:\n print(msg)\n\n def encode(self, x, skips, level):\n return self.encoder.encode(x, skips, level)\n\n def decode(self, x, skips, level):\n return self.decoder.decode(x, skips, level)\n\n def get_all_feature(self, x):\n skips = {}\n feats = {'encoder': {}, 'decoder': {}}\n for level in [1, 2, 3, 4]:\n x = self.encode(x, skips, level)\n if 'encoder' in self.transfer_at:\n feats['encoder'][level] = x\n\n if 'encoder' not in self.transfer_at:\n feats['decoder'][4] = x\n for level in [4, 3, 2]:\n x = self.decode(x, skips, level)\n if 'decoder' in self.transfer_at:\n feats['decoder'][level - 1] = x\n return feats, skips\n\n def transfer(self, content, style, content_segment, style_segment, alpha=1,is_wct=False):\n content_feat, content_skips = content, {}\n style_feats, style_skips = self.get_all_feature(style)\n\n wct2_enc_level = [1, 2, 3, 4]\n wct2_dec_level = [1, 2, 3, 4]\n wct2_skip_level = ['pool1', 'pool2', 'pool3']\n label_set,label_indicator = None, None\n for level in [1, 2, 3, 4]:\n content_feat = self.encode(content_feat, content_skips, level)\n if 'encoder' in self.transfer_at and level in wct2_enc_level:\n if is_wct:\n content_feat = feature_wct(content_feat, style_feats['encoder'][level],\n content_segment, style_segment,\n label_set, label_indicator,\n alpha=alpha, device=self.device)\n else:\n content_feat = feature_adin_without_segment(content_feat, style_feats['encoder'][level],\n content_segment, style_segment,\n label_set, label_indicator,\n alpha=alpha, device=self.device)\n self.print_('transfer at encoder {}'.format(level))\n if 'skip' in self.transfer_at:\n for skip_level in wct2_skip_level:\n if is_wct:\n content_skips[skip_level] = feature_wct(content_skips[skip_level], style_skips[skip_level],\n content_segment, style_segment,\n label_set, label_indicator,\n alpha=alpha, device=self.device)\n else :\n content_skips[skip_level] = feature_adin_without_segment(content_skips[skip_level], style_skips[skip_level],\n content_segment, style_segment,\n label_set, label_indicator,\n alpha=alpha, device=self.device)\n self.print_('transfer at skip {}'.format(skip_level))\n\n for level in [4, 3, 2, 1]:\n if 'decoder' in self.transfer_at and level in style_feats['decoder'] and level in wct2_dec_level:\n if is_wct:\n content_feat = feature_wct(content_feat, style_feats['decoder'][level],\n content_segment, style_segment,\n label_set, label_indicator,\n alpha=alpha, device=self.device)\n else :\n content_feat = feature_adin_without_segment(content_feat, style_feats['decoder'][level],\n content_segment, style_segment,\n label_set, label_indicator,\n alpha=alpha, device=self.device)\n self.print_('transfer at decoder {}'.format(level))\n content_feat = self.decode(content_feat, content_skips, level)\n return content_feat\n\n\ndef get_all_transfer():\n ret = []\n for e in ['encoder']:\n for d in ['decoder']:\n for s in ['skip']:\n _ret = set([e, d, s]) & set(['encoder', 'decoder', 'skip'])\n if _ret:\n ret.append(_ret)\n return ret\n\n# def get_single_transfer():\n# return ['encoder', 'decoder', 'skip']\n\n\n\ndef run_bulk():\n accurate_segment = True\n device = 'cpu' if config.cpu or not torch.cuda.is_available() else 'cuda:0'\n device = torch.device(device)\n\n transfer_at = set()\n if config.transfer_at_encoder:\n transfer_at.add('encoder')\n if config.transfer_at_decoder:\n transfer_at.add('decoder')\n if 
config.transfer_at_skip:\n        transfer_at.add('skip')\n    # cw, ch = 640,360 \n    cw, ch = 640,400 \n\n    # The filenames of the content and style pair should match\n    c_transforms = transforms.Compose([transforms.Resize((ch,cw), interpolation=Image.NEAREST),transforms.CenterCrop((ch // 16 * 16, cw // 16 * 16)),transforms.ToTensor()])\n\n    fnames = os.listdir(config.content)\n    fnames.sort()\n    print('transfer at ~~~~',transfer_at)\n    style = Image.open(config.style).convert('RGB')\n    style = c_transforms(style).unsqueeze(0).to(device)\n    sample_fnames = fnames[:50]\n    for fname in tqdm.tqdm(sample_fnames):\n        if not is_image_file(fname):\n            print('invalid file (is not image), ', fname)\n            continue\n        print('config.wct is ',config.is_wct)\n\n        # content\n        _content = os.path.join(config.content, fname)\n        content = Image.open(_content).convert('RGB') # don't forget the .to(device) here\n        \n        content = c_transforms(content).unsqueeze(0).to(device)\n        print('current frame {} and shape is {}'.format(fname,content.shape))\n\n\n        # _content_segment = os.path.join(config.content_segment, fname) if config.content_segment else None\n        # _style_segment = os.path.join(config.style_segment, fname) if config.style_segment else None\n        _output = os.path.join(config.output, fname)\n\n        content_segment,style_segment = None,None\n        if not config.transfer_all:\n            with Timer('Elapsed time in whole WCT: {}', config.verbose):\n                postfix = '_'.join(sorted(list(transfer_at)))\n                fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))\n                print('------ transfer:', _output)\n                wct2 = WCT2(transfer_at=transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)\n                with torch.no_grad():\n                    img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha,is_wct=config.is_wct)\n\n                save_image(img.clamp_(0, 1), fname_output, padding=0)\n        else:\n            for _transfer_at in get_all_transfer():\n                print('location for transfer at~~~~',_transfer_at)\n                with Timer('Elapsed time in whole WCT: {}', config.verbose):\n                    postfix = '_'.join(sorted(list(_transfer_at)))\n                    fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))\n                    print('------ transfer:', fname,'-',_transfer_at)\n                    wct2 = WCT2(transfer_at=_transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)\n                    # print('wct2 model encoder ',wct2.encoder)\n                    # print('wct2 model decoder ',wct2.decoder)\n                    with torch.no_grad():\n                        starttime = datetime.datetime.now()\n                        img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha,is_wct=config.is_wct)\n                        endtime = datetime.datetime.now()\n                        print('xiaoke with adin running time ----',(endtime - starttime))\n                        save_image(img.clamp_(0, 1), fname_output, padding=0)\n                    # break\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--content', type=str, default='./examples/content')\n    parser.add_argument('--content_segment', type=str, default='./examples/content_segment')\n    parser.add_argument('--style', type=str, default='./examples/style')\n    parser.add_argument('--style_segment', type=str, default='./examples/style_segment')\n    parser.add_argument('--output', type=str, default='./outputs')\n    parser.add_argument('--image_size', type=int, default=512)\n    parser.add_argument('--alpha', type=float, default=1)\n    parser.add_argument('--option_unpool', type=str, default='cat5', choices=['sum', 'cat5'])\n    parser.add_argument('-e', '--transfer_at_encoder', action='store_true')\n    parser.add_argument('-d', '--transfer_at_decoder', action='store_true')\n    parser.add_argument('-s', '--transfer_at_skip', action='store_true')\n    parser.add_argument('-a', '--transfer_all', action='store_true')\n    parser.add_argument('--cpu', action='store_true')\n    parser.add_argument('--verbose', action='store_true')\n    parser.add_argument('--is_wct',action='store_true')\n    parser.add_argument('--label_mapping', type=str, default='ade20k_semantic_rel.npy')\n    parser.add_argument('--model_path', help='folder to model path', default='baseline-resnet50_dilated8-ppm_bilinear_deepsup')\n    parser.add_argument('--arch_encoder', default='resnet50_dilated8', help=\"architecture of net_encoder\")\n    parser.add_argument('--arch_decoder', default='ppm_bilinear_deepsup', help=\"architecture of net_decoder\")\n    parser.add_argument('--suffix', default='_epoch_20.pth', help=\"which snapshot to load\")\n    parser.add_argument('--fc_dim', default=2048, type=int, help='number of features between encoder and decoder')\n    parser.add_argument('--num_class', default=150, type=int, help='number of classes')\n    parser.add_argument('--padding_constant', default=8, type=int, help='maximum downsampling rate of the network')\n    parser.add_argument('--gpu_id', default=0, type=int, help='gpu_id for evaluation')\n    parser.add_argument('--imgSize', default=[300, 400, 500, 600], nargs='+', type=int, help='list of input image sizes.' 'for multiscale testing, e.g. 300 400 500')\n\n    # the same argument cannot be defined twice\n    config = parser.parse_args()\n\n\n    transform = transforms.Compose([transforms.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1., 1., 1.])])\n    print(config)\n    if not os.path.exists(os.path.join(config.output)):\n        os.makedirs(os.path.join(config.output))\n    run_bulk()\n\n# forest clips\n# 171124_D1_HD_01\n# 170216A_122_ForestTrail_1080\n# 170216A_070_LookingUpThroughForest_1080 \n# 180705_01_0\n# 190416_10_Drone1_0\n# Forest_15_1_Videv\n# Forest_15_4_Videv\n# on\n# WalkingThroughTreesatSunsetVidev\n\n\n# leaves\n# Autumn_leaves_in_motion_0\n# autumn_leaves\n# autumn-leaves-blowing-in-the-wind-H264\n# 180705_01_0\n\n# ocean waves\n# 46234354\n# walking_on_the_beac\n\n# snowy mountains\n# 180607_A_00\n\n# driving\n# 180607_A_10\n\n# airplanes\n# Airbus_A380_Landing_2__Videv\n# Evening_landin\n# PlaneLand\n\n# beach yoga\n# Ao_Nang_Beach_Yoga_MP4_HDV_1080p25__TanuriX_Stock_Footage_N\n# MVI_126\n\n# grain fields\n# Barley_3_Videv\n# HandStrokin\n\n# wild_gras\n# windygrassnoaudi-\n\n# boats\n# beach1\n# sailing_boa\n\n# sky\n# Becco_di_Filadonna_su_Vall\n# Blue_Sky_and_Clouds_Timelapse_0892__Videv\n\n# mice\n# CotswoldSequence\n\n\n# cows\n# cow\n# Cow_Mother_and_cal\n# Cows_\n# Limousin_Cows_1__VIdev\n# Limousin_Cows_2__Videv\n\n# sunsets\n# Lonely_tree_at_sunset_CCBY_NatureCli\n# MilkyWaywithTreeVidev\n# SilhouetteJogge\n# Sun_to_Sea_Model__Pan_Down_MP4_HDV_1080p25__TanuriX_Stock_Footage_W\n# Sunris\n# TimelapseSunse\n# Wakeboarding_on_the_Lak\n\n\n# horses\n# Dirty_Hors\n\n# black-and-white birds\n# Pigeon-Stock-Vide\n# Red_fod\n# Weave\n\n# seagulls\n# seagul-H264\n# seagulls_on_the_beac\n\n\n# buildings\n# Run_5_wo_metadata_h264420_720p_UH\n\n# ducks\n# SeaBirdsSwimming_\n# Swans__1287_\n\n# sheep\n# Shee\n\n'''\nCUDA_VISIBLE_DEVICES=6 python transfer.py --content ./examples/content --style ./examples/style --content_segment ./examples/content_segment --style_segment ./examples/style_segment/ --output ./outputs/ --verbose --image_size 512 -a\n'''\n\n\n\n'''\npython xiaoketransfer.py --content ./examples/demo_content/ --style ./examples/demo_style/ -a --output ./examples/demo_stylization --is_wct --image_size 400\nCUDA_VISIBLE_DEVICES=1 python xiaoketransfer.py --content ./examples/dataset/alley_2/ --style ./examples/dataset/fangao.png -a --output ./examples/stylization \n\nCUDA_VISIBLE_DEVICES=1 python xiaoketransfer2.py --content ./examples/data/MPI-Sintel-complete/training/clean/temple_2 --style ./examples/data/fangao.png -a --output ./examples/stylization \n\nCUDA_VISIBLE_DEVICES=1 python xiaoketransfer2.py --content ./examples/data/MPI-Sintel-complete/training/clean/mountain_1 --style ./examples/data/fangao.png -a --output ./examples/stylization \n\nCUDA_VISIBLE_DEVICES=1 python xiaoketransfer2.py --content ./examples/data/MPI-Sintel-complete/training/clean/temple_2 --style ./examples/data/fangao.png -a --output ./examples/stylization --is_wct \n\n'''\n\n\n'''\n'../data/video-picture/160825_26_WindTurbines4_1080'\npython xiaoketransfer2.py --content ../data/video-picture/160825_26_WindTurbines4_1080 --style ./examples/data/fangao.png -a --output ./examples/160825_26_WindTurbines4_1080_adain \n\n'''\n\n'''\n'../data/video-picture/xxx'\npython xiaoketransfer2.py --content ../data/video-picture/180705_01_0 --style ../data/reference/tar0056_orange_forest.png -a --output ./examples/Forest_15_4_Videv \n\npython xiaoketransfer.py --content ../data/video-picture/Red_fod --style ../data/video-picture/Weave/frame_0001.png -a --output ./examples/Red_fod_seg\n\n\npython xiaoketransfer.py --content ../data/video-picture/seagulls_on_the_beac --style ../data/video-picture/seagul-H264/frame_0001.png -a --output ./examples/seagulls_on_the_beac_seg\n\npython xiaoketransfer2.py --content ../data/video-picture/HandStrokin --style ../data/video-picture/Barley_3_Videv/frame_0001.png -a --output ./examples/HandStrokin\n\npython xiaoketransfer.py --content ../data/video-picture/Swans__1287_ --style ../data/video-picture/SeaBirdsSwimming_/frame_0001.png -a --output ./examples/Swans__1287_\n\npython xiaoketransfer.py --content ../data/video-picture/Becco_di_Filadonna_su_Vall --style ../data/video-picture/Blue_Sky_and_Clouds_Timelapse_0892__Videv/frame_0001.png -a --output ./examples/Becco_di_Filadonna_su_Vall\n\npython xiaoketransfer.py --content ../data/video-picture/Sun_to_Sea_Model__Pan_Down_MP4_HDV_1080p25__TanuriX_Stock_Footage_W --style ../data/video-picture/Lonely_tree_at_sunset_CCBY_NatureCli/frame_0004.png -a --output ./examples/Sun_to_Sea_Model__Pan_Down_MP4_HDV_1080p25__TanuriX_Stock_Footage_W\n\n\npython xiaoketransfer2.py --content ../data/video-picture/Wakeboarding_on_the_Lak --style ../data/video-picture/Sunris/frame_0004.png -a --output ./examples/Wakeboarding_on_the_Lak\n\n# Barley_3_Videv\n# HandStrokin\n\n# Pigeon-Stock-Vide\n# Red_fod\n'''"
] | [
[
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
]
] |
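WCT2.transfer above delegates the statistics matching to feature_adin_without_segment. As a rough sketch of what AdaIN-style matching does (my own minimal version, not the repo's implementation): shift and scale each channel of the content features so its mean and standard deviation match those of the style features.

import torch

def adain(content, style, eps=1e-5):
    # content, style: (N, C, H, W) feature maps
    n, c = content.shape[:2]
    c_flat, s_flat = content.view(n, c, -1), style.view(n, c, -1)
    c_mean, c_std = c_flat.mean(-1, keepdim=True), c_flat.std(-1, keepdim=True) + eps
    s_mean, s_std = s_flat.mean(-1, keepdim=True), s_flat.std(-1, keepdim=True) + eps
    # Whiten the content statistics, then re-color with the style statistics.
    out = s_std * (c_flat - c_mean) / c_std + s_mean
    return out.view_as(content)

print(adain(torch.randn(1, 4, 8, 8), torch.randn(1, 4, 8, 8)).shape)  # torch.Size([1, 4, 8, 8])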
mukulbhave/YAD2K | [
"a6174285e036f95df83783b7b4d951094cbb08c8"
] | [
"retrain_yolo.py"
] | [
"\"\"\"\nThis is a script that can be used to retrain the YOLOv2 model for your own dataset.\n\"\"\"\nimport argparse\n\nimport os\nfrom PIL import ImageOps\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport PIL\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.layers import Input, Lambda, Conv2D\nfrom keras.models import load_model, Model\nfrom keras import regularizers\nfrom keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping\n\nfrom yad2k.models.keras_yolo import (preprocess_true_boxes, yolo_body,\n yolo_eval, yolo_head, yolo_loss)\nfrom yad2k.utils.draw_boxes import draw_boxes\n\nimport h5py\nimport io\nfrom yolo_data_gen import *\n\n# Args\nargparser = argparse.ArgumentParser(\n description=\"Retrain or 'fine-tune' a pretrained YOLOv2 model for your own data.\")\n\nargparser.add_argument(\n '-d',\n '--data_path',\n help=\"path to numpy data file (.npz) containing np.object array 'boxes' and np.uint8 array 'images'\",\n default=os.path.join('..', 'DATA', 'underwater_data.npz'))\n\nargparser.add_argument(\n '-a',\n '--anchors_path',\n help='path to anchors file, defaults to yolo_anchors.txt',\n default=os.path.join('model_data', 'yolo_anchors.txt'))\n\nargparser.add_argument(\n '-c',\n '--classes_path',\n help='path to classes file, defaults to pascal_classes.txt',\n default=os.path.join('..', 'DATA', 'underwater_classes.txt'))\n\n# Default anchor boxes\nYOLO_ANCHORS = np.array(\n ((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),\n (7.88282, 3.52778), (9.77052, 9.16828)))\n\ndef _main(args):\n \n data_path = os.path.expanduser(args.data_path)\n classes_path = os.path.expanduser(args.classes_path)\n anchors_path = os.path.expanduser(args.anchors_path)\n\n class_names = get_classes(classes_path)\n anchors = get_anchors(anchors_path)\n \n dataset = h5py.File(data_path,'r+') \n \n anchors = YOLO_ANCHORS\n\n #detectors_mask, matching_true_boxes = get_detector_mask(boxes, anchors)\n\n model_body, model = create_model(anchors, class_names)\n\n train( model, class_names, anchors, dataset) # image_data, boxes, detectors_mask, matching_true_boxes )\n\n # TODO use data generator for draw as well\n \n \n # draw(model_body,\n # class_names,\n # anchors,\n # image_data,\n # image_set='all', # assumes test set is 0.9\n # weights_name='trained_stage_3_best.h5',\n # save_all=True)\n\n\ndef get_classes(classes_path):\n '''loads the classes'''\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\ndef get_anchors(anchors_path):\n '''loads the anchors from a file'''\n if os.path.isfile(anchors_path):\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n else:\n Warning(\"Could not open anchors file, using default.\")\n return YOLO_ANCHORS\n \n\n\n#Exactly Same as process data but handles images of different sizes in dataset\ndef scale_data(images, boxes=None):\n '''processes the data'''\n img_shape = (416,416)\n images = [PIL.Image.open(io.BytesIO(i)) for i in images]\n \n \n # Box preprocessing.\n if boxes is not None: \n # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.\n boxes = [box.reshape((-1, 5)) for box in boxes]\n # Get box parameters as x_center, y_center, box_width, box_height, class.\n boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]\n boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]\n \n # get original size of each 
image and and convert the coordinates and w h \n processed_images = []\n for i,img in enumerate(images):\n orig_size = np.array([images[i].width, images[i].height])\n boxes_xy[i] = boxes_xy[i] / orig_size\n boxes_wh[i] = boxes_wh[i] / orig_size\n images_i = images[i].resize(img_shape, PIL.Image.BICUBIC)\n \n images_i = np.array(images_i, dtype=np.float)\n processed_images.append(images_i/255)\n \n boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]\n\n # find the max number of boxes\n max_boxes = 0\n for boxz in boxes:\n if boxz.shape[0] > max_boxes:\n max_boxes = boxz.shape[0]\n\n # add zero pad for training\n for i, boxz in enumerate(boxes):\n if boxz.shape[0] < max_boxes:\n zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)\n boxes[i] = np.vstack((boxz, zero_padding))\n\n return np.array(processed_images), np.array(boxes)\n \n else:\n processed_images = [resize_image(i,img_shape[0],img_shape[1],False) for i in images]\n processed_images = [np.array(image, dtype=np.float) for image in processed_images]\n processed_images = [image/255. for image in processed_images]\n return np.array(processed_images)\n \n \ndef process_data(images, boxes=None):\n '''processes the data'''\n #images = [PIL.Image.fromarray(i) for i in images]\n images = [PIL.Image.open(io.BytesIO(i)) for i in images]\n orig_size = np.array([images[0].width, images[0].height])\n orig_size = np.expand_dims(orig_size, axis=0)\n print(type(images[0]))\n # Image preprocessing.\n processed_images = [i.resize((416, 416), PIL.Image.BICUBIC) for i in images]\n #processed_images = [resize_image(i,416,416,False) for i in images]\n \n processed_images = [np.array(image, dtype=np.float) for image in processed_images]\n processed_images = [image/255. 
for image in processed_images]\n\n if boxes is not None:\n # Box preprocessing.\n # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.\n boxes = [box.reshape((-1, 5)) for box in boxes]\n # Get extents as y_min, x_min, y_max, x_max, class for comparision with\n # model output.\n #boxes_extents = [box[:, [2, 1, 4, 3, 0]] for box in boxes]\n\n # Get box parameters as x_center, y_center, box_width, box_height, class.\n boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]\n boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]\n boxes_xy = [boxxy / orig_size for boxxy in boxes_xy]\n boxes_wh = [boxwh / orig_size for boxwh in boxes_wh]\n boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]\n\n # find the max number of boxes\n max_boxes = 0\n for boxz in boxes:\n if boxz.shape[0] > max_boxes:\n max_boxes = boxz.shape[0]\n\n # add zero pad for training\n for i, boxz in enumerate(boxes):\n if boxz.shape[0] < max_boxes:\n zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)\n boxes[i] = np.vstack((boxz, zero_padding))\n\n return np.array(processed_images), np.array(boxes)\n else:\n return np.array(processed_images)\n\ndef get_detector_mask(boxes, anchors):\n '''\n Precompute detectors_mask and matching_true_boxes for training.\n Detectors mask is 1 for each spatial position in the final conv layer and\n anchor that should be active for the given boxes and 0 otherwise.\n Matching true boxes gives the regression targets for the ground truth box\n that caused a detector to be active or 0 otherwise.\n '''\n detectors_mask = [0 for i in range(len(boxes))]\n matching_true_boxes = [0 for i in range(len(boxes))]\n for i, box in enumerate(boxes):\n detectors_mask[i], matching_true_boxes[i] = preprocess_true_boxes(box, anchors, [416, 416])\n\n return np.array(detectors_mask), np.array(matching_true_boxes)\n\ndef create_model(anchors, class_names, load_pretrained=True, freeze_body=True):\n '''\n returns the body of the model and the model\n\n # Params:\n\n load_pretrained: whether or not to load the pretrained model or initialize all weights\n\n freeze_body: whether or not to freeze all weights except for the last layer's\n\n # Returns:\n\n model_body: YOLOv2 with new output layer\n\n model: YOLOv2 with custom loss Lambda layer\n\n '''\n\n detectors_mask_shape = (13, 13, 5, 1)\n matching_boxes_shape = (13, 13, 5, 5)\n\n # Create model input layers.\n image_input = Input(shape=(416, 416, 3))\n boxes_input = Input(shape=(None, 5))\n detectors_mask_input = Input(shape=detectors_mask_shape)\n matching_boxes_input = Input(shape=matching_boxes_shape)\n\n # Create model body.\n yolo_model = yolo_body(image_input, len(anchors), len(class_names))\n topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output)\n\n if load_pretrained:\n # Save topless yolo:\n topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5')\n if not os.path.exists(topless_yolo_path):\n print(\"CREATING TOPLESS WEIGHTS FILE\")\n yolo_path = os.path.join('model_data', 'yolo.h5')\n model_body = load_model(yolo_path)\n model_body = Model(model_body.inputs, model_body.layers[-2].output)\n model_body.save_weights(topless_yolo_path)\n topless_yolo.load_weights(topless_yolo_path)\n\n if freeze_body:\n for layer in topless_yolo.layers:\n layer.trainable = False\n final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear',kernel_regularizer= regularizers.l2(5e-4))(topless_yolo.output)\n\n model_body = 
Model(image_input, final_layer)\n\n # Place model loss on CPU to reduce GPU memory usage.\n with tf.device('/cpu:0'):\n # TODO: Replace Lambda with custom Keras layer for loss.\n model_loss = Lambda(\n yolo_loss,\n output_shape=(1, ),\n name='yolo_loss',\n arguments={'anchors': anchors,\n 'num_classes': len(class_names)})([\n model_body.output, boxes_input,\n detectors_mask_input, matching_boxes_input\n ])\n\n model = Model(\n [model_body.input, boxes_input, detectors_mask_input,\n matching_boxes_input], model_loss)\n\n return model_body, model\n\ndef train(model, class_names, anchors, dataset):#image_data, boxes, detectors_mask, matching_true_boxes, validation_split=0.1):\n '''\n retrain/fine-tune the model\n\n logs training with tensorboard\n\n saves training weights in current directory\n\n best weights according to val_loss is saved as trained_stage_3_best.h5\n '''\n model.compile(\n optimizer='adam', loss={\n 'yolo_loss': lambda y_true, y_pred: y_pred\n }) # This is a hack to use the custom loss function in the last layer.\n\n\n logging = TensorBoard()#log_dir='./train_logs', histogram_freq=1, write_graph=False, write_images=True)\n checkpoint = ModelCheckpoint(\"trained_stage_3_best.h5\", monitor='val_loss',\n save_weights_only=True, save_best_only=True)\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')\n\n batch_size = 8\n dataTrain = dataset['train']\n dataVal= dataset['val']\n train_set_size =dataTrain.attrs['dataset_size']\n val_set_size =dataVal.attrs['dataset_size']\n training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)\n validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)\n # model.fit([image_data, boxes, detectors_mask, matching_true_boxes],\n # np.zeros(len(image_data)),\n # validation_split=validation_split,\n # batch_size=8,\n # epochs=5,\n # callbacks=[logging])\n model.fit_generator(generator=training_generator,\n validation_data=validation_generator,\n use_multiprocessing=False,\n epochs=5,verbose = 1, callbacks=[logging])\n model.save_weights('trained_stage_1.h5')\n\n model_body, model = create_model(anchors, class_names, load_pretrained=False, freeze_body=False)\n\n model.load_weights('trained_stage_1.h5')\n\n model.compile(\n optimizer='adam', loss={\n 'yolo_loss': lambda y_true, y_pred: y_pred\n }) # This is a hack to use the custom loss function in the last layer.\n\n \n # model.fit([image_data, boxes, detectors_mask, matching_true_boxes],\n # np.zeros(len(image_data)),\n # validation_split=validation_split,\n # batch_size=8,\n # epochs=30,\n # callbacks=[logging])\n training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)\n validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)\n model.fit_generator(generator=training_generator,\n validation_data=validation_generator,\n use_multiprocessing=False,\n epochs=30,verbose = 1, callbacks=[logging])\n model.save_weights('trained_stage_2.h5')\n \n training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)\n validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)\n model.fit_generator(generator=training_generator,\n validation_data=validation_generator,\n use_multiprocessing=False,\n epochs=30,verbose = 1, callbacks=[logging, checkpoint, early_stopping])\n # model.fit([image_data, boxes, detectors_mask, matching_true_boxes],\n # np.zeros(len(image_data)),\n # 
validation_split=validation_split,\n # batch_size=8,\n # epochs=30,\n # callbacks=[logging, checkpoint, early_stopping])\n\n model.save_weights('trained_stage_3.h5')\n\ndef draw(model_body, class_names, anchors, image_data, image_set='val',\n weights_name='trained_stage_3_best.h5', out_path=\"output_images\", save_all=True):\n '''\n Draw bounding boxes on image data\n '''\n if image_set == 'train':\n image_data = np.array([np.expand_dims(image, axis=0)\n for image in image_data[:int(len(image_data)*.9)]])\n elif image_set == 'val':\n image_data = np.array([np.expand_dims(image, axis=0)\n for image in image_data[int(len(image_data)*.9):]])\n elif image_set == 'all':\n image_data = np.array([np.expand_dims(image, axis=0)\n for image in image_data])\n else:\n ValueError(\"draw argument image_set must be 'train', 'val', or 'all'\")\n # model.load_weights(weights_name)\n print(image_data.shape)\n model_body.load_weights(weights_name)\n\n # Create output variables for prediction.\n yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))\n input_image_shape = K.placeholder(shape=(2, ))\n boxes, scores, classes = yolo_eval(\n yolo_outputs, input_image_shape, score_threshold=0.7, iou_threshold=0.7)\n\n # Run prediction on overfit image.\n sess = K.get_session() # TODO: Remove dependence on Tensorflow session.\n\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n for i in range(len(image_data)):\n out_boxes, out_scores, out_classes = sess.run(\n [boxes, scores, classes],\n feed_dict={\n model_body.input: image_data[i],\n input_image_shape: [image_data.shape[2], image_data.shape[3]],\n K.learning_phase(): 0\n })\n print('Found {} boxes for image.'.format(len(out_boxes)))\n print(out_boxes)\n\n # Plot image with predicted boxes.\n image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,\n class_names, out_scores,out_path+\"\\\\\"+str(i)+'.jpg')\n # Save the image:\n if save_all :\n image = PIL.Image.fromarray(image_with_boxes)\n image.save(os.path.join(out_path,str(i)+'.jpg'))\n\n # To display (pauses the program):\n plt.imshow(image_with_boxes, interpolation='nearest')\n plt.show()\n\n\n\nif __name__ == '__main__':\n args = argparser.parse_args()\n _main(args)\n"
] | [
[
"numpy.vstack",
"numpy.zeros",
"tensorflow.device",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"numpy.expand_dims",
"numpy.array",
"numpy.concatenate"
]
] |
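process_data above turns corner-coordinate boxes into normalized (x_center, y_center, width, height, class) rows and zero-pads every image to a common box count. A condensed standalone version of just that transformation (synthetic boxes, my own helper; columns are class, x_min, y_min, x_max, y_max):

import numpy as np

def to_yolo_boxes(boxes_per_image, image_size):
    w, h = image_size
    max_boxes = max(b.shape[0] for b in boxes_per_image)
    out = []
    for box in boxes_per_image:
        xy = 0.5 * (box[:, 3:5] + box[:, 1:3]) / (w, h)  # normalized centers
        wh = (box[:, 3:5] - box[:, 1:3]) / (w, h)        # normalized sizes
        packed = np.concatenate([xy, wh, box[:, 0:1]], axis=1)
        pad = np.zeros((max_boxes - box.shape[0], 5), dtype=np.float32)  # zero pad for training
        out.append(np.vstack([packed, pad]))
    return np.array(out)

boxes = [np.array([[0, 10, 10, 50, 50]], dtype=float),
         np.array([[1, 0, 0, 20, 20], [0, 5, 5, 25, 25]], dtype=float)]
print(to_yolo_boxes(boxes, (416, 416)).shape)  # (2, 2, 5)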
traffic-ai/EvalDeT | [
"3b52698e1b03fb9066e3203c2f36aebfa0030aba"
] | [
"tests/unit/test_clearmot.py"
] | [
"import numpy as np\nimport pytest\n\nfrom evaldet import Tracks\nfrom evaldet.mot_metrics.clearmot import calculate_clearmot_metrics\n\n\ndef test_missing_frame_hyp():\n gt = Tracks()\n gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))\n gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))\n\n hyp = Tracks()\n hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))\n metrics = calculate_clearmot_metrics(gt, hyp)\n\n assert metrics[\"FN_CLEAR\"] == 1\n assert metrics[\"FP_CLEAR\"] == 0\n assert metrics[\"IDS\"] == 0\n\n\ndef test_missing_frame_gt():\n gt = Tracks()\n gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))\n\n hyp = Tracks()\n hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))\n hyp.add_frame(1, [0], np.array([[0, 0, 1, 1]]))\n metrics = calculate_clearmot_metrics(gt, hyp)\n\n assert metrics[\"IDS\"] == 0\n assert metrics[\"FN_CLEAR\"] == 0\n assert metrics[\"FP_CLEAR\"] == 1\n\n\ndef test_no_association_made():\n gt = Tracks()\n gt.add_frame(0, [0], np.array([[10, 10, 11, 11]]))\n\n hyp = Tracks()\n hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))\n metrics = calculate_clearmot_metrics(gt, hyp)\n\n assert metrics[\"IDS\"] == 0\n assert metrics[\"FN_CLEAR\"] == 1\n assert metrics[\"FP_CLEAR\"] == 1\n assert metrics[\"MOTA\"] == -1 # Stange but ok\n assert np.isnan(metrics[\"MOTP\"])\n\n\[email protected](\"threshold\", [0.3, 0.5, 0.7])\ndef test_dist_threshold(threshold: float):\n gt = Tracks()\n gt.add_frame(\n 0,\n [0, 1, 2, 3],\n np.array([[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 1, 1]]),\n )\n\n hyp = Tracks()\n hyp.add_frame(\n 0,\n [0, 1, 2, 3],\n np.array([[0, 0, 1, 0.2], [0, 0, 1, 0.4], [0, 0, 1, 0.6], [0, 0, 1, 0.8]]),\n )\n\n fn_res = {0.3: 3, 0.5: 2, 0.7: 1}\n\n metrics = calculate_clearmot_metrics(gt, hyp, dist_threshold=threshold)\n assert fn_res[threshold] == metrics[\"FN_CLEAR\"]\n\n\ndef test_sticky_association():\n \"\"\"Test that as long as distance is below threshold, the association does\n not switch, even if a detection with better IoU score appears.\n \"\"\"\n gt = Tracks()\n gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))\n gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))\n\n hyp = Tracks()\n hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))\n hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [0, 0, 1, 1]]))\n\n metrics = calculate_clearmot_metrics(gt, hyp)\n assert metrics[\"FN_CLEAR\"] == 0\n assert metrics[\"IDS\"] == 0\n assert metrics[\"FP_CLEAR\"] == 1\n\n\ndef test_mismatch():\n gt = Tracks()\n gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))\n gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))\n\n hyp = Tracks()\n hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))\n hyp.add_frame(1, [1], np.array([[0, 0, 1, 1]]))\n\n metrics = calculate_clearmot_metrics(gt, hyp)\n assert metrics[\"FN_CLEAR\"] == 0\n assert metrics[\"IDS\"] == 1\n assert metrics[\"FP_CLEAR\"] == 0\n\n\ndef test_persistent_mismatch():\n \"\"\"Test that association (and therefore mismatch) persists even\n when the first matched hypothesis is gone, as long as another one\n is not assigned.\"\"\"\n gt = Tracks()\n gt.add_frame(0, [0], np.array([[0, 0, 1, 1]]))\n gt.add_frame(1, [0], np.array([[0, 0, 1, 1]]))\n gt.add_frame(2, [0], np.array([[0, 0, 1, 1]]))\n\n hyp = Tracks()\n hyp.add_frame(0, [0], np.array([[0, 0, 1, 1]]))\n hyp.add_frame(2, [1], np.array([[0, 0, 1, 1]]))\n\n metrics = calculate_clearmot_metrics(gt, hyp)\n assert metrics[\"FN_CLEAR\"] == 1\n assert metrics[\"IDS\"] == 1\n assert metrics[\"FP_CLEAR\"] == 0\n\n\ndef test_simple_case():\n \"\"\"Test a simple case with 3 frames and 2 
detections/gts per frame.\"\"\"\n gt = Tracks()\n gt.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))\n gt.add_frame(1, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))\n gt.add_frame(2, [0, 1], np.array([[0, 0, 1, 1], [2, 2, 3, 3]]))\n\n hyp = Tracks()\n hyp.add_frame(0, [0, 1], np.array([[0, 0, 1, 1], [1, 1, 2, 2]]))\n hyp.add_frame(1, [0, 1], np.array([[0.1, 0.1, 1.1, 1.1], [1, 1, 2, 2]]))\n hyp.add_frame(2, [2, 1], np.array([[0.05, 0.05, 1.05, 1.05], [2, 2, 3, 3]]))\n\n metrics = calculate_clearmot_metrics(gt, hyp)\n assert metrics[\"FN_CLEAR\"] == 1\n assert metrics[\"IDS\"] == 1\n assert metrics[\"FP_CLEAR\"] == 1\n assert metrics[\"MOTA\"] == 0.5\n assert metrics[\"MOTP\"] == 0.0994008537355717\n"
] | [
[
"numpy.array",
"numpy.isnan"
]
] |
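The matching these tests exercise is driven by an IoU-based distance with a cutoff (dist_threshold). A small reference IoU helper (my own, assuming the (x_min, y_min, x_max, y_max) box convention the test arrays suggest); for gt [0, 0, 1, 1] against hyp [0, 0, 1, 0.4] it gives 0.4, i.e. a distance of 0.6, consistent with the fn_res expectations in test_dist_threshold:

import numpy as np

def iou_xyxy(a, b):
    # Intersection width/height, clipped at zero for disjoint boxes.
    iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = iw * ih
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union if union > 0 else 0.0

print(iou_xyxy(np.array([0, 0, 1, 1.0]), np.array([0, 0, 1, 0.4])))  # 0.4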
robustmetalearning/robust-meta-learning | [
"08fc3e9302c9fbd1fcfc3e001e0b080a3c783c81"
] | [
"MAML-ADML/meta.py"
] | [
"import torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch import optim\nimport numpy as np\n\nfrom learner import Learner\nfrom copy import deepcopy\n\ndef zero_nontrainable_grads(grads, trainable_layers=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]):\n for index, grad_tensor in enumerate(grads):\n if index not in trainable_layers:\n grad_tensor = torch.zeros_like(grad_tensor)\n\ndef inputsPGD(metalearner, net, inputs, targets, params = False, evaluate = False):\n if evaluate:\n attack_steps = metalearner.eval_attack_steps\n else:\n attack_steps = metalearner.attack_steps\n x = inputs.detach()\n if not metalearner.no_random_start:\n x = x + torch.zeros_like(x).uniform_(-metalearner.attack_epsilon, metalearner.attack_epsilon)\n for i in range(attack_steps):\n x.requires_grad_()\n with torch.enable_grad():\n if params:\n loss = F.cross_entropy(net(x, params), targets, size_average=False)\n else:\n loss = F.cross_entropy(net(x), targets, size_average=False)\n grad = torch.autograd.grad(loss, [x])[0]\n if metalearner.targeted:\n x = x.detach() - metalearner.attack_step_size*torch.sign(grad.detach())\n else:\n x = x.detach() + metalearner.attack_step_size*torch.sign(grad.detach())\n x = torch.min(torch.max(x, inputs - metalearner.attack_epsilon), inputs + metalearner.attack_epsilon)\n x = torch.clamp(x, 0.0, 1.0)\n return x\n\nclass Meta(nn.Module):\n \"\"\"\n Meta Learner\n \"\"\"\n def __init__(self, args, config):\n \"\"\"\n\n :param args:\n \"\"\"\n super(Meta, self).__init__()\n self.finetune_trainable = args.finetune_trainable\n self.update_lr = args.update_lr\n self.meta_lr = args.meta_lr\n self.n_way = args.n_way\n self.k_spt = args.k_spt\n self.k_qry = args.k_qry\n self.task_num = args.task_num\n self.update_step = args.update_step\n self.update_step_test = args.update_step_test\n self.attack_query = args.attack_query\n self.attack_support = args.attack_support\n self.no_attack_validation = args.no_attack_validation\n self.attack_epsilon = args.attack_epsilon\n self.attack_step_size = args.attack_step_size\n self.attack_steps = args.attack_steps\n self.eval_attack_steps = args.eval_attack_steps\n self.net = Learner(config, args.imgc, args.imgsz)\n self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)\n self.no_random_start = args.no_random_start\n self.targeted = args.targeted\n\n def clip_grad_by_norm_(self, grad, max_norm):\n \"\"\"\n in-place gradient clipping.\n :param grad: list of gradients\n :param max_norm: maximum norm allowable\n :return:\n \"\"\"\n\n total_norm = 0\n counter = 0\n for g in grad:\n param_norm = g.data.norm(2)\n total_norm += param_norm.item() ** 2\n counter += 1\n total_norm = total_norm ** (1. / 2)\n\n clip_coef = max_norm / (total_norm + 1e-6)\n if clip_coef < 1:\n for g in grad:\n g.data.mul_(clip_coef)\n\n return total_norm/counter\n\n\n def forward(self, x_spt, y_spt, x_qry, y_qry):\n \"\"\"\n\n :param x_spt: [b, setsz, c_, h, w]\n :param y_spt: [b, setsz]\n :param x_qry: [b, querysz, c_, h, w]\n :param y_qry: [b, querysz]\n :return:\n \"\"\"\n\n task_num, setsz, c_, h, w = x_spt.size()\n querysz = x_qry.size(1)\n losses_q = [0 for _ in range(self.update_step + 1)] # losses_q[i] is the loss on step i\n corrects = [0 for _ in range(self.update_step + 1)] \n\n for i in range(task_num):\n\n # 1. 
run the i-th task and compute loss for k=0\n if self.attack_support:\n logits = self.net(inputsPGD(self, self.net, x_spt[i], y_spt[i]), vars=None, bn_training=True)\n else:\n logits = self.net(x_spt[i], vars=None, bn_training=True)\n loss = F.cross_entropy(logits, y_spt[i])\n\n grad = torch.autograd.grad(loss, self.net.parameters())\n zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)\n fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.net.parameters())))\n\n # this is the loss and accuracy before first update\n\n with torch.no_grad():\n # [setsz, nway]\n logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)\n loss_q = F.cross_entropy(logits_q, y_qry[i])\n losses_q[0] += loss_q\n\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i]).sum().item()\n corrects[0] = corrects[0] + correct\n\n # this is the loss and accuracy after the first update\n\n with torch.no_grad():\n # [setsz, nway]\n logits_q = self.net(x_qry[i], fast_weights, bn_training=True)\n loss_q = F.cross_entropy(logits_q, y_qry[i])\n losses_q[1] += loss_q\n # [setsz]\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i]).sum().item()\n corrects[1] = corrects[1] + correct\n\n for k in range(1, self.update_step):\n # 1. run the i-th task and compute loss for k=1~K-1\n if self.attack_support:\n logits = self.net(inputsPGD(self, self.net, x_spt[i], y_spt[i], params = fast_weights), fast_weights, bn_training=True)\n else:\n logits = self.net(x_spt[i], fast_weights, bn_training=True)\n loss = F.cross_entropy(logits, y_spt[i])\n # 2. compute grad on theta_pi\n grad = torch.autograd.grad(loss, fast_weights)\n zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)\n # 3. theta_pi = theta_pi - train_lr * grad\n fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))\n if self.attack_query:\n logits_q = self.net(inputsPGD(self, self.net, x_qry[i], y_qry[i], params = fast_weights), fast_weights, bn_training=True)\n else:\n logits_q = self.net(x_qry[i], fast_weights, bn_training=True)\n # loss_q will be overwritten and just keep the loss_q on last update step.\n loss_q = F.cross_entropy(logits_q, y_qry[i])\n losses_q[k + 1] += loss_q\n\n with torch.no_grad():\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n correct = torch.eq(pred_q, y_qry[i]).sum().item() # convert to numpy\n corrects[k + 1] = corrects[k + 1] + correct\n\n\n\n # end of all tasks\n # sum over all losses on query set across all tasks\n loss_q = losses_q[-1] / task_num\n\n # optimize theta parameters\n self.meta_optim.zero_grad()\n loss_q.backward()\n # print('meta update')\n # for p in self.net.parameters()[:5]:\n # \tprint(torch.norm(p).item())\n self.meta_optim.step()\n\n\n accs = np.array(corrects) / (querysz * task_num)\n\n return accs\n\n def finetunning(self, x_spt, y_spt, x_qry, y_qry):\n \"\"\"\n\n :param x_spt: [setsz, c_, h, w]\n :param y_spt: [setsz]\n :param x_qry: [querysz, c_, h, w]\n :param y_qry: [querysz]\n :return:\n \"\"\"\n assert len(x_spt.shape) == 4\n print('Validating...')\n\n querysz = x_qry.size(0)\n\n natural_corrects = [0 for _ in range(self.update_step_test + 1)]\n robust_corrects = [0 for _ in range(self.update_step_test + 1)]\n\n # in order to not ruin the state of running_mean/variance and bn_weight/bias\n # we finetunning on the copied model instead of self.net\n net = deepcopy(self.net)\n\n # 1. 
run the i-th task and compute loss for k=0\n        logits = net(x_spt)\n        loss = F.cross_entropy(logits, y_spt)\n        grad = torch.autograd.grad(loss, net.parameters())\n        zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)\n        fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))\n\n        # this is the loss and accuracy before first update\n        with torch.no_grad():\n            # [setsz, nway]\n            logits_q = net(x_qry, net.parameters(), bn_training=True)\n            # [setsz]\n            pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n            # scalar\n            natural_correct = torch.eq(pred_q, y_qry).sum().item()\n            natural_corrects[0] = natural_corrects[0] + natural_correct\n\n            # [setsz, nway]\n            robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, net.parameters(), evaluate=True), net.parameters(), bn_training=True)\n            # [setsz]\n            robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)\n            # scalar\n            robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()\n            robust_corrects[0] = robust_corrects[0] + robust_correct\n\n        # this is the loss and accuracy after the first update\n        with torch.no_grad():\n            # [setsz, nway]\n            logits_q = net(x_qry, fast_weights, bn_training=True)\n            # [setsz]\n            pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n            # scalar; assign to natural_correct so the step-1 counter does not\n            # reuse the stale step-0 value\n            natural_correct = torch.eq(pred_q, y_qry).sum().item()\n            natural_corrects[1] = natural_corrects[1] + natural_correct\n\n            # [setsz, nway]\n            robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)\n            # [setsz]\n            robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)\n            # scalar\n            robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()\n            robust_corrects[1] = robust_corrects[1] + robust_correct\n\n        for k in range(1, self.update_step_test):\n            # 1. run the i-th task and compute loss for k=1~K-1\n            logits = net(x_spt, fast_weights, bn_training=True)\n            loss = F.cross_entropy(logits, y_spt)\n            # 2. compute grad on theta_pi\n            grad = torch.autograd.grad(loss, fast_weights)\n            zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)\n            # 3. theta_pi = theta_pi - train_lr * grad\n            fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))\n\n            logits_q = net(x_qry, fast_weights, bn_training=True)\n            # loss_q will be overwritten and just keep the loss_q on last update step.\n            loss_q = F.cross_entropy(logits_q, y_qry)\n\n            with torch.no_grad():\n                pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n                natural_correct = torch.eq(pred_q, y_qry).sum().item() # convert to numpy\n                natural_corrects[k + 1] = natural_corrects[k + 1] + natural_correct\n\n            robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)\n            # loss_q will be overwritten and just keep the loss_q on last update step.\n            robust_loss_q = F.cross_entropy(robust_logits_q, y_qry)\n\n            with torch.no_grad():\n                robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)\n                robust_correct = torch.eq(robust_pred_q, y_qry).sum().item() # convert to numpy\n                robust_corrects[k + 1] = robust_corrects[k + 1] + robust_correct\n\n        del net\n\n        natural_accs = np.array(natural_corrects) / querysz\n        robust_accs = np.array(robust_corrects) / querysz\n\n\n        ########################### DO THE SAME THING BUT ADVERSARIALLY TRAINED ON SUPPORT ########################\n\n        natural_corrects = [0 for _ in range(self.update_step_test + 1)]\n        robust_corrects = [0 for _ in range(self.update_step_test + 1)]\n\n        # in order to not ruin the state of running_mean/variance and bn_weight/bias\n        # we finetunning on the copied model instead of self.net\n        net = deepcopy(self.net)\n\n        # 1. run the i-th task and compute loss for k=0\n        logits = net(inputsPGD(self, net, x_spt, y_spt), bn_training=True)\n        loss = F.cross_entropy(logits, y_spt)\n        grad = torch.autograd.grad(loss, net.parameters())\n        zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)\n        fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, net.parameters())))\n\n        # this is the loss and accuracy before first update\n        with torch.no_grad():\n            # [setsz, nway]\n            logits_q = net(x_qry, net.parameters(), bn_training=True)\n            # [setsz]\n            pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n            # scalar\n            natural_correct = torch.eq(pred_q, y_qry).sum().item()\n            natural_corrects[0] = natural_corrects[0] + natural_correct\n\n            # [setsz, nway]\n            robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, net.parameters(), evaluate=True), net.parameters(), bn_training=True)\n            # [setsz]\n            robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)\n            # scalar\n            robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()\n            robust_corrects[0] = robust_corrects[0] + robust_correct\n\n        # this is the loss and accuracy after the first update\n        with torch.no_grad():\n            # [setsz, nway]\n            logits_q = net(x_qry, fast_weights, bn_training=True)\n            # [setsz]\n            pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n            # scalar; assign to natural_correct so the step-1 counter does not\n            # reuse the stale step-0 value\n            natural_correct = torch.eq(pred_q, y_qry).sum().item()\n            natural_corrects[1] = natural_corrects[1] + natural_correct\n\n            # [setsz, nway]\n            robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)\n            # [setsz]\n            robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)\n            # scalar\n            robust_correct = torch.eq(robust_pred_q, y_qry).sum().item()\n            robust_corrects[1] = robust_corrects[1] + robust_correct\n\n        for k in range(1, self.update_step_test):\n            # 1. 
run the i-th task and compute loss for k=1~K-1\n logits = net(inputsPGD(self, net, x_spt, y_spt, params=fast_weights), fast_weights, bn_training=True)\n loss = F.cross_entropy(logits, y_spt)\n # 2. compute grad on theta_pi\n grad = torch.autograd.grad(loss, fast_weights)\n zero_nontrainable_grads(grad, trainable_layers=self.finetune_trainable)\n # 3. theta_pi = theta_pi - train_lr * grad\n fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))\n\n logits_q = net(x_qry, fast_weights, bn_training=True)\n # loss_q will be overwritten and just keep the loss_q on last update step.\n loss_q = F.cross_entropy(logits_q, y_qry)\n\n with torch.no_grad():\n pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)\n natural_correct = torch.eq(pred_q, y_qry).sum().item() # convert to Python scalar\n natural_corrects[k + 1] = natural_corrects[k + 1] + natural_correct\n\n robust_logits_q = net(inputsPGD(self, net, x_qry, y_qry, fast_weights, evaluate=True), fast_weights, bn_training=True)\n # robust_loss_q will be overwritten and just keep the loss on last update step.\n robust_loss_q = F.cross_entropy(robust_logits_q, y_qry)\n\n with torch.no_grad():\n robust_pred_q = F.softmax(robust_logits_q, dim=1).argmax(dim=1)\n robust_correct = torch.eq(robust_pred_q, y_qry).sum().item() # convert to Python scalar\n robust_corrects[k + 1] = robust_corrects[k + 1] + robust_correct\n\n del net\n\n natural_accs_advTrained = np.array(natural_corrects) / querysz\n robust_accs_advTrained = np.array(robust_corrects) / querysz\n\n return natural_accs, robust_accs, natural_accs_advTrained, robust_accs_advTrained\n\n\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.autograd.grad",
"torch.nn.functional.softmax",
"torch.zeros_like",
"torch.no_grad",
"torch.eq",
"torch.enable_grad",
"torch.nn.functional.cross_entropy",
"torch.max",
"numpy.array",
"torch.clamp"
]
] |
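Note on the adversarial meta-learning snippet above: the idiom it repeats is a first-order "fast weights" step, theta' = theta - update_lr * grad, computed against a copied parameter list so the meta-parameters stay untouched. A minimal standalone sketch of just that step (hypothetical toy tensors; not the repository's Learner or inputsPGD code):

import torch
import torch.nn.functional as F

# One small "parameter list" standing in for net.parameters()
w = torch.randn(5, 3, requires_grad=True)
b = torch.zeros(5, requires_grad=True)
x_spt, y_spt = torch.randn(4, 3), torch.randint(0, 5, (4,))

loss = F.cross_entropy(F.linear(x_spt, w, b), y_spt)
grad = torch.autograd.grad(loss, (w, b))

update_lr = 0.01
# theta' = theta - lr * grad: the same update the map/zip expression computes
fast_weights = [p - update_lr * g for p, g in zip((w, b), grad)]

# Evaluate with the adapted weights; (w, b) themselves are unchanged
logits_q = F.linear(x_spt, fast_weights[0], fast_weights[1])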
ondrejklejch/learning_to_adapt | [
"6de0b98370769596da16a1688582925ea2e1fa29"
] | [
"steps/nnet3/train.py"
] | [
"import sys\nimport numpy as np\n\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler\nfrom keras.models import Model\nfrom keras.layers import Input, Activation, Conv1D, BatchNormalization\nfrom keras.optimizers import Adam\n\nfrom learning_to_adapt.model import LHUC, Renorm\nfrom learning_to_adapt.utils import load_dataset, load_utt_to_spk, load_utt_to_pdfs, load_lda\n\nimport keras\nimport tensorflow as tf\n\nconfig = tf.ConfigProto()\nconfig.intra_op_parallelism_threads=1\nconfig.inter_op_parallelism_threads=1\nkeras.backend.tensorflow_backend.set_session(tf.Session(config=config))\n\n\ndef create_model(hidden_dim=350, lda_path=None):\n lda, bias = load_lda(lda_path)\n lda = lda.reshape((5, 40, 200))\n\n feats = Input(shape=(None, 40))\n x = Conv1D(200, kernel_size=5, name=\"lda\", trainable=False, weights=[lda, bias])(feats)\n\n layers = [(1, 1), (2, 3), (2, 6), (2, 9), (2, 6), (1, 1)]\n for i, (kernel_size, dilation_rate) in enumerate(layers):\n name = \"tdnn%d\" % (i + 1)\n x = Conv1D(hidden_dim, kernel_size=kernel_size, dilation_rate=dilation_rate, activation=\"relu\", name=\"%s.affine\" % name)(x)\n x = BatchNormalization(name=\"%s.batchnorm\" % name)(x)\n x = LHUC(name=\"lhuc.%s\" % name, trainable=False)(x)\n\n y = Conv1D(4208, kernel_size=1, activation=\"softmax\", name=\"output.affine\")(x)\n\n return Model(inputs=[feats], outputs=[y])\n\n\nif __name__ == '__main__':\n train_data = sys.argv[1]\n val_data = sys.argv[2]\n utt2spk = sys.argv[3]\n pdfs = sys.argv[4]\n left_context = int(sys.argv[5])\n right_context = int(sys.argv[6])\n lda_path = sys.argv[7]\n output_path = sys.argv[8]\n\n num_epochs = 400\n batch_size = 256\n learning_rate = 0.0015\n\n utt_to_spk = load_utt_to_spk(utt2spk)\n utt_to_pdfs = load_utt_to_pdfs(pdfs)\n\n train_dataset = load_dataset(train_data, utt_to_spk, utt_to_pdfs, chunk_size=8, subsampling_factor=1, left_context=left_context, right_context=right_context)\n train_dataset = train_dataset.batch(batch_size, drop_remainder=True)\n train_dataset = train_dataset.prefetch(1024)\n x, _, y = train_dataset.make_one_shot_iterator().get_next()\n\n val_dataset = load_dataset(val_data, utt_to_spk, utt_to_pdfs, chunk_size=8, subsampling_factor=1, left_context=left_context, right_context=right_context)\n val_dataset = val_dataset.batch(batch_size, drop_remainder=True)\n val_dataset = val_dataset.take(512).cache().repeat()\n val_x, _, val_y = val_dataset.make_one_shot_iterator().get_next()\n\n model = create_model(600, lda_path)\n model.compile(\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'],\n optimizer=Adam(lr=learning_rate, amsgrad=True, clipvalue=1.)\n )\n\n callbacks = [\n CSVLogger(output_path + \"model.csv\"),\n ModelCheckpoint(filepath=output_path + \"model.{epoch:02d}.h5\", save_best_only=False, period=10),\n ModelCheckpoint(filepath=output_path + \"model.best.h5\", save_best_only=True),\n LearningRateScheduler(lambda epoch, lr: learning_rate - epoch * (learning_rate - learning_rate / 10) / num_epochs, verbose=0)\n ]\n\n model.fit(x, y,\n steps_per_epoch=2000,\n epochs=num_epochs,\n validation_data=(val_x, val_y),\n validation_steps=512,\n callbacks=callbacks\n )\n"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.Session"
]
] |
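Note on the training script above: its LearningRateScheduler lambda is a linear decay from the base rate at epoch 0 down to one tenth of it at epoch num_epochs. A standalone check of the same formula:

# lr(epoch) = lr0 - epoch * (lr0 - lr0 / 10) / num_epochs
learning_rate, num_epochs = 0.0015, 400

def lr_at(epoch):
    return learning_rate - epoch * (learning_rate - learning_rate / 10) / num_epochs

print(lr_at(0))           # 0.0015
print(lr_at(num_epochs))  # 0.00015, i.e. learning_rate / 10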
madhukarkm/NeMo | [
"648c97f076147684bee6aaada209f2f20adcaf5d",
"648c97f076147684bee6aaada209f2f20adcaf5d"
] | [
"nemo/collections/nlp/data/data_utils/data_preprocessing.py",
"tests/core/test_fileio.py"
] | [
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport csv\nimport json\nimport os\nimport pickle\nimport random\nimport re\nimport string\nfrom collections import Counter\n\nimport numpy as np\nimport torch\nfrom tqdm.auto import tqdm\n\nfrom nemo.utils import logging\nfrom nemo.utils.env_var_parsing import get_envint\n\n__all__ = [\n 'DataProcessor',\n 'get_label_stats',\n 'partition_data',\n 'write_files',\n 'write_data',\n 'create_dataset',\n 'read_csv',\n 'get_dataset',\n 'partition',\n 'map_entities',\n 'get_entities',\n 'get_data',\n 'reverse_dict',\n 'get_intent_labels',\n 'get_stats',\n 'DATABASE_EXISTS_TMP',\n 'MODE_EXISTS_TMP',\n 'is_whitespace',\n 'write_vocab',\n 'if_exist',\n 'remove_punctuation_from_sentence',\n 'dataset_to_ids',\n 'get_freq_weights',\n 'fill_class_weights',\n 'normalize_answer',\n 'get_labels_to_labels_id_mapping',\n 'get_vocab',\n 'find_newlines',\n 'load_data_indices',\n 'chinese_punctuation',\n 'check_chinese_char',\n 'normalize_chinese_answer',\n]\n\nDATABASE_EXISTS_TMP = '{} dataset has already been processed and stored at {}'\nMODE_EXISTS_TMP = '{} mode of {} dataset has already been processed and stored at {}'\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n # if sys.version_info[0] == 2:\n # line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines\n\n\nchinese_punctuation = {\n '——',\n '‘',\n '’',\n '“',\n '”',\n '…',\n '、',\n '。',\n '〈',\n '〉',\n '《',\n '》',\n '「',\n '」',\n '『',\n '』',\n '【',\n '】',\n '〔',\n '〕',\n '!',\n '(',\n ')',\n ',',\n '.',\n ':',\n ';',\n '?',\n}\n\n\ndef check_chinese_char(ch):\n \"\"\"Check if a character is in Chinese.\"\"\"\n if u'\\u4e00' <= ch <= u'\\u9fff' or ch in chinese_punctuation:\n return True\n else:\n return False\n\n\ndef normalize_chinese_answer(text):\n \"\"\"Remove the Chinese punctuation and separate Chinese answers to char-level\"\"\"\n\n def remove_punc(text):\n exclude = chinese_punctuation\n return ''.join(ch for ch in text if ch not in exclude)\n\n def separate_char(text):\n ch_list = []\n for ch in text:\n ch_list.append(ch)\n return ch_list\n\n return separate_char(remove_punc(text))\n\n\ndef normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and 
extra whitespace.\"\"\"\n\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef get_label_stats(labels, outfile='stats.tsv', verbose=True):\n '''\n\n Args:\n labels: list of all labels\n outfile: path to the file where to save label stats\n\n Returns:\n total (int): total number of labels\n label_frequencies (list of tuples): each tuple represent (label, label frequency)\n max id of the labels\n '''\n labels = Counter(labels)\n total = sum(labels.values())\n out = open(outfile, 'w')\n i = 0\n freq_dict = {}\n label_frequencies = labels.most_common()\n for k, v in label_frequencies:\n out.write(f'{k}\\t\\t{round(v/total,5)}\\t\\t{v}\\n')\n if verbose and i < 3:\n logging.info(f'label: {k}, {v} out of {total} ({(v / total)*100.0:.2f}%).')\n i += 1\n freq_dict[k] = v\n\n return total, freq_dict, max(labels.keys())\n\n\ndef partition_data(intent_queries, slot_tags, split=0.1):\n n = len(intent_queries)\n n_dev = int(n * split)\n dev_idx = set(random.sample(range(n), n_dev))\n dev_intents, dev_slots, train_intents, train_slots = [], [], [], []\n\n dev_intents.append('sentence\\tlabel\\n')\n train_intents.append('sentence\\tlabel\\n')\n\n for i, item in enumerate(intent_queries):\n if i in dev_idx:\n dev_intents.append(item)\n dev_slots.append(slot_tags[i])\n else:\n train_intents.append(item)\n train_slots.append(slot_tags[i])\n return train_intents, train_slots, dev_intents, dev_slots\n\n\ndef write_files(data, outfile):\n with open(outfile, 'w') as f:\n for item in data:\n item = f'{item.strip()}\\n'\n f.write(item)\n\n\ndef write_data(data, slot_dict, intent_dict, outfold, mode, uncased):\n intent_file = open(f'{outfold}/{mode}.tsv', 'w')\n intent_file.write('sentence\\tlabel\\n')\n slot_file = open(f'{outfold}/{mode}_slots.tsv', 'w')\n for tokens, slots, intent in data:\n text = ' '.join(tokens)\n if uncased:\n text = text.lower()\n intent_file.write(f'{text}\\t{intent_dict[intent]}\\n')\n slots = [str(slot_dict[slot]) for slot in slots]\n slot_file.write(' '.join(slots) + '\\n')\n intent_file.close()\n slot_file.close()\n\n\ndef create_dataset(train, dev, slots, intents, uncased, outfold):\n os.makedirs(outfold, exist_ok=True)\n if 'O' in slots:\n slots.remove('O')\n slots = sorted(list(slots)) + ['O']\n intents = sorted(list(intents))\n slots = write_vocab(slots, f'{outfold}/dict.slots.csv')\n intents = write_vocab(intents, f'{outfold}/dict.intents.csv')\n write_data(train, slots, intents, outfold, 'train', uncased)\n write_data(dev, slots, intents, outfold, 'test', uncased)\n\n\ndef read_csv(file_path):\n rows = []\n with open(file_path, 'r') as csvfile:\n read_csv = csv.reader(csvfile, delimiter=',')\n for row in read_csv:\n rows.append(row)\n return rows\n\n\ndef get_dataset(files, dev_split=0.1):\n # entity2value, value2entity = get_entities(files)\n data, slots, intents = get_data(files)\n if len(data) == 1:\n train, dev = partition(data[0], split=dev_split)\n else:\n train, dev = data[0], data[1]\n return train, dev, slots, intents\n\n\ndef partition(data, split=0.1):\n n = len(data)\n n_dev = int(n * split)\n dev_idx = set(random.sample(range(n), n_dev))\n dev, train = [], []\n\n for i, item in enumerate(data):\n if i in dev_idx:\n dev.append(item)\n else:\n 
train.append(item)\n return train, dev\n\n\ndef map_entities(entity2value, entities):\n for key in entities:\n if 'data' in entities[key]:\n if key not in entity2value:\n entity2value[key] = set([])\n\n values = []\n for value in entities[key]['data']:\n values.append(value['value'])\n values.extend(value['synonyms'])\n entity2value[key] = entity2value[key] | set(values)\n\n return entity2value\n\n\ndef get_entities(files):\n entity2value = {}\n for file in files:\n with open(file, 'r') as json_file:\n data = json.load(json_file)\n entity2value = map_entities(entity2value, data['entities'])\n\n value2entity = reverse_dict(entity2value)\n return entity2value, value2entity\n\n\ndef get_data(files):\n all_data, all_slots, all_intents = [], set(['O']), set()\n for file in files:\n file_data = []\n with open(file, 'r') as json_file:\n data = json.load(json_file)\n for intent in data['intents']:\n all_intents.add(intent)\n utterances = data['intents'][intent]['utterances']\n for utterance in utterances:\n tokens, slots = [], []\n for frag in utterance['data']:\n frag_tokens = frag['text'].strip().split()\n tokens.extend(frag_tokens)\n if 'slot_name' not in frag:\n slot = 'O'\n else:\n slot = frag['slot_name']\n all_slots.add(slot)\n slots.extend([slot] * len(frag_tokens))\n file_data.append((tokens, slots, intent))\n all_data.append(file_data)\n return all_data, all_slots, all_intents\n\n\ndef reverse_dict(entity2value):\n value2entity = {}\n for entity in entity2value:\n for value in entity2value[entity]:\n value2entity[value] = entity\n return value2entity\n\n\ndef get_intent_labels(intent_file):\n labels = {}\n label = 0\n with open(intent_file, 'r') as f:\n for line in f:\n intent = line.strip()\n labels[intent] = label\n label += 1\n return labels\n\n\ndef get_stats(lengths):\n logging.info('Some stats of the lengths of the sequences:')\n lengths = np.asarray(lengths)\n logging.info(\n f'Min: {np.min(lengths)} | \\\n Max: {np.max(lengths)} | \\\n Mean: {np.mean(lengths)} | \\\n Median: {np.median(lengths)}'\n )\n logging.info(f'75 percentile: {np.percentile(lengths, 75):.2f}')\n logging.info(f'99 percentile: {np.percentile(lengths, 99):.2f}')\n\n\ndef is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n\ndef write_vocab(items, outfile):\n vocab = {}\n idx = 0\n with open(outfile, 'w') as f:\n for item in items:\n f.write(item + '\\n')\n vocab[item] = idx\n idx += 1\n return vocab\n\n\ndef get_labels_to_labels_id_mapping(file):\n '''\n Reads labels from the file and returns labels to id mapping dictionary\n Args:\n file: path to file\n Returns:\n labels to id mapping dictionary\n '''\n lines = open(file, 'r').readlines()\n lines = [line.strip() for line in lines if line.strip()]\n label_ids = {lines[i]: i for i in range(len(lines))}\n return label_ids\n\n\ndef if_exist(outfold, files):\n if not os.path.exists(outfold):\n return False\n for file in files:\n if not os.path.exists(f'{outfold}/{file}'):\n return False\n return True\n\n\ndef remove_punctuation_from_sentence(sentence):\n sentence = re.sub('[' + string.punctuation + ']', '', sentence)\n sentence = sentence.lower()\n return sentence\n\n\ndef dataset_to_ids(dataset, tokenizer, cache_ids=False, add_bos_eos=True, cache_data_per_node=False, use_cache=False):\n \"\"\"\n Reads dataset from file line by line, tokenizes each line with tokenizer,\n and returns list of lists which corresponds to ids of tokenized strings.\n\n Args:\n dataset (str): path to 
dataset\n tokenizer: tokenizer to convert text into ids\n cache_ids (bool): if True, ids are saved to disk as pickle file\n with similar name (e.g., data.txt --> data.txt.pkl)\n add_bos_eos (bool): whether to add <s> and </s> symbols (e.g., for NMT)\n cache_data_per_node (bool): Cache data on local_rank 0. Use when there is not a shared-filesystem.\n use_cache (bool): Use cached ids if they exist.\n Returns:\n ids: list of ids which correspond to tokenized strings of the dataset\n \"\"\"\n\n cached_ids_dataset = dataset + str(\".pkl\")\n if use_cache and os.path.isfile(cached_ids_dataset):\n logging.info(\"Loading cached tokenized dataset ...\")\n ids = pickle.load(open(cached_ids_dataset, \"rb\"))\n else:\n logging.info(f\"Tokenizing dataset {dataset}...\")\n data = open(dataset, \"rb\").readlines()\n ids = []\n for sentence in tqdm(data, desc='Tokenizing sentence'):\n sent_ids = tokenizer.text_to_ids(sentence.decode(\"utf-8\"))\n if add_bos_eos:\n sent_ids = [tokenizer.bos_id] + sent_ids + [tokenizer.eos_id]\n ids.append(sent_ids)\n if cache_ids and (\n not torch.distributed.is_initialized() or (cache_data_per_node and get_envint(\"LOCAL_RANK\", 0) == 0)\n ):\n logging.info(\"Caching tokenized dataset ...\")\n pickle.dump(ids, open(cached_ids_dataset, \"wb\"))\n return ids\n\n\ndef get_freq_weights(label_freq):\n \"\"\"\n Goal is to give more weight to the classes with less samples\n so as to match the ones with the higher frequencies. We achieve this by\n dividing the total frequency by the freq of each label to calculate its weight.\n \"\"\"\n total_size = 0\n for lf in label_freq.values():\n total_size += lf\n weighted_slots = {label: (total_size / (len(label_freq) * freq)) for label, freq in label_freq.items()}\n return weighted_slots\n\n\ndef fill_class_weights(weights, max_id=-1):\n \"\"\"\n Gets a dictionary of labels with their weights and creates a list with size of the labels filled with those weights.\n Missing labels in the dictionary would get value 1.\n\n Args:\n weights: dictionary of weights for labels, labels as keys and weights are their values\n max_id: the largest label id in the dataset, default=-1 would consider the largest label in the weights dictionary as max_id\n Returns:\n weights_list: list of weights for labels\n \"\"\"\n if max_id < 0:\n max_id = 0\n for l in weights.keys():\n max_id = max(max_id, l)\n\n all_weights = [1.0] * (max_id + 1)\n for i in range(len(all_weights)):\n if i in weights:\n all_weights[i] = weights[i]\n return all_weights\n\n\ndef get_vocab(file):\n lines = open(file, 'r').readlines()\n lines = [line.strip() for line in lines if line.strip()]\n labels = {i: lines[i] for i in range(len(lines))}\n return labels\n\n\ndef find_newlines(contents):\n \"\"\"\n Finds all of the newline positions in a text file.\n \"\"\"\n start = 0\n\n while True:\n try:\n # index and split are much faster than Python for loops\n new_start = contents.index(b\"\\n\", start)\n line = (\n contents[start:new_start]\n .replace(b\"\\xc2\\x99\", b\" \")\n .replace(b\"\\xc2\\xa0\", b\" \")\n .decode(\"utf-8\", errors=\"ignore\")\n )\n\n if len(line.split()) > 0:\n yield start\n\n start = new_start + 1\n\n except ValueError:\n break\n\n\ndef load_data_indices(idx_file: str, data_file: str, savename: str):\n \"\"\"\n Loads dataset index file if it exsits\n \"\"\"\n data_dir = data_file[: data_file.rfind('/')]\n mode = data_file[data_file.rfind('/') + 1 : data_file.rfind('.')]\n idx_file = f\"{data_dir}/{mode}_{savename}.pkl\"\n\n if os.path.isfile(idx_file):\n # If the 
sentence indices file already exists, load from it\n with open(idx_file, \"rb\") as f:\n indices = pickle.load(f)\n\n return indices, idx_file, data_dir\n\n return None, idx_file, data_dir\n",
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport tempfile\n\nimport numpy as np\nimport pytest\nimport torch\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom nemo.collections.asr.models import EncDecCTCModel\n\ntry:\n from eff.cookbooks import NeMoCookbook\n\n _EFF_PRESENT_ = True\nexcept ImportError:\n _EFF_PRESENT_ = False\n\n# A decorator marking the EFF requirement.\nrequires_eff = pytest.mark.skipif(not _EFF_PRESENT_, reason=\"Export File Format library required to run test\")\n\n\[email protected]()\ndef asr_model():\n preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}\n encoder = {\n 'cls': 'nemo.collections.asr.modules.ConvASREncoder',\n 'params': {\n 'feat_in': 64,\n 'activation': 'relu',\n 'conv_mask': True,\n 'jasper': [\n {\n 'filters': 1024,\n 'repeat': 1,\n 'kernel': [1],\n 'stride': [1],\n 'dilation': [1],\n 'dropout': 0.0,\n 'residual': False,\n 'separable': True,\n 'se': True,\n 'se_context_size': -1,\n }\n ],\n },\n }\n\n decoder = {\n 'cls': 'nemo.collections.asr.modules.ConvASRDecoder',\n 'params': {\n 'feat_in': 1024,\n 'num_classes': 28,\n 'vocabulary': [\n ' ',\n 'a',\n 'b',\n 'c',\n 'd',\n 'e',\n 'f',\n 'g',\n 'h',\n 'i',\n 'j',\n 'k',\n 'l',\n 'm',\n 'n',\n 'o',\n 'p',\n 'q',\n 'r',\n 's',\n 't',\n 'u',\n 'v',\n 'w',\n 'x',\n 'y',\n 'z',\n \"'\",\n ],\n },\n }\n modelConfig = DictConfig(\n {'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}\n )\n\n model_instance = EncDecCTCModel(cfg=modelConfig)\n return model_instance\n\n\nclass TestFileIO:\n @pytest.mark.unit\n def test_to_from_config_file(self, asr_model):\n \"\"\"\" Test makes sure that the second instance created with the same configuration (BUT NOT checkpoint)\n has different weights. \"\"\"\n\n with tempfile.NamedTemporaryFile() as fp:\n yaml_filename = fp.name\n asr_model.to_config_file(path2yaml_file=yaml_filename)\n next_instance = EncDecCTCModel.from_config_file(path2yaml_file=yaml_filename)\n\n assert isinstance(next_instance, EncDecCTCModel)\n\n assert len(next_instance.decoder.vocabulary) == 28\n assert asr_model.num_weights == next_instance.num_weights\n\n w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n w2 = next_instance.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n\n assert not np.array_equal(w1, w2)\n\n @pytest.mark.unit\n def test_save_restore_from_nemo_file(self, asr_model):\n \"\"\"\" Test makes sure that the second instance created from the same configuration AND checkpoint \n has the same weights. 
\"\"\"\n\n with tempfile.NamedTemporaryFile() as fp:\n filename = fp.name\n\n # Save model (with random artifact).\n with tempfile.NamedTemporaryFile() as artifact:\n asr_model.register_artifact(config_path=\"abc\", src=artifact.name)\n asr_model.save_to(save_path=filename)\n\n # Restore the model.\n asr_model2 = EncDecCTCModel.restore_from(restore_path=filename)\n\n assert len(asr_model.decoder.vocabulary) == len(asr_model2.decoder.vocabulary)\n assert asr_model.num_weights == asr_model2.num_weights\n\n w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n\n assert np.array_equal(w1, w2)\n\n @requires_eff\n @pytest.mark.unit\n def test_eff_save_restore_from_nemo_file_encrypted(self, asr_model):\n \"\"\"\" Test makes sure that after encrypted save-restore the model has the same weights. \"\"\"\n\n with tempfile.NamedTemporaryFile() as fp:\n filename = fp.name\n\n # Set key - use checkpoint encryption.\n NeMoCookbook.set_encryption_key(\"test_key\")\n\n # Save model (with random artifact).\n with tempfile.NamedTemporaryFile() as artifact:\n asr_model.register_artifact(config_path=\"abc\", src=artifact.name)\n asr_model.save_to(save_path=filename)\n\n # Try to restore the encrypted archive (weights) without the encryption key.\n NeMoCookbook.set_encryption_key(None)\n with pytest.raises(PermissionError):\n # Restore the model.\n asr_model2 = EncDecCTCModel.restore_from(restore_path=filename)\n\n # Restore the model.\n NeMoCookbook.set_encryption_key(\"test_key\")\n asr_model3 = EncDecCTCModel.restore_from(restore_path=filename)\n # Reset encryption so it won't mess up with other save/restore.\n NeMoCookbook.set_encryption_key(None)\n\n assert asr_model.num_weights == asr_model3.num_weights\n\n @pytest.mark.unit\n def test_save_restore_from_nemo_file_with_override(self, asr_model, tmpdir):\n \"\"\"\" Test makes sure that the second instance created from the same configuration AND checkpoint\n has the same weights.\n\n Args:\n tmpdir: fixture providing a temporary directory unique to the test invocation.\n \"\"\"\n # Name of the archive in tmp folder.\n filename = os.path.join(tmpdir, \"eff.nemo\")\n\n # Get path where the command is executed - the artifacts will be \"retrieved\" there.\n # (original .nemo behavior)\n cwd = os.getcwd()\n\n with tempfile.NamedTemporaryFile(mode='a+') as conf_fp:\n\n # Create a \"random artifact\".\n with tempfile.NamedTemporaryFile(mode=\"w\", delete=False) as artifact:\n artifact.write(\"magic content 42\")\n # Remember the filename of the artifact.\n _, artifact_filename = os.path.split(artifact.name)\n # Add artifact to model.\n asr_model.register_artifact(config_path=\"abc\", src=artifact.name)\n # Save model (with \"random artifact\").\n asr_model.save_to(save_path=filename)\n\n # Modify config slightly\n cfg = asr_model.cfg\n cfg.encoder.activation = 'swish'\n yaml_cfg = OmegaConf.to_yaml(cfg)\n conf_fp.write(yaml_cfg)\n conf_fp.seek(0)\n\n # Restore the model.\n asr_model2 = EncDecCTCModel.restore_from(restore_path=filename, override_config_path=conf_fp.name)\n\n assert len(asr_model.decoder.vocabulary) == len(asr_model2.decoder.vocabulary)\n assert asr_model.num_weights == asr_model2.num_weights\n\n w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n\n assert np.array_equal(w1, w2)\n\n assert 
asr_model2.cfg.encoder.activation == 'swish'\n\n @pytest.mark.unit\n def test_save_model_level_pt_ckpt(self, asr_model):\n with tempfile.TemporaryDirectory() as ckpt_dir:\n nemo_file = os.path.join(ckpt_dir, 'asr.nemo')\n asr_model.save_to(nemo_file)\n\n # Save model level PT checkpoint\n asr_model.extract_state_dict_from(nemo_file, ckpt_dir)\n ckpt_path = os.path.join(ckpt_dir, 'model_weights.ckpt')\n\n assert os.path.exists(ckpt_path)\n\n # Restore the model.\n asr_model2 = EncDecCTCModel.restore_from(restore_path=nemo_file)\n\n assert len(asr_model.decoder.vocabulary) == len(asr_model2.decoder.vocabulary)\n assert asr_model.num_weights == asr_model2.num_weights\n\n # Change weights values\n asr_model2.encoder.encoder[0].mconv[0].conv.weight.data += 1.0\n\n w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n\n assert not np.array_equal(w1, w2)\n\n # Restore from checkpoint\n asr_model2.load_state_dict(torch.load(ckpt_path))\n\n w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n\n assert np.array_equal(w1, w2)\n\n @pytest.mark.unit\n def test_save_module_level_pt_ckpt(self, asr_model):\n with tempfile.TemporaryDirectory() as ckpt_dir:\n nemo_file = os.path.join(ckpt_dir, 'asr.nemo')\n asr_model.save_to(nemo_file)\n\n # Save model level PT checkpoint\n asr_model.extract_state_dict_from(nemo_file, ckpt_dir, split_by_module=True)\n encoder_path = os.path.join(ckpt_dir, 'encoder.ckpt')\n decoder_path = os.path.join(ckpt_dir, 'decoder.ckpt')\n preprocessor_path = os.path.join(ckpt_dir, 'preprocessor.ckpt')\n\n assert os.path.exists(encoder_path)\n assert os.path.exists(decoder_path)\n assert os.path.exists(preprocessor_path)\n\n # Restore the model.\n asr_model2 = EncDecCTCModel.restore_from(restore_path=nemo_file)\n\n assert len(asr_model.decoder.vocabulary) == len(asr_model2.decoder.vocabulary)\n assert asr_model.num_weights == asr_model2.num_weights\n\n # Change weights values\n asr_model2.encoder.encoder[0].mconv[0].conv.weight.data += 1.0\n\n w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n\n assert not np.array_equal(w1, w2)\n\n # Restore from checkpoint\n asr_model2.encoder.load_state_dict(torch.load(encoder_path))\n\n w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()\n\n assert np.array_equal(w1, w2)\n"
] | [
[
"numpy.median",
"numpy.asarray",
"torch.distributed.is_initialized",
"numpy.max",
"numpy.min",
"numpy.percentile",
"numpy.mean"
],
[
"torch.load",
"numpy.array_equal"
]
] |
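Note on data_preprocessing.py above: get_freq_weights gives each label the weight total / (num_labels * freq), so rare labels are up-weighted and a perfectly uniform label distribution yields weight 1.0 for every class. A quick standalone check with hypothetical counts:

label_freq = {0: 80, 1: 15, 2: 5}  # hypothetical label -> count
total = sum(label_freq.values())   # 100
weights = {lab: total / (len(label_freq) * f) for lab, f in label_freq.items()}
print(weights)  # {0: 0.4167, 1: 2.2222, 2: 6.6667} -- rarer label, larger weight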
Kenneth-Wong/tf-faster-rcnn | [
"a6bd798df1b9075ebdfeb7744fffc13226c3a65e"
] | [
"lib/model/config.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport os.path as osp\nimport numpy as np\n# `pip install easydict` if you don't have it\nfrom easydict import EasyDict as edict\n\n__C = edict()\n# Consumers can get config by:\n# from fast_rcnn_config import cfg\ncfg = __C\n\n#\n# Memory options\n#\n__C.MEM = edict()\n\n# Number of memory iterations\n__C.MEM.ITER = 2\n\n# Height of the memory\n__C.MEM.INIT_H = 20\n# Width of the memory\n__C.MEM.INIT_W = 20\n\n# Channel of the memory\n__C.MEM.C = 512\n\n# Basic stds in the memory\n__C.MEM.STD = 0.01\n# Base stds in the memory update function for input features\n__C.MEM.U_STD = 0.01\n# Region classification\n__C.MEM.C_STD = 0.01\n\n# Feature to memory ratio\n__C.MEM.FM_R = 1.\n# Value to gate ratio\n__C.MEM.VG_R = 1.\n# FC to Pool ratio when combing the input\n__C.MEM.FP_R = 1.\n\n# Conv kernel size for memory\n__C.MEM.CONV = 3\n\n# Canonical region size\n__C.MEM.CROP_SIZE = 7\n\n# Context aggregation\n__C.MEM.CT_L = 3\n__C.MEM.CT_CONV = 3\n__C.MEM.CT_FCONV = 3\n\n# Input feature\n__C.MEM.IN_L = 2\n__C.MEM.IN_CONV = 3\n\n# Memory final fc layer channels\n__C.MEM.FC_C = 4096\n__C.MEM.FC_L = 2\n\n# The weight for the memory based prediction\n__C.MEM.WEIGHT = 1.\n\n__C.MEM.REL_WEIGHT = 1.\n\n# Final supervision weight\n__C.MEM.WEIGHT_FINAL = 1.\n# The threshold to control the entropy of the distribution\n__C.MEM.BETA = .5\n\n# The dimension of predicted tag\n__C.MEM.TAG_D = 16\n\n\n#\n# Training options\n#\n__C.TRAIN = edict()\n\n# Initial learning rate\n__C.TRAIN.RATE = 0.0005\n\n# Momentum\n__C.TRAIN.MOMENTUM = 0.9\n\n# Weight decay, for regularization\n__C.TRAIN.WEIGHT_DECAY = 0.0001\n\n# Factor for reducing the learning rate\n__C.TRAIN.GAMMA = 0.1\n\n# Step size for reducing the learning rate, currently only support one step\n__C.TRAIN.STEPSIZE = [30000]\n\n# Iteration intervals for showing the loss during training, on command line interface\n__C.TRAIN.DISPLAY = 10\n\n# Whether to double the learning rate for bias\n__C.TRAIN.DOUBLE_BIAS = True\n\n# Whether to initialize the weights with truncated normal distribution \n__C.TRAIN.TRUNCATED = False\n\n# Whether to have weight decay on bias as well\n__C.TRAIN.BIAS_DECAY = False\n\n# Whether to add ground truth boxes to the pool when sampling regions\n__C.TRAIN.USE_GT = False\n\n# Whether to use aspect-ratio grouping of training images, introduced merely for saving\n# GPU memory\n__C.TRAIN.ASPECT_GROUPING = False\n\n# The number of snapshots kept, older ones are deleted to save space\n__C.TRAIN.SNAPSHOT_KEPT = 3\n\n# The time interval for saving tensorflow summaries\n__C.TRAIN.SUMMARY_INTERVAL = 180\n\n# The time interval for saving tensorflow summaries\n__C.TRAIN.SUMMARY_ITERS = 500\n\n# Scale to use during training (can list multiple scales)\n# The scale is the pixel size of an image's shortest side\n__C.TRAIN.SCALES = (600,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TRAIN.MAX_SIZE = 1000\n\n# Images to use per minibatch\n__C.TRAIN.IMS_PER_BATCH = 1\n\n# Minibatch size (number of regions of interest [ROIs])\n__C.TRAIN.BATCH_SIZE = 128\n\n__C.TRAIN.REL_BATCH_SIZE = 128\n\n__C.TRAIN.POS_REL_FRACTION = 0.5\n\n# Fraction of minibatch that is labeled foreground (i.e. 
class > 0)\n__C.TRAIN.FG_FRACTION = 0.25\n\n# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)\n__C.TRAIN.FG_THRESH = 0.5\n\n# Overlap threshold for a ROI to be considered background (class = 0 if\n# overlap in [LO, HI))\n__C.TRAIN.BG_THRESH_HI = 0.5\n__C.TRAIN.BG_THRESH_LO = 0.1\n\n# Use horizontally-flipped images during training?\n__C.TRAIN.USE_FLIPPED = True\n\n# Train bounding-box regressors\n__C.TRAIN.BBOX_REG = True\n\n# Overlap required between a ROI and ground-truth box in order for that ROI to\n# be used as a bounding-box regression training example\n__C.TRAIN.BBOX_THRESH = 0.5\n\n# Iterations between snapshots\n__C.TRAIN.SNAPSHOT_ITERS = 5000\n\n# solver.prototxt specifies the snapshot path prefix, this adds an optional\n# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel\n__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'\n\n# Normalize the targets (subtract empirical mean, divide by empirical stddev)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS = True\n__C.TRAIN.BBOX_TARGET_NORMALIZATION_FILE = 'bbox_distribution.npy'\n# Deprecated (inside weights)\n__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n\n# Normalize the targets using \"precomputed\" (or made up) means and stdevs\n# (BBOX_NORMALIZE_TARGETS must also be True)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False\n\n__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)\n\n__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)\n\n# Train using these proposals\n__C.TRAIN.PROPOSAL_METHOD = 'gt'\n\n# Make minibatches from images that have similar aspect ratios (i.e. both\n# tall and thin or both short and wide) in order to avoid wasting computation\n# on zero-padding.\n\n# Use RPN to detect objects\n__C.TRAIN.HAS_RPN = True\n\n# IOU >= thresh: positive example\n__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7\n\n# IOU < thresh: negative example\n__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3\n\n# If an anchor satisfied by positive and negative conditions set to negative\n__C.TRAIN.RPN_CLOBBER_POSITIVES = False\n\n# Max number of foreground examples\n__C.TRAIN.RPN_FG_FRACTION = 0.5\n\n# Total number of examples\n__C.TRAIN.RPN_BATCHSIZE = 256\n\n# NMS threshold used on RPN proposals\n__C.TRAIN.RPN_NMS_THRESH = 0.7\n\n# Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000\n\n# Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TRAIN.RPN_POST_NMS_TOP_N = 2000\n\n# Deprecated (outside weights)\n__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n\n# Give the positive RPN examples weight of p * 1 / {num positives}\n# and give negatives a weight of (1 - p)\n# Set to -1.0 to use uniform example weighting\n__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0\n\n# Whether to use all ground truth bounding boxes for training, \n# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''\n__C.TRAIN.USE_ALL_GT = True\n\n__C.TRAIN.USE_RPN_DB = True\n\n__C.TRAIN.NUM_NEG_RELS = 128\n\n#\n# Testing options\n#\n__C.TEST = edict()\n\n# Scale to use during testing (can NOT list multiple scales)\n# The scale is the pixel size of an image's shortest side\n__C.TEST.SCALES = (600,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TEST.MAX_SIZE = 1000\n\n# Overlap threshold used for non-maximum suppression (suppress boxes with\n# IoU >= this threshold)\n__C.TEST.NMS = 0.3\n\n# Experimental: treat the (K+1) units in the cls_score layer as linear\n# predictors (trained, eg, with one-vs-rest SVMs).\n__C.TEST.SVM = 
False\n\n# Test using bounding-box regressors\n__C.TEST.BBOX_REG = True\n\n# Propose boxes\n__C.TEST.HAS_RPN = False\n\n# Test using these proposals\n__C.TEST.PROPOSAL_METHOD = 'gt'\n\n## NMS threshold used on RPN proposals\n__C.TEST.RPN_NMS_THRESH = 0.7\n\n# Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TEST.RPN_PRE_NMS_TOP_N = 6000\n\n# Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TEST.RPN_POST_NMS_TOP_N = 300\n\n# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)\n# __C.TEST.RPN_MIN_SIZE = 16\n\n# Testing mode, default to be 'nms', 'top' is slower but better\n# See report for details\n__C.TEST.MODE = 'nms'\n\n# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select\n__C.TEST.RPN_TOP_N = 5000\n\n#\n# ResNet options\n#\n\n__C.RESNET = edict()\n\n# Option to set if max-pooling is appended after crop_and_resize. \n# if true, the region will be resized to a square of 2xPOOLING_SIZE, \n# then 2x2 max-pooling is applied; otherwise the region will be directly\n# resized to a square of POOLING_SIZE\n__C.RESNET.MAX_POOL = False\n\n# Number of fixed blocks during training, by default the first of all 4 blocks is fixed\n# Range: 0 (none) to 3 (all)\n__C.RESNET.FIXED_BLOCKS = 1\n\n#\n# MobileNet options\n#\n\n__C.MOBILENET = edict()\n\n# Whether to regularize the depth-wise filters during training\n__C.MOBILENET.REGU_DEPTH = False\n\n# Number of fixed layers during training, by default the bottom 5 of 14 layers is fixed\n# Range: 0 (none) to 12 (all)\n__C.MOBILENET.FIXED_LAYERS = 5\n\n# Weight decay for the mobilenet weights\n__C.MOBILENET.WEIGHT_DECAY = 0.00004\n\n# Depth multiplier\n__C.MOBILENET.DEPTH_MULTIPLIER = 1.\n\n#\n# MISC\n#\n\n# Pixel mean values (BGR order) as a (1, 1, 3) array\n# We use the same pixel mean for all networks even though it's not exactly what\n# they were trained with\n__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])\n\n# For reproducibility\n__C.RNG_SEED = 3\n\n# Root directory of project\n__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))\n\n# Data directory\n__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))\n\n__C.VG_DIR = osp.abspath(osp.join(__C.DATA_DIR, 'vg'))\n\n# Name (or path to) the matlab executable\n__C.MATLAB = 'matlab'\n\n# Place outputs under an experiments directory\n__C.EXP_DIR = 'default'\n\n# Use GPU implementation of non-maximum suppression\n__C.USE_GPU_NMS = True\n\n# Use an end-to-end tensorflow model.\n# Note: models in E2E tensorflow mode have only been tested in feed-forward mode,\n# but these models are exportable to other tensorflow instances as GraphDef files.\n__C.USE_E2E_TF = True\n\n# Default pooling mode, only 'crop' is available\n__C.POOLING_MODE = 'crop'\n\n# Size of the pooled region after RoI pooling\n__C.POOLING_SIZE = 7\n\n# Anchor scales for RPN\n__C.ANCHOR_SCALES = [8, 16, 32]\n\n# Anchor ratios for RPN\n__C.ANCHOR_RATIOS = [0.5, 1, 2]\n\n# Number of filters for the RPN layer\n__C.RPN_CHANNELS = 512\n\n__C.BOX_SCALE = 1024\n\n__C.IMG_SCALE = 1024\n\ncfg.BOTTLE_SCALE = 16.0\n\n# EPS, a small number for numerical issue\n__C.EPS = 1e-14\n\n__C.GROUP_DIST_THRESH = 20.\n\n__C.PUSH_WEIGHT = 0.1\n\n__C.PULL_WEIGHT = 0.1\n\n\ndef get_output_dir(imdb, weights_filename):\n \"\"\"Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not 
None).\n \"\"\"\n outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))\n if weights_filename is None:\n weights_filename = 'default'\n outdir = osp.join(outdir, weights_filename)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\n\ndef get_output_tb_dir(imdb, weights_filename):\n \"\"\"Return the directory where tensorflow summaries are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))\n if weights_filename is None:\n weights_filename = 'default'\n outdir = osp.join(outdir, weights_filename)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\n\ndef _merge_a_into_b(a, b):\n \"\"\"Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n \"\"\"\n if type(a) is not edict:\n return\n\n for k, v in a.items():\n # a must specify keys that are in b\n if k not in b:\n raise KeyError('{} is not a valid config key'.format(k))\n\n # the types must match, too\n old_type = type(b[k])\n if old_type is not type(v):\n if isinstance(b[k], np.ndarray):\n v = np.array(v, dtype=b[k].dtype)\n else:\n raise ValueError(('Type mismatch ({} vs. {}) '\n 'for config key: {}').format(type(b[k]),\n type(v), k))\n\n # recursively merge dicts\n if type(v) is edict:\n try:\n _merge_a_into_b(a[k], b[k])\n except:\n print(('Error under config key: {}'.format(k)))\n raise\n else:\n b[k] = v\n\n\ndef cfg_from_file(filename):\n \"\"\"Load a config file and merge it into the default options.\"\"\"\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)\n\n\ndef cfg_from_list(cfg_list):\n \"\"\"Set config keys via list (e.g., from command line).\"\"\"\n from ast import literal_eval\n assert len(cfg_list) % 2 == 0\n for k, v in zip(cfg_list[0::2], cfg_list[1::2]):\n key_list = k.split('.')\n d = __C\n for subkey in key_list[:-1]:\n assert subkey in d\n d = d[subkey]\n subkey = key_list[-1]\n assert subkey in d\n try:\n value = literal_eval(v)\n except:\n # handle the case when v is a string literal\n value = v\n assert type(value) == type(d[subkey]), \\\n 'type {} does not match original type {}'.format(\n type(value), type(d[subkey]))\n d[subkey] = value\n"
] | [
[
"numpy.array"
]
] |
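Note on config.py above: cfg_from_list walks a dotted key path into the config edict and parses each value with ast.literal_eval, falling back to the raw string when the value is not a Python literal. A minimal standalone sketch of just that parsing step (illustrative helper, not the project's function):

from ast import literal_eval

def parse_value(v):
    try:
        return literal_eval(v)
    except (ValueError, SyntaxError):
        return v  # keep plain string values, e.g. a snapshot prefix

print(parse_value('64'))           # 64 (int)
print(parse_value('0.4'))          # 0.4 (float)
print(parse_value('[8, 16, 32]'))  # [8, 16, 32] (list)
print(parse_value('res101'))       # 'res101' (left as str)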
Crazy-Jack/RL4GRN | [
"e683e17758eb468bd42e0ea0020e2246051c258c"
] | [
"RL_TD3/src/pe_model.py"
] | [
"'''\n The probabilistic ensemble dynamics model\n'''\n# pylint: disable=C0103, R0902, R0913, W0201, E0401, E1120\nimport time\nimport itertools\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom collections import defaultdict\n\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\nclass PEModel(keras.Model):\n '''\n An individual Probabilistic Neural Network.\n Multiple Networks with identical structure form the Probabilistic Ensemble.\n Notice that each PEModel network predicts the mean and variance of\n reward, done, delta_state in order.\n Therefore, the output layer has (state_dim + 1 + 1) * 2\n '''\n def __init__(self, state_dim, action_dim):\n super().__init__()\n\n self.l1 = keras.layers.Dense(256, activation=\"relu\")\n self.l2 = keras.layers.Dense(256, activation=\"relu\")\n self.l3 = keras.layers.Dense(256, activation=\"relu\")\n # mean and variance for reward, done, delta_state (in this order)\n # Note: we change done to not_done\n self.l4 = keras.layers.Dense((state_dim + 2) * 2)\n # this step to populate trainable_weights. Without this step,\n # PE.trainable_weights will be empty.\n self.forward(np.zeros((1, state_dim + action_dim)))\n\n def forward(self, net_input):\n '''\n Calls the network on a batch of inputs.\n net_input should have size (batch_size, state_dim+action_dim)\n '''\n out = self.l1(net_input)\n out = self.l2(out)\n out = self.l3(out)\n out = self.l4(out)\n return out\n\nclass PE():\n '''\n The probabilistic ensemble dynamics model class.\n Contains code to initialize, train and then predict with the ensemble.\n You will implement part of this class.\n '''\n def __init__(\n self,\n state_dim,\n action_dim,\n num_networks = 7,\n num_elites = 5,\n learning_rate = 1e-3,\n ):\n self.num_networks = num_networks\n self.num_elites = num_elites\n self.networks = [PEModel(state_dim, action_dim) for i in range(num_networks)]\n self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate)\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.output_dim = state_dim + 2\n # For smoothing the log-variance output\n self.max_logvar = tf.convert_to_tensor(-3 * np.ones([1, self.state_dim + 2]), \\\n dtype=tf.float32)\n self.min_logvar = tf.convert_to_tensor(-7 * np.ones([1, self.state_dim + 2]), \\\n dtype=tf.float32)\n\n self.total_it = 0\n self._model_inds = list(range(self.num_networks)) # for choosing elite models in inference!\n\n def get_output(self, output, ret_logvar=False):\n \"\"\"\n output: tf tensor, shape (batch_size, (state_dim+2) * 2)\n Given network outputs, returns mean and log variance tf tensors if ret_logvar = True.\n mean: shape (batch_size, state_dim + 2)\n logvar: shape (batch_size, state_dim + 2)\n Do not modify\n \"\"\"\n mean = output[:, 0:self.output_dim]\n raw_v = output[:, self.output_dim:]\n # Log variance smoothing\n logvar = self.max_logvar - tf.math.softplus(self.max_logvar - raw_v)\n logvar = self.min_logvar + tf.math.softplus(logvar - self.min_logvar)\n if ret_logvar: # for training\n return mean, logvar\n return mean, tf.math.exp(logvar) # for testing\n\n def _train_loss_one(self, network, train_in, train_targ):\n '''\n Compute the MLE Training Loss for a given Probabilistic Neural Network.\n train_in: tf tensor, shape (batch_size, state_dim + action_dim)\n tarin_targ: tf tensor, shape (batch_size, state_dim + 2), target output\n This function should compute the Gaussian MLE loss, summed across the entire batch.\n\n User note: this contain not done!!\n ''' \n # raise NotImplementedError\n\n 
pred_mean, pred_var = self.get_output(network.forward(train_in), ret_logvar=True)\n\n train_loss = (pred_mean - train_targ) ** 2 / tf.math.exp(pred_var) + pred_var # [batch_size, state_dim + 2]\n\n train_loss = tf.math.reduce_sum(train_loss)\n\n # regularization step. populate train_loss with correct Gaussian MLE loss\n train_loss += 0.01 * (tf.math.reduce_sum(self.max_logvar) - \\\n tf.math.reduce_sum(self.min_logvar))\n return train_loss\n\n\n def _MSE_loss(self, valid_in, valid_targ, final=False):\n \"\"\"\n Computes the MSE loss for each Probabilistic Neural Network, for validation only.\n valid_in: tf tensor, shape (batch_size, state_dim + action_dim), validation input\n valid_targ: tf tensor, shape (batch_size, state_dim + 2), validation target\n Do not modify.\n \"\"\"\n mse_losses = np.zeros(self.num_networks)\n rew_losses = np.zeros(self.num_networks)\n not_done_losses = np.zeros(self.num_networks)\n dynamics_losses = np.zeros(self.num_networks)\n\n for i, network in enumerate(self.networks):\n mean, _ = self.get_output(network.forward(valid_in), ret_logvar=True)\n if final:\n mse_loss = tf.reduce_mean(((mean - valid_targ) ** 2), 0)\n rew_loss = mse_loss[0]\n not_done_loss = mse_loss[1]\n dynamics_loss = tf.reduce_mean(mse_loss[2:], 0)\n mse_losses[i] = tf.reduce_mean(mse_loss, 0)\n rew_losses[i] = rew_loss\n not_done_losses[i] = not_done_loss\n dynamics_losses[i] = dynamics_loss\n else:\n mse_loss = tf.reduce_mean((mean - valid_targ) ** 2, 0)\n mse_losses[i] = tf.reduce_mean(mse_loss, 0)\n if final:\n return mse_losses, rew_losses, not_done_losses, dynamics_losses\n return mse_losses\n\n def _prepare_dataset(self, buffer):\n '''\n Given a replay buffer containing real environment transitions,\n prepare a dataset for training the PE of neural networks.\n The dataset contains ALL transitions in the replay buffer.\n Do not modify.\n inputs: tf tensor, shape (buffer_size, state_dim + action_dim)\n targets: tf tensor, shape (buffer_size, state_dim + 2)\n '''\n state, action, next_state, reward, not_done = buffer.sample_all() # already shuffled\n\n delta_state = next_state - state\n inputs = tf.concat((state, action), -1)\n targets = tf.concat((reward, not_done, delta_state), -1)\n # Both TF tensors\n return inputs, targets\n\n def _start_train(self, max_epochs_since_update):\n '''\n Setup some internal bookkeeping variables to determine convergence.\n Do not modify.\n '''\n self._snapshots = np.array([1e10 for i in range(self.num_networks)])\n self._epochs_since_update = 0\n self._max_epochs_since_update = max_epochs_since_update\n\n def _end_train(self):\n '''\n Book keeping and console output. 
Do not modify.\n '''\n sorted_inds = np.argsort(self._snapshots)\n self._model_inds = sorted_inds[:self.num_elites].tolist() # first elite models\n print('Final holdout_losses: ', self._snapshots)\n print('Model MSE', np.mean(self._snapshots[self._model_inds]))\n print('Rew MSE', np.mean(self._reward_mse[self._model_inds]))\n print('Not Done MSE', np.mean(self._not_done_mse[self._model_inds]))\n print('Dyn MSE', np.mean(self._dynamics_mse[self._model_inds]))\n\n def _save_best(self, epoch, holdout_losses):\n '''\n Determines the stopping condition for PE model training.\n The training is determined to have converged if for max_epochs_since_update epochs,\n no network in the ensemble has improved for more than 1%.\n Do not modify.\n '''\n updated = False\n for i in range(len(holdout_losses)):\n current = holdout_losses[i]\n best = self._snapshots[i]\n improvement = (best - current) / best\n if improvement > 0.01: # if decrease over 1%, save\n self._snapshots[i] = current\n #self._save_model(i)\n updated = True\n # improvement = (best - current) / best\n print('epoch {} | updated {} | improvement: {:.4f} | best: {:.4f} | current: {:.4f}'.format(\\\n epoch, i, improvement, best, current))\n\n if updated:\n self._epochs_since_update = 0\n else:\n self._epochs_since_update += 1\n\n if self._epochs_since_update > self._max_epochs_since_update:\n print('[ PE ] Breaking at epoch {}: {} epochs since update ({} max)'.format(epoch,\n self._epochs_since_update, self._max_epochs_since_update))\n return True\n else:\n return False\n\n\n def train(self, buffer, batch_size=256, holdout_ratio=0.2, max_logging=5000,\n max_grad_updates=None, max_t=None, max_epochs_since_update=5):\n '''\n For model training, uses all transitions in real buffer, and train to convergence\n in valid set. 
You will implement part of this training function.\n '''\n self._start_train(max_epochs_since_update)\n inputs, targets = self._prepare_dataset(buffer)\n\n # Split into training and holdout sets\n num_holdout = min(int(inputs.shape[0] * holdout_ratio), max_logging)\n inputs, holdout_inputs = inputs[num_holdout:], inputs[:num_holdout]\n targets, holdout_targets = targets[num_holdout:], targets[:num_holdout]\n\n print('[ Euler PE ] Training {} | Target {} | Holdout: {}'.format(inputs.shape, targets.shape,\n holdout_inputs.shape))\n\n idxs = tf.convert_to_tensor(np.random.randint(inputs.shape[0], size=(inputs.shape[0],)))\n num_batch = int(np.ceil(idxs.shape[-1] / batch_size))\n\n # global counter\n t0 = time.time()\n grad_updates = 0\n\n for epoch in itertools.count(): # infinite loop\n for batch_num in range(num_batch):\n batch_idxs = idxs[batch_num * batch_size:(batch_num + 1) * batch_size]\n # (N, <=B): will include the remainder batch even if out of bounds!\n train_in = tf.gather(inputs, batch_idxs)\n train_targ = tf.gather(targets, batch_idxs)\n \n # For each network, get loss, compute gradient of loss\n # And apply optimizer step.\n # raise NotImplementedError\n\n for network in self.networks:\n with tf.GradientTape() as tape:\n train_loss = self._train_loss_one(network, train_in, train_targ)\n \n network_grad = tape.gradient(train_loss, network.trainable_variables)\n self.optimizer.apply_gradients(zip(network_grad, network.trainable_variables))\n \n grad_updates += 1\n\n idxs = tf.random.shuffle(idxs) # shuffle its dataset for each model\n\n # validate each model using same valid set\n holdout_losses = self._MSE_loss(holdout_inputs, holdout_targets) # (N,)\n break_train = self._save_best(epoch, holdout_losses)\n print(\"[ PE ] holdout_losses: \", f\"Epoch {epoch}\", holdout_losses) # write to log.txt\n\n t = time.time() - t0\n if break_train or (max_grad_updates and grad_updates > max_grad_updates):\n break\n\n if max_t and t > max_t:\n print('Breaking because of timeout: {}! (max: {})'.format(t, max_t))\n break\n\n self._snapshots, self._reward_mse, self._not_done_mse, self._dynamics_mse \\\n = self._MSE_loss(holdout_inputs, holdout_targets, final=True)\n\n self._end_train()\n print(f\"End of Model training {epoch} epochs and time {t:.0f}s\")\n print('Model training epoch', epoch)\n print('Model training time', int(t))\n return grad_updates\n\n ### Rollout / Inference Code\n\n def _prepare_input(self, state, action):\n '''\n Prepares inputs for inference.\n state: tf tensor, size (batch_size, state_dim) or (state_dim, )\n action: tf tensor, size (batch_size, action_dim) or (action_dim, )\n inputs: tf tensor, size (batch_size, state_dim + action_dim)\n Do not modify.\n '''\n if state.ndim == 1:\n state = tf.expand_dims(state, 0)\n if action.ndim == 1:\n action = tf.expand_dims(action, 0) \\\n if action.shape[0] == self.action_dim else tf.expand_dims(action, 1)\n inputs = tf.concat((state, action), -1)\n assert inputs.ndim == 2\n return inputs\n\n def _random_inds(self, batch_size):\n '''\n Uniformly randomly pick one *elite* model for each (state, action) in batch.\n This may help you implement predict.\n '''\n inds = np.random.choice(self._model_inds, size=batch_size)\n return inds\n\n def predict(self, state, action, deterministic=False):\n '''\n Predicts next states, rewards and not_done using the probabilistic ensemble\n For each (state, action) pair, pick a elite model uniformly at random, then\n use that elite model to predict next state, reward and not_done. 
The model\n can de different for each sample in the batch.\n If deterministic=True, then the prediction should simply be the predicted mean.\n If deterministic=False, then the prediction should be sampled from N(mean, var),\n where mean is the predicted mean and var is the predicted variance.\n state: tf tensor, shape (batch_size, state_dim) or (state_dim, )\n action: tf tensor, shape (batch_size, action_dim) or (action_dim, )\n samples (return value): np array, shape (batch_size, state_dim+2)\n samples[:, 0] should be the rewards, samples[:, 1] should be the not-done signals,\n and samples[:, 2:] should be the next states.\n '''\n inputs = self._prepare_input(state, action)\n\n # raise NotImplementedError\n batch_size = state.shape[0] if len(state.shape) > 1 else 1\n inds = self._random_inds(batch_size) # get random idx\n\n # group idx by network number -> network_number: list(random idx)\n network_2_batch_mapping = defaultdict(list)\n for batch_number, model_idx in enumerate(inds):\n network_2_batch_mapping[model_idx].append(batch_number)\n\n # model forward (for loop by network)\n samples = [0] * batch_size\n for model_idx, batch_numbers in network_2_batch_mapping.items():\n model_inputs = tf.gather_nd(inputs, [[i] for i in batch_numbers])\n pred_mean, pred_var = self.get_output(self.networks[model_idx].forward(model_inputs), ret_logvar=False)\n\n zeros_padding = tf.zeros([len(batch_numbers), 2])\n cur_state = tf.concat([zeros_padding, tf.gather_nd(state, [[i] for i in batch_numbers])], 1)\n pred_mean = pred_mean + cur_state\n\n if deterministic == True:\n for idx, bi in enumerate(batch_numbers):\n samples[bi] = pred_mean[idx, :]\n else:\n for idx, bi in enumerate(batch_numbers):\n samples[bi] = tf.random.normal(shape = (1, self.state_dim + 2), mean = pred_mean[idx,:], stddev = tf.sqrt(pred_var[idx,:]))\n\n samples = tf.squeeze(tf.convert_to_tensor(samples), 1)\n\n # zeros_padding = tf.zeros([batch_size, 2])\n # padded_state_only = tf.concat([zeros_padding, state], 1)\n\n # samples += padded_state_only\n return samples\n\n\n# Sanity Check to test your PE model implementation.\nif __name__ == '__main__':\n import pybullet_envs\n import gym\n import utils\n\n env = gym.make(\"InvertedPendulumBulletEnv-v0\")\n state_size = env.observation_space.shape[0]\n action_size = env.action_space.shape[0]\n replay_buffer = utils.ReplayBuffer(state_size, action_size, max_size=int(1e6))\n\n o = env.reset()\n total_steps = 25000 # one episode has 1000 steps\n step = 0\n while step < total_steps:\n a = env.action_space.sample()\n o2, r, d, info = env.step(a)\n step += 1\n replay_buffer.add(o, a, o2, r, float(d))\n o = o2\n if d:\n o = env.reset()\n\n model = PE(state_size, action_size)\n model.train(replay_buffer)\n"
] | [
[
"numpy.ones",
"tensorflow.keras.optimizers.Adam",
"tensorflow.gather_nd",
"numpy.argsort",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.random.shuffle",
"numpy.random.choice",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.math.exp",
"numpy.mean",
"numpy.ceil",
"numpy.zeros",
"tensorflow.expand_dims",
"tensorflow.math.softplus",
"tensorflow.math.reduce_sum",
"tensorflow.sqrt",
"tensorflow.reduce_mean",
"tensorflow.gather",
"numpy.random.randint"
]
] |
162/catalyst | [
"b4ba36be52c51160e0fabecdcb084a8d5cd96cb7"
] | [
"catalyst/dl/utils/trace.py"
] | [
"from typing import Type\n\nimport torch\nfrom torch import nn\nfrom torch.jit import ScriptModule\n\nfrom catalyst.dl.core import Experiment, Runner\n\n\nclass _ForwardOverrideModel(nn.Module):\n \"\"\"\n Model that calls specified method instead of forward\n\n (Workaround, single method tracing is not supported)\n \"\"\"\n\n def __init__(self, model, method_name):\n super().__init__()\n self.model = model\n self.method = method_name\n\n def forward(self, *args, **kwargs):\n return getattr(self.model, self.method)(*args, **kwargs)\n\n\nclass _TracingModelWrapper(nn.Module):\n \"\"\"\n Wrapper that traces model with batch instead of calling it\n\n (Workaround, to use native model batch handler)\n \"\"\"\n\n def __init__(self, model, method_name):\n super().__init__()\n self.method_name = method_name\n self.model = model\n self.tracing_result: ScriptModule\n\n def __call__(self, *args, **kwargs):\n method_model = _ForwardOverrideModel(\n self.model, self.method_name\n )\n\n self.tracing_result = \\\n torch.jit.trace(\n method_model,\n *args, **kwargs\n )\n\n\ndef _get_native_batch(\n experiment: Experiment, stage: str\n):\n \"\"\"Returns dataset from first loader provided by experiment\"\"\"\n loaders = experiment.get_loaders(stage)\n assert loaders, \\\n \"Experiment must have at least one loader to support tracing\"\n # Take first loader\n loader = next(iter(loaders.values()))\n dataset = loader.dataset\n collate_fn = loader.collate_fn\n\n sample = collate_fn([dataset[0]])\n\n return sample\n\n\ndef trace_model(\n model: nn.Module,\n experiment: Experiment,\n runner_type: Type[Runner],\n method_name: str = \"forward\"\n) -> ScriptModule:\n \"\"\"\n Traces model using it's native experiment and runner.\n\n Args:\n model: Model to trace\n NOTICE: will be switched to eval and\n requires_grad=False will be set on all params\n experiment: Native experiment that was used to train model\n runner_type: Model's native runner that was used to train model\n method_name: Model's method name that will be\n used as entrypoint during tracing\n\n Returns:\n Traced model ScriptModule\n \"\"\"\n stage = list(experiment.stages)[0]\n\n model.eval()\n for p in model.parameters():\n p.requires_grad_(False)\n\n tracer = _TracingModelWrapper(model, method_name)\n runner: Runner = runner_type(tracer.cpu(), torch.device(\"cpu\"))\n\n batch = _get_native_batch(experiment, stage)\n batch = runner._batch2device(batch, device=runner.device)\n\n runner.predict_batch(batch)\n\n return tracer.tracing_result\n\n\n__all__ = [\"trace_model\"]\n"
] | [
[
"torch.device",
"torch.jit.trace"
]
] |
Diva-Pant/tensorflow | [
"f926d8c10efb07176ae559d0e098cdfdb4d03219"
] | [
"tensorflow/python/distribute/multi_process_runner_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `multi_process_runner`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nimport threading\nimport time\nfrom absl import logging\n\nfrom tensorflow.python.distribute import multi_process_runner\nfrom tensorflow.python.distribute import multi_worker_test_base\nfrom tensorflow.python.eager import test\n\n\ndef proc_func_that_adds_task_type_in_return_data():\n return multi_worker_test_base.get_task_type()\n\n\ndef proc_func_that_errors():\n raise ValueError('This is an error.')\n\n\ndef proc_func_that_does_nothing():\n pass\n\n\ndef proc_func_that_adds_simple_return_data():\n return 'dummy_data'\n\n\ndef proc_func_that_return_args_and_kwargs(*args, **kwargs):\n return list(args) + list(kwargs.items())\n\n\ndef proc_func_with_barrier():\n return multi_process_runner.barrier()\n\n\nclass MultiProcessRunnerTest(test.TestCase):\n\n def _worker_idx(self):\n config_task = json.loads(os.environ['TF_CONFIG'])['task']\n return config_task['index']\n\n def test_multi_process_runner(self):\n mpr_result = multi_process_runner.run(\n proc_func_that_adds_task_type_in_return_data,\n multi_worker_test_base.create_cluster_spec(\n num_workers=2, num_ps=3, has_eval=1))\n\n job_count_dict = {'worker': 2, 'ps': 3, 'evaluator': 1}\n for data in mpr_result.return_value:\n job_count_dict[data] -= 1\n\n self.assertEqual(job_count_dict['worker'], 0)\n self.assertEqual(job_count_dict['ps'], 0)\n self.assertEqual(job_count_dict['evaluator'], 0)\n\n def test_multi_process_runner_error_propagates_from_subprocesses(self):\n runner = multi_process_runner.MultiProcessRunner(\n proc_func_that_errors,\n multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),\n max_run_time=20)\n runner.start()\n with self.assertRaisesRegexp(ValueError, 'This is an error.'):\n runner.join()\n\n def test_multi_process_runner_queue_emptied_between_runs(self):\n cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)\n return_value = multi_process_runner.run(\n proc_func_that_adds_simple_return_data, cluster_spec).return_value\n self.assertTrue(return_value)\n self.assertEqual(return_value[0], 'dummy_data')\n self.assertEqual(return_value[1], 'dummy_data')\n return_value = multi_process_runner.run(proc_func_that_does_nothing,\n cluster_spec).return_value\n self.assertFalse(return_value)\n\n def test_multi_process_runner_args_passed_correctly(self):\n return_value = multi_process_runner.run(\n proc_func_that_return_args_and_kwargs,\n multi_worker_test_base.create_cluster_spec(num_workers=1),\n args=('a', 'b'),\n kwargs={\n 'c_k': 'c_v'\n }).return_value\n self.assertEqual(return_value[0][0], 'a')\n self.assertEqual(return_value[0][1], 'b')\n self.assertEqual(return_value[0][2], ('c_k', 'c_v'))\n\n def 
test_stdout_captured(self):\n\n def simple_print_func():\n print('This is something printed.', flush=True)\n return 'This is returned data.'\n\n mpr_result = multi_process_runner.run(\n simple_print_func,\n multi_worker_test_base.create_cluster_spec(num_workers=2),\n list_stdout=True)\n std_stream_results = mpr_result.stdout\n return_value = mpr_result.return_value\n self.assertIn('[worker-0]: This is something printed.\\n',\n std_stream_results)\n self.assertIn('[worker-1]: This is something printed.\\n',\n std_stream_results)\n self.assertIn('This is returned data.', return_value)\n\n def test_process_that_exits(self):\n\n def func_to_exit_in_25_sec():\n logging.error('foo')\n time.sleep(100)\n logging.error('bar')\n\n mpr = multi_process_runner.MultiProcessRunner(\n func_to_exit_in_25_sec,\n multi_worker_test_base.create_cluster_spec(num_workers=1),\n list_stdout=True,\n max_run_time=25)\n\n mpr.start()\n stdout = mpr.join().stdout\n self.assertLen([msg for msg in stdout if 'foo' in msg], 1)\n self.assertLen([msg for msg in stdout if 'bar' in msg], 0)\n\n def test_termination(self):\n\n def proc_func():\n for i in range(0, 10):\n print(\n 'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)\n time.sleep(5)\n\n mpr = multi_process_runner.MultiProcessRunner(\n proc_func,\n multi_worker_test_base.create_cluster_spec(num_workers=2),\n list_stdout=True)\n mpr.start()\n time.sleep(5)\n mpr.terminate('worker', 0)\n std_stream_results = mpr.join().stdout\n\n # Worker 0 is terminated in the middle, so it should not have iteration 9\n # printed.\n self.assertIn('[worker-0]: index 0, iteration 0\\n', std_stream_results)\n self.assertNotIn('[worker-0]: index 0, iteration 9\\n',\n std_stream_results)\n self.assertIn('[worker-1]: index 1, iteration 0\\n', std_stream_results)\n self.assertIn('[worker-1]: index 1, iteration 9\\n', std_stream_results)\n\n def test_termination_and_start_single_process(self):\n\n def proc_func():\n for i in range(0, 10):\n print(\n 'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)\n time.sleep(1)\n\n mpr = multi_process_runner.MultiProcessRunner(\n proc_func,\n multi_worker_test_base.create_cluster_spec(num_workers=2),\n list_stdout=True)\n mpr.start()\n time.sleep(3)\n mpr.terminate('worker', 0)\n mpr.start_single_process('worker', 0)\n std_stream_results = mpr.join().stdout\n\n # Worker 0 is terminated in the middle, but a new worker 0 is added, so it\n # should still have iteration 9 printed. 
Moreover, iteration 0 of worker 0\n # should happen twice.\n self.assertLen(\n [s for s in std_stream_results if 'index 0, iteration 0' in s], 2)\n self.assertIn('[worker-0]: index 0, iteration 9\\n', std_stream_results)\n self.assertIn('[worker-1]: index 1, iteration 0\\n', std_stream_results)\n self.assertIn('[worker-1]: index 1, iteration 9\\n', std_stream_results)\n\n def test_streaming(self):\n\n def proc_func():\n for i in range(5):\n logging.info('(logging) %s-%d, i: %d',\n multi_worker_test_base.get_task_type(), self._worker_idx(),\n i)\n print(\n '(print) {}-{}, i: {}'.format(\n multi_worker_test_base.get_task_type(), self._worker_idx(), i),\n flush=True)\n time.sleep(1)\n\n mpr = multi_process_runner.MultiProcessRunner(\n proc_func,\n multi_worker_test_base.create_cluster_spec(\n has_chief=True, num_workers=2, num_ps=2, has_eval=True),\n list_stdout=True)\n mpr._dependence_on_chief = False\n\n mpr.start()\n mpr.start_single_process('worker', 2)\n mpr.start_single_process('ps', 2)\n mpr_result = mpr.join()\n\n list_to_assert = mpr_result.stdout\n\n for job in ['chief', 'evaluator']:\n for iteration in range(5):\n self.assertTrue(\n any('(logging) {}-0, i: {}'.format(job, iteration) in line\n for line in list_to_assert))\n self.assertTrue(\n any('(print) {}-0, i: {}'.format(job, iteration) in line\n for line in list_to_assert))\n\n for job in ['worker', 'ps']:\n for iteration in range(5):\n for task in range(3):\n self.assertTrue(\n any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line\n for line in list_to_assert))\n self.assertTrue(\n any('(print) {}-{}, i: {}'.format(job, task, iteration) in line\n for line in list_to_assert))\n task = 3\n self.assertFalse(\n any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line\n for line in list_to_assert))\n self.assertFalse(\n any('(print) {}-{}, i: {}'.format(job, task, iteration) in line\n for line in list_to_assert))\n\n def test_start_in_process_as(self):\n\n def proc_func():\n for i in range(5):\n logging.info('%s-%d, i: %d', multi_worker_test_base.get_task_type(),\n self._worker_idx(), i)\n time.sleep(1)\n\n mpr = multi_process_runner.MultiProcessRunner(\n proc_func,\n multi_worker_test_base.create_cluster_spec(\n has_chief=True, num_workers=1),\n list_stdout=True)\n\n def eval_func():\n time.sleep(1)\n mpr.start_single_process(task_type='evaluator', task_id=0)\n\n eval_thread = threading.Thread(target=eval_func)\n eval_thread.start()\n mpr.start_in_process_as(as_task_type='chief', as_task_id=0)\n eval_thread.join()\n list_to_assert = mpr.join().stdout\n for job in ['worker', 'evaluator']:\n for iteration in range(5):\n self.assertTrue(\n any('{}-0, i: {}'.format(job, iteration) in line\n for line in list_to_assert))\n\n def test_terminate_all_does_not_ignore_error(self):\n mpr = multi_process_runner.MultiProcessRunner(\n proc_func_that_errors,\n multi_worker_test_base.create_cluster_spec(num_workers=2),\n list_stdout=True)\n mpr.start()\n time.sleep(60)\n mpr.terminate_all()\n with self.assertRaisesRegexp(ValueError, 'This is an error.'):\n mpr.join()\n\n def test_barrier(self):\n multi_process_runner.run(\n proc_func_with_barrier,\n cluster_spec=multi_worker_test_base.create_cluster_spec(\n has_chief=True, num_workers=1),\n )\n\n def test_barrier_called_in_main_process(self):\n with self.assertRaises(ValueError):\n multi_process_runner.barrier()\n\n def test_stdout_available_when_timeout(self):\n\n def proc_func():\n for i in range(50):\n logging.info('(logging) %s-%d, i: %d',\n 
multi_worker_test_base.get_task_type(), self._worker_idx(),\n i)\n time.sleep(1)\n\n with self.assertRaises(multi_process_runner.SubprocessTimeoutError) as cm:\n multi_process_runner.run(\n proc_func,\n multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),\n list_stdout=True,\n timeout=5)\n\n list_to_assert = cm.exception.mpr_result.stdout\n for job in ['worker', 'ps']:\n for iteration in range(0, 5):\n self.assertTrue(\n any('(logging) {}-0, i: {}'.format(job, iteration) in line\n for line in list_to_assert))\n\n\nif __name__ == '__main__':\n multi_process_runner.test_main()\n"
] | [
[
"tensorflow.python.distribute.multi_process_runner.barrier",
"tensorflow.python.distribute.multi_process_runner.run",
"tensorflow.python.distribute.multi_process_runner.test_main",
"tensorflow.python.distribute.multi_worker_test_base.get_task_type",
"tensorflow.python.distribute.multi_worker_test_base.create_cluster_spec"
]
] |
sano307/lambda-container-demo | [
"6c27c56819c9a3defb63bf26b4fd53bf6cdb71d3"
] | [
"lambda/index.py"
] | [
"import json\n\nimport pandas as pd\n\n\ndef handler(event, context):\n df = pd.DataFrame({\"id\": [1, 2], \"value\": [\"foo\", \"boo\"]})\n print(df)\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"This is a container lambda.\"\n })\n }\n"
] | [
[
"pandas.DataFrame"
]
] |