repo_name (string, length 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
koelling/amplimap | [
"cbd5b7b8c2f703982d8964a3c77bd350a47f08a6"
] | [
"amplimap/coverage.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains methods for processing and aggregating coverage files generated by ``bedtools``.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\n\nfrom .reader import read_sample_info\n\ncov_cols = ['Target', 'min_coverage', 'sum_coverage', 'basepairs', 'cov_per_bp', 'fraction_zero_coverage', 'fraction_10x_coverage', 'fraction_30x_coverage']\ncov_cols_dtypes = dict(zip(cov_cols, [str, int, int, int, float, float]))\n\ndef fraction_zero_coverage(coverage):\n \"\"\"Calculate fraction of bases with coverage 0.\"\"\"\n return 1.0 * (coverage == 0).sum() / len(coverage)\n\ndef fraction_10x_coverage(coverage):\n \"\"\"Calculate fraction of bases with coverage 10 or more.\"\"\"\n return 1.0 * (coverage >= 10).sum() / len(coverage)\n\ndef fraction_30x_coverage(coverage):\n \"\"\"Calculate fraction of bases with coverage 30 or more.\"\"\"\n return 1.0 * (coverage >= 30).sum() / len(coverage)\n\ndef process_file(input: str, output: str):\n \"\"\"Read raw bedtools coverage file, calculate summary statistics and output them as CSV file.\n\n Args:\n input: path to a bedtools coverage file\n output: path to the summary CSV file\n \"\"\"\n\n # read bedtools output\n depth = pd.read_csv(input, sep='\\t', names = ['chr', 'start_0', 'end', 'id', 'score', 'strand', 'position', 'coverage'], low_memory=False)\n\n # summarize\n summary = depth.groupby('id').aggregate({'coverage': [np.min, np.sum, len, np.mean, fraction_zero_coverage, fraction_10x_coverage, fraction_30x_coverage]})\n\n # make id index into normal column, then reset column names\n summary.reset_index(level=0, inplace=True)\n summary.columns = cov_cols\n\n # write file\n summary.to_csv(output, index = False)\n\ndef aggregate(input, output):\n \"\"\"Read coverage summary files and create aggregate files.\n\n Args:\n input: dict containing 'csvs', the list of csvs fils to aggregate, and optionally 'sample_info', a table with additional sample annotation\n output: dict containing paths for output files: merged, min_coverage, cov_per_bp, fraction_zero_coverage\n \"\"\"\n # load sample information table\n sample_info = None\n if 'sample_info' in input and len(input['sample_info']) > 0:\n sample_info = read_sample_info(input['sample_info'][0])\n\n merged = None\n for file in input['csvs']:\n sname = os.path.basename(file)\n sname = re.sub(r'\\.coverage\\.csv$', '', sname)\n\n print('Reading', file, 'for', sname, '...')\n df = pd.read_csv(file,\n index_col = False,\n dtype = cov_cols_dtypes)\n df['Sample'] = sname\n print(sname, 'coverage data shape:', str(df.shape))\n\n if merged is None:\n merged = df\n else:\n merged = merged.append(df, ignore_index = True)\n\n assert merged is not None, \\\n '\\n\\nABORTED: Did not find any coverage data!\\n\\n'\n\n print('Merged data shape:', str(merged.shape))\n print(merged.head())\n\n print('Duplicated:')\n print(merged[merged.duplicated(['Target', 'Sample'], keep=False)])\n\n if sample_info is not None:\n merged = merged.join(sample_info, on = ['Sample', 'Target'], how = 'left')\n\n # make matrices\n for column in ['min_coverage', 'cov_per_bp', 'fraction_zero_coverage']:\n pivoted = merged.pivot(index='Target', columns='Sample', values=column)\n print('Made pivot table for', column, ' with shape', str(pivoted.shape))\n pivoted.to_csv(output[column])\n print(output[column])\n\n # output full merged data set\n merged.to_csv(output['merged'], index = False)"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
joey12300/Paddle | [
"59102c6dcd2def3091f5c37816354ac69d669809"
] | [
"python/paddle/fluid/tests/unittests/xpu/test_softmax_with_cross_entropy_op_xpu.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport sys\nsys.path.append(\"..\")\n\nfrom test_softmax_op import stable_softmax\nfrom op_test import OpTest\nimport paddle.fluid.core as core\nimport paddle\n\nimport unittest\nimport numpy as np\n\n\ndef cross_entropy(softmax, label, soft_label, axis, ignore_index=-1):\n if soft_label:\n return (-label * np.log(softmax)).sum(axis=axis, keepdims=True)\n\n shape = softmax.shape\n axis %= len(shape)\n n = int(np.prod(shape[:axis]))\n axis_dim = shape[axis]\n remain = int(np.prod(shape[axis + 1:]))\n softmax_reshape = softmax.reshape((n, axis_dim, remain))\n label_reshape = label.reshape((n, 1, remain))\n result = np.zeros_like(label_reshape, dtype=softmax.dtype)\n for i in range(n):\n for j in range(remain):\n lbl = label_reshape[i, 0, j]\n if lbl != ignore_index:\n result[i, 0, j] -= np.log(softmax_reshape[i, lbl, j])\n return result.reshape(label.shape)\n\n\nclass TestSoftmaxWithCrossEntropyOp(OpTest):\n \"\"\"\n Test softmax with cross entropy operator with discreate one-hot labels.\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = False\n self.soft_label = False\n self.dtype = np.float32\n self.axis = -1\n self.ignore_index = -1\n self.shape = [41, 37]\n self.use_xpu = True\n\n def setUp(self):\n self.initParams()\n\n logits = getattr(\n self, \"logits\",\n np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))\n softmax = np.apply_along_axis(stable_softmax, self.axis, logits)\n\n if self.soft_label:\n labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)\n labels /= np.sum(labels, axis=self.axis, keepdims=True)\n else:\n axis_dim = self.shape[self.axis]\n self.shape[self.axis] = 1\n labels = np.random.randint(0, axis_dim, self.shape, dtype=\"int64\")\n\n loss = cross_entropy(softmax, labels, self.soft_label, self.axis,\n self.ignore_index)\n\n self.inputs = {\"Logits\": logits, \"Label\": labels}\n self.outputs = {\n \"Softmax\": softmax.astype(self.dtype),\n \"Loss\": loss.astype(self.dtype)\n }\n self.attrs = {\n \"numeric_stable_mode\": self.numeric_stable_mode,\n \"soft_label\": self.soft_label,\n }\n if self.ignore_index >= 0:\n self.attrs['ignore_index'] = self.ignore_index\n if self.axis != -1:\n self.attrs['axis'] = self.axis\n\n def test_check_output(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_output_with_place(place, atol=1e-2)\n\n def test_check_grad(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_grad_with_place(\n place, [\"Logits\"], \"Loss\", max_relative_error=0.2)\n\n\nclass TestXPUSoftmaxWithCrossEntropyOp(TestSoftmaxWithCrossEntropyOp):\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.shape = [3, 5, 7, 11]\n self.axis = 
-1\n self.ignore_index = -1\n self.dtype = np.float32\n self.use_xpu = True\n\n def test_check_output(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_output_with_place(place, atol=1e-2)\n\n def test_check_grad(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_grad_with_place(\n place, [\"Logits\"], \"Loss\", max_relative_error=0.2)\n\n\nclass TestXPUSoftmaxWithCrossEntropyOp2(TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test softmax with cross entropy operator with soft labels.\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = True\n self.dtype = np.float32\n self.axis = -1\n self.ignore_index = -1\n self.shape = [41, 37]\n self.use_xpu = True\n\n def test_check_output(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_output_with_place(place, atol=1e-2)\n\n def test_check_grad(self):\n if paddle.is_compiled_with_xpu():\n paddle.enable_static()\n place = paddle.XPUPlace(0)\n self.check_grad_with_place(\n place, [\"Logits\"], \"Loss\", max_relative_error=0.2)\n\n\nclass TestXPUSoftmaxWithCrossEntropyOp3(TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test softmax with cross entropy operator with ignore_index.\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.shape = [41, 37]\n self.ignore_index = 5\n self.axis = -1\n self.dtype = np.float32\n\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpAxis1(TestXPUSoftmaxWithCrossEntropyOp):\n# \"\"\"\n# Test softmax with cross entropy operator with discreate one-hot labels.\n# Given axis != -1\n# \"\"\"\n\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.dtype = np.float32\n# self.axis = 0\n# self.ignore_index = -1\n# self.shape = [3, 5, 7, 11]\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpAxis2(TestXPUSoftmaxWithCrossEntropyOp):\n# \"\"\"\n# Test softmax with cross entropy operator with discreate one-hot labels.\n# Given axis != -1\n# \"\"\"\n\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.dtype = np.float32\n# self.axis = 1\n# self.ignore_index = -1\n# self.shape = [3, 5, 7, 11]\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpAxis3(TestXPUSoftmaxWithCrossEntropyOp):\n# \"\"\"\n# Test softmax with cross entropy operator with discreate one-hot labels.\n# Given axis != -1\n# \"\"\"\n\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.dtype = np.float32\n# self.axis = 2\n# self.ignore_index = -1\n# self.shape = [3, 5, 7, 11]\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpAxis4(TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test softmax with cross entropy operator with discreate one-hot labels.\n Given axis != -1\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.dtype = np.float32\n self.axis = 3\n self.ignore_index = -1\n self.shape = [3, 5, 7, 11]\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpAxisDimEqualOne(\n 
TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test softmax with cross entropy operator with discreate one-hot labels.\n Given axis != -1\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.dtype = np.float32\n self.axis = -1\n self.ignore_index = -1\n self.shape = [3, 5, 7, 1]\n\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis1(\n# TestXPUSoftmaxWithCrossEntropyOp):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = True\n# self.shape = [3, 5, 7, 11]\n# self.axis = 0\n# self.ignore_index = -1\n# self.dtype = np.float32\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis2(\n# TestXPUSoftmaxWithCrossEntropyOp2):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = True\n# self.shape = [3, 5, 7, 11]\n# self.axis = 1\n# self.ignore_index = -1\n# self.dtype = np.float32\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis3(\n# TestXPUSoftmaxWithCrossEntropyOp2):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = True\n# self.shape = [3, 5, 7, 11]\n# self.axis = 2\n# self.ignore_index = -1\n# self.dtype = np.float32\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpSoftLabelAxis4(\n TestXPUSoftmaxWithCrossEntropyOp2):\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = True\n self.shape = [3, 5, 7, 11]\n self.axis = 3\n self.ignore_index = -1\n self.dtype = np.float32\n\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis1(\n# TestXPUSoftmaxWithCrossEntropyOp3):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.shape = [3, 5, 7, 11]\n# self.ignore_index = 1\n# self.axis = 0\n# self.dtype = np.float32\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis2(\n# TestXPUSoftmaxWithCrossEntropyOp3):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.shape = [3, 5, 7, 11]\n# self.ignore_index = 0\n# self.axis = 1\n# self.dtype = np.float32\n\n# xpu only support axis = rank -1\n# class TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis3(\n# TestXPUSoftmaxWithCrossEntropyOp3):\n# def initParams(self):\n# self.op_type = \"softmax_with_cross_entropy\"\n# self.numeric_stable_mode = True\n# self.soft_label = False\n# self.shape = [3, 5, 7, 11]\n# self.ignore_index = 3\n# self.axis = 2\n# self.dtype = np.float32\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis4(\n TestXPUSoftmaxWithCrossEntropyOp3):\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.shape = [3, 5, 7, 11]\n self.ignore_index = 3\n self.axis = 3\n self.dtype = np.float32\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpBoundary0(\n TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test stable softmax with cross entropy operator will not product INF\n with small logits value.\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n 
self.numeric_stable_mode = True\n self.soft_label = False\n self.shape = [3, 5, 7, 11]\n self.axis = -1\n self.ignore_index = -1\n self.dtype = np.float32\n self.logits = np.full(self.shape, -500.0).astype(self.dtype)\n\n\nclass TestXPUSoftmaxWithCrossEntropyOpBoundary1(\n TestXPUSoftmaxWithCrossEntropyOp):\n \"\"\"\n Test stable softmax with cross entropy operator will not product INF\n with small logits value.\n \"\"\"\n\n def initParams(self):\n self.op_type = \"softmax_with_cross_entropy\"\n self.numeric_stable_mode = True\n self.soft_label = False\n self.shape = [3, 5, 7, 11]\n self.axis = -1\n self.ignore_index = -1\n self.dtype = np.float32\n self.logits = np.full(self.shape, 1000.0).astype(self.dtype)\n self.logits[:, :, 0, :] = -1000.0\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.log",
"numpy.full",
"numpy.apply_along_axis",
"numpy.zeros_like",
"numpy.prod",
"numpy.random.uniform",
"numpy.sum",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
computationalartist/tensorflow | [
"b89cf636c412abdff53b3e8f201bde671c92209d",
"b89cf636c412abdff53b3e8f201bde671c92209d",
"b89cf636c412abdff53b3e8f201bde671c92209d"
] | [
"tensorflow/python/kernel_tests/math_ops/argmax_op_test.py",
"tensorflow/compiler/mlir/tfrt/python_tests/tf_const_test.py",
"tensorflow/lite/testing/op_tests/multinomial.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.argmax_op.\"\"\"\nimport functools\n\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass ArgMaxTest(test.TestCase):\n\n def _testArg(self,\n method,\n x,\n axis,\n expected_values,\n use_gpu=False,\n expected_err_re=None):\n with self.session(use_gpu=use_gpu):\n ans = method(x, axis=axis)\n if expected_err_re is None:\n tf_ans = self.evaluate(ans)\n # Defaults to int64 output.\n self.assertEqual(np.int64, tf_ans.dtype)\n self.assertAllEqual(tf_ans, expected_values)\n self.assertShapeEqual(expected_values, ans)\n else:\n with self.assertRaisesOpError(expected_err_re):\n self.evaluate(ans)\n\n def _testBothArg(self,\n method,\n x,\n axis,\n expected_values,\n expected_err_re=None):\n self._testArg(method, x, axis, expected_values, True, expected_err_re)\n # Compilation time is too large with XLA/CPU autojit.\n if not test_util.is_xla_enabled():\n self._testArg(method, x, axis, expected_values, False, expected_err_re)\n\n def _testBasic(self, dtype):\n x = np.arange(200, dtype=np.float32).astype(dtype)\n np.random.shuffle(x)\n\n # Check that argmin and argmax match numpy along the primary axis\n self._testBothArg(math_ops.argmax, x, 0, x.argmax())\n self._testBothArg(math_ops.argmin, x, 0, x.argmin())\n\n def _testTieBreaking(self, dtype):\n x = np.zeros(200, dtype=dtype)\n\n # Check that argmin and argmax match numpy along the primary axis for\n # breaking ties.\n self._testBothArg(math_ops.argmax, x, 0, x.argmax())\n self._testBothArg(math_ops.argmin, x, 0, x.argmin())\n\n # Check that argmin and argmax match numpy along axis=1 for\n # breaking ties.\n x = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [0, 1, 0, 1]], dtype=dtype)\n self._testBothArg(math_ops.argmax, x, 1, x.argmax(axis=1))\n self._testBothArg(math_ops.argmin, x, 1, x.argmin(axis=1))\n\n def _testDim(self, dtype):\n shape = (3, 2, 4, 5, 6, 3, 7)\n x = np.arange(\n functools.reduce(lambda x, y: x * y, shape),\n dtype=np.float32).astype(dtype)\n np.random.shuffle(x)\n x = x.reshape(shape)\n\n # Check that argmin and argmax match numpy along all axes\n for axis in range(-7, 7):\n self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))\n self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))\n\n def testFloat(self):\n self._testBasic(np.float32)\n self._testTieBreaking(np.float32)\n self._testDim(np.float32)\n\n def testFloatInt32Output(self):\n x = np.asarray(100 * np.random.randn(200), dtype=np.float32)\n expected_values = x.argmax()\n with self.session():\n ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)\n tf_ans = self.evaluate(ans)\n self.assertEqual(np.int32, tf_ans.dtype)\n # The 
values are equal when comparing int32 to int64 because\n # the values don't have a range that exceeds 32-bit integers.\n self.assertAllEqual(tf_ans, expected_values)\n expected_values = x.argmin()\n with self.session():\n ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)\n tf_ans = self.evaluate(ans)\n self.assertEqual(np.int32, tf_ans.dtype)\n self.assertAllEqual(tf_ans, expected_values)\n\n def testDouble(self):\n self._testBasic(np.float64)\n self._testTieBreaking(np.float64)\n self._testDim(np.float64)\n\n def testInt32(self):\n self._testBasic(np.int32)\n self._testTieBreaking(np.int32)\n self._testDim(np.int32)\n\n def testInt64(self):\n self._testBasic(np.int64)\n self._testTieBreaking(np.int64)\n self._testDim(np.int64)\n\n def testBool(self):\n self._testBasic(np.bool_)\n self._testTieBreaking(np.bool_)\n self._testDim(np.bool_)\n\n def testEmpty(self):\n with self.cached_session():\n for op in math_ops.argmin, math_ops.argmax:\n with self.assertRaisesOpError(\n r\"Reduction axis 0 is empty in shape \\[0\\]\"):\n op([], 0).eval()\n\n @test_util.run_deprecated_v1\n def testDefaultAxis(self):\n with self.cached_session():\n for op in math_ops.argmin, math_ops.argmax:\n ans = op([1]).eval()\n self.assertAllEqual(ans, 0)\n\n @test_util.run_deprecated_v1\n def testOutputEmpty(self):\n with self.cached_session():\n for op in math_ops.argmin, math_ops.argmax:\n ret = op(array_ops.zeros(shape=[1, 0, 2]), axis=-1).eval()\n self.assertEqual(ret.shape, (1, 0))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Tensorflow -> CPURT compilation.\"\"\"\n\nimport numpy as np\n\nfrom tensorflow.compiler.mlir.tfrt.jit.python_binding import tf_cpurt\nfrom tensorflow.python.platform import test\n\ncpurt = tf_cpurt.TfCpurtExecutor()\n\n\nclass TfConstTest(test.TestCase):\n\n def test_const_i32(self):\n mlir_function = \"\"\"\n func @test() -> tensor<1xi32> {\n %0 = \"tf.Const\"() {\n value = dense<1> : tensor<1xi32>\n } : () -> tensor<1xi32>\n return %0 : tensor<1xi32>\n }\"\"\"\n\n compiled = cpurt.compile(mlir_function, 'test')\n [res] = cpurt.execute(compiled, [])\n np.testing.assert_allclose(res, 1, rtol=0.0)\n\n def test_constant_folding_i32(self):\n mlir_function = \"\"\"\n func @test() -> tensor<2xi32> {\n %0 = \"tf.Const\"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>\n %1 = \"tf.Const\"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>\n %2 = \"tf.Pack\"(%0, %1) {axis = 0 : i64}\n : (tensor<i32>, tensor<i32>) -> tensor<2xi32>\n return %2 : tensor<2xi32>\n }\"\"\"\n\n compiled = cpurt.compile(mlir_function, 'test')\n [res] = cpurt.execute(compiled, [])\n np.testing.assert_allclose(res, [0, 1], rtol=0.0)\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test configs for multinomial.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.lite.testing.zip_test_utils import create_tensor_data\nfrom tensorflow.lite.testing.zip_test_utils import make_zip_of_tests\nfrom tensorflow.lite.testing.zip_test_utils import register_make_test_function\n\n\n@register_make_test_function()\ndef make_multinomial_tests(options):\n \"\"\"Make a set of tests to do multinomial.\"\"\"\n test_parameters = [{\n \"logits_shape\": [[1, 2], [2, 5]],\n \"seed\": [None, 1234],\n \"seed2\": [5678],\n }, {\n \"logits_shape\": [[1, 2]],\n \"seed\": [1234],\n \"seed2\": [None]\n }]\n\n def build_graph(parameters):\n \"\"\"Build the op testing graph.\"\"\"\n tf.set_random_seed(seed=parameters[\"seed\"])\n logits_tf = tf.compat.v1.placeholder(\n name=\"logits\", dtype=tf.float32, shape=parameters[\"logits_shape\"])\n num_samples_tf = tf.compat.v1.placeholder(\n name=\"num_samples\", dtype=tf.int32, shape=None)\n out = tf.random.categorical(\n logits=logits_tf, num_samples=num_samples_tf, seed=parameters[\"seed2\"])\n return [logits_tf, num_samples_tf], [out]\n\n def build_inputs(parameters, sess, inputs, outputs):\n input_values = [\n create_tensor_data(\n dtype=tf.float32, shape=parameters[\"logits_shape\"], min_value=-2,\n max_value=-1),\n create_tensor_data(\n dtype=tf.int32, shape=None, min_value=10, max_value=100)\n ]\n return input_values, sess.run(\n outputs, feed_dict=dict(zip(inputs, input_values)))\n\n make_zip_of_tests(options, test_parameters, build_graph, build_inputs)\n"
] | [
[
"tensorflow.python.ops.math_ops.argmax",
"numpy.arange",
"numpy.random.shuffle",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.platform.test.main",
"numpy.random.randn",
"tensorflow.python.framework.test_util.is_xla_enabled",
"numpy.array",
"numpy.zeros",
"tensorflow.python.ops.math_ops.argmin"
],
[
"tensorflow.compiler.mlir.tfrt.jit.python_binding.tf_cpurt.TfCpurtExecutor",
"tensorflow.python.platform.test.main",
"numpy.testing.assert_allclose"
],
[
"tensorflow.lite.testing.zip_test_utils.make_zip_of_tests",
"tensorflow.lite.testing.zip_test_utils.create_tensor_data",
"tensorflow.compat.v1.compat.v1.placeholder",
"tensorflow.compat.v1.random.categorical",
"tensorflow.lite.testing.zip_test_utils.register_make_test_function",
"tensorflow.compat.v1.set_random_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
QuESt-Calculator/pyscf | [
"0ed03633b699505c7278f1eb501342667d0aa910",
"0ed03633b699505c7278f1eb501342667d0aa910",
"0ed03633b699505c7278f1eb501342667d0aa910",
"0ed03633b699505c7278f1eb501342667d0aa910",
"0ed03633b699505c7278f1eb501342667d0aa910",
"0ed03633b699505c7278f1eb501342667d0aa910",
"0ed03633b699505c7278f1eb501342667d0aa910",
"0ed03633b699505c7278f1eb501342667d0aa910",
"0ed03633b699505c7278f1eb501342667d0aa910",
"0ed03633b699505c7278f1eb501342667d0aa910"
] | [
"pyscf/grad/casci.py",
"pyscf/symm/test/test_Dmatrix.py",
"pyscf/ao2mo/test/test_incore.py",
"pyscf/lib/test/test_misc.py",
"pyscf/mp/test/test_ump2.py",
"pyscf/pbc/cc/eom_kccsd_rhf_ip.py",
"pyscf/scf/_response_functions.py",
"pyscf/dft/uks.py",
"pyscf/gw/gw_exact.py",
"pyscf/agf2/mpi_helper.py"
] | [
"#!/usr/bin/env python\n# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nCASCI analytical nuclear gradients\n\nRef.\nJ. Comput. Chem., 5, 589\n'''\n\nimport sys\n\nfrom functools import reduce\nimport numpy\nfrom pyscf import lib\nfrom pyscf import ao2mo\nfrom pyscf.lib import logger\nfrom pyscf.grad import rhf as rhf_grad\nfrom pyscf.grad.mp2 import _shell_prange\nfrom pyscf.scf import cphf\n\nif sys.version_info < (3,):\n RANGE_TYPE = list\nelse:\n RANGE_TYPE = range\n\n\ndef grad_elec(mc_grad, mo_coeff=None, ci=None, atmlst=None, verbose=None):\n mc = mc_grad.base\n if mo_coeff is None: mo_coeff = mc._scf.mo_coeff\n if ci is None: ci = mc.ci\n\n time0 = logger.process_clock(), logger.perf_counter()\n log = logger.new_logger(mc_grad, verbose)\n mol = mc_grad.mol\n ncore = mc.ncore\n ncas = mc.ncas\n nocc = ncore + ncas\n nelecas = mc.nelecas\n nao, nmo = mo_coeff.shape\n nao_pair = nao * (nao+1) // 2\n mo_energy = mc._scf.mo_energy\n\n mo_occ = mo_coeff[:,:nocc]\n mo_core = mo_coeff[:,:ncore]\n mo_cas = mo_coeff[:,ncore:nocc]\n neleca, nelecb = mol.nelec\n assert(neleca == nelecb)\n orbo = mo_coeff[:,:neleca]\n orbv = mo_coeff[:,neleca:]\n\n casdm1, casdm2 = mc.fcisolver.make_rdm12(ci, ncas, nelecas)\n dm_core = numpy.dot(mo_core, mo_core.T) * 2\n dm_cas = reduce(numpy.dot, (mo_cas, casdm1, mo_cas.T))\n aapa = ao2mo.kernel(mol, (mo_cas, mo_cas, mo_coeff, mo_cas), compact=False)\n aapa = aapa.reshape(ncas,ncas,nmo,ncas)\n vj, vk = mc._scf.get_jk(mol, (dm_core, dm_cas))\n h1 = mc.get_hcore()\n vhf_c = vj[0] - vk[0] * .5\n vhf_a = vj[1] - vk[1] * .5\n # Imat = h1_{pi} gamma1_{iq} + h2_{pijk} gamma_{iqkj}\n Imat = numpy.zeros((nmo,nmo))\n Imat[:,:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c + vhf_a, mo_occ)) * 2\n Imat[:,ncore:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c, mo_cas, casdm1))\n Imat[:,ncore:nocc] += lib.einsum('uviw,vuwt->it', aapa, casdm2)\n aapa = vj = vk = vhf_c = vhf_a = h1 = None\n\n ee = mo_energy[:,None] - mo_energy\n zvec = numpy.zeros_like(Imat)\n zvec[:ncore,ncore:neleca] = Imat[:ncore,ncore:neleca] / -ee[:ncore,ncore:neleca]\n zvec[ncore:neleca,:ncore] = Imat[ncore:neleca,:ncore] / -ee[ncore:neleca,:ncore]\n zvec[nocc:,neleca:nocc] = Imat[nocc:,neleca:nocc] / -ee[nocc:,neleca:nocc]\n zvec[neleca:nocc,nocc:] = Imat[neleca:nocc,nocc:] / -ee[neleca:nocc,nocc:]\n\n zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))\n vhf = mc._scf.get_veff(mol, zvec_ao) * 2\n xvo = reduce(numpy.dot, (orbv.T, vhf, orbo))\n xvo += Imat[neleca:,:neleca] - Imat[:neleca,neleca:].T\n def fvind(x):\n x = x.reshape(xvo.shape)\n dm = reduce(numpy.dot, (orbv, x, orbo.T))\n v = mc._scf.get_veff(mol, dm + dm.T)\n v = reduce(numpy.dot, (orbv.T, v, orbo))\n return v * 2\n dm1resp = cphf.solve(fvind, mo_energy, mc._scf.mo_occ, xvo, max_cycle=30)[0]\n zvec[neleca:,:neleca] = dm1resp\n\n zeta = numpy.einsum('ij,j->ij', zvec, mo_energy)\n zeta = reduce(numpy.dot, 
(mo_coeff, zeta, mo_coeff.T))\n\n zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))\n p1 = numpy.dot(mo_coeff[:,:neleca], mo_coeff[:,:neleca].T)\n vhf_s1occ = reduce(numpy.dot, (p1, mc._scf.get_veff(mol, zvec_ao), p1))\n\n Imat[:ncore,ncore:neleca] = 0\n Imat[ncore:neleca,:ncore] = 0\n Imat[nocc:,neleca:nocc] = 0\n Imat[neleca:nocc,nocc:] = 0\n Imat[neleca:,:neleca] = Imat[:neleca,neleca:].T\n im1 = reduce(numpy.dot, (mo_coeff, Imat, mo_coeff.T))\n\n casci_dm1 = dm_core + dm_cas\n hf_dm1 = mc._scf.make_rdm1(mo_coeff, mc._scf.mo_occ)\n hcore_deriv = mc_grad.hcore_generator(mol)\n s1 = mc_grad.get_ovlp(mol)\n\n diag_idx = numpy.arange(nao)\n diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx\n casdm2_cc = casdm2 + casdm2.transpose(0,1,3,2)\n dm2buf = ao2mo._ao2mo.nr_e2(casdm2_cc.reshape(ncas**2,ncas**2), mo_cas.T,\n (0, nao, 0, nao)).reshape(ncas**2,nao,nao)\n dm2buf = lib.pack_tril(dm2buf)\n dm2buf[:,diag_idx] *= .5\n dm2buf = dm2buf.reshape(ncas,ncas,nao_pair)\n casdm2 = casdm2_cc = None\n\n if atmlst is None:\n atmlst = range(mol.natm)\n aoslices = mol.aoslice_by_atom()\n de = numpy.zeros((len(atmlst),3))\n\n max_memory = mc_grad.max_memory - lib.current_memory()[0]\n blksize = int(max_memory*.9e6/8 / ((aoslices[:,3]-aoslices[:,2]).max()*nao_pair))\n blksize = min(nao, max(2, blksize))\n\n for k, ia in enumerate(atmlst):\n shl0, shl1, p0, p1 = aoslices[ia]\n h1ao = hcore_deriv(ia)\n de[k] += numpy.einsum('xij,ij->x', h1ao, casci_dm1)\n de[k] += numpy.einsum('xij,ij->x', h1ao, zvec_ao)\n\n q1 = 0\n for b0, b1, nf in _shell_prange(mol, 0, mol.nbas, blksize):\n q0, q1 = q1, q1 + nf\n dm2_ao = lib.einsum('ijw,pi,qj->pqw', dm2buf, mo_cas[p0:p1], mo_cas[q0:q1])\n shls_slice = (shl0,shl1,b0,b1,0,mol.nbas,0,mol.nbas)\n eri1 = mol.intor('int2e_ip1', comp=3, aosym='s2kl',\n shls_slice=shls_slice).reshape(3,p1-p0,nf,nao_pair)\n de[k] -= numpy.einsum('xijw,ijw->x', eri1, dm2_ao) * 2\n\n for i in range(3):\n eri1tmp = lib.unpack_tril(eri1[i].reshape((p1-p0)*nf,-1))\n eri1tmp = eri1tmp.reshape(p1-p0,nf,nao,nao)\n de[k,i] -= numpy.einsum('ijkl,ij,kl', eri1tmp, hf_dm1[p0:p1,q0:q1], zvec_ao) * 2\n de[k,i] -= numpy.einsum('ijkl,kl,ij', eri1tmp, hf_dm1, zvec_ao[p0:p1,q0:q1]) * 2\n de[k,i] += numpy.einsum('ijkl,il,kj', eri1tmp, hf_dm1[p0:p1], zvec_ao[q0:q1])\n de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, hf_dm1[q0:q1], zvec_ao[p0:p1])\n\n #:vhf1c, vhf1a = mc_grad.get_veff(mol, (dm_core, dm_cas))\n #:de[k] += numpy.einsum('xij,ij->x', vhf1c[:,p0:p1], casci_dm1[p0:p1]) * 2\n #:de[k] += numpy.einsum('xij,ij->x', vhf1a[:,p0:p1], dm_core[p0:p1]) * 2\n de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1]) * 2\n de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1])\n de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1]) * 2\n de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1])\n eri1 = eri1tmp = None\n\n de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], im1[p0:p1])\n de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], im1[:,p0:p1])\n\n de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], zeta[p0:p1]) * 2\n de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], zeta[:,p0:p1]) * 2\n\n de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], vhf_s1occ[p0:p1]) * 2\n de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], vhf_s1occ[:,p0:p1]) * 2\n\n log.timer('CASCI nuclear gradients', *time0)\n return de\n\n\ndef as_scanner(mcscf_grad, state=None):\n '''Generating a nuclear gradients scanner/solver (for geometry optimizer).\n\n The 
returned solver is a function. This function requires one argument\n \"mol\" as input and returns energy and first order nuclear derivatives.\n\n The solver will automatically use the results of last calculation as the\n initial guess of the new calculation. All parameters assigned in the\n nuc-grad object and SCF object (DIIS, conv_tol, max_memory etc) are\n automatically applied in the solver.\n\n Note scanner has side effects. It may change many underlying objects\n (_scf, with_df, with_x2c, ...) during calculation.\n\n Examples:\n\n >>> from pyscf import gto, scf, mcscf\n >>> mol = gto.M(atom='N 0 0 0; N 0 0 1.1', verbose=0)\n >>> mc_grad_scanner = mcscf.CASCI(scf.RHF(mol), 4, 4).nuc_grad_method().as_scanner()\n >>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.1'))\n >>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.5'))\n '''\n from pyscf import gto\n from pyscf.mcscf.addons import StateAverageMCSCFSolver\n if isinstance(mcscf_grad, lib.GradScanner):\n return mcscf_grad\n if (state is not None and\n isinstance(mcscf_grad.base, StateAverageMCSCFSolver)):\n raise RuntimeError('State-Average MCSCF Gradients does not support '\n 'state-specific nuclear gradients.')\n\n logger.info(mcscf_grad, 'Create scanner for %s', mcscf_grad.__class__)\n\n class CASCI_GradScanner(mcscf_grad.__class__, lib.GradScanner):\n def __init__(self, g):\n lib.GradScanner.__init__(self, g)\n def __call__(self, mol_or_geom, state=state, **kwargs):\n if isinstance(mol_or_geom, gto.Mole):\n mol = mol_or_geom\n else:\n mol = self.mol.set_geom_(mol_or_geom, inplace=False)\n\n if state is None:\n state = self.state\n\n mc_scanner = self.base\n# TODO: Check root flip\n e_tot = mc_scanner(mol)\n ci = mc_scanner.ci\n if isinstance(mc_scanner, StateAverageMCSCFSolver):\n e_tot = mc_scanner.e_average\n elif not isinstance(e_tot, float):\n if state >= mc_scanner.fcisolver.nroots:\n raise ValueError('State ID greater than the number of CASCI roots')\n e_tot = e_tot[state]\n # target at a specific state, to avoid overwriting self.state\n # in self.kernel\n ci = ci[state]\n\n self.mol = mol\n de = self.kernel(ci=ci, state=state, **kwargs)\n return e_tot, de\n return CASCI_GradScanner(mcscf_grad)\n\n\nclass Gradients(rhf_grad.GradientsMixin):\n '''Non-relativistic restricted Hartree-Fock gradients'''\n def __init__(self, mc):\n from pyscf.mcscf.addons import StateAverageMCSCFSolver\n if isinstance(mc, StateAverageMCSCFSolver):\n self.state = None # not a specific state\n else:\n self.state = 0 # of which the gradients to be computed.\n rhf_grad.GradientsMixin.__init__(self, mc)\n\n def dump_flags(self, verbose=None):\n log = logger.new_logger(self, verbose)\n log.info('\\n')\n if not self.base.converged:\n log.warn('Ground state %s not converged', self.base.__class__)\n log.info('******** %s for %s ********',\n self.__class__, self.base.__class__)\n if self.state is None:\n weights = self.base.weights\n log.info('State-average gradients over %d states with weights %s',\n len(weights), weights)\n elif self.state != 0 and self.base.fcisolver.nroots > 1:\n log.info('State ID = %d', self.state)\n log.info('max_memory %d MB (current use %d MB)',\n self.max_memory, lib.current_memory()[0])\n return self\n\n grad_elec = grad_elec\n\n def kernel(self, mo_coeff=None, ci=None, atmlst=None,\n state=None, verbose=None):\n log = logger.new_logger(self, verbose)\n if ci is None: ci = self.base.ci\n if self.state is None: # state average MCSCF calculations\n assert(state is None)\n elif isinstance(ci, (list, tuple, 
RANGE_TYPE)):\n if state is None:\n state = self.state\n else:\n self.state = state\n ci = ci[state]\n log.info('Multiple roots are found in CASCI solver. '\n 'Nuclear gradients of root %d are computed.', state)\n\n if atmlst is None:\n atmlst = self.atmlst\n else:\n self.atmlst = atmlst\n\n if self.verbose >= logger.WARN:\n self.check_sanity()\n if self.verbose >= logger.INFO:\n self.dump_flags()\n\n de = self.grad_elec(mo_coeff, ci, atmlst, log)\n self.de = de = de + self.grad_nuc(atmlst=atmlst)\n if self.mol.symmetry:\n self.de = self.symmetrize(self.de, atmlst)\n self._finalize()\n return self.de\n\n # Initialize hcore_deriv with the underlying SCF object because some\n # extensions (e.g. x2c, QM/MM, solvent) modifies the SCF object only.\n def hcore_generator(self, mol=None):\n mf_grad = self.base._scf.nuc_grad_method()\n return mf_grad.hcore_generator(mol)\n\n # Calling the underlying SCF nuclear gradients because it may be modified\n # by external modules (e.g. QM/MM, solvent)\n def grad_nuc(self, mol=None, atmlst=None):\n mf_grad = self.base._scf.nuc_grad_method()\n return mf_grad.grad_nuc(mol, atmlst)\n\n def _finalize(self):\n if self.verbose >= logger.NOTE:\n if self.state is None:\n logger.note(self, '--------- %s gradients ----------',\n self.base.__class__.__name__)\n else:\n logger.note(self, '--------- %s gradients for state %d ----------',\n self.base.__class__.__name__, self.state)\n self._write(self.mol, self.de, self.atmlst)\n logger.note(self, '----------------------------------------------')\n\n as_scanner = as_scanner\n\nGrad = Gradients\n\nfrom pyscf import mcscf\nmcscf.casci.CASCI.Gradients = lib.class_as_method(Gradients)\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import scf\n from pyscf import mcscf\n\n mol = gto.Mole()\n mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'\n mol.build()\n mf = scf.RHF(mol).run(conv_tol=1e-14)\n mc = mcscf.CASCI(mf, 4, 4).run()\n g1 = mc.Gradients().kernel()\n print(lib.finger(g1) - -0.066025991364829367)\n\n mcs = mc.as_scanner()\n mol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')\n e1 = mcs(mol)\n mol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')\n e2 = mcs(mol)\n print(g1[1,2], (e1-e2)/0.002*lib.param.BOHR)\n",
"#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import reduce\nimport unittest\nimport numpy\nfrom pyscf import gto, lib\nfrom pyscf.symm import Dmatrix, geom\n\n\nclass KnownValues(unittest.TestCase):\n def test_Dmatrix(self):\n self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(0, -.7, .5, .2)), 1, 12)\n self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(1, -.7, .5, .2)), 0.7014811805222106, 12)\n self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(2, -.7, .5, .2)), 1.247436140965072 , 12)\n self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(3, -.7, .5, .2)), 0.9226598665854279, 12)\n self.assertAlmostEqual(lib.finger(Dmatrix.Dmatrix(4, -.7, .5, .2)), -0.425143083298510, 12)\n\n def test_real_sph_vec(self):\n c0 = c = numpy.random.random(3)\n\n mol1 = gto.M(atom=['H1 0 0 0', ['H2', c]],\n basis = {'H1': [[0, (1, 1)]],\n 'H2': [[l, (1, 1)] for l in range(1,6)]})\n alpha = .2\n beta = .4\n gamma = -.3\n c1 = numpy.dot(geom.rotation_mat((0,0,1), gamma), c0)\n c1 = numpy.dot(geom.rotation_mat((0,1,0), beta), c1)\n c1 = numpy.dot(geom.rotation_mat((0,0,1), alpha), c1)\n mol2 = gto.M(atom=['H1 0 0 0', ['H2', c1]],\n basis = {'H1': [[0, (1, 1)]],\n 'H2': [[l, (1, 1)] for l in range(1,6)]})\n\n for l in range(1, 6):\n s1 = mol1.intor('int1e_ovlp', shls_slice=(0,1,l,l+1))\n s2 = mol2.intor('int1e_ovlp', shls_slice=(0,1,l,l+1))\n\n # Rotating a basis is equivalent to an inversed rotation over the axes.\n # The Eular angles that rotates molecule to a new geometry (axes\n # transformation) corresponds to the inversed rotation over basis.\n #r = small_dmatrix(l, -beta, reorder_p=True)\n r = Dmatrix.Dmatrix(l, -gamma, -beta, -alpha, reorder_p=True)\n self.assertAlmostEqual(abs(numpy.dot(s1, r) - s2).max(), 0, 12)\n\n def test_euler_angles(self):\n c0 = numpy.random.random(3)\n c2 = numpy.random.random(3)\n self.assertRaises(AssertionError, Dmatrix.get_euler_angles, c0, c2)\n\n c0 /= numpy.linalg.norm(c0)\n c2 /= numpy.linalg.norm(c2)\n alpha, beta, gamma = Dmatrix.get_euler_angles(c0, c2)\n c1 = numpy.dot(geom.rotation_mat((0,0,1), gamma), c0)\n c1 = numpy.dot(geom.rotation_mat((0,1,0), beta), c1)\n c1 = numpy.dot(geom.rotation_mat((0,0,1), alpha), c1)\n self.assertAlmostEqual(abs(c2 - c1).max(), 0, 12)\n\n # transform coordinates\n numpy.random.seed(1)\n u, w, vh = numpy.linalg.svd(numpy.random.random((3,3)))\n c1 = u.dot(vh)\n u, w, vh = numpy.linalg.svd(c1+2*numpy.random.random((3,3)))\n c2 = u.dot(vh)\n alpha, beta, gamma = Dmatrix.get_euler_angles(c1, c2)\n yp = numpy.einsum('j,kj->k', c1[1], geom.rotation_mat(c1[2], alpha))\n tmp = numpy.einsum('ij,kj->ik', c1 , geom.rotation_mat(c1[2], alpha))\n tmp = numpy.einsum('ij,kj->ik', tmp, geom.rotation_mat(yp , beta ))\n c2p = numpy.einsum('ij,kj->ik', tmp, geom.rotation_mat(c2[2], gamma))\n self.assertAlmostEqual((c2-c2p).max(), 0, 13)\n\n\nif __name__ == \"__main__\":\n print(\"Full Tests for Dmatrix\")\n unittest.main()\n\n",
"#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ctypes\nimport unittest\nfrom functools import reduce\nimport tempfile\nimport numpy\nimport h5py\nfrom pyscf import lib\nfrom pyscf import gto\nfrom pyscf import ao2mo\n\nmol = gto.Mole()\nmol.verbose = 0\nmol.atom = '''\n o 0 0. 0\n h 0 -0.757 0.587\n h 0 0.757 0.587'''\nmol.basis = 'cc-pvdz'\nmol.build()\nnao = mol.nao_nr()\neri = mol.intor('int2e_sph', aosym='s8')\n\ndef tearDownModule():\n global mol, eri\n del mol, eri\n\ndef trans(eri, mos):\n nao = mos[0].shape[0]\n eriref = ao2mo.restore(1, eri, nao)\n eriref = lib.einsum('pjkl,pi->ijkl', eriref, mos[0].conj())\n eriref = lib.einsum('ipkl,pj->ijkl', eriref, mos[1])\n eriref = lib.einsum('ijpl,pk->ijkl', eriref, mos[2].conj())\n eriref = lib.einsum('ijkp,pl->ijkl', eriref, mos[3])\n return eriref\n\nclass KnownValues(unittest.TestCase):\n def test_incore(self):\n numpy.random.seed(15)\n nmo = 12\n mo = numpy.random.random((nao,nmo))\n eriref = trans(eri, [mo]*4)\n\n eri1 = ao2mo.incore.full(ao2mo.restore(8,eri,nao), mo)\n self.assertTrue(numpy.allclose(ao2mo.restore(1,eri1,nmo), eriref))\n eri1 = ao2mo.incore.full(ao2mo.restore(4,eri,nao), mo, compact=False)\n self.assertTrue(numpy.allclose(eri1.reshape((nmo,)*4), eriref))\n\n eri1 = ao2mo.incore.general(eri, (mo[:,:2], mo[:,1:3], mo[:,:3], mo[:,2:5]))\n eri1 = eri1.reshape(2,2,3,3)\n self.assertTrue(numpy.allclose(eri1, eriref[:2,1:3,:3,2:5]))\n\n# eri_ao = ao2mo.restore('s2ij', eri, nao)\n# eri1 = ao2mo.incore.general(eri_ao, (mo[:,:3], mo[:,1:3], mo[:,:3], mo[:,2:5]))\n# eri1 = eri1.reshape(3,2,3,3)\n# self.assertTrue(numpy.allclose(eri1, eriref[:3,1:3,:3,2:5]))\n\n eri_ao = ao2mo.restore(1, eri, nao)\n eri1 = ao2mo.incore.general(eri_ao, (mo[:,:3], mo[:,1:3], mo[:,:3], mo[:,2:5]))\n eri1 = eri1.reshape(3,2,3,3)\n self.assertTrue(numpy.allclose(eri1, eriref[:3,1:3,:3,2:5]))\n\n eri1 = ao2mo.incore.full(eri, mo[:,:0])\n self.assertTrue(eri1.size == 0)\n\n def test_incore_eri_s4(self):\n numpy.random.seed(1)\n norb = 4\n\n # A 4-index eri with 4-fold symmetry\n h2_s1 = numpy.random.random((norb, norb, norb, norb))\n h2_s1 = h2_s1 + h2_s1.transpose(1,0,2,3)\n h2_s1 = h2_s1 + h2_s1.transpose(0,1,3,2)\n\n # pack the eri to 2-index\n h2_s4 = ao2mo.restore(4, h2_s1, norb)\n\n mos = numpy.random.random((4,norb,norb-1))\n eri_mo_from_s4 = ao2mo.general(h2_s4, mos)\n eri_ref = trans(h2_s4, mos).reshape(eri_mo_from_s4.shape)\n\n self.assertAlmostEqual(abs(eri_mo_from_s4 - eri_ref).max(), 0, 12)\n\n\nif __name__ == '__main__':\n print('Full Tests for ao2mo.incore')\n unittest.main()\n\n\n",
"#!/usr/bin/env python\n# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy\nfrom pyscf import lib\n\nclass KnownValues(unittest.TestCase):\n def test_call_in_background_skip(self):\n def bg_raise():\n def raise1():\n raise ValueError\n\n with lib.call_in_background(raise1) as f:\n f()\n\n raise IndexError\n\n self.assertRaises(lib.ThreadRuntimeError, bg_raise)\n\n def test_index_tril_to_pair(self):\n i_j = (numpy.random.random((2,30)) * 100).astype(int)\n i0 = numpy.max(i_j, axis=0)\n j0 = numpy.min(i_j, axis=0)\n ij = i0 * (i0+1) // 2 + j0\n i1, j1 = lib.index_tril_to_pair(ij)\n self.assertTrue(numpy.all(i0 == i1))\n self.assertTrue(numpy.all(j0 == j1))\n\n def test_class_as_method(self):\n class A:\n def f1(self):\n return 'a'\n f2 = lib.alias(f1)\n class B(A):\n def f1(self):\n return 'b'\n b = B()\n self.assertEqual(b.f2(), 'b')\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"#!/usr/bin/env python\n# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom functools import reduce\nimport numpy\nfrom pyscf import lib\nfrom pyscf import gto\nfrom pyscf import scf\nfrom pyscf import ao2mo\nfrom pyscf import mp\n\nmol = gto.Mole()\nmol.verbose = 7\nmol.output = '/dev/null'\nmol.atom = [\n [8 , (0. , 0. , 0.)],\n [1 , (0. , -0.757 , 0.587)],\n [1 , (0. , 0.757 , 0.587)]]\nmol.basis = 'cc-pvdz'\nmol.spin = 2\nmol.build()\nmf = scf.UHF(mol)\nmf.conv_tol = 1e-14\nmf.scf()\n\ndef tearDownModule():\n global mol, mf\n del mol, mf\n\n\nclass KnownValues(unittest.TestCase):\n def test_ump2(self):\n pt = mp.MP2(mf)\n emp2, t2 = pt.kernel(mf.mo_energy, mf.mo_coeff)\n self.assertAlmostEqual(emp2, -0.16575150552336643, 9)\n\n pt.max_memory = 1\n pt.frozen = None\n emp2, t2 = pt.kernel()\n self.assertAlmostEqual(emp2, -0.16575150552336643, 9)\n\n def test_ump2_dm(self):\n pt = mp.MP2(mf)\n emp2, t2 = pt.kernel()\n dm1 = pt.make_rdm1()\n dm2 = pt.make_rdm2()\n gpt = mp.GMP2(mf).run()\n dm1ref = gpt.make_rdm1()\n dm2ref = gpt.make_rdm2()\n ia = gpt._scf.mo_coeff.orbspin == 0\n ib = gpt._scf.mo_coeff.orbspin == 1\n mo_a, mo_b = mf.mo_coeff\n nmoa = mo_a.shape[1]\n nmob = mo_b.shape[1]\n nocca, noccb = mol.nelec\n\n self.assertTrue(numpy.allclose(dm1[0], dm1ref[ia][:,ia]))\n self.assertTrue(numpy.allclose(dm1[1], dm1ref[ib][:,ib]))\n self.assertTrue(numpy.allclose(dm2[0], dm2ref[ia][:,ia][:,:,ia][:,:,:,ia]))\n self.assertTrue(numpy.allclose(dm2[2], dm2ref[ib][:,ib][:,:,ib][:,:,:,ib]))\n self.assertTrue(numpy.allclose(dm2[1], dm2ref[ia][:,ia][:,:,ib][:,:,:,ib]))\n\n hcore = mf.get_hcore()\n eriaa = ao2mo.kernel(mf._eri, mo_a, compact=False).reshape([nmoa]*4)\n eribb = ao2mo.kernel(mf._eri, mo_b, compact=False).reshape([nmob]*4)\n eriab = ao2mo.kernel(mf._eri, (mo_a,mo_a,mo_b,mo_b), compact=False)\n eriab = eriab.reshape([nmoa,nmoa,nmob,nmob])\n h1a = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))\n h1b = reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))\n e1 = numpy.einsum('ij,ji', h1a, dm1[0])\n e1+= numpy.einsum('ij,ji', h1b, dm1[1])\n e1+= numpy.einsum('ijkl,ijkl', eriaa, dm2[0]) * .5\n e1+= numpy.einsum('ijkl,ijkl', eriab, dm2[1])\n e1+= numpy.einsum('ijkl,ijkl', eribb, dm2[2]) * .5\n e1+= mol.energy_nuc()\n self.assertAlmostEqual(e1, pt.e_tot, 9)\n\n vhf = mf.get_veff(mol, mf.make_rdm1())\n h1a = reduce(numpy.dot, (mo_a.T, hcore+vhf[0], mo_a))\n h1b = reduce(numpy.dot, (mo_b.T, hcore+vhf[1], mo_b))\n dm1[0][numpy.diag_indices(nocca)] -= 1\n dm1[1][numpy.diag_indices(noccb)] -= 1\n e = numpy.einsum('pq,qp', h1a, dm1[0])\n e+= numpy.einsum('pq,qp', h1b, dm1[1])\n self.assertAlmostEqual(e, -emp2, 9)\n\n def test_ump2_contract_eri_dm(self):\n pt = mp.MP2(mf)\n pt.frozen = [[0,1,2,3],[1]]\n emp2, t2 = pt.kernel()\n mo_a, mo_b = mf.mo_coeff\n nmoa = mo_a.shape[1]\n nmob = mo_b.shape[1]\n dm1a,dm1b = pt.make_rdm1()\n dm2aa,dm2ab,dm2bb = pt.make_rdm2()\n eriaa = ao2mo.kernel(mf._eri, mo_a, 
compact=False).reshape([nmoa]*4)\n eribb = ao2mo.kernel(mf._eri, mo_b, compact=False).reshape([nmob]*4)\n eriab = ao2mo.kernel(mf._eri, (mo_a,mo_a,mo_b,mo_b), compact=False)\n eriab = eriab.reshape([nmoa,nmoa,nmob,nmob])\n hcore = mf.get_hcore()\n h1a = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))\n h1b = reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))\n e1 = numpy.einsum('ij,ji', h1a, dm1a)\n e1+= numpy.einsum('ij,ji', h1b, dm1b)\n e1+= numpy.einsum('ijkl,ijkl', eriaa, dm2aa) * .5\n e1+= numpy.einsum('ijkl,ijkl', eriab, dm2ab)\n e1+= numpy.einsum('ijkl,ijkl', eribb, dm2bb) * .5\n e1+= mol.energy_nuc()\n self.assertAlmostEqual(e1, pt.e_tot, 9)\n\n def test_ump2_frozen(self):\n pt = mp.MP2(mf)\n pt.frozen = [1]\n pt.kernel(with_t2=False)\n self.assertAlmostEqual(pt.emp2, -0.11202141654451162, 9)\n\n def test_ump2_outcore_frozen(self):\n pt = mp.MP2(mf)\n pt.max_memory = 0\n pt.nmo = (12, 11)\n pt.frozen = [[4,5],[2,3]]\n e = pt.kernel(with_t2=False)[0]\n self.assertAlmostEqual(e, -0.033400699456971966, 9)\n\n pt = mp.MP2(mf)\n pt.nmo = (12, 11)\n pt.nocc = (4, 2)\n e = pt.kernel(with_t2=False)[0]\n self.assertAlmostEqual(e, -0.033400699456971966, 9)\n\n def test_ump2_with_df(self):\n pt = mp.ump2.UMP2(mf.density_fit('weigend'))\n pt.frozen = [1]\n e = pt.kernel(with_t2=False)[0]\n self.assertAlmostEqual(e, -0.11264162733420097, 9)\n\n #pt = mp.dfump2.DFUMP2(mf.density_fit('weigend'))\n #pt.frozen = [1]\n #e = pt.kernel()[0]\n #self.assertAlmostEqual(e, -0.11264162733420097, 9)\n\n #pt = mp.dfump2.DFUMP2(mf)\n #pt.frozen = [1]\n #pt.with_df = mf.density_fit('weigend').with_df\n #e = pt.kernel()[0]\n #self.assertAlmostEqual(e, -0.11264162733420097, 9)\n\n def test_ump2_ao2mo_ovov(self):\n pt = mp.UMP2(mf)\n nocca, noccb = mol.nelec\n orboa = mf.mo_coeff[0][:,:nocca]\n orbva = mf.mo_coeff[0][:,nocca:]\n orbob = mf.mo_coeff[1][:,:noccb]\n orbvb = mf.mo_coeff[1][:,noccb:]\n orbs = (orboa, orbva, orbob, orbvb)\n ftmp = lib.H5TmpFile()\n mp.ump2._ao2mo_ovov(pt, orbs, ftmp, 1)\n ovov = numpy.asarray(ftmp['ovov'])\n ovOV = numpy.asarray(ftmp['ovOV'])\n OVOV = numpy.asarray(ftmp['OVOV'])\n ovov_ref = ao2mo.general(mf._eri, (orboa,orbva,orboa,orbva))\n ovOV_ref = ao2mo.general(mf._eri, (orboa,orbva,orbob,orbvb))\n OVOV_ref = ao2mo.general(mf._eri, (orbob,orbvb,orbob,orbvb))\n self.assertAlmostEqual(numpy.linalg.norm(ovov_ref-ovov), 0, 9)\n self.assertAlmostEqual(numpy.linalg.norm(ovOV_ref-ovOV), 0, 9)\n self.assertAlmostEqual(numpy.linalg.norm(OVOV_ref-OVOV), 0, 9)\n\n def test_ump2_with_ao2mofn(self):\n pt = mp.ump2.UMP2(mf)\n mf_df = mf.density_fit('weigend')\n ao2mofn = mf_df.with_df.ao2mo\n pt.ao2mo = lambda *args: mp.ump2._make_eris(pt, *args, ao2mofn=ao2mofn)\n e1 = pt.kernel()[0]\n pt = mp.ump2.UMP2(mf.density_fit('weigend'))\n e2 = pt.kernel()[0]\n self.assertAlmostEqual(e1, e2, 9)\n\n def test_rdm_complex(self):\n mol = gto.M()\n mol.verbose = 0\n nocca,noccb = 3,2\n nvira,nvirb = 4,5\n mf = scf.UHF(mol)\n nmo = nocca + nvira\n numpy.random.seed(1)\n eri_aa = (numpy.random.random((nmo,nmo,nmo,nmo)) +\n numpy.random.random((nmo,nmo,nmo,nmo))* 1j - (.5+.5j))\n eri_aa = eri_aa + eri_aa.transpose(1,0,3,2).conj()\n eri_aa = eri_aa + eri_aa.transpose(2,3,0,1)\n eri_aa *= .1\n eri_bb = (numpy.random.random((nmo,nmo,nmo,nmo)) +\n numpy.random.random((nmo,nmo,nmo,nmo))* 1j - (.5+.5j))\n eri_bb = eri_bb + eri_bb.transpose(1,0,3,2).conj()\n eri_bb = eri_bb + eri_bb.transpose(2,3,0,1)\n eri_bb *= .1\n eri_ab = (numpy.random.random((nmo,nmo,nmo,nmo)) +\n numpy.random.random((nmo,nmo,nmo,nmo))* 1j - 
(.5+.5j))\n eri_ab = eri_ab + eri_ab.transpose(1,0,3,2).conj()\n eri_ab *= .1\n\n eris = lambda: None\n eris.ovov = eri_aa[:nocca,nocca:,:nocca,nocca:].reshape(nocca*nvira,nocca*nvira)\n eris.OVOV = eri_bb[:noccb,noccb:,:noccb,noccb:].reshape(noccb*nvirb,noccb*nvirb)\n eris.ovOV = eri_ab[:nocca,nocca:,:noccb,noccb:].reshape(nocca*nvira,noccb*nvirb)\n\n mo_energy = [numpy.arange(nmo), numpy.arange(nmo)+.1]\n mo_occ = numpy.zeros((2,nmo))\n mo_occ[0,:nocca] = 1\n mo_occ[1,:noccb] = 1\n dm = [numpy.diag(mo_occ[0]), numpy.diag(mo_occ[1])]\n vja = numpy.einsum('ijkl,lk->ij', eri_aa, dm[0])\n vja+= numpy.einsum('ijkl,lk->ij', eri_ab, dm[1])\n vjb = numpy.einsum('ijkl,lk->ij', eri_bb, dm[1])\n vjb+= numpy.einsum('klij,lk->ij', eri_ab, dm[0])\n vka = numpy.einsum('ijkl,jk->il', eri_aa, dm[0])\n vkb = numpy.einsum('ijkl,jk->il', eri_bb, dm[1])\n vhf = (vja - vka, vjb - vkb)\n hcore = (numpy.diag(mo_energy[0]) - vhf[0],\n numpy.diag(mo_energy[1]) - vhf[1])\n mf.get_hcore = lambda *args: hcore\n mf.get_ovlp = lambda *args: numpy.eye(nmo)\n eris.mo_energy = mf.mo_energy = mo_energy\n mf.mo_coeff = [numpy.eye(nmo)]*2\n mf.mo_occ = mo_occ\n mf.e_tot = numpy.einsum('ij,ji', hcore[0], dm[0])\n mf.e_tot+= numpy.einsum('ij,ji', hcore[1], dm[1])\n mf.e_tot+= numpy.einsum('ij,ji', vhf[0], dm[0]) * .5\n mf.e_tot+= numpy.einsum('ij,ji', vhf[1], dm[1]) * .5\n mf.converged = True\n pt = mp.MP2(mf)\n pt.ao2mo = lambda *args, **kwargs: eris\n pt.kernel(eris=eris)\n dm1 = pt.make_rdm1()\n dm2 = pt.make_rdm2()\n\n e1 = numpy.einsum('ij,ji', hcore[0], dm1[0])\n e1+= numpy.einsum('ij,ji', hcore[1], dm1[1])\n e1+= numpy.einsum('ijkl,ijkl', eri_aa, dm2[0]) * .5\n e1+= numpy.einsum('ijkl,ijkl', eri_ab, dm2[1])\n e1+= numpy.einsum('ijkl,ijkl', eri_bb, dm2[2]) * .5\n self.assertAlmostEqual(e1, pt.e_tot, 12)\n\n self.assertAlmostEqual(abs(dm2[0]-dm2[0].transpose(1,0,3,2).conj()).max(), 0, 9)\n self.assertAlmostEqual(abs(dm2[0]-dm2[0].transpose(2,3,0,1) ).max(), 0, 9)\n self.assertAlmostEqual(abs(dm2[1]-dm2[1].transpose(1,0,3,2).conj()).max(), 0, 9)\n self.assertAlmostEqual(abs(dm2[2]-dm2[2].transpose(1,0,3,2).conj()).max(), 0, 9)\n self.assertAlmostEqual(abs(dm2[2]-dm2[2].transpose(2,3,0,1) ).max(), 0, 9)\n\n def test_non_canonical_mp2(self):\n mf = scf.UHF(mol).run(max_cycle=1)\n pt = mp.MP2(mf)\n self.assertAlmostEqual(pt.kernel()[0], -0.171693954168, 7)\n\n\n\nif __name__ == \"__main__\":\n print(\"Full Tests for mp2\")\n unittest.main()\n\n",
"#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors: Artem Pulkin, pyscf authors\n\nfrom pyscf.pbc.lib.kpts_helper import VectorSplitter, VectorComposer\nfrom pyscf.pbc.mp.kmp2 import padding_k_idx\nfrom pyscf.pbc.cc import kccsd_rhf\n\nimport numpy as np\n\n\ndef iter_12(cc_or_eom, k):\n \"\"\"Iterates over IP index slices.\"\"\"\n if isinstance(cc_or_eom, kccsd_rhf.RCCSD):\n cc = cc_or_eom\n else:\n cc = cc_or_eom._cc\n o, v = padding_k_idx(cc, kind=\"split\")\n kconserv = cc.khelper.kconserv\n\n yield (o[k],)\n\n for ki in range(cc.nkpts):\n for kj in range(cc.nkpts):\n kb = kconserv[ki, k, kj]\n yield (ki,), (kj,), o[ki], o[kj], v[kb]\n\n\ndef amplitudes_to_vector(cc_or_eom, t1, t2, kshift=0, kconserv=None):\n \"\"\"IP amplitudes to vector.\"\"\"\n itr = iter_12(cc_or_eom, kshift)\n t1, t2 = np.asarray(t1), np.asarray(t2)\n\n vc = VectorComposer(t1.dtype)\n vc.put(t1[np.ix_(*next(itr))])\n for slc in itr:\n vc.put(t2[np.ix_(*slc)])\n return vc.flush()\n\n\ndef vector_to_amplitudes(cc_or_eom, vec, kshift=0):\n \"\"\"IP vector to apmplitudes.\"\"\"\n expected_vs = vector_size(cc_or_eom, kshift)\n if expected_vs != len(vec):\n raise ValueError(\"The size of the vector passed {:d} should be exactly {:d}\".format(len(vec), expected_vs))\n\n itr = iter_12(cc_or_eom, kshift)\n\n nocc = cc_or_eom.nocc\n nmo = cc_or_eom.nmo\n nkpts = cc_or_eom.nkpts\n\n vs = VectorSplitter(vec)\n r1 = vs.get(nocc, slc=next(itr))\n r2 = np.zeros((nkpts, nkpts, nocc, nocc, nmo - nocc), vec.dtype)\n for slc in itr:\n vs.get(r2, slc=slc)\n return r1, r2\n\n\ndef vector_size(cc_or_eom, kshift=0):\n \"\"\"The total number of elements in IP vector.\"\"\"\n size = 0\n for slc in iter_12(cc_or_eom, kshift):\n size += np.prod(tuple(len(i) for i in slc))\n return size\n",
"#!/usr/bin/env python\n# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nGenerate SCF response functions\n'''\n\nimport numpy\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.scf import hf, rohf, uhf, ghf, dhf\n\ndef _gen_rhf_response(mf, mo_coeff=None, mo_occ=None,\n singlet=None, hermi=0, max_memory=None):\n '''Generate a function to compute the product of RHF response function and\n RHF density matrices.\n\n Kwargs:\n singlet (None or boolean) : If singlet is None, response function for\n orbital hessian or CPHF will be generated. If singlet is boolean,\n it is used in TDDFT response kernel.\n '''\n assert(not isinstance(mf, (uhf.UHF, rohf.ROHF)))\n\n if mo_coeff is None: mo_coeff = mf.mo_coeff\n if mo_occ is None: mo_occ = mf.mo_occ\n mol = mf.mol\n if _is_dft_object(mf):\n from pyscf.dft import numint\n ni = mf._numint\n ni.libxc.test_deriv_order(mf.xc, 2, raise_error=True)\n if getattr(mf, 'nlc', '') != '':\n logger.warn(mf, 'NLC functional found in DFT object. Its second '\n 'deriviative is not available. Its contribution is '\n 'not included in the response function.')\n omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, mol.spin)\n hybrid = abs(hyb) > 1e-10\n\n # mf can be pbc.dft.RKS object with multigrid\n if (not hybrid and\n 'MultiGridFFTDF' == getattr(mf, 'with_df', None).__class__.__name__):\n from pyscf.pbc.dft import multigrid\n dm0 = mf.make_rdm1(mo_coeff, mo_occ)\n return multigrid._gen_rhf_response(mf, dm0, singlet, hermi)\n\n if singlet is None:\n # for ground state orbital hessian\n rho0, vxc, fxc = ni.cache_xc_kernel(mol, mf.grids, mf.xc,\n mo_coeff, mo_occ, 0)\n else:\n rho0, vxc, fxc = ni.cache_xc_kernel(mol, mf.grids, mf.xc,\n [mo_coeff]*2, [mo_occ*.5]*2, spin=1)\n dm0 = None #mf.make_rdm1(mo_coeff, mo_occ)\n\n if max_memory is None:\n mem_now = lib.current_memory()[0]\n max_memory = max(2000, mf.max_memory*.8-mem_now)\n\n if singlet is None:\n # Without specify singlet, used in ground state orbital hessian\n def vind(dm1):\n # The singlet hessian\n if hermi == 2:\n v1 = numpy.zeros_like(dm1)\n else:\n v1 = ni.nr_rks_fxc(mol, mf.grids, mf.xc, dm0, dm1, 0, hermi,\n rho0, vxc, fxc, max_memory=max_memory)\n if hybrid:\n if hermi != 2:\n vj, vk = mf.get_jk(mol, dm1, hermi=hermi)\n vk *= hyb\n if omega > 1e-10: # For range separated Coulomb\n vk += mf.get_k(mol, dm1, hermi, omega) * (alpha-hyb)\n v1 += vj - .5 * vk\n else:\n v1 -= .5 * hyb * mf.get_k(mol, dm1, hermi=hermi)\n elif hermi != 2:\n v1 += mf.get_j(mol, dm1, hermi=hermi)\n return v1\n\n elif singlet:\n def vind(dm1):\n if hermi == 2:\n v1 = numpy.zeros_like(dm1)\n else:\n # nr_rks_fxc_st requires alpha of dm1, dm1*.5 should be scaled\n v1 = numint.nr_rks_fxc_st(ni, mol, mf.grids, mf.xc, dm0, dm1, 0,\n True, rho0, vxc, fxc,\n max_memory=max_memory)\n v1 *= .5\n if hybrid:\n if hermi != 2:\n vj, vk = mf.get_jk(mol, dm1, hermi=hermi)\n vk *= hyb\n if omega > 1e-10: # For range 
separated Coulomb\n vk += mf.get_k(mol, dm1, hermi, omega) * (alpha-hyb)\n v1 += vj - .5 * vk\n else:\n v1 -= .5 * hyb * mf.get_k(mol, dm1, hermi=hermi)\n elif hermi != 2:\n v1 += mf.get_j(mol, dm1, hermi=hermi)\n return v1\n else: # triplet\n def vind(dm1):\n if hermi == 2:\n v1 = numpy.zeros_like(dm1)\n else:\n # nr_rks_fxc_st requires alpha of dm1, dm1*.5 should be scaled\n v1 = numint.nr_rks_fxc_st(ni, mol, mf.grids, mf.xc, dm0, dm1, 0,\n False, rho0, vxc, fxc,\n max_memory=max_memory)\n v1 *= .5\n if hybrid:\n vk = mf.get_k(mol, dm1, hermi=hermi)\n vk *= hyb\n if omega > 1e-10: # For range separated Coulomb\n vk += mf.get_k(mol, dm1, hermi, omega) * (alpha-hyb)\n v1 += -.5 * vk\n return v1\n\n else: # HF\n if (singlet is None or singlet) and hermi != 2:\n def vind(dm1):\n vj, vk = mf.get_jk(mol, dm1, hermi=hermi)\n return vj - .5 * vk\n else:\n def vind(dm1):\n return -.5 * mf.get_k(mol, dm1, hermi=hermi)\n\n return vind\n\n\ndef _gen_uhf_response(mf, mo_coeff=None, mo_occ=None,\n with_j=True, hermi=0, max_memory=None):\n '''Generate a function to compute the product of UHF response function and\n UHF density matrices.\n '''\n if mo_coeff is None: mo_coeff = mf.mo_coeff\n if mo_occ is None: mo_occ = mf.mo_occ\n mol = mf.mol\n if _is_dft_object(mf):\n ni = mf._numint\n ni.libxc.test_deriv_order(mf.xc, 2, raise_error=True)\n if getattr(mf, 'nlc', '') != '':\n logger.warn(mf, 'NLC functional found in DFT object. Its second '\n 'deriviative is not available. Its contribution is '\n 'not included in the response function.')\n omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, mol.spin)\n hybrid = abs(hyb) > 1e-10\n\n # mf can be pbc.dft.UKS object with multigrid\n if (not hybrid and\n 'MultiGridFFTDF' == getattr(mf, 'with_df', None).__class__.__name__):\n from pyscf.pbc.dft import multigrid\n dm0 = mf.make_rdm1(mo_coeff, mo_occ)\n return multigrid._gen_uhf_response(mf, dm0, with_j, hermi)\n\n rho0, vxc, fxc = ni.cache_xc_kernel(mol, mf.grids, mf.xc,\n mo_coeff, mo_occ, 1)\n #dm0 =(numpy.dot(mo_coeff[0]*mo_occ[0], mo_coeff[0].T.conj()),\n # numpy.dot(mo_coeff[1]*mo_occ[1], mo_coeff[1].T.conj()))\n dm0 = None\n\n if max_memory is None:\n mem_now = lib.current_memory()[0]\n max_memory = max(2000, mf.max_memory*.8-mem_now)\n\n def vind(dm1):\n if hermi == 2:\n v1 = numpy.zeros_like(dm1)\n else:\n v1 = ni.nr_uks_fxc(mol, mf.grids, mf.xc, dm0, dm1, 0, hermi,\n rho0, vxc, fxc, max_memory=max_memory)\n if not hybrid:\n if with_j:\n vj = mf.get_j(mol, dm1, hermi=hermi)\n v1 += vj[0] + vj[1]\n else:\n if with_j:\n vj, vk = mf.get_jk(mol, dm1, hermi=hermi)\n vk *= hyb\n if omega > 1e-10: # For range separated Coulomb\n vk += mf.get_k(mol, dm1, hermi, omega) * (alpha-hyb)\n v1 += vj[0] + vj[1] - vk\n else:\n vk = mf.get_k(mol, dm1, hermi=hermi)\n vk *= hyb\n if omega > 1e-10: # For range separated Coulomb\n vk += mf.get_k(mol, dm1, hermi, omega) * (alpha-hyb)\n v1 -= vk\n return v1\n\n elif with_j:\n def vind(dm1):\n vj, vk = mf.get_jk(mol, dm1, hermi=hermi)\n v1 = vj[0] + vj[1] - vk\n return v1\n\n else:\n def vind(dm1):\n return -mf.get_k(mol, dm1, hermi=hermi)\n\n return vind\n\n\ndef _gen_ghf_response(mf, mo_coeff=None, mo_occ=None,\n with_j=True, hermi=0, max_memory=None):\n '''Generate a function to compute the product of GHF response function and\n GHF density matrices.\n '''\n if mo_coeff is None: mo_coeff = mf.mo_coeff\n if mo_occ is None: mo_occ = mf.mo_occ\n mol = mf.mol\n if _is_dft_object(mf):\n raise NotImplementedError\n\n elif with_j:\n def vind(dm1):\n vj, vk = mf.get_jk(mol, dm1, 
hermi=hermi)\n return vj - vk\n\n else:\n def vind(dm1):\n return -mf.get_k(mol, dm1, hermi=hermi)\n\n return vind\n\n\ndef _gen_dhf_response(mf, mo_coeff=None, mo_occ=None,\n with_j=True, hermi=0, max_memory=None):\n '''Generate a function to compute the product of DHF response function and\n DHF density matrices.\n '''\n if mo_coeff is None: mo_coeff = mf.mo_coeff\n if mo_occ is None: mo_occ = mf.mo_occ\n mol = mf.mol\n if _is_dft_object(mf):\n raise NotImplementedError\n\n elif with_j:\n def vind(dm1):\n vj, vk = mf.get_jk(mol, dm1, hermi=hermi)\n return vj - vk\n\n else:\n def vind(dm1):\n return -mf.get_k(mol, dm1, hermi=hermi)\n\n return vind\n\n\ndef _is_dft_object(mf):\n return getattr(mf, 'xc', None) is not None and hasattr(mf, '_numint')\n\n\nhf.RHF.gen_response = _gen_rhf_response\nuhf.UHF.gen_response = _gen_uhf_response\nghf.GHF.gen_response = _gen_ghf_response\n# Use UHF response function for ROHF because in second order solver uhf\n# response function is called to compute ROHF orbital hessian\nrohf.ROHF.gen_response = _gen_uhf_response\ndhf.DHF.gen_response = _gen_dhf_response\n",
"#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nNon-relativistic Unrestricted Kohn-Sham\n'''\n\n\nimport numpy\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.scf import uhf\nfrom pyscf.dft import rks\n\ndef get_veff(ks, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):\n '''Coulomb + XC functional for UKS. See pyscf/dft/rks.py\n :func:`get_veff` fore more details.\n '''\n if mol is None: mol = ks.mol\n if dm is None: dm = ks.make_rdm1()\n if not isinstance(dm, numpy.ndarray):\n dm = numpy.asarray(dm)\n if dm.ndim == 2: # RHF DM\n dm = numpy.asarray((dm*.5,dm*.5))\n ground_state = (dm.ndim == 3 and dm.shape[0] == 2)\n\n t0 = (logger.process_clock(), logger.perf_counter())\n\n if ks.grids.coords is None:\n ks.grids.build(with_non0tab=True)\n if ks.small_rho_cutoff > 1e-20 and ground_state:\n ks.grids = rks.prune_small_rho_grids_(ks, mol, dm[0]+dm[1], ks.grids)\n t0 = logger.timer(ks, 'setting up grids', *t0)\n if ks.nlc != '':\n if ks.nlcgrids.coords is None:\n ks.nlcgrids.build(with_non0tab=True)\n if ks.small_rho_cutoff > 1e-20 and ground_state:\n ks.nlcgrids = rks.prune_small_rho_grids_(ks, mol, dm[0]+dm[1], ks.nlcgrids)\n t0 = logger.timer(ks, 'setting up nlc grids', *t0)\n\n ni = ks._numint\n if hermi == 2: # because rho = 0\n n, exc, vxc = (0,0), 0, 0\n else:\n max_memory = ks.max_memory - lib.current_memory()[0]\n n, exc, vxc = ni.nr_uks(mol, ks.grids, ks.xc, dm, max_memory=max_memory)\n if ks.nlc:\n assert 'VV10' in ks.nlc.upper()\n _, enlc, vnlc = ni.nr_rks(mol, ks.nlcgrids, ks.xc+'__'+ks.nlc, dm[0]+dm[1],\n max_memory=max_memory)\n exc += enlc\n vxc += vnlc\n logger.debug(ks, 'nelec by numeric integration = %s', n)\n t0 = logger.timer(ks, 'vxc', *t0)\n\n #enabling range-separated hybrids\n omega, alpha, hyb = ni.rsh_and_hybrid_coeff(ks.xc, spin=mol.spin)\n\n if abs(hyb) < 1e-10 and abs(alpha) < 1e-10:\n vk = None\n if (ks._eri is None and ks.direct_scf and\n getattr(vhf_last, 'vj', None) is not None):\n ddm = numpy.asarray(dm) - numpy.asarray(dm_last)\n vj = ks.get_j(mol, ddm[0]+ddm[1], hermi)\n vj += vhf_last.vj\n else:\n vj = ks.get_j(mol, dm[0]+dm[1], hermi)\n vxc += vj\n else:\n if (ks._eri is None and ks.direct_scf and\n getattr(vhf_last, 'vk', None) is not None):\n ddm = numpy.asarray(dm) - numpy.asarray(dm_last)\n vj, vk = ks.get_jk(mol, ddm, hermi)\n vk *= hyb\n if abs(omega) > 1e-10:\n vklr = ks.get_k(mol, ddm, hermi, omega)\n vklr *= (alpha - hyb)\n vk += vklr\n vj = vj[0] + vj[1] + vhf_last.vj\n vk += vhf_last.vk\n else:\n vj, vk = ks.get_jk(mol, dm, hermi)\n vj = vj[0] + vj[1]\n vk *= hyb\n if abs(omega) > 1e-10:\n vklr = ks.get_k(mol, dm, hermi, omega)\n vklr *= (alpha - hyb)\n vk += vklr\n vxc += vj - vk\n\n if ground_state:\n exc -=(numpy.einsum('ij,ji', dm[0], vk[0]).real +\n numpy.einsum('ij,ji', dm[1], vk[1]).real) * .5\n if ground_state:\n ecoul = numpy.einsum('ij,ji', dm[0]+dm[1], vj).real * .5\n 
else:\n ecoul = None\n\n vxc = lib.tag_array(vxc, ecoul=ecoul, exc=exc, vj=vj, vk=vk)\n return vxc\n\ndef get_vsap(ks, mol=None):\n '''Superposition of atomic potentials\n\n S. Lehtola, Assessment of initial guesses for self-consistent\n field calculations. Superposition of Atomic Potentials: simple yet\n efficient, J. Chem. Theory Comput. 15, 1593 (2019). DOI:\n 10.1021/acs.jctc.8b01089. arXiv:1810.11659.\n\n This function evaluates the effective charge of a neutral atom,\n given by exchange-only LDA on top of spherically symmetric\n unrestricted Hartree-Fock calculations as described in\n\n S. Lehtola, L. Visscher, E. Engel, Efficient implementation of the\n superposition of atomic potentials initial guess for electronic\n structure calculations in Gaussian basis sets, J. Chem. Phys., in\n press (2020).\n\n The potentials have been calculated for the ground-states of\n spherically symmetric atoms at the non-relativistic level of theory\n as described in\n\n S. Lehtola, \"Fully numerical calculations on atoms with fractional\n occupations and range-separated exchange functionals\", Phys. Rev. A\n 101, 012516 (2020). DOI: 10.1103/PhysRevA.101.012516\n\n using accurate finite-element calculations as described in\n\n S. Lehtola, \"Fully numerical Hartree-Fock and density functional\n calculations. I. Atoms\", Int. J. Quantum Chem. e25945 (2019).\n DOI: 10.1002/qua.25945\n\n .. note::\n This function will modify the input ks object.\n\n Args:\n ks : an instance of :class:`RKS`\n XC functional are controlled by ks.xc attribute. Attribute\n ks.grids might be initialized.\n\n Returns:\n matrix Vsap = Vnuc + J + Vxc.\n '''\n Vsap = rks.get_vsap(ks, mol)\n return numpy.asarray([Vsap, Vsap])\n\ndef energy_elec(ks, dm=None, h1e=None, vhf=None):\n if dm is None: dm = ks.make_rdm1()\n if h1e is None: h1e = ks.get_hcore()\n if vhf is None or getattr(vhf, 'ecoul', None) is None:\n vhf = ks.get_veff(ks.mol, dm)\n if not (isinstance(dm, numpy.ndarray) and dm.ndim == 2):\n dm = dm[0] + dm[1]\n return rks.energy_elec(ks, dm, h1e, vhf)\n\n\nclass UKS(rks.KohnShamDFT, uhf.UHF):\n '''Unrestricted Kohn-Sham\n See pyscf/dft/rks.py RKS class for document of the attributes'''\n def __init__(self, mol, xc='LDA,VWN'):\n uhf.UHF.__init__(self, mol)\n rks.KohnShamDFT.__init__(self, xc)\n\n def dump_flags(self, verbose=None):\n uhf.UHF.dump_flags(self, verbose)\n rks.KohnShamDFT.dump_flags(self, verbose)\n return self\n\n get_veff = get_veff\n get_vsap = get_vsap\n energy_elec = energy_elec\n\n init_guess_by_vsap = rks.init_guess_by_vsap\n\n def nuc_grad_method(self):\n from pyscf.grad import uks\n return uks.Gradients(self)\n\n\nif __name__ == '__main__':\n from pyscf import gto\n mol = gto.Mole()\n mol.verbose = 7\n mol.output = '/dev/null'#'out_rks'\n\n mol.atom.extend([['He', (0.,0.,0.)], ])\n mol.basis = { 'He': 'cc-pvdz'}\n #mol.grids = { 'He': (10, 14),}\n mol.build()\n\n m = UKS(mol)\n m.xc = 'b3lyp'\n print(m.scf()) # -2.89992555753\n\n",
"#!/usr/bin/env python\n# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors: Timothy Berkelbach <[email protected]>\n# Qiming Sun <[email protected]>\n#\n\n'''\nSpin-restricted G0W0 approximation with exact frequency integration\n'''\n\n\nfrom functools import reduce\nimport numpy\nimport numpy as np\nfrom scipy.optimize import newton\n\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf import ao2mo\nfrom pyscf import dft\nfrom pyscf.mp.mp2 import get_nocc, get_nmo, get_frozen_mask, _mo_without_core\nfrom pyscf import __config__\n\neinsum = lib.einsum\n\nBLKMIN = getattr(__config__, 'cc_ccsd_blkmin', 4)\nMEMORYMIN = getattr(__config__, 'cc_ccsd_memorymin', 2000)\n\ndef kernel(gw, mo_energy, mo_coeff, td_e, td_xy, eris=None,\n orbs=None, verbose=logger.NOTE):\n '''GW-corrected quasiparticle orbital energies\n\n Returns:\n A list : converged, mo_energy, mo_coeff\n '''\n # mf must be DFT; for HF use xc = 'hf'\n mf = gw._scf\n assert(isinstance(mf, (dft.rks.RKS , dft.uks.UKS,\n dft.roks.ROKS , dft.uks.UKS,\n dft.rks_symm.RKS , dft.uks_symm.UKS,\n dft.rks_symm.ROKS, dft.uks_symm.UKS)))\n assert(gw.frozen == 0 or gw.frozen is None)\n\n if eris is None:\n eris = gw.ao2mo(mo_coeff)\n if orbs is None:\n orbs = range(gw.nmo)\n\n v_mf = mf.get_veff() - mf.get_j()\n v_mf = reduce(numpy.dot, (mo_coeff.T, v_mf, mo_coeff))\n\n nocc = gw.nocc\n nmo = gw.nmo\n nvir = nmo-nocc\n\n vk_oo = -np.einsum('piiq->pq', eris.oooo)\n vk_ov = -np.einsum('iqpi->pq', eris.ovoo)\n vk_vv = -np.einsum('ipqi->pq', eris.ovvo).conj()\n vk = np.array(np.bmat([[vk_oo, vk_ov],[vk_ov.T, vk_vv]]))\n\n nexc = len(td_e)\n # factor of 2 for normalization, see tdscf/rhf.py\n td_xy = 2*np.asarray(td_xy) # (nexc, 2, nocc, nvir)\n td_z = np.sum(td_xy, axis=1).reshape(nexc,nocc,nvir)\n tdm_oo = einsum('via,iapq->vpq', td_z, eris.ovoo)\n tdm_ov = einsum('via,iapq->vpq', td_z, eris.ovov)\n tdm_vv = einsum('via,iapq->vpq', td_z, eris.ovvv)\n tdm = []\n for oo,ov,vv in zip(tdm_oo,tdm_ov,tdm_vv):\n tdm.append(np.array(np.bmat([[oo, ov],[ov.T, vv]])))\n tdm = np.asarray(tdm)\n\n conv = True\n mo_energy = np.zeros_like(gw._scf.mo_energy)\n for p in orbs:\n tdm_p = tdm[:,:,p]\n if gw.linearized:\n de = 1e-6\n ep = gw._scf.mo_energy[p]\n #TODO: analytic sigma derivative\n sigma = get_sigma_element(gw, ep, tdm_p, tdm_p, td_e).real\n dsigma = get_sigma_element(gw, ep+de, tdm_p, tdm_p, td_e).real - sigma\n zn = 1.0/(1-dsigma/de)\n e = ep + zn*(sigma.real + vk[p,p] - v_mf[p,p])\n mo_energy[p] = e\n else:\n def quasiparticle(omega):\n sigma = get_sigma_element(gw, omega, tdm_p, tdm_p, td_e)\n return omega - gw._scf.mo_energy[p] - (sigma.real + vk[p,p] - v_mf[p,p])\n try:\n e = newton(quasiparticle, gw._scf.mo_energy[p], tol=1e-6, maxiter=100)\n mo_energy[p] = e\n except RuntimeError:\n conv = False\n mo_coeff = gw._scf.mo_coeff\n\n if gw.verbose >= logger.DEBUG:\n numpy.set_printoptions(threshold=nmo)\n logger.debug(gw, ' GW mo_energy =\\n%s', mo_energy)\n 
numpy.set_printoptions(threshold=1000)\n\n return conv, mo_energy, mo_coeff\n\n\ndef get_sigma_element(gw, omega, tdm_p, tdm_q, td_e, eta=None, vir_sgn=1):\n if eta is None:\n eta = gw.eta\n\n nocc = gw.nocc\n evi = lib.direct_sum('v-i->vi', td_e, gw._scf.mo_energy[:nocc])\n eva = lib.direct_sum('v+a->va', td_e, gw._scf.mo_energy[nocc:])\n sigma = np.sum( tdm_p[:,:nocc]*tdm_q[:,:nocc]/(omega + evi - 1j*eta) )\n sigma += np.sum( tdm_p[:,nocc:]*tdm_q[:,nocc:]/(omega - eva + vir_sgn*1j*eta) )\n return sigma\n\n\ndef get_g(omega, mo_energy, mo_occ, eta):\n sgn = mo_occ - 1\n return 1.0/(omega - mo_energy + 1j*eta*sgn)\n\n\nclass GWExact(lib.StreamObject):\n '''non-relativistic restricted GW\n\n Saved results\n\n mo_energy :\n Orbital energies\n mo_coeff\n Orbital coefficients\n '''\n\n eta = getattr(__config__, 'gw_gw_GW_eta', 1e-3)\n linearized = getattr(__config__, 'gw_gw_GW_linearized', False)\n\n def __init__(self, mf, frozen=None, tdmf=None):\n self.mol = mf.mol\n self._scf = mf\n self._tdscf = tdmf\n self.verbose = self.mol.verbose\n self.stdout = self.mol.stdout\n self.max_memory = mf.max_memory\n\n self.frozen = frozen\n\n##################################################\n# don't modify the following attributes, they are not input options\n self._nocc = None\n self._nmo = None\n self.mo_energy = None\n self.mo_coeff = mf.mo_coeff\n self.mo_occ = mf.mo_occ\n\n keys = set(('eta', 'linearized'))\n self._keys = set(self.__dict__.keys()).union(keys)\n\n def dump_flags(self, verbose=None):\n log = logger.new_logger(self, verbose)\n log.info('')\n log.info('******** %s ********', self.__class__)\n log.info('method = %s', self.__class__.__name__)\n nocc = self.nocc\n nvir = self.nmo - nocc\n log.info('GW nocc = %d, nvir = %d', nocc, nvir)\n if self.frozen is not None:\n log.info('frozen = %s', self.frozen)\n logger.info(self, 'use perturbative linearized QP eqn = %s', self.linearized)\n return self\n\n @property\n def nocc(self):\n return self.get_nocc()\n @nocc.setter\n def nocc(self, n):\n self._nocc = n\n\n @property\n def nmo(self):\n return self.get_nmo()\n @nmo.setter\n def nmo(self, n):\n self._nmo = n\n\n get_nocc = get_nocc\n get_nmo = get_nmo\n get_frozen_mask = get_frozen_mask\n\n def get_g0(self, omega, eta=None):\n if eta is None:\n eta = self.eta\n return get_g(omega, self._scf.mo_energy, self.mo_occ, eta)\n\n def get_g(self, omega, eta=None):\n if eta is None:\n eta = self.eta\n return get_g(omega, self.mo_energy, self.mo_occ, eta)\n\n def kernel(self, mo_energy=None, mo_coeff=None, td_e=None, td_xy=None,\n eris=None, orbs=None):\n if mo_coeff is None:\n mo_coeff = self._scf.mo_coeff\n if mo_energy is None:\n mo_energy = self._scf.mo_energy\n if self._tdscf is None:\n from pyscf import tdscf\n self._tdscf = tdscf.dRPA(self._scf)\n nocc, nvir = self.nocc, self.nmo-self.nocc\n self._tdscf.nstates = nocc*nvir\n self._tdscf.verbose = 0\n self._tdscf.kernel()\n if td_e is None:\n td_e = self._tdscf.e\n if td_xy is None:\n td_xy = self._tdscf.xy\n\n cput0 = (logger.process_clock(), logger.perf_counter())\n self.dump_flags()\n self.converged, self.mo_energy, self.mo_coeff = \\\n kernel(self, mo_energy, mo_coeff, td_e, td_xy,\n eris=eris, orbs=orbs, verbose=self.verbose)\n\n logger.timer(self, 'GW', *cput0)\n return self.mo_energy\n\n def reset(self, mol=None):\n if mol is not None:\n self.mol = mol\n self._scf.reset(mol)\n self._tdscf.reset(mol)\n return self\n\n def ao2mo(self, mo_coeff=None):\n nmo = self.nmo\n nao = self.mo_coeff.shape[0]\n nmo_pair = nmo * (nmo+1) // 2\n nao_pair 
= nao * (nao+1) // 2\n mem_incore = (max(nao_pair**2, nmo**4) + nmo_pair**2) * 8/1e6\n mem_now = lib.current_memory()[0]\n if (self._scf._eri is not None and\n (mem_incore+mem_now < self.max_memory) or self.mol.incore_anyway):\n return _make_eris_incore(self, mo_coeff)\n\n elif getattr(self._scf, 'with_df', None):\n logger.warn(self, 'GW detected DF being used in the HF object. '\n 'MO integrals are computed based on the DF 3-index tensors.\\n'\n 'Developer TODO: Write dfgw.GW for the '\n 'DF-GW calculations')\n raise NotImplementedError\n #return _make_df_eris_outcore(self, mo_coeff)\n\n else:\n return _make_eris_outcore(self, mo_coeff)\n\n\nclass _ChemistsERIs:\n '''(pq|rs)\n\n Identical to rccsd _ChemistsERIs except no vvvv.'''\n def __init__(self, mol=None):\n self.mol = mol\n self.mo_coeff = None\n self.nocc = None\n self.fock = None\n\n self.oooo = None\n self.ovoo = None\n self.oovv = None\n self.ovvo = None\n self.ovov = None\n self.ovvv = None\n\n def _common_init_(self, mycc, mo_coeff=None):\n if mo_coeff is None:\n mo_coeff = mycc.mo_coeff\n self.mo_coeff = mo_coeff = _mo_without_core(mycc, mo_coeff)\n# Note: Recomputed fock matrix since SCF may not be fully converged.\n dm = mycc._scf.make_rdm1(mycc.mo_coeff, mycc.mo_occ)\n fockao = mycc._scf.get_hcore() + mycc._scf.get_veff(mycc.mol, dm)\n self.fock = reduce(numpy.dot, (mo_coeff.conj().T, fockao, mo_coeff))\n self.nocc = mycc.nocc\n self.mol = mycc.mol\n\n mo_e = self.fock.diagonal()\n try:\n gap = abs(mo_e[:self.nocc,None] - mo_e[None,self.nocc:]).min()\n if gap < 1e-5:\n logger.warn(mycc, 'HOMO-LUMO gap %s too small for GW', gap)\n except ValueError: # gap.size == 0\n pass\n return self\n\ndef _make_eris_incore(mycc, mo_coeff=None, ao2mofn=None):\n cput0 = (logger.process_clock(), logger.perf_counter())\n eris = _ChemistsERIs()\n eris._common_init_(mycc, mo_coeff)\n nocc = eris.nocc\n nmo = eris.fock.shape[0]\n\n if callable(ao2mofn):\n eri1 = ao2mofn(eris.mo_coeff).reshape([nmo]*4)\n else:\n eri1 = ao2mo.incore.full(mycc._scf._eri, eris.mo_coeff)\n eri1 = ao2mo.restore(1, eri1, nmo)\n eris.oooo = eri1[:nocc,:nocc,:nocc,:nocc].copy()\n eris.ovoo = eri1[:nocc,nocc:,:nocc,:nocc].copy()\n eris.ovov = eri1[:nocc,nocc:,:nocc,nocc:].copy()\n eris.oovv = eri1[:nocc,:nocc,nocc:,nocc:].copy()\n eris.ovvo = eri1[:nocc,nocc:,nocc:,:nocc].copy()\n eris.ovvv = eri1[:nocc,nocc:,nocc:,nocc:].copy()\n logger.timer(mycc, 'GW integral transformation', *cput0)\n return eris\n\ndef _make_eris_outcore(mycc, mo_coeff=None):\n cput0 = (logger.process_clock(), logger.perf_counter())\n log = logger.Logger(mycc.stdout, mycc.verbose)\n eris = _ChemistsERIs()\n eris._common_init_(mycc, mo_coeff)\n\n mol = mycc.mol\n mo_coeff = eris.mo_coeff\n nocc = eris.nocc\n nao, nmo = mo_coeff.shape\n nvir = nmo - nocc\n eris.feri1 = lib.H5TmpFile()\n eris.oooo = eris.feri1.create_dataset('oooo', (nocc,nocc,nocc,nocc), 'f8')\n eris.ovoo = eris.feri1.create_dataset('ovoo', (nocc,nvir,nocc,nocc), 'f8', chunks=(nocc,1,nocc,nocc))\n eris.ovov = eris.feri1.create_dataset('ovov', (nocc,nvir,nocc,nvir), 'f8', chunks=(nocc,1,nocc,nvir))\n eris.ovvo = eris.feri1.create_dataset('ovvo', (nocc,nvir,nvir,nocc), 'f8', chunks=(nocc,1,nvir,nocc))\n eris.ovvv = eris.feri1.create_dataset('ovvv', (nocc,nvir,nvir,nvir), 'f8')\n eris.oovv = eris.feri1.create_dataset('oovv', (nocc,nocc,nvir,nvir), 'f8', chunks=(nocc,nocc,1,nvir))\n max_memory = max(MEMORYMIN, mycc.max_memory-lib.current_memory()[0])\n\n ftmp = lib.H5TmpFile()\n ao2mo.full(mol, mo_coeff, ftmp, max_memory=max_memory, 
verbose=log)\n eri = ftmp['eri_mo']\n\n nocc_pair = nocc*(nocc+1)//2\n tril2sq = lib.square_mat_in_trilu_indices(nmo)\n oo = eri[:nocc_pair]\n eris.oooo[:] = ao2mo.restore(1, oo[:,:nocc_pair], nocc)\n oovv = lib.take_2d(oo, tril2sq[:nocc,:nocc].ravel(), tril2sq[nocc:,nocc:].ravel())\n eris.oovv[:] = oovv.reshape(nocc,nocc,nvir,nvir)\n oo = oovv = None\n\n tril2sq = lib.square_mat_in_trilu_indices(nmo)\n blksize = min(nvir, max(BLKMIN, int(max_memory*1e6/8/nmo**3/2)))\n for p0, p1 in lib.prange(0, nvir, blksize):\n q0, q1 = p0+nocc, p1+nocc\n off0 = q0*(q0+1)//2\n off1 = q1*(q1+1)//2\n buf = lib.unpack_tril(eri[off0:off1])\n\n tmp = buf[ tril2sq[q0:q1,:nocc] - off0 ]\n eris.ovoo[:,p0:p1] = tmp[:,:,:nocc,:nocc].transpose(1,0,2,3)\n eris.ovvo[:,p0:p1] = tmp[:,:,nocc:,:nocc].transpose(1,0,2,3)\n eris.ovov[:,p0:p1] = tmp[:,:,:nocc,nocc:].transpose(1,0,2,3)\n eris.ovvv[:,p0:p1] = tmp[:,:,nocc:,nocc:].transpose(1,0,2,3)\n\n buf = tmp = None\n log.timer('GW integral transformation', *cput0)\n return eris\n\n\nif __name__ == '__main__':\n from pyscf import gto\n mol = gto.Mole()\n mol.verbose = 5\n mol.atom = [\n [8 , (0. , 0. , 0.)],\n [1 , (0. , -0.757 , 0.587)],\n [1 , (0. , 0.757 , 0.587)]]\n mol.basis = 'cc-pvdz'\n mol.build()\n\n mf = dft.RKS(mol)\n mf.xc = 'hf'\n mf.kernel()\n\n gw = GWExact(mf)\n gw.kernel()\n print(gw.mo_energy)\n# [-20.10555946 -1.2264067 -0.68160939 -0.53066326 -0.44679868\n# 0.17291986 0.24457082 0.74758227 0.80045129 1.11748735\n# 1.1508353 1.19081928 1.40406947 1.43593681 1.63324734\n# 1.81839838 1.86943727 2.37827782 2.48829939 3.26028229\n# 3.3247595 3.4958492 3.77735135 4.14572189]\n\n nocc = mol.nelectron//2\n\n gw.linearized = True\n gw.kernel(orbs=[nocc-1,nocc])\n print(gw.mo_energy[nocc-1] - -0.44684106)\n print(gw.mo_energy[nocc] - 0.17292032)\n\n",
"# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Oliver Backhouse <[email protected]>\n# George Booth <[email protected]>\n#\n\n'''\nMPI helper functions using mpi4py\n'''\n\nimport numpy as np\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf import __config__\n\nINT_MAX = 2147483647\nBLKSIZE = INT_MAX // 32 + 1\n\n# attempt to successfully load and init the MPI, else assume 1 core:\ntry:\n from mpi4py import MPI as mpi\n comm = mpi.COMM_WORLD\n size = comm.Get_size()\n rank = comm.Get_rank()\nexcept Exception:\n mpi = None\n comm = None\n size = 1\n rank = 0\n\nSCALE_PRANGE_STEP = False\n\n\ndef bcast(buf, root=0):\n if size == 1:\n return buf\n\n is_array = isinstance(buf, np.ndarray)\n buf = np.asarray(buf, order='C')\n buf = buf.astype(buf.dtype.char)\n shape, mpi_dtype = comm.bcast((buf.shape, buf.dtype.char))\n\n if rank != root:\n buf = np.empty(shape, dtype=mpi_dtype)\n\n buf_seg = np.ndarray(buf.size, dtype=buf.dtype, buffer=buf)\n for p0, p1 in lib.prange(0, buf.size, BLKSIZE):\n comm.Bcast(buf_seg[p0:p1], root)\n\n return buf if is_array else buf.ravel()[0]\n\n\ndef bcast_dict(buf, root=0):\n if size == 1:\n return buf\n\n buf = comm.bcast(buf, root)\n\n return buf\n\n\ndef reduce(sendbuf, root=0, op=getattr(mpi, 'SUM', None)):\n if size == 1:\n return sendbuf\n\n is_array = isinstance(sendbuf, np.ndarray)\n sendbuf = np.asarray(sendbuf, order='C')\n sendbuf = sendbuf.astype(sendbuf.dtype.char)\n shape, mpi_dtype = comm.bcast((sendbuf.shape, sendbuf.dtype.char))\n assert sendbuf.shape == shape and sendbuf.dtype.char == mpi_dtype\n\n recvbuf = np.zeros_like(sendbuf)\n send_seg = np.ndarray(sendbuf.size, dtype=sendbuf.dtype, buffer=sendbuf)\n recv_seg = np.ndarray(recvbuf.size, dtype=recvbuf.dtype, buffer=recvbuf)\n for p0, p1 in lib.prange(0, sendbuf.size, BLKSIZE):\n comm.Reduce(send_seg[p0:p1], recv_seg[p0:p1], op, root)\n\n if rank == root:\n return recvbuf if is_array else recvbuf.ravel()[0]\n else:\n return sendbuf if is_array else sendbuf.ravel()[0]\n\n\ndef allreduce(sendbuf, root=0, op=getattr(mpi, 'SUM', None)):\n if size == 1:\n return sendbuf\n\n is_array = isinstance(sendbuf, np.ndarray)\n sendbuf = np.asarray(sendbuf, order='C')\n sendbuf = sendbuf.astype(sendbuf.dtype.char)\n shape, mpi_dtype = comm.bcast((sendbuf.shape, sendbuf.dtype.char))\n assert sendbuf.shape == shape and sendbuf.dtype.char == mpi_dtype\n\n recvbuf = np.zeros_like(sendbuf)\n send_seg = np.ndarray(sendbuf.size, dtype=sendbuf.dtype, buffer=sendbuf)\n recv_seg = np.ndarray(recvbuf.size, dtype=recvbuf.dtype, buffer=recvbuf)\n for p0, p1 in lib.prange(0, sendbuf.size, BLKSIZE):\n comm.Allreduce(send_seg[p0:p1], recv_seg[p0:p1], op)\n\n return recvbuf if is_array else recvbuf.ravel()[0]\n\n\ndef allreduce_safe_inplace(array):\n if size == 1:\n return array\n\n from pyscf.pbc.mpitools.mpi_helper import safeAllreduceInPlace\n\n safeAllreduceInPlace(comm, array)\n\n\ndef barrier():\n if comm is not None:\n 
comm.Barrier()\n\n\ndef nrange(start, stop=None, step=1):\n if stop is None:\n start, stop = 0, start\n\n for i in range(start+rank, stop, step*size):\n yield i\n\n\ndef prange(start, stop, step):\n ''' :func:`lib.prange` distributed over MPI processes. Returns\n the range for a single MPI rank.\n '''\n\n if size == 1:\n for p0, p1 in lib.prange(start, stop, step):\n yield p0, p1\n else:\n if SCALE_PRANGE_STEP:\n step //= size\n\n split = lambda x : x * (stop-start) // size\n\n start0 = split(rank)\n stop0 = stop if rank == (size-1) else split(rank+1)\n\n for p0, p1 in lib.prange(start0, stop0, step):\n yield p0, p1\n"
] | [
[
"numpy.dot",
"numpy.einsum",
"numpy.arange",
"numpy.zeros_like",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.random.random",
"numpy.linalg.norm",
"numpy.random.seed"
],
[
"numpy.random.random",
"numpy.allclose",
"numpy.random.seed"
],
[
"numpy.all",
"numpy.max",
"numpy.random.random",
"numpy.min"
],
[
"numpy.diag",
"numpy.random.random",
"numpy.allclose",
"numpy.random.seed",
"numpy.einsum",
"numpy.asarray",
"numpy.arange",
"numpy.eye",
"numpy.linalg.norm",
"numpy.diag_indices",
"numpy.zeros"
],
[
"numpy.asarray",
"numpy.ix_",
"numpy.zeros"
],
[
"numpy.zeros_like"
],
[
"numpy.asarray",
"numpy.einsum"
],
[
"numpy.einsum",
"numpy.asarray",
"numpy.set_printoptions",
"numpy.bmat",
"numpy.zeros_like",
"numpy.sum",
"scipy.optimize.newton"
],
[
"numpy.asarray",
"numpy.zeros_like",
"numpy.ndarray",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tsheaff/keras | [
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"f1e9c76675981ee6683f54a3ce569212d551d12d",
"f1e9c76675981ee6683f54a3ce569212d551d12d",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"f1e9c76675981ee6683f54a3ce569212d551d12d",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"f1e9c76675981ee6683f54a3ce569212d551d12d",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"f1e9c76675981ee6683f54a3ce569212d551d12d",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b",
"ee227dda766d769b7499a5549e8ed77b5e88105b"
] | [
"keras/utils/layer_utils_test.py",
"keras/optimizers/optimizer_v2/rmsprop.py",
"keras/layers/preprocessing/hashing_test.py",
"keras/engine/base_layer_test.py",
"keras/layers/reshaping/up_sampling3d.py",
"keras/preprocessing/dataset_utils.py",
"keras/layers/reshaping/permute.py",
"keras/layers/pooling/max_pooling1d.py",
"keras/tests/saver_test.py",
"keras/engine/training_utils_v1.py",
"keras/layers/reshaping/up_sampling2d.py",
"keras/utils/np_utils_test.py",
"keras/layers/preprocessing/benchmarks/bucketized_column_dense_benchmark.py",
"keras/distribute/worker_training_state.py",
"keras/layers/regularization/spatial_dropout1d.py",
"keras/layers/regularization/gaussian_dropout.py",
"keras/benchmarks/keras_examples_benchmarks/antirectifier_benchmark_test.py",
"keras/layers/reshaping/zero_padding_test.py",
"keras/engine/compile_utils.py",
"keras/layers/rnn/bidirectional.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for layer_utils.\"\"\"\n\nimport keras\nimport tensorflow.compat.v2 as tf\n\nimport collections\nimport contextlib\nimport multiprocessing.dummy\nimport os\nimport pickle\nimport shutil\nimport sys\nimport time\nimport timeit\n\nimport numpy as np\nfrom keras.utils import io_utils\nfrom keras.utils import layer_utils\n\n\n_PICKLEABLE_CALL_COUNT = collections.Counter()\n\n\nclass MyPickleableObject(tf.__internal__.tracking.AutoTrackable):\n \"\"\"Needed for InterfaceTests.test_property_cache_serialization.\n\n This class must be at the top level. This is a constraint of pickle,\n unrelated to `cached_per_instance`.\n \"\"\"\n\n @property\n @layer_utils.cached_per_instance\n def my_id(self):\n _PICKLEABLE_CALL_COUNT[self] += 1\n return id(self)\n\n\nclass LayerUtilsTest(tf.test.TestCase):\n\n def test_print_summary(self):\n model = keras.Sequential()\n model.add(\n keras.layers.Conv2D(\n filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'))\n model.add(keras.layers.Flatten(name='flat'))\n model.add(keras.layers.Dense(5, name='dense'))\n\n file_name = 'model_1.txt'\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n fpath = os.path.join(temp_dir, file_name)\n writer = open(fpath, 'w')\n\n def print_to_file(text):\n print(text, file=writer)\n\n try:\n layer_utils.print_summary(model, print_fn=print_to_file)\n self.assertTrue(tf.io.gfile.exists(fpath))\n writer.close()\n reader = open(fpath, 'r')\n lines = reader.readlines()\n reader.close()\n self.assertEqual(len(lines), 15)\n except ImportError:\n pass\n\n def test_print_summary_without_print_fn(self):\n model = keras.Sequential([\n keras.layers.Dense(5, input_shape=(10,), name='dense')])\n io_utils.enable_interactive_logging()\n with self.captureWritesToStream(sys.stdout) as printed:\n layer_utils.print_summary(model)\n self.assertIn('dense (Dense)', printed.contents())\n\n def test_print_summary_expand_nested(self):\n shape = (None, None, 3)\n\n def make_model():\n x = inputs = keras.Input(shape)\n x = keras.layers.Conv2D(3, 1)(x)\n x = keras.layers.BatchNormalization()(x)\n return keras.Model(inputs, x)\n\n x = inner_inputs = keras.Input(shape)\n x = make_model()(x)\n inner_model = keras.Model(inner_inputs, x)\n\n inputs = keras.Input(shape)\n model = keras.Model(inputs, inner_model(inputs))\n\n file_name = 'model_2.txt'\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n fpath = os.path.join(temp_dir, file_name)\n writer = open(fpath, 'w')\n\n def print_to_file(text):\n print(text, file=writer)\n\n try:\n layer_utils.print_summary(\n model, print_fn=print_to_file, expand_nested=True)\n self.assertTrue(tf.io.gfile.exists(fpath))\n writer.close()\n reader = open(fpath, 'r')\n lines = reader.readlines()\n reader.close()\n check_str = 
(\n 'Model: \"model_2\"\\n'\n '_________________________________________________________________\\n'\n ' Layer (type) Output Shape Param # \\n'\n '=================================================================\\n'\n ' input_3 (InputLayer) [(None, None, None, 3)] 0 \\n'\n ' \\n'\n ' model_1 (Functional) (None, None, None, 3) 24 \\n'\n '|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\\n'\n '| input_1 (InputLayer) [(None, None, None, 3)] 0 |\\n'\n '| |\\n'\n '| model (Functional) (None, None, None, 3) 24 |\\n'\n '||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\\n'\n '|| input_2 (InputLayer) [(None, None, None, 3)] 0 ||\\n'\n '|| ||\\n'\n '|| conv2d (Conv2D) (None, None, None, 3) 12 ||\\n'\n '|| ||\\n'\n '|| batch_normalization (BatchN (None, None, None, 3) 12 ||\\n'\n '|| ormalization) ||\\n'\n '|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\\n'\n '¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\\n'\n '=================================================================\\n'\n 'Total params: 24\\n'\n 'Trainable params: 18\\n'\n 'Non-trainable params: 6\\n'\n '_________________________________________________________________\\n')\n\n fin_str = ''\n for line in lines:\n fin_str += line\n\n self.assertIn(fin_str, check_str)\n self.assertEqual(len(lines), 25)\n except ImportError:\n pass\n\n def test_summary_subclass_model_expand_nested(self):\n\n class Sequential(keras.Model):\n\n def __init__(self, *args):\n super(Sequential, self).__init__()\n self.module_list = list(args) if args else []\n\n def call(self, x):\n for module in self.module_list:\n x = module(x)\n return x\n\n class Block(keras.Model):\n\n def __init__(self):\n super(Block, self).__init__()\n self.module = Sequential(\n keras.layers.Dense(10),\n keras.layers.Dense(10),\n )\n\n def call(self, input_tensor):\n x = self.module(input_tensor)\n return x\n\n class Base(keras.Model):\n\n def __init__(self):\n super(Base, self).__init__()\n self.module = Sequential(Block(), Block())\n\n def call(self, input_tensor):\n x = self.module(input_tensor)\n y = self.module(x)\n return x, y\n\n class Network(keras.Model):\n\n def __init__(self):\n super(Network, self).__init__()\n self.child = Base()\n\n def call(self, inputs):\n return self.child(inputs)\n\n net = Network()\n inputs = keras.Input(shape=(10,))\n outputs = net(inputs)\n model = keras.models.Model(inputs=inputs, outputs=outputs)\n\n file_name = 'model_3.txt'\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n fpath = os.path.join(temp_dir, file_name)\n writer = open(fpath, 'w')\n\n def print_to_file(text):\n print(text, file=writer)\n\n try:\n layer_utils.print_summary(\n model, line_length=120, print_fn=print_to_file, expand_nested=True)\n self.assertTrue(tf.io.gfile.exists(fpath))\n writer.close()\n reader = open(fpath, 'r')\n lines = reader.readlines()\n reader.close()\n # The output content are slightly different for the input shapes between\n # v1 and v2.\n if tf.__internal__.tf2.enabled():\n self.assertEqual(len(lines), 39)\n else:\n self.assertEqual(len(lines), 40)\n except ImportError:\n pass\n\n def test_print_summary_show_trainable(self):\n model = keras.Sequential(name='trainable')\n untrained = keras.layers.Conv2D(\n filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv')\n model.add(untrained)\n model.add(keras.layers.Flatten(name='flat'))\n model.add(keras.layers.Dense(5, name='dense'))\n\n untrained.trainable = False\n\n file_name = 
'model_4.txt'\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n fpath = os.path.join(temp_dir, file_name)\n writer = open(fpath, 'w')\n\n def print_to_file(text):\n print(text, file=writer)\n\n try:\n layer_utils.print_summary(\n model, print_fn=print_to_file, show_trainable=True)\n self.assertTrue(tf.io.gfile.exists(fpath))\n writer.close()\n reader = open(fpath, 'r')\n lines = reader.readlines()\n reader.close()\n check_str = (\n 'Model: '\n '\"trainable\"\\n____________________________________________________________________________\\n'\n ' Layer (type) Output Shape Param # '\n 'Trainable '\n '\\n============================================================================\\n'\n ' conv (Conv2D) (None, 2, 3, 2) 62 N'\n ' \\n'\n ' '\n '\\n flat (Flatten) (None, 12) 0 '\n 'Y \\n'\n ' '\n '\\n dense (Dense) (None, 5) 65 '\n 'Y \\n'\n ' '\n '\\n============================================================================\\nTotal'\n ' params: 127\\nTrainable params: 65\\nNon-trainable params: '\n '62\\n____________________________________________________________________________\\n'\n '____________________________________________________________________________\\n'\n )\n\n fin_str = ''\n for line in lines:\n fin_str += line\n\n self.assertIn(fin_str, check_str)\n self.assertEqual(len(lines), 15)\n except ImportError:\n pass\n\n def test_print_summary_expand_nested_show_trainable(self):\n shape = (None, None, 3)\n\n def make_model():\n x = inputs = keras.Input(shape, name='input2')\n untrainable = keras.layers.Conv2D(3, 1)\n untrainable.trainable = False\n x = untrainable(x)\n x = keras.layers.BatchNormalization()(x)\n return keras.Model(inputs, x)\n\n x = inner_inputs = keras.Input(shape, name='input1')\n x = make_model()(x)\n inner_model = keras.Model(inner_inputs, x)\n\n inputs = keras.Input(shape, name='input3')\n model = keras.Model(inputs, inner_model(inputs))\n\n file_name = 'model_6.txt'\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)\n fpath = os.path.join(temp_dir, file_name)\n writer = open(fpath, 'w')\n\n def print_to_file(text):\n print(text, file=writer)\n\n try:\n layer_utils.print_summary(\n model,\n print_fn=print_to_file,\n expand_nested=True,\n show_trainable=True)\n self.assertTrue(tf.io.gfile.exists(fpath))\n writer.close()\n reader = open(fpath, 'r')\n lines = reader.readlines()\n reader.close()\n check_str = (\n 'Model: '\n '\"model_2\"\\n____________________________________________________________________________\\n'\n ' Layer (type) Output Shape Param # '\n 'Trainable '\n '\\n============================================================================\\n'\n ' input3 (InputLayer) [(None, None, None, 3)] 0 Y'\n ' \\n'\n ' '\n '\\n model_1 (Functional) (None, None, None, 3) 24 '\n 'Y '\n '\\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\\n|'\n ' input1 (InputLayer) [(None, None, None, 3)] 0 Y'\n ' |\\n|'\n ' '\n '|\\n| model (Functional) (None, None, None, 3) 24 '\n 'Y '\n '|\\n||¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯||\\n||'\n ' input2 (InputLayer) [(None, None, None, 3)] 0 Y'\n ' ||\\n||'\n ' '\n '||\\n|| conv2d (Conv2D) (None, None, None, 3) 12 '\n 'N ||\\n||'\n ' '\n '||\\n|| batch_normalization (BatchN (None, None, None, 3) 12 '\n 'Y ||\\n|| ormalization)'\n ' '\n 
'||\\n|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|\\n¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\\n============================================================================\\nTotal'\n ' params: 24\\nTrainable params: 6\\nNon-trainable params: '\n '18\\n____________________________________________________________________________\\n'\n '____________________________________________________________________________\\n'\n )\n\n fin_str = ''\n for line in lines:\n fin_str += line\n\n self.assertIn(fin_str, check_str)\n self.assertEqual(len(lines), 25)\n except ImportError:\n pass\n\n def test_property_cache(self):\n test_counter = collections.Counter()\n\n class MyObject(tf.__internal__.tracking.AutoTrackable):\n\n def __init__(self):\n super(MyObject, self).__init__()\n self._frozen = True\n\n def __setattr__(self, key, value):\n \"\"\"Enforce that cache does not set attribute on MyObject.\"\"\"\n if getattr(self, '_frozen', False):\n raise ValueError('Cannot mutate when frozen.')\n return super(MyObject, self).__setattr__(key, value)\n\n @property\n @layer_utils.cached_per_instance\n def test_property(self):\n test_counter[id(self)] += 1\n return id(self)\n\n first_object = MyObject()\n second_object = MyObject()\n\n # Make sure the objects return the correct values\n self.assertEqual(first_object.test_property, id(first_object))\n self.assertEqual(second_object.test_property, id(second_object))\n\n # Make sure the cache does not share across objects\n self.assertNotEqual(first_object.test_property, second_object.test_property)\n\n # Check again (Now the values should be cached.)\n self.assertEqual(first_object.test_property, id(first_object))\n self.assertEqual(second_object.test_property, id(second_object))\n\n # Count the function calls to make sure the cache is actually being used.\n self.assertAllEqual(tuple(test_counter.values()), (1, 1))\n\n def test_property_cache_threaded(self):\n call_count = collections.Counter()\n\n class MyObject(tf.__internal__.tracking.AutoTrackable):\n\n @property\n @layer_utils.cached_per_instance\n def test_property(self):\n # Random sleeps to ensure that the execution thread changes\n # mid-computation.\n call_count['test_property'] += 1\n time.sleep(np.random.random() + 1.)\n\n # Use a RandomState which is seeded off the instance's id (the mod is\n # because numpy limits the range of seeds) to ensure that an instance\n # returns the same value in different threads, but different instances\n # return different values.\n return int(np.random.RandomState(id(self) % (2 ** 31)).randint(2 ** 16))\n\n def get_test_property(self, _):\n \"\"\"Function provided to .map for threading test.\"\"\"\n return self.test_property\n\n # Test that multiple threads return the same value. 
This requires that\n # the underlying function is repeatable, as cached_property makes no attempt\n # to prioritize the first call.\n test_obj = MyObject()\n with contextlib.closing(multiprocessing.dummy.Pool(32)) as pool:\n # Intentionally make a large pool (even when there are only a small number\n # of cpus) to ensure that the runtime switches threads.\n results = pool.map(test_obj.get_test_property, range(64))\n self.assertEqual(len(set(results)), 1)\n\n # Make sure we actually are testing threaded behavior.\n self.assertGreater(call_count['test_property'], 1)\n\n # Make sure new threads still cache hit.\n with contextlib.closing(multiprocessing.dummy.Pool(2)) as pool:\n start_time = timeit.default_timer() # Don't time pool instantiation.\n results = pool.map(test_obj.get_test_property, range(4))\n total_time = timeit.default_timer() - start_time\n\n # Note(taylorrobie): The reason that it is safe to time a unit test is that\n # a cache hit will be << 1 second, and a cache miss is\n # guaranteed to be >= 1 second. Empirically confirmed by\n # 100,000 runs with no flakes.\n self.assertLess(total_time, 0.95)\n\n def test_property_cache_serialization(self):\n # Reset call count. .keys() must be wrapped in a list, because otherwise we\n # would mutate the iterator while iterating.\n for k in list(_PICKLEABLE_CALL_COUNT.keys()):\n _PICKLEABLE_CALL_COUNT.pop(k)\n\n first_instance = MyPickleableObject()\n self.assertEqual(id(first_instance), first_instance.my_id)\n\n # Test that we can pickle and un-pickle\n second_instance = pickle.loads(pickle.dumps(first_instance))\n\n self.assertEqual(id(second_instance), second_instance.my_id)\n self.assertNotEqual(first_instance.my_id, second_instance.my_id)\n\n # Make sure de-serialized object uses the cache.\n self.assertEqual(_PICKLEABLE_CALL_COUNT[second_instance], 1)\n\n # Make sure the decorator cache is not being serialized with the object.\n expected_size = len(pickle.dumps(second_instance))\n for _ in range(5):\n # Add some more entries to the cache.\n _ = MyPickleableObject().my_id\n self.assertEqual(len(_PICKLEABLE_CALL_COUNT), 7)\n size_check_instance = MyPickleableObject()\n _ = size_check_instance.my_id\n self.assertEqual(expected_size, len(pickle.dumps(size_check_instance)))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RMSprop optimizer implementation.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n# pylint: disable=g-classes-have-attributes\n\nimport numpy as np\nfrom keras import backend_config\nfrom keras.optimizers.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n# pylint: disable=g-classes-have-attributes\n@keras_export(\"keras.optimizers.RMSprop\", \"keras.optimizers_legacy.RMSprop\")\nclass RMSprop(optimizer_v2.OptimizerV2):\n r\"\"\"Optimizer that implements the RMSprop algorithm.\n\n The gist of RMSprop is to:\n\n - Maintain a moving (discounted) average of the square of gradients\n - Divide the gradient by the root of this average\n\n This implementation of RMSprop uses plain momentum, not Nesterov momentum.\n\n The centered version additionally maintains a moving average of the\n gradients, and uses that average to estimate the variance.\n\n Args:\n learning_rate: A `Tensor`, floating point value, or a schedule that is a\n `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable\n that takes no arguments and returns the actual value to use. The\n learning rate. Defaults to 0.001.\n rho: Discounting factor for the history/coming gradient. Defaults to 0.9.\n momentum: A scalar or a scalar `Tensor`. Defaults to 0.0.\n epsilon: A small constant for numerical stability. This epsilon is\n \"epsilon hat\" in the Kingma and Ba paper (in the formula just before\n Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to\n 1e-7.\n centered: Boolean. If `True`, gradients are normalized by the estimated\n variance of the gradient; if False, by the uncentered second moment.\n Setting this to `True` may help with training, but is slightly more\n expensive in terms of computation and memory. Defaults to `False`.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to `\"RMSprop\"`.\n **kwargs: keyword arguments. Allowed arguments are `clipvalue`,\n `clipnorm`, `global_clipnorm`.\n If `clipvalue` (float) is set, the gradient of each weight\n is clipped to be no higher than this value.\n If `clipnorm` (float) is set, the gradient of each weight\n is individually clipped so that its norm is no higher than this value.\n If `global_clipnorm` (float) is set the gradient of all weights is\n clipped so that their global norm is no higher than this value.\n\n Note that in the dense implementation of this algorithm, variables and their\n corresponding accumulators (momentum, gradient moving average, square\n gradient moving average) will be updated even if the gradient is zero\n (i.e. accumulators will decay, momentum will be applied). 
The sparse\n implementation (used when the gradient is an `IndexedSlices` object,\n typically because of `tf.gather` or an embedding lookup in the forward pass)\n will not update variable slices or their accumulators unless those slices\n were used in the forward pass (nor is there an \"eventual\" correction to\n account for these omitted updates). This leads to more efficient updates for\n large embedding lookup tables (where most of the slices are not accessed in\n a particular graph execution), but differs from the published algorithm.\n\n Usage:\n\n >>> opt = tf.keras.optimizers.RMSprop(learning_rate=0.1)\n >>> var1 = tf.Variable(10.0)\n >>> loss = lambda: (var1 ** 2) / 2.0 # d(loss) / d(var1) = var1\n >>> step_count = opt.minimize(loss, [var1]).numpy()\n >>> var1.numpy()\n 9.683772\n\n Reference:\n - [Hinton, 2012](\n http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)\n \"\"\"\n\n _HAS_AGGREGATE_GRAD = True\n\n def __init__(self,\n learning_rate=0.001,\n rho=0.9,\n momentum=0.0,\n epsilon=1e-7,\n centered=False,\n name=\"RMSprop\",\n **kwargs):\n \"\"\"Construct a new RMSprop optimizer.\n\n Args:\n learning_rate: A `Tensor`, floating point value, or a schedule that is a\n `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable\n that takes no arguments and returns the actual value to use. The\n learning rate. Defaults to 0.001.\n rho: Discounting factor for the history/coming gradient. Defaults to 0.9.\n momentum: A scalar or a scalar `Tensor`. Defaults to 0.0.\n epsilon: A small constant for numerical stability. This epsilon is\n \"epsilon hat\" in the Kingma and Ba paper (in the formula just before\n Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to\n 1e-7.\n centered: Boolean. If `True`, gradients are normalized by the estimated\n variance of the gradient; if False, by the uncentered second moment.\n Setting this to `True` may help with training, but is slightly more\n expensive in terms of computation and memory. Defaults to `False`.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to \"RMSprop\".\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,\n `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip\n gradients by value, `decay` is included for backward compatibility to\n allow time inverse decay of learning rate. `lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n\n @compatibility(eager)\n When eager execution is enabled, `learning_rate`, `decay`, `momentum`, and\n `epsilon` can each be a callable that takes no arguments and returns the\n actual value to use. This can be useful for changing these values across\n different invocations of optimizer functions.\n @end_compatibility\n \"\"\"\n super(RMSprop, self).__init__(name, **kwargs)\n self._set_hyper(\"learning_rate\", kwargs.get(\"lr\", learning_rate))\n self._set_hyper(\"decay\", self._initial_decay)\n self._set_hyper(\"rho\", rho)\n\n self._momentum = False\n if isinstance(momentum, tf.Tensor) or callable(momentum) or momentum > 0:\n self._momentum = True\n if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):\n raise ValueError(f\"`momentum` must be between [0, 1]. 
Received: \"\n f\"momentum={momentum} (of type {type(momentum)}).\")\n self._set_hyper(\"momentum\", momentum)\n\n self.epsilon = epsilon or backend_config.epsilon()\n self.centered = centered\n\n def _create_slots(self, var_list):\n for var in var_list:\n self.add_slot(var, \"rms\")\n if self._momentum:\n for var in var_list:\n self.add_slot(var, \"momentum\")\n if self.centered:\n for var in var_list:\n self.add_slot(var, \"mg\")\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(RMSprop, self)._prepare_local(var_device, var_dtype, apply_state)\n\n rho = tf.identity(self._get_hyper(\"rho\", var_dtype))\n apply_state[(var_device, var_dtype)].update(\n dict(\n neg_lr_t=-apply_state[(var_device, var_dtype)][\"lr_t\"],\n epsilon=tf.convert_to_tensor(\n self.epsilon, var_dtype),\n rho=rho,\n momentum=tf.identity(self._get_hyper(\"momentum\", var_dtype)),\n one_minus_rho=1. - rho))\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n rms = self.get_slot(var, \"rms\")\n if self._momentum:\n mom = self.get_slot(var, \"momentum\")\n if self.centered:\n mg = self.get_slot(var, \"mg\")\n return tf.raw_ops.ResourceApplyCenteredRMSProp(\n var=var.handle,\n mg=mg.handle,\n ms=rms.handle,\n mom=mom.handle,\n lr=coefficients[\"lr_t\"],\n rho=coefficients[\"rho\"],\n momentum=coefficients[\"momentum\"],\n epsilon=coefficients[\"epsilon\"],\n grad=grad,\n use_locking=self._use_locking)\n else:\n return tf.raw_ops.ResourceApplyRMSProp(\n var=var.handle,\n ms=rms.handle,\n mom=mom.handle,\n lr=coefficients[\"lr_t\"],\n rho=coefficients[\"rho\"],\n momentum=coefficients[\"momentum\"],\n epsilon=coefficients[\"epsilon\"],\n grad=grad,\n use_locking=self._use_locking)\n else:\n rms_t = (coefficients[\"rho\"] * rms +\n coefficients[\"one_minus_rho\"] * tf.square(grad))\n rms_t = tf.compat.v1.assign(rms, rms_t, use_locking=self._use_locking)\n denom_t = rms_t\n if self.centered:\n mg = self.get_slot(var, \"mg\")\n mg_t = coefficients[\"rho\"] * mg + coefficients[\"one_minus_rho\"] * grad\n mg_t = tf.compat.v1.assign(mg, mg_t, use_locking=self._use_locking)\n denom_t = rms_t - tf.square(mg_t)\n var_t = var - coefficients[\"lr_t\"] * grad / (\n tf.sqrt(denom_t) + coefficients[\"epsilon\"])\n return tf.compat.v1.assign(var, var_t, use_locking=self._use_locking).op\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n var_device, var_dtype = var.device, var.dtype.base_dtype\n coefficients = ((apply_state or {}).get((var_device, var_dtype))\n or self._fallback_apply_state(var_device, var_dtype))\n\n rms = self.get_slot(var, \"rms\")\n if self._momentum:\n mom = self.get_slot(var, \"momentum\")\n if self.centered:\n mg = self.get_slot(var, \"mg\")\n return tf.raw_ops.ResourceSparseApplyCenteredRMSProp(\n var=var.handle,\n mg=mg.handle,\n ms=rms.handle,\n mom=mom.handle,\n lr=coefficients[\"lr_t\"],\n rho=coefficients[\"rho\"],\n momentum=coefficients[\"momentum\"],\n epsilon=coefficients[\"epsilon\"],\n grad=grad,\n indices=indices,\n use_locking=self._use_locking)\n else:\n return tf.raw_ops.ResourceSparseApplyRMSProp(\n var=var.handle,\n ms=rms.handle,\n mom=mom.handle,\n lr=coefficients[\"lr_t\"],\n rho=coefficients[\"rho\"],\n momentum=coefficients[\"momentum\"],\n epsilon=coefficients[\"epsilon\"],\n grad=grad,\n indices=indices,\n use_locking=self._use_locking)\n else:\n 
rms_scaled_g_values = (grad * grad) * coefficients[\"one_minus_rho\"]\n rms_t = tf.compat.v1.assign(rms, rms * coefficients[\"rho\"],\n use_locking=self._use_locking)\n with tf.control_dependencies([rms_t]):\n rms_t = self._resource_scatter_add(rms, indices, rms_scaled_g_values)\n rms_slice = tf.gather(rms_t, indices)\n denom_slice = rms_slice\n if self.centered:\n mg = self.get_slot(var, \"mg\")\n mg_scaled_g_values = grad * coefficients[\"one_minus_rho\"]\n mg_t = tf.compat.v1.assign(mg, mg * coefficients[\"rho\"],\n use_locking=self._use_locking)\n with tf.control_dependencies([mg_t]):\n mg_t = self._resource_scatter_add(mg, indices, mg_scaled_g_values)\n mg_slice = tf.gather(mg_t, indices)\n denom_slice = rms_slice - tf.square(mg_slice)\n var_update = self._resource_scatter_add(\n var, indices, coefficients[\"neg_lr_t\"] * grad / (\n tf.sqrt(denom_slice) + coefficients[\"epsilon\"]))\n if self.centered:\n return tf.group(*[var_update, rms_t, mg_t])\n return tf.group(*[var_update, rms_t])\n\n def set_weights(self, weights):\n params = self.weights\n # Override set_weights for backward compatibility of Keras V1 optimizer\n # since it does not include iteration at head of the weight list. Set\n # iteration to 0.\n if len(params) == len(weights) + 1:\n weights = [np.array(0)] + weights\n super(RMSprop, self).set_weights(weights)\n\n def get_config(self):\n config = super(RMSprop, self).get_config()\n config.update({\n \"learning_rate\": self._serialize_hyperparameter(\"learning_rate\"),\n \"decay\": self._initial_decay,\n \"rho\": self._serialize_hyperparameter(\"rho\"),\n \"momentum\": self._serialize_hyperparameter(\"momentum\"),\n \"epsilon\": self.epsilon,\n \"centered\": self.centered,\n })\n return config\n\n\nRMSProp = RMSprop\n",
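
The docstring in the entry above describes the RMSprop update only informally. For reference, the momentum-free dense branch of `_resource_apply_dense` shown above reduces to the following NumPy sketch; the helper name `rmsprop_step` and its signature are illustrative only and not part of the Keras API, and the momentum/fused-op paths are intentionally omitted.

import numpy as np

def rmsprop_step(var, grad, rms, mg=None, lr=0.001, rho=0.9,
                 epsilon=1e-7, centered=False):
    """One RMSprop step (dense, momentum == 0); returns (var, rms, mg)."""
    # Discounted moving average of the squared gradient (the "rms" slot).
    rms = rho * rms + (1.0 - rho) * np.square(grad)
    denom = rms
    if centered:
        # Centered variant: also track a moving average of the gradient
        # (the "mg" slot) and use the estimated variance as the denominator.
        mg = rho * mg + (1.0 - rho) * grad
        denom = rms - np.square(mg)
    # Scale the gradient by the root of the (centered) second moment.
    var = var - lr * grad / (np.sqrt(denom) + epsilon)
    return var, rms, mg
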
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for hashing layer.\"\"\"\n\nimport os\nfrom absl.testing import parameterized\n\nimport keras\nfrom keras.engine import input_layer\nfrom keras.engine import training\nfrom keras.layers.preprocessing import hashing\nfrom keras.layers.preprocessing import preprocessing_test_utils\nfrom keras.testing_infra import test_combinations\nfrom keras.testing_infra import test_utils\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\n@test_combinations.run_all_keras_modes(always_skip_v1=True)\nclass HashingTest(test_combinations.TestCase):\n\n @parameterized.named_parameters(\n ('list', list),\n ('tuple', tuple),\n ('numpy', np.array),\n ('array_like', preprocessing_test_utils.ArrayLike),\n )\n def test_tensor_like_inputs(self, data_fn):\n input_data = data_fn([0, 1, 2, 3, 4])\n expected_output = [1, 0, 1, 0, 2]\n\n layer = hashing.Hashing(num_bins=3)\n output_data = layer(input_data)\n self.assertAllEqual(output_data, expected_output)\n\n def test_hash_single_bin(self):\n layer = hashing.Hashing(num_bins=1)\n inp = np.asarray([['A'], ['B'], ['C'], ['D'], ['E']])\n output = layer(inp)\n self.assertAllClose([[0], [0], [0], [0], [0]], output)\n\n def test_hash_dense_input_farmhash(self):\n layer = hashing.Hashing(num_bins=2)\n inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],\n ['skywalker']])\n output = layer(inp)\n # Assert equal for hashed output that should be true on all platforms.\n self.assertAllClose([[0], [0], [1], [0], [0]], output)\n\n def test_hash_dense_input_mask_value_farmhash(self):\n empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')\n omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')\n inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],\n ['skywalker']])\n empty_mask_output = empty_mask_layer(inp)\n omar_mask_output = omar_mask_layer(inp)\n # Outputs should be one more than test_hash_dense_input_farmhash (the zeroth\n # bin is now reserved for masks).\n self.assertAllClose([[1], [1], [2], [1], [1]], empty_mask_output)\n # 'omar' should map to 0.\n self.assertAllClose([[0], [1], [2], [1], [1]], omar_mask_output)\n\n def test_hash_dense_list_input_farmhash(self):\n layer = hashing.Hashing(num_bins=2)\n inp = [['omar'], ['stringer'], ['marlo'], ['wire'], ['skywalker']]\n output = layer(inp)\n # Assert equal for hashed output that should be true on all platforms.\n self.assertAllClose([[0], [0], [1], [0], [0]], output)\n\n inp = ['omar', 'stringer', 'marlo', 'wire', 'skywalker']\n output = layer(inp)\n # Assert equal for hashed output that should be true on all platforms.\n self.assertAllClose([0, 0, 1, 0, 0], output)\n\n def test_hash_dense_int_input_farmhash(self):\n layer = hashing.Hashing(num_bins=3)\n inp = np.asarray([[0], [1], [2], [3], [4]])\n output = layer(inp)\n # Assert equal for hashed output that should be true on 
all platforms.\n self.assertAllClose([[1], [0], [1], [0], [2]], output)\n\n def test_hash_dense_input_siphash(self):\n layer = hashing.Hashing(num_bins=2, salt=[133, 137])\n inp = np.asarray([['omar'], ['stringer'], ['marlo'], ['wire'],\n ['skywalker']])\n output = layer(inp)\n # Assert equal for hashed output that should be true on all platforms.\n # Note the result is different from FarmHash.\n self.assertAllClose([[0], [1], [0], [1], [0]], output)\n\n layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])\n output_2 = layer_2(inp)\n # Note the result is different from (133, 137).\n self.assertAllClose([[1], [0], [1], [0], [1]], output_2)\n\n def test_hash_dense_int_input_siphash(self):\n layer = hashing.Hashing(num_bins=3, salt=[133, 137])\n inp = np.asarray([[0], [1], [2], [3], [4]])\n output = layer(inp)\n # Assert equal for hashed output that should be true on all platforms.\n self.assertAllClose([[1], [1], [2], [0], [1]], output)\n\n def test_hash_sparse_input_farmhash(self):\n layer = hashing.Hashing(num_bins=2)\n indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]\n inp = tf.SparseTensor(\n indices=indices,\n values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],\n dense_shape=[3, 2])\n output = layer(inp)\n self.assertAllClose(indices, output.indices)\n self.assertAllClose([0, 0, 1, 0, 0], output.values)\n\n def test_hash_sparse_input_mask_value_farmhash(self):\n empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')\n omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')\n indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]\n inp = tf.SparseTensor(\n indices=indices,\n values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],\n dense_shape=[3, 2])\n empty_mask_output = empty_mask_layer(inp)\n omar_mask_output = omar_mask_layer(inp)\n self.assertAllClose(indices, omar_mask_output.indices)\n self.assertAllClose(indices, empty_mask_output.indices)\n # Outputs should be one more than test_hash_sparse_input_farmhash (the\n # zeroth bin is now reserved for masks).\n self.assertAllClose([1, 1, 2, 1, 1], empty_mask_output.values)\n # 'omar' should map to 0.\n self.assertAllClose([0, 1, 2, 1, 1], omar_mask_output.values)\n\n def test_hash_sparse_int_input_farmhash(self):\n layer = hashing.Hashing(num_bins=3)\n indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]\n inp = tf.SparseTensor(\n indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2])\n output = layer(inp)\n self.assertAllClose(indices, output.indices)\n self.assertAllClose([1, 0, 1, 0, 2], output.values)\n\n def test_hash_sparse_input_siphash(self):\n layer = hashing.Hashing(num_bins=2, salt=[133, 137])\n indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]\n inp = tf.SparseTensor(\n indices=indices,\n values=['omar', 'stringer', 'marlo', 'wire', 'skywalker'],\n dense_shape=[3, 2])\n output = layer(inp)\n self.assertAllClose(output.indices, indices)\n # The result should be same with test_hash_dense_input_siphash.\n self.assertAllClose([0, 1, 0, 1, 0], output.values)\n\n layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])\n output = layer_2(inp)\n # The result should be same with test_hash_dense_input_siphash.\n self.assertAllClose([1, 0, 1, 0, 1], output.values)\n\n def test_hash_sparse_int_input_siphash(self):\n layer = hashing.Hashing(num_bins=3, salt=[133, 137])\n indices = [[0, 0], [1, 0], [1, 1], [2, 0], [2, 1]]\n inp = tf.SparseTensor(\n indices=indices, values=[0, 1, 2, 3, 4], dense_shape=[3, 2])\n output = layer(inp)\n self.assertAllClose(indices, output.indices)\n self.assertAllClose([1, 1, 2, 
0, 1], output.values)\n\n def test_hash_ragged_string_input_farmhash(self):\n layer = hashing.Hashing(num_bins=2)\n inp_data = tf.ragged.constant(\n [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],\n dtype=tf.string)\n out_data = layer(inp_data)\n # Same hashed output as test_hash_sparse_input_farmhash\n expected_output = [[0, 0, 1, 0], [1, 0, 0]]\n self.assertAllEqual(expected_output, out_data)\n\n inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)\n out_t = layer(inp_t)\n model = training.Model(inputs=inp_t, outputs=out_t)\n self.assertAllClose(out_data, model.predict(inp_data))\n\n def test_hash_ragged_input_mask_value(self):\n empty_mask_layer = hashing.Hashing(num_bins=3, mask_value='')\n omar_mask_layer = hashing.Hashing(num_bins=3, mask_value='omar')\n inp_data = tf.ragged.constant(\n [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],\n dtype=tf.string)\n empty_mask_output = empty_mask_layer(inp_data)\n omar_mask_output = omar_mask_layer(inp_data)\n # Outputs should be one more than test_hash_ragged_string_input_farmhash\n # (the zeroth bin is now reserved for masks).\n expected_output = [[1, 1, 2, 1], [2, 1, 1]]\n self.assertAllClose(expected_output, empty_mask_output)\n # 'omar' should map to 0.\n expected_output = [[0, 1, 2, 1], [2, 1, 1]]\n self.assertAllClose(expected_output, omar_mask_output)\n\n def test_hash_ragged_int_input_farmhash(self):\n layer = hashing.Hashing(num_bins=3)\n inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype=tf.int64)\n out_data = layer(inp_data)\n # Same hashed output as test_hash_sparse_input_farmhash\n expected_output = [[1, 0, 0, 2], [1, 0, 1]]\n self.assertAllEqual(expected_output, out_data)\n\n inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.int64)\n out_t = layer(inp_t)\n model = training.Model(inputs=inp_t, outputs=out_t)\n self.assertAllClose(out_data, model.predict(inp_data))\n\n def test_hash_ragged_string_input_siphash(self):\n layer = hashing.Hashing(num_bins=2, salt=[133, 137])\n inp_data = tf.ragged.constant(\n [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],\n dtype=tf.string)\n out_data = layer(inp_data)\n # Same hashed output as test_hash_dense_input_siphash\n expected_output = [[0, 1, 0, 1], [0, 0, 1]]\n self.assertAllEqual(expected_output, out_data)\n\n inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.string)\n out_t = layer(inp_t)\n model = training.Model(inputs=inp_t, outputs=out_t)\n self.assertAllClose(out_data, model.predict(inp_data))\n\n layer_2 = hashing.Hashing(num_bins=2, salt=[211, 137])\n out_data = layer_2(inp_data)\n expected_output = [[1, 0, 1, 0], [1, 1, 0]]\n self.assertAllEqual(expected_output, out_data)\n\n out_t = layer_2(inp_t)\n model = training.Model(inputs=inp_t, outputs=out_t)\n self.assertAllClose(out_data, model.predict(inp_data))\n\n def test_hash_ragged_int_input_siphash(self):\n layer = hashing.Hashing(num_bins=3, salt=[133, 137])\n inp_data = tf.ragged.constant([[0, 1, 3, 4], [2, 1, 0]], dtype=tf.int64)\n out_data = layer(inp_data)\n # Same hashed output as test_hash_sparse_input_farmhash\n expected_output = [[1, 1, 0, 1], [2, 1, 1]]\n self.assertAllEqual(expected_output, out_data)\n\n inp_t = input_layer.Input(shape=(None,), ragged=True, dtype=tf.int64)\n out_t = layer(inp_t)\n model = training.Model(inputs=inp_t, outputs=out_t)\n self.assertAllClose(out_data, model.predict(inp_data))\n\n def test_invalid_inputs(self):\n with self.assertRaisesRegex(ValueError, 'cannot be 
`None`'):\n _ = hashing.Hashing(num_bins=None)\n with self.assertRaisesRegex(ValueError, 'cannot be `None`'):\n _ = hashing.Hashing(num_bins=-1)\n with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):\n _ = hashing.Hashing(num_bins=2, salt='string')\n with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):\n _ = hashing.Hashing(num_bins=2, salt=[1])\n with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):\n _ = hashing.Hashing(num_bins=1, salt=tf.constant([133, 137]))\n\n def test_one_hot_output(self):\n input_array = np.array([0, 1, 2, 3, 4])\n\n expected_output = [[0., 1., 0.],\n [1., 0., 0.],\n [0., 1., 0.],\n [1., 0., 0.],\n [0., 0., 1.]]\n expected_output_shape = [None, 3]\n\n inputs = keras.Input(shape=(1,), dtype='int32')\n layer = hashing.Hashing(num_bins=3, output_mode='one_hot')\n outputs = layer(inputs)\n self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n\n model = keras.Model(inputs, outputs)\n output_data = model(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_multi_hot_output(self):\n input_array = np.array([0, 1, 2, 3, 4])\n\n expected_output = [1., 1., 1.]\n expected_output_shape = [None, 3]\n\n inputs = keras.Input(shape=(3,), dtype='int32')\n layer = hashing.Hashing(num_bins=3, output_mode='multi_hot')\n outputs = layer(inputs)\n self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n\n model = keras.Model(inputs, outputs)\n output_data = model(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n def test_count_output(self):\n input_array = np.array([0, 1, 2, 3, 4])\n\n expected_output = [2., 2., 1.]\n expected_output_shape = [None, 3]\n\n inputs = keras.Input(shape=(3,), dtype='int32')\n layer = hashing.Hashing(num_bins=3, output_mode='count')\n outputs = layer(inputs)\n self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n\n model = keras.Model(inputs, outputs)\n output_data = model(input_array)\n self.assertAllEqual(expected_output, output_data)\n\n @parameterized.named_parameters(\n ('int32', tf.int32),\n ('int64', tf.int64),\n )\n def test_output_dtype(self, dtype):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype='string')\n layer = hashing.Hashing(num_bins=3, dtype=dtype)\n output = layer(input_data)\n self.assertAllEqual(output.dtype, dtype)\n\n def test_legacy_dtype_compat(self):\n inputs = keras.Input(batch_size=16, shape=(4,), dtype='string')\n layer = hashing.Hashing(num_bins=3, dtype='float32')\n outputs = layer(inputs)\n self.assertAllEqual(outputs.dtype, tf.int64)\n # In TF1 we sometimes face an explicit dtype=None in the config.\n layer = hashing.Hashing(num_bins=3, dtype=None)\n outputs = layer(inputs)\n self.assertAllEqual(outputs.dtype, tf.int64)\n\n @parameterized.named_parameters(\n ('float32', tf.float32),\n ('float64', tf.float64),\n )\n def test_one_hot_output_dtype(self, dtype):\n input_data = keras.Input(batch_size=16, shape=(1,), dtype='string')\n layer = hashing.Hashing(num_bins=3, output_mode='one_hot', dtype=dtype)\n output = layer(input_data)\n self.assertAllEqual(output.dtype, dtype)\n\n def test_hash_compute_output_signature(self):\n input_shape = tf.TensorShape([2, 3])\n input_spec = tf.TensorSpec(input_shape, tf.string)\n layer = hashing.Hashing(num_bins=2)\n output_spec = layer.compute_output_signature(input_spec)\n self.assertEqual(output_spec.shape.dims, input_shape.dims)\n self.assertEqual(output_spec.dtype, tf.int64)\n\n @test_utils.run_v2_only\n def 
test_config_with_custom_name(self):\n layer = hashing.Hashing(num_bins=2, name='hashing')\n config = layer.get_config()\n layer_1 = hashing.Hashing.from_config(config)\n self.assertEqual(layer_1.name, layer.name)\n\n def test_saved_model(self):\n input_data = np.array(['omar', 'stringer', 'marlo', 'wire', 'skywalker'])\n\n inputs = keras.Input(shape=(None,), dtype=tf.string)\n outputs = hashing.Hashing(num_bins=100)(inputs)\n model = keras.Model(inputs=inputs, outputs=outputs)\n\n original_output_data = model(input_data)\n\n # Save the model to disk.\n output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model')\n model.save(output_path, save_format='tf')\n loaded_model = keras.models.load_model(output_path)\n\n # Ensure that the loaded model is unique (so that the save/load is real)\n self.assertIsNot(model, loaded_model)\n\n # Validate correctness of the new model.\n new_output_data = loaded_model(input_data)\n self.assertAllClose(new_output_data, original_output_data)\n\n @parameterized.named_parameters(\n (\n 'list_input',\n [1, 2, 3],\n [1, 1, 1],\n ),\n (\n 'list_input_2d',\n [[1], [2], [3]],\n [[1], [1], [1]],\n ),\n (\n 'list_input_2d_multiple',\n [[1, 2], [2, 3], [3, 4]],\n [[1, 1], [1, 1], [1, 1]],\n ),\n (\n 'list_input_3d',\n [[[1], [2]], [[2], [3]], [[3], [4]]],\n [[[1], [1]], [[1], [1]], [[1], [1]]],\n ),\n )\n def test_hash_list_input(self, input_data, expected):\n layer = hashing.Hashing(num_bins=2)\n out_data = layer(input_data)\n self.assertAllEqual(expected, out_data.numpy().tolist())\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
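
The tests in the entry above pin down the Hashing layer's observable behaviour (stable FarmHash bins by default, SipHash when a two-element salt is given, bin 0 reserved when mask_value is set). The snippet below is a minimal usage sketch based on those tests, assuming a TensorFlow release that exposes the layer publicly as tf.keras.layers.Hashing (the tests themselves import it internally from keras.layers.preprocessing.hashing); the expected outputs quoted in comments come from the tests above.

import tensorflow as tf

# Unsalted hashing uses FarmHash64, so bin assignments are deterministic
# across runs and platforms; per test_tensor_like_inputs above,
# [0, 1, 2, 3, 4] maps to [1, 0, 1, 0, 2] with three bins.
layer = tf.keras.layers.Hashing(num_bins=3)
print(layer([0, 1, 2, 3, 4]))  # tf.Tensor([1 0 1 0 2], shape=(5,), dtype=int64)

# A two-element salt switches to SipHash64, giving a different but still
# deterministic partition; mask_value reserves bin 0 for the masked token.
salted = tf.keras.layers.Hashing(num_bins=3, salt=[133, 137])
masked = tf.keras.layers.Hashing(num_bins=3, mask_value='')
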
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for TensorFlow 2.0 layer behavior.\"\"\"\n# pylint: disable=g-bad-import-order\nimport tensorflow.compat.v2 as tf\n\nimport copy\nimport os\n\nimport numpy as np\nfrom keras import backend\nfrom keras.testing_infra import test_combinations\nfrom keras import layers\nfrom keras import regularizers\nfrom keras.testing_infra import test_utils\nfrom keras.engine import base_layer\nfrom keras.engine import input_layer\nfrom keras.engine import sequential\nfrom keras.engine import training as training_lib\nfrom keras.legacy_tf_layers import core as legacy_core\nfrom keras.optimizers.optimizer_v2 import rmsprop\nfrom keras.utils import control_flow_util\n\n\nclass DynamicLayer(base_layer.Layer):\n\n def __init__(self, dynamic=False, **kwargs):\n super(DynamicLayer, self).__init__(dynamic=dynamic, **kwargs)\n\n def call(self, inputs):\n samples = tf.TensorArray(\n dtype=tf.float32, size=tf.shape(inputs)[0])\n for idx, sample in enumerate(inputs):\n samples = samples.write(idx, tf.square(sample))\n return samples.stack()\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\nclass InvalidLayer(base_layer.Layer):\n\n def call(self, inputs):\n raise ValueError('You did something wrong!')\n\n\n@test_utils.run_v2_only\nclass BaseLayerTest(test_combinations.TestCase):\n\n @test_combinations.generate(test_combinations.keras_mode_combinations())\n def test_layer_instrumentation(self):\n layer = layers.Add()\n self.assertTrue(layer._instrumented_keras_api)\n self.assertTrue(layer._instrumented_keras_layer_class)\n self.assertFalse(layer._instrumented_keras_model_class)\n self.assertTrue(base_layer.keras_api_gauge.get_cell('tf.keras.layers.Add'))\n\n # Verify this was not instrumented as a legacy layer\n self.assertFalse(\n base_layer.keras_api_gauge.get_cell('legacy_layer').value())\n base_layer.keras_api_gauge.get_cell('tf.keras.layers.Add').set(False)\n\n @test_combinations.generate(test_combinations.keras_model_type_combinations())\n def test_dynamic_layer(self):\n model = test_utils.get_model_from_layers([DynamicLayer(dynamic=True)],\n input_shape=(3,))\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n\n @test_combinations.generate(test_combinations.keras_model_type_combinations())\n def test_dynamic_layer_error(self):\n # Functional Models hit the `dyanamic=True` error during construction.\n # Subclass Models should just throw the original autograph error during\n # execution.\n raised_error = False\n try:\n model = test_utils.get_model_from_layers([DynamicLayer()],\n input_shape=(3,))\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n except 
tf.errors.OperatorNotAllowedInGraphError as e:\n if 'iterating over `tf.Tensor`' in str(e):\n raised_error = True\n elif 'Iterating over a symbolic `tf.Tensor`' in str(e):\n raised_error = True\n except TypeError as e:\n if 'attempting to use Python control flow' in str(e):\n raised_error = True\n elif 'Attempting to use Python control flow' in str(e):\n raised_error = True\n self.assertTrue(raised_error)\n\n @test_combinations.generate(test_combinations.keras_model_type_combinations())\n def test_dynamic_layer_error_running_in_graph_mode(self):\n with tf.compat.v1.get_default_graph().as_default():\n model = test_utils.get_model_from_layers([DynamicLayer(dynamic=True)],\n input_shape=(3,))\n self.assertEqual(model.dynamic, True)\n # But then you cannot run the model since you're in a graph scope.\n with self.assertRaisesRegex(ValueError,\n 'You must enable eager execution'):\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n\n def test_manual_compute_output_shape(self):\n\n class BuildCounter(base_layer.Layer):\n\n def __init__(self, *args, **kwargs): # pylint: disable=redefined-outer-name\n super(BuildCounter, self).__init__(*args, **kwargs)\n self.build_counter = 0\n\n def build(self, input_shape):\n self.build_counter += 1\n self.build_shape = input_shape\n\n def call(self, inputs):\n return inputs\n\n layer = BuildCounter(dtype=tf.float64)\n output_shape = layer.compute_output_shape((None, 10))\n self.assertEqual(layer.build_counter, 1)\n self.assertEqual(layer.build_shape.as_list(), [None, 10])\n self.assertEqual(output_shape.as_list(), [None, 10])\n output_signature = layer.compute_output_signature(\n tf.TensorSpec(dtype=tf.float64, shape=[None, 10]))\n self.assertEqual(layer.build_counter, 1)\n self.assertEqual(layer.build_shape.as_list(), [None, 10])\n self.assertEqual(output_signature.dtype, tf.float64)\n self.assertEqual(output_signature.shape.as_list(), [None, 10])\n layer(np.ones((5, 10)))\n self.assertEqual(layer.build_counter, 1)\n self.assertEqual(layer.build_shape.as_list(), [None, 10])\n\n def test_dynamic_layer_with_deferred_sequential_model(self):\n model = sequential.Sequential([DynamicLayer(dynamic=True), layers.Dense(3)])\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n\n def test_nested_dynamic_layers_in_eager_mode(self):\n inputs = input_layer.Input((3,))\n outputs = DynamicLayer(dynamic=True)(inputs)\n inner_model = training_lib.Model(inputs, outputs)\n self.assertEqual(inner_model.dynamic, True)\n\n inputs = input_layer.Input((3,))\n x = DynamicLayer(dynamic=True)(inputs)\n outputs = inner_model(x)\n\n model = training_lib.Model(inputs, outputs)\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n\n def test_dynamic_subclassed_model_no_shape_inference(self):\n\n class MyModel(training_lib.Model):\n\n def __init__(self):\n super(MyModel, self).__init__(dynamic=True)\n self.layer1 = layers.Dense(3)\n self.layer2 = layers.Dense(3)\n\n def call(self, inputs):\n if tf.reduce_sum(inputs) > 0:\n return self.layer1(inputs)\n else:\n return self.layer2(inputs)\n\n model = MyModel()\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n self.assertEqual(model.run_eagerly, True)\n 
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))\n self.assertEqual(model.outputs, None)\n\n def test_dynamic_subclassed_model_with_shape_inference(self):\n\n class MyModel(training_lib.Model):\n\n def __init__(self):\n super(MyModel, self).__init__(dynamic=True)\n self.layer1 = layers.Dense(3)\n self.layer2 = layers.Dense(3)\n\n def call(self, inputs):\n if tf.reduce_sum(inputs) > 0:\n return self.layer1(inputs)\n else:\n return self.layer2(inputs)\n\n def compute_output_shape(self, input_shape):\n return tuple(input_shape[:-1].as_list()) + (3,)\n\n model = MyModel()\n self.assertEqual(model.dynamic, True)\n model.compile(rmsprop.RMSprop(0.001), loss='mse')\n x, y = np.random.random((2, 3)), np.random.random((2, 3))\n model.train_on_batch(x, y)\n outputs = model(x)\n self.assertEqual(outputs.shape.as_list(), [2, 3])\n\n def test_deepcopy(self):\n bias_reg = lambda x: 1e-3 * tf.reduce_sum(x)\n layer = layers.Conv2D(32, (3, 3), bias_regularizer=bias_reg)\n # Call the Layer on data to generate regularize losses.\n layer(tf.ones((1, 10, 10, 3)))\n self.assertLen(layer.losses, 1)\n new_layer = copy.deepcopy(layer)\n self.assertEqual(new_layer.bias_regularizer, bias_reg)\n self.assertEqual(layer.get_config(), new_layer.get_config())\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_invalid_forward_pass(self):\n inputs = input_layer.Input((3,))\n with self.assertRaisesRegex(ValueError, 'You did something wrong!'):\n _ = InvalidLayer()(inputs)\n\n def test_no_legacy_model(self):\n inputs = input_layer.Input((1,))\n legacy_dense_0 = legacy_core.Dense(1, name='legacy_dense_0')\n legacy_dense_1 = legacy_core.Dense(1, name='legacy_dense_1')\n\n layer = legacy_dense_0(inputs)\n layer = layers.Dense(1)(layer)\n layer = legacy_dense_1(layer)\n\n expected_regex = (r'The following are legacy tf\\.layers\\.Layers:\\n '\n '{}\\n {}'.format(legacy_dense_0, legacy_dense_1))\n\n with self.assertRaisesRegex(TypeError, expected_regex):\n _ = training_lib.Model(inputs=[inputs], outputs=[layer])\n\n model = training_lib.Model(inputs=[inputs], outputs=[inputs])\n with self.assertRaisesRegex(TypeError, expected_regex):\n model._insert_layers([legacy_dense_0, legacy_dense_1])\n\n def test_no_legacy_sequential(self):\n layer = [layers.Dense(1), legacy_core.Dense(1, name='legacy_dense_0')]\n\n expected_regex = r'legacy tf\\.layers\\.Layers:\\n {}'.format(layer[1])\n with self.assertRaisesRegex(TypeError, expected_regex):\n _ = sequential.Sequential(layer)\n\n with self.assertRaisesRegex(TypeError, expected_regex):\n _ = sequential.Sequential([input_layer.Input(shape=(4,))] + layer)\n\n model = sequential.Sequential()\n with self.assertRaisesRegex(TypeError, expected_regex):\n for l in layer:\n model.add(l)\n\n @test_combinations.generate(\n test_combinations.times(\n test_combinations.keras_model_type_combinations(),\n test_combinations.combine(mode=['graph', 'eager'])))\n def test_build_with_numpy_data(self):\n model_layers = [\n layers.Dense(3, activation='relu', kernel_initializer='ones'),\n layers.Dense(1, activation='sigmoid', kernel_initializer='ones')\n ]\n model = test_utils.get_model_from_layers(model_layers, input_shape=(4,))\n model(np.zeros((2, 4), dtype='float32'))\n self.assertTrue(model.built)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_default_add_weight(self):\n\n class TestLayer(base_layer.Layer):\n\n def __init__(self):\n super(TestLayer, self).__init__()\n self.default_weight 
= self.add_weight()\n self.weight_without_name = self.add_weight(shape=(3, 4))\n self.regularized_weight_without_name = self.add_weight(\n shape=(3, 4), regularizer='l2')\n\n layer = TestLayer()\n self.assertEqual(layer.default_weight.shape.as_list(), [])\n self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])\n self.assertEqual(layer.default_weight.dtype.name, 'float32')\n self.assertEqual(layer.weight_without_name.dtype.name, 'float32')\n self.assertEqual(len(layer.losses), 1)\n if not tf.executing_eagerly():\n # Cannot access tensor.name in eager execution.\n self.assertIn('Variable_2/Regularizer', layer.losses[0].name)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_add_weight_by_getter(self):\n layer = base_layer.Layer()\n variable = tf.Variable('abc')\n added = layer.add_weight(\n dtype=tf.string, getter=lambda *_, **__: variable)\n self.assertIs(variable, added)\n\n @test_combinations.generate(\n test_combinations.keras_mode_combinations(mode=['eager']))\n def test_learning_phase_freezing_for_layers(self):\n\n class LearningPhaseLayer(base_layer.Layer):\n\n def call(self, inputs):\n return backend.in_train_phase(lambda: tf.ones_like(inputs),\n lambda: tf.zeros_like(inputs))\n\n def get_learning_phase_value():\n model = sequential.Sequential([LearningPhaseLayer(input_shape=(1,))])\n model._run_eagerly = test_utils.should_run_eagerly()\n return np.sum(model(np.ones((1, 1))))\n\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Test scope.\n with backend.learning_phase_scope(1):\n self.assertEqual(get_learning_phase_value(), 1)\n\n # The effects of the scope end after exiting it.\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Test setting.\n backend.set_learning_phase(1)\n self.assertEqual(get_learning_phase_value(), 1)\n backend.set_learning_phase(0)\n self.assertEqual(get_learning_phase_value(), 0)\n\n # Cannot be enabled with `run_eagerly=True`, see b/123904578\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_layer_can_return_variable(self):\n\n class ComputeSum(base_layer.Layer):\n\n def __init__(self):\n super(ComputeSum, self).__init__()\n self.total = tf.Variable(\n initial_value=tf.zeros((1, 1)), trainable=False)\n if not tf.executing_eagerly():\n backend.get_session().run(self.total.initializer)\n\n def call(self, inputs):\n self.total.assign_add(inputs)\n return self.total\n\n inputs = input_layer.Input(shape=(1,))\n model = training_lib.Model(inputs, ComputeSum()(inputs))\n model.predict(np.ones((1, 1)))\n\n def _get_layer_with_training_arg(self):\n\n class TrainingLayer(base_layer.Layer):\n \"\"\"A layer with a `training` argument in a defuned `call`.\"\"\"\n\n @tf.function\n def call(self, inputs, training=None):\n if training is None:\n training = backend.learning_phase()\n return control_flow_util.smart_cond(\n training, lambda: tf.ones_like(inputs),\n lambda: tf.zeros_like(inputs))\n\n return TrainingLayer()\n\n # b/124459427: can't test with `run_eagerly=True` for now.\n @test_combinations.generate(\n test_combinations.times(\n test_combinations.keras_mode_combinations(),\n test_combinations.keras_model_type_combinations()))\n def test_training_arg_in_defun(self):\n layer = self._get_layer_with_training_arg()\n model = test_utils.get_model_from_layers([layer], input_shape=(1,))\n model.compile(rmsprop.RMSprop(0.),\n loss='mae')\n history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))\n self.assertEqual(history.history['loss'][0], 1.)\n loss = 
model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))\n self.assertEqual(loss, 0.)\n\n # Test that the argument injection performed in `call` is not active\n # when the argument is passed explicitly.\n layer = self._get_layer_with_training_arg()\n inputs = input_layer.Input(shape=(1,))\n # Pass `training` by name\n outputs = layer(inputs, training=False)\n model = training_lib.Model(inputs, outputs)\n model.compile(rmsprop.RMSprop(0.),\n loss='mae')\n history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))\n self.assertEqual(history.history['loss'][0], 0.)\n\n @test_combinations.generate(\n test_combinations.times(\n test_combinations.keras_mode_combinations(),\n test_combinations.keras_model_type_combinations()))\n def test_raw_variable_assignment(self):\n\n class RawVariableLayer(base_layer.Layer):\n\n def __init__(self, **kwargs):\n super(RawVariableLayer, self).__init__(**kwargs)\n # Test variables in nested structure.\n self.var_list = [tf.Variable(1.), {'a': tf.Variable(2.)}]\n\n def call(self, inputs):\n return inputs * self.var_list[0] * self.var_list[1]['a']\n\n model = test_utils.get_model_from_layers([RawVariableLayer()],\n input_shape=(10,))\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n x, y = np.ones((10, 10)), np.ones((10, 10))\n # Checks that variables get initialized.\n model.fit(x, y, batch_size=2, epochs=2)\n\n @test_combinations.generate(test_combinations.combine(mode=['eager']))\n def test_composite_variable_assignment(self):\n\n class Spec(tf.TypeSpec):\n\n value_type = property(lambda self: CompositeVariable)\n\n def _component_specs(self):\n pass\n\n def _serialize(self):\n pass\n\n def _to_components(self, value):\n return value._variables\n\n def _from_components(self, variable_list):\n return CompositeVariable(variable_list)\n\n class CompositeVariable(tf.__internal__.CompositeTensor):\n\n def __init__(self, variable_list):\n self._variables = variable_list\n\n @property\n def _type_spec(self):\n return Spec()\n\n class CompositeVariableLayer(base_layer.Layer):\n\n def __init__(self):\n super().__init__()\n self.composite_var = CompositeVariable(\n [tf.Variable(1.),\n tf.Variable(2.)])\n\n layer = CompositeVariableLayer()\n self.assertLen(layer.weights, 2)\n self.assertIsInstance(layer.weights[0], tf.Variable)\n self.assertIsInstance(layer.weights[1], tf.Variable)\n self.assertEqual(self.evaluate(layer.weights[0]), 1.)\n self.assertEqual(self.evaluate(layer.weights[1]), 2.)\n\n def test_exception_if_trainable_not_boolean(self):\n base_layer.Layer(trainable=True)\n base_layer.Layer(trainable=tf.constant(True))\n base_layer.Layer(trainable=tf.Variable(tf.constant(True)))\n with self.assertRaisesRegex(\n TypeError, 'Expected `trainable` argument to be a boolean'):\n base_layer.Layer(trainable=0)\n\n def test_exception_if_dynamic_not_boolean(self):\n base_layer.Layer(dynamic=True)\n with self.assertRaisesRegex(TypeError,\n 'Expected `dynamic` argument to be a boolean'):\n base_layer.Layer(dynamic=0)\n\n def test_exception_if_name_not_string_or_none(self):\n base_layer.Layer(name=None)\n base_layer.Layer(name='layer_name')\n with self.assertRaisesRegex(TypeError,\n 'Expected `name` argument to be a string'):\n base_layer.Layer(name=0)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_layer_names(self):\n inputs = input_layer.Input(shape=[2])\n add1 = inputs + inputs\n add2 = layers.Add()([inputs, inputs])\n add3 = inputs + inputs\n add4 = layers.Add()([inputs, inputs])\n model = 
training_lib.Model(inputs=[inputs],\n outputs=[add1, add2, add3, add4])\n actual_names = [l.name for l in model.layers]\n graph_names = [\n 'input_1', 'tf_op_layer_add', 'add', 'tf_op_layer_add_2', 'add_1'\n ]\n eager_names = [\n 'input_1', 'tf.__operators__.add', 'add', 'tf.__operators__.add_1',\n 'add_1'\n ]\n for actual, eager, graph in zip(actual_names, graph_names, eager_names):\n self.assertIn(actual, {eager, graph})\n\n @test_combinations.generate(test_combinations.combine(mode=['eager']))\n def test_layer_names_after_loading(self):\n backend.clear_session()\n # Mimic loading a model that already contained add layers with\n # name = 'add_1' and 'tf.__operators__.add'\n layers.Add(name='add_1')\n layers.Add(name='tf.__operators__.add')\n\n inputs = input_layer.Input(shape=[2])\n add1 = inputs + inputs\n add2 = layers.Add()([inputs, inputs])\n add3 = inputs + inputs\n add4 = layers.Add()([inputs, inputs])\n model = training_lib.Model(\n inputs=[inputs], outputs=[add1, add2, add3, add4])\n actual_names = [l.name for l in model.layers]\n # The generated op layer names should have avoided layer names seen in\n # the loaded model. (This avoiance should not apply to non-op-layers)\n expected_names = [\n 'input_1', 'tf.__operators__.add_1',\n 'add', 'tf.__operators__.add_2', 'add_1'\n ]\n self.assertAllEqual(actual_names, expected_names)\n\n def test_add_trainable_weight_on_frozen_layer(self):\n\n class TestLayer(base_layer.Layer):\n\n def build(self, input_shape):\n self.w = self.add_weight(shape=(), trainable=True)\n\n def call(self, inputs):\n return self.w * inputs\n\n layer = TestLayer()\n layer.trainable = False\n layer.build(None)\n layer.trainable = True\n self.assertListEqual(layer.trainable_weights, [layer.w])\n\n @test_combinations.generate(\n test_combinations.times(\n test_combinations.keras_mode_combinations(),\n test_combinations.keras_model_type_combinations()))\n def test_passing_initial_weights_values(self):\n kernel_value = np.random.random((10, 2))\n layer_with_weights = layers.Dense(2, use_bias=False, weights=[kernel_value])\n\n model = test_utils.get_model_from_layers([layer_with_weights],\n input_shape=(10,))\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n inputs = np.random.random((3, 10))\n out = model.predict(inputs)\n self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value)\n self.assertAllClose(out, np.dot(inputs, kernel_value))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_set_weights_and_get_weights(self):\n layer = layers.Dense(2)\n layer.build((None, 10))\n kernel = np.random.random((10, 2))\n bias = np.random.random((2,))\n layer.set_weights([kernel, bias])\n weights = layer.get_weights()\n self.assertEqual(len(weights), 2)\n self.assertAllClose(weights[0], kernel)\n self.assertAllClose(weights[1], bias)\n with self.assertRaisesRegex(ValueError,\n 'but the layer was expecting 2 weights'):\n layer.set_weights([1, 2, 3])\n with self.assertRaisesRegex(ValueError,\n 'not compatible with provided weight shape'):\n layer.set_weights([kernel.T, bias])\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_set_weights_accepts_output_of_get_weights(self):\n layer = layers.Layer()\n layer.add_weight(name='scalar_float', shape=(), dtype=tf.float32)\n layer.add_weight(name='scalar_string', shape=(), dtype=tf.string,\n initializer=lambda *a, **k: 'abc')\n layer.add_weight(name='vector_float', shape=(3,), dtype=tf.float32)\n 
layer.add_weight(name='vector_string', shape=(2,), dtype=tf.string,\n initializer=lambda *a, **k: 2 * ['abc'])\n layer.set_weights(layer.get_weights())\n\n def test_get_config_error(self):\n\n class MyLayer(base_layer.Layer):\n\n def __init__(self, my_kwarg='default', **kwargs):\n super(MyLayer, self).__init__(**kwargs)\n self.my_kwarg = my_kwarg\n\n # `__init__` includes kwargs but `get_config` is not overridden, so\n # an error should be thrown:\n with self.assertRaisesRegex(NotImplementedError, 'Layer MyLayer has'):\n MyLayer('custom').get_config()\n\n class MyLayerNew(base_layer.Layer):\n\n def __init__(self, my_kwarg='default', **kwargs):\n super(MyLayerNew, self).__init__(**kwargs)\n self.my_kwarg = my_kwarg\n\n def get_config(self):\n config = super(MyLayerNew, self).get_config()\n config['my_kwarg'] = self.my_kwarg\n return config\n\n # Test to make sure that error is not raised if the method call is\n # from an overridden `get_config`:\n self.assertEqual(MyLayerNew('custom').get_config()['my_kwarg'], 'custom')\n\n class MyLayerNew2(base_layer.Layer):\n\n def __init__(self, name='MyLayerName', dtype=None, **kwargs): # pylint:disable=redefined-outer-name\n super(MyLayerNew2, self).__init__(name=name, dtype=dtype, **kwargs)\n\n # Check that if the kwargs in `__init__` are base layer constructor\n # arguments, no error is thrown:\n self.assertEqual(MyLayerNew2(name='New').get_config()['name'], 'New')\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_count_params(self):\n dense = layers.Dense(16)\n dense.build((None, 4))\n self.assertEqual(dense.count_params(), 16 * 4 + 16)\n\n dense = layers.Dense(16)\n with self.assertRaisesRegex(ValueError, 'call `count_params`'):\n dense.count_params()\n\n model = sequential.Sequential(layers.Dense(16))\n with self.assertRaisesRegex(ValueError, 'call `count_params`'):\n model.count_params()\n\n dense = layers.Dense(16, input_dim=4)\n model = sequential.Sequential(dense)\n self.assertEqual(model.count_params(), 16 * 4 + 16)\n\n def test_super_not_called(self):\n\n class CustomLayerNotCallingSuper(base_layer.Layer):\n\n def __init__(self):\n pass\n\n layer = CustomLayerNotCallingSuper()\n with self.assertRaisesRegex(RuntimeError, 'You must call `super()'):\n layer(np.random.random((10, 2)))\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_first_arg_not_called_inputs(self):\n x, y = tf.ones((10, 1)), tf.ones((10, 1))\n\n class ArgLayer(base_layer.Layer):\n\n def call(self, x, y):\n return x + y\n\n layer = ArgLayer()\n out = self.evaluate(layer(x=x, y=y))\n self.assertAllClose(out, 2 * np.ones((10, 1)))\n\n class KwargLayer(base_layer.Layer):\n\n def call(self, x=None, y=None):\n return x + y\n\n layer = KwargLayer()\n out = self.evaluate(layer(x=x, y=y))\n self.assertAllClose(out, 2 * np.ones((10, 1)))\n\n with self.assertRaisesRegex(ValueError, 'must always be passed'):\n layer(y=y)\n\n class TFFunctionLayer(base_layer.Layer):\n\n @tf.function\n def call(self, x, y=None):\n if y is None:\n return x\n return x + y\n\n layer = TFFunctionLayer()\n out = self.evaluate(layer(x=x, y=y))\n self.assertAllClose(out, 2 * np.ones((10, 1)))\n\n def test_build_input_shape(self):\n\n class CustomLayer(base_layer.Layer):\n\n def build(self, input_shape):\n self.add_weight('w', shape=input_shape[1:])\n super(CustomLayer, self).build(input_shape)\n\n layer = CustomLayer()\n self.assertFalse(layer.built)\n\n layer.build([None, 1, 2, 3])\n self.assertTrue(layer.built)\n 
self.assertEqual([None, 1, 2, 3], layer._build_input_shape)\n\n layer = CustomLayer()\n layer(input_layer.Input((3,)))\n self.assertTrue(layer.built)\n self.assertEqual([None, 3], layer._build_input_shape.as_list())\n\n def test_build_input_shape_list_with_none(self):\n\n class CustomLayer(base_layer.Layer):\n\n def build(self, input_shape):\n super().build(input_shape)\n self.build_shape = input_shape\n\n def call(self, inputs):\n return inputs[0]\n\n layer = CustomLayer()\n layer([tf.constant([1.0]), None, tf.constant([2.0])])\n self.assertEqual(layer.build_shape, [[1], None, [1]])\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_layer_input_shape_raises_error(self):\n layer = layers.Dense(3)\n with self.assertRaisesRegex(AttributeError, 'no defined input shape'):\n _ = layer.input_shape\n\n layer(tf.ones((10, 1)))\n with self.assertRaisesRegex(AttributeError, 'no defined input shape'):\n _ = layer.input_shape\n\n @test_combinations.generate(test_combinations.combine(mode=['eager']))\n def test_custom_layer_training_arg(self):\n class CustomLayerNoTrainingArg(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n super(CustomLayerNoTrainingArg, self).__init__()\n self._nested_layer = nested_layer or tf.identity\n\n def call(self, inputs):\n return self._nested_layer(inputs)\n\n class CustomLayerDefaultTrainingMissing(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n super(CustomLayerDefaultTrainingMissing, self).__init__()\n self._nested_layer = nested_layer or tf.identity\n\n def call(self, inputs, training):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n class CustomLayerDefaultTrainingNone(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n super(CustomLayerDefaultTrainingNone, self).__init__()\n self._nested_layer = nested_layer or tf.identity\n\n def call(self, inputs, training=None):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n class CustomLayerDefaultTrainingFalse(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n super(CustomLayerDefaultTrainingFalse, self).__init__()\n self._nested_layer = nested_layer or tf.identity\n\n def call(self, inputs, training=False):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n class CustomLayerDefaultTrainingTrue(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n super(CustomLayerDefaultTrainingTrue, self).__init__()\n self._nested_layer = nested_layer or tf.identity\n\n def call(self, inputs, training=True):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n self._test_custom_layer_training_arg(\n CustomLayerNoTrainingArg=CustomLayerNoTrainingArg,\n CustomLayerDefaultTrainingMissing=CustomLayerDefaultTrainingMissing,\n CustomLayerDefaultTrainingNone=CustomLayerDefaultTrainingNone,\n CustomLayerDefaultTrainingFalse=CustomLayerDefaultTrainingFalse,\n CustomLayerDefaultTrainingTrue=CustomLayerDefaultTrainingTrue)\n\n @test_combinations.generate(test_combinations.combine(mode=['eager']))\n def test_custom_layer_training_arg_kwargonly(self):\n class CustomLayerNoTrainingArg(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n super(CustomLayerNoTrainingArg, self).__init__()\n self._nested_layer = nested_layer or tf.identity\n\n def call(self, inputs):\n return self._nested_layer(inputs)\n\n class 
CustomLayerDefaultTrainingMissing(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n super(CustomLayerDefaultTrainingMissing, self).__init__()\n self._nested_layer = nested_layer or tf.identity\n\n def call(self, inputs, *, training):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n class CustomLayerDefaultTrainingNone(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n super(CustomLayerDefaultTrainingNone, self).__init__()\n self._nested_layer = nested_layer or tf.identity\n\n def call(self, inputs, *, training=None):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n class CustomLayerDefaultTrainingFalse(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n super(CustomLayerDefaultTrainingFalse, self).__init__()\n self._nested_layer = nested_layer or tf.identity\n\n def call(self, inputs, *, training=False):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n class CustomLayerDefaultTrainingTrue(base_layer.Layer):\n\n def __init__(self, nested_layer=None):\n super(CustomLayerDefaultTrainingTrue, self).__init__()\n self._nested_layer = nested_layer or tf.identity\n\n def call(self, inputs, *, training=True):\n if training:\n return self._nested_layer(inputs)\n else:\n return self._nested_layer(inputs) * 0.5\n\n self._test_custom_layer_training_arg(\n CustomLayerNoTrainingArg=CustomLayerNoTrainingArg,\n CustomLayerDefaultTrainingMissing=CustomLayerDefaultTrainingMissing,\n CustomLayerDefaultTrainingNone=CustomLayerDefaultTrainingNone,\n CustomLayerDefaultTrainingFalse=CustomLayerDefaultTrainingFalse,\n CustomLayerDefaultTrainingTrue=CustomLayerDefaultTrainingTrue)\n\n def _test_custom_layer_training_arg(self,\n # pylint: disable=invalid-name\n CustomLayerNoTrainingArg,\n CustomLayerDefaultTrainingMissing,\n CustomLayerDefaultTrainingNone,\n CustomLayerDefaultTrainingFalse,\n CustomLayerDefaultTrainingTrue,\n # pylint: enable=invalid-name\n ):\n x = tf.ones(shape=(1, 1))\n\n # If the layer signature doesn't specify a default training arg,\n # run it in inference mode when to training arg is passed\n # to __call__\n layer = CustomLayerDefaultTrainingMissing()\n self.assertAllEqual(layer(x), x * 0.5)\n self.assertAllEqual(layer(x, training=False), x * 0.5)\n self.assertAllEqual(layer(x, training=True), x)\n\n # If the layer signature specifies `False` as the default training arg,\n # run it in inference mode when no training arg is passed\n # to __call__\n layer = CustomLayerDefaultTrainingFalse()\n self.assertAllEqual(layer(x), x * 0.5)\n self.assertAllEqual(layer(x, training=False), x * 0.5)\n self.assertAllEqual(layer(x, training=True), x)\n\n # If the layer signature specifies `True` as the default training arg,\n # explicitly run it in training mode when no training arg is passed\n # to __call__\n layer = CustomLayerDefaultTrainingTrue()\n self.assertAllEqual(layer(x), x)\n self.assertAllEqual(layer(x, training=False), x * 0.5)\n self.assertAllEqual(layer(x, training=True), x)\n\n # Outer layers/models should set the training context implicitly for all\n # nested layers, respecting whatever mode the outer layer was run with.\n layer = CustomLayerDefaultTrainingTrue(CustomLayerDefaultTrainingFalse())\n # No outer value passed: use local defaults\n self.assertAllEqual(layer(x), x) # Use outer default True\n # Outer value passed: override local defaults\n self.assertAllEqual(layer(x, 
training=False), x * 0.25)\n self.assertAllEqual(layer(x, training=True), x)\n\n layer = CustomLayerDefaultTrainingFalse(CustomLayerDefaultTrainingTrue())\n # No outer value passed: use local defaults\n self.assertAllEqual(layer(x), x * 0.25) # Use outer default False\n # Outer value passed: override local defaults\n self.assertAllEqual(layer(x, training=False), x * 0.25)\n self.assertAllEqual(layer(x, training=True), x)\n\n # If the outer layer `call` doesn't take a training argument at all,\n # it'll set the nested scope as None when no training arg is passed in.\n # If a training arg is passed in it won't use it directly in `call`, but\n # it will set the nested training mode.\n layer = CustomLayerNoTrainingArg(CustomLayerDefaultTrainingTrue())\n self.assertAllEqual(layer(x), x) # Use local default True\n self.assertAllEqual(layer(x, training=False), x * 0.5)\n self.assertAllEqual(layer(x, training=True), x)\n\n layer = CustomLayerDefaultTrainingNone(CustomLayerDefaultTrainingTrue())\n self.assertAllEqual(layer(x), x * 0.5) # Nested use local default True\n self.assertAllEqual(layer(x, training=False), x * 0.25)\n self.assertAllEqual(layer(x, training=True), x)\n\n def test_activity_regularizer_string(self):\n\n class MyLayer(base_layer.Layer):\n pass\n\n layer = MyLayer(activity_regularizer='l2')\n self.assertIsInstance(layer.activity_regularizer, regularizers.L2)\n\n def test_tf_module_tracking(self):\n\n class MyModule(tf.Module):\n\n def __init__(self):\n super(MyModule, self).__init__()\n self.v1 = tf.Variable(1., trainable=True, name='v1')\n self.v2 = tf.Variable(2., trainable=False, name='v2')\n\n def __call__(self, x):\n return x * self.v1 * self.v2\n\n class MyLayer(base_layer.Layer):\n\n def __init__(self, **kwargs):\n super(MyLayer, self).__init__(**kwargs)\n self.my_modules = {}\n self.my_modules['a'] = MyModule()\n\n def call(self, x):\n return self.my_modules['a'](x)\n\n layer = MyLayer()\n self.assertLen(layer.variables, 2)\n self.assertLen(layer.trainable_variables, 1)\n self.assertLen(layer.non_trainable_variables, 1)\n\n layer.trainable = False\n self.assertLen(layer.variables, 2)\n self.assertLen(layer.trainable_variables, 0)\n self.assertLen(layer.non_trainable_variables, 2)\n\n class MyModel(training_lib.Model):\n\n def __init__(self):\n super(MyModel, self).__init__()\n self.my_modules = []\n self.my_modules.append(MyModule())\n\n def call(self, x):\n return self.my_modules[0](x)\n\n model = MyModel()\n self.assertLen(model.variables, 2)\n self.assertLen(model.trainable_variables, 1)\n self.assertLen(model.non_trainable_variables, 1)\n\n model.trainable = False\n self.assertLen(model.variables, 2)\n self.assertLen(model.trainable_variables, 0)\n self.assertLen(model.non_trainable_variables, 2)\n\n\n@test_utils.run_v2_only\nclass SymbolicSupportTest(test_combinations.TestCase):\n\n def test_using_symbolic_tensors_with_tf_ops(self):\n # Single-input.\n x = input_layer.Input((3,))\n tf.square(x)\n\n # Multi-inputs.\n x1, x2 = input_layer.Input((3,)), input_layer.Input((3,))\n tf.concat([x1, x2], axis=1)\n\n # Mixing Keras symbolic tensors and graph tensors from the same graph works.\n with backend.get_graph().as_default():\n x1 = input_layer.Input((3,))\n x2 = input_layer.Input((3,))\n tf.matmul(x1, x2)\n\n # Creating same op type (matmul) multiple times in the Keras graph works.\n x1 = input_layer.Input((3,))\n x2 = input_layer.Input((3,))\n tf.matmul(x1, x2)\n\n def test_mixing_eager_and_graph_tensors(self):\n with tf.Graph().as_default():\n x1 = tf.ones((3, 3))\n 
x2 = tf.ones((3, 3))\n with self.assertRaises(TypeError):\n tf.matmul(x1, x2)\n\n def test_mixing_numpy_arrays_and_graph_tensors(self):\n with tf.Graph().as_default():\n x1 = tf.ones((3, 3))\n x2 = np.ones((3, 3), dtype='float32')\n with self.assertRaises(TypeError):\n tf.matmul(x1, x2)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):\n x1 = input_layer.Input((3,))\n x2 = tf.ones((3, 3))\n y = tf.matmul(x1, x2)\n\n fn = backend.function(inputs=[x1], outputs=[y])\n x_val = np.random.random((3, 3))\n y_val = np.ones((3, 3))\n self.assertAllClose(fn([x_val])[0],\n np.matmul(x_val, y_val),\n atol=1e-5)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):\n x1 = input_layer.Input((3,))\n x2 = np.ones((3, 3), dtype='float32')\n y = tf.matmul(x1, x2)\n\n fn = backend.function(inputs=[x1], outputs=[y])\n x_val = np.random.random((3, 3))\n y_val = np.ones((3, 3))\n self.assertAllClose(fn([x_val])[0],\n np.matmul(x_val, y_val),\n atol=1e-5)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_reraising_exception(self):\n # When layer is not dynamic, we have some pattern matching during exception\n # handling to detect when the user is trying to use python control flow.\n # When an exception is thrown but the pattern doesn't match, we want to\n # preserve the originating stack trace. An early implementation of this\n # logic lost the stack trace. We test the correct behavior here.\n\n class TypeErrorLayer(base_layer.Layer):\n\n def call(self, inputs):\n def easily_identifiable_name():\n raise TypeError('Non-matching TypeError message.')\n easily_identifiable_name()\n\n inputs = input_layer.Input((3,))\n\n try:\n _ = TypeErrorLayer()(inputs)\n except TypeError as e:\n self.assertIn('easily_identifiable_name', str(e)) # pylint: disable=g-assert-in-except\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_summaries_in_tf_function(self):\n if not tf.executing_eagerly():\n return\n\n class MyLayer(base_layer.Layer):\n\n def call(self, inputs):\n tf.summary.scalar('mean', tf.reduce_mean(inputs))\n return inputs\n\n tmp_dir = self.get_temp_dir()\n writer = tf.summary.create_file_writer(tmp_dir)\n with writer.as_default(step=1), tf.summary.record_if(True):\n my_layer = MyLayer()\n x = tf.ones((10, 10))\n\n def my_fn(x):\n return my_layer(x)\n\n _ = my_fn(x)\n\n event_file = tf.compat.v1.gfile.Glob(os.path.join(tmp_dir, 'events*'))\n self.assertLen(event_file, 1)\n event_file = event_file[0]\n tags = set()\n for e in tf.compat.v1.train.summary_iterator(event_file):\n for val in e.summary.value:\n tags.add(val.tag)\n self.assertEqual(set(['my_layer/mean']), tags)\n\n @test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']))\n def test_error_when_passing_non_tensor(self):\n # layers that have an `input_spec` will raise an error when called on\n # non-tensors. 
This covers all built-in layers.\n layer = layers.Dense(3)\n x = object()\n with self.assertRaisesRegex(TypeError, r'should be tensors'):\n layer(x)\n\n\n@test_utils.run_v2_only\n@test_combinations.generate(test_combinations.combine(mode=['graph', 'eager']))\nclass NestedTrackingTest(tf.test.TestCase):\n\n def test_nested_layer_variable_tracking(self):\n # Test that variables from nested sublayers are\n # being tracked by subclassed layers.\n\n class MyLayer(base_layer.Layer):\n\n def __init__(self):\n super(MyLayer, self).__init__()\n self.dense1 = layers.Dense(1)\n self.dense2 = layers.BatchNormalization()\n\n def build(self, input_shape):\n self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list())\n self.v2 = tf.Variable(\n name='v2',\n initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'),\n trainable=False)\n\n def call(self, inputs):\n x = self.dense1(inputs) + self.dense2(inputs)\n return x + self.v1 + self.v2\n\n layer = MyLayer()\n inputs = input_layer.Input((1,))\n _ = layer(inputs)\n\n self.assertEqual(len(layer.weights), 8)\n self.assertEqual(len(layer.trainable_weights), 5)\n self.assertEqual(len(layer.non_trainable_weights), 3)\n\n layer.dense1.trainable = False\n self.assertEqual(len(layer.weights), 8)\n self.assertEqual(len(layer.trainable_weights), 3)\n self.assertEqual(len(layer.non_trainable_weights), 5)\n\n layer.trainable = False\n self.assertEqual(len(layer.weights), 8)\n self.assertEqual(len(layer.trainable_weights), 0)\n self.assertEqual(len(layer.non_trainable_weights), 8)\n self.assertEqual(\n {id(v) for v in [layer.dense1, layer.dense2, layer.v1, layer.v2]},\n {id(v) for v in layer._trackable_children().values()})\n\n def test_nested_layer_updates_losses_tracking(self):\n # Test that updates and losses from nested sublayers are\n # being tracked by subclassed layers.\n\n class UpdateAndLossLayer(base_layer.Layer):\n\n def build(self, _):\n self.v1 = self.add_weight('v1', shape=())\n\n def call(self, inputs):\n self.add_loss(tf.reduce_sum(inputs))\n self.add_update(tf.compat.v1.assign_add(self.v1, 1))\n return inputs + 1\n\n class MyLayer(base_layer.Layer):\n\n def build(self, _):\n self.v1 = self.add_weight('v1', shape=())\n\n def __init__(self):\n super(MyLayer, self).__init__()\n self.ul1 = UpdateAndLossLayer()\n self.ul2 = UpdateAndLossLayer()\n\n def call(self, inputs):\n self.add_loss(tf.reduce_sum(inputs))\n self.add_update(tf.compat.v1.assign_add(self.v1, 1))\n x = self.ul1(inputs)\n return self.ul2(x)\n\n layer = MyLayer()\n\n if tf.executing_eagerly():\n inputs = tf.ones((3, 1))\n _ = layer(inputs)\n self.assertEqual(len(layer.losses), 3)\n else:\n inputs = input_layer.Input((1,))\n _ = layer(inputs)\n self.assertEqual(len(layer.losses), 3)\n self.assertEqual(len(layer.updates), 3)\n\n def test_attribute_reassignment(self):\n l = base_layer.Layer()\n l.a = base_layer.Layer()\n l.a = []\n l.a = tf.Variable(1.)\n l.a = base_layer.Layer()\n last_assignment = base_layer.Layer()\n l.a = last_assignment\n l.b = tf.Variable(1.)\n del l.b\n l.c = base_layer.Layer()\n del l.c\n l.d = last_assignment\n del l.d\n sublayers = list(l._flatten_layers(include_self=False, recursive=False))\n self.assertEqual([last_assignment], sublayers)\n self.assertEqual([], l.trainable_weights)\n self.assertEqual([], l.non_trainable_weights)\n self.assertEqual([], l.weights)\n del l.a\n self.assertEqual([], l._self_tracked_trackables)\n\n def test_layer_class_not_tracked_as_sublayer(self):\n # See https://github.com/tensorflow/tensorflow/issues/27431 for 
details.\n\n class LayerWithClassAttribute(base_layer.Layer):\n\n def __init__(self):\n super(LayerWithClassAttribute, self).__init__()\n self.layer_fn = layers.Dense\n\n layer = LayerWithClassAttribute()\n self.assertEmpty(layer.variables)\n self.assertEmpty(layer.submodules)\n\n def test_layer_call_fn_args(self):\n\n class NonDefunLayer(base_layer.Layer):\n\n def call(self, inputs, a, mask, b=None, training=None):\n return inputs\n\n class DefunLayer(base_layer.Layer):\n\n @tf.function\n def call(self, x, mask, a, training=None, b=None):\n return x\n\n nondefun_layer = NonDefunLayer()\n self.assertEqual(nondefun_layer._call_fn_args,\n ['inputs', 'a', 'mask', 'b', 'training'])\n defun_layer = DefunLayer()\n self.assertEqual(defun_layer._call_fn_args,\n ['x', 'mask', 'a', 'training', 'b'])\n\n def test_sequential_model(self):\n model = sequential.Sequential(\n [layers.Dense(10, input_shape=(10,)),\n layers.Dense(5)])\n self.assertLen(model.layers, 2)\n self.assertLen(model.weights, 4)\n\n # Make sure a subclass model also works when it is called 'Sequential'.\n class Sequential(training_lib.Model):\n\n def __init__(self):\n super(Sequential, self).__init__()\n self.dense_layers = [layers.Dense(10), layers.Dense(5)]\n\n def call(self, inputs):\n x = inputs\n for d in self.dense_layers:\n x = d(x)\n return x\n\n s = Sequential()\n self.assertLen(s.layers, 2)\n self.assertLen(s.weights, 0)\n\n s(input_layer.Input((10,)))\n self.assertLen(s.weights, 4)\n\n\n@test_utils.run_v2_only\n@test_combinations.generate(test_combinations.combine(mode=['graph', 'eager']))\nclass NameScopingTest(test_combinations.TestCase):\n\n def test_name_scope_layer(self):\n x = backend.placeholder(shape=(10, 10))\n layer = layers.Dense(10, name='MyName')\n layer(x)\n self.assertEqual(layer.bias.name, 'MyName/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName/kernel:0')\n\n def test_name_scope_functional_api(self):\n inputs = input_layer.Input((3,))\n layer = layers.Dense(10, name='MyName')\n _ = layer(inputs)\n self.assertEqual(layer.bias.name, 'MyName/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName/kernel:0')\n\n def test_name_scope_functional_api_nested(self):\n\n class NestedLayer(base_layer.Layer):\n\n def __init__(self, name='OuterName'):\n super(NestedLayer, self).__init__(name=name)\n self.dense = layers.Dense(10, name='InnerName')\n\n def call(self, inputs):\n return self.dense(inputs)\n\n inputs = input_layer.Input((3,))\n layer = NestedLayer()\n _ = layer(inputs)\n self.assertEqual(layer.dense.bias.name, 'OuterName/InnerName/bias:0')\n self.assertEqual(layer.dense.kernel.name, 'OuterName/InnerName/kernel:0')\n\n def test_name_scope_sublayer(self):\n\n class NameScopeTracker(base_layer.Layer):\n\n def call(self, inputs):\n self.active_name_scope = tf.__internal__.get_name_scope()\n return inputs\n\n x = backend.placeholder(shape=(10, 10))\n sublayer = NameScopeTracker(name='Sublayer')\n layer = layers.Dense(10, activation=sublayer, name='MyName2')\n layer(x)\n self.assertEqual(layer.bias.name, 'MyName2/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName2/kernel:0')\n self.assertEqual(sublayer.active_name_scope, 'MyName2/Sublayer')\n\n def test_name_scope_tf_tensor(self):\n x = tf.convert_to_tensor(np.ones((10, 10)))\n layer = layers.Dense(\n 10, activation=layers.ReLU(name='MyAct'), name='MyName3')\n layer(x)\n self.assertEqual(layer.bias.name, 'MyName3/bias:0')\n self.assertEqual(layer.kernel.name, 'MyName3/kernel:0')\n\n @test_utils.run_v2_only\n def 
test_apply_name_scope_on_model_declaration(self):\n if not tf.executing_eagerly():\n self.skipTest('`apply_name_scope_on_model_declaration` API is supported'\n ' only for V2 eager')\n\n base_layer._apply_name_scope_on_model_declaration(True)\n\n inputs = input_layer.Input((3,))\n x = layers.Dense(10, name='Dense1')(inputs)\n with tf.name_scope('outer'):\n x = layers.Dense(10, name='Dense2')(x)\n with tf.name_scope('inner'):\n x = layers.Dense(10, name='Dense3')(x)\n x = layers.Dense(10, name='Dense4')(x)\n outputs = layers.Dense(10, name='Dense5')(x)\n\n model = training_lib.Model(inputs, outputs)\n node_names = self._get_model_node_names(model, np.random.random((1, 3)),\n 'call_scope')\n self.assertListEqual(node_names, [\n 'call_scope/Const',\n 'call_scope/model/Cast',\n 'call_scope/model/Dense1/MatMul/ReadVariableOp/resource',\n 'call_scope/model/Dense1/MatMul/ReadVariableOp',\n 'call_scope/model/Dense1/MatMul',\n 'call_scope/model/Dense1/BiasAdd/ReadVariableOp/resource',\n 'call_scope/model/Dense1/BiasAdd/ReadVariableOp',\n 'call_scope/model/Dense1/BiasAdd',\n 'call_scope/model/outer/Dense2/MatMul/ReadVariableOp/resource',\n 'call_scope/model/outer/Dense2/MatMul/ReadVariableOp',\n 'call_scope/model/outer/Dense2/MatMul',\n 'call_scope/model/outer/Dense2/BiasAdd/ReadVariableOp/resource',\n 'call_scope/model/outer/Dense2/BiasAdd/ReadVariableOp',\n 'call_scope/model/outer/Dense2/BiasAdd',\n 'call_scope/model/outer/inner/Dense3/MatMul/ReadVariableOp/resource',\n 'call_scope/model/outer/inner/Dense3/MatMul/ReadVariableOp',\n 'call_scope/model/outer/inner/Dense3/MatMul',\n 'call_scope/model/outer/inner/Dense3/BiasAdd/ReadVariableOp/resource',\n 'call_scope/model/outer/inner/Dense3/BiasAdd/ReadVariableOp',\n 'call_scope/model/outer/inner/Dense3/BiasAdd',\n 'call_scope/model/outer/Dense4/MatMul/ReadVariableOp/resource',\n 'call_scope/model/outer/Dense4/MatMul/ReadVariableOp',\n 'call_scope/model/outer/Dense4/MatMul',\n 'call_scope/model/outer/Dense4/BiasAdd/ReadVariableOp/resource',\n 'call_scope/model/outer/Dense4/BiasAdd/ReadVariableOp',\n 'call_scope/model/outer/Dense4/BiasAdd',\n 'call_scope/model/Dense5/MatMul/ReadVariableOp/resource',\n 'call_scope/model/Dense5/MatMul/ReadVariableOp',\n 'call_scope/model/Dense5/MatMul',\n 'call_scope/model/Dense5/BiasAdd/ReadVariableOp/resource',\n 'call_scope/model/Dense5/BiasAdd/ReadVariableOp',\n 'call_scope/model/Dense5/BiasAdd',\n 'Identity',\n 'NoOp'\n ])\n base_layer._apply_name_scope_on_model_declaration(False)\n\n @test_utils.run_v2_only\n def test_apply_name_scope_on_nested_layer_model_declaration(self):\n if not tf.executing_eagerly():\n self.skipTest('`apply_name_scope_on_model_declaration` API is supported'\n ' only for V2 eager')\n\n base_layer._apply_name_scope_on_model_declaration(True)\n\n class ThreeDenses(layers.Layer):\n\n def __init__(self, name='ThreeDenses', **kwargs):\n super().__init__(name=name, **kwargs)\n self.inner_dense_1 = layers.Dense(10, name='NestedDense1')\n with tf.name_scope('inner1/inner2'):\n self.inner_dense_2 = layers.Dense(20, name='NestedDense2')\n self.inner_dense_3 = layers.Dense(30, name='NestedDense3')\n\n def call(self, x):\n x = self.inner_dense_1(x)\n x = self.inner_dense_2(x)\n x = self.inner_dense_3(x)\n return x\n\n inputs = input_layer.Input((3,))\n with tf.name_scope('outer'):\n x = ThreeDenses()(inputs)\n outputs = layers.Dense(10, name='OuterDense')(x)\n\n model = training_lib.Model(inputs, outputs)\n node_names = self._get_model_node_names(model, np.random.random((1, 3)),\n 'call_scope')\n\n 
self.assertListEqual(node_names, [\n 'call_scope/Const', 'call_scope/model/Cast',\n 'call_scope/model/outer/ThreeDenses/NestedDense1/MatMul/ReadVariableOp/resource',\n 'call_scope/model/outer/ThreeDenses/NestedDense1/MatMul/ReadVariableOp',\n 'call_scope/model/outer/ThreeDenses/NestedDense1/MatMul',\n 'call_scope/model/outer/ThreeDenses/NestedDense1/BiasAdd/ReadVariableOp/resource',\n 'call_scope/model/outer/ThreeDenses/NestedDense1/BiasAdd/ReadVariableOp',\n 'call_scope/model/outer/ThreeDenses/NestedDense1/BiasAdd',\n 'call_scope/model/outer/ThreeDenses/inner1/inner2/NestedDense2/MatMul/ReadVariableOp/resource',\n 'call_scope/model/outer/ThreeDenses/inner1/inner2/NestedDense2/MatMul/ReadVariableOp',\n 'call_scope/model/outer/ThreeDenses/inner1/inner2/NestedDense2/MatMul',\n 'call_scope/model/outer/ThreeDenses/inner1/inner2/NestedDense2/BiasAdd/ReadVariableOp/resource',\n 'call_scope/model/outer/ThreeDenses/inner1/inner2/NestedDense2/BiasAdd/ReadVariableOp',\n 'call_scope/model/outer/ThreeDenses/inner1/inner2/NestedDense2/BiasAdd',\n 'call_scope/model/outer/ThreeDenses/NestedDense3/MatMul/ReadVariableOp/resource',\n 'call_scope/model/outer/ThreeDenses/NestedDense3/MatMul/ReadVariableOp',\n 'call_scope/model/outer/ThreeDenses/NestedDense3/MatMul',\n 'call_scope/model/outer/ThreeDenses/NestedDense3/BiasAdd/ReadVariableOp/resource',\n 'call_scope/model/outer/ThreeDenses/NestedDense3/BiasAdd/ReadVariableOp',\n 'call_scope/model/outer/ThreeDenses/NestedDense3/BiasAdd',\n 'call_scope/model/OuterDense/MatMul/ReadVariableOp/resource',\n 'call_scope/model/OuterDense/MatMul/ReadVariableOp',\n 'call_scope/model/OuterDense/MatMul',\n 'call_scope/model/OuterDense/BiasAdd/ReadVariableOp/resource',\n 'call_scope/model/OuterDense/BiasAdd/ReadVariableOp',\n 'call_scope/model/OuterDense/BiasAdd', 'Identity', 'NoOp'\n ])\n base_layer._apply_name_scope_on_model_declaration(False)\n\n def _get_model_node_names(self, model, inputs, call_name_scope):\n \"\"\"Returns a list of model's node names.\"\"\"\n\n @tf.function()\n def wrapper():\n with tf.name_scope(call_name_scope):\n return model(inputs)\n\n return [\n node.name\n for node in wrapper.get_concrete_function().graph.as_graph_def().node\n ]\n\n\n@test_utils.run_v2_only\n@test_combinations.generate(\n test_combinations.keras_mode_combinations(mode=['eager']))\nclass AutographControlFlowTest(test_combinations.TestCase):\n\n def test_disabling_in_context_is_matched(self):\n\n test_obj = self\n\n class MyLayer(base_layer.Layer):\n\n def call(self, inputs, training=None):\n with test_obj.assertRaisesRegex(TypeError, 'Tensor.*as.*bool'):\n if tf.constant(False):\n return inputs * 1.\n return inputs * 0.\n\n @tf.function(autograph=False)\n def test_fn():\n return MyLayer()(tf.constant([[1., 2., 3.]]))\n\n test_fn()\n\n def test_if_training_pattern_output(self):\n\n class MyLayer(base_layer.Layer):\n\n def call(self, inputs, training=None):\n if training:\n return inputs * 1.\n return inputs * 0.\n\n inputs = input_layer.Input((3,))\n outputs = MyLayer()(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(train_loss, 0.)\n test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(test_loss, 1.)\n\n def test_if_training_pattern_loss(self):\n\n class MyLayer(base_layer.Layer):\n\n def call(self, inputs, training=None):\n if training:\n loss = tf.reduce_sum(inputs)\n 
else:\n loss = 0.\n self.add_loss(loss)\n return inputs\n\n inputs = input_layer.Input((3,))\n outputs = MyLayer()(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(train_loss, 2 * 3)\n test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(test_loss, 0)\n\n def test_if_training_pattern_metric(self):\n\n class MyLayer(base_layer.Layer):\n\n def call(self, inputs, training=None):\n if training:\n metric = tf.reduce_sum(inputs)\n else:\n metric = 0.\n self.add_metric(metric, name='my_metric', aggregation='mean')\n return inputs\n\n inputs = input_layer.Input((3,))\n outputs = MyLayer()(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n for _ in range(3):\n _, train_metric = model.train_on_batch(np.ones((2, 3)),\n np.ones((2, 3)))\n\n self.assertEqual(train_metric, 2 * 3)\n _, test_metric = model.test_on_batch(np.ones((2, 3)),\n np.ones((2, 3)))\n self.assertEqual(test_metric, 0)\n\n def test_if_training_pattern_update(self):\n\n class MyLayer(base_layer.Layer):\n\n def build(self, input_shape):\n self.counter = self.add_weight(\n shape=(), trainable=False, initializer='zeros')\n\n def call(self, inputs, training=None):\n if training:\n increment = 1.\n else:\n increment = 0.\n self.counter.assign_add(increment)\n return inputs\n\n inputs = input_layer.Input((3,))\n layer = MyLayer()\n outputs = layer(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile(\n 'sgd',\n 'mse',\n run_eagerly=test_utils.should_run_eagerly())\n model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(backend.get_value(layer.counter), 1.)\n\n def test_conditional_losses_in_call(self):\n\n class MyLayer(base_layer.Layer):\n\n def __init__(self):\n super(MyLayer,\n self).__init__(dynamic=test_utils.should_run_eagerly())\n\n def call(self, inputs, training=None):\n if training:\n self.add_loss(tf.reduce_sum(inputs))\n return inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n inputs = input_layer.Input((3,))\n layer = MyLayer()\n outputs = layer(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile('sgd', 'mse', run_eagerly=test_utils.should_run_eagerly())\n loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(loss, 2 * 3)\n\n def test_conditional_callable_losses(self):\n model = sequential.Sequential([\n layers.Dense(\n 1, kernel_regularizer=regularizers.l2(1e-4), input_shape=(1,))\n ])\n model._run_eagerly = test_utils.should_run_eagerly()\n\n def assert_graph(t):\n if not tf.executing_eagerly():\n self.assertEqual(t.graph, tf.compat.v1.get_default_graph())\n\n @tf.function\n def get_losses(t):\n if t < 0:\n return tf.reduce_sum(model.losses) * t\n else:\n return tf.reduce_sum(model.losses)\n\n assert_graph(get_losses(tf.constant(2.)))\n assert_graph(get_losses(tf.constant(0.5)))\n\n def test_conditional_metrics_in_call(self):\n\n class MyLayer(base_layer.Layer):\n\n def __init__(self):\n super(MyLayer,\n self).__init__(dynamic=test_utils.should_run_eagerly())\n\n def call(self, inputs, training=None):\n if training:\n self.add_metric(tf.reduce_sum(inputs),\n name='sum',\n aggregation='mean')\n return inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n inputs = input_layer.Input((3,))\n layer = 
MyLayer()\n outputs = layer(inputs)\n model = training_lib.Model(inputs, outputs)\n model.compile('sgd', 'mse', run_eagerly=test_utils.should_run_eagerly())\n history = model.fit(np.ones((2, 3)), np.ones((2, 3)))\n self.assertEqual(history.history['sum'][-1], 2 * 3)\n\n def test_conditional_activity_regularizer_in_call(self):\n\n class TestModel(training_lib.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(\n name='test_model', dynamic=test_utils.should_run_eagerly())\n self.layer = layers.Dense(2, activity_regularizer='l2')\n\n def call(self, x, training=None):\n if tf.greater(tf.reduce_sum(x), 0.0):\n return self.layer(x)\n else:\n return self.layer(x)\n\n model = TestModel()\n model.compile(\n loss='mse',\n optimizer='sgd',\n run_eagerly=test_utils.should_run_eagerly())\n\n x = np.ones(shape=(10, 1))\n y = np.ones(shape=(10, 2))\n\n if test_utils.should_run_eagerly():\n model.fit(x, y, epochs=2, batch_size=5)\n else:\n with self.assertRaisesRegex(ValueError, 'ActivityRegularizer'):\n model.fit(x, y, epochs=2, batch_size=5)\n\n def test_conditional_activity_regularizer_with_wrappers_in_call(self):\n\n class TestModel(training_lib.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(\n name='test_model', dynamic=test_utils.should_run_eagerly())\n self.layer = layers.TimeDistributed(\n layers.Dense(2, activity_regularizer='l2'), input_shape=(3, 4))\n\n def call(self, x, training=None):\n if tf.greater(tf.reduce_sum(x), 0.0):\n return self.layer(x)\n else:\n return self.layer(x)\n\n model = TestModel()\n model.compile(\n loss='mse',\n optimizer='sgd',\n run_eagerly=test_utils.should_run_eagerly())\n\n x = np.ones(shape=(10, 3, 4))\n y = np.ones(shape=(10, 3, 2))\n\n if test_utils.should_run_eagerly():\n model.fit(x, y, epochs=2, batch_size=5)\n else:\n with self.assertRaisesRegex(ValueError, 'ActivityRegularizer'):\n model.fit(x, y, epochs=2, batch_size=5)\n\n\nclass AddLayer(base_layer.Layer):\n \"\"\"A layer which adds its input to a variable.\n\n Useful for testing a layer with a variable\n \"\"\"\n\n def build(self, _):\n self.v = self.add_weight('v', (), initializer='ones')\n self.built = True\n\n def call(self, inputs):\n return inputs + self.v\n\n\nclass IdentityLayer(base_layer.Layer):\n \"\"\"A layer that returns its input.\n\n Useful for testing a layer without a variable.\n \"\"\"\n\n def call(self, inputs):\n return inputs\n\n\n@test_utils.run_v2_only\n@test_combinations.generate(test_combinations.combine(mode=['graph', 'eager']))\nclass DTypeTest(test_combinations.TestCase):\n\n def _const(self, dtype):\n return tf.constant(1, dtype=dtype)\n\n @test_utils.enable_v2_dtype_behavior\n def test_dtype_defaults_to_floatx(self):\n layer = AddLayer()\n self.assertEqual(layer.dtype, 'float32')\n layer(self._const('float64'))\n self.assertEqual(layer.dtype, 'float32') # dtype should not change\n\n try:\n backend.set_floatx('float64')\n layer = AddLayer()\n self.assertEqual(layer.dtype, 'float64')\n finally:\n backend.set_floatx('float32')\n\n @test_utils.enable_v2_dtype_behavior\n def test_passing_dtype_to_constructor(self):\n layer = IdentityLayer(dtype='float64')\n layer(self._const('float32'))\n self.assertEqual(layer.dtype, 'float64')\n\n layer = IdentityLayer(dtype='int32')\n layer(self._const('float32'))\n self.assertEqual(layer.dtype, 'int32')\n\n layer = IdentityLayer(dtype=tf.float64)\n layer(self._const('float32'))\n self.assertEqual(layer.dtype, 'float64')\n\n @test_utils.enable_v2_dtype_behavior\n def input_cast_to_dtype(self):\n layer = 
AddLayer()\n\n # Input should be cast to layer.dtype, so output should also be layer.dtype\n self.assertEqual(layer(self._const('float64')).dtype, 'float32')\n\n layer = AddLayer(dtype='float64')\n self.assertEqual(layer(self._const('float32')).dtype, 'float64')\n\n # Test inputs are not casted if layer.dtype is not floating-point\n layer = IdentityLayer(dtype='int32')\n self.assertEqual(layer(self._const('float64')).dtype, 'float64')\n\n # Test inputs are not casted if the inputs are not floating-point\n layer = IdentityLayer(dtype='float32')\n self.assertEqual(layer(self._const('int32')).dtype, 'int32')\n\n # Test Numpy arrays are casted\n layer = IdentityLayer(dtype='float64')\n self.assertEqual(layer(np.array(1, dtype='float32')).dtype, 'float64')\n\n # Test Python floats are casted\n layer = IdentityLayer(dtype='float64')\n self.assertEqual(layer(1.).dtype, 'float64')\n\n @test_utils.enable_v2_dtype_behavior\n def multiple_inputs_cast_to_dtype(self):\n\n class MultiIdentityLayer(base_layer.Layer):\n\n def call(self, inputs):\n return [tf.identity(x) for x in inputs]\n\n # Testing layer with default dtype of float32\n layer = MultiIdentityLayer()\n x, y = layer([self._const('float16'), self._const('float32')])\n self.assertEqual(x.dtype, 'float32')\n self.assertEqual(y.dtype, 'float32')\n\n # Test passing dtype to the constructor\n layer = MultiIdentityLayer(dtype='float64')\n x, y = layer([self._const('float16'), self._const('float32')])\n self.assertEqual(x.dtype, 'float64')\n self.assertEqual(y.dtype, 'float64')\n\n # Test several non-floating point types\n layer = MultiIdentityLayer(dtype='float64')\n x, y, z, w = layer([self._const('float16'), self._const('bool'),\n self._const('float64'), self._constant('complex64')])\n self.assertEqual(x.dtype, 'float64')\n self.assertEqual(y.dtype, 'bool')\n self.assertEqual(z.dtype, 'float64')\n self.assertEqual(w.dtype, 'complex64')\n\n @test_utils.enable_v2_dtype_behavior\n def test_extra_args_and_kwargs_not_casted(self):\n\n class IdentityLayerWithArgs(base_layer.Layer):\n\n def call(self, inputs, *args, **kwargs):\n kwargs.pop('training', None)\n return tf.nest.flatten([inputs, args, kwargs])\n\n layer = IdentityLayerWithArgs(dtype='float64')\n x, y, z = layer(self._const('float16'), self._const('float16'),\n kwarg=self._const('float16'))\n self.assertEqual(x.dtype, 'float64')\n self.assertEqual(y.dtype, 'float16')\n self.assertEqual(z.dtype, 'float16')\n\n @test_utils.enable_v2_dtype_behavior\n def test_layer_without_autocast(self):\n\n class IdentityLayerWithoutAutocast(IdentityLayer):\n\n def __init__(self, *args, **kwargs):\n kwargs['autocast'] = False\n super(IdentityLayerWithoutAutocast, self).__init__(*args, **kwargs)\n\n layer = IdentityLayerWithoutAutocast(dtype='float64')\n self.assertEqual(layer(self._const('float32')).dtype, 'float32')\n\n @test_utils.enable_v2_dtype_behavior\n def test_compute_output_signature(self):\n\n class IdentityLayerWithOutputShape(IdentityLayer):\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n layer = IdentityLayerWithOutputShape(dtype='float64')\n output_signature = layer.compute_output_signature(\n tf.TensorSpec(shape=(), dtype='float32'))\n self.assertEqual(output_signature.shape, ())\n self.assertEqual(output_signature.dtype, 'float64')\n\n @test_utils.enable_v2_dtype_behavior\n def test_composite_tensors_input_casting(self):\n sparse = tf.SparseTensor(\n indices=tf.constant([[0, 1], [2, 3]], dtype='int64'),\n values=tf.constant([0., 1.], dtype='float32'),\n 
dense_shape=tf.constant([4, 4], dtype='int64'))\n ragged = tf.RaggedTensor.from_row_splits(\n values=tf.constant([1., 2., 3.], dtype='float32'),\n row_splits=tf.constant([0, 2, 2, 3], dtype='int64'))\n\n layer = IdentityLayer(dtype='float16')\n\n for x in sparse, ragged:\n self.assertEqual(x.dtype, 'float32')\n y = layer(x)\n self.assertEqual(y.dtype, 'float16')\n self.assertEqual(type(x), type(y))\n\n @test_utils.enable_v2_dtype_behavior\n def test_passing_non_tensor(self):\n layer = IdentityLayer()\n x = object()\n y = layer(x) # Layer should not cast 'x', as it's not a tensor\n self.assertIs(x, y)\n\n @test_utils.disable_v2_dtype_behavior\n def test_v1_behavior(self):\n # Test dtype defaults to None and inferred from input\n layer = IdentityLayer()\n self.assertIsNone(layer.dtype)\n layer(self._const('float64'))\n self.assertEqual(layer.dtype, 'float64')\n\n # Test layer does not cast to dtype\n self.assertEqual(layer(self._const('float32')).dtype, 'float32')\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
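A minimal standalone sketch of the `training`-flag resolution that the custom-layer tests above exercise, assuming a TensorFlow 2.x environment; the layer name `ScaleInTraining` and the scaling factors are invented for illustration and are not part of the source:

```python
# Sketch (assumes TensorFlow 2.x): a custom layer whose behaviour depends on
# the `training` argument, with a local default used when no value is passed.
import tensorflow as tf


class ScaleInTraining(tf.keras.layers.Layer):
    """Multiplies inputs by 1.0 in training mode, by 0.5 otherwise."""

    def call(self, inputs, training=False):
        if training:
            return inputs * 1.0
        return inputs * 0.5


layer = ScaleInTraining()
x = tf.ones((2, 3))
print(layer(x, training=True).numpy())   # all ones
print(layer(x, training=False).numpy())  # all 0.5
print(layer(x).numpy())                  # no value passed: local default False -> 0.5
```

The tests above additionally cover how these defaults interact when one such layer is nested inside another; the sketch only shows the single-layer case.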
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras upsampling layer for 3D inputs.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nfrom keras import backend\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nfrom keras.utils import conv_utils\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.UpSampling3D')\nclass UpSampling3D(Layer):\n \"\"\"Upsampling layer for 3D inputs.\n\n Repeats the 1st, 2nd and 3rd dimensions\n of the data by `size[0]`, `size[1]` and `size[2]` respectively.\n\n Examples:\n\n >>> input_shape = (2, 1, 2, 1, 3)\n >>> x = tf.constant(1, shape=input_shape)\n >>> y = tf.keras.layers.UpSampling3D(size=2)(x)\n >>> print(y.shape)\n (2, 2, 4, 2, 3)\n\n Args:\n size: Int, or tuple of 3 integers.\n The upsampling factors for dim1, dim2 and dim3.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `channels_first` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n Input shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, dim1, dim2, dim3, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, dim1, dim2, dim3)`\n\n Output shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`\n \"\"\"\n\n def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.size = conv_utils.normalize_tuple(size, 3, 'size')\n self.input_spec = InputSpec(ndim=5)\n super(UpSampling3D, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == 'channels_first':\n dim1 = self.size[0] * input_shape[\n 2] if input_shape[2] is not None else None\n dim2 = self.size[1] * input_shape[\n 3] if input_shape[3] is not None else None\n dim3 = self.size[2] * input_shape[\n 4] if input_shape[4] is not None else None\n return tf.TensorShape(\n [input_shape[0], input_shape[1], dim1, dim2, dim3])\n else:\n dim1 = self.size[0] * input_shape[\n 1] if input_shape[1] is not None else None\n dim2 = self.size[1] * input_shape[\n 2] if input_shape[2] is not None 
else None\n dim3 = self.size[2] * input_shape[\n 3] if input_shape[3] is not None else None\n return tf.TensorShape(\n [input_shape[0], dim1, dim2, dim3, input_shape[4]])\n\n def call(self, inputs):\n return backend.resize_volumes(\n inputs, self.size[0], self.size[1], self.size[2], self.data_format)\n\n def get_config(self):\n config = {'size': self.size, 'data_format': self.data_format}\n base_config = super(UpSampling3D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n",
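A short usage sketch of the upsampling-factor arithmetic described in the `UpSampling3D` docstring, assuming TensorFlow 2.x and `channels_last` data; the concrete input shape is arbitrary:

```python
# Each spatial dimension is repeated by the matching `size` factor,
# so spatial dims (1, 2, 3) become (2, 4, 6) with size=2; batch and
# channel dimensions are unchanged.
import tensorflow as tf

x = tf.reshape(tf.range(12, dtype=tf.float32), (2, 1, 2, 3, 1))
y = tf.keras.layers.UpSampling3D(size=2)(x)
print(y.shape)  # (2, 2, 4, 6, 1)
```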
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras image dataset loading utilities.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n# pylint: disable=g-classes-have-attributes\n\nimport multiprocessing\nimport os\n\nimport numpy as np\n\n\ndef index_directory(directory,\n labels,\n formats,\n class_names=None,\n shuffle=True,\n seed=None,\n follow_links=False):\n \"\"\"Make list of all files in the subdirs of `directory`, with their labels.\n\n Args:\n directory: The target directory (string).\n labels: Either \"inferred\"\n (labels are generated from the directory structure),\n None (no labels),\n or a list/tuple of integer labels of the same size as the number of\n valid files found in the directory. Labels should be sorted according\n to the alphanumeric order of the image file paths\n (obtained via `os.walk(directory)` in Python).\n formats: Allowlist of file extensions to index (e.g. \".jpg\", \".txt\").\n class_names: Only valid if \"labels\" is \"inferred\". This is the explicit\n list of class names (must match names of subdirectories). Used\n to control the order of the classes\n (otherwise alphanumerical order is used).\n shuffle: Whether to shuffle the data. Default: True.\n If set to False, sorts the data in alphanumeric order.\n seed: Optional random seed for shuffling.\n follow_links: Whether to visits subdirectories pointed to by symlinks.\n\n Returns:\n tuple (file_paths, labels, class_names).\n file_paths: list of file paths (strings).\n labels: list of matching integer labels (same length as file_paths)\n class_names: names of the classes corresponding to these labels, in order.\n \"\"\"\n if labels is None:\n # in the no-label case, index from the parent directory down.\n subdirs = ['']\n class_names = subdirs\n else:\n subdirs = []\n for subdir in sorted(tf.io.gfile.listdir(directory)):\n if tf.io.gfile.isdir(tf.io.gfile.join(directory, subdir)):\n if subdir.endswith('/'):\n subdir = subdir[:-1]\n subdirs.append(subdir)\n if not class_names:\n class_names = subdirs\n else:\n if set(class_names) != set(subdirs):\n raise ValueError(\n 'The `class_names` passed did not match the '\n 'names of the subdirectories of the target directory. 
'\n 'Expected: %s, but received: %s' %\n (subdirs, class_names))\n class_indices = dict(zip(class_names, range(len(class_names))))\n\n # Build an index of the files\n # in the different class subfolders.\n pool = multiprocessing.pool.ThreadPool()\n results = []\n filenames = []\n\n for dirpath in (tf.io.gfile.join(directory, subdir) for subdir in subdirs):\n results.append(\n pool.apply_async(index_subdirectory,\n (dirpath, class_indices, follow_links, formats)))\n labels_list = []\n for res in results:\n partial_filenames, partial_labels = res.get()\n labels_list.append(partial_labels)\n filenames += partial_filenames\n if labels not in ('inferred', None):\n if len(labels) != len(filenames):\n raise ValueError('Expected the lengths of `labels` to match the number '\n 'of files in the target directory. len(labels) is %s '\n 'while we found %s files in %s.' % (\n len(labels), len(filenames), directory))\n else:\n i = 0\n labels = np.zeros((len(filenames),), dtype='int32')\n for partial_labels in labels_list:\n labels[i:i + len(partial_labels)] = partial_labels\n i += len(partial_labels)\n\n if labels is None:\n print('Found %d files.' % (len(filenames),))\n else:\n print('Found %d files belonging to %d classes.' %\n (len(filenames), len(class_names)))\n pool.close()\n pool.join()\n file_paths = [tf.io.gfile.join(directory, fname) for fname in filenames]\n\n if shuffle:\n # Shuffle globally to erase macro-structure\n if seed is None:\n seed = np.random.randint(1e6)\n rng = np.random.RandomState(seed)\n rng.shuffle(file_paths)\n rng = np.random.RandomState(seed)\n rng.shuffle(labels)\n return file_paths, labels, class_names\n\n\ndef iter_valid_files(directory, follow_links, formats):\n if not follow_links:\n walk = tf.io.gfile.walk(directory)\n else:\n walk = os.walk(directory, followlinks=follow_links)\n for root, _, files in sorted(walk, key=lambda x: x[0]):\n for fname in sorted(files):\n if fname.lower().endswith(formats):\n yield root, fname\n\n\ndef index_subdirectory(directory, class_indices, follow_links, formats):\n \"\"\"Recursively walks directory and list image paths and their class index.\n\n Args:\n directory: string, target directory.\n class_indices: dict mapping class names to their index.\n follow_links: boolean, whether to recursively follow subdirectories\n (if False, we only list top-level images in `directory`).\n formats: Allowlist of file extensions to index (e.g. \".jpg\", \".txt\").\n\n Returns:\n tuple `(filenames, labels)`. `filenames` is a list of relative file\n paths, and `labels` is a list of integer labels corresponding to these\n files.\n \"\"\"\n dirname = os.path.basename(directory)\n valid_files = iter_valid_files(directory, follow_links, formats)\n labels = []\n filenames = []\n for root, fname in valid_files:\n labels.append(class_indices[dirname])\n absolute_path = tf.io.gfile.join(root, fname)\n relative_path = tf.io.gfile.join(\n dirname, os.path.relpath(absolute_path, directory))\n filenames.append(relative_path)\n return filenames, labels\n\n\ndef get_training_or_validation_split(samples, labels, validation_split, subset):\n \"\"\"Potentially restict samples & labels to a training or validation split.\n\n Args:\n samples: List of elements.\n labels: List of corresponding labels.\n validation_split: Float, fraction of data to reserve for validation.\n subset: Subset of the data to return.\n Either \"training\", \"validation\", or None. 
If None, we return all of the\n data.\n\n Returns:\n tuple (samples, labels), potentially restricted to the specified subset.\n \"\"\"\n if not validation_split:\n return samples, labels\n\n num_val_samples = int(validation_split * len(samples))\n if subset == 'training':\n print('Using %d files for training.' % (len(samples) - num_val_samples,))\n samples = samples[:-num_val_samples]\n labels = labels[:-num_val_samples]\n elif subset == 'validation':\n print('Using %d files for validation.' % (num_val_samples,))\n samples = samples[-num_val_samples:]\n labels = labels[-num_val_samples:]\n else:\n raise ValueError('`subset` must be either \"training\" '\n 'or \"validation\", received: %s' % (subset,))\n return samples, labels\n\n\ndef labels_to_dataset(labels, label_mode, num_classes):\n \"\"\"Create a tf.data.Dataset from the list/tuple of labels.\n\n Args:\n labels: list/tuple of labels to be converted into a tf.data.Dataset.\n label_mode:\n - 'binary' indicates that the labels (there can be only 2) are encoded as\n `float32` scalars with values 0 or 1 (e.g. for `binary_crossentropy`).\n - 'categorical' means that the labels are mapped into a categorical vector.\n (e.g. for `categorical_crossentropy` loss).\n num_classes: number of classes of labels.\n\n Returns:\n A `Dataset` instance.\n \"\"\"\n label_ds = tf.data.Dataset.from_tensor_slices(labels)\n if label_mode == 'binary':\n label_ds = label_ds.map(\n lambda x: tf.expand_dims(tf.cast(x, 'float32'), axis=-1),\n num_parallel_calls=tf.data.AUTOTUNE)\n elif label_mode == 'categorical':\n label_ds = label_ds.map(lambda x: tf.one_hot(x, num_classes),\n num_parallel_calls=tf.data.AUTOTUNE)\n return label_ds\n\n\ndef check_validation_split_arg(validation_split, subset, shuffle, seed):\n \"\"\"Raise errors in case of invalid argument values.\n\n Args:\n validation_split: float between 0 and 1, fraction of data to reserve for\n validation.\n subset: One of \"training\" or \"validation\". Only used if `validation_split`\n is set.\n shuffle: Whether to shuffle the data. Either True or False.\n seed: random seed for shuffling and transformations.\n \"\"\"\n if validation_split and not 0 < validation_split < 1:\n raise ValueError(\n '`validation_split` must be between 0 and 1, received: %s' %\n (validation_split,))\n if (validation_split or subset) and not (validation_split and subset):\n raise ValueError(\n 'If `subset` is set, `validation_split` must be set, and inversely.')\n if subset not in ('training', 'validation', None):\n raise ValueError('`subset` must be either \"training\" '\n 'or \"validation\", received: %s' % (subset,))\n if validation_split and shuffle and seed is None:\n raise ValueError(\n 'If using `validation_split` and shuffling the data, you must provide '\n 'a `seed` argument, to make sure that there is no overlap between the '\n 'training and validation subset.')\n",
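The validation-split arithmetic in `get_training_or_validation_split` can be illustrated with plain Python lists; the file names below are hypothetical and no TensorFlow is needed:

```python
# The last int(validation_split * n) samples become the validation subset,
# everything before them the training subset (mirroring the logic above).
samples = [f'img_{i}.jpg' for i in range(10)]
labels = list(range(10))
validation_split = 0.2

num_val = int(validation_split * len(samples))            # 2
train_samples, train_labels = samples[:-num_val], labels[:-num_val]
val_samples, val_labels = samples[-num_val:], labels[-num_val:]
print(len(train_samples), len(val_samples))               # 8 2
```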
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the Permute layer.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nimport copy\n\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.Permute')\nclass Permute(Layer):\n \"\"\"Permutes the dimensions of the input according to a given pattern.\n\n Useful e.g. connecting RNNs and convnets.\n\n Example:\n\n ```python\n model = Sequential()\n model.add(Permute((2, 1), input_shape=(10, 64)))\n # now: model.output_shape == (None, 64, 10)\n # note: `None` is the batch dimension\n ```\n\n Args:\n dims: Tuple of integers. Permutation pattern does not include the\n samples dimension. Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimensions\n of the input.\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same as the input shape, but with the dimensions re-ordered according\n to the specified pattern.\n \"\"\"\n\n def __init__(self, dims, **kwargs):\n super(Permute, self).__init__(**kwargs)\n self.dims = tuple(dims)\n if sorted(dims) != list(range(1, len(dims) + 1)):\n raise ValueError(\n 'Invalid permutation argument `dims` for Permute Layer. '\n 'The set of indices in `dims` must be consecutive and start from 1. '\n f'Received dims={dims}')\n self.input_spec = InputSpec(ndim=len(self.dims) + 1)\n\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n output_shape = copy.copy(input_shape)\n for i, dim in enumerate(self.dims):\n target_dim = input_shape[dim]\n output_shape[i + 1] = target_dim\n return tf.TensorShape(output_shape)\n\n def call(self, inputs):\n return tf.transpose(inputs, perm=(0,) + self.dims)\n\n def get_config(self):\n config = {'dims': self.dims}\n base_config = super(Permute, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Max pooling 1D layer.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nimport functools\n\nfrom keras import backend\nfrom keras.layers.pooling.base_pooling1d import Pooling1D\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.MaxPool1D', 'keras.layers.MaxPooling1D')\nclass MaxPooling1D(Pooling1D):\n \"\"\"Max pooling operation for 1D temporal data.\n\n Downsamples the input representation by taking the maximum value over a\n spatial window of size `pool_size`. The window is shifted by `strides`. The\n resulting output, when using the `\"valid\"` padding option, has a shape of:\n `output_shape = (input_shape - pool_size + 1) / strides)`\n\n The resulting output shape when using the `\"same\"` padding option is:\n `output_shape = input_shape / strides`\n\n For example, for `strides=1` and `padding=\"valid\"`:\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,\n ... strides=1, padding='valid')\n >>> max_pool_1d(x)\n <tf.Tensor: shape=(1, 4, 1), dtype=float32, numpy=\n array([[[2.],\n [3.],\n [4.],\n [5.]]], dtype=float32)>\n\n For example, for `strides=2` and `padding=\"valid\"`:\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,\n ... strides=2, padding='valid')\n >>> max_pool_1d(x)\n <tf.Tensor: shape=(1, 2, 1), dtype=float32, numpy=\n array([[[2.],\n [4.]]], dtype=float32)>\n\n For example, for `strides=1` and `padding=\"same\"`:\n\n >>> x = tf.constant([1., 2., 3., 4., 5.])\n >>> x = tf.reshape(x, [1, 5, 1])\n >>> max_pool_1d = tf.keras.layers.MaxPooling1D(pool_size=2,\n ... strides=1, padding='same')\n >>> max_pool_1d(x)\n <tf.Tensor: shape=(1, 5, 1), dtype=float32, numpy=\n array([[[2.],\n [3.],\n [4.],\n [5.],\n [5.]]], dtype=float32)>\n\n Args:\n pool_size: Integer, size of the max pooling window.\n strides: Integer, or None. Specifies how much the pooling window moves\n for each pooling step.\n If None, it will default to `pool_size`.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, steps, features)` while `channels_first`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n\n Input shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, steps, features)`.\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, steps)`.\n\n Output shape:\n - If `data_format='channels_last'`:\n 3D tensor with shape `(batch_size, downsampled_steps, features)`.\n - If `data_format='channels_first'`:\n 3D tensor with shape `(batch_size, features, downsampled_steps)`.\n \"\"\"\n\n def __init__(self, pool_size=2, strides=None,\n padding='valid', data_format='channels_last', **kwargs):\n\n super(MaxPooling1D, self).__init__(\n functools.partial(backend.pool2d, pool_mode='max'),\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n **kwargs)\n\n\n# Alias\n\nMaxPool1D = MaxPooling1D\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Tests for tensorflow.python.training.saver.py.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport functools\nimport os\nfrom keras.engine import training\nfrom keras.layers import core\nfrom tensorflow.python.training.tracking import util as trackable_utils\n\n\nclass NonLayerTrackable(tf.Module):\n\n def __init__(self):\n super(NonLayerTrackable, self).__init__()\n self.a_variable = trackable_utils.add_variable(\n self, name=\"a_variable\", shape=[])\n\n\nclass MyModel(training.Model):\n \"\"\"A concrete Model for testing.\"\"\"\n\n def __init__(self):\n super(MyModel, self).__init__()\n self._named_dense = core.Dense(1, use_bias=True)\n self._second = core.Dense(1, use_bias=False)\n # We can still track Trackables which aren't Layers.\n self._non_layer = NonLayerTrackable()\n\n def call(self, values):\n ret = self._second(self._named_dense(values))\n return ret\n\n\nclass TrackableCompatibilityTests(tf.test.TestCase):\n\n def _initialized_model(self):\n input_value = tf.constant([[3.]])\n model = MyModel()\n optimizer = tf.compat.v1.train.AdamOptimizer(0.001)\n optimizer_step = tf.compat.v1.train.get_or_create_global_step()\n root_trackable = tf.train.Checkpoint(\n optimizer=optimizer, model=model, optimizer_step=optimizer_step)\n train_op = optimizer.minimize(\n functools.partial(model, input_value),\n global_step=optimizer_step)\n self.evaluate(trackable_utils.gather_initializers(\n root_trackable))\n self.evaluate(train_op)\n # A regular variable, a slot variable, and a non-slot Optimizer variable\n # with known values to check when loading.\n self.evaluate(model._named_dense.bias.assign([1.]))\n self.evaluate(optimizer.get_slot(\n var=model._named_dense.bias, name=\"m\").assign([2.]))\n beta1_power, _ = optimizer._get_beta_accumulators()\n self.evaluate(beta1_power.assign(3.))\n return root_trackable\n\n def _set_sentinels(self, root_trackable):\n self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))\n self.evaluate(\n root_trackable.optimizer.get_slot(\n var=root_trackable.model._named_dense.bias, name=\"m\")\n .assign([102.]))\n beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()\n self.evaluate(beta1_power.assign(103.))\n\n def _check_sentinels(self, root_trackable):\n self.assertAllEqual(\n [1.], self.evaluate(root_trackable.model._named_dense.bias))\n self.assertAllEqual([2.], self.evaluate(\n root_trackable.optimizer.get_slot(\n var=root_trackable.model._named_dense.bias, name=\"m\")))\n beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()\n self.assertAllEqual(3., self.evaluate(beta1_power))\n\n def testLoadFromObjectBasedGraph(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n save_graph = tf.Graph()\n with save_graph.as_default(), self.session(graph=save_graph) as 
sess:\n root = self._initialized_model()\n object_saver = tf.train.Checkpoint(root=root)\n save_path = object_saver.save(file_prefix=checkpoint_prefix)\n\n # An incompatible object-based checkpoint to check error messages\n var = tf.Variable(1., name=\"a\")\n self.evaluate(var.initializer)\n second_saver = tf.train.Checkpoint(v=var)\n second_path = second_saver.save(file_prefix=os.path.join(\n checkpoint_directory, \"second\"))\n\n restore_graph = tf.Graph()\n with restore_graph.as_default(), self.session(\n graph=restore_graph) as sess:\n root = self._initialized_model()\n self._set_sentinels(root)\n saver = tf.compat.v1.train.Saver()\n saver.restore(sess=sess, save_path=save_path)\n self._check_sentinels(root)\n before_second_restore_ops = restore_graph.get_operations()\n # Test that multiple restores do not pollute the graph\n saver.restore(sess=sess, save_path=save_path)\n self.assertEqual(before_second_restore_ops,\n restore_graph.get_operations())\n with self.assertRaisesRegex(tf.errors.NotFoundError,\n \"Could not find some variables\"):\n saver.restore(sess=sess, save_path=second_path)\n\n def testLoadFromObjectBasedEager(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n save_graph = tf.Graph()\n with save_graph.as_default(), self.session(graph=save_graph):\n root = self._initialized_model()\n object_saver = tf.train.Checkpoint(root=root)\n save_path = object_saver.save(file_prefix=checkpoint_prefix)\n\n with tf.__internal__.eager_context.eager_mode():\n root = self._initialized_model()\n self._set_sentinels(root)\n saver = tf.compat.v1.train.Saver(\n root.model.variables + root.optimizer.variables())\n saver.restore(sess=None, save_path=save_path)\n self._check_sentinels(root)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
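The object-based checkpointing round trip that these compatibility tests build on can be sketched in a few lines, assuming TensorFlow 2.x; the temporary directory and the single tracked variable are illustrative only:

```python
# Save a tracked variable with tf.train.Checkpoint, clobber it, then restore.
import os
import tempfile
import tensorflow as tf

ckpt_dir = tempfile.mkdtemp()
prefix = os.path.join(ckpt_dir, 'ckpt')

v = tf.Variable(3.0)
ckpt = tf.train.Checkpoint(v=v)
save_path = ckpt.save(prefix)

v.assign(100.0)                              # overwrite the value
ckpt.restore(save_path).assert_consumed()    # restore from the checkpoint
print(v.numpy())                             # 3.0
```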
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Training-related utilities.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport abc\nimport atexit\nimport collections\nimport functools\nimport multiprocessing.pool\nimport threading\nimport time\n\nimport numpy as np\nfrom keras import backend\nfrom keras import callbacks as cbks\nfrom keras import losses\nfrom keras import metrics as metrics_module\nfrom keras.utils import data_utils\nfrom keras.utils import generic_utils\nfrom keras.utils import losses_utils\nfrom keras.utils import tf_inspect\nfrom tensorflow.python.platform import tf_logging as logging\n\n\ndef is_composite_or_composite_value(tensor):\n \"\"\"Returns true if 'tensor' is a CompositeTensor or a CT Value object.\"\"\"\n # TODO(b/125094323): This should be isinstance(CompositeTensor) or\n # isinstance(CompositeTensorValue) once we support that.\n return isinstance(\n tensor,\n (tf.__internal__.CompositeTensor, tf.compat.v1.SparseTensorValue,\n tf.compat.v1.ragged.RaggedTensorValue))\n\n\nclass Aggregator(object, metaclass=abc.ABCMeta):\n \"\"\"Abstract base class used to aggregate batch-level outputs of a loop.\n\n Attributes:\n use_steps: Whether the loop is using `step` or `batch_size`.\n num_samples: Total number of samples: `batch_size * num_batches`.\n steps: Total number of steps.\n batch_size: Batch size. It is used for validation checks between inputs and\n outputs.\n results: What to return at the end of the aggregation loop.\n \"\"\"\n\n def __init__(self, use_steps, num_samples=None, steps=None, batch_size=None):\n self.use_steps = use_steps\n self.num_samples = num_samples\n self.steps = steps\n self.batch_size = batch_size\n self.results = []\n\n @abc.abstractmethod\n def create(self, batch_outs):\n \"\"\"Creates the initial results from the first batch outputs.\n\n Args:\n batch_outs: A list of batch-level outputs.\n \"\"\"\n raise NotImplementedError('Must be implemented in subclasses.')\n\n @abc.abstractmethod\n def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n \"\"\"Aggregates batch-level results into total results.\n\n Args:\n batch_outs: A list of batch-level outputs.\n batch_start: The start index of this batch. Always `None` if `use_steps`\n is `True`.\n batch_end: The end index of this batch. 
Always `None` if `use_steps` is\n `True`.\n \"\"\"\n raise NotImplementedError('Must be implemented in subclasses.')\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"Prepares the total results to be returned.\"\"\"\n raise NotImplementedError('Must be implemented in subclasses.')\n\n\nclass MetricsAggregator(Aggregator):\n \"\"\"Aggregator that calculates loss and metrics info.\n\n Attributes:\n use_steps: Whether the loop is using `step` or `batch_size`.\n num_samples: Total number of samples: `batch_size*num_batches`.\n steps: Total number of steps, ie number of times to iterate over a dataset\n to cover all samples.\n \"\"\"\n\n def __init__(self, use_steps, num_samples=None, steps=None):\n super(MetricsAggregator, self).__init__(\n use_steps=use_steps,\n num_samples=num_samples,\n steps=steps,\n batch_size=None)\n\n def create(self, batch_outs):\n self.results = [0.] * len(batch_outs)\n\n def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n # Loss.\n if self.use_steps:\n self.results[0] += batch_outs[0]\n else:\n self.results[0] += batch_outs[0] * (batch_end - batch_start)\n # Metrics (always stateful, just grab current values.)\n self.results[1:] = batch_outs[1:]\n\n def finalize(self):\n if not self.results:\n raise ValueError('Empty training data.')\n self.results[0] /= (self.num_samples or self.steps)\n\n\ndef _append_sparse_tensor_value(target, to_append):\n \"\"\"Append sparse tensor value objects.\"\"\"\n # Make sure the sparse tensors are of the same size (except for the 0th dim).\n if len(target.dense_shape) != len(to_append.dense_shape):\n raise RuntimeError(\n 'Unable to concatenate %s and %s. The inner dense shapes do not '\n 'have the same number of dimensions (%s vs %s)' %\n (target, to_append, target.dense_shape, to_append.dense_shape))\n\n if target.dense_shape[1:] != to_append.dense_shape[1:]:\n raise RuntimeError(\n 'Unable to concatenate %s and %s. The inner dense shapes do not '\n 'match inner dimensions (%s vs %s)' %\n (target, to_append, target.dense_shape[1:], to_append.dense_shape[1:]))\n\n # Add the to_append indices to target, updating the 0th value, and keeping\n # track of the maximum so we know the final dense_shape of this tensor.\n base_dim0_value = target.dense_shape[0]\n max_dim0_value = target.dense_shape[0]\n new_indices = target.indices\n for index in to_append.indices:\n # Here, we iterate through the sparse indices of the tensor to append. For\n # each index, we update its zeroth value (the batch index) by adding the\n # number of batch items in the tensor we are appending to (so an index\n # of [0, 0, 1] for a value that is being appended to a tensor with 0th dim\n # size 3 would become [3, 0, 1].)\n index[0] += base_dim0_value\n max_dim0_value = max(max_dim0_value, index[0])\n new_indices = np.append(new_indices, [index], axis=0)\n\n # Extend the values array to contain all of the appended values. 
These will\n # be in the same order as the indices added above.\n new_values = np.concatenate((target.values, to_append.values), axis=0)\n\n # Create a new dense shape by replacing the value for the 0th dimension\n # with the new max dim0 value.\n new_dense_shape = list(target.dense_shape)\n new_dense_shape[0] = max_dim0_value + 1\n new_dense_shape = tuple(new_dense_shape)\n\n return tf.compat.v1.SparseTensorValue(\n indices=new_indices, values=new_values, dense_shape=new_dense_shape)\n\n\ndef _append_ragged_tensor_value(target, to_append):\n \"\"\"Append ragged tensor value objects.\"\"\"\n # Make sure the ragged tensors are of the same size (save for the 0th dim).\n if len(target.shape) != len(to_append.shape):\n raise RuntimeError('Unable to concatenate %s and %s' % (target, to_append))\n\n if target.shape[1:] != to_append.shape[1:]:\n raise RuntimeError('Unable to concatenate %s and %s' % (target, to_append))\n\n adjusted_row_splits = to_append.row_splits[1:] + target.row_splits[-1]\n new_row_splits = np.append(target.row_splits, adjusted_row_splits)\n if isinstance(target.values, tf.compat.v1.ragged.RaggedTensorValue):\n new_values = _append_ragged_tensor_value(target.values, to_append.values)\n else:\n new_values = np.concatenate((target.values, to_append.values), axis=0)\n\n return tf.compat.v1.ragged.RaggedTensorValue(new_values, new_row_splits)\n\n\ndef _append_composite_tensor(target, to_append):\n \"\"\"Helper function to append composite tensors to each other in the 0 axis.\n\n In order to support batching within a fit/evaluate/predict call, we need\n to be able to aggregate within a CompositeTensor. Unfortunately, the CT\n API currently does not make this easy - especially in V1 mode, where we're\n working with CompositeTensor Value objects that have no connection with the\n CompositeTensors that created them.\n\n Args:\n target: CompositeTensor or CompositeTensor value object that will be\n appended to.\n to_append: CompositeTensor or CompositeTensor value object to append to.\n 'target'.\n\n Returns:\n A CompositeTensor or CompositeTensor value object.\n\n Raises:\n RuntimeError: if concatenation is not possible.\n \"\"\"\n if type(target) is not type(to_append):\n raise RuntimeError('Unable to concatenate %s and %s' %\n (type(target), type(to_append)))\n\n # Perform type-specific concatenation.\n # TODO(b/125094323): This should be replaced by a simple call to\n # target.append() that should work on all of the below classes.\n\n # If we're seeing a CompositeTensor here, we know it's because we're in\n # Eager mode (or else we'd have evaluated the CT to a CT Value object\n # already). Therefore, it's safe to call concat() on it without evaluating\n # the result any further. If not - that is, if we're seeing a\n # SparseTensorValue or a RaggedTensorValue - we need to hand-update it\n # since we're outside of the graph anyways.\n if isinstance(target, tf.SparseTensor):\n # We need to invoke the sparse version of concatenate here - tf.concat\n # won't work.\n return tf.compat.v1.sparse_concat(sp_inputs=[target, to_append], axis=0)\n elif isinstance(target, tf.RaggedTensor):\n return tf.concat([target, to_append], axis=0)\n elif isinstance(target, tf.compat.v1.SparseTensorValue):\n return _append_sparse_tensor_value(target, to_append)\n elif isinstance(target, tf.compat.v1.ragged.RaggedTensorValue):\n return _append_ragged_tensor_value(target, to_append)\n else:\n raise RuntimeError('Attempted to concatenate unsupported object %s.' 
%\n type(target))\n\n\nclass ConcatAggregator(Aggregator):\n \"\"\"Combine tensor-likes which cannot be merged on the fly.\n\n This class expects to aggregate a single tensor-like rather than a nested\n structure of tensor-likes.\n \"\"\"\n\n def __init__(self, batch_size):\n self.composite = None\n super(ConcatAggregator, self).__init__(\n use_steps=True, num_samples=None, steps=None, batch_size=batch_size)\n\n def create(self, batch_element):\n self.composite = is_composite_or_composite_value(batch_element)\n\n def aggregate(self, batch_element, batch_start=None, batch_end=None):\n\n # TODO(psv): Add num_samples check here to detect when output batch\n # #samples is < batch size and != input batch #samples.\n if self.batch_size and self.batch_size < batch_element.shape[0]:\n raise ValueError(\n 'Mismatch between expected batch size and model output batch size. '\n 'Output shape = {}, expected output shape = shape {}'.format(\n batch_element.shape,\n (self.batch_size,) + batch_element.shape[1:]))\n self.results.append(batch_element)\n\n def finalize(self):\n # Special case of single batch inference which skips a copy.\n if len(self.results) == 1:\n self.results = self.results[0]\n\n elif self.composite:\n # TODO(taylorrobie): efficiently concatenate.\n results = self.results[0]\n for r in self.results[1:]:\n results = _append_composite_tensor(results, r)\n self.results = results\n\n else:\n self.results = np.concatenate(self.results, axis=0)\n\n\n_COPY_THREADS = 4\n_COPY_POOL = None\n\n\ndef get_copy_pool():\n \"\"\"Shared threadpool for copying arrays.\n\n Pool instantiation takes ~ 2ms, so a singleton pool is used rather than\n creating a pool per SliceAggregator.\n\n Returns:\n The global copy threadpool.\n \"\"\"\n global _COPY_POOL\n if _COPY_POOL is None:\n _COPY_POOL = multiprocessing.pool.ThreadPool(_COPY_THREADS)\n atexit.register(_COPY_POOL.close)\n return _COPY_POOL\n\n\nclass SliceAggregator(Aggregator):\n \"\"\"Combine arrays where the final size is known.\n\n This class expects to aggregate a single tensor-like rather than a nested\n structure of tensor-likes.\n\n NumPy copies are an operation that threads handle quite well because all of\n the heavy lifting is in c and does not need the GIL. Moreover, we can perform\n lock-free writes to the same buffer in multiple threads because the nature of\n result aggregation guarantees that either the indices are disjoint or the\n aggregator will throw an exception in finalize. Moreover, because aggregation\n is performed on the slowest varying dimension, assignments for a given batch\n will write to contiguous blocks of memory, further minimizing contention.\n\n There is, however, some scheduling and context switching overhead which will\n offset the gains from pipelining the slice assignment. Below a given threshold\n it is faster to simply assign in the main thread rather than enqueue the\n assignment in a side thread. 
The exact threshold will vary from system to\n system, but the time is not very sensitive to the exact transition so a value\n of 2 ** 14 was chosen which should be reasonable on most systems.\n \"\"\"\n\n _BINARY_SIZE_THRESHOLD = 2 ** 14\n _MAX_COPY_SECONDS = 300\n\n def __init__(self, num_samples, batch_size):\n self._async_copies = []\n self._pool = get_copy_pool()\n self._errors = []\n super(SliceAggregator, self).__init__(\n use_steps=False,\n num_samples=num_samples,\n steps=None,\n batch_size=batch_size)\n\n def create(self, batch_element):\n # This step does not need to be pipelined because NumPy empty array\n # initialization is effectively instantaneous.\n shape = (self.num_samples,) + batch_element.shape[1:]\n dtype = batch_element.dtype\n\n self.results = np.empty(shape=shape, dtype=dtype)\n\n def aggregate(self, batch_element, batch_start, batch_end):\n # Fail early.\n if self._errors:\n raise self._errors[0]\n\n # In the special case of single batch inference, no copy is needed.\n if batch_end - batch_start == self.num_samples:\n if self.num_samples != batch_element.shape[0]:\n raise ValueError(\n 'Mismatch between expected batch size and model output batch size. '\n 'Output shape = {}, expected output shape = shape {}'.format(\n batch_element.shape, self.results.shape))\n\n self.results = batch_element\n return\n\n # This is an approximate threshold, so we don't need to consider the number\n # of bytes per element.\n num_elements = np.prod(batch_element.shape)\n if num_elements < self._BINARY_SIZE_THRESHOLD:\n self.results[batch_start:batch_end] = batch_element\n else:\n is_finished = threading.Event()\n self._pool.apply_async(\n self._slice_assign,\n args=(batch_element, batch_start, batch_end, is_finished))\n self._async_copies.append(is_finished)\n\n def _slice_assign(self, batch_element, batch_start, batch_end, is_finished):\n \"\"\"Legacy utility method to slice input arrays.\"\"\"\n try:\n self.results[batch_start:batch_end] = batch_element\n\n except Exception as e: # pylint: disable=broad-except\n # `_slice_assign` should only be called in threads and exceptions raised\n # in threads do not carry over to the main thread. So instead we perform a\n # a broad catch in the thread and then store the exception to be re-raised\n # in the main thread.\n self._errors.append(e)\n\n finally:\n is_finished.set()\n\n def finalize(self):\n start_time = time.time()\n for is_finished in self._async_copies:\n timeout = max([0., self._MAX_COPY_SECONDS - (time.time() - start_time)])\n if not is_finished.wait(timeout):\n raise ValueError('Timed out waiting for copy to complete.')\n\n if self._errors:\n raise self._errors[0]\n\n\nclass OutputsAggregator(Aggregator):\n \"\"\"Aggregator that concatenates outputs.\"\"\"\n\n _structure = None\n\n def create(self, batch_outs):\n # SparseTensorValue is a named tuple which nest will flatten, so we need\n # to guard it to properly handle the structure.\n self._structure = tf.__internal__.nest.get_traverse_shallow_structure(\n lambda x: not is_composite_or_composite_value(x), batch_outs)\n batch_outs = tf.__internal__.nest.flatten_up_to(self._structure, batch_outs)\n\n for batch_element in batch_outs:\n if is_composite_or_composite_value(batch_element):\n # If the output is not a ndarray, it will be either a composite tensor\n # or a composite tensor's Value object. 
In either case, we can't\n # allocate an array to hold the object - we'll handle it later.\n self.results.append(ConcatAggregator(self.batch_size))\n elif isinstance(batch_element, np.ndarray):\n self.results.append(\n (ConcatAggregator(self.batch_size) if self.use_steps else\n SliceAggregator(self.num_samples, self.batch_size)))\n else:\n # This is not a ndarray, a CompositeTensor, or a CompositeTensorValue.\n # Fail fast rather than trying to concatenate it.\n raise RuntimeError('Attempted to aggregate unsupported object {}.'\n .format(batch_element))\n\n self.results[-1].create(batch_element)\n\n def aggregate(self, batch_outs, batch_start=None, batch_end=None):\n batch_outs = tf.__internal__.nest.flatten_up_to(self._structure, batch_outs)\n for batch_element, result in zip(batch_outs, self.results):\n result.aggregate(batch_element, batch_start, batch_end)\n\n def finalize(self):\n for result in self.results:\n result.finalize()\n self.results = [i.results for i in self.results]\n self.results = tf.nest.pack_sequence_as(self._structure, self.results)\n\n\ndef get_progbar(model, count_mode, include_metrics=True):\n \"\"\"Get Progbar.\"\"\"\n if include_metrics:\n stateful_metric_names = getattr(model, 'metrics_names', None)\n if stateful_metric_names:\n stateful_metric_names = stateful_metric_names[1:] # Exclude `loss`\n else:\n stateful_metric_names = None\n return cbks.ProgbarLogger(count_mode, stateful_metrics=stateful_metric_names)\n\n\ndef check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'):\n \"\"\"Determine the number of samples provided for training and evaluation.\n\n The number of samples is not defined when running with `steps`,\n in which case the number of samples is set to `None`.\n\n Args:\n ins: List of tensors to be fed to the Keras function.\n batch_size: Integer batch size or `None` if not defined.\n steps: Total number of steps (batches of samples) before declaring\n `_predict_loop` finished. Ignored with the default value of `None`.\n steps_name: The public API's parameter name for `steps`.\n\n Raises:\n ValueError: when `steps` is `None` and the attribute `ins.shape`\n does not exist. Also raises ValueError when `steps` is not `None`\n and `batch_size` is not `None` because they are mutually\n exclusive.\n\n Returns:\n When steps is `None`, returns the number of samples to be\n processed based on the size of the first dimension of the\n first input numpy array. 
When steps is not `None` and\n `batch_size` is `None`, returns `None`.\n \"\"\"\n if steps is not None and batch_size is not None:\n raise ValueError('If ' + steps_name +\n ' is set, the `batch_size` must be None.')\n if check_steps_argument(ins, steps, steps_name):\n return None\n\n if hasattr(ins[0], 'shape'):\n return int(ins[0].shape[0])\n return None # Edge case where ins == [static_learning_phase]\n\n\ndef standardize_single_array(x, expected_shape=None):\n \"\"\"Expand data of shape (x,) to (x, 1), unless len(expected_shape)==1.\"\"\"\n if x is None:\n return None\n\n if is_composite_or_composite_value(x):\n return x\n\n if isinstance(x, int):\n raise ValueError(\n 'Expected an array data type but received an integer: {}'.format(x))\n\n if (x.shape is not None and len(x.shape) == 1 and\n (expected_shape is None or len(expected_shape) != 1)):\n if tf.is_tensor(x):\n x = tf.compat.v1.expand_dims(x, axis=1)\n else:\n x = np.expand_dims(x, 1)\n return x\n\n\ndef get_composite_shape(tensor):\n \"\"\"Returns the shape of the passed composite tensor.\"\"\"\n if isinstance(tensor, tf.compat.v1.SparseTensorValue):\n # SparseTensorValues use a 'dense_shape' attribute\n return tensor.dense_shape\n else:\n return tensor.shape\n\n\ndef standardize_input_data(data,\n names,\n shapes=None,\n check_batch_axis=True,\n exception_prefix=''):\n \"\"\"Normalizes inputs and targets provided by users.\n\n Users may pass data as a list of arrays, dictionary of arrays,\n or as a single array. We normalize this to an ordered list of\n arrays (same order as `names`), while checking that the provided\n arrays have shapes that match the network's expectations.\n\n Args:\n data: User-provided input data (polymorphic).\n names: List of expected array names.\n shapes: Optional list of expected array shapes.\n check_batch_axis: Boolean; whether to check that the batch axis of the\n arrays matches the expected value found in `shapes`.\n exception_prefix: String prefix used for exception formatting.\n\n Returns:\n List of standardized input arrays (one array per model input).\n\n Raises:\n ValueError: in case of improperly formatted user-provided data.\n \"\"\"\n try:\n data_len = len(data)\n except TypeError:\n # For instance if data is `None` or a symbolic Tensor.\n data_len = None\n\n if not names:\n if data_len and not isinstance(data, dict):\n raise ValueError(\n 'Error when checking model ' + exception_prefix + ': '\n 'expected no data, but got:', data)\n return []\n if data is None:\n return [None for _ in range(len(names))]\n\n if isinstance(data, dict):\n try:\n data = [\n data[x].values\n if data[x].__class__.__name__ == 'DataFrame' else data[x]\n for x in names\n ]\n except KeyError as e:\n raise ValueError('No data provided for \"' + e.args[0] + '\". 
Need data '\n 'for each key in: ' + str(names))\n elif isinstance(data, (list, tuple)):\n if isinstance(data[0], (list, tuple)):\n data = [np.asarray(d) for d in data]\n elif len(names) == 1 and isinstance(data[0], (float, int)):\n data = [np.asarray(data)]\n else:\n data = [\n x.values if x.__class__.__name__ == 'DataFrame' else x for x in data\n ]\n else:\n data = data.values if data.__class__.__name__ == 'DataFrame' else data\n data = [data]\n\n if shapes is not None:\n data = [\n standardize_single_array(x, shape) for (x, shape) in zip(data, shapes)\n ]\n else:\n data = [standardize_single_array(x) for x in data]\n\n if len(data) != len(names):\n if data and hasattr(data[0], 'shape'):\n raise ValueError('Error when checking model ' + exception_prefix +\n ': the list of Numpy arrays that you are passing to '\n 'your model is not the size the model expected. '\n 'Expected to see ' + str(len(names)) + ' array(s), ' +\n 'for inputs ' + str(names) + ' but instead got the '\n 'following list of ' + str(len(data)) + ' arrays: ' +\n str(data)[:200] + '...')\n elif len(names) > 1:\n raise ValueError('Error when checking model ' + exception_prefix +\n ': you are passing a list as input to your model, '\n 'but the model expects a list of ' + str(len(names)) +\n ' Numpy arrays instead. The list you passed was: ' +\n str(data)[:200])\n elif len(data) == 1 and not hasattr(data[0], 'shape'):\n raise TypeError('Error when checking model ' + exception_prefix +\n ': data should be a Numpy array, or list/dict of '\n 'Numpy arrays. Found: ' + str(data)[:200] + '...')\n elif len(names) == 1:\n data = [np.asarray(data)]\n\n # Check shapes compatibility.\n if shapes:\n for i in range(len(names)):\n if shapes[i] is not None:\n if tf.is_tensor(data[i]):\n tensorshape = data[i].shape\n if not tensorshape:\n continue\n data_shape = tuple(tensorshape.as_list())\n elif is_composite_or_composite_value(data[i]):\n tensorshape = get_composite_shape(data[i])\n data_shape = tuple(tensorshape.as_list())\n else:\n data_shape = data[i].shape\n\n shape = shapes[i]\n if len(data_shape) != len(shape):\n raise ValueError('Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have ' +\n str(len(shape)) + ' dimensions, but got array '\n 'with shape ' + str(data_shape))\n if not check_batch_axis:\n data_shape = data_shape[1:]\n shape = shape[1:]\n for dim, ref_dim in zip(data_shape, shape):\n if ref_dim != dim and ref_dim is not None and dim is not None:\n raise ValueError('Error when checking ' + exception_prefix +\n ': expected ' + names[i] + ' to have shape ' +\n str(shape) + ' but got array with shape ' +\n str(data_shape))\n return data\n\n\ndef standardize_sample_or_class_weights(x_weight, output_names, weight_type):\n \"\"\"Maps `sample_weight` or `class_weight` to model outputs.\n\n Args:\n x_weight: User-provided `sample_weight` or `class_weight` argument.\n output_names: List of output names (strings) in the model.\n weight_type: A string used purely for exception printing.\n\n Returns:\n A list of `sample_weight` or `class_weight` where there are exactly\n one element per model output.\n\n Raises:\n ValueError: In case of invalid user-provided argument.\n \"\"\"\n if x_weight is None or (isinstance(x_weight, (list, tuple)) and\n len(x_weight) == 0): # pylint: disable=g-explicit-length-test\n return [None for _ in output_names]\n if len(output_names) == 1:\n if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:\n return x_weight\n if isinstance(x_weight, dict) and output_names[0] in 
x_weight:\n return [x_weight[output_names[0]]]\n else:\n return [x_weight]\n if isinstance(x_weight, (list, tuple)):\n if len(x_weight) != len(output_names):\n raise ValueError('Provided `' + weight_type + '` was a list of ' +\n str(len(x_weight)) + ' elements, but the model has ' +\n str(len(output_names)) + ' outputs. '\n 'You should provide one `' + weight_type + '`'\n 'array per model output.')\n return x_weight\n if isinstance(x_weight, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names)\n x_weights = []\n for name in output_names:\n x_weights.append(x_weight.get(name))\n return x_weights\n else:\n raise TypeError('The model has multiple outputs, so `' + weight_type + '` '\n 'should be either a list or a dict. '\n 'Provided `' + weight_type + '` type not understood: ' +\n str(x_weight))\n\n\ndef standardize_class_weights(class_weight, output_names):\n return standardize_sample_or_class_weights(class_weight, output_names,\n 'class_weight')\n\n\ndef standardize_sample_weights(sample_weight, output_names):\n return standardize_sample_or_class_weights(sample_weight, output_names,\n 'sample_weight')\n\n\ndef check_array_lengths(inputs, targets, weights=None):\n \"\"\"Does user input validation for numpy arrays.\n\n Args:\n inputs: list of Numpy arrays of inputs.\n targets: list of Numpy arrays of targets.\n weights: list of Numpy arrays of sample weights.\n\n Raises:\n ValueError: in case of incorrectly formatted data.\n \"\"\"\n\n def is_tensor_or_composite_tensor(x):\n return tf.is_tensor(x) or is_composite_or_composite_value(x)\n\n def set_of_lengths(x):\n # Returns a set with the variation between\n # different shapes, with None => 0\n if x is None:\n return {}\n else:\n return set([\n y.shape[0]\n for y in x\n if y is not None and not is_tensor_or_composite_tensor(y)\n ])\n\n set_x = set_of_lengths(inputs)\n set_y = set_of_lengths(targets)\n set_w = set_of_lengths(weights)\n if len(set_x) > 1:\n raise ValueError('All input arrays (x) should have '\n 'the same number of samples. Got array shapes: ' +\n str([x.shape for x in inputs]))\n if len(set_y) > 1:\n raise ValueError('All target arrays (y) should have '\n 'the same number of samples. Got array shapes: ' +\n str([y.shape for y in targets]))\n if set_x and set_y and list(set_x)[0] != list(set_y)[0]:\n raise ValueError('Input arrays should have '\n 'the same number of samples as target arrays. '\n 'Found ' + str(list(set_x)[0]) + ' input samples '\n 'and ' + str(list(set_y)[0]) + ' target samples.')\n if len(set_w) > 1:\n raise ValueError('All sample_weight arrays should have '\n 'the same number of samples. Got array shapes: ' +\n str([w.shape for w in weights]))\n if set_y and set_w and list(set_y)[0] != list(set_w)[0]:\n raise ValueError('Sample_weight arrays should have '\n 'the same number of samples as target arrays. Got ' +\n str(list(set_y)[0]) + ' input samples and ' +\n str(list(set_w)[0]) + ' target samples.')\n\n\ndef check_loss_and_target_compatibility(targets, loss_fns, output_shapes):\n \"\"\"Does validation on the compatibility of targets and loss functions.\n\n This helps prevent users from using loss functions incorrectly. 
This check\n is purely for UX purposes.\n\n Args:\n targets: list of Numpy arrays of targets.\n loss_fns: list of loss functions.\n output_shapes: list of shapes of model outputs.\n\n Raises:\n ValueError: if a loss function or target array\n is incompatible with an output.\n \"\"\"\n key_loss_fns = {\n losses.mean_squared_error, losses.binary_crossentropy,\n losses.categorical_crossentropy\n }\n key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,\n losses.CategoricalCrossentropy)\n for y, loss, shape in zip(targets, loss_fns, output_shapes):\n if y is None or loss is None or tf.is_tensor(y):\n continue\n if losses.is_categorical_crossentropy(loss):\n if y.shape[-1] == 1:\n raise ValueError('You are passing a target array of shape ' +\n str(y.shape) +\n ' while using as loss `categorical_crossentropy`. '\n '`categorical_crossentropy` expects '\n 'targets to be binary matrices (1s and 0s) '\n 'of shape (samples, classes). '\n 'If your targets are integer classes, '\n 'you can convert them to the expected format via:\\n'\n '```\\n'\n 'from keras.utils import to_categorical\\n'\n 'y_binary = to_categorical(y_int)\\n'\n '```\\n'\n '\\n'\n 'Alternatively, you can use the loss function '\n '`sparse_categorical_crossentropy` instead, '\n 'which does expect integer targets.')\n\n is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)\n if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and\n (loss.fn in key_loss_fns))):\n for target_dim, out_dim in zip(y.shape[1:], shape[1:]):\n if out_dim is not None and target_dim != out_dim:\n loss_name = loss.name\n if loss_name is None:\n loss_type = loss.fn if is_loss_wrapper else type(loss)\n loss_name = loss_type.__name__\n raise ValueError('A target array with shape ' + str(y.shape) +\n ' was passed for an output of shape ' + str(shape) +\n ' while using as loss `' + loss_name + '`. '\n 'This loss expects targets to have the same shape '\n 'as the output.')\n\n\ndef collect_per_output_metric_info(metrics,\n output_names,\n output_shapes,\n loss_fns,\n from_serialized=False,\n is_weighted=False):\n \"\"\"Maps metric names and functions to model outputs.\n\n Args:\n metrics: a list or a list of lists or a dict of metric functions.\n output_names: a list of the names (strings) of model outputs.\n output_shapes: a list of the shapes (strings) of model outputs.\n loss_fns: a list of the loss functions corresponding to the model outputs.\n from_serialized: whether the model the metrics are being sourced from is\n being initialized from a serialized format.\n is_weighted: Boolean indicating whether the given metrics are weighted.\n\n Returns:\n A list (one entry per model output) of dicts.\n For instance, if the model has 2 outputs, and for the first output\n we want to compute \"binary_accuracy\" and \"binary_crossentropy\",\n and just \"binary_accuracy\" for the second output,\n the list would look like: `[{\n 'acc': binary_accuracy(),\n 'ce': binary_crossentropy(),\n }, {\n 'acc': binary_accuracy(),\n }]`\n\n Raises:\n TypeError: if an incorrect type is passed for the `metrics` argument.\n \"\"\"\n if not metrics:\n return [{} for _ in output_names]\n\n if isinstance(metrics, list):\n any_sub_list = any(isinstance(m, list) for m in metrics)\n if any_sub_list:\n if len(metrics) != len(output_names):\n raise ValueError('When passing a list of lists as `metrics`, '\n 'it should have one entry per model output. 
'\n 'The model has ' + str(len(output_names)) +\n ' outputs, but you passed metrics=' + str(metrics))\n # User has provided a list of len = len(outputs).\n nested_metrics = [generic_utils.to_list(m) for m in metrics]\n else:\n # If it is a single list we then apply all metrics to all outputs.\n if len(output_names) > 1:\n nested_metrics = []\n for _ in output_names:\n nested_metrics.append(\n [metrics_module.clone_metric(m) for m in metrics])\n else:\n nested_metrics = [metrics]\n elif isinstance(metrics, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys('metrics', metrics, output_names)\n nested_metrics = []\n for name in output_names:\n output_metrics = generic_utils.to_list(metrics.get(name, []))\n nested_metrics.append(output_metrics)\n else:\n raise TypeError('Type of `metrics` argument not understood. '\n 'Expected a list or dictionary, found: ' + str(metrics))\n\n per_output_metrics = []\n for i, metrics in enumerate(nested_metrics):\n metrics_dict = collections.OrderedDict()\n for metric in metrics:\n metric_name = get_metric_name(metric, is_weighted)\n metric_fn = get_metric_function(\n metric, output_shape=output_shapes[i], loss_fn=loss_fns[i])\n metric_fn._from_serialized = from_serialized # pylint: disable=protected-access\n\n # If the metric function is not stateful, we create a stateful version.\n if not isinstance(metric_fn, metrics_module.Metric):\n metric_fn = metrics_module.MeanMetricWrapper(\n metric_fn, name=metric_name)\n # If the metric is being revived from something stateless, such as a\n # string (e.g. \"accuracy\"), we may need to later reapply transformations\n # such as renaming.\n metric_fn._from_serialized = False # pylint: disable=protected-access\n metrics_dict[metric_name] = metric_fn\n per_output_metrics.append(metrics_dict)\n\n return per_output_metrics\n\n\ndef batch_shuffle(index_array, batch_size):\n \"\"\"Shuffles an array in a batch-wise fashion.\n\n Useful for shuffling HDF5 arrays\n (where one cannot access arbitrary indices).\n\n Args:\n index_array: array of indices to be shuffled.\n batch_size: integer.\n\n Returns:\n The `index_array` array, shuffled in a batch-wise fashion.\n \"\"\"\n batch_count = int(len(index_array) / batch_size)\n # to reshape we need to be cleanly divisible by batch size\n # we stash extra items and reappend them after shuffling\n last_batch = index_array[batch_count * batch_size:]\n index_array = index_array[:batch_count * batch_size]\n index_array = index_array.reshape((batch_count, batch_size))\n np.random.shuffle(index_array)\n index_array = index_array.flatten()\n return np.append(index_array, last_batch)\n\n\ndef standardize_weights(y,\n sample_weight=None,\n class_weight=None,\n sample_weight_mode=None):\n \"\"\"Performs sample weight validation and standardization.\n\n Everything gets normalized to a single sample-wise (or timestep-wise)\n weight array. If both `sample_weight` and `class_weight` are provided,\n the weights are multiplied.\n\n Args:\n y: Numpy array or Tensor of model targets to be weighted.\n sample_weight: User-provided `sample_weight` argument.\n class_weight: User-provided `class_weight` argument.\n sample_weight_mode: One of `None` or `\"temporal\"`. `\"temporal\"` indicated\n that we expect 2D weight data that will be applied to the last 2\n dimensions of the targets (i.e. 
we are weighting timesteps, not\n samples).\n\n Returns:\n A numpy array of target weights, one entry per sample to weight.\n\n Raises:\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n # Iterator may return sample_weight as 1-tuple\n if isinstance(sample_weight, tuple):\n sample_weight = sample_weight[0]\n if sample_weight_mode is not None and sample_weight_mode != 'samplewise':\n if sample_weight_mode != 'temporal':\n raise ValueError('\"sample_weight_mode '\n 'should be None or \"temporal\". '\n 'Found: ' + str(sample_weight_mode))\n if len(y.shape) < 3:\n raise ValueError('Found a sample_weight array for '\n 'an input with shape ' + str(y.shape) + '. '\n 'Timestep-wise sample weighting (use of '\n 'sample_weight_mode=\"temporal\") is restricted to '\n 'outputs that are at least 3D, i.e. that have '\n 'a time dimension.')\n if sample_weight is not None and len(sample_weight.shape) != 2:\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + '. '\n 'In order to use timestep-wise sample weighting, '\n 'you should pass a 2D sample_weight array.')\n else:\n if sample_weight is not None and len(sample_weight.shape) != 1:\n raise ValueError(\n 'Found a sample_weight array with shape {}. In order to '\n 'use timestep-wise sample weights, you should specify '\n 'sample_weight_mode=\"temporal\" in compile(); found \"{}\" '\n 'instead. If you just mean to use sample-wise weights, '\n 'make sure your sample_weight array is 1D.'.format(\n sample_weight.shape, sample_weight_mode))\n\n if sample_weight is not None:\n if len(sample_weight.shape) > len(y.shape):\n raise ValueError('Found a sample_weight with shape' +\n str(sample_weight.shape) + '.'\n 'Expected sample_weight with rank '\n 'less than or equal to ' + str(len(y.shape)))\n\n if (not tf.is_tensor(sample_weight) and\n y.shape[:sample_weight.ndim] != sample_weight.shape):\n raise ValueError('Found a sample_weight array with shape ' +\n str(sample_weight.shape) + ' for an input with shape ' +\n str(y.shape) + '. '\n 'sample_weight cannot be broadcast.')\n\n # Class weights applied per-sample.\n class_sample_weight = None\n if isinstance(class_weight, dict):\n if len(y.shape) > 2:\n raise ValueError('`class_weight` not supported for '\n '3+ dimensional targets.')\n\n if tf.is_tensor(y):\n # Few classes are expected, so densifying is reasonable.\n keys = np.array(sorted(class_weight.keys()))\n values = np.array([class_weight[i] for i in keys])\n weight_vector = np.zeros(np.max(keys) + 1)\n weight_vector[:] = np.nan\n weight_vector[keys] = values\n\n y_classes = tf.__internal__.smart_cond.smart_cond(\n len(y.shape.as_list()) == 2 and backend.shape(y)[1] > 1,\n lambda: backend.argmax(y, axis=1),\n lambda: tf.cast(backend.reshape(y, (-1,)), tf.int64))\n class_sample_weight = tf.compat.v1.gather(weight_vector, y_classes)\n tf.debugging.check_numerics(\n class_sample_weight,\n 'Invalid classes or class weights detected. 
NaN values indicate that '\n 'an appropriate class weight could not be determined.')\n class_sample_weight = tf.cast(class_sample_weight, backend.floatx())\n if sample_weight is not None:\n sample_weight = tf.cast(\n tf.convert_to_tensor(sample_weight),\n backend.floatx())\n else:\n y_classes = y\n if len(y.shape) == 2:\n if y.shape[1] > 1:\n y_classes = np.argmax(y, axis=1)\n elif y.shape[1] == 1:\n y_classes = np.reshape(y, y.shape[0])\n\n class_sample_weight = np.asarray(\n [class_weight[cls] for cls in y_classes if cls in class_weight])\n\n if len(class_sample_weight) != len(y_classes):\n # subtract the sets to pick all missing classes\n existing_classes = set(y_classes)\n existing_class_weight = set(class_weight.keys())\n raise ValueError(\n '`class_weight` must contain all classes in the data.'\n ' The classes %s exist in the data but not in '\n '`class_weight`.' % (existing_classes - existing_class_weight))\n\n if class_sample_weight is not None and sample_weight is not None:\n # Multiply weights if both are provided.\n return class_sample_weight * sample_weight\n if sample_weight is not None:\n return sample_weight\n if class_sample_weight is not None:\n return class_sample_weight\n return None\n\n\ndef has_symbolic_tensors(ls):\n if tf.executing_eagerly():\n return False\n return has_tensors(ls)\n\n\ndef has_tensors(ls):\n \"\"\"Returns true if `ls` contains tensors.\"\"\"\n # Note: at some point in time ragged tensors didn't count as tensors, so this\n # returned false for ragged tensors. Making this return true fails some tests\n # which would then require a steps_per_epoch argument.\n if isinstance(ls, (list, tuple)):\n return any(\n tf.is_tensor(v) and\n not isinstance(v, tf.RaggedTensor) for v in ls)\n if isinstance(ls, dict):\n return any(\n tf.is_tensor(v) and\n not isinstance(v, tf.RaggedTensor)\n for _, v in ls.items())\n return tf.is_tensor(ls) and not isinstance(\n ls, tf.RaggedTensor)\n\n\ndef get_metric_name(metric, weighted=False):\n \"\"\"Returns the name corresponding to the given metric input.\n\n Args:\n metric: Metric function name or reference.\n weighted: Boolean indicating if the given metric is weighted.\n\n Returns:\n The metric name.\n \"\"\"\n if tf.__internal__.tf2.enabled():\n # We keep the string that the user has set in compile as the metric name.\n if isinstance(metric, str):\n return metric\n\n metric = metrics_module.get(metric)\n return metric.name if hasattr(metric, 'name') else metric.__name__\n else:\n metric_name_prefix = 'weighted_' if weighted else ''\n if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):\n if metric in ('accuracy', 'acc'):\n suffix = 'acc'\n elif metric in ('crossentropy', 'ce'):\n suffix = 'ce'\n else:\n metric_fn = metrics_module.get(metric)\n # Get metric name as string\n if hasattr(metric_fn, 'name'):\n suffix = metric_fn.name\n else:\n suffix = metric_fn.__name__\n metric_name = metric_name_prefix + suffix\n return metric_name\n\n\ndef get_metric_function(metric, output_shape=None, loss_fn=None):\n \"\"\"Returns the metric function corresponding to the given metric input.\n\n Args:\n metric: Metric function name or reference.\n output_shape: The shape of the output that this metric will be calculated\n for.\n loss_fn: The loss function used.\n\n Returns:\n The metric function.\n \"\"\"\n if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:\n return metrics_module.get(metric)\n\n is_sparse_categorical_crossentropy = (\n isinstance(loss_fn, losses.SparseCategoricalCrossentropy) or\n (isinstance(loss_fn, 
losses.LossFunctionWrapper) and\n loss_fn.fn == losses.sparse_categorical_crossentropy))\n\n is_binary_crossentropy = (\n isinstance(loss_fn, losses.BinaryCrossentropy) or\n (isinstance(loss_fn, losses.LossFunctionWrapper) and\n loss_fn.fn == losses.binary_crossentropy))\n\n if metric in ['accuracy', 'acc']:\n if output_shape[-1] == 1 or is_binary_crossentropy:\n return metrics_module.binary_accuracy\n elif is_sparse_categorical_crossentropy:\n return metrics_module.sparse_categorical_accuracy\n # If the output_shape[-1] is not 1, then we know output is `categorical`.\n # We assume it is sparse categorical only if loss is explicitly given\n # as sparse categorical crossentropy loss.\n return metrics_module.categorical_accuracy\n else:\n if output_shape[-1] == 1 or is_binary_crossentropy:\n return metrics_module.binary_crossentropy\n elif is_sparse_categorical_crossentropy:\n return metrics_module.sparse_categorical_crossentropy\n return metrics_module.categorical_crossentropy\n\n\ndef call_metric_function(metric_fn,\n y_true,\n y_pred=None,\n weights=None,\n mask=None):\n \"\"\"Invokes metric function and returns the metric result tensor.\"\"\"\n if mask is not None:\n mask = tf.cast(mask, y_pred.dtype)\n if weights is None:\n # Use mask as sample weight.\n weights = mask\n else:\n # Update dimensions of weights to match with mask.\n weights = tf.cast(weights, dtype=y_pred.dtype)\n mask, _, weights = losses_utils.squeeze_or_expand_dimensions(\n mask, sample_weight=weights)\n weights *= mask\n\n if y_pred is not None:\n return metric_fn(y_true, y_pred, sample_weight=weights)\n # `Mean` metric only takes a single value.\n return metric_fn(y_true, sample_weight=weights)\n\n\ndef get_loss_function(loss):\n \"\"\"Returns the loss corresponding to the loss input in `compile` API.\"\"\"\n if loss is None or isinstance(loss, losses.Loss):\n return loss\n\n if tf_inspect.isclass(loss) and issubclass(loss, losses.Loss):\n # It is not safe to assume that the loss takes no constructor arguments.\n raise ValueError(\n 'Received uninstantiated Loss class: {}\\nPlease call loss \"\"classes '\n 'before passing them to Model.compile.'.format(loss))\n\n # Deserialize loss configuration, if needed.\n if isinstance(loss, collections.abc.Mapping):\n loss = losses.get(loss)\n\n # Custom callable class.\n if callable(loss) and not hasattr(loss, '__name__'):\n return loss\n\n # Wrap loss function with signature `(y_true, y_pred, **kwargs)`\n # in `LossFunctionWrapper` class.\n loss_fn = losses.get(loss)\n\n # For losses which are given as strings/functions in the compile API,\n # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`\n # (both in distribution strategy context and otherwise).\n return losses.LossFunctionWrapper(\n loss_fn,\n name=loss_fn.__name__,\n reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)\n\n\ndef validate_dataset_input(x, y, sample_weight, validation_split=None):\n \"\"\"Validates user input arguments when a dataset iterator is passed.\n\n Args:\n x: Input data. A `tf.data` dataset or iterator.\n y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s).\n Expected to be `None` when `x` is a dataset iterator.\n sample_weight: An optional sample-weight array passed by the user to weight\n the importance of each sample in `x`. Expected to be `None` when `x` is a\n dataset iterator\n validation_split: Float between 0 and 1. Fraction of the training data to be\n used as validation data. 
Expected to be `None` when `x` is a dataset\n iterator.\n\n Raises:\n ValueError: if argument `y` or `sample_weight` or `validation_split` are\n provided by user.\n \"\"\"\n if y is not None:\n raise ValueError('You passed a dataset or dataset iterator (%s) as '\n 'input `x` to your model. In that case, you should '\n 'not specify a target (`y`) argument, since the dataset '\n 'or dataset iterator generates both input data and '\n 'target data. '\n 'Received: %s' % (x, y))\n if sample_weight is not None:\n raise ValueError('`sample_weight` argument is not supported when input '\n '`x` is a dataset or a dataset iterator. Instead, you'\n 'can provide sample_weight as the third element of your'\n 'dataset, i.e. (inputs, targets, sample_weight). '\n 'Received: x=%s, sample_weight=%s' % (x, sample_weight))\n if validation_split is not None and validation_split != 0.0:\n raise ValueError(\n '`validation_split` argument is not supported when '\n 'input `x` is a dataset or a dataset iterator. '\n 'Received: x=%s, validation_split=%f' % (x, validation_split))\n\n\ndef validate_input_types(inp, orig_inp, allow_dict=True, field_name='inputs'):\n \"\"\"Helper function to validate either inputs or targets.\"\"\"\n if isinstance(inp, (list, tuple)):\n if not all(isinstance(v, np.ndarray) or\n tf.is_tensor(v) for v in inp):\n raise ValueError(\n 'Please provide as model inputs either a single array or a list of '\n 'arrays. You passed: {}={}'.format(field_name, str(orig_inp)))\n elif isinstance(inp, dict):\n if not allow_dict:\n raise ValueError(\n 'You cannot pass a dictionary as model {}.'.format(field_name))\n elif not isinstance(inp, np.ndarray) and not tf.is_tensor(inp):\n raise ValueError(\n 'Please provide as model inputs either a single array or a list of '\n 'arrays. You passed: {}={}'.format(field_name, orig_inp))\n\n\ndef check_generator_arguments(y=None, sample_weight=None,\n validation_split=None):\n \"\"\"Validates arguments passed when using a generator.\"\"\"\n if y is not None:\n raise ValueError('`y` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass targets'\n ' as the second element of the generator.')\n if sample_weight is not None:\n raise ValueError('`sample_weight` argument is not supported when data is'\n 'a generator or Sequence instance. Instead pass sample'\n ' weights as the third element of the generator.')\n if validation_split:\n raise ValueError('If your data is in the form of a Python generator, '\n 'you cannot use `validation_split`.')\n\n\ndef check_steps_argument(input_data, steps, steps_name):\n \"\"\"Validates `steps` argument based on input data's type.\n\n The cases when `steps` value must be provided are when\n 1. input data passed is an iterator.\n 2. model was built on top of symbolic tensors, input data is not\n required and is `None`.\n 3. input data passed is a symbolic tensor.\n\n Args:\n input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or\n tf.data.Dataset iterator or `None`.\n steps: Integer or `None`. 
Total number of steps (batches of samples) to\n execute.\n steps_name: The public API's parameter name for `steps`.\n\n Returns:\n boolean, True if `steps` argument is required, else False.\n\n Raises:\n ValueError: if `steps` argument is required for given input data type\n but not provided.\n \"\"\"\n is_x_iterator = isinstance(\n input_data, (tf.compat.v1.data.Iterator, tf.data.Iterator))\n if (input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or\n (isinstance(input_data, list) and not input_data)):\n if steps is None:\n input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors'\n raise ValueError('When using {input_type} as input to a model, you should'\n ' specify the `{steps_name}` argument.'.format(\n input_type=input_type_str, steps_name=steps_name))\n return True\n\n if isinstance(input_data, (tf.compat.v1.data.Dataset, tf.data.Dataset)):\n return True\n\n if steps is not None:\n list_types = (np.ndarray, list, tuple)\n if (isinstance(input_data, list_types) or\n (isinstance(input_data, dict) and\n any(isinstance(v, list_types) for v in input_data.values()))):\n logging.warning('When passing input data as arrays, do not specify '\n '`steps_per_epoch`/`steps` argument. '\n 'Please use `batch_size` instead.')\n return False\n\n\ndef cast_single_tensor(x, dtype=None):\n if isinstance(x, np.ndarray):\n x = tf.convert_to_tensor(x)\n dtype = dtype or backend.floatx()\n if x.dtype.is_floating:\n return tf.cast(x, dtype=dtype)\n return x\n\n\ndef cast_if_floating_dtype_and_mismatch(targets, outputs):\n \"\"\"Returns target data tensors using correct datatype.\n\n Checks that each target and output pair are the same datatype. If not, casts\n the target to the output's datatype.\n\n Args:\n targets: tensor or list of targets.\n outputs: tensor or list of outputs.\n\n Returns:\n Targets in appropriate datatype.\n \"\"\"\n if tf.is_tensor(targets):\n # There is one target, so output[0] should be the only output.\n return cast_single_tensor(targets, dtype=outputs[0].dtype)\n new_targets = []\n for target, out in zip(targets, outputs):\n if isinstance(target, np.ndarray):\n target = tf.convert_to_tensor(target)\n if target.dtype != out.dtype:\n new_targets.append(cast_single_tensor(target, dtype=out.dtype))\n else:\n new_targets.append(target)\n return new_targets\n\n\ndef cast_if_floating_dtype(x, dtype=None):\n \"\"\"Casts the given data tensors to the default floating point type.\n\n Casts only if the input is already a floating point type.\n Args:\n x: tensor or list/tuple of tensors.\n dtype: The dtype to which Tensors should be cast.\n\n Returns:\n Converted input.\n \"\"\"\n return tf.nest.map_structure(functools.partial(cast_single_tensor, dtype=dtype),\n x)\n\n\ndef cast_to_model_input_dtypes(x, model):\n \"\"\"Casts the given data tensors to the dtypes of the model inputs.\n\n Args:\n x: tensor or list/tuple of tensors.\n model: The model.\n\n Returns:\n Converted input. 
Each tensor is casted to the corresponding input in\n `model.inputs`.\n \"\"\"\n input_dtypes = tf.nest.map_structure(lambda t: t.dtype, model.inputs)\n return tf.nest.map_structure(tf.cast, x, input_dtypes)\n\n\ndef prepare_sample_weight_modes(training_endpoints, sample_weight_mode):\n \"\"\"Prepares sample weight modes for the model.\n\n Args:\n training_endpoints: List of model _TrainingEndpoints.\n sample_weight_mode: sample weight mode user input passed from compile API.\n\n Raises:\n ValueError: In case of invalid `sample_weight_mode` input.\n \"\"\"\n\n if isinstance(sample_weight_mode, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys(\n 'sample_weight_mode', sample_weight_mode,\n [e.output_name for e in training_endpoints])\n\n for end_point in training_endpoints:\n if not end_point.should_skip_target_weights():\n if end_point.output_name not in sample_weight_mode:\n raise ValueError('Output ' + end_point.output_name +\n 'missing from `_sample_weight_modes` dictionary')\n else:\n end_point.sample_weight_mode = sample_weight_mode.get(\n end_point.output_name)\n elif isinstance(sample_weight_mode, (list, tuple)):\n if len(sample_weight_mode) != len(training_endpoints):\n raise ValueError('When passing a list as sample_weight_mode, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(training_endpoints)) +\n ' outputs, but you passed ' +\n str(len(sample_weight_mode)) + '_sample_weight_modes.')\n for mode, endpoint in zip(sample_weight_mode, training_endpoints):\n if not endpoint.should_skip_target_weights():\n endpoint.sample_weight_mode = mode\n else:\n for endpoint in training_endpoints:\n if not endpoint.should_skip_target_weights():\n endpoint.sample_weight_mode = sample_weight_mode\n\n\ndef prepare_loss_functions(loss, output_names):\n \"\"\"Converts loss to a list of loss functions.\n\n Args:\n loss: String (name of objective function), objective function or\n `tf.losses.Loss` instance. See `tf.losses`. If the model has multiple\n outputs, you can use a different loss on each output by passing a\n dictionary or a list of losses. The loss value that will be minimized by\n the model will then be the sum of all individual losses.\n output_names: List of model output names.\n\n Returns:\n A list of loss objective functions.\n\n Raises:\n ValueError: If loss is a dict with keys not in model output names,\n or if loss is a list with len not equal to model outputs.\n \"\"\"\n if isinstance(loss, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys('loss', loss, output_names)\n loss_functions = []\n for name in output_names:\n if name not in loss:\n logging.warning(\n 'Output {0} missing from loss dictionary. We assume '\n 'this was done on purpose. The fit and evaluate APIs will not be '\n 'expecting any data to be passed to {0}.'.format(name))\n loss_functions.append(get_loss_function(loss.get(name, None)))\n elif isinstance(loss, str):\n loss_functions = [get_loss_function(loss) for _ in output_names]\n elif isinstance(loss, collections.abc.Sequence):\n if len(loss) != len(output_names):\n raise ValueError('When passing a list as loss, it should have one entry '\n 'per model outputs. 
The model has {} outputs, but you '\n 'passed loss={}'.format(len(output_names), loss))\n loss_functions = tf.nest.map_structure(get_loss_function, loss)\n else:\n loss_functions = [get_loss_function(loss) for _ in range(len(output_names))]\n\n return loss_functions\n\n\ndef prepare_loss_weights(training_endpoints, loss_weights=None):\n \"\"\"Converts loss weights to a list of loss weights.\n\n The result loss weights will be populated on the training endpoint.\n\n Args:\n training_endpoints: List of model training endpoints.\n loss_weights: Optional list or dictionary specifying scalar coefficients\n (Python floats) to weight the loss contributions of different model\n outputs. The loss value that will be minimized by the model will then be\n the *weighted sum* of all individual losses, weighted by the\n `loss_weights` coefficients. If a list, it is expected to have a 1:1\n mapping to the model's outputs. If a dict, it is expected to map\n output names (strings) to scalar coefficients.\n\n Raises:\n ValueError: If loss weight is a dict with key not in model output names,\n or if loss is a list with len not equal to model outputs.\n \"\"\"\n if loss_weights is None:\n for e in training_endpoints:\n e.loss_weight = 1.\n elif isinstance(loss_weights, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys(\n 'loss_weights', loss_weights,\n [e.output_name for e in training_endpoints])\n for e in training_endpoints:\n e.loss_weight = loss_weights.get(e.output_name, 1.)\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(training_endpoints):\n raise ValueError('When passing a list as loss_weights, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(training_endpoints)) +\n ' outputs, but you passed loss_weights=' +\n str(loss_weights))\n for w, e in zip(loss_weights, training_endpoints):\n e.loss_weight = w\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) + ' - expected a list of dicts.')\n\n\n# TODO(rohanj): This is a hack to get around not depending on feature_column and\n# create a cyclical dependency. Figure out a cleaner solution\ndef is_feature_layer(layer):\n \"\"\"Returns whether `layer` is a FeatureLayer or not.\"\"\"\n return getattr(layer, '_is_feature_layer', False)\n\n\ndef is_eager_dataset_or_iterator(data):\n return tf.executing_eagerly() and isinstance(\n data, (tf.compat.v1.data.Dataset, tf.data.Dataset,\n tf.data.Iterator))\n\n\n# pylint: disable=protected-access\ndef get_dataset_graph_def(dataset):\n if tf.executing_eagerly():\n graph_def_str = dataset._as_serialized_graph().numpy()\n else:\n graph_def_str = backend.get_value(dataset._as_serialized_graph())\n return tf.compat.v1.GraphDef().FromString(graph_def_str)\n\n\ndef verify_dataset_shuffled(x):\n \"\"\"Verifies that the dataset is shuffled.\n\n Args:\n x: Dataset passed as an input to the model.\n\n Returns:\n boolean, whether the input dataset is shuffled or not.\n \"\"\"\n assert isinstance(x, tf.data.Dataset)\n graph_def = get_dataset_graph_def(x)\n for node in graph_def.node:\n if node.op.startswith('ShuffleDataset'):\n return True\n # Also check graph_def.library.function for ds.interleave or ds.flat_map\n for function in graph_def.library.function:\n for node in function.node_def:\n if node.op.startswith('ShuffleDataset'):\n return True\n logging.warning('Expected a shuffled dataset but input dataset `x` is '\n 'not shuffled. 
Please invoke `shuffle()` on input dataset.')\n return False\n\n\ndef is_dataset_or_iterator(data):\n return isinstance(data, (tf.compat.v1.data.Dataset, tf.data.Dataset,\n tf.compat.v1.data.Iterator, tf.data.Iterator))\n\n\ndef get_iterator(dataset):\n \"\"\"Create and initialize an iterator from a dataset.\"\"\"\n if tf.executing_eagerly():\n iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)\n else:\n iterator = tf.compat.v1.data.make_initializable_iterator(dataset)\n initialize_iterator(iterator)\n return iterator\n\n\ndef initialize_iterator(iterator):\n if not tf.executing_eagerly():\n init_op = iterator.initializer\n backend.get_session((init_op,)).run(init_op)\n\n\ndef extract_tensors_from_dataset(dataset):\n \"\"\"Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.\n\n Args:\n dataset: Dataset instance.\n\n Returns:\n Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.\n \"\"\"\n iterator = get_iterator(dataset)\n inputs, targets, sample_weight = unpack_iterator_input(iterator)\n return inputs, targets, sample_weight\n\n\ndef unpack_iterator_input(iterator):\n \"\"\"Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.\n\n Args:\n iterator: Instance of a dataset iterator.\n\n Returns:\n Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.\n \"\"\"\n try:\n next_element = iterator.get_next()\n except tf.errors.OutOfRangeError:\n raise RuntimeError('Your dataset iterator ran out of data; '\n 'Make sure that your dataset can generate '\n 'required number of samples.')\n\n if isinstance(next_element, (list, tuple)):\n if len(next_element) not in [2, 3]:\n raise ValueError(\n 'Please provide model inputs as a list or tuple of 2 or 3 '\n 'elements: (input, target) or (input, target, sample_weights) '\n 'Received %s' % next_element)\n if len(next_element) == 2:\n x, y = next_element\n weights = None\n else:\n x, y, weights = next_element\n else:\n x = next_element\n y = None\n weights = None\n return x, y, weights\n\n\ndef infer_steps_for_dataset(model,\n dataset,\n steps,\n epochs=1,\n steps_name='steps'):\n \"\"\"Infers steps_per_epoch needed to loop through a dataset.\n\n Args:\n model: Keras model instance.\n dataset: Input data of type tf.data.Dataset.\n steps: Number of steps to draw from the dataset (may be None if unknown).\n epochs: Number of times to iterate over the dataset.\n steps_name: The string name of the steps argument, either `steps`,\n `validation_steps`, or `steps_per_epoch`. Only used for error message\n formatting.\n\n Returns:\n Integer or `None`. Inferred number of steps to loop through the dataset.\n `None` is returned if 1) the size of the dataset is unknown and `steps` was\n not specified, or 2) this is multi-worker training and auto sharding is\n enabled.\n\n Raises:\n ValueError: In case of invalid argument values.\n \"\"\"\n assert isinstance(dataset, tf.data.Dataset)\n if (model._in_multi_worker_mode() and\n (dataset.options().experimental_distribute.auto_shard_policy !=\n tf.data.experimental.AutoShardPolicy.OFF)):\n # If the dataset would be auto-sharded, we should not infer a local\n # steps_per_epoch due to the possible imbalanced sharding between workers.\n return None\n\n size = backend.get_value(tf.data.experimental.cardinality(dataset))\n if size == tf.data.experimental.INFINITE_CARDINALITY and steps is None:\n raise ValueError('When passing an infinitely repeating dataset, you '\n 'must specify the `%s` argument.' 
% (steps_name,))\n if size >= 0:\n if steps is not None and steps * epochs > size:\n if epochs > 1:\n raise ValueError('The dataset you passed contains %s batches, but you '\n 'passed `epochs=%s` and `%s=%s`, which is a total of '\n '%s steps. We cannot draw that many steps from this '\n 'dataset. We suggest to set `%s=%s`.' %\n (size, epochs, steps_name, steps, steps * epochs,\n steps_name, size // epochs))\n else:\n raise ValueError('The dataset you passed contains %s batches, but you '\n 'passed `%s=%s`. We cannot draw that many steps from '\n 'this dataset. We suggest to set `%s=%s`.' %\n (size, steps_name, steps, steps_name, size))\n if steps is None:\n if size >= 0:\n return size\n return None\n return steps\n\n\nclass ModelInputs:\n \"\"\"Encapsulates model inputs.\n\n Allows for transforming model inputs while keeping the same structure.\n \"\"\"\n\n def __init__(self, inputs):\n self._inputs = inputs\n self._is_dict = isinstance(self._inputs, dict)\n self._is_single_input = not isinstance(self._inputs, (list, tuple, dict))\n\n self._flattened_inputs = []\n self._input_names = []\n\n if self._is_dict:\n for k in sorted(self._inputs.keys()):\n self._flattened_inputs.append(self._inputs[k])\n self._input_names.append(k)\n else:\n self._flattened_inputs = tf.nest.flatten(self._inputs)\n self._input_names = [\n 'input_%d' % (i + 1) for i in range(len(self._flattened_inputs))\n ]\n\n def get_input_names(self):\n \"\"\"Returns keys to name inputs by.\n\n In case inputs provided were a list, tuple or single entry, we make up a\n key 'input_%d'. For dictionary case, we return a sorted list of keys.\n \"\"\"\n return self._input_names\n\n def get_symbolic_inputs(self, return_single_as_list=False):\n \"\"\"Returns inputs to be set as self.inputs for a model.\"\"\"\n # TODO(karmel): There is a side-effect here where what you get\n # with as_list and as_dict depends on whether you have called this\n # method first, since it modifies in place.\n for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)):\n if isinstance(v, (list, float, int)):\n v = np.asarray(v)\n if v.ndim == 1:\n v = np.expand_dims(v, 1)\n\n if isinstance(v, np.ndarray):\n # We fix the placeholder shape except the batch size.\n # This is suboptimal, but it is the best we can do with the info\n # we have. 
The user should call `model._set_inputs(placeholders)`\n # to specify custom placeholders if the need arises.\n shape = (None,) + tuple(v.shape[1:])\n if shape == (None,):\n shape = (None, 1)\n dtype = tf.as_dtype(v.dtype)\n if dtype.is_floating:\n dtype = backend.floatx()\n v = backend.placeholder(shape=shape, name=k, dtype=dtype)\n elif isinstance(v, tf.TensorSpec):\n shape = (None,) + tuple(v.shape.as_list()[1:])\n if shape == (None,):\n shape = (None, 1)\n v = backend.placeholder(shape=shape, name=k, dtype=v.dtype)\n\n self._flattened_inputs[i] = v\n\n if self._is_dict:\n return dict(zip(self._input_names, self._flattened_inputs))\n if self._is_single_input and not return_single_as_list:\n return self._flattened_inputs[0]\n return self._flattened_inputs\n\n def as_dict(self):\n \"\"\"An iterable over a dictionary version of inputs.\"\"\"\n for k, v in zip(self._input_names, self._flattened_inputs):\n yield k, v\n\n def as_list(self):\n \"\"\"Returning the inputs as a list.\"\"\"\n return self._flattened_inputs\n\n\n# Allow use of methods not exposed to the user.\n# pylint: disable=protected-access\n\n\n# pylint: enable=protected-access\n\n\ndef generic_output_names(outputs_list):\n return ['output_%d' % (i + 1) for i in range(len(outputs_list))]\n\n\ndef should_run_validation(validation_freq, epoch):\n \"\"\"Checks if validation should be run this epoch.\n\n Args:\n validation_freq: Integer or list. If an integer, specifies how many training\n epochs to run before a new validation run is performed. If a list,\n specifies the epochs on which to run validation.\n epoch: Integer, the number of the training epoch just completed.\n\n Returns:\n Bool, True if validation should be run.\n\n Raises:\n ValueError: if `validation_freq` is an Integer and less than 1, or if\n it is neither an Integer nor a Sequence.\n \"\"\"\n # `epoch` is 0-indexed internally but 1-indexed in the public API.\n one_indexed_epoch = epoch + 1\n\n if isinstance(validation_freq, int):\n if validation_freq < 1:\n raise ValueError('`validation_freq` can not be less than 1.')\n return one_indexed_epoch % validation_freq == 0\n\n if not isinstance(validation_freq, collections.abc.Container):\n raise ValueError('`validation_freq` must be an Integer or '\n '`collections.abc.Container` (e.g. list, tuple, etc.)')\n return one_indexed_epoch in validation_freq\n\n\ndef split_training_and_validation_data(x, y, sample_weights, validation_split):\n \"\"\"Split input data into train/eval section based on validation_split.\"\"\"\n if has_symbolic_tensors(x):\n raise ValueError('If your data is in the form of symbolic tensors, '\n 'you cannot use `validation_split`.')\n if hasattr(x[0], 'shape'):\n split_at = int(x[0].shape[0] * (1. - validation_split))\n else:\n split_at = int(len(x[0]) * (1. 
- validation_split))\n x, val_x = (generic_utils.slice_arrays(x, 0, split_at),\n generic_utils.slice_arrays(x, split_at))\n y, val_y = (generic_utils.slice_arrays(y, 0, split_at),\n generic_utils.slice_arrays(y, split_at))\n if sample_weights:\n sample_weights, val_sample_weights = (\n generic_utils.slice_arrays(sample_weights, 0, split_at),\n generic_utils.slice_arrays(sample_weights, split_at),\n )\n else:\n val_sample_weights = None\n return x, y, sample_weights, val_x, val_y, val_sample_weights\n\n\ndef unpack_validation_data(validation_data, raise_if_ambiguous=True):\n \"\"\"Unpack validation data based input type.\n\n The validation data is not touched if its dataset or dataset iterator.\n For other type of input (Numpy or tensor), it will be unpacked into tuple of\n 3 which is x, y and sample weights.\n\n Args:\n validation_data: dataset, dataset iterator, or numpy, tensor tuple.\n raise_if_ambiguous: boolean on whether to fail if validation_data cannot be\n parsed. Otherwise simply return validation_data, None, None and defer the\n decision to the caller.\n\n Returns:\n tuple of 3, (x, y, sample_weights) for numpy and tensor input.\n \"\"\"\n if (isinstance(validation_data, (tf.compat.v1.data.Iterator,\n tf.data.Iterator,\n tf.data.Dataset,\n data_utils.Sequence))\n or not hasattr(validation_data, '__len__')):\n val_x = validation_data\n val_y = None\n val_sample_weight = None\n elif len(validation_data) == 2:\n try:\n val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence\n val_sample_weight = None\n except ValueError:\n val_x, val_y, val_sample_weight = validation_data, None, None\n elif len(validation_data) == 3:\n try:\n val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence\n except ValueError:\n val_x, val_y, val_sample_weight = validation_data, None, None\n else:\n if raise_if_ambiguous:\n raise ValueError(\n 'When passing a `validation_data` argument, '\n 'it must contain either 2 items (x_val, y_val), '\n 'or 3 items (x_val, y_val, val_sample_weights), '\n 'or alternatively it could be a dataset or a '\n 'dataset or a dataset iterator. '\n 'However we received `validation_data=%s`' % validation_data)\n val_x, val_y, val_sample_weight = validation_data, None, None\n return val_x, val_y, val_sample_weight\n\n\nclass TrainingLoop:\n \"\"\"TrainingLoop is a wrapper class around the training logic.\n\n This class is trying to encapsulate the different logic of fit/eval/predict\n with regard to different data input and model condition.\n\n Note that TrainingLoop is stateless, which means it doesn't contain any\n internal field and can be reused with different model and inputs.\n \"\"\"\n\n def fit(self,\n model,\n x=None,\n y=None,\n batch_size=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_split=0.,\n validation_data=None,\n shuffle=True,\n class_weight=None,\n sample_weight=None,\n initial_epoch=0,\n steps_per_epoch=None,\n validation_steps=None,\n validation_freq=1,\n **kwargs):\n \"\"\"Train the model with the inputs and targets.\"\"\"\n raise NotImplementedError()\n\n def evaluate(self,\n model,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n callbacks=None,\n **kwargs):\n \"\"\"Returns the loss value & metrics values for the model in test mode.\"\"\"\n raise NotImplementedError()\n\n def predict(self,\n model,\n x,\n batch_size=None,\n verbose=0,\n steps=None,\n callbacks=None,\n **kwargs):\n raise NotImplementedError()\n",
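A minimal sketch of the cardinality check that `infer_steps_for_dataset` relies on, assuming TensorFlow 2.x in eager mode: a finite batched dataset reports its batch count, while an infinitely repeating one reports the `INFINITE_CARDINALITY` sentinel and therefore requires an explicit `steps` argument.

    import tensorflow as tf

    # Finite dataset: 100 elements in batches of 10 -> cardinality 10.
    finite = tf.data.Dataset.range(100).batch(10)
    # Repeating forever -> cardinality is the INFINITE_CARDINALITY sentinel.
    infinite = finite.repeat()

    print(tf.data.experimental.cardinality(finite).numpy())    # 10
    print(tf.data.experimental.cardinality(infinite).numpy())  # -1
    print(int(tf.data.experimental.INFINITE_CARDINALITY))      # -1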
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras upsampling layer for 2D inputs.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nfrom keras import backend\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nfrom keras.utils import conv_utils\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.UpSampling2D')\nclass UpSampling2D(Layer):\n \"\"\"Upsampling layer for 2D inputs.\n\n Repeats the rows and columns of the data\n by `size[0]` and `size[1]` respectively.\n\n Examples:\n\n >>> input_shape = (2, 2, 1, 3)\n >>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n >>> print(x)\n [[[[ 0 1 2]]\n [[ 3 4 5]]]\n [[[ 6 7 8]]\n [[ 9 10 11]]]]\n >>> y = tf.keras.layers.UpSampling2D(size=(1, 2))(x)\n >>> print(y)\n tf.Tensor(\n [[[[ 0 1 2]\n [ 0 1 2]]\n [[ 3 4 5]\n [ 3 4 5]]]\n [[[ 6 7 8]\n [ 6 7 8]]\n [[ 9 10 11]\n [ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64)\n\n Args:\n size: Int, or tuple of 2 integers.\n The upsampling factors for rows and columns.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n interpolation: A string, one of `nearest` or `bilinear`.\n\n Input shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\n Output shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_rows, upsampled_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_rows, upsampled_cols)`\n \"\"\"\n\n def __init__(self,\n size=(2, 2),\n data_format=None,\n interpolation='nearest',\n **kwargs):\n super(UpSampling2D, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.size = conv_utils.normalize_tuple(size, 2, 'size')\n if interpolation not in {'nearest', 'bilinear'}:\n raise ValueError('`interpolation` argument should be one of `\"nearest\"` '\n f'or `\"bilinear\"`. 
Received: \"{interpolation}\".')\n self.interpolation = interpolation\n self.input_spec = InputSpec(ndim=4)\n\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape).as_list()\n if self.data_format == 'channels_first':\n height = self.size[0] * input_shape[\n 2] if input_shape[2] is not None else None\n width = self.size[1] * input_shape[\n 3] if input_shape[3] is not None else None\n return tf.TensorShape(\n [input_shape[0], input_shape[1], height, width])\n else:\n height = self.size[0] * input_shape[\n 1] if input_shape[1] is not None else None\n width = self.size[1] * input_shape[\n 2] if input_shape[2] is not None else None\n return tf.TensorShape(\n [input_shape[0], height, width, input_shape[3]])\n\n def call(self, inputs):\n return backend.resize_images(\n inputs, self.size[0], self.size[1], self.data_format,\n interpolation=self.interpolation)\n\n def get_config(self):\n config = {\n 'size': self.size,\n 'data_format': self.data_format,\n 'interpolation': self.interpolation\n }\n base_config = super(UpSampling2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n",
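A short usage sketch, assuming TensorFlow 2.x with its bundled Keras: the `interpolation` argument switches between repeating pixels (`nearest`) and bilinear resampling, while the output shape is identical in both cases.

    import numpy as np
    import tensorflow as tf

    # (batch, rows, cols, channels) input, upsampled by 2 in each spatial axis.
    x = np.arange(16, dtype="float32").reshape(1, 4, 4, 1)
    nearest = tf.keras.layers.UpSampling2D(size=(2, 2), interpolation="nearest")(x)
    bilinear = tf.keras.layers.UpSampling2D(size=(2, 2), interpolation="bilinear")(x)
    print(nearest.shape, bilinear.shape)  # (1, 8, 8, 1) (1, 8, 8, 1)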
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for np_utils.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport numpy as np\n\nfrom keras.utils import np_utils\n\n\nclass TestNPUtils(tf.test.TestCase):\n\n def test_to_categorical(self):\n num_classes = 5\n shapes = [(1,), (3,), (4, 3), (5, 4, 3), (3, 1), (3, 2, 1)]\n expected_shapes = [(1, num_classes), (3, num_classes), (4, 3, num_classes),\n (5, 4, 3, num_classes), (3, num_classes),\n (3, 2, num_classes)]\n labels = [np.random.randint(0, num_classes, shape) for shape in shapes]\n one_hots = [\n np_utils.to_categorical(label, num_classes) for label in labels]\n for label, one_hot, expected_shape in zip(labels,\n one_hots,\n expected_shapes):\n # Check shape\n self.assertEqual(one_hot.shape, expected_shape)\n # Make sure there is only one 1 in a row\n self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))\n # Get original labels back from one hots\n self.assertTrue(np.all(\n np.argmax(one_hot, -1).reshape(label.shape) == label))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmark for KPL implementation of bucketized columns with dense inputs.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport numpy as np\n\nimport keras\nfrom tensorflow.python.eager.def_function import function as tf_function\nfrom keras.layers.preprocessing import discretization\nfrom keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm\n\nNUM_REPEATS = 10 # The number of times to run each benchmark.\nBATCH_SIZES = [32, 256]\n\n\n### KPL AND FC IMPLEMENTATION BENCHMARKS ###\ndef embedding_varlen(batch_size, max_length):\n \"\"\"Benchmark a variable-length embedding.\"\"\"\n # Data and constants.\n max_value = 25.0\n bins = np.arange(1.0, max_value)\n data = fc_bm.create_data(\n max_length, batch_size * NUM_REPEATS, 100000, dtype=float)\n\n # Keras implementation\n model = keras.Sequential()\n model.add(keras.Input(shape=(max_length,), name=\"data\", dtype=tf.float32))\n model.add(discretization.Discretization(bins))\n\n # FC implementation\n fc = tf.feature_column.bucketized_column(\n tf.feature_column.numeric_column(\"data\"), boundaries=list(bins))\n\n # Wrap the FC implementation in a tf.function for a fair comparison\n @tf_function()\n def fc_fn(tensors):\n fc.transform_feature(tf.__internal__.feature_column.FeatureTransformationCache(tensors), None)\n\n # Benchmark runs\n keras_data = {\"data\": data.to_tensor(default_value=0.0)}\n k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)\n\n fc_data = {\"data\": data.to_tensor(default_value=0.0)}\n fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)\n\n return k_avg_time, fc_avg_time\n\n\nclass BenchmarkLayer(fc_bm.LayerBenchmark):\n \"\"\"Benchmark the layer forward pass.\"\"\"\n\n def benchmark_layer(self):\n for batch in BATCH_SIZES:\n name = \"bucketized|dense|batch_%s\" % batch\n k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)\n self.report(name, k_time, f_time, NUM_REPEATS)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Training state management.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport os\nfrom keras import backend\nfrom keras.distribute import distributed_file_utils\nfrom keras.utils import mode_keys\n\n# Constant for `tf.keras.Model` attribute to store the epoch at which the most\n# recently saved checkpoint was saved.\nCKPT_SAVED_EPOCH = '_ckpt_saved_epoch'\n\nCKPT_SAVED_EPOCH_UNUSED_VALUE = -1\n\n\nclass WorkerTrainingState:\n \"\"\"Training state management class.\n\n This class provides apis for backing up and restoring the training state.\n This allows model and epoch information to be saved periodically and restore\n for fault-tolerance, also known as preemption-recovery purpose.\n \"\"\"\n\n def __init__(self, model, checkpoint_dir):\n self._model = model\n\n # The epoch at which the checkpoint is saved. Used for fault-tolerance.\n # GPU device only has int64 dtype registered VarHandleOp.\n self._ckpt_saved_epoch = tf.Variable(\n initial_value=tf.constant(\n CKPT_SAVED_EPOCH_UNUSED_VALUE, dtype=tf.int64),\n name='ckpt_saved_epoch')\n\n # Variable initialization.\n backend.set_value(self._ckpt_saved_epoch, CKPT_SAVED_EPOCH_UNUSED_VALUE)\n\n # _ckpt_saved_epoch gets tracked and is included in the checkpoint file\n # when backing up.\n checkpoint = tf.train.Checkpoint(\n model=self._model, ckpt_saved_epoch=self._ckpt_saved_epoch,\n train_counter=self._model._train_counter)\n\n # If this is single-worker training, checkpoint_dir are the same for\n # write_checkpoint_manager and read_checkpoint_manager.\n #\n # If this is multi-worker training, and this worker should not\n # save checkpoint, we replace the write_checkpoint_manager's checkpoint_dir\n # with a temp filepath, so it writes to a file that will be removed at the\n # end of back_up() call. 
This is necessary because the SyncOnReadVariable\n # needs to be synced across all the workers in order to be read, and all\n # workers need to perform `save()`.\n # But all workers should restore from the same checkpoint_dir as passed in\n # read_checkpoint_manager.\n self.read_checkpoint_manager = tf.train.CheckpointManager(\n checkpoint,\n directory=os.path.join(checkpoint_dir, 'chief'),\n max_to_keep=1)\n write_checkpoint_dir = distributed_file_utils.write_dirpath(\n checkpoint_dir, self._model.distribute_strategy)\n if self._model.distribute_strategy.extended.should_checkpoint:\n self.write_checkpoint_manager = self.read_checkpoint_manager\n else:\n self.write_checkpoint_manager = tf.train.CheckpointManager(\n checkpoint, directory=write_checkpoint_dir, max_to_keep=1)\n\n def back_up(self, epoch):\n \"\"\"Back up the current state of training into a checkpoint file.\n\n Args:\n epoch: The current epoch information to be saved.\n \"\"\"\n backend.set_value(self._ckpt_saved_epoch, epoch)\n # Save the model plus CKPT_SAVED_EPOCH variable.\n if self.write_checkpoint_manager.save():\n distributed_file_utils.remove_temp_dirpath(\n self.write_checkpoint_manager.directory,\n self._model.distribute_strategy)\n\n def restore(self):\n \"\"\"Restore the training state from the backed up checkpoint file.\n\n Returns:\n True if the training state is successfully restored. False if the training\n state doesn't need to be restored, or error occurred so it can't.\n \"\"\"\n self.read_checkpoint_manager.restore_or_initialize()\n\n def delete_backup(self):\n \"\"\"Delete the backup directories.\n\n Delete the backup directories which should not exist after `fit()`\n successfully finishes.\n \"\"\"\n if self.write_checkpoint_manager is self.read_checkpoint_manager:\n try:\n tf.io.gfile.rmtree(self.write_checkpoint_manager.directory)\n except tf.errors.NotFoundError:\n pass\n\n def maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):\n \"\"\"Maybe load initial epoch from ckpt considering possible worker recovery.\n\n When `_ckpt_saved_epoch` attribute exists and is not\n `CKPT_SAVED_EPOCH_UNUSED_VALUE`, this is under multi-worker training setting\n and indicates the worker is recovering from previous failure. In this case,\n infer `initial_epoch` from `self._ckpt_saved_epoch` to continue previous\n unfinished training from certain epoch.\n\n Args:\n initial_epoch: The original initial_epoch user passes in in `fit()`.\n mode: The mode for running `model.fit()`.\n\n Returns:\n If the training is recovering from previous failure under multi-worker\n training setting, return the epoch the training is supposed to continue\n at. Otherwise, return the `initial_epoch` the user passes in.\n \"\"\"\n\n epoch = backend.eval(self._ckpt_saved_epoch)\n if mode == mode_keys.ModeKeys.TRAIN and epoch >= 0:\n # The most recently saved epoch is one epoch prior to the epoch it\n # failed at, so return the value of 'self._ckpt_saved_epoch' plus one.\n return epoch + 1\n return initial_epoch\n",
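The user-facing entry point to this state management is the `BackupAndRestore` callback; a minimal sketch, assuming TF 2.8+ where it is stable (earlier releases expose it under `tf.keras.callbacks.experimental`) and using a hypothetical backup directory.

    import numpy as np
    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer="sgd", loss="mse")

    # Writes the checkpoint plus the _ckpt_saved_epoch variable so an
    # interrupted fit() can resume from the last completed epoch.
    backup = tf.keras.callbacks.BackupAndRestore(backup_dir="/tmp/training_backup")
    x, y = np.random.rand(32, 4), np.random.rand(32, 1)
    model.fit(x, y, epochs=3, callbacks=[backup], verbose=0)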
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the SpatialDropout1D layer.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nfrom keras.engine.input_spec import InputSpec\nfrom keras.layers.regularization.dropout import Dropout\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.SpatialDropout1D')\nclass SpatialDropout1D(Dropout):\n \"\"\"Spatial 1D version of Dropout.\n\n This version performs the same function as Dropout, however, it drops\n entire 1D feature maps instead of individual elements. If adjacent frames\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout1D will help promote independence\n between feature maps and should be used instead.\n\n Args:\n rate: Float between 0 and 1. Fraction of the input units to drop.\n Call arguments:\n inputs: A 3D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n Input shape:\n 3D tensor with shape: `(samples, timesteps, channels)`\n Output shape: Same as input.\n References: - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n def __init__(self, rate, **kwargs):\n super(SpatialDropout1D, self).__init__(rate, **kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def _get_noise_shape(self, inputs):\n input_shape = tf.shape(inputs)\n noise_shape = (input_shape[0], 1, input_shape[2])\n return noise_shape\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the GaussianDropout layer.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nfrom keras import backend\nfrom keras.engine import base_layer\nfrom keras.utils import tf_utils\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.GaussianDropout')\nclass GaussianDropout(base_layer.BaseRandomLayer):\n \"\"\"Apply multiplicative 1-centered Gaussian noise.\n\n As it is a regularization layer, it is only active at training time.\n\n Args:\n rate: Float, drop probability (as with `Dropout`).\n The multiplicative noise will have\n standard deviation `sqrt(rate / (1 - rate))`.\n seed: Integer, optional random seed to enable deterministic behavior.\n\n Call arguments:\n inputs: Input tensor (of any rank).\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as input.\n \"\"\"\n\n def __init__(self, rate, seed=None, **kwargs):\n super(GaussianDropout, self).__init__(seed=seed, **kwargs)\n self.supports_masking = True\n self.rate = rate\n self.seed = seed\n\n def call(self, inputs, training=None):\n if 0 < self.rate < 1:\n\n def noised():\n stddev = np.sqrt(self.rate / (1.0 - self.rate))\n return inputs * self._random_generator.random_normal(\n shape=tf.shape(inputs),\n mean=1.0,\n stddev=stddev,\n dtype=inputs.dtype)\n\n return backend.in_train_phase(noised, inputs, training=training)\n return inputs\n\n def get_config(self):\n config = {'rate': self.rate, 'seed': self.seed}\n base_config = super(GaussianDropout, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmarks on Antirectifier.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom keras.benchmarks import benchmark_util\n\n\nclass AntirectifierBenchmark(tf.test.Benchmark):\n \"\"\"Benchmarks for Antirectifier using `tf.test.Benchmark`.\"\"\"\n\n def __init__(self):\n super(AntirectifierBenchmark, self).__init__()\n (self.x_train, self.y_train), _ = tf.keras.datasets.mnist.load_data()\n self.x_train = self.x_train.reshape(-1, 784)\n self.x_train = self.x_train.astype(\"float32\") / 255\n\n def _build_model(self):\n \"\"\"Model from https://keras.io/examples/keras_recipes/antirectifier/.\"\"\"\n model = tf.keras.Sequential([\n tf.keras.Input(shape=(784,)),\n tf.keras.layers.Dense(256),\n Antirectifier(),\n tf.keras.layers.Dense(256),\n Antirectifier(),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10),\n ])\n return model\n\n # In each benchmark test, the required arguments for the\n # method `measure_performance` include:\n # x: Input data, it could be Numpy or loaded from tfds.\n # y: Target data. 
If `x` is a dataset or generator instance,\n # `y` should not be specified.\n # loss: Loss function for model.\n # optimizer: Optimizer for model.\n # Check more details in `measure_performance()` method of\n # benchmark_util.\n def benchmark_antirectifier_bs_128(self):\n \"\"\"Measure performance with batch_size=128.\"\"\"\n batch_size = 128\n metrics, wall_time, extras = benchmark_util.measure_performance(\n self._build_model,\n x=self.x_train,\n y=self.y_train,\n batch_size=batch_size,\n optimizer=\"rmsprop\",\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[\"sparse_categorical_accuracy\"])\n\n metadata = benchmark_util.get_keras_examples_metadata(\n \"antirectifier\", batch_size)\n extras.update(metadata)\n self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)\n\n def benchmark_antirectifier_bs_256(self):\n \"\"\"Measure performance with batch_size=256.\"\"\"\n batch_size = 256\n metrics, wall_time, extras = benchmark_util.measure_performance(\n self._build_model,\n x=self.x_train,\n y=self.y_train,\n batch_size=batch_size,\n optimizer=\"rmsprop\",\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[\"sparse_categorical_accuracy\"])\n\n metadata = benchmark_util.get_keras_examples_metadata(\n \"antirectifier\", batch_size)\n extras.update(metadata)\n self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)\n\n def benchmark_antirectifier_bs_512(self):\n \"\"\"Measure performance with batch_size=512.\"\"\"\n batch_size = 512\n metrics, wall_time, extras = benchmark_util.measure_performance(\n self._build_model,\n x=self.x_train,\n y=self.y_train,\n batch_size=batch_size,\n optimizer=\"rmsprop\",\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[\"sparse_categorical_accuracy\"])\n\n metadata = benchmark_util.get_keras_examples_metadata(\n \"antirectifier\", batch_size)\n extras.update(metadata)\n self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)\n\n def benchmark_antirectifier_bs_512_gpu_2(self):\n \"\"\"Measure performance with batch_size=512, gpu=2 and\n\n distribution_strategy=`mirrored`.\n \"\"\"\n batch_size = 512\n metrics, wall_time, extras = benchmark_util.measure_performance(\n self._build_model,\n x=self.x_train,\n y=self.y_train,\n batch_size=batch_size,\n num_gpus=2,\n distribution_strategy=\"mirrored\",\n optimizer=\"rmsprop\",\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[\"sparse_categorical_accuracy\"])\n\n metadata = benchmark_util.get_keras_examples_metadata(\n \"antirectifier\", batch_size)\n extras.update(metadata)\n self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)\n\n\nclass Antirectifier(tf.keras.layers.Layer):\n \"\"\"Build simple custom layer.\"\"\"\n\n def __init__(self, initializer=\"he_normal\", **kwargs):\n super(Antirectifier, self).__init__(**kwargs)\n self.initializer = tf.keras.initializers.get(initializer)\n\n def build(self, input_shape):\n output_dim = input_shape[-1]\n self.kernel = self.add_weight(\n shape=(output_dim * 2, output_dim),\n initializer=self.initializer,\n name=\"kernel\",\n trainable=True,\n )\n\n def call(self, inputs): #pylint: disable=arguments-differ\n inputs -= tf.reduce_mean(inputs, axis=-1, keepdims=True)\n pos = tf.nn.relu(inputs)\n neg = tf.nn.relu(-inputs)\n concatenated = tf.concat([pos, neg], axis=-1)\n mixed = tf.matmul(concatenated, self.kernel)\n return mixed\n\n def get_config(self):\n # Implement 
get_config to enable serialization. This is optional.\n base_config = super(Antirectifier, self).get_config()\n config = {\"initializer\": tf.keras.initializers.serialize(self.initializer)}\n return dict(list(base_config.items()) + list(config.items()))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
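A shape sketch for the custom layer above, assuming TensorFlow 2.x: concatenating `relu(x)` and `relu(-x)` doubles the feature axis, which the `(2 * dim, dim)` kernel then projects back to the original width.

    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.random.randn(8, 256).astype("float32"))
    x -= tf.reduce_mean(x, axis=-1, keepdims=True)
    concatenated = tf.concat([tf.nn.relu(x), tf.nn.relu(-x)], axis=-1)
    print(concatenated.shape)  # (8, 512); matmul with a (512, 256) kernel follows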
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for zero-padding layers.\"\"\"\n\nfrom absl.testing import parameterized\nimport keras\nfrom keras.testing_infra import test_combinations\nfrom keras.testing_infra import test_utils\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\n@test_combinations.run_all_keras_modes\nclass ZeroPaddingTest(test_combinations.TestCase):\n\n def test_zero_padding_1d(self):\n num_samples = 2\n input_dim = 2\n num_steps = 5\n shape = (num_samples, num_steps, input_dim)\n inputs = np.ones(shape)\n\n with self.cached_session():\n # basic test\n test_utils.layer_test(\n keras.layers.ZeroPadding1D,\n kwargs={'padding': 2},\n input_shape=inputs.shape)\n test_utils.layer_test(\n keras.layers.ZeroPadding1D,\n kwargs={'padding': (1, 2)},\n input_shape=inputs.shape)\n\n # correctness test\n layer = keras.layers.ZeroPadding1D(padding=2)\n layer.build(shape)\n output = layer(keras.backend.variable(inputs))\n if tf.executing_eagerly():\n np_output = output.numpy()\n else:\n np_output = keras.backend.eval(output)\n for offset in [0, 1, -1, -2]:\n np.testing.assert_allclose(np_output[:, offset, :], 0.)\n np.testing.assert_allclose(np_output[:, 2:-2, :], 1.)\n\n layer = keras.layers.ZeroPadding1D(padding=(1, 2))\n layer.build(shape)\n output = layer(keras.backend.variable(inputs))\n if tf.executing_eagerly():\n np_output = output.numpy()\n else:\n np_output = keras.backend.eval(output)\n for left_offset in [0]:\n np.testing.assert_allclose(np_output[:, left_offset, :], 0.)\n for right_offset in [-1, -2]:\n np.testing.assert_allclose(np_output[:, right_offset, :], 0.)\n np.testing.assert_allclose(np_output[:, 1:-2, :], 1.)\n layer.get_config()\n\n # test incorrect use\n with self.assertRaises(ValueError):\n keras.layers.ZeroPadding1D(padding=(1, 1, 1))\n with self.assertRaises(ValueError):\n keras.layers.ZeroPadding1D(padding=None)\n\n @parameterized.named_parameters(('channels_first', 'channels_first'),\n ('channels_last', 'channels_last'))\n def test_zero_padding_2d(self, data_format):\n num_samples = 2\n stack_size = 2\n input_num_row = 4\n input_num_col = 5\n if data_format == 'channels_first':\n inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col))\n elif data_format == 'channels_last':\n inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))\n\n # basic test\n with self.cached_session():\n test_utils.layer_test(\n keras.layers.ZeroPadding2D,\n kwargs={\n 'padding': (2, 2),\n 'data_format': data_format\n },\n input_shape=inputs.shape)\n test_utils.layer_test(\n keras.layers.ZeroPadding2D,\n kwargs={\n 'padding': ((1, 2), (3, 4)),\n 'data_format': data_format\n },\n input_shape=inputs.shape)\n\n # correctness test\n with self.cached_session():\n layer = keras.layers.ZeroPadding2D(\n padding=(2, 2), data_format=data_format)\n layer.build(inputs.shape)\n output = 
layer(keras.backend.variable(inputs))\n if tf.executing_eagerly():\n np_output = output.numpy()\n else:\n np_output = keras.backend.eval(output)\n if data_format == 'channels_last':\n for offset in [0, 1, -1, -2]:\n np.testing.assert_allclose(np_output[:, offset, :, :], 0.)\n np.testing.assert_allclose(np_output[:, :, offset, :], 0.)\n np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)\n elif data_format == 'channels_first':\n for offset in [0, 1, -1, -2]:\n np.testing.assert_allclose(np_output[:, :, offset, :], 0.)\n np.testing.assert_allclose(np_output[:, :, :, offset], 0.)\n np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)\n\n layer = keras.layers.ZeroPadding2D(\n padding=((1, 2), (3, 4)), data_format=data_format)\n layer.build(inputs.shape)\n output = layer(keras.backend.variable(inputs))\n if tf.executing_eagerly():\n np_output = output.numpy()\n else:\n np_output = keras.backend.eval(output)\n if data_format == 'channels_last':\n for top_offset in [0]:\n np.testing.assert_allclose(np_output[:, top_offset, :, :], 0.)\n for bottom_offset in [-1, -2]:\n np.testing.assert_allclose(np_output[:, bottom_offset, :, :], 0.)\n for left_offset in [0, 1, 2]:\n np.testing.assert_allclose(np_output[:, :, left_offset, :], 0.)\n for right_offset in [-1, -2, -3, -4]:\n np.testing.assert_allclose(np_output[:, :, right_offset, :], 0.)\n np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)\n elif data_format == 'channels_first':\n for top_offset in [0]:\n np.testing.assert_allclose(np_output[:, :, top_offset, :], 0.)\n for bottom_offset in [-1, -2]:\n np.testing.assert_allclose(np_output[:, :, bottom_offset, :], 0.)\n for left_offset in [0, 1, 2]:\n np.testing.assert_allclose(np_output[:, :, :, left_offset], 0.)\n for right_offset in [-1, -2, -3, -4]:\n np.testing.assert_allclose(np_output[:, :, :, right_offset], 0.)\n np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)\n\n # test incorrect use\n with self.assertRaises(ValueError):\n keras.layers.ZeroPadding2D(padding=(1, 1, 1))\n with self.assertRaises(ValueError):\n keras.layers.ZeroPadding2D(padding=None)\n\n @parameterized.named_parameters(('channels_first', 'channels_first'),\n ('channels_last', 'channels_last'))\n def test_zero_padding_3d(self, data_format):\n num_samples = 2\n stack_size = 2\n input_len_dim1 = 4\n input_len_dim2 = 5\n input_len_dim3 = 3\n\n if data_format == 'channels_first':\n inputs = np.ones((num_samples, stack_size, input_len_dim1, input_len_dim2,\n input_len_dim3))\n elif data_format == 'channels_last':\n inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,\n input_len_dim3, stack_size))\n\n with self.cached_session():\n # basic test\n test_utils.layer_test(\n keras.layers.ZeroPadding3D,\n kwargs={\n 'padding': (2, 2, 2),\n 'data_format': data_format\n },\n input_shape=inputs.shape)\n test_utils.layer_test(\n keras.layers.ZeroPadding3D,\n kwargs={\n 'padding': ((1, 2), (3, 4), (0, 2)),\n 'data_format': data_format\n },\n input_shape=inputs.shape)\n\n with self.cached_session():\n # correctness test\n layer = keras.layers.ZeroPadding3D(\n padding=(2, 2, 2), data_format=data_format)\n layer.build(inputs.shape)\n output = layer(keras.backend.variable(inputs))\n if tf.executing_eagerly():\n np_output = output.numpy()\n else:\n np_output = keras.backend.eval(output)\n if data_format == 'channels_last':\n for offset in [0, 1, -1, -2]:\n np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)\n np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)\n 
np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)\n np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)\n elif data_format == 'channels_first':\n for offset in [0, 1, -1, -2]:\n np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)\n np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)\n np.testing.assert_allclose(np_output[:, :, :, :, offset], 0.)\n np.testing.assert_allclose(np_output[:, :, 2:-2, 2:-2, 2:-2], 1.)\n\n layer = keras.layers.ZeroPadding3D(\n padding=((1, 2), (3, 4), (0, 2)), data_format=data_format)\n layer.build(inputs.shape)\n output = layer(keras.backend.variable(inputs))\n if tf.executing_eagerly():\n np_output = output.numpy()\n else:\n np_output = keras.backend.eval(output)\n if data_format == 'channels_last':\n for offset in [0]:\n np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)\n for offset in [-1, -2]:\n np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)\n for offset in [0, 1, 2]:\n np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)\n for offset in [-1, -2, -3, -4]:\n np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)\n for offset in [-1, -2]:\n np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)\n np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, 0:-2, :], 1.)\n elif data_format == 'channels_first':\n for offset in [0]:\n np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)\n for offset in [-1, -2]:\n np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)\n for offset in [0, 1, 2]:\n np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)\n for offset in [-1, -2, -3, -4]:\n np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)\n for offset in [-1, -2]:\n np.testing.assert_allclose(np_output[:, :, :, :, offset], 0.)\n np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4, 0:-2], 1.)\n\n # test incorrect use\n with self.assertRaises(ValueError):\n keras.layers.ZeroPadding3D(padding=(1, 1))\n with self.assertRaises(ValueError):\n keras.layers.ZeroPadding3D(padding=None)\n\nif __name__ == '__main__':\n tf.test.main()\n",
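A sketch of the shape arithmetic the tests assert, assuming TensorFlow 2.x with `channels_last` data: asymmetric padding `((top, bottom), (left, right))` adds 1 + 2 rows and 3 + 4 columns of zeros.

    import numpy as np
    import tensorflow as tf

    x = np.ones((1, 4, 5, 2), dtype="float32")
    y = tf.keras.layers.ZeroPadding2D(padding=((1, 2), (3, 4)))(x)
    print(y.shape)  # (1, 7, 12, 2)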
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for `Model.compile`.\"\"\"\n\n\nimport copy\nfrom keras import losses as losses_mod\nfrom keras import metrics as metrics_mod\nfrom keras.utils import generic_utils\nfrom keras.utils import losses_utils\nfrom keras.utils import tf_utils\nimport tensorflow.compat.v2 as tf\n\n\nclass Container:\n \"\"\"Base Container class.\"\"\"\n\n def __init__(self, output_names=None):\n self._output_names = output_names\n\n def build(self, y_pred):\n if self._output_names is None:\n # In Subclass API, output names like 'output_1' are used for\n # `Metric` names.\n self._output_names = create_pseudo_output_names(y_pred)\n\n def _conform_to_outputs(self, outputs, struct):\n \"\"\"Convenience method to conform `struct` to `outputs` structure.\n\n Mappings performed:\n\n (1) Map a dict to a list of outputs, using the output names.\n (2) Fill missing keys in a dict w/ `None`s.\n (3) Map a single item to all outputs.\n\n Args:\n outputs: Model predictions.\n struct: Arbitrary nested structure (e.g. of labels, sample_weights,\n losses, or metrics).\n\n Returns:\n Mapping of `struct` to `outputs` structure.\n \"\"\"\n struct = map_to_output_names(outputs, self._output_names, struct)\n struct = map_missing_dict_keys(outputs, struct)\n # Allow passing one object that applies to all outputs.\n if not tf.nest.is_nested(struct) and tf.nest.is_nested(outputs):\n struct = tf.nest.map_structure(lambda _: struct, outputs)\n return struct\n\n def _maybe_broadcast_to_outputs(self, outputs, objects):\n \"\"\"Determines if losses / metrics should be applied to all outputs.\n\n NOTE: This method should only be called for Metrics / Losses, not for\n y_true / sample_weight.\n\n Args:\n outputs: Model predictions.\n objects: Arbitrary nested structure (e.g. of losses or metrics)\n\n Returns:\n Arbitrary nested structure of objects, maybe copied to each output.\n\n Applies a Loss / Metric to all outputs.\n \"\"\"\n if not self._should_broadcast(objects):\n return objects\n\n # When there is more than one Model output, this is needed to keep\n # each Metric / Loss separate. 
When there is only one Model output,\n # the user-supplied object should be used.\n should_copy_objects = len(tf.nest.flatten(outputs)) > 1\n\n def _broadcast_fn():\n if should_copy_objects:\n return tf.nest.map_structure(self._copy_object, objects)\n return objects\n\n return tf.nest.map_structure(lambda _: _broadcast_fn(), outputs)\n\n def _should_broadcast(self, objects):\n raise NotImplementedError\n\n def _copy_object(self, obj):\n raise NotImplementedError\n\n\nclass LossesContainer(Container):\n \"\"\"A container class for losses passed to `Model.compile`.\"\"\"\n\n def __init__(self, losses, loss_weights=None, output_names=None):\n super(LossesContainer, self).__init__(output_names=output_names)\n\n # Keep user-supplied values untouched for recompiling and serialization.\n self._user_losses = losses\n self._user_loss_weights = loss_weights\n\n self._losses = losses\n self._loss_weights = loss_weights\n self._per_output_metrics = None # Per-output losses become metrics.\n self._loss_metric = metrics_mod.Mean(name='loss') # Total loss.\n self._built = False\n\n @property\n def metrics(self):\n \"\"\"Per-output loss metrics.\"\"\"\n if not self._built:\n return []\n per_output_metrics = [\n metric_obj for metric_obj in tf.nest.flatten(self._per_output_metrics)\n if metric_obj is not None\n ]\n return [self._loss_metric] + per_output_metrics\n\n def build(self, y_pred):\n \"\"\"One-time setup of loss objects.\"\"\"\n super(LossesContainer, self).build(y_pred)\n\n self._losses = self._maybe_broadcast_to_outputs(y_pred, self._losses)\n self._losses = self._conform_to_outputs(y_pred, self._losses)\n self._losses = tf.nest.map_structure(self._get_loss_object, self._losses)\n self._losses = tf.nest.flatten(self._losses)\n\n self._loss_weights = self._maybe_broadcast_to_outputs(\n y_pred, self._loss_weights)\n self._loss_weights = self._conform_to_outputs(y_pred, self._loss_weights)\n self._loss_weights = tf.nest.flatten(self._loss_weights)\n\n self._create_metrics()\n self._built = True\n\n @property\n def built(self):\n return self._built\n\n def _create_metrics(self):\n \"\"\"Creates per-output loss metrics, but only for multi-output Models.\"\"\"\n if len(self._output_names) == 1:\n self._per_output_metrics = [None]\n else:\n self._per_output_metrics = []\n for loss_obj, output_name in zip(self._losses, self._output_names):\n if loss_obj is None:\n self._per_output_metrics.append(None)\n else:\n self._per_output_metrics.append(\n metrics_mod.Mean(output_name + '_loss'))\n\n def __call__(self,\n y_true,\n y_pred,\n sample_weight=None,\n regularization_losses=None):\n \"\"\"Computes the overall loss.\n\n Args:\n y_true: An arbitrary structure of Tensors representing the ground truth.\n y_pred: An arbitrary structure of Tensors representing a Model's outputs.\n sample_weight: An arbitrary structure of Tensors representing the\n per-sample loss weights. If one Tensor is passed, it is used for all\n losses. 
If multiple Tensors are passed, the structure should match\n `y_pred`.\n regularization_losses: Additional losses to be added to the total loss.\n\n Returns:\n The total loss as a `tf.Tensor`, or `None` if no loss results.\n \"\"\"\n y_true = self._conform_to_outputs(y_pred, y_true)\n sample_weight = self._conform_to_outputs(y_pred, sample_weight)\n\n if not self._built:\n self.build(y_pred)\n\n y_pred = tf.nest.flatten(y_pred)\n y_true = tf.nest.flatten(y_true)\n sample_weight = tf.nest.flatten(sample_weight)\n\n loss_values = [] # Used for gradient calculation.\n loss_metric_values = [] # Used for loss metric calculation.\n batch_dim = None\n zip_args = (y_true, y_pred, sample_weight, self._losses, self._loss_weights,\n self._per_output_metrics)\n for y_t, y_p, sw, loss_obj, loss_weight, metric_obj in zip(*zip_args):\n if y_t is None or loss_obj is None: # Ok to have no loss for an output.\n continue\n\n y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw)\n sw = apply_mask(y_p, sw, get_mask(y_p))\n loss_value = loss_obj(y_t, y_p, sample_weight=sw)\n\n loss_metric_value = loss_value\n # Correct for the `Mean` loss metrics counting each replica as a batch.\n if loss_obj.reduction == losses_utils.ReductionV2.SUM:\n loss_metric_value *= tf.distribute.get_strategy().num_replicas_in_sync\n\n if batch_dim is None:\n if tf_utils.is_ragged(y_t):\n batch_dim = y_t.nrows()\n else:\n batch_dim = tf.shape(y_t)[0]\n\n if metric_obj is not None:\n metric_obj.update_state(loss_metric_value, sample_weight=batch_dim)\n\n if loss_weight is not None:\n loss_value *= loss_weight\n loss_metric_value *= loss_weight\n\n if (loss_obj.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE or\n loss_obj.reduction == losses_utils.ReductionV2.AUTO):\n loss_value = losses_utils.scale_loss_for_distribution(loss_value)\n\n loss_values.append(loss_value)\n loss_metric_values.append(loss_metric_value)\n\n if regularization_losses:\n regularization_losses = losses_utils.cast_losses_to_common_dtype(\n regularization_losses)\n reg_loss = tf.add_n(regularization_losses)\n loss_metric_values.append(reg_loss)\n loss_values.append(losses_utils.scale_loss_for_distribution(reg_loss))\n\n if loss_values:\n loss_metric_values = losses_utils.cast_losses_to_common_dtype(\n loss_metric_values)\n total_loss_metric_value = tf.add_n(loss_metric_values)\n self._loss_metric.update_state(\n total_loss_metric_value, sample_weight=batch_dim)\n\n loss_values = losses_utils.cast_losses_to_common_dtype(loss_values)\n total_loss = tf.add_n(loss_values)\n return total_loss\n else:\n return None\n\n def reset_state(self):\n \"\"\"Resets the state of loss metrics.\"\"\"\n if not self._built:\n return\n metrics = [self._loss_metric] + tf.nest.flatten(self._per_output_metrics)\n for metric_obj in metrics:\n if metric_obj is not None:\n metric_obj.reset_state()\n\n def _get_loss_object(self, loss):\n \"\"\"Returns a `Loss` object.\n\n Converts the user-supplied loss to a `Loss` object. 
Also allows\n `SUM_OVER_BATCH_SIZE` reduction to be used for this loss.\n\n Args:\n loss: A string, function, or `Loss` object.\n\n Returns:\n A `Loss` object.\n \"\"\"\n if loss is None:\n return None # Ok to have no loss for an output.\n\n loss = losses_mod.get(loss)\n if not isinstance(loss, losses_mod.Loss):\n loss_name = get_custom_object_name(loss)\n if loss_name is None:\n raise ValueError(\n f'Loss should be a callable, received: {loss}')\n loss = losses_mod.LossFunctionWrapper(loss, name=loss_name)\n loss._allow_sum_over_batch_size = True # pylint: disable=protected-access\n return loss\n\n def _should_broadcast(self, obj):\n return not tf.nest.is_nested(obj)\n\n def _copy_object(self, obj):\n return obj # Losses don't need to be copied.\n\n\nclass MetricsContainer(Container):\n \"\"\"A container class for metrics passed to `Model.compile`.\"\"\"\n\n def __init__(self, metrics=None, weighted_metrics=None, output_names=None,\n from_serialized=False):\n \"\"\"Initializes a container for metrics.\n\n Arguments:\n metrics: see the `metrics` argument from `tf.keras.Model.compile`.\n weighted_metrics: see the `weighted_metrics` argument from\n `tf.keras.Model.compile`.\n output_names: A list of strings of names of outputs for the model.\n from_serialized: Whether the model being compiled is from a serialized\n model. Used to avoid redundantly applying pre-processing renaming\n steps.\n \"\"\"\n super(MetricsContainer, self).__init__(output_names=output_names)\n\n self._check_duplicated_metrics(metrics, weighted_metrics)\n # Keep user-supplied values untouched for recompiling and serialization.\n self._user_metrics = metrics\n self._user_weighted_metrics = weighted_metrics\n\n self._metrics = metrics\n self._weighted_metrics = weighted_metrics\n self._built = False\n\n self._from_serialized = from_serialized\n\n def _check_duplicated_metrics(self, metrics, weighted_metrics):\n \"\"\"Check and raise error when user provided metrics has any duplications.\n\n Note that metrics are stateful container, a shared metric instance between\n model.metric and model.weighted_metric will make the same intance to be\n udpated twice, and report wrong value.\n\n Args:\n metrics: User provided metrics list.\n weighted_metrics: User provided weighted metrics list.\n\n Raises:\n ValueError, when duplicated metrics instance discovered in user provided\n metrics and weighted metrics.\n \"\"\"\n seen = set()\n duplicated = []\n for x in tf.nest.flatten(metrics) + tf.nest.flatten(weighted_metrics):\n # We only check metrics object. The string and function objects\n # will be converted to unique Metric instance.\n if not isinstance(x, metrics_mod.Metric):\n continue\n if x in seen:\n duplicated.append(x)\n seen.add(x)\n\n if duplicated:\n raise ValueError('Found duplicated metrics object in the user provided '\n 'metrics and weighted metrics. This will cause the same '\n 'metric object to be updated multiple times, and report '\n 'wrong results. 
\\n'\n f'Duplicated items: {duplicated}')\n\n @property\n def metrics(self):\n \"\"\"All metrics in this container.\"\"\"\n if not self._built:\n return []\n return self._metrics_in_order\n\n @property\n def unweighted_metrics(self):\n \"\"\"Metrics in this container that should not be passed `sample_weight`.\"\"\"\n if not self._built:\n return None\n return tf.nest.flatten(self._metrics)\n\n @property\n def weighted_metrics(self):\n \"\"\"Metrics in this container that should be passed `sample_weight`.\"\"\"\n if not self._built:\n return None\n return tf.nest.flatten(self._weighted_metrics)\n\n def build(self, y_pred, y_true):\n \"\"\"One-time setup of metric objects.\"\"\"\n super(MetricsContainer, self).build(y_pred)\n\n self._metrics = self._maybe_broadcast_to_outputs(y_pred, self._metrics)\n self._metrics = self._conform_to_outputs(y_pred, self._metrics)\n\n self._weighted_metrics = self._maybe_broadcast_to_outputs(\n y_pred, self._weighted_metrics)\n self._weighted_metrics = self._conform_to_outputs(y_pred,\n self._weighted_metrics)\n\n # Standardize on tuple since `tf.data` turns lists into `Tensor`s.\n y_pred = tf.__internal__.nest.list_to_tuple(y_pred)\n y_true = tf.__internal__.nest.list_to_tuple(y_true)\n self._metrics = tf.__internal__.nest.list_to_tuple(self._metrics)\n self._weighted_metrics = tf.__internal__.nest.list_to_tuple(\n self._weighted_metrics)\n\n # Convert to `Metric` objects, potentially disambiguating based on output\n # properties.\n self._metrics = tf.__internal__.nest.map_structure_up_to(\n y_pred,\n self._get_metric_objects,\n self._metrics,\n y_true,\n y_pred)\n self._weighted_metrics = tf.__internal__.nest.map_structure_up_to(\n y_pred,\n self._get_metric_objects,\n self._weighted_metrics,\n y_true,\n y_pred)\n\n self._metrics = tf.__internal__.nest.flatten_up_to(\n y_pred, self._metrics, check_types=False)\n self._weighted_metrics = tf.__internal__.nest.flatten_up_to(\n y_pred, self._weighted_metrics, check_types=False)\n\n # Assumes metrics, weighted_metrics have been flattened up to outputs.\n #\n # If we are loading a model that has been already serialized, we do not\n # want to re-apply any pre-processing metric renaming steps.\n if not self._from_serialized:\n self._set_metric_names()\n self._create_ordered_metrics()\n self._built = True\n\n @property\n def built(self):\n return self._built\n\n def _set_metric_names(self):\n \"\"\"Sets unique metric names.\"\"\"\n # For multi-output models, prepend the output name to the metric name.\n # For weighted metrics, prepend \"weighted_\" if the name would be non-unique.\n # pylint: disable=protected-access\n metric_names = set()\n is_multi_output = len(self._output_names) > 1\n zip_args = (self._output_names, self._metrics, self._weighted_metrics)\n for output_name, output_metrics, weighted_output_metrics in zip(*zip_args):\n for m in output_metrics:\n if m is None:\n continue\n if is_multi_output:\n m._name = output_name + '_' + m._name\n if m._name in metric_names:\n raise ValueError(\n f'Found two metrics with the same name: {m._name}.'\n 'All the metrics added to the model need to have unique names.')\n metric_names.add(m._name)\n\n for wm in weighted_output_metrics:\n if wm is None:\n continue\n if is_multi_output:\n if output_name + '_' + wm._name in metric_names:\n wm._name = output_name + '_weighted_' + wm._name\n else:\n wm._name = output_name + '_' + wm._name\n elif wm._name in metric_names:\n wm._name = 'weighted_' + wm._name\n\n if wm._name in metric_names:\n raise ValueError(\n f'Found two 
weighted metrics with the same name: {wm._name}.'\n 'All the metrics added to the model need to have unique names.')\n metric_names.add(wm._name)\n # pylint: enable=protected-access\n\n def _create_ordered_metrics(self):\n \"\"\"Cache the flat order needed when returning metrics, for backwards compat.\"\"\"\n self._metrics_in_order = []\n for output_metrics, output_weighted_metrics in zip(self._metrics,\n self._weighted_metrics):\n for m in tf.nest.flatten(output_metrics):\n if m is not None:\n self._metrics_in_order.append(m)\n for wm in tf.nest.flatten(output_weighted_metrics):\n if wm is not None:\n self._metrics_in_order.append(wm)\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n \"\"\"Updates the state of per-output metrics.\"\"\"\n y_true = self._conform_to_outputs(y_pred, y_true)\n sample_weight = self._conform_to_outputs(y_pred, sample_weight)\n\n if not self._built:\n self.build(y_pred, y_true)\n\n y_pred = tf.nest.flatten(y_pred)\n y_true = tf.nest.flatten(y_true) if y_true is not None else []\n sample_weight = tf.nest.flatten(sample_weight)\n\n zip_args = (y_true, y_pred, sample_weight, self._metrics,\n self._weighted_metrics)\n for y_t, y_p, sw, metric_objs, weighted_metric_objs in zip(*zip_args):\n # Ok to have no metrics for an output.\n if (y_t is None or (all(m is None for m in metric_objs) and\n all(wm is None for wm in weighted_metric_objs))):\n continue\n\n y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw)\n mask = get_mask(y_p)\n sw = apply_mask(y_p, sw, mask)\n\n for metric_obj in metric_objs:\n if metric_obj is None:\n continue\n metric_obj.update_state(y_t, y_p, sample_weight=mask)\n\n for weighted_metric_obj in weighted_metric_objs:\n if weighted_metric_obj is None:\n continue\n weighted_metric_obj.update_state(y_t, y_p, sample_weight=sw)\n\n def reset_state(self):\n \"\"\"Resets the state of all `Metric`s in this container.\"\"\"\n if self._built:\n metrics = self._metrics_in_order\n else:\n # If the user supplied `Metric` objects directly, we should\n # reset those. 
This could also contain `str`s or `function`s\n # though.\n metrics = tf.nest.flatten(self._user_metrics) + tf.nest.flatten(\n self._user_weighted_metrics)\n\n for metric_obj in metrics:\n if isinstance(metric_obj, metrics_mod.Metric):\n metric_obj.reset_state()\n\n def _get_metric_objects(self, metrics, y_t, y_p):\n \"\"\"Convert user-supplied metrics to `Metric` objects.\"\"\"\n metrics = tf.nest.flatten(metrics)\n return [self._get_metric_object(m, y_t, y_p) for m in metrics]\n\n def _get_metric_object(self, metric, y_t, y_p):\n \"\"\"Converts user-supplied metric to a `Metric` object.\n\n Args:\n metric: A string, function, or `Metric` object.\n y_t: Sample of label.\n y_p: Sample of output.\n\n Returns:\n A `Metric` object.\n \"\"\"\n if metric is None:\n return None # Ok to have no metric for an output.\n\n # Convenience feature for selecting b/t binary, categorical,\n # and sparse categorical.\n if str(metric).lower() not in ['accuracy', 'acc', 'crossentropy', 'ce']:\n metric_obj = metrics_mod.get(metric)\n else:\n y_t_rank = len(y_t.shape.as_list())\n y_p_rank = len(y_p.shape.as_list())\n y_t_last_dim = y_t.shape.as_list()[-1]\n y_p_last_dim = y_p.shape.as_list()[-1]\n\n is_binary = y_p_last_dim == 1\n is_sparse_categorical = (\n y_t_rank < y_p_rank or y_t_last_dim == 1 and y_p_last_dim > 1)\n\n if str(metric).lower() in ['accuracy', 'acc']:\n if is_binary:\n metric_obj = metrics_mod.binary_accuracy\n elif is_sparse_categorical:\n metric_obj = metrics_mod.sparse_categorical_accuracy\n else:\n metric_obj = metrics_mod.categorical_accuracy\n else:\n if is_binary:\n metric_obj = metrics_mod.binary_crossentropy\n elif is_sparse_categorical:\n metric_obj = metrics_mod.sparse_categorical_crossentropy\n else:\n metric_obj = metrics_mod.categorical_crossentropy\n\n if isinstance(metric_obj, losses_mod.Loss):\n metric_obj._allow_sum_over_batch_size = True # pylint: disable=protected-access\n\n if not isinstance(metric_obj, metrics_mod.Metric):\n if isinstance(metric, str):\n metric_name = metric\n else:\n metric_name = get_custom_object_name(metric)\n if metric_name is None:\n raise ValueError(\n f'Metric should be a callable, received: {metric}')\n\n metric_obj = metrics_mod.MeanMetricWrapper(metric_obj, name=metric_name)\n\n return metric_obj\n\n def _should_broadcast(self, obj):\n # e.g. 'mse'.\n if not tf.nest.is_nested(obj):\n return True\n # e.g. ['mse'] or ['mse', 'mae'].\n return (isinstance(obj, (list, tuple)) and\n not any(tf.nest.is_nested(o) for o in obj))\n\n def _copy_object(self, obj):\n if isinstance(obj, metrics_mod.Metric):\n return obj.__class__.from_config(obj.get_config())\n return obj # Can be a function or `None`.\n\n\ndef create_pseudo_output_names(outputs):\n \"\"\"Create pseudo output names for a subclassed Model.\"\"\"\n return _create_pseudo_names(outputs, prefix='output_')\n\n\ndef create_pseudo_input_names(inputs):\n \"\"\"Create pseudo input names for a subclassed Model.\"\"\"\n return _create_pseudo_names(inputs, prefix='input_')\n\n\ndef _create_pseudo_names(tensors, prefix):\n \"\"\"Creates pseudo {input | output} names for subclassed Models.\n\n Warning: this function should only be used to define default\n names for `Metics` and `SavedModel`. 
No other use cases should\n rely on a `Model`'s input or output names.\n\n Example with dict:\n\n `{'a': [x1, x2], 'b': x3}` becomes:\n `['a_1', 'a_2', 'b']`\n\n Example with list:\n\n `[x, y]` becomes:\n `['output_1', 'output_2']`\n\n Args:\n tensors: `Model`'s outputs or inputs.\n prefix: 'output_' for outputs, 'input_' for inputs.\n\n Returns:\n Flattened list of pseudo names.\n \"\"\"\n\n def one_index(ele):\n # Start with \"output_1\" instead of \"output_0\".\n if isinstance(ele, int):\n return ele + 1\n return ele\n\n flat_paths = list(tf.__internal__.nest.yield_flat_paths(tensors))\n flat_paths = tf.nest.map_structure(one_index, flat_paths)\n names = []\n for path in flat_paths:\n if not path:\n name = prefix + '1' # Single output.\n else:\n name = '_'.join(str(p) for p in path)\n if isinstance(path[0], int):\n name = prefix + name\n names.append(name)\n return names\n\n\ndef map_to_output_names(y_pred, output_names, struct):\n \"\"\"Maps a dict to a list using `output_names` as keys.\n\n This is a convenience feature only. When a `Model`'s outputs\n are a list, you can specify per-output losses and metrics as\n a dict, where the keys are the output names. If you specify\n per-output losses and metrics via the same structure as the\n `Model`'s outputs (recommended), no mapping is performed.\n\n For the Functional API, the output names are the names of the\n last layer of each output. For the Subclass API, the output names\n are determined by `create_pseudo_output_names` (For example:\n `['output_1', 'output_2']` for a list of outputs).\n\n This mapping preserves backwards compatibility for `compile` and\n `fit`.\n\n Args:\n y_pred: Sample outputs of the Model, to determine if this convenience\n feature should be applied (`struct` is returned unmodified if `y_pred`\n isn't a flat list).\n output_names: List. The names of the outputs of the Model.\n struct: The structure to map.\n\n Returns:\n `struct` mapped to a list in same order as `output_names`.\n \"\"\"\n single_output = not tf.nest.is_nested(y_pred)\n outputs_are_flat_list = (not single_output and\n isinstance(y_pred, (list, tuple)) and\n not any(tf.nest.is_nested(y_p) for y_p in y_pred))\n\n if (single_output or outputs_are_flat_list) and isinstance(struct, dict):\n output_names = output_names or create_pseudo_output_names(y_pred)\n struct = copy.copy(struct)\n new_struct = [struct.pop(name, None) for name in output_names]\n if struct:\n raise ValueError(\n 'Found unexpected losses or metrics that do not correspond '\n f'to any Model output: {struct.keys()}. '\n f'Valid mode output names: {output_names}. 
'\n f'Received struct is: {struct}.')\n if len(new_struct) == 1:\n return new_struct[0]\n return new_struct\n else:\n return struct\n\n\ndef map_missing_dict_keys(y_pred, struct):\n \"\"\"Replaces missing dict keys in `struct` with `None` placeholders.\"\"\"\n if not isinstance(y_pred, dict) or not isinstance(struct, dict):\n return struct\n for k in y_pred.keys():\n if k not in struct:\n struct[k] = None\n return struct\n\n\ndef match_dtype_and_rank(y_t, y_p, sw):\n \"\"\"Match dtype and rank of predictions.\"\"\"\n if y_t.shape.rank == 1 and y_p.shape.rank == 2:\n y_t = tf.expand_dims(y_t, axis=-1)\n if sw is not None:\n if sw.shape.rank == 1 and y_p.shape.rank == 2:\n sw = tf.expand_dims(sw, axis=-1)\n\n # Dtype.\n # This is required mainly for custom loss functions which do not take care\n # casting dtypes.\n if ((y_t.dtype.is_floating and y_p.dtype.is_floating) or\n (y_t.dtype.is_integer and y_p.dtype.is_integer)):\n y_t = tf.cast(y_t, y_p.dtype)\n\n if sw is not None:\n sw = tf.cast(sw, y_p.dtype)\n return y_t, y_p, sw\n\n\ndef get_mask(y_p):\n \"\"\"Returns Keras mask from tensor.\"\"\"\n return getattr(y_p, '_keras_mask', None)\n\n\ndef apply_mask(y_p, sw, mask):\n \"\"\"Applies any mask on predictions to sample weights.\"\"\"\n if mask is not None:\n mask = tf.cast(mask, y_p.dtype)\n if sw is not None:\n mask, _, sw = (\n losses_utils.squeeze_or_expand_dimensions(mask, sample_weight=sw))\n sw *= mask\n else:\n sw = mask\n return sw\n\n\ndef get_custom_object_name(obj):\n \"\"\"Returns the name to use for a custom loss or metric callable.\n\n Args:\n obj: Custom loss of metric callable\n\n Returns:\n Name to use, or `None` if the object was not recognized.\n \"\"\"\n if hasattr(obj, 'name'): # Accept `Loss` instance as `Metric`.\n return obj.name\n elif hasattr(obj, '__name__'): # Function.\n return obj.__name__\n elif hasattr(obj, '__class__'): # Class instance.\n return generic_utils.to_snake_case(obj.__class__.__name__)\n else: # Unrecognized object.\n return None\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Bidirectional wrapper for RNNs.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nimport copy\n\nfrom keras import backend\nfrom keras.engine.base_layer import Layer\nfrom keras.engine.input_spec import InputSpec\nfrom keras.layers.rnn import rnn_utils\nfrom keras.layers.rnn.base_wrapper import Wrapper\nfrom keras.utils import generic_utils\nfrom keras.utils import tf_inspect\nfrom keras.utils import tf_utils\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.Bidirectional')\nclass Bidirectional(Wrapper):\n \"\"\"Bidirectional wrapper for RNNs.\n\n Args:\n layer: `keras.layers.RNN` instance, such as `keras.layers.LSTM` or\n `keras.layers.GRU`. It could also be a `keras.layers.Layer` instance\n that meets the following criteria:\n 1. Be a sequence-processing layer (accepts 3D+ inputs).\n 2. Have a `go_backwards`, `return_sequences` and `return_state`\n attribute (with the same semantics as for the `RNN` class).\n 3. Have an `input_spec` attribute.\n 4. Implement serialization via `get_config()` and `from_config()`.\n Note that the recommended way to create new RNN layers is to write a\n custom RNN cell and use it with `keras.layers.RNN`, instead of\n subclassing `keras.layers.Layer` directly.\n - When the `returns_sequences` is true, the output of the masked timestep\n will be zero regardless of the layer's original `zero_output_for_mask`\n value.\n merge_mode: Mode by which outputs of the forward and backward RNNs will be\n combined. One of {'sum', 'mul', 'concat', 'ave', None}. If None, the\n outputs will not be combined, they will be returned as a list. Default\n value is 'concat'.\n backward_layer: Optional `keras.layers.RNN`, or `keras.layers.Layer`\n instance to be used to handle backwards input processing.\n If `backward_layer` is not provided, the layer instance passed as the\n `layer` argument will be used to generate the backward layer\n automatically.\n Note that the provided `backward_layer` layer should have properties\n matching those of the `layer` argument, in particular it should have the\n same values for `stateful`, `return_states`, `return_sequences`, etc.\n In addition, `backward_layer` and `layer` should have different\n `go_backwards` argument values.\n A `ValueError` will be raised if these requirements are not met.\n\n Call arguments:\n The call arguments for this layer are the same as those of the wrapped RNN\n layer.\n Beware that when passing the `initial_state` argument during the call of\n this layer, the first half in the list of elements in the `initial_state`\n list will be passed to the forward RNN call and the last half in the list\n of elements will be passed to the backward RNN call.\n\n Raises:\n ValueError:\n 1. 
If `layer` or `backward_layer` is not a `Layer` instance.\n 2. In case of invalid `merge_mode` argument.\n 3. If `backward_layer` has mismatched properties compared to `layer`.\n\n Examples:\n\n ```python\n model = Sequential()\n model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10)))\n model.add(Bidirectional(LSTM(10)))\n model.add(Dense(5))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n # With custom backward layer\n model = Sequential()\n forward_layer = LSTM(10, return_sequences=True)\n backward_layer = LSTM(10, activation='relu', return_sequences=True,\n go_backwards=True)\n model.add(Bidirectional(forward_layer, backward_layer=backward_layer,\n input_shape=(5, 10)))\n model.add(Dense(5))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n ```\n \"\"\"\n\n def __init__(self,\n layer,\n merge_mode='concat',\n weights=None,\n backward_layer=None,\n **kwargs):\n if not isinstance(layer, Layer):\n raise ValueError(\n 'Please initialize `Bidirectional` layer with a '\n f'`tf.keras.layers.Layer` instance. Received: {layer}')\n if backward_layer is not None and not isinstance(backward_layer, Layer):\n raise ValueError(\n '`backward_layer` need to be a `tf.keras.layers.Layer` instance. '\n f'Received: {backward_layer}')\n if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]:\n raise ValueError(f'Invalid merge mode. Received: {merge_mode}. '\n 'Merge mode should be one of '\n '{\"sum\", \"mul\", \"ave\", \"concat\", None}')\n # We don't want to track `layer` since we're already tracking the two copies\n # of it we actually run.\n self._setattr_tracking = False\n super(Bidirectional, self).__init__(layer, **kwargs)\n self._setattr_tracking = True\n\n # Recreate the forward layer from the original layer config, so that it will\n # not carry over any state from the layer.\n self.forward_layer = self._recreate_layer_from_config(layer)\n\n if backward_layer is None:\n self.backward_layer = self._recreate_layer_from_config(\n layer, go_backwards=True)\n else:\n self.backward_layer = backward_layer\n # Keep the custom backward layer config, so that we can save it later. 
The\n # layer's name might be updated below with prefix 'backward_', and we want\n # to preserve the original config.\n self._backward_layer_config = generic_utils.serialize_keras_object(\n backward_layer)\n\n self.forward_layer._name = 'forward_' + self.forward_layer.name\n self.backward_layer._name = 'backward_' + self.backward_layer.name\n\n self._verify_layer_config()\n\n def force_zero_output_for_mask(layer):\n # Force the zero_output_for_mask to be True if returning sequences.\n if getattr(layer, 'zero_output_for_mask', None) is not None:\n layer.zero_output_for_mask = layer.return_sequences\n\n force_zero_output_for_mask(self.forward_layer)\n force_zero_output_for_mask(self.backward_layer)\n\n self.merge_mode = merge_mode\n if weights:\n nw = len(weights)\n self.forward_layer.initial_weights = weights[:nw // 2]\n self.backward_layer.initial_weights = weights[nw // 2:]\n self.stateful = layer.stateful\n self.return_sequences = layer.return_sequences\n self.return_state = layer.return_state\n self.supports_masking = True\n self._trainable = True\n self._num_constants = 0\n self.input_spec = layer.input_spec\n\n @property\n def _use_input_spec_as_call_signature(self):\n return self.layer._use_input_spec_as_call_signature # pylint: disable=protected-access\n\n def _verify_layer_config(self):\n \"\"\"Ensure the forward and backward layers have valid common property.\"\"\"\n if self.forward_layer.go_backwards == self.backward_layer.go_backwards:\n raise ValueError(\n 'Forward layer and backward layer should have different '\n '`go_backwards` value.'\n f'forward_layer.go_backwards = {self.forward_layer.go_backwards},'\n f'backward_layer.go_backwards = {self.backward_layer.go_backwards}')\n\n common_attributes = ('stateful', 'return_sequences', 'return_state')\n for a in common_attributes:\n forward_value = getattr(self.forward_layer, a)\n backward_value = getattr(self.backward_layer, a)\n if forward_value != backward_value:\n raise ValueError(\n 'Forward layer and backward layer are expected to have the same '\n f'value for attribute \"{a}\", got \"{forward_value}\" for forward '\n f'layer and \"{backward_value}\" for backward layer')\n\n def _recreate_layer_from_config(self, layer, go_backwards=False):\n # When recreating the layer from its config, it is possible that the layer\n # is a RNN layer that contains custom cells. 
In this case we inspect the\n # layer and pass the custom cell class as part of the `custom_objects`\n # argument when calling `from_config`.\n # See https://github.com/tensorflow/tensorflow/issues/26581 for more detail.\n config = layer.get_config()\n if go_backwards:\n config['go_backwards'] = not config['go_backwards']\n if 'custom_objects' in tf_inspect.getfullargspec(\n layer.__class__.from_config).args:\n custom_objects = {}\n cell = getattr(layer, 'cell', None)\n if cell is not None:\n custom_objects[cell.__class__.__name__] = cell.__class__\n # For StackedRNNCells\n stacked_cells = getattr(cell, 'cells', [])\n for c in stacked_cells:\n custom_objects[c.__class__.__name__] = c.__class__\n return layer.__class__.from_config(config, custom_objects=custom_objects)\n else:\n return layer.__class__.from_config(config)\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n output_shape = self.forward_layer.compute_output_shape(input_shape)\n if self.return_state:\n state_shape = tf_utils.convert_shapes(output_shape[1:], to_tuples=False)\n output_shape = tf_utils.convert_shapes(output_shape[0], to_tuples=False)\n else:\n output_shape = tf_utils.convert_shapes(output_shape, to_tuples=False)\n\n if self.merge_mode == 'concat':\n output_shape = output_shape.as_list()\n output_shape[-1] *= 2\n output_shape = tf.TensorShape(output_shape)\n elif self.merge_mode is None:\n output_shape = [output_shape, copy.copy(output_shape)]\n\n if self.return_state:\n if self.merge_mode is None:\n return output_shape + state_shape + copy.copy(state_shape)\n return [output_shape] + state_shape + copy.copy(state_shape)\n return output_shape\n\n def __call__(self, inputs, initial_state=None, constants=None, **kwargs):\n \"\"\"`Bidirectional.__call__` implements the same API as the wrapped `RNN`.\"\"\"\n inputs, initial_state, constants = rnn_utils.standardize_args(\n inputs, initial_state, constants, self._num_constants)\n\n if isinstance(inputs, list):\n if len(inputs) > 1:\n initial_state = inputs[1:]\n inputs = inputs[0]\n\n if initial_state is None and constants is None:\n return super(Bidirectional, self).__call__(inputs, **kwargs)\n\n # Applies the same workaround as in `RNN.__call__`\n additional_inputs = []\n additional_specs = []\n if initial_state is not None:\n # Check if `initial_state` can be split into half\n num_states = len(initial_state)\n if num_states % 2 > 0:\n raise ValueError(\n 'When passing `initial_state` to a Bidirectional RNN, '\n 'the state should be a list containing the states of '\n 'the underlying RNNs. 
'\n f'Received: {initial_state}')\n\n kwargs['initial_state'] = initial_state\n additional_inputs += initial_state\n state_specs = tf.nest.map_structure(\n lambda state: InputSpec(shape=backend.int_shape(state)),\n initial_state)\n self.forward_layer.state_spec = state_specs[:num_states // 2]\n self.backward_layer.state_spec = state_specs[num_states // 2:]\n additional_specs += state_specs\n if constants is not None:\n kwargs['constants'] = constants\n additional_inputs += constants\n constants_spec = [InputSpec(shape=backend.int_shape(constant))\n for constant in constants]\n self.forward_layer.constants_spec = constants_spec\n self.backward_layer.constants_spec = constants_spec\n additional_specs += constants_spec\n\n self._num_constants = len(constants)\n self.forward_layer._num_constants = self._num_constants\n self.backward_layer._num_constants = self._num_constants\n\n is_keras_tensor = backend.is_keras_tensor(\n tf.nest.flatten(additional_inputs)[0])\n for tensor in tf.nest.flatten(additional_inputs):\n if backend.is_keras_tensor(tensor) != is_keras_tensor:\n raise ValueError('The initial state of a Bidirectional'\n ' layer cannot be specified with a mix of'\n ' Keras tensors and non-Keras tensors'\n ' (a \"Keras tensor\" is a tensor that was'\n ' returned by a Keras layer, or by `Input`)')\n\n if is_keras_tensor:\n # Compute the full input spec, including state\n full_input = [inputs] + additional_inputs\n # The original input_spec is None since there could be a nested tensor\n # input. Update the input_spec to match the inputs.\n full_input_spec = [None for _ in range(len(tf.nest.flatten(inputs)))\n ] + additional_specs\n # Removing kwargs since the value are passed with input list.\n kwargs['initial_state'] = None\n kwargs['constants'] = None\n\n # Perform the call with temporarily replaced input_spec\n original_input_spec = self.input_spec\n self.input_spec = full_input_spec\n output = super(Bidirectional, self).__call__(full_input, **kwargs)\n self.input_spec = original_input_spec\n return output\n else:\n return super(Bidirectional, self).__call__(inputs, **kwargs)\n\n def call(self,\n inputs,\n training=None,\n mask=None,\n initial_state=None,\n constants=None):\n \"\"\"`Bidirectional.call` implements the same API as the wrapped `RNN`.\"\"\"\n kwargs = {}\n if generic_utils.has_arg(self.layer.call, 'training'):\n kwargs['training'] = training\n if generic_utils.has_arg(self.layer.call, 'mask'):\n kwargs['mask'] = mask\n if generic_utils.has_arg(self.layer.call, 'constants'):\n kwargs['constants'] = constants\n\n if generic_utils.has_arg(self.layer.call, 'initial_state'):\n if isinstance(inputs, list) and len(inputs) > 1:\n # initial_states are keras tensors, which means they are passed in\n # together with inputs as list. 
The initial_states need to be split into\n # forward and backward section, and be feed to layers accordingly.\n forward_inputs = [inputs[0]]\n backward_inputs = [inputs[0]]\n pivot = (len(inputs) - self._num_constants) // 2 + 1\n # add forward initial state\n forward_inputs += inputs[1:pivot]\n if not self._num_constants:\n # add backward initial state\n backward_inputs += inputs[pivot:]\n else:\n # add backward initial state\n backward_inputs += inputs[pivot:-self._num_constants]\n # add constants for forward and backward layers\n forward_inputs += inputs[-self._num_constants:]\n backward_inputs += inputs[-self._num_constants:]\n forward_state, backward_state = None, None\n if 'constants' in kwargs:\n kwargs['constants'] = None\n elif initial_state is not None:\n # initial_states are not keras tensors, eg eager tensor from np array.\n # They are only passed in from kwarg initial_state, and should be passed\n # to forward/backward layer via kwarg initial_state as well.\n forward_inputs, backward_inputs = inputs, inputs\n half = len(initial_state) // 2\n forward_state = initial_state[:half]\n backward_state = initial_state[half:]\n else:\n forward_inputs, backward_inputs = inputs, inputs\n forward_state, backward_state = None, None\n\n y = self.forward_layer(forward_inputs,\n initial_state=forward_state, **kwargs)\n y_rev = self.backward_layer(backward_inputs,\n initial_state=backward_state, **kwargs)\n else:\n y = self.forward_layer(inputs, **kwargs)\n y_rev = self.backward_layer(inputs, **kwargs)\n\n if self.return_state:\n states = y[1:] + y_rev[1:]\n y = y[0]\n y_rev = y_rev[0]\n\n if self.return_sequences:\n time_dim = 0 if getattr(self.forward_layer, 'time_major', False) else 1\n y_rev = backend.reverse(y_rev, time_dim)\n if self.merge_mode == 'concat':\n output = backend.concatenate([y, y_rev])\n elif self.merge_mode == 'sum':\n output = y + y_rev\n elif self.merge_mode == 'ave':\n output = (y + y_rev) / 2\n elif self.merge_mode == 'mul':\n output = y * y_rev\n elif self.merge_mode is None:\n output = [y, y_rev]\n else:\n raise ValueError(\n f'Unrecognized value for `merge_mode`. 
Received: {self.merge_mode}'\n 'Expected values are [\"concat\", \"sum\", \"ave\", \"mul\"]')\n\n if self.return_state:\n if self.merge_mode is None:\n return output + states\n return [output] + states\n return output\n\n def reset_states(self):\n self.forward_layer.reset_states()\n self.backward_layer.reset_states()\n\n def build(self, input_shape):\n with backend.name_scope(self.forward_layer.name):\n self.forward_layer.build(input_shape)\n with backend.name_scope(self.backward_layer.name):\n self.backward_layer.build(input_shape)\n self.built = True\n\n def compute_mask(self, inputs, mask):\n if isinstance(mask, list):\n mask = mask[0]\n if self.return_sequences:\n if not self.merge_mode:\n output_mask = [mask, mask]\n else:\n output_mask = mask\n else:\n output_mask = [None, None] if not self.merge_mode else None\n\n if self.return_state:\n states = self.forward_layer.states\n state_mask = [None for _ in states]\n if isinstance(output_mask, list):\n return output_mask + state_mask * 2\n return [output_mask] + state_mask * 2\n return output_mask\n\n @property\n def constraints(self):\n constraints = {}\n if hasattr(self.forward_layer, 'constraints'):\n constraints.update(self.forward_layer.constraints)\n constraints.update(self.backward_layer.constraints)\n return constraints\n\n def get_config(self):\n config = {'merge_mode': self.merge_mode}\n if self._num_constants:\n config['num_constants'] = self._num_constants\n\n if hasattr(self, '_backward_layer_config'):\n config['backward_layer'] = self._backward_layer_config\n base_config = super(Bidirectional, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n # Instead of updating the input, create a copy and use that.\n config = copy.deepcopy(config)\n num_constants = config.pop('num_constants', 0)\n # Handle forward layer instantiation (as would parent class).\n from keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top\n config['layer'] = deserialize_layer(\n config['layer'], custom_objects=custom_objects)\n # Handle (optional) backward layer instantiation.\n backward_layer_config = config.pop('backward_layer', None)\n if backward_layer_config is not None:\n backward_layer = deserialize_layer(\n backward_layer_config, custom_objects=custom_objects)\n config['backward_layer'] = backward_layer\n # Instantiate the wrapper, adjust it and return it.\n layer = cls(**config)\n layer._num_constants = num_constants # pylint: disable=protected-access\n return layer\n"
] | [
[
"tensorflow.compat.v2.io.gfile.exists",
"numpy.random.random",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.__internal__.tf2.enabled"
],
[
"tensorflow.compat.v2.compat.v1.assign",
"tensorflow.compat.v2.raw_ops.ResourceApplyCenteredRMSProp",
"tensorflow.compat.v2.control_dependencies",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.compat.v2.raw_ops.ResourceSparseApplyRMSProp",
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.sqrt",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.raw_ops.ResourceApplyRMSProp",
"tensorflow.compat.v2.group",
"numpy.array",
"tensorflow.compat.v2.raw_ops.ResourceSparseApplyCenteredRMSProp"
],
[
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.constant",
"numpy.asarray",
"tensorflow.compat.v2.ragged.constant",
"tensorflow.compat.v2.SparseTensor",
"numpy.array",
"tensorflow.compat.v2.TensorSpec"
],
[
"numpy.dot",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.identity",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.summary.record_if",
"numpy.matmul",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.Graph",
"tensorflow.compat.v2.zeros",
"numpy.zeros",
"tensorflow.compat.v2.TensorSpec",
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.function",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.summary.create_file_writer",
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.matmul",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.compat.v1.get_default_graph",
"tensorflow.compat.v2.constant",
"numpy.array",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.ones_like",
"numpy.random.random",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.compat.v1.train.summary_iterator",
"numpy.ones",
"tensorflow.compat.v2.compat.v1.assign_add",
"tensorflow.compat.v2.__internal__.get_name_scope"
],
[
"tensorflow.compat.v2.TensorShape",
"tensorflow.python.util.tf_export.keras_export"
],
[
"tensorflow.compat.v2.data.Dataset.from_tensor_slices",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.io.gfile.join",
"tensorflow.compat.v2.one_hot",
"numpy.random.randint",
"tensorflow.compat.v2.io.gfile.listdir",
"numpy.random.RandomState",
"tensorflow.compat.v2.io.gfile.walk"
],
[
"tensorflow.compat.v2.TensorShape",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.compat.v2.transpose"
],
[
"tensorflow.python.util.tf_export.keras_export"
],
[
"tensorflow.compat.v2.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.compat.v1.train.Saver",
"tensorflow.python.training.tracking.util.gather_initializers",
"tensorflow.compat.v2.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v2.Graph",
"tensorflow.python.training.tracking.util.add_variable",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.train.Checkpoint",
"tensorflow.compat.v2.__internal__.eager_context.eager_mode"
],
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.compat.v1.GraphDef",
"numpy.expand_dims",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.data.experimental.cardinality",
"numpy.asarray",
"numpy.concatenate",
"tensorflow.compat.v2.convert_to_tensor",
"numpy.max",
"tensorflow.compat.v2.__internal__.tf2.enabled",
"tensorflow.compat.v2.is_tensor",
"numpy.reshape",
"tensorflow.compat.v2.compat.v1.gather",
"tensorflow.compat.v2.compat.v1.expand_dims",
"numpy.argmax",
"tensorflow.compat.v2.compat.v1.data.make_one_shot_iterator",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.compat.v2.as_dtype",
"tensorflow.compat.v2.debugging.check_numerics",
"numpy.append",
"tensorflow.compat.v2.nest.pack_sequence_as",
"tensorflow.compat.v2.nest.flatten",
"numpy.array",
"tensorflow.compat.v2.compat.v1.sparse_concat",
"tensorflow.compat.v2.compat.v1.SparseTensorValue",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.cast",
"numpy.random.shuffle",
"tensorflow.compat.v2.compat.v1.data.make_initializable_iterator",
"numpy.prod",
"tensorflow.compat.v2.compat.v1.ragged.RaggedTensorValue",
"numpy.empty",
"tensorflow.compat.v2.__internal__.nest.flatten_up_to"
],
[
"tensorflow.compat.v2.TensorShape",
"tensorflow.python.util.tf_export.keras_export"
],
[
"numpy.argmax",
"tensorflow.compat.v2.test.main",
"numpy.random.randint"
],
[
"tensorflow.compat.v2.test.main",
"numpy.arange",
"tensorflow.python.eager.def_function.function",
"tensorflow.compat.v2.feature_column.numeric_column",
"tensorflow.compat.v2.__internal__.feature_column.FeatureTransformationCache"
],
[
"tensorflow.compat.v2.io.gfile.rmtree",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.train.Checkpoint",
"tensorflow.compat.v2.train.CheckpointManager"
],
[
"tensorflow.compat.v2.shape",
"tensorflow.python.util.tf_export.keras_export"
],
[
"tensorflow.compat.v2.shape",
"tensorflow.python.util.tf_export.keras_export",
"numpy.sqrt"
],
[
"tensorflow.compat.v2.keras.initializers.get",
"tensorflow.compat.v2.keras.Input",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.keras.layers.Dense",
"tensorflow.compat.v2.keras.initializers.serialize",
"tensorflow.compat.v2.nn.relu",
"tensorflow.compat.v2.matmul",
"tensorflow.compat.v2.keras.layers.Dropout",
"tensorflow.compat.v2.keras.datasets.mnist.load_data"
],
[
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.test.main",
"numpy.testing.assert_allclose",
"numpy.ones"
],
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.add_n",
"tensorflow.compat.v2.nest.is_nested",
"tensorflow.compat.v2.__internal__.nest.list_to_tuple",
"tensorflow.compat.v2.__internal__.nest.yield_flat_paths",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.expand_dims",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.distribute.get_strategy",
"tensorflow.compat.v2.__internal__.nest.map_structure_up_to",
"tensorflow.compat.v2.__internal__.nest.flatten_up_to"
],
[
"tensorflow.compat.v2.TensorShape",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.compat.v2.nest.flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
systemquant/book-pandas-for-finance | [
"90b7eb9be1de20a12ae72b9bb5d51424a979b174",
"90b7eb9be1de20a12ae72b9bb5d51424a979b174"
] | [
"old/03/08.py",
"old/02/28.py"
] | [
"from pandas import Series\n\ndata = [1000, 2000, 3000]\nindex = [\"메로나\", \"구구콘\", \"하겐다즈\"]\ns = Series(data=data, index=index)\n\nprint(s.loc['메로나':'구구콘'])\n",
"import numpy as np\n\ndata = [1, 2, 3]\narr = np.array(data)\ndata2 = arr * 10\nprint(data2)"
] | [
[
"pandas.Series"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vidursatija/SongWCT | [
"c892c2833ff9f85cfb31788babf016699c5eec8f"
] | [
"models.py"
] | [
"import torch\nimport torch.nn as nn\ntry:\n from torch.hub import load_state_dict_from_url\nexcept ImportError:\n from torch.utils.model_zoo import load_url as load_state_dict_from_url\nfrom torchsummary import summary\nimport numpy as np\n\n\nclass X_Enc(nn.Module):\n def __init__(self, layers, num_classes=1000, init_weights=True):\n super(X_Enc, self).__init__()\n\n self.features = nn.Sequential(*layers) # layers\n print(self.features)\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x):\n all_maxpools = []\n for l in self.features:\n if isinstance(l, nn.MaxPool1d) == False:\n x = l(x)\n else:\n x, pool_indices = l(x)\n all_maxpools.append(pool_indices)\n return x, all_maxpools\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\ndef make_layers_enc(cfg):\n layers = []\n conv_layers = []\n in_channels = cfg[0]\n cfg = cfg[1:]\n for v in cfg:\n if v == 'M':\n layers += conv_layers # [nn.Sequential(*conv_layers)]\n conv_layers = []\n layers += [nn.MaxPool1d(kernel_size=2, stride=2, return_indices=True)]\n else:\n conv1d = nn.Conv1d(in_channels, v, kernel_size=3, padding=1)\n conv_layers += [conv1d, nn.ReLU(inplace=True)]\n in_channels = v\n if len(conv_layers) > 0:\n layers += conv_layers # [nn.Sequential(*conv_layers)]\n return layers\n\n\nconfigs_enc = [\n [128, 128],\n [128, 128, 128, 'M', 256],\n [128, 128, 128, 'M', 256, 256, 'M', 512],\n [128, 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512]\n]\n\nconfigs_dec = [\n [128, 128],\n [256, 128, 'M', 128, 128],\n [512, 256, 'M', 256, 128, 'M', 128, 128],\n [512, 512, 'M', 512, 256, 'M', 256, 128, 'M', 128, 128]\n]\n\n\ndef encoder(x, pretrained_path=None, **kwargs):\n if pretrained_path is not None:\n kwargs['init_weights'] = False\n model = X_Enc(make_layers_enc(configs_enc[x-1]), **kwargs)\n if pretrained_path is not None:\n model.load_state_dict(torch.load(pretrained_path), strict=False)\n return model\n\n\nclass X_Dec(nn.Module):\n def __init__(self, layers, num_classes=1000, init_weights=True):\n super(X_Dec, self).__init__()\n\n self.layers = nn.Sequential(*layers)\n print(self.layers)\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x, all_maxpools):\n ct = -1\n for l in self.layers:\n if isinstance(l, nn.MaxUnpool1d) == False:\n x = l(x)\n else:\n x = l(x, all_maxpools[ct])\n ct -= 1\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\ndef make_layers_dec(cfg):\n layers = []\n conv_layers = []\n in_channels = cfg[0]\n cfg = cfg[1:]\n for i, v in enumerate(cfg):\n if v == 'M':\n layers += conv_layers # [nn.Sequential(*conv_layers)]\n conv_layers = []\n layers += [nn.MaxUnpool1d(kernel_size=2, stride=2)]\n else:\n conv1d = nn.ConvTranspose1d(in_channels, v, kernel_size=3, padding=1)\n if i != len(cfg) - 1:\n conv_layers += [conv1d, 
nn.ReLU(inplace=True)]\n else:\n conv_layers += [conv1d]\n in_channels = v\n if len(conv_layers) > 0:\n layers += conv_layers # [nn.Sequential(*conv_layers)]\n return layers\n\n\ndef decoder(x, pretrained_path=None, **kwargs):\n if pretrained_path is not None:\n kwargs['init_weights'] = False\n model = X_Dec(make_layers_dec(configs_dec[x-1]), **kwargs)\n if pretrained_path is not None:\n model.load_state_dict(torch.load(pretrained_path), strict=False)\n return model\n\n\nif __name__ == '__main__':\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # PyTorch v0.4.0\n encoder = vgg16_enc(x=3, pretrained=True) # .to(device)\n for k in encoder.state_dict():\n print(k)\n summary(encoder, (3, 224, 224), device=\"cpu\")\n z, all_maxpools = encoder(torch.from_numpy(np.zeros([1, 3, 224, 224])).float())\n\n decoder = vgg16_dec(x=3, pretrained=False) # .to(device)\n for k in decoder.state_dict():\n print(k)\n x_rebuild = decoder(z, all_maxpools)\n # summary(decoder, (256, 56, 56), device=\"cpu\")\n"
] | [
[
"torch.nn.Sequential",
"torch.load",
"torch.nn.init.constant_",
"torch.nn.MaxPool1d",
"torch.nn.init.normal_",
"torch.cuda.is_available",
"torch.nn.Conv1d",
"torch.nn.ConvTranspose1d",
"torch.nn.ReLU",
"torch.nn.MaxUnpool1d",
"numpy.zeros",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eembees/solar_flares | [
"9022f92c0577efaf06d7425002995e4fa4df74b4"
] | [
"reading_data.py"
] | [
"from pathlib import Path\nimport ijson\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nfrom json import JSONDecoder, JSONDecodeError # for reading the JSON data files\nimport re # for regular expressions\nimport os # for os related operations\nfrom sklearn.preprocessing import maxabs_scale\n\n\ndef decode_obj(line, pos=0, decoder=JSONDecoder()):\n no_white_space_regex = re.compile(r'[^\\s]')\n while True:\n match = no_white_space_regex.search(line, pos)\n if not match:\n return\n pos = match.start()\n try:\n obj, pos = decoder.raw_decode(line, pos)\n except JSONDecodeError as err:\n print('Oops! something went wrong. Error: {}'.format(err))\n yield obj\n\n\ndef get_obj_with_last_n_val(line, n):\n obj = next(decode_obj(line)) # type:dict\n id = obj['id']\n class_label = obj['classNum']\n\n data = pd.DataFrame.from_dict(obj['values']) # type:pd.DataFrame\n data.set_index(data.index.astype(int), inplace=True)\n last_n_indices = np.arange(0, 60)[-n:]\n data = data.loc[last_n_indices]\n\n return {'id': id, 'classType': class_label, 'values': data}\n\n\ndef get_obj_with_all(line):\n obj = next(decode_obj(line)) # type:dict\n id = obj['id']\n try:\n class_label = obj['classNum']\n except KeyError:\n class_label = None\n\n data = pd.DataFrame.from_dict(obj['values']) # type:pd.DataFrame\n data.set_index(data.index.astype(int), inplace=True)\n # last_n_indices = np.arange(0, 60)[-n:]\n # data = data.loc[last_n_indices]\n\n return {'id': id, 'classType': class_label, 'values': data}\n\n\ndef read_json_data_to_df(file_path: Path):\n \"\"\"\n Generates a dataframe by concatenating the last values of each\n multi-variate time series. This method is designed as an example\n to show how a json object can be converted into a csv file.\n :param data_dir: the path to the data directory.\n :param file_name: name of the file to be read, with the extension.\n :return: the generated dataframe.\n \"\"\"\n\n all_df, labels, ids = [], [], []\n with open(file_path, 'r') as infile: # Open the file for reading\n for line in infile: # Each 'line' is one MVTS with its single label (0 or 1).\n obj = get_obj_with_all(line)\n all_df.append(obj['values'])\n labels.append(obj['classType'])\n ids.append(obj['id'])\n print(type(obj))\n print(obj['values'])\n print(type(obj['values']))\n # df =\n\n exit()\n\n df = pd.concat(all_df).reset_index(drop=True)\n df = df.assign(LABEL=pd.Series(labels))\n df = df.assign(ID=pd.Series(ids))\n df.set_index([pd.Index(ids)])\n # Uncomment if you want to save this as CSV\n # df.to_csv(file_name + '_last_vals.csv', index=False)\n\n return df\n\n\ndef read_json_data_to_arr(file_path: Path):\n \"\"\"\n Generates a dataframe by concatenating the last values of each\n multi-variate time series. 
This method is designed as an example\n to show how a json object can be converted into a csv file.\n :param data_dir: the path to the data directory.\n :param file_name: name of the file to be read, with the extension.\n :return: the generated dataframe.\n \"\"\"\n\n all_df, labels, ids = [], [], []\n with open(file_path, 'r') as infile: # Open the file for reading\n for line in infile: # Each 'line' is one MVTS with its single label (0 or 1).\n obj = get_obj_with_all(line)\n # if obj['id'] < 100:\n df = obj['values'].sort_index()\n # remove anything 2 std dev from the mean\n df = df.mask(df.sub(df.mean()).div(df.std()).abs().gt(2))\n # do interpolation of variables\n\n df = df.interpolate(method='linear', extrapolate=False)\n\n df = df.fillna(method='ffill').fillna(method='bfill').fillna(0.0)\n\n\n\n\n all_df.append(df.values)\n labels.append(obj['classType'])\n ids.append(obj['id'])\n\n\n all_df = np.array(all_df)\n labels = np.array(labels)\n ids = np.array(ids)\n\n return all_df, labels, ids\n\n\ndef save_DF_to_NPZ(fp: Path, out_dir):\n fo = out_dir / fp.with_suffix('.npz').name\n # fo_k = Path(str(fo).replace(('.npz', '_keys.npz')))\n df = pd.read_json(fp, lines=True)\n\n np.savez(fo, df=df, keys=df.keys, index=df.index)\n\n pass\n\n\ndef save_arr_to_npz(arr: np.ndarray, labels: np.ndarray, ids: np.ndarray, fo: Path):\n np.savez(fo, data=arr, labels=labels, index=ids)\n pass\n\n\ndef load_npz_file(path: Path, return_ids = False):\n a = np.load(path)\n\n X = a['data']\n\n if np.any(np.isnan(X)):\n X = np.nan_to_num(X)\n\n\n try:\n y = a['labels']\n except KeyError:\n y = None\n except ValueError:\n y = None\n\n if return_ids:\n try:\n ids = a['ids']\n except KeyError:\n ids = None\n except ValueError:\n ids = None\n\n return X, y, ids\n else:\n return X, y\n\n\ndef save_y_preds(y_index: np.ndarray, y_pred: np.ndarray, fo: Path):\n np.savez(fo, index=y_index, labels=y_pred)\n pass\n\n\n\ndef preprocess_data(X, scaler=maxabs_scale):\n shap = X.shape\n # print(shap[1:])\n if shap[1:] != (60, 25):\n raise ValueError('Data shape wrong')\n for i, x_i in enumerate(X):\n x_i_t = np.zeros_like(x_i.transpose())\n for j, series in enumerate(x_i.transpose()):\n series = scaler(series)\n x_i_t[j] = series\n X[i] = x_i_t.transpose()\n return X\n\n\n\nif __name__ == '__main__':\n data_dir = Path('/Users/mag/PycharmProjects/solar_flares/input/')\n out_dir = Path('/Users/mag/PycharmProjects/solar_flares/input/npz')\n # out_dir = Path('./input/npz')\n\n file_paths = list(data_dir.glob('test*.json'))\n print(file_paths)\n for fp in file_paths:\n fo = out_dir / fp.with_suffix('.npz').name\n all_df, labels, ids = read_json_data_to_arr(fp)\n\n save_arr_to_npz(all_df, labels, ids, fo)\n"
] | [
[
"pandas.concat",
"numpy.savez",
"pandas.Series",
"numpy.isnan",
"numpy.arange",
"numpy.nan_to_num",
"pandas.Index",
"pandas.read_json",
"pandas.DataFrame.from_dict",
"numpy.load",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
geoffreynyaga/ostrich-project | [
"157cd7a3c3d9014e31ef21ca21de43f04d039997"
] | [
"CORE/engines/constraint.py"
] | [
"#!/usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n##################################################################################\r\n# File: c:\\Projects\\KENYA ONE PROJECT\\CORE\\engines\\constraint.py #\r\n# Project: c:\\Projects\\KENYA ONE PROJECT\\CORE\\engines #\r\n# Created Date: Thursday, January 9th 2020, 8:56:55 pm #\r\n# Author: Geoffrey Nyaga Kinyua ( <[email protected]> ) #\r\n# ----- #\r\n# Last Modified: Thursday January 9th 2020 8:56:55 pm #\r\n# Modified By: Geoffrey Nyaga Kinyua ( <[email protected]> ) #\r\n# ----- #\r\n# MIT License #\r\n# #\r\n# Copyright (c) 2020 KENYA ONE PROJECT #\r\n# #\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy of#\r\n# this software and associated documentation files (the \"Software\"), to deal in #\r\n# the Software without restriction, including without limitation the rights to #\r\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #\r\n# of the Software, and to permit persons to whom the Software is furnished to do #\r\n# so, subject to the following conditions: #\r\n# #\r\n# The above copyright notice and this permission notice shall be included in all #\r\n# copies or substantial portions of the Software. #\r\n# #\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #\r\n# SOFTWARE. #\r\n# ----- #\r\n# Copyright (c) 2020 KENYA ONE PROJECT #\r\n##################################################################################\r\n\r\n\r\nimport sys\r\n\r\nsys.path.append(\"../\")\r\nfrom CORE.API.db_API import write_to_db, read_from_db\r\n\r\nimport numpy as np # type: ignore\r\nimport matplotlib.pylab as plt # type: ignore\r\n\r\na = np.arange(50)\r\n\r\nws = np.arange(10, 35, 0.01)\r\n\r\ncdmin: float = 0.025\r\nwrite_to_db(\"cdMin\", cdmin)\r\n\r\ndo = read_from_db(\"rhoSL\")\r\ndalt = read_from_db(\"altitudeDensity\") # AAAAA\r\nk = read_from_db(\"k\")\r\n\r\n# v = read_from_db('cruiseSpeed') * 1.688\r\nv: float = 140 * 1.688 # AAAAA\r\nqcruise = 0.5 * dalt * v ** 2 # dynamic pressure at cruise\r\nqtakeoff = 0.5 * do * v ** 2 # dynamic pressure at take-off\r\n\r\nturnangle = 40 # turn angle\r\nloadfactor = 1 / (np.cos(turnangle)) # loadfactor\r\ntwturn = (\r\n qcruise\r\n * ((cdmin / ws) + (k * (loadfactor / qcruise) ** 2) * ws)\r\n * (v * 5850 / (0.8 * 550 * 0.6604))\r\n)\r\n\r\n# rate of climb\r\nroc = read_from_db(\"rateOfClimb\") * 3.28 * 60 # rate of climb ft/min #AAAAAAA\r\n# Vy=sqrt((2/do)*ws * sqrt( k/(3*cdmin) ))\r\nVy = 150\r\nVv = roc / 60\r\nqclimb = 0.5 * do * (Vy ** 2)\r\ntwclimb = (\r\n (Vv / Vy) + ((qclimb / ws) * cdmin) + ((qclimb / ws) * cdmin) + ((k / qclimb) * ws)\r\n) * (Vy * 5850 / (0.6 * 550))\r\n\r\n# ground run\r\nSg: int = 1000 # ground run ft\r\nVlof: float = 70 * 1.688\r\nclto: float = 1.4670\r\nu: float = 0.04\r\ncdto = 0.03\r\nq1 = 0.5 * do * (Vlof / np.sqrt(2)) ** 2\r\ntwtakeoff = (\r\n ((Vlof ** 2) / (2 * 32.174 * Sg)) + ((q1 * cdto) / ws) + u * (1 - (q1 * clto / ws))\r\n) * (Vlof * 5850 / (0.6 * 550))\r\n\r\n# cruise altitude\r\ntwcruise = (((qcruise * cdmin) / ws) + ((k / qcruise) * ws)) * (\r\n v * 
5850 / (0.6 * 550 * 0.6604)\r\n)\r\n\r\n# service ceiling\r\ntwservceiling = (\r\n (1.668 / np.sqrt((2 * ws / dalt) * np.sqrt(k / (3 * cdmin))))\r\n + (4 * np.sqrt(k * cdmin / 3))\r\n) * ((v * 5850) / (0.7 * 550 * 0.6604))\r\n\r\nplt.plot(ws, twclimb, label=\"climb\")\r\nplt.plot(ws, twturn, label=\"turn\")\r\nplt.plot(ws, twtakeoff, label=\"Takeoff\")\r\nplt.plot(ws, twservceiling, label=\"Service Ceiling\")\r\nplt.plot(ws, twcruise, label=\"cruise\")\r\nplotWS = read_from_db(\"WS\")\r\nplt.axvline(x=plotWS) ################################\r\nplt.legend(loc=\"upper left\")\r\n\r\nif __name__ == \"__main__\":\r\n plt.show()\r\n\r\n\r\ndef find_nearest(array, value):\r\n idx = (np.abs(array - value)).argmin()\r\n return idx\r\n\r\n\r\n# print(find_nearest(ws, plotWS))\r\nmyidx = find_nearest(ws, plotWS)\r\n\r\n# cruiseidx = (twcruise[myidx])\r\n# takeoffidx = twtakeoff[myidx]\r\n# climbidx = twclimb[myidx]\r\n# turnidx = twturn[myidx]\r\n# ceilingidx = twservceiling[myidx]\r\n# print([cruiseidx,takeoffidx,climbidx,turnidx,ceilingidx])\r\n\r\n\r\ndef point():\r\n cruiseidx = twcruise[myidx]\r\n takeoffidx = twtakeoff[myidx]\r\n climbidx = twclimb[myidx]\r\n turnidx = twturn[myidx]\r\n ceilingidx = twservceiling[myidx]\r\n # print([cruiseidx,takeoffidx,climbidx,turnidx,ceilingidx])\r\n # print (cruiseidx,\"cruiseidx\")\r\n\r\n x = np.array([cruiseidx, takeoffidx, climbidx, turnidx, ceilingidx])\r\n idx = x.argmax()\r\n return x[idx]\r\n\r\n\r\nfinalBHP = point()\r\n# print ( finalBHP,\"BHP\")\r\n\r\nwrite_to_db(\"finalBHP\", finalBHP)\r\n\r\nS = (read_from_db(\"finalMTOW\")) / (plotWS * 10.57)\r\nwrite_to_db(\"S\", S)\r\n"
] | [
[
"matplotlib.pylab.show",
"numpy.sqrt",
"numpy.abs",
"numpy.arange",
"numpy.cos",
"matplotlib.pylab.plot",
"matplotlib.pylab.legend",
"numpy.array",
"matplotlib.pylab.axvline"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nkuxx161/baseline-SR | [
"c4caf06c5a5a88d7f8e27069018316b319f0913b"
] | [
"plot.py"
] | [
"import pandas as pd\nimport os\n\ncurve_name = '5_k7'\n\ndata = pd.read_csv(os.path.join('result', curve_name+'.csv'))\ntimestamp = data['timestamp']\nvalue = data['value']\nmag = data['mag']\nisAnomaly = data['isAnomaly']\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt \n\nplt.subplot(3, 1, 1)\nplt.plot(timestamp, value)\nplt.title('value')\n\nplt.subplot(3, 1, 2)\nplt.plot(timestamp, mag)\nplt.title('mag')\n\nplt.subplot(3, 1, 3)\nplt.plot(timestamp, isAnomaly)\nplt.title('isAnomaly')\n\nplt.savefig(os.path.join('./images', 'SR_'+curve_name+'.png'))\nplt.show()\nplt.close()"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.use",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
catniplab/ML-music-analysis | [
"793d54ed16166fbcd9acf4eec24998892334e064",
"793d54ed16166fbcd9acf4eec24998892334e064"
] | [
"models/_sources/model_trainer_c4d127b7cc8008ff2c0c849733ead6e1.py",
"models/_sources/logistic_regression_207b05cd2ed83ee471bd1fd9fb4270d4.py"
] | [
"\"\"\"\nThis script creates an instance of a sacred experiment and defines default configurations for training a neural network or a regression model.\n\"\"\"\n\nfrom src.neural_nets.models import get_model\nfrom src.neural_nets.load_data import get_loader\nfrom src.neural_nets.metrics import MaskedBCE, Accuracy, compute_acc, compute_loss\n\nimport src.regression.logistic_regression as reg\n\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchsso.optim as soptim\nimport torch.nn.functional as F\nimport random\n\nfrom torch.utils.data import DataLoader\nfrom sacred import Experiment\nfrom torch import Tensor, device\nfrom copy import deepcopy\nfrom time import sleep\nfrom tqdm import tqdm\n\nfrom typing import List\nfrom itertools import product\n\n\n# create a new sacred experiment whose name is an integer\nex = Experiment(name=str(random.randint(0, 1000000)))\n\n\n# default configurations\[email protected]\ndef cfg():\n\n # system\n cuda = torch.cuda.is_available()\n gpu = 0\n base_dir = os.getcwd()\n\n # supported datasets\n # JSB_Chorales (short)\n # Nottingham (medium)\n # Piano_midi (long)\n # MuseData (extra long)\n dataset = \"JSB_Chorales\"\n\n # training\n num_epochs = 150\n batch_size = 128\n # mask some low notes and some high notes because they never show up\n low_off_notes = 0\n high_off_notes = 88\n lr = 0.001\n decay = 1.0\n optmzr = \"SGD\"\n regularization = 0.0\n\n # hyperparameter search\n do_hpsearch = False\n learning_rates = 10**np.linspace(-2, -4, 5)\n decays = 1 - np.linspace(0, 0.1, num=5)\n regularizations = 10**np.linspace(-2, -4, num=5)\n hps_epochs = 50\n\n # Supported architectures\n # REGRESSION\n # LDS\n # TANH\n architecture = 'LDS'\n readout = 'linear'\n gradient_clipping = 1\n jit = False # not fully implemented\n # for regression\n lag = 1\n window = 1\n # for neural networks\n input_size = 88\n hidden_size = 300\n num_layers = 1\n output_size = 88\n\n # see models.py and initialization.py for details\n init = 'default'\n scale = 1.0\n parity = None # see models.py\n t_distrib = torch.distributions.Uniform(0, 0.75)\n path = 'results/77/final_state_dict.pt'\n\n # when to save state dictionaries\n save_init_model = True\n save_final_model = True\n save_every_epoch = False\n\n # detect backprop anomalies\n detect_anomaly = False\n\n\n# give all random number generators the same seed\ndef _seed_all(_seed) -> None:\n torch.manual_seed(_seed)\n np.random.seed(_seed)\n random.seed(_seed)\n\n\n# this context is used when we are running things on the cpu\nclass NullContext(object):\n def __init__(self):\n pass\n def __enter__(self):\n pass\n def __exit__(self, type, value, traceback):\n pass\n\n\n# this function simply trains regression models and logs the results\n# see regression.trainer for details\[email protected]\ndef sklearn_experiment(dataset: str,\n save_dir: str,\n num_epochs: int,\n high_off_notes: int,\n low_off_notes: int,\n lag: int,\n window: int,\n _seed,\n _log,\n _run):\n \"\"\"\n :param dataset: name of the dataset to be used\n :save_dir: temporary directory where artifacts are being stored\n :lag: how many time steps into the future the regression model is to predict\n :window: how many time steps the regression model is to take into account\n :param _seed: sacred random seed\n :param _log: sacred object used to output to the command line\n :param _run: sacred object used to monitor the runtime\n \"\"\"\n\n num_notes = high_off_notes - low_off_notes\n\n models = 
reg.train_models(dataset,\n num_epochs,\n low_off_notes,\n high_off_notes,\n _seed,\n lag=lag,\n window=window)\n\n coefs = np.zeros((num_notes, num_notes*window))\n intercepts = np.zeros(num_notes*window)\n\n for i in range(num_notes):\n\n model = models[i]\n\n # if there were no notes played for this channel, a model won't be trained\n # simply save all parameters as -1 to discourage the note from being played\n if model == None:\n coefs[i] = -1\n intercepts[i] = -1\n\n else:\n coefs[i] = model.coef_\n intercepts[i] = model.intercept_\n\n np.save(save_dir + 'coefs.npy', coefs)\n np.save(save_dir + 'intercepts.npy', intercepts)\n\n _run.add_artifact(save_dir + 'coefs.npy')\n _run.add_artifact(save_dir + 'intercepts.npy')\n\n train_loss = reg.compute_loss(models,\n dataset,\n 'traindata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n test_loss = reg.compute_loss(models,\n dataset,\n 'testdata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n valid_loss = reg.compute_loss(models,\n dataset,\n 'validdata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n\n _run.log_scalar('trainLoss', train_loss)\n _run.log_scalar('testLoss', test_loss)\n _run.log_scalar('validLoss', valid_loss)\n\n train_acc = reg.compute_accuracy(models,\n dataset,\n 'traindata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n test_acc = reg.compute_accuracy(models,\n dataset,\n 'testdata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n valid_acc = reg.compute_accuracy(models,\n dataset,\n 'validdata',\n low_off_notes,\n high_off_notes,\n lag=lag,\n window=window)\n\n _run.log_scalar('trainAccuracy', train_acc)\n _run.log_scalar('testAccuracy', test_acc)\n _run.log_scalar('validAccuracy', valid_acc)\n\n\n# a single optimization step\[email protected]\ndef train_iter(device: device,\n cuda_device: torch.cuda.device,\n input_tensor: Tensor,\n target: Tensor,\n mask: Tensor,\n model: nn.Module,\n loss_fcn: nn.Module,\n optimizer: optim.Optimizer,\n save_every_epoch: bool,\n save_dir: str,\n train_loader: DataLoader,\n test_loader: DataLoader,\n valid_loader: DataLoader,\n low_off_notes: int,\n high_off_notes: int,\n _log,\n _run,\n logging=True):\n\n input_tensor = input_tensor.to(device)\n\n # number of songs in this batch\n N = input_tensor.shape[0]\n\n output, hidden_tensors = model(input_tensor)\n\n loss = loss_fcn(output, target, mask, model)/N\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # use sacred to log training loss and accuracy\n if logging:\n train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)\n _run.log_scalar(\"trainLoss\", loss.cpu().detach().item())\n _run.log_scalar(\"trainAccuracy\", train_acc)\n\n # save a copy of the model and make sacred remember it each epoch\n if save_every_epoch and logging:\n sd = deepcopy(model.state_dict())\n torch.save(init_sd, save_dir + 'state_dict_' + str(epoch) + '.pt')\n _run.add_artifact(save_dir + 'state_dict_' + str(epoch) + '.pt')\n\n\n# train a neural network\n# returns the final loss and accuracy on the training, testing, and validation sets\[email protected]\ndef pytorch_train_loop(cuda: bool,\n model_dict: dict,\n initializer: dict,\n train_loader: DataLoader,\n test_loader: DataLoader,\n valid_loader: DataLoader,\n low_off_notes: int,\n high_off_notes: int,\n optmzr: str,\n lr: float,\n decay: float,\n regularization: float,\n num_epochs: int,\n save_dir: str,\n save_init_model,\n save_every_epoch,\n save_final_model,\n _seed,\n 
_log,\n _run,\n logging=True):\n\n # construct and initialize the model\n model = get_model(model_dict, initializer, cuda)\n\n # save a copy of the initial model and make sacred remember it\n if save_init_model and logging:\n init_sd = deepcopy(model.state_dict())\n torch.save(init_sd, save_dir + 'initial_state_dict.pt')\n _run.add_artifact(save_dir + 'initial_state_dict.pt')\n\n # if we are on cuda we construct the device and run everything on it\n cuda_device = NullContext()\n device = torch.device('cpu')\n if cuda:\n dev_name = 'cuda:' + str(gpu)\n cuda_device = torch.cuda.device(dev_name)\n device = torch.device(dev_name)\n model = model.to(device)\n\n with cuda_device:\n\n # see metrics.py\n loss_fcn = MaskedBCE(regularization, low_off_notes=low_off_notes, high_off_notes=high_off_notes)\n\n # compute the metrics before training and log them\n if logging:\n\n train_loss = compute_loss(loss_fcn, model, train_loader)\n test_loss = compute_loss(loss_fcn, model, test_loader)\n val_loss = compute_loss(loss_fcn, model, valid_loader)\n\n _run.log_scalar(\"trainLoss\", train_loss)\n _run.log_scalar(\"testLoss\", test_loss)\n _run.log_scalar(\"validLoss\", val_loss)\n\n train_acc = compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)\n test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)\n val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)\n\n _run.log_scalar(\"trainAccuracy\", train_acc)\n _run.log_scalar(\"testAccuracy\", test_acc)\n _run.log_scalar(\"validAccuracy\", val_acc)\n\n # construct the optimizer\n optimizer = None\n if optmzr == \"SGD\":\n optimizer = optim.SGD(model.parameters(), lr=lr)\n elif optmzr == \"Adam\":\n optimizer = optim.Adam(model.parameters(), lr=lr)\n elif optmzr == \"RMSprop\":\n optimizer = optim.RMSprop(model.parameters(), lr=lr)\n else:\n raise ValueError(\"Optimizer {} not recognized.\".format(optmzr))\n\n # learning rate decay\n scheduler = None\n scheduler = optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: decay**epoch)\n\n # begin training loop\n for epoch in tqdm(range(num_epochs)):\n\n for input_tensor, target, mask in train_loader:\n train_iter(device,\n cuda_device,\n input_tensor,\n target,\n mask,\n model,\n loss_fcn,\n optimizer,\n save_every_epoch,\n save_dir,\n train_loader,\n test_loader,\n valid_loader,\n low_off_notes,\n high_off_notes,\n _log,\n _run,\n logging=logging)\n\n # learning rate decay\n scheduler.step()\n\n # use sacred to log testing and validation loss and accuracy\n if logging:\n\n test_loss = compute_loss(loss_fcn, model, test_loader)\n val_loss = compute_loss(loss_fcn, model, valid_loader)\n test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)\n val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)\n\n _run.log_scalar(\"testLoss\", test_loss)\n _run.log_scalar(\"validLoss\", val_loss)\n _run.log_scalar(\"testAccuracy\", test_acc)\n _run.log_scalar(\"validAccuracy\", val_acc)\n\n # save a copy of the trained model and make sacred remember it\n if save_final_model and logging:\n fin_sd = deepcopy(model.state_dict())\n torch.save(fin_sd, save_dir + 'final_state_dict.pt')\n _run.add_artifact(save_dir + 'final_state_dict.pt')\n\n # recompute the metrics so that this function can return them\n train_loss = compute_loss(loss_fcn, model, train_loader)\n test_loss = compute_loss(loss_fcn, model, test_loader)\n val_loss = compute_loss(loss_fcn, model, valid_loader)\n\n train_acc = 
compute_acc(model, train_loader, low=low_off_notes, high=high_off_notes)\n test_acc = compute_acc(model, test_loader, low=low_off_notes, high=high_off_notes)\n val_acc = compute_acc(model, valid_loader, low=low_off_notes, high=high_off_notes)\n\n return ((train_loss, test_loss, val_loss), (train_acc, test_acc, val_acc))\n\n\n# main function\[email protected]\ndef train_loop(cuda,\n gpu,\n base_dir,\n dataset,\n num_epochs,\n batch_size,\n low_off_notes,\n high_off_notes,\n lr,\n decay,\n optmzr,\n regularization,\n do_hpsearch,\n learning_rates,\n decays,\n regularizations,\n hps_epochs,\n architecture,\n readout,\n gradient_clipping,\n jit,\n lag,\n window,\n input_size,\n hidden_size,\n num_layers,\n output_size,\n detect_anomaly,\n init,\n scale,\n parity,\n t_distrib,\n path,\n save_init_model,\n save_final_model,\n save_every_epoch,\n _seed,\n _log,\n _run):\n\n # save artifacts to a temporary directory that gets erased when the experiment is over\n save_dir = base_dir + '/tmp_' + str(_seed)\n os.system('mkdir ' + save_dir)\n save_dir += '/'\n\n # give all random number generators the same seed\n _seed_all(_seed)\n\n sklearn_program = architecture == 'REGRESSION'\n\n # regression models and neural networks are trained very differently\n if sklearn_program:\n\n sklearn_experiment(dataset,\n save_dir,\n num_epochs,\n high_off_notes,\n low_off_notes,\n lag,\n window,\n _seed,\n _log,\n _run)\n\n # run a pytorch program\n else:\n\n model_dict = {'architecture': architecture,\n 'readout': readout,\n 'gradient_clipping': gradient_clipping,\n 'jit': jit,\n 'lag': lag,\n 'window': window,\n 'input_size': input_size,\n 'hidden_size': hidden_size,\n 'num_layers': num_layers,\n 'output_size': output_size\n }\n\n initializer = {'init': init,\n 'scale': scale,\n 'parity': parity,\n 't_distrib': t_distrib,\n 'path': path,\n 'low_off_notes': low_off_notes,\n 'high_off_notes': high_off_notes\n }\n\n # if we are debugging we may want to detect autograd anomalies\n torch.autograd.set_detect_anomaly(detect_anomaly)\n\n # construct the pytorch data loaders\n train_loader, test_loader, valid_loader = get_loader(dataset, batch_size)\n\n # standard training loop\n if not do_hpsearch:\n\n # the training loop function returns the metrics achieved at the end of training\n # they will be logged by default, no need to do anything with them here\n metrics = pytorch_train_loop(cuda,\n model_dict,\n initializer,\n train_loader,\n test_loader,\n valid_loader,\n low_off_notes,\n high_off_notes,\n optmzr,\n lr,\n decay,\n regularization,\n num_epochs,\n save_dir,\n save_init_model,\n save_every_epoch,\n save_final_model,\n _seed,\n _log,\n _run)\n\n # only goal here is to find the best hyper parameters\n else:\n\n min_test_loss = float('inf')\n best_lr = 0\n best_dcay = 0\n best_reg = 0\n\n hyperparams = product(learning_rates, decays, regularizations)\n\n for rate, dcay, reg in hyperparams:\n\n # train a model with the given hyperparameters\n # don't log anything, otherwise we will have a ridiculous amount of extraneous info\n metrics = pytorch_train_loop(cuda,\n model_dict,\n initializer,\n train_loader,\n test_loader,\n valid_loader,\n optmzr,\n rate,\n dcay,\n reg,\n hps_epochs,\n save_dir,\n save_init_model,\n save_every_epoch,\n save_final_model,\n _seed,\n _log,\n _run,\n logging=False)\n\n # loss is first index, test set is second index\n test_loss = metrics[0][1]\n\n # compare loss against other hyperparams and update if necessary\n if test_loss == test_loss and test_loss < min_test_loss:\n min_test_loss = 
test_loss\n best_lr = rate\n best_dcay = dcay\n best_reg = reg\n\n # record the best hyperparameters\n _run.log_scalar(\"learning_rate\", best_lr)\n _run.log_scalar(\"decay\", best_dcay)\n _run.log_scalar(\"regularization\", best_reg)\n\n # wait a second then remove the temporary directory used for storing artifacts\n sleep(1)\n os.system('rm -r ' + save_dir)\n",
"import sys\nimport math\nimport numpy as np\nimport sklearn.linear_model as lm\n\nfrom scipy.io import loadmat\n\nfrom tqdm import tqdm\n\n# For JSB_Chorales, notes 27 through 75 are the ones which are actually played\n\n\ndef get_dataset(dataname: str, key: str, lag=1, window=1, format='flattened'):\n \"\"\"\n :param dataname: which dataset is to be used\n :param key: 'traindata', 'testdata', 'validdata'\n :param lag: how many steps into the future are we predicting\n :param window: how many steps are we predicting\n \"\"\"\n\n data_dict = loadmat('data/' + dataname)\n arrays = data_dict[key][0]\n\n # this much will have to be chopped off at the beginning and end of each sequence\n offset = lag + window - 1\n\n # store sequences separately here\n xlist = []\n ylist = []\n\n # record each array, reformatted appropriately\n for array in arrays:\n\n T = len(array)\n\n newx = np.zeros((T - offset, 48*window))\n for t in range(T - offset):\n for i in range(window):\n newx[t, 48*i : 48*(i + 1)] = array[t + i, 27 : 75]\n xlist.append(newx)\n\n ylist.append(array[offset:, 27 : 75])\n\n # this format is needed for computing average loss and accuracy over time and sequences\n if format == 'listofarrays':\n\n return xlist, ylist\n\n # this format is needed for training\n elif format == 'flattened':\n\n # count how big the whole array needs to be\n size = 0\n for xseq in xlist:\n size += len(xseq)\n\n # initialize the flattened inputs and targets\n x = np.zeros((size, 48*window))\n y = np.zeros((size, 48))\n\n # keep track of where we are\n ix = 0\n\n # put every sequence together into one array\n for xseq, yseq in zip(xlist, ylist):\n\n T = len(xseq)\n\n x[ix : ix + T] = xseq\n y[ix : ix + T] = yseq\n\n ix += T\n\n return x, y\n\n else:\n raise ValueError(\"Format {} not recognized\".format(format))\n\n\n# some of the notes might be off the entire time, find them!\ndef find_off_notes(x):\n\n off_notes = []\n\n num_notes = x.shape[1]\n\n for note in range(num_notes):\n\n if not 1 in x[:, note]:\n off_notes.append(note)\n\n return off_notes\n\n\ndef train_models(dataname: str, num_epochs: int, num_notes: int, _seed, lag=1, window=1):\n \"\"\"\n :param dataname: which dataset to use for training\n :param lag: how many steps into the future are we predicting\n :param window: how many steps are we predicting\n \"\"\"\n\n # load the data\n x, y = get_dataset(dataname, 'traindata', lag=lag, window=window)\n\n off_notes = find_off_notes(x)\n\n # model is needed for every channel (note)\n model_list = []\n\n # train every model\n for channel in tqdm(range(num_notes)):\n\n # append a placeholder to the model list if this note is not played\n if channel in off_notes:\n model_list.append(None)\n\n # otherwise train the model on this particular note and append it\n else:\n model = lm.LogisticRegression(solver='saga', penalty='elasticnet', l1_ratio=0.9, random_state=_seed, max_iter=num_epochs)\n\n model.fit(x, y[:, channel])\n\n model_list.append(model)\n\n return model_list\n\n\ndef compute_accuracy(model_list, dataname: str, key: str, lag=1, window=1):\n \"\"\"\n :param model_list: the trained regression model for every note\n :param dataname: dataname of the dataset to be used\n :param key: 'traindata', 'testdata', 'validdata'\n :param lag: how many steps into the future are we predicting\n :param window: how many steps are we predicting\n \"\"\"\n\n # how many notes we are predicting\n num_notes = len(model_list)\n\n # load the data\n x, y = get_dataset(dataname, key, lag=lag, window=window, 
format='listofarrays')\n\n # accumulate accuracy over all sequences\n tot_over_seqs = 0\n\n for xarr, yarr in tqdm(zip(x, y)):\n\n # accumulate accuracy over time\n tot_over_time = 0\n\n for xt, yt in zip(xarr, yarr):\n\n # true positives, false positives, false negatives\n tp = 0\n fp = 0\n fn = 0\n\n # compute for each note\n for channel in range(num_notes):\n\n # get the appropriate model and prediction\n model = model_list[channel]\n\n if model != None:\n pred = model.predict(xt.reshape(1, -1))[0]\n\n tp += yt[channel]*pred\n fp += (1 - yt[channel])*pred\n fn += yt[channel]*(1 - pred)\n\n # avoid nans\n if tp == 0 and fp == 0 and fn == 0:\n tot_over_time += 0\n else:\n tot_over_time += tp/(tp + fp + fn)\n\n tot_over_seqs += tot_over_time/len(xarr)\n\n return tot_over_seqs/len(x)\n\n\ndef compute_loss(model_list, dataname: str, key: str, lag=1, window=1):\n \"\"\"\n :param model_list: the trained regression model for every note\n :param dataname: dataname of the dataset to be used\n :param key: 'traindata', 'testdata', 'validdata'\n :param lag: how many steps into the future are we predicting\n :param window: how many steps are we predicting\n \"\"\"\n\n # how many notes we are predicting\n num_notes = len(model_list)\n\n # load the data\n x, y = get_dataset(dataname, key, lag=lag, window=window, format='listofarrays')\n\n # accumulate loss over all sequences\n tot_over_seqs = 0\n\n for xarr, yarr in tqdm(zip(x, y)):\n\n # accumulate loss over time\n tot_over_time = 0\n\n for xt, yt in zip(xarr, yarr):\n\n # accumulate over each note\n tot = 0\n\n for channel in range(num_notes):\n\n model = model_list[channel]\n\n # sigmoid of the trained affine transformation\n pred = 1.0/(1.0 + np.exp(-(model.coef_ @ xt + model.intercept_)))\n\n # binary cross entropy with logits\n tot -= yt[channel]*math.log(pred) + (1 - yt[channel])*math.log(1 - pred)\n\n tot_over_time += tot\n\n tot_over_seqs += tot_over_time/len(xarr)\n\n return tot_over_seqs/len(x)\n\n"
] | [
[
"torch.optim.lr_scheduler.LambdaLR",
"numpy.random.seed",
"numpy.linspace",
"torch.autograd.set_detect_anomaly",
"torch.manual_seed",
"torch.cuda.device",
"numpy.save",
"torch.cuda.is_available",
"torch.device",
"torch.distributions.Uniform",
"numpy.zeros",
"torch.save"
],
[
"numpy.exp",
"scipy.io.loadmat",
"sklearn.linear_model.LogisticRegression",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
dutxubo/nni | [
"c16f4e1c89b54b8b80661ef0072433d255ad2d24",
"c16f4e1c89b54b8b80661ef0072433d255ad2d24",
"c16f4e1c89b54b8b80661ef0072433d255ad2d24",
"c16f4e1c89b54b8b80661ef0072433d255ad2d24",
"c16f4e1c89b54b8b80661ef0072433d255ad2d24",
"c16f4e1c89b54b8b80661ef0072433d255ad2d24",
"c16f4e1c89b54b8b80661ef0072433d255ad2d24",
"c16f4e1c89b54b8b80661ef0072433d255ad2d24",
"c16f4e1c89b54b8b80661ef0072433d255ad2d24",
"c16f4e1c89b54b8b80661ef0072433d255ad2d24",
"c16f4e1c89b54b8b80661ef0072433d255ad2d24"
] | [
"test/ut/tools/annotation/testcase/usercode/mnist.py",
"nni/algorithms/feature_engineering/gradient_selector/gradient_selector.py",
"examples/trials/mnist-keras/mnist-keras.py",
"test/ut/sdk/test_networkmorphism_tuner.py",
"test/ut/compression/v1/test_transformer_pruners.py",
"test/ut/compression/v2/test_task_generator.py",
"examples/trials/sklearn/classification/main.py",
"examples/nas/legacy/textnas/dataloader.py",
"nni/retiarii/oneshot/pytorch/sampling.py",
"nni/algorithms/hpo/metis_tuner/Regression_GP/Prediction.py",
"nni/common/graph_utils.py"
] | [
"# -*- encoding:utf8 -*-\n\n\"\"\"A deep MNIST classifier using convolutional layers.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport math\nimport tempfile\nimport tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nlogger = logging.getLogger('mnist')\n\nFLAGS = None\n\nclass MnistNetwork(object):\n def __init__(self,\n channel_1_num = 32,\n channel_2_num = 64,\n conv_size = 5,\n hidden_size = 1024,\n pool_size = 2,\n learning_rate = 0.0001,\n x_dim = 784,\n y_dim = 10):\n self.channel_1_num = channel_1_num\n self.channel_2_num = channel_2_num\n '''@nni.variable(nni.choice(2,3,5,7),name=self.conv_size)'''\n self.conv_size = conv_size\n '''@nni.variable(nni.choice(124,512,1024),name=self.hidden_size)'''\n self.hidden_size = hidden_size\n self.pool_size = pool_size\n '''@nni.variable(nni.randint(2,3,5),name=self.learning_rate)'''\n self.learning_rate = learning_rate\n self.x_dim = x_dim\n self.y_dim = y_dim\n\n def build_network(self):\n self.x = tf.placeholder(tf.float32, [None, self.x_dim], name = 'input_x')\n self.y = tf.placeholder(tf.float32, [None, self.y_dim], name = 'input_y')\n self.keep_prob = tf.placeholder(tf.float32, name = 'keep_prob')\n\n # Reshape to use within a convolutional neural net.\n # Last dimension is for \"features\" - there is only one here, since images are\n # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.\n with tf.name_scope('reshape'):\n try:\n input_dim = int(math.sqrt(self.x_dim))\n except:\n #print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))\n logger.debug('input dim cannot be sqrt and reshape. input dim: ', str(self.x_dim))\n raise\n x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])\n\n # First convolutional layer - maps one grayscale image to 32 feature maps.\n with tf.name_scope('conv1'):\n W_conv1 = weight_variable([self.conv_size, self.conv_size, 1, self.channel_1_num])\n b_conv1 = bias_variable([self.channel_1_num])\n \"\"\"@nni.function_choice(tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1),tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1),tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1),name=tf.nn.relu)\"\"\"\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n # Pooling layer - downsamples by 2X.\n with tf.name_scope('pool1'):\n \"\"\"@nni.function_choice(max_pool(h_conv1, self.pool_size),avg_pool(h_conv1, self.pool_size),name=max_pool)\"\"\"\n h_pool1 = max_pool(h_conv1, self.pool_size)\n\n # Second convolutional layer -- maps 32 feature maps to 64.\n with tf.name_scope('conv2'):\n W_conv2 = weight_variable([self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num])\n b_conv2 = bias_variable([self.channel_2_num])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\n # Second pooling layer.\n with tf.name_scope('pool2'):\n #\"\"\"@nni.dynamic(input={cnn_block:1, concat:2},function_choice={\"cnn_block\":(x,nni.choice([3,4])),\"cnn_block\":(x),\"concat\":(x,y)},limit={\"cnn_block.input\":[concat,input],\"concat.input\":[this.depth-1,this.depth-3,this.depth-5],\"graph.width\":[1]})\"\"\"\n h_pool2 = max_pool(h_conv2, self.pool_size)\n\n # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image\n # is down to 7x7x64 feature maps -- maps this to 1024 features.\n last_dim = int(input_dim / (self.pool_size * self.pool_size))\n with tf.name_scope('fc1'):\n W_fc1 = weight_variable([last_dim * last_dim * self.channel_2_num, 
self.hidden_size])\n b_fc1 = bias_variable([self.hidden_size])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, last_dim * last_dim * self.channel_2_num])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # Dropout - controls the complexity of the model, prevents co-adaptation of features.\n with tf.name_scope('dropout'):\n h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)\n\n # Map the 1024 features to 10 classes, one for each digit\n with tf.name_scope('fc2'):\n W_fc2 = weight_variable([self.hidden_size, self.y_dim])\n b_fc2 = bias_variable([self.y_dim])\n y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n with tf.name_scope('loss'):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = self.y, logits = y_conv))\n with tf.name_scope('adam_optimizer'):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(cross_entropy)\n\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(self.y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n return\n\ndef conv2d(x, W):\n \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool(x, pool_size):\n \"\"\"max_pool downsamples a feature map by 2X.\"\"\"\n return tf.nn.max_pool(x, ksize=[1, pool_size, pool_size, 1],\n strides=[1, pool_size, pool_size, 1], padding='SAME')\ndef avg_pool(x,pool_size):\n return tf.nn.avg_pool(x, ksize=[1, pool_size, pool_size, 1],\n strides=[1, pool_size, pool_size, 1], padding='SAME')\n\ndef weight_variable(shape):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef main():\n # Import data\n data_dir= '/tmp/tensorflow/mnist/input_data'\n mnist = input_data.read_data_sets(data_dir, one_hot=True)\n logger.debug('Mnist download data done.')\n\n # Create the model\n # Build the graph for the deep net\n mnist_network = MnistNetwork()\n mnist_network.build_network()\n logger.debug('Mnist build network done.')\n\n # Write log\n graph_location = tempfile.mkdtemp()\n logger.debug('Saving graph to: %s', graph_location)\n # print('Saving graph to: %s' % graph_location)\n train_writer = tf.summary.FileWriter(graph_location)\n train_writer.add_graph(tf.get_default_graph())\n\n test_acc = 0.0\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n batch_num=200\n for i in range(batch_num):\n '''@nni.variable(nni.choice(50,250,500),name=batch_size)'''\n batch_size=50\n batch = mnist.train.next_batch(batch_size)\n '''@nni.variable(nni.choice(1,5),name=dropout_rate)'''\n dropout_rate=0.5\n mnist_network.train_step.run(feed_dict={mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: dropout_rate})\n\n if i % 100 == 0:\n #train_accuracy = mnist_network.accuracy.eval(feed_dict={\n # mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: params['dropout_rate']})\n #print('step %d, training accuracy %g' % (i, train_accuracy))\n\n test_acc = mnist_network.accuracy.eval(feed_dict={\n mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels, mnist_network.keep_prob: 1.0})\n '''@nni.report_intermediate_result(test_acc)'''\n\n test_acc = 
mnist_network.accuracy.eval(feed_dict={\n mnist_network.x: mnist.test.images, mnist_network.y: mnist.test.labels, mnist_network.keep_prob: 1.0})\n '''@nni.report_final_result(test_acc)'''\n\n\ndef generate_default_params():\n params = {'data_dir': '/tmp/tensorflow/mnist/input_data',\n 'dropout_rate': 0.5,\n 'channel_1_num': 32,\n 'channel_2_num': 64,\n 'conv_size': 5,\n 'pool_size': 2,\n 'hidden_size': 1024,\n 'batch_size': 50,\n 'batch_num': 200,\n 'learning_rate': 1e-4}\n return params\n\nif __name__ == '__main__':\n # run command: python mnist.py --init_file_path ./init.json\n\n #FLAGS, unparsed = parse_command()\n #original_params = parse_init_json(FLAGS.init_file_path, {})\n\n #pipe_interface.set_params_to_env()\n '''@nni.get_next_parameter()'''\n try:\n params = generate_default_params()\n logger.debug('params')\n logger.debug('params update')\n main()\n except:\n logger.exception('Got some exception in while loop in mnist.py')\n raise\n",
"# Copyright (c) Microsoft Corporation. All rights reserved.\n#\n# MIT License\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and\n# associated documentation files (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge, publish, distribute,\n# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or\n# substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT\n# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT\n# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n# ==================================================================================================\n\nimport time\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.base import BaseEstimator\nfrom sklearn.feature_selection import SelectorMixin\nfrom sklearn.utils.validation import check_is_fitted\n\nimport torch\n\nfrom nni.feature_engineering.feature_selector import FeatureSelector\nfrom . import constants\nfrom .fginitialize import PrepareData\nfrom .fgtrain import _train\n\n\nclass FeatureGradientSelector(FeatureSelector, BaseEstimator, SelectorMixin):\n def __init__(self,\n order=4,\n penalty=1,\n n_features=None,\n max_features=None,\n learning_rate=1e-1,\n init='zero',\n n_epochs=1,\n shuffle=True,\n batch_size=1000,\n target_batch_size=1000,\n max_time=np.inf,\n classification=True,\n ordinal=False,\n balanced=True,\n preprocess='zscore',\n soft_grouping=False,\n verbose=0,\n device='cpu'):\n \"\"\"\n FeatureGradientSelector is a class that selects features for a machine\n learning model using a gradient based search.\n\n Parameters\n ----------\n order : int\n What order of interactions to include. Higher orders\n may be more accurate but increase the run time. 12 is the maximum allowed order.\n penatly : int\n Constant that multiplies the regularization term.\n n_features: int\n If None, will automatically choose number of features based on search.\n Otherwise, number of top features to select.\n max_features : int\n If not None, will use the 'elbow method' to determine the number of features\n with max_features as the upper limit.\n learning_rate : float\n init : str\n How to initialize the vector of scores. 'zero' is the default.\n Options: {'zero', 'on', 'off', 'onhigh', 'offhigh', 'sklearn'}\n n_epochs : int\n number of epochs to run\n shuffle : bool\n Shuffle \"rows\" prior to an epoch.\n batch_size : int\n Nnumber of \"rows\" to process at a time\n target_batch_size : int\n Number of \"rows\" to accumulate gradients over.\n Useful when many rows will not fit into memory but are needed for accurate estimation.\n classification : bool\n If True, problem is classification, else regression.\n ordinal : bool\n If True, problem is ordinal classification. Requires classification to be True.\n balanced : bool\n If true, each class is weighted equally in optimization, otherwise\n weighted is done via support of each class. 
Requires classification to be True.\n prerocess : str\n 'zscore' which refers to centering and normalizing data to unit variance or\n 'center' which only centers the data to 0 mean\n soft_grouping : bool\n if True, groups represent features that come from the same source.\n Used to encourage sparsity of groups and features within groups.\n verbose : int\n Controls the verbosity when fitting. Set to 0 for no printing\n 1 or higher for printing every verbose number of gradient steps.\n device : str\n 'cpu' to run on CPU and 'cuda' to run on GPU. Runs much faster on GPU\n \"\"\"\n assert order <= 12 and order >= 1, 'order must be an integer between 1 and 12, inclusive'\n assert n_features is None or max_features is None, \\\n 'only specify one of n_features and max_features at a time'\n\n self.order = order\n self.penalty = penalty\n self.n_features = n_features\n self.max_features = max_features\n self.learning_rate = learning_rate\n self.init = init\n self.n_epochs = n_epochs\n self.shuffle = shuffle\n self.batch_size = batch_size\n self.target_batch_size = target_batch_size\n self.max_time = max_time\n self.dftol_stop = -1\n self.freltol_stop = -1\n self.classification = classification\n self.ordinal = ordinal\n self.balanced = balanced\n self.preprocess = preprocess\n self.soft_grouping = soft_grouping\n self.verbose = verbose\n self.device = device\n\n self.model_ = None\n self.scores_ = None\n self._prev_checkpoint = None\n self._data_train = None\n\n def partial_fit(self, X, y,\n n_classes=None,\n groups=None):\n \"\"\"\n Select Features via a gradient based search on (X, y) on the given samples.\n Can be called repeatedly with different X and y to handle streaming datasets.\n\n Parameters\n ----------\n X : array-like\n Shape = [n_samples, n_features]\n The training input samples.\n y : array-like\n Shape = [n_samples]\n The target values (class labels in classification, real numbers in\n regression).\n n_classes : int\n Number of classes\n Classes across all calls to partial_fit.\n Can be obtained by via `np.unique(y_all).shape[0]`, where y_all is the\n target vector of the entire dataset.\n This argument is expected for the first call to partial_fit,\n otherwise will assume all classes are present in the batch of y given.\n It will be ignored in the subsequent calls.\n Note that y doesn't need to contain all labels in `classes`.\n groups : array-like\n Optional, shape = [n_features]\n Groups of columns that must be selected as a unit\n e.g. 
[0, 0, 1, 2] specifies the first two columns are part of a group.\n This argument is expected for the first call to partial_fit,\n otherwise will assume all classes are present in the batch of y given.\n It will be ignored in the subsequent calls.\n \"\"\"\n try:\n self._partial_fit(X, y, n_classes=n_classes, groups=groups)\n except constants.NanError:\n if hasattr(self, '_prev_checkpoint'):\n # if it's already done some batches successfully just ignore it\n print('failed fitting this batch, loss was nan')\n else:\n # if this is the first batch, reset and try with doubles\n if self.verbose:\n print('Loss was nan, trying with Doubles')\n self._reset()\n torch.set_default_tensor_type(torch.DoubleTensor)\n self._partial_fit(X, y, n_classes=n_classes, groups=groups)\n\n return self\n\n def _partial_fit(self, X, y, n_classes=None, groups=None):\n \"\"\"\n Private function for partial_fit to enable trying floats before doubles.\n \"\"\"\n # pass in X and y in chunks\n if hasattr(self, '_data_train'):\n # just overwrite the X and y from the new chunk but make them tensors\n # keep dataset stats from previous\n self._data_train.X = X.values if isinstance(X, pd.DataFrame) else X\n self._data_train.N, self._data_train.D = self._data_train.X.shape\n self._data_train.dense_size_gb = self._data_train.get_dense_size()\n self._data_train.set_dense_X()\n\n self._data_train.y = y.values if isinstance(y, pd.Series) else y\n self._data_train.y = torch.as_tensor(\n y, dtype=torch.get_default_dtype())\n else:\n data_train = self._prepare_data(X, y, n_classes=n_classes)\n self._data_train = data_train\n\n batch_size, _, accum_steps, max_iter = self._set_batch_size(\n self._data_train)\n\n rng = None # not used\n debug = 0 # {0,1} print messages and do other stuff?\n dn_logs = None # tensorboard logs; only specify if debug=1\n path_save = None # intermediate models saves; only specify if debug=1\n m, solver = _train(self._data_train,\n batch_size,\n self.order,\n self.penalty,\n rng,\n self.learning_rate,\n debug,\n max_iter,\n self.max_time,\n self.init,\n self.dftol_stop,\n self.freltol_stop,\n dn_logs,\n accum_steps,\n path_save,\n self.shuffle,\n device=self.device,\n verbose=self.verbose,\n prev_checkpoint=self._prev_checkpoint if hasattr(\n self, '_prev_checkpoint') else None,\n groups=groups if not self.soft_grouping else None,\n soft_groups=groups if self.soft_grouping else None)\n\n self._prev_checkpoint = m\n self._process_results(m, solver, X, groups=groups)\n return self\n\n def fit(self, X, y,\n groups=None):\n \"\"\"\n Select Features via a gradient based search on (X, y).\n\n Parameters\n ----------\n X : array-like\n Shape = [n_samples, n_features]\n The training input samples.\n y : array-like\n Shape = [n_samples]\n The target values (class labels in classification, real numbers in\n regression).\n groups : array-like\n Optional, shape = [n_features]\n Groups of columns that must be selected as a unit\n e.g. 
[0, 0, 1, 2] specifies the first two columns are part of a group.\n \"\"\"\n try:\n self._fit(X, y, groups=groups)\n except constants.NanError:\n if self.verbose:\n print('Loss was nan, trying with Doubles')\n torch.set_default_tensor_type(torch.DoubleTensor)\n self._fit(X, y, groups=groups)\n return self\n\n def get_selected_features(self):\n return self.selected_features_\n\n def _prepare_data(self, X, y, n_classes=None):\n \"\"\"\n Returns a PrepareData object.\n \"\"\"\n return PrepareData(X=X.values if isinstance(X, pd.DataFrame) else X,\n y=y.values if isinstance(y, pd.Series) else y,\n data_format=constants.DataFormat.NUMPY,\n classification=int(self.classification),\n ordinal=self.ordinal,\n balanced=self.balanced,\n preprocess=self.preprocess,\n verbose=self.verbose,\n device=self.device,\n n_classes=n_classes)\n\n def _fit(self, X, y, groups=None):\n \"\"\"\n Private function for fit to enable trying floats before doubles.\n \"\"\"\n data_train = self._prepare_data(X, y)\n\n batch_size, _, accum_steps, max_iter = self._set_batch_size(\n data_train)\n\n rng = None # not used\n debug = 0 # {0,1} print messages and log to tensorboard\n dn_logs = None # tensorboard logs; only specify if debug=1\n path_save = None # intermediate models saves; only specify if debug=1\n m, solver = _train(data_train,\n batch_size,\n self.order,\n self.penalty,\n rng,\n self.learning_rate,\n debug,\n max_iter,\n self.max_time,\n self.init,\n self.dftol_stop,\n self.freltol_stop,\n dn_logs,\n accum_steps,\n path_save,\n self.shuffle,\n device=self.device,\n verbose=self.verbose,\n groups=groups if not self.soft_grouping else None,\n soft_groups=groups if self.soft_grouping else None)\n\n self._process_results(m, solver, X, groups=groups)\n return self\n\n def _process_torch_scores(self, scores):\n \"\"\"\n Convert scores into flat numpy arrays.\n \"\"\"\n if constants.Device.CUDA in scores.device.type:\n scores = scores.cpu()\n return scores.numpy().ravel()\n\n def _set_batch_size(self, data_train):\n \"\"\"\n Ensures that batch_size is less than the number of rows.\n \"\"\"\n batch_size = min(self.batch_size, data_train.N)\n target_batch_size = min(max(\n self.batch_size, self.target_batch_size), data_train.N)\n accum_steps = max(int(np.ceil(target_batch_size / self.batch_size)), 1)\n max_iter = self.n_epochs * (data_train.N // batch_size)\n return batch_size, target_batch_size, accum_steps, max_iter\n\n def _process_results(self, m, solver, X, groups=None):\n \"\"\"\n Process the results of a run into something suitable for transform().\n \"\"\"\n self.scores_ = self._process_torch_scores(\n torch.sigmoid(m[constants.Checkpoint.MODEL]['x'] * 2))\n if self.max_features:\n self.max_features = min([self.max_features, self.scores_.shape[0]])\n n_features = self._recommend_number_features(solver)\n self.set_n_features(n_features, groups=groups)\n elif self.n_features:\n self.set_n_features(self.n_features, groups=groups)\n else:\n self.selected_features_ = m['feats']\n\n # subtract elapsed time from max_time\n self.max_time -= m['t']\n\n self.model_ = m\n\n return self\n\n def transform(self, X):\n \"\"\"\n Returns selected features from X.\n\n Paramters\n ---------\n X: array-like\n Shape = [n_samples, n_features]\n The training input samples.\n \"\"\"\n\n self._get_support_mask()\n if self.selected_features_.shape[0] == 0:\n raise ValueError(\n 'No Features selected, consider lowering the penalty or specifying n_features')\n return (X.iloc[:, self.selected_features_]\n if isinstance(X, pd.DataFrame)\n else 
X[:, self.selected_features_])\n\n def get_support(self, indices=False):\n \"\"\"\n Get a mask, or integer index, of the features selected.\n\n Parameters\n ----------\n indices : bool\n Default False\n If True, the return value will be an array of integers, rather than a boolean mask.\n\n Returns\n -------\n list :\n returns support: An index that selects the retained features from a feature vector.\n If indices is False, this is a boolean array of shape [# input features],\n in which an element is True iff its corresponding feature is selected for retention.\n If indices is True, this is an integer array of shape [# output features] whose values\n are indices into the input feature vector.\n \"\"\"\n self._get_support_mask()\n if indices:\n return self.selected_features_\n\n mask = np.zeros_like(self.scores_, dtype=bool)\n # pylint: disable=E1137\n mask[self.selected_features_] = True\n return mask\n\n def inverse_transform(self, X):\n \"\"\"\n Returns transformed X to the original number of column.\n This operation is lossy and all columns not in the transformed data\n will be returned as columns of 0s.\n \"\"\"\n self._get_support_mask()\n X_new = np.zeros((X.shape[0], self.scores_.shape[0]))\n X_new[self.selected_features_] = X\n return X_new\n\n def get_params(self, deep=True):\n \"\"\"\n Get parameters for this estimator.\n \"\"\"\n params = self.__dict__\n params = {key: val for (key, val) in params.items()\n if not key.endswith('_')}\n return params\n\n def set_params(self, **params):\n \"\"\"\n Set the parameters of this estimator.\n \"\"\"\n for param in params:\n if hasattr(self, param):\n setattr(self, param, params[param])\n return self\n\n def fit_transform(self, X, y):\n \"\"\"\n Select features and then return X with the selected features.\n\n Parameters\n ----------\n X : array-like\n Shape = [n_samples, n_features]\n The training input samples.\n y : array-like\n Shape = [n_samples]\n The target values (class labels in classification, real numbers in\n regression).\n \"\"\"\n self.fit(X, y)\n return self.transform(X)\n\n def _get_support_mask(self):\n \"\"\"\n Check if it is fitted.\n \"\"\"\n check_is_fitted(self, 'scores_')\n\n def _generate_scores(self, solver, xsub, ysub, step_size, feature_order):\n \"\"\"\n Generate forward passes to determine the number of features when max_features is set.\n \"\"\"\n scores = []\n for i in np.arange(1, self.max_features + 1, step_size):\n # optimization possible since xsub is growing?\n i = int(np.ceil(i))\n # pylint: disable=E1102\n score = solver.f_train(torch.tensor(np.ones(i),\n dtype=torch.get_default_dtype()\n ).unsqueeze(1).to(self.device),\n xsub[:, feature_order[:i]],\n ysub)\n if constants.Device.CUDA in score.device.type:\n score = score.cpu()\n # score.numpy()[0][0]\n scores.append(score)\n return scores\n\n def set_n_features(self, n, groups=None):\n \"\"\"\n Set the number of features to return after fitting.\n \"\"\"\n self._get_support_mask()\n self.n_features = n\n return self._set_top_features(groups=groups)\n\n def _set_top_features(self, groups=None):\n \"\"\"\n Set the selected features after a run.\n\n With groups, ensures that if any member of a group is selected, all members are selected\n \"\"\"\n self._get_support_mask()\n assert self.n_features <= self.scores_.shape[0], \\\n 'n_features must be less than or equal to the number of columns in X'\n # pylint: disable=E1130\n self.selected_features_ = np.argpartition(\n self.scores_, -self.n_features)[-self.n_features:]\n if groups is not None and not 
self.soft_grouping:\n selected_feature_set = set(self.selected_features_.tolist())\n for _ in np.unique(groups):\n group_members = np.where(groups == groups)[0].tolist()\n if selected_feature_set.intersection(group_members):\n selected_feature_set.update(group_members)\n self.selected_features_ = np.array(list(selected_feature_set))\n self.selected_features_ = np.sort(self.selected_features_)\n return self\n\n def set_top_percentile(self, percentile, groups=None):\n \"\"\"\n Set the percentile of features to return after fitting.\n \"\"\"\n self._get_support_mask()\n assert percentile <= 1 and percentile >= 0, \\\n 'percentile must between 0 and 1 inclusive'\n self.n_features = int(self.scores_.shape[0] * percentile)\n return self._set_top_features(groups=groups)\n\n def _recommend_number_features(self, solver, max_time=None):\n \"\"\"\n Get the recommended number of features by doing forward passes when max_features is set.\n \"\"\"\n max_time = max_time if max_time else self.max_time\n if max_time < 0:\n max_time = 60 # allow 1 minute extra if we already spent max_time\n MAX_FORWARD_PASS = 200\n MAX_FULL_BATCHES = 3 # the forward passes can take longer than the fitting\n # if we allow a full epoch of data to be included. By only doing 3 full batches at most\n # we get enough accuracy without increasing the time too much. This\n # constant may not be optimal\n accum_steps = solver.accum_steps\n step_size = max(self.max_features / MAX_FORWARD_PASS, 1)\n # pylint: disable=E1130\n feature_order = np.argsort(-self.scores_) # note the negative\n t = time.time()\n\n dataloader_iterator = iter(solver.ds_train)\n full_scores = []\n # keep_going = True\n with torch.no_grad():\n # might want to only consider a batch valid if there are at least\n # two classes\n for _ in range(accum_steps * MAX_FULL_BATCHES):\n scores = []\n try:\n xsub, ysub = next(dataloader_iterator)\n except StopIteration:\n # done with epoch, don't do more than one epoch\n break\n except Exception as e:\n print(e)\n break\n if max_time and time.time() - t > max_time:\n if self.verbose:\n print(\n \"Stoppinn forward passes because they reached max_time: \",\n max_time)\n if not full_scores:\n # no forward passes worked, return half of max_features\n return self.max_features // 2\n break\n if solver.multiclass:\n for target_class in range(solver.n_classes):\n ysub_binary = solver.transform_y_into_binary(\n ysub, target_class)\n scaling_value = solver._get_scaling_value(\n ysub, target_class)\n if not solver._skip_y_forward(ysub_binary):\n scores = self._generate_scores(\n solver, xsub, ysub_binary, step_size, feature_order)\n # one row will represent one class that is present in the data\n # all classes are weighted equally\n full_scores.append(\n [score * scaling_value for score in scores])\n else:\n if not solver._skip_y_forward(ysub):\n scores = self._generate_scores(\n solver, xsub, ysub, step_size, feature_order)\n full_scores.append(scores)\n best_index = FeatureGradientSelector._find_best_index_elbow(\n full_scores)\n if self.verbose:\n print(\"Forward passes took: \", time.time() - t)\n # account for step size and off by one (n_features is 1 indexed, not 0\n # )\n return int(\n np.ceil(\n np.arange(\n 1,\n self.max_features +\n 1,\n step_size))[best_index])\n\n @staticmethod\n def _find_best_index_elbow(full_scores):\n \"\"\"\n Finds the point on the curve that maximizes distance from the line determined by the endpoints.\n \"\"\"\n scores = pd.DataFrame(full_scores).mean(0).values.tolist()\n first_point = np.array([0, 
scores[0]])\n last_point = np.array([len(scores) - 1, scores[-1]])\n elbow_metric = []\n for i in range(len(scores)):\n elbow_metric.append(\n FeatureGradientSelector._distance_to_line(\n first_point, last_point, np.array([i, scores[i]])))\n return np.argmax(elbow_metric)\n\n @staticmethod\n def _distance_to_line(start_point, end_point, new_point):\n \"\"\"\n Calculates the shortest distance from new_point to the line determined by start_point and end_point.\n \"\"\"\n # for calculating elbow method\n return np.cross(new_point - start_point,\n end_point - start_point) / np.linalg.norm(\n end_point - start_point)\n\n def _reset(self):\n \"\"\"\n Reset the estimator by deleting all private and fit parameters.\n \"\"\"\n params = self.__dict__\n for key, _ in params.items():\n if key.endswith('_') or key.startswith('_'):\n delattr(self, key)\n return self\n",
"# Copyright (c) Microsoft Corporation\n# All rights reserved.\n#\n# MIT License\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport argparse\nimport logging\n\nimport os\nimport keras\nimport numpy as np\nfrom keras import backend as K\nfrom keras.callbacks import TensorBoard\nfrom keras.datasets import mnist\nfrom keras.layers import Conv2D, Dense, Flatten, MaxPooling2D\nfrom keras.models import Sequential\n\nimport nni\n\nLOG = logging.getLogger('mnist_keras')\nK.set_image_data_format('channels_last')\nTENSORBOARD_DIR = os.environ['NNI_OUTPUT_DIR']\n\nH, W = 28, 28\nNUM_CLASSES = 10\n\ndef create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):\n '''\n Create simple convolutional model\n '''\n layers = [\n Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),\n Conv2D(64, (3, 3), activation='relu'),\n MaxPooling2D(pool_size=(2, 2)),\n Flatten(),\n Dense(100, activation='relu'),\n Dense(num_classes, activation='softmax')\n ]\n\n model = Sequential(layers)\n\n if hyper_params['optimizer'] == 'Adam':\n optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])\n else:\n optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)\n model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])\n\n return model\n\ndef load_mnist_data(args):\n '''\n Load MNIST dataset\n '''\n mnist_path = os.path.join(os.environ.get('NNI_OUTPUT_DIR'), 'mnist.npz')\n (x_train, y_train), (x_test, y_test) = mnist.load_data(path=mnist_path)\n os.remove(mnist_path)\n\n x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]\n x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]\n y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]\n y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]\n\n LOG.debug('x_train shape: %s', (x_train.shape,))\n LOG.debug('x_test shape: %s', (x_test.shape,))\n\n return x_train, y_train, x_test, y_test\n\nclass SendMetrics(keras.callbacks.Callback):\n '''\n Keras callback to send metrics to NNI framework\n '''\n def on_epoch_end(self, epoch, logs={}):\n '''\n Run on end of each epoch\n '''\n LOG.debug(logs)\n # TensorFlow 2.0 API reference claims the key is `val_acc`, but in fact it's `val_accuracy`\n if 'val_acc' in logs:\n nni.report_intermediate_result(logs['val_acc'])\n else:\n nni.report_intermediate_result(logs['val_accuracy'])\n\ndef train(args, params):\n '''\n Train model\n 
'''\n x_train, y_train, x_test, y_test = load_mnist_data(args)\n model = create_mnist_model(params)\n\n model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,\n validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])\n\n _, acc = model.evaluate(x_test, y_test, verbose=0)\n LOG.debug('Final result is: %d', acc)\n nni.report_final_result(acc)\n\ndef generate_default_params():\n '''\n Generate default hyper parameters\n '''\n return {\n 'optimizer': 'Adam',\n 'learning_rate': 0.001\n }\n\nif __name__ == '__main__':\n PARSER = argparse.ArgumentParser()\n PARSER.add_argument(\"--batch_size\", type=int, default=200, help=\"batch size\", required=False)\n PARSER.add_argument(\"--epochs\", type=int, default=10, help=\"Train epochs\", required=False)\n PARSER.add_argument(\"--num_train\", type=int, default=60000, help=\"Number of train samples to be used, maximum 60000\", required=False)\n PARSER.add_argument(\"--num_test\", type=int, default=10000, help=\"Number of test samples to be used, maximum 10000\", required=False)\n\n ARGS, UNKNOWN = PARSER.parse_known_args()\n\n try:\n # get parameters from tuner\n RECEIVED_PARAMS = nni.get_next_parameter()\n LOG.debug(RECEIVED_PARAMS)\n PARAMS = generate_default_params()\n PARAMS.update(RECEIVED_PARAMS)\n # train\n train(ARGS, PARAMS)\n except Exception as e:\n LOG.exception(e)\n raise\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport json\nfrom unittest import TestCase, main\nfrom copy import deepcopy\nimport torch\n\nfrom nni.algorithms.hpo.networkmorphism_tuner import NetworkMorphismTuner\nfrom nni.algorithms.hpo.networkmorphism_tuner.graph import graph_to_json, json_to_graph\nfrom nni.algorithms.hpo.networkmorphism_tuner.graph_transformer import (\n to_deeper_graph,\n to_skip_connection_graph,\n to_wider_graph,\n)\nfrom nni.algorithms.hpo.networkmorphism_tuner.layers import layer_description_extractor\nfrom nni.algorithms.hpo.networkmorphism_tuner.nn import CnnGenerator\n\n\nclass NetworkMorphismTestCase(TestCase):\n \"\"\" unittest for NetworkMorphismTuner\n \"\"\"\n\n def test_graph_json_transform(self):\n \"\"\" unittest for graph_json_transform function\n \"\"\"\n\n graph_init = CnnGenerator(10, (32, 32, 3)).generate()\n graph_init = to_wider_graph(deepcopy(graph_init))\n graph_init = to_deeper_graph(deepcopy(graph_init))\n graph_init = to_skip_connection_graph(deepcopy(graph_init))\n json_out = graph_to_json(graph_init, \"temp.json\")\n\n graph_recover = json_to_graph(json_out)\n\n # compare all data in graph\n self.assertEqual(graph_init.input_shape, graph_recover.input_shape)\n self.assertEqual(graph_init.weighted, graph_recover.weighted)\n self.assertEqual(\n graph_init.layer_id_to_input_node_ids,\n graph_recover.layer_id_to_input_node_ids,\n )\n self.assertEqual(graph_init.adj_list, graph_recover.adj_list)\n self.assertEqual(\n graph_init.reverse_adj_list,\n graph_recover.reverse_adj_list)\n self.assertEqual(\n len(graph_init.operation_history), len(\n graph_recover.operation_history)\n )\n self.assertEqual(graph_init.n_dim, graph_recover.n_dim)\n self.assertEqual(graph_init.conv, graph_recover.conv)\n self.assertEqual(graph_init.batch_norm, graph_recover.batch_norm)\n self.assertEqual(graph_init.vis, graph_recover.vis)\n\n node_list_init = [node.shape for node in graph_init.node_list]\n node_list_recover = [node.shape for node in graph_recover.node_list]\n self.assertEqual(node_list_init, node_list_recover)\n self.assertEqual(len(graph_init.node_to_id),\n len(graph_recover.node_to_id))\n layer_list_init = [\n layer_description_extractor(item, graph_init.node_to_id)\n for item in graph_init.layer_list\n ]\n layer_list_recover = [\n layer_description_extractor(item, graph_recover.node_to_id)\n for item in graph_recover.layer_list\n ]\n self.assertEqual(layer_list_init, layer_list_recover)\n\n node_to_id_init = [graph_init.node_to_id[node]\n for node in graph_init.node_list]\n node_to_id_recover = [\n graph_recover.node_to_id[node] for node in graph_recover.node_list\n ]\n self.assertEqual(node_to_id_init, node_to_id_recover)\n\n layer_to_id_init = [\n graph_init.layer_to_id[layer] for layer in graph_init.layer_list\n ]\n layer_to_id_recover = [\n graph_recover.layer_to_id[layer] for layer in graph_recover.layer_list\n ]\n self.assertEqual(layer_to_id_init, layer_to_id_recover)\n\n def test_to_wider_graph(self):\n \"\"\" unittest for to_wider_graph function\n \"\"\"\n\n graph_init = CnnGenerator(10, (32, 32, 3)).generate()\n json_out = graph_to_json(graph_init, \"temp.json\")\n graph_recover = json_to_graph(json_out)\n wider_graph = to_wider_graph(deepcopy(graph_recover))\n model = wider_graph.produce_torch_model()\n out = model(torch.ones(1, 3, 32, 32))\n self.assertEqual(out.shape, torch.Size([1, 10]))\n\n def test_to_deeper_graph(self):\n \"\"\" unittest for to_deeper_graph function\n \"\"\"\n\n graph_init = 
CnnGenerator(10, (32, 32, 3)).generate()\n json_out = graph_to_json(graph_init, \"temp.json\")\n graph_recover = json_to_graph(json_out)\n deeper_graph = to_deeper_graph(deepcopy(graph_recover))\n model = deeper_graph.produce_torch_model()\n out = model(torch.ones(1, 3, 32, 32))\n self.assertEqual(out.shape, torch.Size([1, 10]))\n\n def test_to_skip_connection_graph(self):\n \"\"\" unittest for to_skip_connection_graph function\n \"\"\"\n\n graph_init = CnnGenerator(10, (32, 32, 3)).generate()\n json_out = graph_to_json(graph_init, \"temp.json\")\n graph_recover = json_to_graph(json_out)\n skip_connection_graph = to_skip_connection_graph(deepcopy(graph_recover))\n model = skip_connection_graph.produce_torch_model()\n out = model(torch.ones(1, 3, 32, 32))\n self.assertEqual(out.shape, torch.Size([1, 10]))\n\n def test_generate_parameters(self):\n \"\"\" unittest for generate_parameters function\n \"\"\"\n\n tuner = NetworkMorphismTuner()\n model_json = tuner.generate_parameters(0)\n model_json = json.loads(model_json)\n self.assertEqual(model_json[\"input_shape\"], [32, 32, 3])\n self.assertEqual(tuner.total_data[0][1:], (-1, 0))\n\n def test_receive_trial_result(self):\n \"\"\" unittest for receive_trial_result function\n \"\"\"\n\n tuner = NetworkMorphismTuner()\n model_json = tuner.generate_parameters(0)\n tuner.receive_trial_result(0, {}, 0.7)\n (json_out, father_id, model_id) = tuner.total_data[0]\n\n self.assertEqual(father_id, -1)\n self.assertEqual(model_json, json_out)\n\n ret = {\"model_id\": 0, \"metric_value\": 0.7}\n self.assertEqual(tuner.bo.search_tree.adj_list[model_id], [])\n self.assertEqual(tuner.history[-1], ret)\n\n def test_update_search_space(self):\n \"\"\" unittest for update_search_space function\n \"\"\"\n\n tuner = NetworkMorphismTuner()\n self.assertEqual(tuner.search_space, dict())\n tuner.update_search_space(\"Test\")\n self.assertEqual(tuner.search_space, \"Test\")\n\n def test_init_search(self):\n \"\"\" unittest for init_search function\n \"\"\"\n\n tuner = NetworkMorphismTuner()\n self.assertEqual(tuner.history, [])\n tuner.init_search()\n self.assertEqual(tuner.model_count, 1)\n self.assertEqual(len(tuner.training_queue), 1)\n self.assertEqual(len(tuner.descriptors), 1)\n\n def test_add_model(self):\n \"\"\" unittest for add_model function\n \"\"\"\n\n tuner = NetworkMorphismTuner()\n tuner.add_model(0.8, 0)\n ret = {\"model_id\": 0, \"metric_value\": 0.8}\n self.assertEqual(tuner.history[-1], ret)\n\n def test_get_best_model_id(self):\n \"\"\" unittest for get_best_model_id function\n \"\"\"\n\n tuner = NetworkMorphismTuner()\n tuner.add_model(0.8, 0)\n tuner.add_model(0.9, 1)\n self.assertEqual(tuner.get_best_model_id(), 1)\n\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\nimport math\nimport sys\nimport unittest\nfrom unittest import TestCase, main\n\nfrom nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))\nfrom sdk.models.pytorch_models.transformer import TransformerEncoder\n\n\ndef validate_sparsity(wrapper, sparsity, bias=False):\n masks = [wrapper.weight_mask]\n if bias and wrapper.bias_mask is not None:\n masks.append(wrapper.bias_mask)\n for m in masks:\n actual_sparsity = (m == 0).sum().item() / m.numel()\n msg = 'actual sparsity: {:.2f}, target sparsity: {:.2f}'.format(actual_sparsity, sparsity)\n assert math.isclose(actual_sparsity, sparsity, abs_tol=0.1), msg\n\n\nclass Model(nn.Module):\n \"\"\"\n A binary classifier using a transformer encoder for contextual embedding.\n \"\"\"\n def __init__(self, n_layer, hidden_dim, n_head):\n super(Model, self).__init__()\n self.embedding = TransformerEncoder(vocab_size=100, hidden_dim=hidden_dim, n_layers=n_layer, n_heads=n_head)\n self.classifier = nn.Linear(hidden_dim, 1)\n\n def forward(self, x, mask):\n raw_output = self.embedding(x, mask)\n pooled_output = raw_output[0]\n prediction = F.sigmoid(self.classifier(pooled_output)).squeeze()\n return prediction\n\n\ndef train(model, dataloader, criterion, optimizer):\n model.train()\n device = next(model.parameters()).device\n for _ in range(2):\n y = torch.ones(10).to(device)\n out = model(torch.randint(0, 100, (4, 10)).to(device), torch.ones(10).to(device))\n loss = criterion(out, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\ndef dry_run(model):\n device = next(model.parameters()).device\n for _ in range(2):\n y = torch.ones(10).to(device)\n _ = model(torch.randint(0, 100, (4, 10)).to(device), torch.ones(10).to(device))\n\n\ndef head_pruner_tests(criterion, global_sort, use_graph, iterative):\n print(\"Testing criterion {} with global_sort={} and use_graph={}\".format(criterion, global_sort, use_graph))\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # Build config list and arguments\n config_list = [{'sparsity': 0.5, 'op_types': ['Linear']}]\n\n kwargs = {'ranking_criterion': criterion, 'head_hidden_dim': 64}\n if global_sort:\n kwargs['global_sort'] = True\n else:\n kwargs['global_sort'] = False\n\n if use_graph:\n attention_name_groups = list(zip(['embedding.layers.{}.self_attn.q_proj'.format(i) for i in range(6)],\n ['embedding.layers.{}.self_attn.k_proj'.format(i) for i in range(6)],\n ['embedding.layers.{}.self_attn.v_proj'.format(i) for i in range(6)],\n ['embedding.layers.{}.self_attn.output_proj'.format(i) for i in range(6)]))\n kwargs['attention_name_groups'] = attention_name_groups\n else:\n dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device))\n kwargs['dummy_input'] = dummy_input\n\n if iterative:\n kwargs['num_iterations'] = 2\n kwargs['epochs_per_iteration'] = 1\n\n n_layers = 6\n n_heads = 8\n hidden_dim = 512\n model = Model(n_layers, hidden_dim, n_heads)\n model.to(device)\n kwargs['optimizer'] = torch.optim.SGD(model.parameters(), lr=0.001)\n\n def trainer(model, optimizer, criterion, epoch):\n return train(model, None, criterion, optimizer)\n kwargs['trainer'] = trainer\n kwargs['criterion'] = nn.BCELoss()\n\n def forward_runner(model):\n return dry_run(model)\n 
kwargs['forward_runner'] = forward_runner\n\n # create pruner and call compress()\n pruner = TransformerHeadPruner(model, config_list, **kwargs)\n pruner.compress()\n\n # test model and mask export\n pruner.export_model('./model_tmp.pth', './mask_tmp.pth', device=device)\n dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device))\n pruner.export_model('./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth',\n dummy_input=dummy_input, opset_version=10)\n\n # validate sparsity\n if not global_sort:\n for wrapper in pruner.modules_wrapper:\n validate_sparsity(wrapper, wrapper.config['sparsity'])\n\n\nclass PrunerTestCase(TestCase):\n def test_head_pruner(self):\n for criterion in [\"l1_weight\", \"l2_weight\", \"l1_activation\", \"l2_activation\", \"taylorfo\"]:\n for global_sort in [False, True]:\n for use_graph in [False, True]:\n for iterative in [False, True]:\n head_pruner_tests(criterion, global_sort, use_graph, iterative)\n\n file_paths = ['./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', './search_history.csv',\n './search_result.json']\n for f in file_paths:\n if os.path.exists(f):\n os.remove(f)\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nfrom typing import List\nimport unittest\n\nimport torch\nimport torch.nn.functional as F\n\nfrom nni.algorithms.compression.v2.pytorch.base import TaskResult\nfrom nni.algorithms.compression.v2.pytorch.pruning.tools import (\n AGPTaskGenerator,\n LinearTaskGenerator,\n LotteryTicketTaskGenerator,\n SimulatedAnnealingTaskGenerator\n)\n\n\nclass TorchModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = torch.nn.Conv2d(1, 5, 5, 1)\n self.bn1 = torch.nn.BatchNorm2d(5)\n self.conv2 = torch.nn.Conv2d(5, 10, 5, 1)\n self.bn2 = torch.nn.BatchNorm2d(10)\n self.fc1 = torch.nn.Linear(4 * 4 * 10, 100)\n self.fc2 = torch.nn.Linear(100, 10)\n\n def forward(self, x):\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4 * 4 * 10)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\ndef run_task_generator(task_generator_type):\n model = TorchModel()\n config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]\n\n if task_generator_type == 'agp':\n task_generator = AGPTaskGenerator(5, model, config_list)\n elif task_generator_type == 'linear':\n task_generator = LinearTaskGenerator(5, model, config_list)\n elif task_generator_type == 'lottery_ticket':\n task_generator = LotteryTicketTaskGenerator(5, model, config_list)\n elif task_generator_type == 'simulated_annealing':\n task_generator = SimulatedAnnealingTaskGenerator(model, config_list)\n\n count = run_task_generator_(task_generator)\n\n if task_generator_type == 'agp':\n assert count == 6\n elif task_generator_type == 'linear':\n assert count == 6\n elif task_generator_type == 'lottery_ticket':\n assert count == 5\n elif task_generator_type == 'simulated_annealing':\n assert count == 17\n\n\ndef run_task_generator_(task_generator):\n task = task_generator.next()\n factor = 0.9\n count = 0\n while task is not None:\n factor = factor ** 2\n count += 1\n task_result = TaskResult(task.task_id, TorchModel(), {}, {}, 1 - factor)\n task_generator.receive_task_result(task_result)\n task = task_generator.next()\n return count\n\n\nclass TaskGenerator(unittest.TestCase):\n def test_agp_task_generator(self):\n run_task_generator('agp')\n\n def test_linear_task_generator(self):\n run_task_generator('linear')\n\n def test_lottery_ticket_task_generator(self):\n run_task_generator('lottery_ticket')\n\n def test_simulated_annealing_task_generator(self):\n run_task_generator('simulated_annealing')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) Microsoft Corporation\n# All rights reserved.\n#\n# MIT License\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport nni\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_digits\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nimport logging\nimport numpy as np\n\nLOG = logging.getLogger('sklearn_classification')\n\ndef load_data():\n '''Load dataset, use 20newsgroups dataset'''\n digits = load_digits()\n X_train, X_test, y_train, y_test = train_test_split(\n digits.data, digits.target, random_state=99, test_size=0.25)\n\n ss = StandardScaler()\n X_train = ss.fit_transform(X_train)\n X_test = ss.transform(X_test)\n\n return X_train, X_test, y_train, y_test\n\ndef get_default_parameters():\n '''get default parameters'''\n params = {\n 'C': 1.0,\n 'kernel': 'linear',\n 'degree': 3,\n 'gamma': 0.01,\n 'coef0': 0.01\n }\n return params\n\ndef get_model(PARAMS):\n '''Get model according to parameters'''\n model = SVC()\n model.C = PARAMS.get('C')\n model.kernel = PARAMS.get('kernel')\n model.degree = PARAMS.get('degree')\n model.gamma = PARAMS.get('gamma')\n model.coef0 = PARAMS.get('coef0')\n\n return model\n\ndef run(X_train, X_test, y_train, y_test, model):\n '''Train model and predict result'''\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n LOG.debug('score: %s', score)\n nni.report_final_result(score)\n\nif __name__ == '__main__':\n X_train, X_test, y_train, y_test = load_data()\n\n try:\n # get parameters from tuner\n RECEIVED_PARAMS = nni.get_next_parameter()\n LOG.debug(RECEIVED_PARAMS)\n PARAMS = get_default_parameters()\n PARAMS.update(RECEIVED_PARAMS)\n LOG.debug(PARAMS)\n model = get_model(PARAMS)\n run(X_train, X_test, y_train, y_test, model)\n except Exception as exception:\n LOG.exception(exception)\n raise\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport logging\nimport os\nimport pickle\nfrom collections import Counter\n\nimport numpy as np\nimport torch\nfrom torch.utils import data\n\nlogger = logging.getLogger(\"nni.textnas\")\n\n\nclass PTBTree:\n WORD_TO_WORD_MAPPING = {\n \"{\": \"-LCB-\",\n \"}\": \"-RCB-\"\n }\n\n def __init__(self):\n self.subtrees = []\n self.word = None\n self.label = \"\"\n self.parent = None\n self.span = (-1, -1)\n self.word_vector = None # HOS, store dx1 RNN word vector\n self.prediction = None # HOS, store Kx1 prediction vector\n\n def is_leaf(self):\n return len(self.subtrees) == 0\n\n def set_by_text(self, text, pos=0, left=0):\n depth = 0\n right = left\n for i in range(pos + 1, len(text)):\n char = text[i]\n # update the depth\n if char == \"(\":\n depth += 1\n if depth == 1:\n subtree = PTBTree()\n subtree.parent = self\n subtree.set_by_text(text, i, right)\n right = subtree.span[1]\n self.span = (left, right)\n self.subtrees.append(subtree)\n elif char == \")\":\n depth -= 1\n if len(self.subtrees) == 0:\n pos = i\n for j in range(i, 0, -1):\n if text[j] == \" \":\n pos = j\n break\n self.word = text[pos + 1:i]\n self.span = (left, left + 1)\n\n # we've reached the end of the category that is the root of this subtree\n if depth == 0 and char == \" \" and self.label == \"\":\n self.label = text[pos + 1:i]\n # we've reached the end of the scope for this bracket\n if depth < 0:\n break\n\n # Fix some issues with variation in output, and one error in the treebank\n # for a word with a punctuation POS\n self.standardise_node()\n\n def standardise_node(self):\n if self.word in self.WORD_TO_WORD_MAPPING:\n self.word = self.WORD_TO_WORD_MAPPING[self.word]\n\n def __repr__(self, single_line=True, depth=0):\n ans = \"\"\n if not single_line and depth > 0:\n ans = \"\\n\" + depth * \"\\t\"\n ans += \"(\" + self.label\n if self.word is not None:\n ans += \" \" + self.word\n for subtree in self.subtrees:\n if single_line:\n ans += \" \"\n ans += subtree.__repr__(single_line, depth + 1)\n ans += \")\"\n return ans\n\n\ndef read_tree(source):\n cur_text = []\n depth = 0\n while True:\n line = source.readline()\n # Check if we are out of input\n if line == \"\":\n return None\n # strip whitespace and only use if this contains something\n line = line.strip()\n if line == \"\":\n continue\n cur_text.append(line)\n # Update depth\n for char in line:\n if char == \"(\":\n depth += 1\n elif char == \")\":\n depth -= 1\n # At depth 0 we have a complete tree\n if depth == 0:\n tree = PTBTree()\n tree.set_by_text(\" \".join(cur_text))\n return tree\n return None\n\n\ndef read_trees(source, max_sents=-1):\n with open(source) as fp:\n trees = []\n while True:\n tree = read_tree(fp)\n if tree is None:\n break\n trees.append(tree)\n if len(trees) >= max_sents > 0:\n break\n return trees\n\n\nclass SSTDataset(data.Dataset):\n def __init__(self, sents, mask, labels):\n self.sents = sents\n self.labels = labels\n self.mask = mask\n\n def __getitem__(self, index):\n return (self.sents[index], self.mask[index]), self.labels[index]\n\n def __len__(self):\n return len(self.sents)\n\n\ndef sst_get_id_input(content, word_id_dict, max_input_length):\n words = content.split(\" \")\n sentence = [word_id_dict[\"<pad>\"]] * max_input_length\n mask = [0] * max_input_length\n unknown = word_id_dict[\"<unknown>\"]\n for i, word in enumerate(words[:max_input_length]):\n sentence[i] = word_id_dict.get(word, unknown)\n mask[i] = 1\n return sentence, mask\n\n\ndef 
sst_get_phrases(trees, sample_ratio=1.0, is_binary=False, only_sentence=False):\n all_phrases = []\n for tree in trees:\n if only_sentence:\n sentence = get_sentence_by_tree(tree)\n label = int(tree.label)\n pair = (sentence, label)\n all_phrases.append(pair)\n else:\n phrases = get_phrases_by_tree(tree)\n sentence = get_sentence_by_tree(tree)\n pair = (sentence, int(tree.label))\n all_phrases.append(pair)\n all_phrases += phrases\n if sample_ratio < 1.:\n np.random.shuffle(all_phrases)\n result_phrases = []\n for pair in all_phrases:\n if is_binary:\n phrase, label = pair\n if label <= 1:\n pair = (phrase, 0)\n elif label >= 3:\n pair = (phrase, 1)\n else:\n continue\n if sample_ratio == 1.:\n result_phrases.append(pair)\n else:\n rand_portion = np.random.random()\n if rand_portion < sample_ratio:\n result_phrases.append(pair)\n return result_phrases\n\n\ndef get_phrases_by_tree(tree):\n phrases = []\n if tree is None:\n return phrases\n if tree.is_leaf():\n pair = (tree.word, int(tree.label))\n phrases.append(pair)\n return phrases\n left_child_phrases = get_phrases_by_tree(tree.subtrees[0])\n right_child_phrases = get_phrases_by_tree(tree.subtrees[1])\n phrases.extend(left_child_phrases)\n phrases.extend(right_child_phrases)\n sentence = get_sentence_by_tree(tree)\n pair = (sentence, int(tree.label))\n phrases.append(pair)\n return phrases\n\n\ndef get_sentence_by_tree(tree):\n if tree is None:\n return \"\"\n if tree.is_leaf():\n return tree.word\n left_sentence = get_sentence_by_tree(tree.subtrees[0])\n right_sentence = get_sentence_by_tree(tree.subtrees[1])\n sentence = left_sentence + \" \" + right_sentence\n return sentence.strip()\n\n\ndef get_word_id_dict(word_num_dict, word_id_dict, min_count):\n z = [k for k in sorted(word_num_dict.keys())]\n for word in z:\n count = word_num_dict[word]\n if count >= min_count:\n index = len(word_id_dict)\n if word not in word_id_dict:\n word_id_dict[word] = index\n return word_id_dict\n\n\ndef load_word_num_dict(phrases, word_num_dict):\n for sentence, _ in phrases:\n words = sentence.split(\" \")\n for cur_word in words:\n word = cur_word.strip()\n word_num_dict[word] += 1\n return word_num_dict\n\n\ndef init_trainable_embedding(embedding_path, word_id_dict, embed_dim=300):\n word_embed_model = load_glove_model(embedding_path, embed_dim)\n assert word_embed_model[\"pool\"].shape[1] == embed_dim\n embedding = np.random.random([len(word_id_dict), embed_dim]).astype(np.float32) / 2.0 - 0.25\n embedding[0] = np.zeros(embed_dim) # PAD\n embedding[1] = (np.random.rand(embed_dim) - 0.5) / 2 # UNK\n for word in sorted(word_id_dict.keys()):\n idx = word_id_dict[word]\n if idx == 0 or idx == 1:\n continue\n if word in word_embed_model[\"mapping\"]:\n embedding[idx] = word_embed_model[\"pool\"][word_embed_model[\"mapping\"][word]]\n else:\n embedding[idx] = np.random.rand(embed_dim) / 2.0 - 0.25\n return embedding\n\n\ndef sst_get_trainable_data(phrases, word_id_dict, max_input_length):\n texts, labels, mask = [], [], []\n\n for phrase, label in phrases:\n if not phrase.split():\n continue\n phrase_split, mask_split = sst_get_id_input(phrase, word_id_dict, max_input_length)\n texts.append(phrase_split)\n labels.append(int(label))\n mask.append(mask_split) # field_input is mask\n labels = np.array(labels, dtype=np.int64)\n texts = np.reshape(texts, [-1, max_input_length]).astype(np.int32)\n mask = np.reshape(mask, [-1, max_input_length]).astype(np.int32)\n\n return SSTDataset(texts, mask, labels)\n\n\ndef load_glove_model(filename, embed_dim):\n if 
os.path.exists(filename + \".cache\"):\n logger.info(\"Found cache. Loading...\")\n with open(filename + \".cache\", \"rb\") as fp:\n return pickle.load(fp)\n embedding = {\"mapping\": dict(), \"pool\": []}\n with open(filename) as f:\n for i, line in enumerate(f):\n line = line.rstrip(\"\\n\")\n vocab_word, *vec = line.rsplit(\" \", maxsplit=embed_dim)\n assert len(vec) == 300, \"Unexpected line: '%s'\" % line\n embedding[\"pool\"].append(np.array(list(map(float, vec)), dtype=np.float32))\n embedding[\"mapping\"][vocab_word] = i\n embedding[\"pool\"] = np.stack(embedding[\"pool\"])\n with open(filename + \".cache\", \"wb\") as fp:\n pickle.dump(embedding, fp)\n return embedding\n\n\ndef read_data_sst(data_path, max_input_length=64, min_count=1, train_with_valid=False,\n train_ratio=1., valid_ratio=1., is_binary=False, only_sentence=False):\n word_id_dict = dict()\n word_num_dict = Counter()\n\n sst_path = os.path.join(data_path, \"sst\")\n logger.info(\"Reading SST data...\")\n train_file_name = os.path.join(sst_path, \"trees\", \"train.txt\")\n valid_file_name = os.path.join(sst_path, \"trees\", \"dev.txt\")\n test_file_name = os.path.join(sst_path, \"trees\", \"test.txt\")\n train_trees = read_trees(train_file_name)\n train_phrases = sst_get_phrases(train_trees, train_ratio, is_binary, only_sentence)\n logger.info(\"Finish load train phrases.\")\n valid_trees = read_trees(valid_file_name)\n valid_phrases = sst_get_phrases(valid_trees, valid_ratio, is_binary, only_sentence)\n logger.info(\"Finish load valid phrases.\")\n if train_with_valid:\n train_phrases += valid_phrases\n test_trees = read_trees(test_file_name)\n test_phrases = sst_get_phrases(test_trees, valid_ratio, is_binary, only_sentence=True)\n logger.info(\"Finish load test phrases.\")\n\n # get word_id_dict\n word_id_dict[\"<pad>\"] = 0\n word_id_dict[\"<unknown>\"] = 1\n load_word_num_dict(train_phrases, word_num_dict)\n logger.info(\"Finish load train words: %d.\", len(word_num_dict))\n load_word_num_dict(valid_phrases, word_num_dict)\n load_word_num_dict(test_phrases, word_num_dict)\n logger.info(\"Finish load valid+test words: %d.\", len(word_num_dict))\n word_id_dict = get_word_id_dict(word_num_dict, word_id_dict, min_count)\n logger.info(\"After trim vocab length: %d.\", len(word_id_dict))\n\n logger.info(\"Loading embedding...\")\n embedding = init_trainable_embedding(os.path.join(data_path, \"glove.840B.300d.txt\"), word_id_dict)\n logger.info(\"Finish initialize word embedding.\")\n\n dataset_train = sst_get_trainable_data(train_phrases, word_id_dict, max_input_length)\n logger.info(\"Loaded %d training samples.\", len(dataset_train))\n dataset_valid = sst_get_trainable_data(valid_phrases, word_id_dict, max_input_length)\n logger.info(\"Loaded %d validation samples.\", len(dataset_valid))\n dataset_test = sst_get_trainable_data(test_phrases, word_id_dict, max_input_length)\n logger.info(\"Loaded %d test samples.\", len(dataset_test))\n\n return dataset_train, dataset_valid, dataset_test, torch.from_numpy(embedding)\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom nni.retiarii.nn.pytorch.api import LayerChoice, InputChoice\nfrom .random import PathSamplingLayerChoice, PathSamplingInputChoice\nfrom .base_lightning import BaseOneShotLightningModule\nfrom .enas import ReinforceController, ReinforceField\n\n\nclass EnasModule(BaseOneShotLightningModule):\n \"\"\"\n The ENAS module. There are 2 steps in an epoch. 1: training model parameters. 2: training ENAS RL agent. The agent will produce\n a sample of model architecture to get the best reward.\n The ENASModule should be trained with :class:`nni.retiarii.oneshot.utils.ConcatenateTrainValDataloader`.\n\n Parameters\n ----------\n base_model : pl.LightningModule\n he evaluator in ``nni.retiarii.evaluator.lightning``. User defined model is wrapped by base_model, and base_model will\n be wrapped by this model.\n ctrl_kwargs : dict\n Optional kwargs that will be passed to :class:`ReinforceController`.\n entropy_weight : float\n Weight of sample entropy loss.\n skip_weight : float\n Weight of skip penalty loss.\n baseline_decay : float\n Decay factor of baseline. New baseline will be equal to ``baseline_decay * baseline_old + reward * (1 - baseline_decay)``.\n ctrl_steps_aggregate : int\n Number of steps that will be aggregated into one mini-batch for RL controller.\n grad_clip : float\n Gradient clipping value.\n custom_replace_dict : Dict[Type[nn.Module], Callable[[nn.Module], nn.Module]], default = None\n The custom xxxChoice replace method. Keys should be xxxChoice type and values should return an ``nn.module``. This custom\n replace dict will override the default replace dict of each NAS method.\n\n Reference\n ----------\n .. [enas] H. Pham, M. Guan, B. Zoph, Q. Le, and J. Dean, “Efficient Neural Architecture Search via Parameters Sharing,”\n in Proceedings of the 35th International Conference on Machine Learning, Jul. 2018, pp. 
4095-4104.\n Available: https://proceedings.mlr.press/v80/pham18a.html\n \"\"\"\n def __init__(self, base_model, ctrl_kwargs = None,\n entropy_weight = 1e-4, skip_weight = .8, baseline_decay = .999,\n ctrl_steps_aggregate = 20, grad_clip = 0, custom_replace_dict = None):\n super().__init__(base_model, custom_replace_dict)\n\n self.nas_fields = [ReinforceField(name, len(module),\n isinstance(module, PathSamplingLayerChoice) or module.n_chosen == 1)\n for name, module in self.nas_modules]\n self.controller = ReinforceController(self.nas_fields, **(ctrl_kwargs or {}))\n\n self.entropy_weight = entropy_weight\n self.skip_weight = skip_weight\n self.baseline_decay = baseline_decay\n self.baseline = 0.\n self.ctrl_steps_aggregate = ctrl_steps_aggregate\n self.grad_clip = grad_clip\n\n def configure_architecture_optimizers(self):\n return optim.Adam(self.controller.parameters(), lr=3.5e-4)\n\n @property\n def default_replace_dict(self):\n return {\n LayerChoice : PathSamplingLayerChoice,\n InputChoice : PathSamplingInputChoice\n }\n\n def training_step(self, batch, batch_idx):\n # The ConcatenateTrainValDataloader yields both data and which dataloader it comes from.\n batch, source = batch\n\n if source == 'train':\n # step 1: train model params\n self._resample()\n self.call_user_optimizers('zero_grad')\n loss_and_metrics = self.model.training_step(batch, batch_idx)\n w_step_loss = loss_and_metrics['loss'] \\\n if isinstance(loss_and_metrics, dict) else loss_and_metrics\n self.manual_backward(w_step_loss)\n self.call_user_optimizers('step')\n return loss_and_metrics\n\n if source == 'val':\n # step 2: train ENAS agent\n x, y = batch\n arc_opt = self.architecture_optimizers\n arc_opt.zero_grad()\n self._resample()\n with torch.no_grad():\n logits = self.model(x)\n # use the default metric of self.model as reward function\n if len(self.model.metrics) == 1:\n _, metric = next(iter(self.model.metrics.items()))\n else:\n if 'default' not in self.model.metrics.keys():\n raise KeyError('model.metrics should contain a ``default`` key when' \\\n 'there are multiple metrics')\n metric = self.model.metrics['default']\n\n reward = metric(logits, y)\n if self.entropy_weight:\n reward = reward + self.entropy_weight * self.controller.sample_entropy.item()\n self.baseline = self.baseline * self.baseline_decay + reward * (1 - self.baseline_decay)\n rnn_step_loss = self.controller.sample_log_prob * (reward - self.baseline)\n if self.skip_weight:\n rnn_step_loss = rnn_step_loss + self.skip_weight * self.controller.sample_skip_penalty\n\n rnn_step_loss = rnn_step_loss / self.ctrl_steps_aggregate\n self.manual_backward(rnn_step_loss)\n\n if (batch_idx + 1) % self.ctrl_steps_aggregate == 0:\n if self.grad_clip > 0:\n nn.utils.clip_grad_norm_(self.controller.parameters(), self.grad_clip)\n arc_opt.step()\n arc_opt.zero_grad()\n\n def _resample(self):\n \"\"\"\n Resample the architecture as ENAS result. This doesn't require an ``export`` method in nas_modules to work.\n \"\"\"\n result = self.controller.resample()\n for name, module in self.nas_modules:\n module.sampled = result[name]\n\n def export(self):\n self.controller.eval()\n with torch.no_grad():\n return self.controller.resample()\n\n\nclass RandomSampleModule(BaseOneShotLightningModule):\n \"\"\"\n Random Sampling NAS Algorithm. 
In each epoch, model parameters are trained after a uniformly random sampling of each choice.\n The training result is also a random sample of the search space.\n\n Parameters\n ----------\n base_model : pl.LightningModule\n he evaluator in ``nni.retiarii.evaluator.lightning``. User defined model is wrapped by base_model, and base_model will\n be wrapped by this model.\n custom_replace_dict : Dict[Type[nn.Module], Callable[[nn.Module], nn.Module]], default = None\n The custom xxxChoice replace method. Keys should be xxxChoice type and values should return an ``nn.module``. This custom\n replace dict will override the default replace dict of each NAS method.\n \"\"\"\n automatic_optimization = True\n\n def training_step(self, batch, batch_idx):\n self._resample()\n return self.model.training_step(batch, batch_idx)\n\n @property\n def default_replace_dict(self):\n return {\n LayerChoice : PathSamplingLayerChoice,\n InputChoice : PathSamplingInputChoice\n }\n\n def _resample(self):\n \"\"\"\n Resample the architecture as RandomSample result. This is simply a uniformly sampling that doesn't require an ``export``\n method in nas_modules to work.\n \"\"\"\n result = {}\n for name, module in self.nas_modules:\n if name not in result:\n result[name] = random.randint(0, len(module) - 1)\n module.sampled = result[name]\n return result\n\n def export(self):\n return self._resample()\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport os\nimport sys\n\nimport numpy\n\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\n\n\ndef predict(parameters_value, regressor_gp):\n '''\n Predict by Gaussian Process Model\n '''\n parameters_value = numpy.array(parameters_value).reshape(-1, len(parameters_value))\n mu, sigma = regressor_gp.predict(parameters_value, return_std=True)\n\n return mu[0], sigma[0]\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\n\nimport logging\nimport queue\nimport re\nfrom collections import defaultdict\nimport torch\nfrom torch.utils.tensorboard._pytorch_graph import NodePy, NodePyIO, NodePyOP, GraphPy\nCLASSTYPE_KIND = 'ClassType'\nGETATTR_KIND = 'prim::GetAttr'\nCAT_KIND = 'aten::cat'\nLIST_CONSTRUCT_KIND = 'prim::ListConstruct'\nLIST_UNPACK_KIND = 'prim::ListUnpack'\nTUPLE_CONSTRUCT_KIND = 'prim::TupleConstruct'\nTUPLE_UNPACK_KIND = 'prim::TupleUnpack'\nCONSTANT_KIND = 'prim::Constant'\n\n_logger = logging.getLogger(__name__)\n\n\ndef build_module_graph(model, dummy_input):\n return TorchModuleGraph(model, dummy_input)\n\n\ndef build_graph(model, dummy_input, verbose=False):\n g = TorchProtoGraph(model, dummy_input, verbose)\n return g.graph_def, g.stepstats\n\n\ndef parse_traced_name(module_name):\n prefix = 'TracedModule['\n suffix = ']'\n if module_name.startswith(prefix) and module_name.endswith(suffix):\n module_name = module_name[len(prefix):-len(suffix)]\n return module_name\n\n\nclass TorchGraph:\n \"\"\"\n This class is to extract pytorch model topology graph by tracing\n \"\"\"\n\n def __init__(self, model=None, dummy_input=None, traced_model=None):\n \"\"\"\n Parameters\n ----------\n model : pytorch model\n The model user wants to speed up\n dummy_input : pytorch tensor\n The dummy input for ```jit.trace```, users should put it on right device before pass in\n traced_model : torch._C.torch.jit.TopLevelTracedModule\n An alredy traced model, if traced_model is not None, then TorchGraph will build the graph\n based on this traced model and won't trace the model again.\n \"\"\"\n assert torch.__version__ >= '1.3.1'\n # check if the input is legal\n if traced_model is not None:\n assert isinstance(traced_model, torch.jit.TopLevelTracedModule)\n self.trace = traced_model\n # it's ok if the graph is already unpacked\n torch._C._jit_pass_inline(self.trace.graph)\n elif model is not None and dummy_input is not None:\n self.bound_model = model\n self._trace(model, dummy_input)\n else:\n raise Exception(\n 'Please provide model & dummy_input or the traced_model as inputs')\n\n def _trace(self, model, dummy_input):\n training = model.training\n model.eval()\n kw_args = {}\n if torch.__version__ >= '1.6.0':\n # only pytorch with version greater than 1.6.0 has the strict option\n kw_args['strict'] = False\n self.trace = torch.jit.trace(model, dummy_input, **kw_args)\n torch._C._jit_pass_inline(self.trace.graph)\n model.train(training)\n\n\nclass TorchProtoGraph(TorchGraph):\n \"\"\"\n Generates model graph for pytorch models in protobuf, this implementation\n is borrowed from pytorch v1.4.0, and fixed following issues:\n https://github.com/pytorch/pytorch/issues/33691\n https://github.com/pytorch/pytorch/issues/33670\n\n \"\"\"\n\n def __init__(self, model, dummy_input, verbose=False):\n super().__init__(model, dummy_input)\n\n from tensorboard.compat.proto.config_pb2 import RunMetadata\n from tensorboard.compat.proto.graph_pb2 import GraphDef\n from tensorboard.compat.proto.step_stats_pb2 import StepStats, DeviceStepStats\n from tensorboard.compat.proto.versions_pb2 import VersionDef\n\n list_of_nodes = self.parse(self.trace.graph, self.trace, dummy_input)\n if verbose:\n print(self.trace.graph)\n self.stepstats = RunMetadata(step_stats=StepStats(\n dev_stats=[DeviceStepStats(device=\"/device:CPU:0\")]))\n self.graph_def = GraphDef(\n node=list_of_nodes, versions=VersionDef(producer=22))\n\n def parse(self, graph, trace, args=None, 
omit_useless_nodes=True):\n \"\"\"This method parses an optimized PyTorch model graph and produces\n a list of nodes and node stats for eventual conversion to TensorBoard\n protobuf format.\n\n Args:\n graph (PyTorch module): The model graph to be parsed.\n trace (PyTorch JIT TracedModule): The model trace to be parsed.\n args (tuple): input tensor[s] for the model.\n omit_useless_nodes (boolean): Whether to remove nodes from the graph.\n \"\"\"\n nodes_py = GraphPy()\n for node in graph.inputs():\n if omit_useless_nodes:\n if not node.uses(): # number of user of the node (= number of outputs/ fanout)\n continue\n\n if node.type().kind() != CLASSTYPE_KIND:\n nodes_py.append(NodePyIO(node, 'input'))\n\n attr_to_scope = dict()\n\n def node_to_name(d):\n return str(d).split(\":\")[0].strip()\n for node in graph.nodes():\n if node.kind() == GETATTR_KIND:\n attr_name = node.s('name')\n node_name = node_to_name(node)\n parent = node.input().node()\n # If the parent node is not the top-level \"self\" node\n if parent.kind() == GETATTR_KIND:\n parent_scope = attr_to_scope[node_to_name(parent)]\n attr_scope = parent_scope.split('/')[-1]\n attr_to_scope[node_name] = '{}/{}.{}'.format(\n parent_scope, attr_scope, attr_name)\n else:\n attr_to_scope[node_name] = '__module.{}'.format(attr_name)\n # We don't need classtype nodes; scope will provide this information\n if node.output().type().kind() != CLASSTYPE_KIND:\n node_py = NodePyOP(node)\n node_py.scopeName = attr_to_scope[node_name]\n nodes_py.append(node_py)\n else:\n nodes_py.append(NodePyOP(node))\n\n # Create sink nodes for output ops\n for i, node in enumerate(graph.outputs()):\n node_py = NodePyIO(node, 'output')\n node_py.debugName = \"output.{}\".format(i + 1)\n node_py.inputs = [node.debugName()]\n nodes_py.append(node_py)\n\n alias_to_name = dict()\n base_name = parse_traced_name(trace._name)\n for name, module in trace.named_modules(prefix='__module'):\n mod_name = parse_traced_name(module._name)\n attr_name = name.split('.')[-1]\n alias_to_name[name] = '{}[{}]'.format(mod_name, attr_name)\n\n for node in nodes_py.nodes_op:\n module_aliases = node.scopeName.split('/')[-1].split('.')\n module_name = ''\n for i, alias in enumerate(module_aliases):\n if i == 0:\n module_name = alias\n node.scopeName = base_name\n else:\n module_name += '.' + alias\n node.scopeName += '/' + \\\n (alias_to_name[module_name]\n if module_name in alias_to_name else alias)\n\n nodes_py.populate_namespace_from_OP_to_IO()\n return nodes_py.to_proto()\n\n\nclass NodePyGroup(NodePy):\n \"\"\"\n This class is used to represent a graph node which consists of multiple jit traced nodes. In a pytorch trace graph,\n there are multiple nodes are traced for one torch.nn.Module object, we group them together to form a single node to\n represent the torch.nn.Module object. We also group some functional call trace nodes together to form a new node.\n \"\"\"\n\n def __init__(self, name, unique_name, node_type, op_type, node_cpps, inputs=None, outputs=None, key_node=None):\n \"\"\"\n Parameters:\n -----------\n name: str\n node name, such as `conv1`, `backbone.classifier`\n unique_name: str\n A global unique name for current node. 
Due to some modules,\n such as relu, may be reused several times, so the scopename\n is not suitable as the global unique identifier, so we add a\n unique_name for each node as the global unique identifier.\n We should use the unique_name to traverset the module graph.\n node_type: str\n `module` or `func`\n op_type: str\n operation type, such as `Conv2d`, `aten::view`\n node_cpps: list of torch._C.Node\n jit trace nodes which are included in this new node\n inputs: list of str\n All the inputs of this node, each element is debugName of one input\n outputs: list of str\n All the outputs of this node, each element is debugName of one output\n key_node: torch._C.Node\n The key node of this NodePyGroup.\n \"\"\"\n super(NodePyGroup, self).__init__(name, [])\n self.node_cpps = node_cpps\n self.name = name\n self.unique_name = unique_name\n self.op_type = op_type\n self.type = node_type\n self.nodes = []\n self.auxiliary = None\n self.add_nodes(node_cpps)\n self.inputs = inputs\n self.outputs = outputs\n # The core node in this NodePyGroup\n self.key_node = key_node\n\n def add_nodes(self, node_cpps):\n for node_cpp in node_cpps:\n nodepy = NodePyOP(node_cpp)\n nodepy.name = node_cpp.scopeName() + '_' + node_cpp.kind()\n self.nodes.append(nodepy)\n\n def sub_node_names(self):\n return [x.name for x in self.nodes]\n\n def __repr__(self):\n return 'name: {}, type: {}, op_type: {}, sub_nodes: {}, inputs: {}, outputs: {}, aux: {}'.format(\n self.name, self.type, self.op_type, self.sub_node_names(),\n self.inputs, self.outputs, self.auxiliary\n )\n\n\nclass TorchModuleGraph(TorchGraph):\n \"\"\"\n Generates model graph, each node is created from single or multiple jit trace nodes.\n \"\"\"\n\n def __init__(self, model=None, dummy_input=None, traced_model=None):\n super().__init__(model, dummy_input, traced_model)\n self.global_count = 0\n self.reused_module = set()\n self.name_to_node, self.input_to_node, self.output_to_node = self._build_graph()\n self._extract_auxiliary_info()\n\n def _expand_key_func_node(self, node, nodes, input_to_node, output_to_node,\n module_type):\n \"\"\"\n For trace graph nodes, some nodes are not in modules, these nodes are usually generated by\n the functions directly called in module ```forward```. For such nodes, some of them are\n trivial op which are label by ```prim::```, some of them are not such ops which is call\n non-prim ops. 
This function is to merge neighbor prim ops to a non-prim op, to construct\n a node.\n\n Parameters\n ----------\n node : trace graph node\n The non-prim node to expand\n nodes : list of trace graph node\n All the trace graph nodes within the same scope as the non-prim node\n input_to_node : dict\n key: input name, value: a node that uses this input\n output_to_node : dict\n key: output name, value: a node that generates this output\n module_type : str\n can be 'module' or 'func'\n\n Returns\n -------\n node\n the expanded non-prim node\n \"\"\"\n # TODO: scope name could be empty\n node_name = '.'.join([self._get_module_name(\n node.scopeName()), node.kind(), str(self.global_count)])\n unique_name = node_name\n _logger.debug(\"expand non-prim node, node name: %s\", node_name)\n self.global_count += 1\n op_type = node.kind()\n node_group = [node]\n inputs = []\n outputs = []\n node_queue = queue.Queue()\n node_queue.put(node)\n while not node_queue.empty():\n curr_node = node_queue.get()\n for _input in curr_node.inputs():\n if _input.node().kind() == CONSTANT_KIND:\n continue\n input_name = _input.debugName()\n if input_name in output_to_node:\n for predecessor_node in output_to_node[input_name]:\n if predecessor_node in nodes:\n if not self._is_key_func(predecessor_node):\n if predecessor_node not in node_group:\n node_group.append(predecessor_node)\n node_queue.put(predecessor_node)\n else:\n inputs.append(input_name)\n else:\n inputs.append(input_name)\n else:\n inputs.append(input_name)\n for output in node.outputs():\n if output.node().kind() == CONSTANT_KIND:\n continue\n outputs.append(output.debugName())\n nodepy = NodePyGroup(node_name, unique_name, module_type, op_type,\n node_group, inputs=inputs, outputs=outputs, key_node=node)\n return nodepy\n\n def _expand_module_node(self, node, node_name, unique_name, op_type, nodes,\n input_to_node, output_to_node, module_type):\n \"\"\"\n merge the adjacent nodes of the module. 
The difference between the\n _expand_module_node and _expand_non_prim_node is that, the _expand_non_prim_node\n only merge the prim:: nodes into the aten:: node, in contrast,the _expand_module_node\n will merge all adjacent nodes into a same nodepy group.\n\n Parameters\n ----------\n node : trace graph node\n The non-prim node to expand\n node_name : str\n specify the node_name for NodePyGroup\n unique_name : str\n unique_name for the NodePyGroup\n op_type : str\n specify the op_type for the NodePyGroup\n nodes : list of trace graph node\n All the trace graph nodes within the same scope as the non-prim node\n input_to_node : dict\n key: input name, value: a node that uses this input\n output_to_node : dict\n key: output name, value: a node that generates this output\n module_type : str\n can be 'module' or 'func'\n Returns\n -------\n node\n the expanded non-prim node\n\n \"\"\"\n _logger.debug(\"expand module node, node name: %s\", node_name)\n self.global_count += 1\n if not op_type:\n op_type = node.kind()\n node_group = [node]\n inputs = []\n outputs = []\n node_queue = queue.Queue()\n node_queue.put(node)\n visited = {node}\n while not node_queue.empty():\n curr_node = node_queue.get()\n for _input in curr_node.inputs():\n if _input.node().kind() == CONSTANT_KIND:\n continue\n input_name = _input.debugName()\n if input_name in output_to_node:\n for predecessor_node in output_to_node[input_name]:\n if predecessor_node in nodes:\n if predecessor_node not in visited:\n node_group.append(predecessor_node)\n node_queue.put(predecessor_node)\n visited.add(predecessor_node)\n else:\n inputs.append(input_name)\n else:\n inputs.append(input_name)\n for _output in curr_node.outputs():\n if _output.node().kind() == CONSTANT_KIND:\n continue\n output_name = _output.debugName()\n if output_name in input_to_node:\n for successor_node in input_to_node[output_name]:\n if successor_node in nodes:\n if successor_node not in visited:\n node_group.append(successor_node)\n node_queue.put(successor_node)\n visited.add(successor_node)\n else:\n outputs.append(output_name)\n else:\n outputs.append(output_name)\n unique_outputs = list(set(outputs))\n # remove the dumplicated output names\n unique_outputs.sort(key=outputs.index)\n\n nodepy = NodePyGroup(node_name, unique_name, module_type, op_type,\n node_group, inputs=list(inputs), outputs=unique_outputs)\n return nodepy\n\n def _extract_cat_info(self, node_group, cpp_node):\n \"\"\"\n Extract the detail information of the cat operation,\n such the order of the input tensor, the shape of each\n input tensor, the output shape, and the cat dimension.\n\n Parameters\n ----------\n node_group : NodePyGroup\n cpp_node: torch._C.Node\n It should be ```aten::cat``` node\n\n Returns\n -------\n dict\n Include auxiliary information for the cat operation.\n This dict objec has four keys: 'cat_dim', 'out_shape',\n 'in_order' and 'in_shape'. cat_dim is the dimension of\n the cat operation to concat the input tensors. out_shape\n is the shape of the output tensor of the cat operation.\n in_order is an ordered list which contains the corresponding\n parent operaion nodes of the input tensors. 
in_shape is also\n an ordered list that contains the input shapes of the input\n tensor.\n \"\"\"\n # only suport the cat operation\n assert cpp_node.kind() == CAT_KIND\n cat_info = {}\n # get the shape of the output tensor\n t_output = cpp_node.output()\n out_shape = t_output.type().sizes()\n cat_info['out_shape'] = out_shape\n # get the cat dimension\n inputs = cpp_node.inputs()\n cat_dim = list(inputs)[1].toIValue()\n cat_info['cat_dim'] = cat_dim\n # get the order of the input tensors\n # To get the order of the input tensors, we need\n # to be aware of the topology of the model, which\n # means we should extract the auxiliary information\n # after the build_index function.\n input_order = []\n list_construct_cpp = list(cpp_node.inputs())[0].node()\n input_tensors = list(list_construct_cpp.inputs())\n for _tensor in input_tensors:\n debug_name = _tensor.debugName()\n if debug_name in self.output_to_node:\n input_order.append(self.output_to_node[debug_name].unique_name)\n else:\n # the input tensor may be the input tensor of the whole model\n input_order.append(None)\n cat_info['in_order'] = input_order\n input_shapes = [t.type().sizes() for t in input_tensors]\n cat_info['in_shape'] = input_shapes\n return cat_info\n\n def _extract_linear_shape_info(self, node_group):\n \"\"\"\n Extract linear shape input/output tensor shape info from its aten::addmm op.\n\n Parameters\n ----------\n node_group : NodePyGroup\n NodePyGroup object associated with the linear module.\n\n Returns\n -------\n dict\n Include shape of input tensor and shape of output tensor\n \"\"\"\n for cpp_node in node_group.node_cpps:\n if cpp_node.kind() == 'aten::addmm':\n # https://github.com/pytorch/pytorch/blob/1.6/torch/nn/functional.py#L1682\n # inputs of aten::addmm:\n # inputs[0] is bias\n # inputs[1] is input data\n # inputs[2] is weight\n t_input = list(cpp_node.inputs())[1]\n t_output = cpp_node.output()\n assert isinstance(t_input.type(), torch._C.TensorType)\n assert isinstance(t_output.type(), torch._C.TensorType)\n in_shape = t_input.type().sizes()\n out_shape = t_output.type().sizes()\n return {'in_shape': in_shape, 'out_shape': out_shape}\n return None\n\n def _extract_shape_info(self, node):\n \"\"\"\n Extract the shape information of ```aten::view``` node\n\n Parameters\n ----------\n node : trace graph node\n It should be ```aten::view``` node\n\n Returns\n -------\n dict\n Include shape of input tensor and shape of output tensor\n \"\"\"\n t_input = None\n for _input in node.inputs():\n t_input = _input\n break\n t_output = node.output()\n assert isinstance(t_input.type(), torch._C.TensorType)\n assert isinstance(t_output.type(), torch._C.TensorType)\n in_shape = t_input.type().sizes()\n out_shape = t_output.type().sizes()\n return {'in_shape': in_shape, 'out_shape': out_shape}\n\n def _extract_leaf_modules(self):\n \"\"\"\n Extract leaf modules from the given graph. Leaf module means it does not have submodules.\n To extract leaf modules because only leaf module can be replaced. And shape inference can\n be done in leaf module level. 
Other shape inference is done in lower level i.e.,\n operation level.\n\n Returns\n -------\n list\n a list of scope name of all the leaf modules\n \"\"\"\n def is_parent(name1, name2):\n \"\"\"\n check if name1 is parent node of name2, for example:\n name1: aa.bb, name2: aa.bb.cc, return True\n name1: aa.b, name2: aa.bb, return False\n \"\"\"\n parts1, parts2 = name1.split('.'), name2.split('.')\n if len(parts1) >= len(parts2):\n return False\n for i, _ in enumerate(parts1):\n if parts2[i] != parts1[i]:\n return False\n return True\n module_names = sorted([x[0]\n for x in self.trace.named_modules() if x[0]])\n leaf_nodes = []\n for i, name in enumerate(module_names):\n if i + 1 >= len(module_names) or not is_parent(name, module_names[i + 1]):\n leaf_nodes.append(name)\n return leaf_nodes\n\n def _get_module_name(self, scope_name):\n \"\"\"\n Retrieve module name from scope name.\n Parameters:\n -----------\n scope_name: str\n scope_name of a graph node, for example:\n for pytorch 1.3.1: MyModel/BackboneModel[backbone]/Conv2d[conv2]\n for pytorch 1.4.0: __module.backbone/__module.backbone.conv2\n\n Returns:\n -------\n str\n module name, such as backbone.conv2\n \"\"\"\n if torch.__version__ >= '1.4.0':\n return scope_name.split('/')[-1].replace('__module.', '')\n else:\n return '.'.join(re.findall(r'\\[(.*?)\\]', scope_name))\n\n def _build_index(self, nodes_op):\n name_to_node = dict()\n input_to_node = defaultdict(list)\n output_to_node = dict()\n for node in nodes_op:\n name_to_node[node.unique_name] = node\n for _input in node.inputs:\n # inputs may have duplicate tensors\n if node not in input_to_node[_input]:\n input_to_node[_input].append(node)\n for output in node.outputs:\n if output in output_to_node:\n assert output_to_node[output] == node, \\\n \"One output cannot be generated by multiple nodes %s\" % output\n output_to_node[output] = node\n return name_to_node, input_to_node, output_to_node\n\n def _is_key_func(self, node_cpp):\n \"\"\"\n Judge if a cpp node is a key function node.\n If so, we should not merge this node into the\n adjacent node.\n \"\"\"\n if node_cpp.kind().startswith('aten::'):\n # the nodes that start with 'aten' are key function\n # nodes\n return True\n if node_cpp.kind() in [LIST_UNPACK_KIND, TUPLE_UNPACK_KIND]:\n # We cannot merge the List/Tuple\n # Unpack func into other nodes, else it\n # may lead to a graph construction error.\n # The reason why we donnot take the construct node\n # also as a key node is that `cat` operation node need\n # the last(previous) visited node to infer the mask. If\n # we take the Construct node as the important node, the\n # predecessor of the `cat` node will always be a construct\n # node, which means we cannot infer the mask for the cat\n # operation.\n return True\n return False\n\n def unpack_manually(self):\n \"\"\"\n Unpack the tensor tuple or tensor list manually,\n and remove the ListUnpack/TupleUnpack node from\n the graph. Note: this function will change the\n graph structure.\n \"\"\"\n if hasattr(self, 'unpacked'):\n # if already unpacked the tuple/list manually\n return\n for node in self.nodes_py.nodes_op:\n if node.op_type in [TUPLE_UNPACK_KIND, LIST_UNPACK_KIND]:\n unpack_cpp = node.key_node\n last_cpp = list(unpack_cpp.inputs())[0].node()\n if last_cpp.kind() in [TUPLE_CONSTRUCT_KIND, LIST_CONSTRUCT_KIND]:\n # we need check if the tensor tuple or tensor list is produced\n # by a list/tuple construct node. 
If so, we can unpack the tuple\n # or list manunally.\n _logger.debug('List/Tuple Construct Node(cpp) %s', str(last_cpp))\n _logger.debug('List/Tuple Unpack Node(cpp) %s', str(unpack_cpp))\n assert len(list(unpack_cpp.outputs())) == len(list(last_cpp.inputs()))\n errmsg = '%s Input number: %d if inconsistent with the output number %d' % (unpack_cpp, \\\n len(node.inputs), len(list(last_cpp.inputs())))\n\n assert len(node.inputs) == len(list(last_cpp.inputs())), errmsg\n for _debug_input, _debug_output in zip(node.inputs, node.outputs):\n if _debug_input in self.input_to_node and _debug_output in self.input_to_node:\n # input_to_node[_debug_input] is a list of NodePyGroup, because\n # one tensor can be used as input for multiple nodes at the same time.\n\n # note that, in this case, the construct cpp node and unpack cpp node\n # will be merged into the same NodePyGroup, so we remove the `node` from\n # input_to_node[_debug_input] and directly connect this tensor to the\n # input_to_node[_debug_output]\n if node in self.input_to_node[_debug_input]:\n self.input_to_node[_debug_input].remove(node)\n # add the following nodes of _output into the input_to_node[_debug_input]\n self.input_to_node[_debug_input].extend(self.input_to_node[_debug_output])\n # just remove the _debug_output from the grapgh index. So that we can also skip\n # the construct and tuple\n if _debug_output in self.input_to_node:\n for following_node in self.input_to_node[_debug_output]:\n _tmp_index = following_node.inputs.index(_debug_output)\n following_node.inputs[_tmp_index] = _debug_input\n\n\n self.unpacked = True\n\n def _build_graph(self):\n \"\"\"\n Build graph using our defined format from jit trace.\n There are basically three steps: first, construct necessary information (data structures),\n second, extract all the modules to convert to node, Third, extract all functions to convert\n to node.\n\n Returns\n -------\n dict\n use name to index nodes, key: node name, value: node\n dict\n use input (its name) to index nodes,\n key: input, value: list of nodes that take this input\n dict\n use output (its name) to index nodes,\n key: output, value: node that generates this output\n \"\"\"\n omit_useless_nodes = True\n graph = self.trace.graph\n _logger.debug(graph)\n # build input/output mapping, from input/output debugName to its node\n input_to_node = defaultdict(list)\n output_to_node = defaultdict(list)\n for node in graph.nodes():\n if node.kind() == CONSTANT_KIND:\n continue\n for x in node.outputs():\n if x.node().kind() == CONSTANT_KIND:\n continue\n output_to_node[x.debugName()].append(node)\n assert len(output_to_node[x.debugName()]) <= 1, \"One output cannot be generated by multiple nodes %s\" % x.debugName()\n for x in node.inputs():\n if x.node().kind() == CONSTANT_KIND:\n continue\n input_to_node[x.debugName()].append(node)\n\n # build module mapping, from module name to all nodes (as list) under this module scope\n module_to_nodes = defaultdict(list)\n # the mapping of function (non-module in forward) to nodes, key is scope name\n func_to_nodes = defaultdict(list)\n\n nodes_py = GraphPy()\n for node in graph.inputs():\n if omit_useless_nodes:\n if not node.uses(): # number of user of the node (= number of outputs/ fanout)\n continue\n\n if node.type().kind() != 'ClassType':\n nodes_py.append(NodePyIO(node, 'input'))\n\n self.leaf_modules = self._extract_leaf_modules()\n module_to_type = {name: parse_traced_name(\n module._name) for name, module in self.trace.named_modules()}\n\n # associate module name 
with their trace graph nodes\n for node in graph.nodes():\n if node.kind() == CONSTANT_KIND:\n continue\n module_name = self._get_module_name(node.scopeName())\n if module_name in self.leaf_modules:\n module_to_nodes[module_name].append(node)\n else:\n func_to_nodes[node.scopeName()].append(node)\n # build node group for module\n for module_name, node_cpps in module_to_nodes.items():\n use_count = 0\n merged = set()\n for node in node_cpps:\n if node not in merged:\n # modules that have same scope name may have different locations in the\n # graph. Futhermore, there are also lots of prim:: nodes that in node_cpps,\n # so we also need to call the expand_module_node.\n unique_name = module_name\n if use_count > 0:\n unique_name = module_name + '.%d' % use_count\n self.reused_module.add(unique_name)\n self.reused_module.add(module_name)\n node_group = self._expand_module_node(\n node, module_name, unique_name, module_to_type[module_name],\n node_cpps, input_to_node, output_to_node, 'module')\n nodes_py.nodes_op.append(node_group)\n use_count += 1\n merged.update(node_group.node_cpps)\n\n # each scope_name may have multiple funcs, we split them and create node for each of them\n # build node group for torch.nn.functional\n for _, nodes in func_to_nodes.items():\n # extract non prim:: nodes\n key_func_nodes = list()\n for node in nodes:\n if self._is_key_func(node):\n # find the key function nodes\n key_func_nodes.append(node)\n # for each non prim node, expand it\n for node in key_func_nodes:\n node_group = self._expand_key_func_node(\n node, nodes, input_to_node, output_to_node, 'func')\n nodes_py.nodes_op.append(node_group)\n # get shape infor for view (aten::view) func\n # if node_group.op_type in ['aten::view', 'aten::flatten']:\n # node_group.auxiliary = self._extract_shape_info(node)\n\n for node in graph.outputs(): # Create sink nodes for output ops\n node_py = NodePyIO(node, 'output')\n nodes_py.append(node_py)\n\n self.nodes_py = nodes_py\n # build index\n return self._build_index(self.nodes_py.nodes_op)\n\n def _extract_auxiliary_info(self):\n \"\"\"\n Extract the auxiliary information for the nodegroups\n if necessary. 
For example, view/flatten operations may\n need the shape of the input tensor and output tensor.\n \"\"\"\n # extract the input & output shape for the view and flatten\n for node_group in self.nodes_py.nodes_op:\n if node_group.op_type in ['aten::view', 'aten::flatten', 'aten::mean', 'aten::reshape']:\n # get shape infor for view (aten::view) func\n cpp_node = list(filter(lambda x: x.kind() == node_group.op_type,\n node_group.node_cpps))[0]\n node_group.auxiliary = self._extract_shape_info(cpp_node)\n elif node_group.op_type == 'Linear':\n node_group.auxiliary = self._extract_linear_shape_info(node_group)\n elif node_group.op_type == CAT_KIND:\n # get the detail information for cat func\n cpp_node = list(filter(lambda x: x.kind() == node_group.op_type,\n node_group.node_cpps))[0]\n node_group.auxiliary = self._extract_cat_info(\n node_group, cpp_node)\n\n def find_predecessors(self, unique_name):\n \"\"\"\n Find predecessor node of the given node\n\n Parameters\n ----------\n unique_name : str\n The unique name of the node\n\n Returns\n -------\n list\n a list of nodes who are the given node's predecessor\n \"\"\"\n predecessors = []\n for _input in self.name_to_node[unique_name].inputs:\n if not _input in self.output_to_node:\n _logger.debug(\"cannot find node with %s as its output\", _input)\n else:\n node_py = self.output_to_node[_input]\n predecessors.append(node_py.unique_name)\n return predecessors\n\n def find_successors(self, unique_name):\n \"\"\"\n Find successor nodes of the given node\n\n Parameters\n ----------\n unique_name : str\n The unique name of the node\n\n Returns\n -------\n list\n a list of nodes who are the given node's successor\n \"\"\"\n successors = []\n for output in self.name_to_node[unique_name].outputs:\n if output not in self.input_to_node:\n # may reach the output of the whole graph\n continue\n nodes_py = self.input_to_node[output]\n for node_py in nodes_py:\n successors.append(node_py.unique_name)\n return successors\n"
] | [
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.nn.max_pool",
"tensorflow.cast",
"tensorflow.train.AdamOptimizer",
"tensorflow.get_default_graph",
"tensorflow.nn.conv2d",
"tensorflow.Variable",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.nn.avg_pool",
"tensorflow.constant",
"tensorflow.summary.FileWriter",
"tensorflow.reshape"
],
[
"torch.set_default_tensor_type",
"sklearn.utils.validation.check_is_fitted",
"pandas.DataFrame",
"numpy.zeros_like",
"torch.no_grad",
"numpy.cross",
"numpy.where",
"numpy.unique",
"numpy.arange",
"numpy.ceil",
"numpy.argmax",
"numpy.argpartition",
"torch.get_default_dtype",
"numpy.zeros",
"torch.sigmoid",
"numpy.argsort",
"numpy.array",
"numpy.linalg.norm",
"numpy.sort",
"numpy.ones"
],
[
"numpy.expand_dims"
],
[
"torch.Size",
"torch.ones"
],
[
"torch.ones",
"torch.randint",
"torch.nn.BCELoss",
"torch.nn.Linear",
"torch.cuda.is_available"
],
[
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.functional.max_pool2d"
],
[
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_digits",
"sklearn.svm.SVC"
],
[
"numpy.random.random",
"numpy.reshape",
"torch.from_numpy",
"numpy.random.shuffle",
"numpy.stack",
"numpy.random.rand",
"numpy.array",
"numpy.zeros"
],
[
"torch.no_grad"
],
[
"numpy.array"
],
[
"torch.utils.tensorboard._pytorch_graph.GraphPy",
"torch.jit.trace",
"torch.utils.tensorboard._pytorch_graph.NodePyIO",
"torch._C._jit_pass_inline",
"torch.utils.tensorboard._pytorch_graph.NodePyOP"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jessehui/occlum | [
"8a5f3033881c090340d678f2aecdca4ac6355bf4"
] | [
"demos/python/python_musl/demo.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn.datasets import dump_svmlight_file\n\ndf1 = pd.read_csv(\"./dataset/input_label.csv\")\ndf2 = pd.read_csv(\"./dataset/input.csv\")\nres = pd.merge(df1, df2, how='left', left_on='id', right_on='id')\n\nX = res[np.setdiff1d(res.columns,['label','id'])]\ny = res.label\n\ndump_svmlight_file(X,y,'/host/smvlight.dat',zero_based=True,multilabel=False)\n"
] | [
[
"pandas.merge",
"pandas.read_csv",
"sklearn.datasets.dump_svmlight_file",
"numpy.setdiff1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
adrenadine33/graphvite | [
"34fc203f96ff13095073c605ecfcae32213e7f6a"
] | [
"python/graphvite/application/application.py"
] | [
"# Copyright 2019 MilaGraph. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Zhaocheng Zhu\n\n\"\"\"Implementation of applications\"\"\"\nfrom __future__ import print_function, absolute_import, unicode_literals, division\n\nimport os\nimport re\nimport pickle\nimport logging\nimport multiprocessing\nfrom collections import defaultdict\n\nfrom future.builtins import str, map, range\nfrom easydict import EasyDict\nimport numpy as np\n\nfrom .. import lib, cfg, auto\nfrom .. import graph, solver\nfrom ..util import assert_in, monitor, SharedNDArray\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApplicationMixin(object):\n \"\"\"\n General interface of graph applications.\n\n Parameters:\n dim (int): dimension of embeddings\n gpus (list of int, optional): GPU ids, default is all GPUs\n cpu_per_gpu (int, optional): number of CPU threads per GPU, default is all CPUs\n gpu_memory_limit (int, optional): memory limit per GPU in bytes, default is all memory\n float_type (dtype, optional): type of parameters\n index_type (dtype, optional): type of graph indexes\n \"\"\"\n def __init__(self, dim, gpus=[], cpu_per_gpu=auto, gpu_memory_limit=auto,\n float_type=cfg.float_type, index_type=cfg.index_type):\n self.dim = dim\n self.gpus = gpus\n self.cpu_per_gpu = cpu_per_gpu\n self.gpu_memory_limit = gpu_memory_limit\n self.float_type = float_type\n self.index_type = index_type\n self.set_format()\n\n def get_graph(self, **kwargs):\n raise NotImplementedError\n\n def get_solver(self, **kwargs):\n raise NotImplementedError\n\n def set_format(self, delimiters=\" \\t\\r\\n\", comment=\"#\"):\n \"\"\"\n Set the format for parsing input data.\n\n Parameters:\n delimiters (str, optional): string of delimiter characters\n comment (str, optional): prefix of comment strings\n \"\"\"\n self.delimiters = delimiters\n self.comment = comment\n self.pattern = re.compile(\"[%s]\" % self.delimiters)\n\n @monitor.time\n def load(self, **kwargs):\n \"\"\"load(**kwargs)\n Load a graph from file or Python object.\n Arguments depend on the underlying graph type.\n \"\"\"\n self.graph = self.get_graph(**kwargs)\n if \"file_name\" in kwargs or \"vector_file\" in \"kwargs\":\n self.graph.load(delimiters=self.delimiters, comment=self.comment, **kwargs)\n else:\n self.graph.load(**kwargs)\n\n @monitor.time\n def build(self, **kwargs):\n \"\"\"build(**kwargs)\n Build the solver from the graph.\n Arguments depend on the underlying solver type.\n \"\"\"\n self.solver = self.get_solver(**kwargs)\n self.solver.build(self.graph, **kwargs)\n\n @monitor.time\n def train(self, **kwargs):\n \"\"\"train(**kwargs)\n Train embeddings with the solver.\n Arguments depend on the underlying solver type.\n \"\"\"\n self.solver.train(**kwargs)\n\n @monitor.time\n def evaluate(self, task, **kwargs):\n \"\"\"evaluate(task, **kwargs)\n Evaluate the learned embeddings on a downstream task.\n Arguments depend on the underlying graph type and the task.\n\n Parameters:\n task (str): name of task\n\n Returns:\n dict: 
metrics and their values\n \"\"\"\n func_name = task.replace(\" \", \"_\")\n if not hasattr(self, func_name):\n raise ValueError(\"Unknown task `%s`\" % task)\n\n logger.info(lib.io.header(task))\n result = getattr(self, func_name)(**kwargs)\n if isinstance(result, dict):\n for metric, value in sorted(result.items()):\n logger.warning(\"%s: %g\" % (metric, value))\n\n return result\n\n @monitor.time\n def load_model(self, file_name):\n \"\"\"\n Load model in pickle format.\n\n Parameters:\n file_name (str): file name:\n \"\"\"\n logger.warning(\"load model from `%s`\" % file_name)\n\n with open(file_name, \"rb\") as fin:\n model = pickle.load(fin)\n self.set_parameters(model)\n\n @monitor.time\n def save_model(self, file_name, save_hyperparameter=False):\n \"\"\"\n Save model in pickle format.\n\n Parameters:\n file_name (str): file name\n save_hyperparameter (bool, optional): save hyperparameters or not, default is false\n \"\"\"\n def is_mapping(name, attribute):\n return \"2\" in name\n\n def is_embedding(name, attribute):\n if name[0] == \"_\":\n return False\n return isinstance(attribute, np.ndarray)\n\n def is_hyperparameter(name, attribute):\n if name[0] == \"_\":\n return False\n return isinstance(attribute, int) or isinstance(attribute, float) or isinstance(attribute, str)\n\n def get_attributes(object, filter):\n attributes = EasyDict()\n for name in dir(object):\n attribute = getattr(object, name)\n if filter(name, attribute):\n attributes[name] = attribute\n return attributes\n\n logger.warning(\"save model to `%s`\" % file_name)\n\n model = EasyDict()\n model.graph = get_attributes(self.graph, is_mapping)\n model.solver = get_attributes(self.solver, is_embedding)\n if save_hyperparameter:\n model.graph.update(get_attributes(self.graph, is_hyperparameter))\n model.solver.update(get_attributes(self.solver, is_hyperparameter))\n model.solver.optimizer = get_attributes(self.solver.optimizer, is_hyperparameter)\n model.solver.optimizer.schedule = self.solver.optimizer.schedule.type\n\n with open(file_name, \"wb\") as fout:\n pickle.dump(model, fout, protocol=pickle.HIGHEST_PROTOCOL)\n\n def get_mapping(self, id2name, name2id):\n mapping = []\n for name in id2name:\n if name not in name2id:\n raise ValueError(\"Can't find the embedding for `%s`\" % name)\n mapping.append(name2id[name])\n return mapping\n\n def tokenize(self, str):\n str = str.strip(self.delimiters)\n comment_start = str.find(self.comment)\n if comment_start != -1:\n str = str[:comment_start]\n return self.pattern.split(str)\n\n def name_map(self, dicts, names):\n assert len(dicts) == len(names), \"The number of dictionaries and names must be equal\"\n\n indexes = [[] for _ in range(len(names))]\n num_param = len(names)\n num_sample = len(names[0])\n for i in range(num_sample):\n valid = True\n for j in range(num_param):\n if names[j][i] not in dicts[j]:\n valid = False\n break\n if valid:\n for j in range(num_param):\n indexes[j].append(dicts[j][names[j][i]])\n return indexes\n\n def gpu_map(self, func, settings):\n import torch\n\n gpus = self.gpus if self.gpus else range(torch.cuda.device_count())\n new_settings = []\n for i, setting in enumerate(settings):\n new_settings.append(setting + (gpus[i % len(gpus)],))\n settings = new_settings\n\n try:\n start_method = multiprocessing.get_start_method()\n # if there are other running processes, this could cause leakage of semaphores\n multiprocessing.set_start_method(\"spawn\", force=True)\n pool = multiprocessing.Pool(len(gpus))\n results = pool.map(func, settings, 
chunksize=1)\n multiprocessing.set_start_method(start_method, force=True)\n except AttributeError:\n logger.info(\"Spawn mode is not supported by multiprocessing. Switch to serial execution.\")\n results = list(map(func, settings))\n\n return results\n\n\nclass GraphApplication(ApplicationMixin):\n \"\"\"\n Node embedding application.\n\n Given a graph, it embeds each node into a continuous vector representation.\n The learned embeddings can be used for many downstream tasks.\n e.g. **node classification**, **link prediction**, **node analogy**.\n The similarity between node embeddings can be measured by cosine distance.\n\n Supported Models:\n - DeepWalk (`DeepWalk: Online Learning of Social Representations`_)\n - LINE (`LINE: Large-scale Information Network Embedding`_)\n - node2vec (`node2vec: Scalable Feature Learning for Networks`_)\n\n .. _DeepWalk\\: Online Learning of Social Representations:\n https://arxiv.org/pdf/1403.6652.pdf\n .. _LINE\\: Large-scale Information Network Embedding:\n https://arxiv.org/pdf/1503.03578.pdf\n .. _node2vec\\: Scalable Feature Learning for Networks:\n https://www.kdd.org/kdd2016/papers/files/rfp0218-groverA.pdf\n\n Parameters:\n dim (int): dimension of embeddings\n gpus (list of int, optional): GPU ids, default is all GPUs\n cpu_per_gpu (int, optional): number of CPU threads per GPU, default is all CPUs\n float_type (dtype, optional): type of parameters\n index_type (dtype, optional): type of graph indexes\n\n See also:\n :class:`Graph <graphvite.graph.Graph>`,\n :class:`GraphSolver <graphvite.solver.GraphSolver>`\n \"\"\"\n\n def get_graph(self, **kwargs):\n return graph.Graph(self.index_type)\n\n def get_solver(self, **kwargs):\n if self.cpu_per_gpu == auto:\n num_sampler_per_worker = auto\n else:\n num_sampler_per_worker = self.cpu_per_gpu - 1\n return solver.GraphSolver(self.dim, self.float_type, self.index_type, self.gpus, num_sampler_per_worker,\n self.gpu_memory_limit)\n\n def set_parameters(self, model):\n mapping = self.get_mapping(self.graph.id2name, model.graph.name2id)\n self.solver.vertex_embeddings[:] = model.solver.vertex_embeddings[mapping]\n self.solver.context_embeddings[:] = model.solver.context_embeddings[mapping]\n\n def node_classification(self, X=None, Y=None, file_name=None, portions=(0.02,), normalization=False, times=1,\n patience=100):\n \"\"\"\n Evaluate node embeddings on node classification task.\n\n Parameters:\n X (list of str, optional): names of nodes\n Y (list, optional): labels of nodes\n file_name (str, optional): file of nodes & labels\n portions (tuple of float, optional): how much data for training\n normalization (bool, optional): normalize the embeddings or not\n times (int, optional): number of trials\n patience (int, optional): patience on loss convergence\n\n Returns:\n dict: macro-F1 & micro-F1 averaged over all trials\n \"\"\"\n import scipy.sparse as sp\n\n self.solver.clear()\n\n if file_name:\n if not (X is None and Y is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n X = []\n Y = []\n with open(file_name, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n x, y = tokens\n X.append(x)\n Y.append(y)\n if X is None or Y is None:\n raise ValueError(\"Either evaluataion data (X, Y) or a file name should be provided\")\n\n name2id = self.graph.name2id\n class2id = {c:i for i, c in enumerate(np.unique(Y))}\n new_X, new_Y = self.name_map((name2id, class2id), (X, Y))\n logger.info(\"effective labels: %d / %d\" % 
(len(new_X), len(X)))\n X = np.asarray(new_X)\n Y = np.asarray(new_Y)\n\n labels = sp.coo_matrix((np.ones_like(X), (X, Y)), dtype=np.int32).todense()\n indexes, _ = np.where(np.sum(labels, axis=1) > 0)\n # discard non-labeled nodes\n labels = labels[indexes]\n vertex_embeddings = SharedNDArray(self.solver.vertex_embeddings[indexes])\n\n settings = []\n for portion in portions:\n settings.append((vertex_embeddings, labels, portion, normalization, times, patience))\n results = self.gpu_map(linear_classification, settings)\n\n metrics = {}\n for result in results:\n metrics.update(result)\n return metrics\n\n def link_prediction(self, H=None, T=None, Y=None, file_name=None, filter_H=None, filter_T=None, filter_file=None):\n \"\"\"\n Evaluate node embeddings on link prediction task.\n\n Parameters:\n H (list of str, optional): names of head nodes\n T (list of str, optional): names of tail nodes\n Y (list of int, optional): labels of edges\n file_name (str, optional): file of edges and labels (e.g. validation set)\n filter_H (list of str, optional): names of head nodes to filter out\n filter_T (list of str, optional): names of tail nodes to filter out\n filter_file (str, optional): file of edges to filter out (e.g. training set)\n\n Returns:\n dict: AUC of link prediction\n \"\"\"\n import torch\n\n from .network import LinkPredictor\n\n self.solver.clear()\n\n if file_name:\n if not (H is None and T is None and Y is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n H = []\n T = []\n Y = []\n with open(file_name, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n h, t, y = tokens\n H.append(h)\n T.append(t)\n Y.append(y)\n if H is None or T is None or Y is None:\n raise ValueError(\"Either evaluation data or file should be provided\")\n\n if filter_file:\n if not (filter_H is None and filter_T is None):\n raise ValueError(\"Filter data and file should not be provided at the same time\")\n filter_H = []\n filter_T = []\n with open(filter_file, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n h, t = tokens\n filter_H.append(h)\n filter_T.append(t)\n elif filter_H is None:\n filter_H = []\n filter_T = []\n\n name2id = self.graph.name2id\n Y = [int(y) for y in Y]\n new_H, new_T, new_Y = self.name_map((name2id, name2id, {0:0, 1:1}), (H, T, Y))\n logger.info(\"effective edges: %d / %d\" % (len(new_H), len(H)))\n H = new_H\n T = new_T\n Y = new_Y\n new_H, new_T = self.name_map((name2id, name2id), (filter_H, filter_T))\n logger.info(\"effective filter edges: %d / %d\" % (len(new_H), len(filter_H)))\n filters = set(zip(new_H, new_T))\n new_H = []\n new_T = []\n new_Y = []\n for h, t, y in zip(H, T, Y):\n if (h, t) not in filters:\n new_H.append(h)\n new_T.append(t)\n new_Y.append(y)\n logger.info(\"remaining edges: %d / %d\" % (len(new_H), len(H)))\n H = np.asarray(new_H)\n T = np.asarray(new_T)\n Y = np.asarray(new_Y)\n\n vertex_embeddings = self.solver.vertex_embeddings\n context_embeddings = self.solver.context_embeddings\n model = LinkPredictor(self.solver.model, vertex_embeddings, context_embeddings)\n model = model.cuda()\n\n H = torch.as_tensor(H)\n T = torch.as_tensor(T)\n Y = torch.as_tensor(Y)\n H = H.cuda()\n T = T.cuda()\n Y = Y.cuda()\n score = model(H, T)\n order = torch.argsort(score, descending=True)\n Y = Y[order]\n hit = torch.cumsum(Y, dim=0)\n all = torch.sum(Y == 0) * torch.sum(Y == 1)\n auc = torch.sum(hit[Y == 0]).item() / 
all.item()\n\n return {\n \"AUC\": auc\n }\n\n\ndef linear_classification(args):\n import torch\n from torch import optim\n from torch.nn import functional as F\n from .network import NodeClassifier\n\n def generate_one_vs_rest(indexes, labels):\n new_indexes = []\n new_labels = []\n num_class = labels.shape[1]\n for index, sample_labels in zip(indexes, labels):\n for cls in np.where(sample_labels)[0]:\n new_indexes.append(index)\n new_label = np.zeros(num_class, dtype=np.int)\n new_label[cls] = 1\n new_labels.append(new_label)\n return torch.as_tensor(new_indexes), torch.as_tensor(new_labels)\n\n embeddings, labels, portion, normalization, times, patience, gpu = args\n embeddings = np.asarray(embeddings)\n num_sample, num_class = labels.shape\n num_train = int(num_sample * portion)\n\n macro_f1s = []\n micro_f1s = []\n for t in range(times):\n samples = np.random.permutation(num_sample)\n train_samples = samples[:num_train]\n train_labels = np.asarray(labels[train_samples])\n train_samples, train_labels = generate_one_vs_rest(train_samples, train_labels)\n test_samples = torch.as_tensor(samples[num_train:])\n test_labels = torch.as_tensor(labels[test_samples])\n\n model = NodeClassifier(embeddings, num_class, normalization=normalization)\n\n train_samples = train_samples.cuda(gpu)\n train_labels = train_labels.cuda(gpu)\n test_samples = test_samples.cuda(gpu)\n test_labels = test_labels.cuda(gpu)\n model = model.cuda(gpu)\n\n # train\n optimizer = optim.SGD(model.parameters(), lr=1, weight_decay=2e-5, momentum=0.9)\n best_loss = float(\"inf\")\n best_epoch = -1\n for epoch in range(100000):\n optimizer.zero_grad()\n logits = model(train_samples)\n loss = F.binary_cross_entropy_with_logits(logits, train_labels.float())\n loss.backward()\n optimizer.step()\n\n loss = loss.item()\n if loss < best_loss:\n best_epoch = epoch\n best_loss = loss\n if epoch == best_epoch + patience:\n break\n\n # test\n logits = model(test_samples)\n num_labels = test_labels.sum(dim=1, keepdim=True)\n sorted, _ = logits.sort(dim=1, descending=True)\n thresholds = sorted.gather(dim=1, index=num_labels-1)\n predictions = (logits >= thresholds).int()\n # compute metric\n num_TP_per_class = (predictions & test_labels).sum(dim=0).float()\n num_T_per_class = test_labels.sum(dim=0).float()\n num_P_per_class = predictions.sum(dim=0).float()\n macro_f1s.append((2 * num_TP_per_class / (num_T_per_class + num_P_per_class)).mean().item())\n num_TP = (predictions & test_labels).sum().float()\n num_T = test_labels.sum().float()\n num_P = predictions.sum().float()\n micro_f1s.append((2 * num_TP / (num_T + num_P)).item())\n\n return {\n \"macro-F1@%g%%\" % (portion * 100): np.mean(macro_f1s),\n \"micro-F1@%g%%\" % (portion * 100): np.mean(micro_f1s)\n }\n\n\nclass WordGraphApplication(ApplicationMixin):\n \"\"\"\n Word node embedding application.\n\n Given a corpus, it embeds each word into a continuous vector representation.\n The learned embeddings can be used for natural language processing tasks.\n This can be viewed as a variant of the word2vec algorithm, with random walk augmentation support.\n The similarity between node embeddings can be measured by cosine distance.\n\n Supported Models:\n - LINE (`LINE: Large-scale Information Network Embedding`_)\n\n Parameters:\n dim (int): dimension of embeddings\n gpus (list of int, optional): GPU ids, default is all GPUs\n cpu_per_gpu (int, optional): number of CPU threads per GPU, default is all CPUs\n float_type (dtype, optional): type of parameters\n index_type (dtype, optional): 
type of graph indexes\n\n See also:\n :class:`WordGraph <graphvite.graph.WordGraph>`,\n :class:`GraphSolver <graphvite.solver.GraphSolver>`\n \"\"\"\n def get_graph(self, **kwargs):\n return graph.WordGraph(self.index_type)\n\n def get_solver(self, **kwargs):\n if self.cpu_per_gpu == auto:\n num_sampler_per_worker = auto\n else:\n num_sampler_per_worker = self.cpu_per_gpu - 1\n return solver.GraphSolver(self.dim, self.float_type, self.index_type, self.gpus, num_sampler_per_worker,\n self.gpu_memory_limit)\n\n def set_parameters(self, model):\n mapping = self.get_mapping(self.graph.id2name, model.graph.name2id)\n self.solver.vertex_embeddings[:] = model.solver.vertex_embeddings[mapping]\n self.solver.context_embeddings[:] = model.solver.context_embeddings[mapping]\n\n\nclass KnowledgeGraphApplication(ApplicationMixin):\n \"\"\"\n Knowledge graph embedding application.\n\n Given a knowledge graph, it embeds each entity and relation into a continuous vector representation respectively.\n The learned embeddings can be used for analysis of knowledge graphs.\n e.g. **entity prediction**, **link prediction**.\n The likelihood of edges can be predicted by computing the score function over embeddings of triplets.\n\n Supported Models:\n - TransE (`Translating Embeddings for Modeling Multi-relational Data`_)\n - DistMult (`Embedding Entities and Relations for Learning and Inference in Knowledge Bases`_)\n - ComplEx (`Complex Embeddings for Simple Link Prediction`_)\n - SimplE (`SimplE Embedding for Link Prediction in Knowledge Graphs`_)\n - RotatE (`RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space`_)\n\n .. _Translating Embeddings for Modeling Multi-relational Data:\n http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-relational-data.pdf\n .. _Embedding Entities and Relations for Learning and Inference in Knowledge Bases:\n https://arxiv.org/pdf/1412.6575.pdf\n .. _Complex Embeddings for Simple Link Prediction:\n http://proceedings.mlr.press/v48/trouillon16.pdf\n .. _SimplE Embedding for Link Prediction in Knowledge Graphs:\n https://papers.nips.cc/paper/7682-simple-embedding-for-link-prediction-in-knowledge-graphs.pdf\n .. _RotatE\\: Knowledge Graph Embedding by Relational Rotation in Complex Space:\n https://arxiv.org/pdf/1902.10197.pdf\n\n Parameters:\n dim (int): dimension of embeddings\n gpus (list of int, optional): GPU ids, default is all GPUs\n cpu_per_gpu (int, optional): number of CPU threads per GPU, default is all CPUs\n float_type (dtype, optional): type of parameters\n index_type (dtype, optional): type of graph indexes\n\n Note:\n The implementation of TransE, DistMult and ComplEx, SimplE are slightly different from their original papers.\n The loss function and the regularization term generally follow `this repo`_.\n Self-adversarial negative sampling is also adopted in these models like RotatE.\n\n .. 
_this repo: https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding\n\n See also:\n :class:`KnowledgeGraph <graphvite.graph.KnowledgeGraph>`,\n :class:`KnowledgeGraphSolver <graphvite.solver.KnowledgeGraphSolver>`\n \"\"\"\n\n SAMPLE_PER_DIMENSION = 7\n MEMORY_SCALE_FACTOR = 1.5\n\n def get_graph(self, **kwargs):\n return graph.KnowledgeGraph(self.index_type)\n\n def get_solver(self, **kwargs):\n if self.cpu_per_gpu == auto:\n num_sampler_per_worker = auto\n else:\n num_sampler_per_worker = self.cpu_per_gpu - 1\n return solver.KnowledgeGraphSolver(self.dim, self.float_type, self.index_type, self.gpus, num_sampler_per_worker,\n self.gpu_memory_limit)\n\n def set_parameters(self, model):\n entity_mapping = self.get_mapping(self.graph.id2entity, model.graph.entity2id)\n relation_mapping = self.get_mapping(self.graph.id2relation, model.graph.relation2id)\n self.solver.entity_embeddings[:] = model.solver.entity_embeddings[entity_mapping]\n self.solver.relation_embeddings[:] = model.solver.relation_embeddings[relation_mapping]\n\n def entity_prediction(self, H=None, R=None, T=None, file_name=None, save_file=None, target=\"tail\", k=10,\n backend=cfg.backend):\n \"\"\"\n Predict the distribution of missing entity or relation for triplets.\n\n Parameters:\n H (list of str, optional): names of head entities\n R (list of str, optional): names of relations\n T (list of str, optional): names of tail entities\n file_name (str, optional): file of triplets (e.g. validation set)\n save_file (str, optional): ``txt`` or ``pkl`` file to save predictions\n k (int, optional): top-k recalls will be returned\n target (str, optional): 'head' or 'tail'\n backend (str, optional): 'graphvite' or 'torch'\n\n Return:\n list of list of tuple: top-k recalls for each triplet, if save file is not provided\n \"\"\"\n def torch_predict():\n import torch\n\n entity_embeddings = SharedNDArray(self.solver.entity_embeddings)\n relation_embeddings = SharedNDArray(self.solver.relation_embeddings)\n\n num_gpu = len(self.gpus) if self.gpus else torch.cuda.device_count()\n work_load = (num_sample + num_gpu - 1) // num_gpu\n settings = []\n\n for i in range(num_gpu):\n work_H = H[work_load * i: work_load * (i+1)]\n work_R = R[work_load * i: work_load * (i+1)]\n work_T = T[work_load * i: work_load * (i+1)]\n settings.append((entity_embeddings, relation_embeddings, work_H, work_R, work_T,\n None, None, target, k, self.solver.model, self.solver.margin))\n\n results = self.gpu_map(triplet_prediction, settings)\n return sum(results, [])\n\n def graphvite_predict():\n num_entity = len(entity2id)\n batch_size = self.get_batch_size(num_entity)\n recalls = []\n\n for i in range(0, num_sample, batch_size):\n batch_h = H[i: i + batch_size]\n batch_r = R[i: i + batch_size]\n batch_t = T[i: i + batch_size]\n batch = self.generate_one_vs_rest(batch_h, batch_r, batch_t, num_entity, target)\n\n scores = self.solver.predict(batch)\n scores = scores.reshape(-1, num_entity)\n indexes = np.argpartition(scores, num_entity - k, axis=-1)\n for index, score in zip(indexes, scores):\n index = index[-k:]\n score = score[index]\n order = np.argsort(score)[::-1]\n recall = list(zip(index[order], score[order]))\n recalls.append(recall)\n\n return recalls\n\n assert_in([\"head\", \"tail\"], target=target)\n assert_in([\"graphvite\", \"torch\"], backend=backend)\n\n if backend == \"torch\":\n self.solver.clear()\n\n if file_name:\n if not (H is None and R is None and T is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same 
time\")\n H = []\n R = []\n T = []\n with open(file_name, \"r\") as fin:\n for i, line in enumerate(fin):\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n if 3 <= len(tokens) <= 4:\n h, r, t = tokens[:3]\n elif len(tokens) == 2:\n if target == \"head\":\n r, t = tokens\n h = None\n else:\n h, r = tokens\n t = None\n else:\n raise ValueError(\"Invalid line format at line %d in %s\" % (i + 1, file_name))\n H.append(h)\n R.append(r)\n T.append(t)\n if (H is None and T is None) or R is None:\n raise ValueError(\"Either evaluation data or file should be provided\")\n if H is None:\n target = \"head\"\n if T is None:\n target = \"tail\"\n\n entity2id = self.graph.entity2id\n relation2id = self.graph.relation2id\n num_sample = len(R)\n new_H = np.zeros(num_sample, dtype=np.uint32)\n new_T = np.zeros(num_sample, dtype=np.uint32)\n if target == \"head\":\n new_R, new_T = self.name_map((relation2id, entity2id), (R, T))\n if target == \"tail\":\n new_H, new_R = self.name_map((entity2id, relation2id), (H, R))\n assert len(new_R) == len(R), \"Can't recognize some entities or relations\"\n H = np.asarray(new_H, dtype=np.uint32)\n R = np.asarray(new_R, dtype=np.uint32)\n T = np.asarray(new_T, dtype=np.uint32)\n\n if backend == \"graphvite\":\n recalls = graphvite_predict()\n else:\n recalls = torch_predict()\n\n id2entity = self.graph.id2entity\n new_recalls = []\n for recall in recalls:\n new_recall = [(id2entity[e], s) for e, s in recall]\n new_recalls.append(new_recall)\n recalls = new_recalls\n\n if save_file:\n logger.warning(\"save entity predictions to `%s`\" % save_file)\n extension = os.path.splitext(save_file)[1]\n if extension == \".txt\":\n with open(save_file, \"w\") as fout:\n for recall in recalls:\n tokens = [\"%s: %g\" % x for x in recall]\n fout.write(\"%s\\n\" % \"\\t\".join(tokens))\n elif extension == \".pkl\":\n with open(save_file, \"wb\") as fout:\n pickle.dump(recalls, fout, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n raise ValueError(\"Unknown file extension `%s`\" % extension)\n else:\n return recalls\n\n def link_prediction(self, H=None, R=None, T=None, filter_H=None, filter_R=None, filter_T=None, file_name=None,\n filter_files=None, target=\"both\", fast_mode=None, backend=cfg.backend):\n \"\"\"\n Evaluate knowledge graph embeddings on link prediction task.\n\n Parameters:\n H (list of str, optional): names of head entities\n R (list of str, optional): names of relations\n T (list of str, optional): names of tail entities\n file_name (str, optional): file of triplets (e.g. validation set)\n filter_H (list of str, optional): names of head entities to filter out\n filter_R (list of str, optional): names of relations to filter out\n filter_T (list of str, optional): names of tail entities to filter out\n filter_files (str, optional): files of triplets to filter out (e.g. 
training / validation / test set)\n target (str, optional): 'head', 'tail' or 'both'\n fast_mode (int, optional): if specified, only that number of samples will be evaluated\n backend (str, optional): 'graphvite' or 'torch'\n\n Returns:\n dict: MR, MRR, HITS\\@1, HITS\\@3 & HITS\\@10 of link prediction\n \"\"\"\n def torch_predict():\n import torch\n\n entity_embeddings = SharedNDArray(self.solver.entity_embeddings)\n relation_embeddings = SharedNDArray(self.solver.relation_embeddings)\n\n num_gpu = len(self.gpus) if self.gpus else torch.cuda.device_count()\n work_load = (fast_mode + num_gpu - 1) // num_gpu\n settings = []\n\n for i in range(num_gpu):\n work_H = H[work_load * i: work_load * (i+1)]\n work_R = R[work_load * i: work_load * (i+1)]\n work_T = T[work_load * i: work_load * (i+1)]\n settings.append((entity_embeddings, relation_embeddings, work_H, work_R, work_T,\n exclude_H, exclude_T, target, None, self.solver.model, self.solver.margin))\n\n results = self.gpu_map(triplet_prediction, settings)\n return np.concatenate(results)\n\n def graphvite_predict():\n num_entity = len(entity2id)\n if target == \"both\":\n batch_size = self.get_batch_size(num_entity * 2)\n else:\n batch_size = self.get_batch_size(num_entity)\n rankings = []\n\n for i in range(0, fast_mode, batch_size):\n batch_h = H[i: i + batch_size]\n batch_r = R[i: i + batch_size]\n batch_t = T[i: i + batch_size]\n batch = self.generate_one_vs_rest(batch_h, batch_r, batch_t, num_entity, target)\n masks = self.generate_mask(batch_h, batch_r, batch_t, exclude_H, exclude_T, num_entity, target)\n if target == \"head\":\n positives = batch_h\n if target == \"tail\":\n positives = batch_t\n if target == \"both\":\n positives = np.asarray([batch_h, batch_t]).transpose()\n positives = positives.ravel()\n\n scores = self.solver.predict(batch)\n scores = scores.reshape(-1, num_entity)\n truths = scores[range(len(positives)), positives]\n ranking = np.sum((scores >= truths[:, np.newaxis]) * masks, axis=1)\n rankings.append(ranking)\n\n return np.concatenate(rankings)\n\n assert_in([\"head\", \"tail\", \"both\"], target=target)\n assert_in([\"graphvite\", \"torch\"], backend=backend)\n\n if backend == \"torch\":\n self.solver.clear()\n\n if file_name:\n if not (H is None and R is None and T is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n H = []\n R = []\n T = []\n with open(file_name, \"r\") as fin:\n for i, line in enumerate(fin):\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n if 3 <= len(tokens) <= 4:\n h, r, t = tokens[:3]\n else:\n raise ValueError(\"Invalid line format at line %d in %s\" % (i + 1, file_name))\n H.append(h)\n R.append(r)\n T.append(t)\n if H is None or R is None or T is None:\n raise ValueError(\"Either evaluation data or file should be provided\")\n\n if filter_files:\n if not (filter_H is None and filter_R is None and filter_T is None):\n raise ValueError(\"Filter data and file should not be provided at the same time\")\n filter_H = []\n filter_R = []\n filter_T = []\n for filter_file in filter_files:\n with open(filter_file, \"r\") as fin:\n for i, line in enumerate(fin):\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n if 3 <= len(tokens) <= 4:\n h, r, t = tokens[:3]\n else:\n raise ValueError(\"Invalid line format at line %d in %s\" % (i + 1, filter_file))\n filter_H.append(h)\n filter_R.append(r)\n filter_T.append(t)\n elif filter_H is None:\n filter_H = []\n filter_R = []\n filter_T = []\n\n entity2id = 
self.graph.entity2id\n relation2id = self.graph.relation2id\n new_H, new_R, new_T = self.name_map((entity2id, relation2id, entity2id), (H, R, T))\n logger.info(\"effective triplets: %d / %d\" % (len(new_H), len(H)))\n H = np.asarray(new_H, dtype=np.uint32)\n R = np.asarray(new_R, dtype=np.uint32)\n T = np.asarray(new_T, dtype=np.uint32)\n new_H, new_R, new_T = self.name_map((entity2id, relation2id, entity2id), (filter_H, filter_R, filter_T))\n logger.info(\"effective filter triplets: %d / %d\" % (len(new_H), len(filter_H)))\n filter_H = np.asarray(new_H, dtype=np.uint32)\n filter_R = np.asarray(new_R, dtype=np.uint32)\n filter_T = np.asarray(new_T, dtype=np.uint32)\n\n exclude_H = defaultdict(set)\n exclude_T = defaultdict(set)\n for h, r, t in zip(filter_H, filter_R, filter_T):\n exclude_H[(t, r)].add(h)\n exclude_T[(h, r)].add(t)\n\n num_sample = len(H)\n fast_mode = fast_mode or num_sample\n indexes = np.random.permutation(num_sample)[:fast_mode]\n H = H[indexes]\n R = R[indexes]\n T = T[indexes]\n\n if backend == \"graphvite\":\n rankings = graphvite_predict()\n elif backend == \"torch\":\n rankings = torch_predict()\n\n return {\n \"MR\": np.mean(rankings),\n \"MRR\": np.mean(1 / rankings),\n \"HITS@1\": np.mean(rankings <= 1),\n \"HITS@3\": np.mean(rankings <= 3),\n \"HITS@10\": np.mean(rankings <= 10)\n }\n\n def get_batch_size(self, sample_size):\n import psutil\n memory = psutil.virtual_memory()\n\n batch_size = int(self.SAMPLE_PER_DIMENSION * self.dim * self.graph.num_vertex\n * self.solver.num_partition / self.solver.num_worker / sample_size)\n # 2 triplet (Python, C++ sample pool) + 1 sample index\n mem_per_sample = sample_size * (2 * 3 * np.uint32().itemsize + 1 * np.uint64().itemsize)\n max_batch_size = int(memory.available / mem_per_sample / self.MEMORY_SCALE_FACTOR)\n if max_batch_size < batch_size:\n logger.info(\"Memory is not enough for optimal prediction batch size. 
\"\n \"Use the maximal possible size instead.\")\n batch_size = max_batch_size\n return batch_size\n\n def generate_one_vs_rest(self, H, R, T, num_entity, target=\"both\"):\n one = np.ones(num_entity, dtype=np.bool)\n all = np.arange(num_entity, dtype=np.uint32)\n batches = []\n\n for h, r, t in zip(H, R, T):\n if target == \"head\" or target == \"both\":\n batch = np.asarray([all, t * one, r * one]).transpose()\n batches.append(batch)\n if target == \"tail\" or target == \"both\":\n batch = np.asarray([h * one, all, r * one]).transpose()\n batches.append(batch)\n\n batches = np.concatenate(batches)\n return batches\n\n def generate_mask(self, H, R, T, exclude_H, exclude_T, num_entity, target=\"both\"):\n one = np.ones(num_entity, dtype=np.bool)\n masks = []\n\n for h, r, t in zip(H, R, T):\n if target == \"head\" or target == \"both\":\n mask = one.copy()\n mask[list(exclude_H[(t, r)])] = 0\n mask[h] = 1\n masks.append(mask)\n if target == \"tail\" or target == \"both\":\n mask = one.copy()\n mask[list(exclude_T[(h, r)])] = 0\n mask[t] = 1\n masks.append(mask)\n\n masks = np.asarray(masks)\n return masks\n\n\ndef triplet_prediction(args):\n import torch\n from .network import LinkPredictor\n torch.set_grad_enabled(False)\n\n entity_embeddings, relation_embeddings, H, R, T, \\\n exclude_H, exclude_T, target, k, score_function, margin, device = args\n entity_embeddings = np.asarray(entity_embeddings)\n relation_embeddings = np.asarray(relation_embeddings)\n num_entity = len(entity_embeddings)\n score_function = LinkPredictor(score_function, entity_embeddings, relation_embeddings, entity_embeddings,\n margin=margin)\n\n if device != \"cpu\":\n try:\n score_function = score_function.to(device)\n except RuntimeError:\n logger.info(\"Model is too large for GPU evaluation with PyTorch. 
Switch to CPU evaluation.\")\n device = \"cpu\"\n if device == \"cpu\":\n del score_function\n torch.cuda.empty_cache()\n score_function = LinkPredictor(score_function, entity_embeddings, relation_embeddings, entity_embeddings,\n margin=margin)\n\n one = torch.ones(num_entity, dtype=torch.long, device=device)\n all = torch.arange(num_entity, dtype=torch.long, device=device)\n results = [] # rankings or top-k recalls\n\n for h, r, t in zip(H, R, T):\n if target == \"head\" or target == \"both\":\n batch_h = all\n batch_r = r * one\n batch_t = t * one\n score = score_function(batch_h, batch_r, batch_t)\n if k: # top-k recalls\n score, index = torch.topk(score, k)\n score = score.cpu().numpy()\n index = index.cpu().numpy()\n recall = list(zip(index, score))\n results.append(recall)\n else: # ranking\n mask = torch.ones(num_entity, dtype=torch.uint8, device=device)\n index = torch.tensor(list(exclude_H[(t, r)]), dtype=torch.long, device=device)\n mask[index] = 0\n mask[h] = 1\n ranking = torch.sum((score >= score[h]) * mask).item()\n results.append(ranking)\n\n if target == \"tail\" or target == \"both\":\n batch_h = h * one\n batch_r = r * one\n batch_t = all\n score = score_function(batch_h, batch_r, batch_t)\n if k: # top-k recalls\n score, index = torch.topk(score, k)\n score = score.cpu().numpy()\n index = index.cpu().numpy()\n recall = list(zip(index, score))\n results.append(recall)\n else: # ranking\n mask = torch.ones(num_entity, dtype=torch.uint8, device=device)\n index = torch.tensor(list(exclude_T[(h, r)]), dtype=torch.long, device=device)\n mask[index] = 0\n mask[t] = 1\n ranking = torch.sum((score >= score[t]) * mask).item()\n results.append(ranking)\n\n if not k: # ranking\n results = np.asarray(results)\n return results\n\n\nclass VisualizationApplication(ApplicationMixin):\n \"\"\"\n Graph & high-dimensional data visualization.\n \n Given a graph or high-dimensional vectors, it maps each node to 2D or 3D coordinates to\n faciliate visualization. The learned coordinates preserve most local similarity information\n of the original input, and may shed some light on the structure of the graph or the\n high-dimensional space.\n\n Supported Models:\n - LargeVis (`Visualizing Large-scale and High-dimensional Data`_)\n\n .. 
_Visualizing Large-scale and High-dimensional Data: https://arxiv.org/pdf/1602.00370.pdf\n\n Parameters:\n dim (int): dimension of embeddings\n gpus (list of int, optional): GPU ids, default is all GPUs\n cpu_per_gpu (int, optional): number of CPU threads per GPU, default is all CPUs\n float_type (dtype, optional): type of parameters\n index_type (dtype, optional): type of graph indexes\n\n See also:\n :class:`Graph <graphvite.graph.Graph>`,\n :class:`KNNGraph <graphvite.graph.KNNGraph>`,\n :class:`VisualizationSolver <graphvite.solver.VisualizationSolver>`\n \"\"\"\n\n OUTLIER_THRESHOLD = 5\n\n def get_graph(self, **kwargs):\n if \"file_name\" in kwargs or \"edge_list\" in kwargs:\n return graph.Graph(self.index_type)\n else:\n return graph.KNNGraph(self.index_type, self.gpus, self.cpu_per_gpu)\n\n def get_solver(self, **kwargs):\n if self.cpu_per_gpu == auto:\n num_sampler_per_worker = auto\n else:\n num_sampler_per_worker = self.cpu_per_gpu - 1\n\n return solver.VisualizationSolver(self.dim, self.float_type, self.index_type, self.gpus, num_sampler_per_worker,\n self.gpu_memory_limit)\n\n def set_parameters(self, model):\n if self.solver.coordinates.shape != model.solver.coordinates.shape:\n raise ValueError(\"Expect coordinates with shape %s, but %s is found\" %\n (self.solver.coordinates.shape, model.solver.coordinates.shape))\n self.solver.coordinates[:] = model.solver.coordinates\n\n def visualization(self, Y=None, file_name=None, save_file=None, figure_size=10, scale=2):\n \"\"\"\n Visualize learned 2D or 3D coordinates.\n\n Parameters:\n Y (list of str, optional): labels of vectors\n file_name (str, optional): file of labels\n save_file (str, optional): ``png`` or ``pdf`` file to save visualization,\n if not provided, show the figure in window\n figure_size (int, optional): size of figure\n scale (int, optional): size of points\n \"\"\"\n from matplotlib import pyplot as plt\n plt.switch_backend(\"agg\") # for compatibility\n\n self.solver.clear()\n\n coordinates = self.solver.coordinates\n dim = coordinates.shape[1]\n if not (dim == 2 or dim == 3):\n raise ValueError(\"Can't visualize %dD data\" % dim)\n\n if file_name:\n if not (Y is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n Y = []\n with open(file_name, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n y, = tokens\n Y.append(y)\n elif Y is None:\n Y = [\"unknown\"] * self.graph.num_vertex\n Y = np.asarray(Y)\n\n mean = np.mean(coordinates, axis=0)\n std = np.std(coordinates, axis=0)\n inside = np.abs(coordinates - mean) < self.OUTLIER_THRESHOLD * std\n indexes, = np.where(np.all(inside, axis=1))\n # discard outliers\n coordinates = coordinates[indexes]\n Y = Y[indexes]\n classes = sorted(np.unique(Y))\n\n fig = plt.figure(figsize=(figure_size, figure_size))\n if dim == 2:\n ax = fig.gca()\n elif dim == 3:\n from mpl_toolkits.mplot3d import Axes3D\n ax = fig.gca(projection=\"3d\")\n for cls in classes:\n indexes, = np.where(Y == cls)\n ax.scatter(*coordinates[indexes].T, s=scale)\n ax.set_xticks([])\n ax.set_yticks([])\n if dim == 3:\n ax.set_zticks([])\n if len(classes) > 1:\n ax.legend(classes, markerscale=6, loc=\"upper right\")\n if save_file:\n logger.warning(\"save visualization to `%s`\" % save_file)\n plt.savefig(save_file)\n else:\n plt.show()\n\n return {}\n\n def hierarchy(self, HY=None, file_name=None, target=None, save_file=None, figure_size=10, scale=2, duration=3):\n \"\"\"\n Visualize learned 2D coordinates 
with hierarchical labels.\n\n Parameters:\n HY (list of list of str, optional): hierarchical labels of vectors\n file_name (str, optional): file of hierarchical labels\n target (str): target class\n save_file (str): ``gif`` file to save visualization\n figure_size (int, optional): size of figure\n scale (int, optional): size of points\n duration (float, optional): duration of each frame in seconds\n \"\"\"\n import imageio\n from matplotlib import pyplot as plt\n plt.switch_backend(\"agg\") # for compatibility\n\n self.solver.clear()\n\n coordinates = self.solver.coordinates\n dim = coordinates.shape[1]\n if dim != 2:\n raise ValuerError(\"Can't visualize the hierarchy of %dD data\" % dim)\n\n if file_name:\n if not (HY is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n HY = []\n with open(file_name, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) > 0:\n HY.append(tokens)\n elif HY is None:\n raise ValueError(\"No label is provided for hierarchy\")\n HY = np.asarray(HY)\n min_type = \"S%d\" % len(\"else\")\n if HY.dtype < min_type:\n HY = HY.astype(min_type)\n\n mean = np.mean(coordinates, axis=0)\n std = np.std(coordinates, axis=0)\n inside = np.abs(coordinates - mean) < self.OUTLIER_THRESHOLD * std\n indexes, = np.where(np.all(inside, axis=1))\n # discard outliers\n coordinates = coordinates[indexes]\n HY = HY[indexes].T\n\n if target is None:\n raise ValueError(\"Target class is not provided\")\n for depth, Y in enumerate(HY):\n indexes, = np.where(Y == target)\n if len(indexes) > 0:\n sample = indexes[0]\n break\n else:\n raise ValueError(\"Can't find target `%s` in the hierarchy\" % target)\n\n settings = [(coordinates, None, HY[0], sample, figure_size, scale, 0)]\n for i in range(depth):\n settings.append((coordinates, HY[i], HY[i + 1], sample, figure_size, scale, i+1))\n pool = multiprocessing.Pool(self.solver.num_worker + self.solver.num_sampler)\n frames = pool.map(render_hierarchy, settings)\n logger.warning(\"save hierarchy to `%s`\" % save_file)\n imageio.mimsave(save_file, frames, fps=1 / duration, subrectangles=True)\n\n return {}\n\n def animation(self, Y=None, file_name=None, save_file=None, figure_size=5, scale=1, elevation=30, num_frame=700):\n \"\"\"\n Rotate learn 3D coordinates as an animation.\n\n Parameters:\n Y (list of str, optional): labels of vectors\n file_name (str, optional): file of labels\n save_file (str): ``gif`` file to save visualization\n figure_size (int, optional): size of figure\n scale (int, optional): size of points\n elevation (float, optional): elevation angle\n num_frame (int, optional): number of frames\n \"\"\"\n import imageio\n from matplotlib import pyplot as plt, animation\n from mpl_toolkits.mplot3d import Axes3D\n plt.switch_backend(\"agg\") # for compatibility\n\n self.solver.clear()\n\n coordinates = self.solver.coordinates\n dim = coordinates.shape[1]\n if dim != 3:\n raise ValueError(\"Can't animate %dD data\" % dim)\n\n if file_name:\n if not (Y is None):\n raise ValueError(\"Evaluation data and file should not be provided at the same time\")\n Y = []\n with open(file_name, \"r\") as fin:\n for line in fin:\n tokens = self.tokenize(line)\n if len(tokens) == 0:\n continue\n y, = tokens\n Y.append(y)\n elif Y is None:\n Y = [\"unknown\"] * self.graph.num_vertex\n Y = np.asarray(Y)\n\n mean = np.mean(coordinates, axis=0)\n std = np.std(coordinates, axis=0)\n inside = np.abs(coordinates - mean) < self.OUTLIER_THRESHOLD * std\n indexes, = 
np.where(np.all(inside, axis=1))\n # discard outliers\n coordinates = coordinates[indexes]\n Y = Y[indexes]\n\n settings = []\n degrees = np.linspace(0, 360, num_frame, endpoint=False)\n for degree in degrees:\n settings.append((coordinates, Y, degree, figure_size, scale, elevation))\n pool = multiprocessing.Pool(self.solver.num_worker + self.solver.num_sampler)\n frames = pool.map(render_animation, settings)\n logger.warning(\"save animation to `%s`\" % save_file)\n imageio.mimsave(save_file, frames, fps=num_frame / 70, subrectangles=True) # 70 seconds\n\n return {}\n\n\ndef render_hierarchy(args):\n from matplotlib import pyplot as plt\n plt.switch_backend(\"agg\")\n\n coordinates, H, Y, sample, figure_size, scale, depth = args\n\n fig = plt.figure(figsize=(figure_size, figure_size))\n ax = fig.gca()\n if H is not None:\n for i in range(len(Y)):\n if H[i] != H[sample]:\n Y[i] = \"else\"\n classes = set(Y)\n classes.discard(Y[sample])\n classes.discard(\"else\")\n classes = [Y[sample]] + sorted(classes) + [\"else\"]\n for i, cls in enumerate(classes):\n indexes, = np.where(Y == cls)\n color = \"lightgrey\" if cls == \"else\" else None\n ax.scatter(*coordinates[indexes].T, s=2, c=color, zorder=-i)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.legend(classes, markerscale=6, loc=\"upper right\")\n fig.canvas.draw()\n frame = np.asarray(fig.canvas.renderer._renderer)\n\n return frame\n\n\ndef render_animation(args):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n plt.switch_backend(\"agg\")\n\n coordinates, Y, degree, figure_size, scale, elevation = args\n classes = sorted(np.unique(Y))\n\n fig = plt.figure(figsize=(figure_size, figure_size))\n ax = fig.gca(projection=\"3d\")\n for cls in classes:\n indexes, = np.where(Y == cls)\n ax.scatter(*coordinates[indexes].T, s=scale)\n ax.view_init(elev=elevation, azim=degree)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n if len(classes) > 1:\n ax.legend(classes, markerscale=6)\n fig.canvas.draw()\n frame = np.asarray(fig.canvas.renderer._renderer)\n\n return frame\n\n\nclass Application(object):\n \"\"\"\n Application(type, *args, **kwargs)\n Create an application instance of any type.\n\n Parameters:\n type (str): application type,\n can be 'graph', 'word graph', 'knowledge graph' or 'visualization'\n \"\"\"\n\n application = {\n \"graph\": GraphApplication,\n \"word graph\": WordGraphApplication,\n \"knowledge graph\": KnowledgeGraphApplication,\n \"visualization\": VisualizationApplication\n }\n\n def __new__(cls, type, *args, **kwargs):\n if type in cls.application:\n return cls.application[type](*args, **kwargs)\n else:\n raise ValueError(\"Unknown application `%s`\" % type)\n\n__all__ = [\n \"Application\",\n \"GraphApplication\", \"WordGraphApplication\", \"KnowledgeGraphApplication\", \"VisualizationApplication\"\n]"
] | [
[
"numpy.linspace",
"numpy.asarray",
"torch.sum",
"numpy.concatenate",
"numpy.all",
"torch.set_grad_enabled",
"numpy.mean",
"torch.topk",
"numpy.where",
"torch.ones",
"numpy.ones_like",
"numpy.uint32",
"numpy.unique",
"numpy.arange",
"numpy.std",
"numpy.argpartition",
"torch.arange",
"torch.argsort",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.switch_backend",
"torch.cuda.empty_cache",
"matplotlib.pyplot.savefig",
"numpy.argsort",
"torch.cuda.device_count",
"matplotlib.pyplot.show",
"numpy.sum",
"torch.as_tensor",
"numpy.abs",
"numpy.ones",
"numpy.random.permutation",
"numpy.uint64",
"torch.cumsum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhoudoufu/lingvo | [
"bd0f89809942fd0508ff43bd4b6bca1b598220cb",
"bd0f89809942fd0508ff43bd4b6bca1b598220cb",
"bd0f89809942fd0508ff43bd4b6bca1b598220cb",
"bd0f89809942fd0508ff43bd4b6bca1b598220cb",
"bd0f89809942fd0508ff43bd4b6bca1b598220cb"
] | [
"lingvo/core/test_utils_test.py",
"lingvo/tasks/asr/model_test.py",
"lingvo/core/conv_layers_with_time_padding.py",
"lingvo/core/base_model.py",
"lingvo/tasks/asr/frontend.py"
] | [
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for test_utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom lingvo.core import test_utils\n\n\nclass TestUtilsTest(test_utils.TestCase):\n\n def testReplaceGoldenSingleFloat(self):\n old_line = ' CompareToGoldenSingleFloat(self, 1.489712, vs[0])\\n'\n expected = ' CompareToGoldenSingleFloat(self, 1.000000, vs[0])\\n'\n actual = test_utils.ReplaceGoldenSingleFloat(old_line, 1.0)\n self.assertEqual(expected, actual)\n\n old_line = ('test_utils.CompareToGoldenSingleFloat(self, -2.e-3, vs[0])'\n ' # pylint: disable=line-too-long\\n')\n expected = ('test_utils.CompareToGoldenSingleFloat(self, 1.000000, vs[0])'\n ' # pylint: disable=line-too-long\\n')\n actual = test_utils.ReplaceGoldenSingleFloat(old_line, 1.0)\n self.assertEqual(expected, actual)\n\n def CompareToGoldenSingleFloat(self, unused_v1, v2):\n return test_utils.ReplaceGoldenStackAnalysis(v2)\n\n def testReplaceGoldenStackAnalysis(self):\n v2 = 2.0\n result = TestUtilsTest.CompareToGoldenSingleFloat(self, 1.0, v2)\n self.assertTrue(result[0].endswith('test_utils_test.py'))\n old_line = (' result = TestUtilsTest.CompareToGoldenSingleFloat('\n 'self, 1.0, v2)\\n')\n new_line = (' result = TestUtilsTest.CompareToGoldenSingleFloat('\n 'self, 2.000000, v2)\\n')\n self.assertEqual(old_line, result[2])\n self.assertEqual(new_line, result[3])\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for Asr Model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\n\nimport numpy as np\nimport six\nfrom six.moves import range\n\nimport tensorflow as tf\n\nfrom lingvo.core import base_layer\nfrom lingvo.core import cluster_factory\nfrom lingvo.core import py_utils\nfrom lingvo.core import schedule\nfrom lingvo.core import summary_utils\nfrom lingvo.core import test_helper\nfrom lingvo.core import test_utils\nfrom lingvo.tasks.asr import decoder\nfrom lingvo.tasks.asr import input_generator\nfrom lingvo.tasks.asr import model\nfrom lingvo.tasks.asr import model_test_input_generator as tig\n\n\nclass DecoderForTest(decoder.AsrDecoder):\n \"\"\"Unit test class for AsrDecoder with functional.for based unrolling.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super(DecoderForTest, cls).Params()\n p.use_while_loop_based_unrolling = False\n return p\n\n\nclass AsrModelTest(test_utils.TestCase):\n\n def _testParams(self):\n input_shape = [2, 16, 8, 3]\n p = model.AsrModel.Params()\n p.decoder.target_seq_len = 5\n p.encoder.input_shape = input_shape\n p.input = tig.TestInputGenerator.Params()\n p.input.target_max_length = 5\n p.input.source_shape = input_shape\n p.input.target_shape = [2, 5]\n p.name = 'test_mdl'\n return p\n\n def testMakeDecoderTheta(self):\n # Test that decoder theta returns a copy of theta.decoder without changes.\n with self.session(use_gpu=False, graph=tf.Graph()):\n tf.set_random_seed(93820985)\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n decoder_theta = mdl._MakeDecoderTheta(theta=mdl.theta, input_batch=None)\n mdl.BProp()\n self.assertEqual(decoder_theta, mdl.theta.decoder)\n\n def testFProp(self):\n with self.session(use_gpu=False):\n tf.set_random_seed(93820985)\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n tf.global_variables_initializer().run()\n test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval())\n\n actual_var_names = [_.name for _ in tf.all_variables()]\n print('all vars \\n', '\\n'.join(actual_var_names))\n expected_var_names = [\n 'global_step:0', 'test_mdl/enc/conv_L0/w/var:0',\n 'test_mdl/enc/conv_L0/beta/var:0', 'test_mdl/enc/conv_L0/gamma/var:0',\n 'test_mdl/enc/conv_L0/moving_mean/var:0',\n 'test_mdl/enc/conv_L0/moving_variance/var:0',\n 'test_mdl/enc/conv_L1/w/var:0', 'test_mdl/enc/conv_L1/beta/var:0',\n 'test_mdl/enc/conv_L1/gamma/var:0',\n 'test_mdl/enc/conv_L1/moving_mean/var:0',\n 'test_mdl/enc/conv_L1/moving_variance/var:0',\n 'test_mdl/enc/f_conv_lstm_0/wm/var:0',\n 'test_mdl/enc/f_conv_lstm_0/b/var:0',\n 'test_mdl/enc/b_conv_lstm_0/wm/var:0',\n 'test_mdl/enc/b_conv_lstm_0/b/var:0',\n 'test_mdl/enc/conv_lstm_cnn_0/w/var:0',\n 'test_mdl/enc/conv_lstm_cnn_0/beta/var:0',\n 'test_mdl/enc/conv_lstm_cnn_0/gamma/var:0',\n 
'test_mdl/enc/conv_lstm_cnn_0/moving_mean/var:0',\n 'test_mdl/enc/conv_lstm_cnn_0/moving_variance/var:0',\n 'test_mdl/enc/fwd_rnn_L0/wm/var:0', 'test_mdl/enc/fwd_rnn_L0/b/var:0',\n 'test_mdl/enc/bak_rnn_L0/wm/var:0', 'test_mdl/enc/bak_rnn_L0/b/var:0',\n 'test_mdl/enc/proj_L0/w/var:0', 'test_mdl/enc/proj_L0/beta/var:0',\n 'test_mdl/enc/proj_L0/gamma/var:0',\n 'test_mdl/enc/proj_L0/moving_mean/var:0',\n 'test_mdl/enc/proj_L0/moving_variance/var:0',\n 'test_mdl/enc/fwd_rnn_L1/wm/var:0', 'test_mdl/enc/fwd_rnn_L1/b/var:0',\n 'test_mdl/enc/bak_rnn_L1/wm/var:0', 'test_mdl/enc/bak_rnn_L1/b/var:0',\n 'test_mdl/enc/proj_L1/w/var:0', 'test_mdl/enc/proj_L1/beta/var:0',\n 'test_mdl/enc/proj_L1/gamma/var:0',\n 'test_mdl/enc/proj_L1/moving_mean/var:0',\n 'test_mdl/enc/proj_L1/moving_variance/var:0',\n 'test_mdl/enc/fwd_rnn_L2/wm/var:0', 'test_mdl/enc/fwd_rnn_L2/b/var:0',\n 'test_mdl/enc/bak_rnn_L2/wm/var:0', 'test_mdl/enc/bak_rnn_L2/b/var:0',\n 'test_mdl/dec/emb/var_0/var:0', 'test_mdl/dec/rnn_cell/wm/var:0',\n 'test_mdl/dec/rnn_cell/b/var:0',\n 'test_mdl/dec/atten/source_var/var:0',\n 'test_mdl/dec/atten/query_var/var:0',\n 'test_mdl/dec/atten/hidden_var/var:0',\n 'test_mdl/dec/softmax/weight_0/var:0',\n 'test_mdl/dec/softmax/bias_0/var:0'\n ]\n self.assertEqual(sorted(expected_var_names), sorted(actual_var_names))\n\n def testDecode(self):\n with self.session(use_gpu=False) as sess:\n tf.set_random_seed(93820985)\n p = self._testParams()\n mdl = p.Instantiate()\n input_batch = mdl.input_generator.GetPreprocessedInputBatch()\n dec_out_dict = mdl.Decode(input_batch)\n tf.global_variables_initializer().run()\n dec_out = sess.run(dec_out_dict)\n print('dec_out', dec_out)\n metrics_dict = mdl.CreateDecoderMetrics()\n key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict)\n\n self.assertEqual(1.0, metrics_dict['wer'].value)\n self.assertEqual(1.0, metrics_dict['norm_wer'].value)\n self.assertEqual(1.0, metrics_dict['ter'].value)\n self.assertEqual(0, len(key_value_pairs))\n\n def testPostProcessDecodeOut(self):\n p = self._testParams()\n p.decoder.beam_search.num_hyps_per_beam = 2\n mdl = p.Instantiate()\n fake_dec_out = {\n 'utt_id': ['utt1', 'utt2'],\n 'transcripts': ['a b c d', 'a'],\n 'topk_decoded': [['a b c d', 'a b c d'], ['wrong', '']],\n 'topk_scores': [[1.0, 0.9], [1.0, 0.9]],\n 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6], [4, 5, 6, 7]],\n 'topk_lens': [2, 4, 4, 2],\n 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'target_paddings': [[0, 0, 0, 1], [0, 0, 0, 1]],\n 'norm_wer_errors': [[0, 0], [1, 1]],\n 'norm_wer_words': [[4, 4], [1, 1]],\n }\n metrics_dict = mdl.CreateDecoderMetrics()\n key_value_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)\n\n self.assertEqual(0 + 1, metrics_dict['wer'].total_value)\n self.assertEqual(4 + 1, metrics_dict['wer'].total_weight)\n self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value)\n self.assertEqual(4 + 1, metrics_dict['norm_wer'].total_weight)\n self.assertEqual(4, metrics_dict['ter'].total_value)\n self.assertEqual(6, metrics_dict['ter'].total_weight)\n self.assertEqual(2, metrics_dict['num_samples_in_batch'].total_value)\n self.assertEqual(1.0, metrics_dict['num_samples_in_batch'].total_weight)\n self.assertEqual((4 / 5 * 3 / 3 * 2 / 2 * 1 / 1)**(1 / 4),\n metrics_dict['corpus_bleu'].value)\n self.assertEqual((0 + 1) / 2, metrics_dict['sacc'].value)\n self.assertEqual((0 + 1) / (4 + 1), metrics_dict['oracle_norm_wer'].value)\n self.assertEqual(0, len(key_value_pairs))\n\n def 
testPostProcessDecodeOutFiltersEpsilonTokensForWER(self):\n p = self._testParams()\n p.decoder.beam_search.num_hyps_per_beam = 1\n mdl = p.Instantiate()\n fake_dec_out = {\n 'utt_id': ['utt1', 'utt2'],\n 'transcripts': ['a b c d', 'a b c'],\n 'topk_decoded': [['a b<epsilon>c d'], ['<epsilon>a b<epsilon>']],\n 'topk_scores': [[1.0], [1.0]],\n 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'topk_lens': [3, 4],\n 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'target_paddings': [[0, 0, 0, 1], [0, 0, 1, 1]],\n 'norm_wer_errors': [[0], [1]],\n 'norm_wer_words': [[4], [3]],\n }\n metrics_dict = mdl.CreateDecoderMetrics()\n kv_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)\n\n self.assertEqual(0 + 1, metrics_dict['wer'].total_value)\n self.assertEqual(7, metrics_dict['wer'].total_weight)\n self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value)\n self.assertEqual(7, metrics_dict['norm_wer'].total_weight)\n self.assertEqual(0, len(kv_pairs))\n\n def testPostProcessDecodeOutFiltersNoiseTokensForWER(self):\n p = self._testParams()\n p.decoder.beam_search.num_hyps_per_beam = 1\n mdl = p.Instantiate()\n fake_dec_out = {\n 'utt_id': ['utt1', 'utt2'],\n 'transcripts': ['a b c d', 'a b c'],\n 'topk_decoded': [['a b <noise> c d'], ['<noise> a b <noise>']],\n 'topk_scores': [[1.0], [1.0]],\n 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'topk_lens': [3, 4],\n 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'target_paddings': [[0, 0, 0, 1], [0, 0, 1, 1]],\n 'norm_wer_errors': [[0], [1]],\n 'norm_wer_words': [[4], [3]],\n }\n metrics_dict = mdl.CreateDecoderMetrics()\n kv_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)\n\n self.assertEqual(0 + 1, metrics_dict['wer'].total_value)\n self.assertEqual(7, metrics_dict['wer'].total_weight)\n self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value)\n self.assertEqual(7, metrics_dict['norm_wer'].total_weight)\n self.assertEqual(0, len(kv_pairs))\n\n def testPostProcessDecodeOutHandlesEmptyRef(self):\n p = self._testParams()\n p.decoder.beam_search.num_hyps_per_beam = 1\n mdl = p.Instantiate()\n fake_dec_out = {\n 'utt_id': ['utt1', 'utt2'],\n 'transcripts': ['', 'a b c d'],\n 'topk_decoded': [['a'], ['a b c d']],\n 'topk_scores': [[1.0], [1.0]],\n 'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'topk_lens': [3, 4],\n 'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],\n 'target_paddings': [[1, 1, 1, 1], [0, 0, 1, 1]],\n 'norm_wer_errors': [[1], [0]],\n 'norm_wer_words': [[0], [4]],\n }\n metrics_dict = mdl.CreateDecoderMetrics()\n mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)\n\n self.assertEqual(1 + 0, metrics_dict['wer'].total_value)\n self.assertEqual(0 + 4, metrics_dict['wer'].total_weight)\n self.assertEqual(1 + 0, metrics_dict['norm_wer'].total_value)\n self.assertEqual(0 + 4, metrics_dict['norm_wer'].total_weight)\n\n def testBProp(self):\n with self.session(use_gpu=False):\n tf.set_random_seed(93820985)\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n mdl.BProp()\n tf.global_variables_initializer().run()\n test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval())\n mdl.train_op.run()\n\n def testBPropSmoothDecay(self):\n with self.session(use_gpu=False):\n tf.set_random_seed(93820985)\n p = self._testParams()\n p.train.lr_schedule = (\n schedule.ContinuousLearningRateSchedule.Params().Set(\n start_step=350000, half_life_steps=45000))\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n mdl.BProp()\n tf.global_variables_initializer().run()\n test_utils.CompareToGoldenSingleFloat(self, 
4.472597, mdl.loss.eval())\n mdl.train_op.run()\n\n def testAllLayerParams(self):\n with self.session(use_gpu=False, graph=tf.Graph()):\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n lps = base_layer.RecursiveFindLayerParams(mdl.params)\n l_names = sorted([p.cls.__name__ for p in lps])\n expected_layers = sorted([\n 'Adam',\n 'AdditiveAttention',\n 'AsciiTokenizer',\n 'AsrDecoder',\n 'AsrEncoder',\n 'AsrModel',\n 'BeamSearchHelper',\n 'TargetSequenceSampler',\n 'ConvLSTMCell',\n 'Conv2DLayer',\n 'Conv2DLayer',\n 'EmbeddingLayer',\n 'HighwaySkipLayer',\n 'LSTMCellSimple',\n 'LSTMCellSimple',\n 'NullContextualizer',\n 'NullFusion',\n 'NullLm',\n 'Learner',\n 'PiecewiseConstantLearningRateSchedule',\n 'ProjectionLayer',\n 'SimpleFullSoftmax',\n 'SpectrumAugmenter',\n 'StackingOverTime',\n 'TestInputGenerator',\n ])\n self.assertEqual(expected_layers, l_names)\n\n def testParamValueSumSquared(self):\n with self.session(use_gpu=False, graph=tf.Graph()):\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n all_vars = tf.trainable_variables()\n py_utils.SumSquared(all_vars)\n\n def testCollectVarHistogram(self):\n with self.session(use_gpu=False, graph=tf.Graph()):\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n var_grads = py_utils.ComputeGradients(mdl.loss, mdl.vars)\n summary_utils.CollectVarHistogram(var_grads)\n\n def testGradientMult(self):\n with self.session(use_gpu=False, graph=tf.Graph()):\n p = self._testParams()\n mdl = p.Instantiate()\n mdl.FPropDefaultTheta()\n var_grads = py_utils.ComputeGradients(mdl.loss, mdl.vars)\n py_utils.ApplyGradMultiplier(var_grads, -1.1)\n\n def testLRDecay(self):\n with self.session(use_gpu=False, graph=tf.Graph()) as sess:\n p = self._testParams()\n tp = p.train\n tp.lr_schedule.boundaries = [300000, 400000, 500000]\n tp.lr_schedule.values = [1.0, 0.1, 0.01, 0.001]\n lrs = tp.lr_schedule.Instantiate()\n steps = [299999, 300001, 399999, 400001, 499999, 500001]\n fetches = [lrs.Value(_) for _ in steps]\n values = sess.run(fetches)\n self.assertAllClose([1.0, 0.1, 0.1, 0.01, 0.01, 0.001], values)\n\n def testBatchSplit(self):\n\n def Run(num_splits):\n p = self._testParams()\n with self.session(use_gpu=False, graph=tf.Graph()) as sess:\n tf.set_random_seed(93820981)\n p.is_eval = True\n p.input.cur_iter_in_seed = False\n p.input.bucket_batch_limit = [\n b * 2 / num_splits for b in p.input.bucket_batch_limit\n ]\n with cluster_factory.ForTestingWorker(gpus=num_splits):\n mdl = p.Instantiate()\n metrics = mdl.FPropDefaultTheta()[0]\n tf.global_variables_initializer().run()\n return sess.run(metrics['loss'])\n\n res1, res2 = Run(1), Run(2)\n self.assertAllClose(res1[0], res2[0])\n self.assertAllEqual(res1[1], res2[1])\n\n def testInference(self):\n\n def _CreateModelParamsForTest():\n p = model.AsrModel.Params()\n p.name = 'test_config'\n\n # Encoder params.\n ep = p.encoder\n ep.input_shape = [None, None, 80, 1]\n ep.lstm_cell_size = 16\n ep.num_lstm_layers = 2\n ep.conv_filter_shapes = [(3, 3, 1, 32), (3, 3, 32, 32)]\n ep.conv_filter_strides = [(2, 2), (2, 2)]\n ep.num_conv_lstm_layers = 0\n # Initialize decoder params.\n dp = p.decoder\n dp.rnn_cell_dim = 16\n dp.rnn_layers = 2\n dp.source_dim = ep.lstm_cell_size * 2\n # Use functional while based unrolling.\n dp.use_while_loop_based_unrolling = False\n\n p.input = input_generator.AsrInput.Params()\n ip = p.input\n ip.frame_size = 80\n ip.append_eos_frame = True\n ip.pad_to_max_seq_length = False\n\n p.is_eval = True\n 
return p\n\n with self.session(use_gpu=False, graph=tf.Graph()) as sess:\n p = _CreateModelParamsForTest()\n mdl = p.Instantiate()\n subgraphs = mdl.Inference()\n self.assertTrue('default' in subgraphs)\n\n fetches, feeds = subgraphs['default']\n self.assertTrue('wav' in feeds)\n for name in ['hypotheses', 'scores', 'src_frames', 'encoder_frames']:\n self.assertTrue(name in fetches)\n\n with open(\n test_helper.test_src_dir_path('tools/testdata/gan_or_vae.16k.wav'),\n 'rb') as f:\n wav = f.read()\n sess.run(tf.global_variables_initializer())\n fetches = sess.run(fetches, {feeds['wav']: wav})\n\n self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam),\n fetches['hypotheses'].shape)\n self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam),\n fetches['scores'].shape)\n self.assertAllEqual((1, 314, p.encoder.input_shape[2], 1),\n fetches['src_frames'].shape)\n self.assertAllEqual((80, 1, 2 * p.encoder.lstm_cell_size),\n fetches['encoder_frames'].shape)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
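The testLRDecay case above pins down the piecewise-constant schedule: boundaries [300000, 400000, 500000] with values [1.0, 0.1, 0.01, 0.001] are expected to yield [1.0, 0.1, 0.1, 0.01, 0.01, 0.001] at the probed steps. A minimal plain-Python sketch of that lookup (not part of lingvo; the function name is illustrative, and exact-boundary behaviour is not exercised by the test, so it is left unspecified here):

from bisect import bisect_right


def piecewise_constant_value(step, boundaries, values):
  # values[i] applies until the (i+1)-th boundary has been passed; ties at a
  # boundary are not probed by testLRDecay and may differ from TF's schedule.
  assert len(values) == len(boundaries) + 1
  return values[bisect_right(boundaries, step)]


boundaries = [300000, 400000, 500000]
values = [1.0, 0.1, 0.01, 0.001]
steps = [299999, 300001, 399999, 400001, 499999, 500001]
print([piecewise_constant_value(s, boundaries, values) for s in steps])
# -> [1.0, 0.1, 0.1, 0.01, 0.01, 0.001], matching the assertAllClose above.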
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common conv layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom lingvo.core import base_layer\nfrom lingvo.core import bn_layers\nfrom lingvo.core import py_utils\nfrom lingvo.core import tshape\n\n\ndef ComputeConvOutputShape(in_shape,\n t_stride,\n f_stride,\n outc=None,\n padding='SAME'):\n \"\"\"Computes output shape for convolution and pooling layers.\n\n If `in_shape` is a dynamic shape, the output will be Tensors, while if\n `in_shape` is a list of ints then the output will also be a list of ints.\n\n Args:\n in_shape: A length 4 Tensor or list representing the input shape.\n t_stride: The stride along the time dimension.\n f_stride: The stride along the frequency dimension.\n outc: The expected output channel. If None, will use the input channel.\n padding: 'SAME' or 'VALID'.\n\n Returns:\n The expected output shape.\n \"\"\"\n # In the order of batch, time, frequency, channel\n n = in_shape[0]\n t = in_shape[1]\n f = in_shape[2]\n c = in_shape[3]\n # Last two dimensions has to be specified.\n assert f is not None and c is not None\n if padding == 'VALID':\n if t:\n t -= t_stride - 1\n f -= f_stride - 1\n ot = t\n if ot is not None:\n ot = (ot + t_stride - 1) // t_stride\n of = (f + f_stride - 1) // f_stride\n if outc is None:\n outc = c\n return [n, ot, of, outc]\n\n\ndef ComputeConvOutputPadding(paddings, window, stride,\n padding_algorithm='SAME'):\n \"\"\"Computes paddings for convolution and pooling output.\n\n out_padding[i] == 1 iff any in_padding corresponding to that output is 1.\n\n Args:\n paddings: The paddings tensor. It is expected to be of shape [batch, time].\n window: The size of the windows.\n stride: The time-stride between adjacent windows.\n padding_algorithm: 'SAME' or 'VALID'.\n\n Returns:\n out_padding, The new padding tensor of size [batch, ceil(time / stride)].\n \"\"\"\n if stride == 1:\n return paddings\n\n # Pad so input_length divides stride.\n input_length = py_utils.GetShape(paddings)[1]\n pad_len = (input_length + stride - 1) // stride * stride - input_length\n paddings = tf.pad(paddings, [[0, 0], [0, pad_len]], constant_values=1.0)\n out_padding = tf.nn.pool(\n tf.expand_dims(paddings, -1),\n [window],\n 'MAX',\n padding_algorithm,\n strides=[stride],\n )\n return tf.squeeze(out_padding, -1)\n\n\nclass BaseConv2DLayerWithPadding(base_layer.BaseLayer):\n \"\"\"Base class for 2D convolution layers.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super(BaseConv2DLayerWithPadding, cls).Params()\n p.Define(\n 'filter_shape', (0, 0, 0, 0),\n 'Filter shape. Must be a sequence of length 4. Elements are in'\n ' the order of height (time), width (frequency), in_channel,'\n ' out_channel. 
For causal convolution, filter_shape[0]'\n ' is the actual number of trained weights in the time dimension'\n ' of the kernel.')\n p.Define(\n 'filter_stride', (1, 1),\n 'Filter stride to use. Must be a pair of ints. The first int'\n ' specifies the stride on the time dimension. The second int'\n ' specifies the stride on the frequency dimension.')\n p.Define(\n 'dilation_rate', (1, 1),\n 'If > 1, dilation rate for atrous convolution. '\n 'Must be a pair of ints. '\n 'The first int specifies the dilation rate on the time dimension. '\n 'The second int specifies the dilation rate on the frequency '\n 'dimension. '\n 'If any value of dilation_rate is > 1, then all values of strides '\n 'must be 1.')\n p.Define(\n 'weight_norm', False,\n 'If true, apply weight normalization to weights as proposed by'\n ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n super(BaseConv2DLayerWithPadding, self).__init__(params)\n p = self.params\n assert p.name\n assert len(p.filter_shape) == 4\n assert len(p.filter_stride) == 2\n assert all(x > 0 for x in p.filter_shape)\n assert all(x > 0 for x in p.filter_stride)\n assert len(p.dilation_rate) == 2\n assert all(x > 0 for x in p.dilation_rate)\n # Dilation and stride can't be combined.\n if any(x > 1 for x in p.dilation_rate):\n assert all(x == 1 for x in p.filter_stride)\n\n @property\n def output_channels(self):\n \"\"\"The number of output channels for this conv layer.\"\"\"\n raise NotImplementedError()\n\n @property\n def input_channels(self):\n \"\"\"The number of input channels for this conv layer.\"\"\"\n return self.params.filter_shape[2]\n\n def OutShape(self, in_shape):\n \"\"\"Compute the output shape given the input shape.\"\"\"\n p = self.params\n return ComputeConvOutputShape(in_shape, p.filter_stride[0],\n p.filter_stride[1], self.output_channels)\n\n def FProp(self, theta, inputs, paddings):\n \"\"\"Apply convolution to inputs.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: The inputs tensor. It is expected to be of shape [batch, time,\n frequency, channel]. The time dimension corresponds to the height\n dimension as in images and the frequency dimension corresponds to the\n width dimension as in images.\n paddings: The paddings tensor, expected to be of shape [batch, time].\n\n Returns:\n outputs, out_paddings pair.\n \"\"\"\n p = self.params\n with tf.name_scope(p.name):\n inputs = py_utils.with_dependencies([\n py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]),\n py_utils.assert_shape_match(\n tf.shape(inputs),\n tf.concat([tf.shape(paddings), [-1, self.input_channels]], 0))\n ], inputs)\n\n def _ApplyPadding(tensor_in, padding_in):\n padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, -1), -1)\n return tensor_in * (1.0 - padding_expanded)\n\n # Zeroing out padded inputs.\n inputs = _ApplyPadding(inputs, paddings)\n\n # Evaluate the conv kernel on 'inputs'.\n out = self._EvaluateConvKernel(theta, inputs)\n\n # NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1.\n # But there's likely no real problems. Trying to set it gives an error:\n # pooling with SAME padding is not implemented for dilation_rate > 1.\n # NOTE: we use window=p.filter_stride[0] to be compatible with legacy\n # implementation. 
Consider updating it to be the actual shape.\n conv_padding = ComputeConvOutputPadding(\n paddings, window=p.filter_stride[0], stride=p.filter_stride[0])\n # Assuming padded nodes will be properly zero-ed out if necessary by\n # sub-sequent layers.\n # out = _ApplyPadding(out, conv_padding)\n out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs)))\n return out, conv_padding\n\n def _EvaluateConvKernel(self, theta, conv_input):\n \"\"\"Evaluate the convolution kernel on input 'conv_input'.\"\"\"\n raise NotImplementedError\n\n\nclass Conv2DLayerWithPadding(BaseConv2DLayerWithPadding):\n \"\"\"Conv2D layer.\"\"\"\n\n @base_layer.initializer\n def __init__(self, params):\n super(Conv2DLayerWithPadding, self).__init__(params)\n p = self.params\n assert p.name\n w_pc = py_utils.WeightParams(\n shape=p.filter_shape,\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n with tf.variable_scope(p.name):\n self.CreateVariable('w', w_pc)\n if p.weight_norm:\n self.CreateVariable(\n 'g',\n py_utils.WeightParams(\n shape=[p.filter_shape[-1]],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars']))\n\n @property\n def output_channels(self):\n \"\"\"The number of output channels for this conv layer.\"\"\"\n p = self.params\n return p.filter_shape[-1]\n\n def _GetWeight(self, theta):\n p = self.params\n if p.weight_norm:\n # Normalize along the last dim (standard conv).\n filter_w = tf.nn.l2_normalize(theta.w, [0, 1, 2]) * tf.reshape(\n (theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]])\n else:\n filter_w = theta.w\n return filter_w\n\n def _EvaluateConvKernel(self, theta, inputs):\n \"\"\"Apply convolution to inputs.\"\"\"\n p = self.params\n filter_w = self._GetWeight(theta)\n return tf.nn.convolution(\n inputs,\n filter_w,\n strides=p.filter_stride,\n dilation_rate=p.dilation_rate,\n data_format='NHWC',\n padding='SAME')\n\n\nclass CausalConv2DLayerWithPadding(Conv2DLayerWithPadding):\n \"\"\"2D conv layer with causal dependency on the time axis.\"\"\"\n\n @base_layer.initializer\n def __init__(self, params):\n super(CausalConv2DLayerWithPadding, self).__init__(params)\n p = self.params\n assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.'\n\n def _EvaluateConvKernel(self, theta, inputs):\n \"\"\"Apply convolution to inputs.\"\"\"\n p = self.params\n assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.'\n # Use VALID padding and shift the inputs to the right to ensure that the\n # first output only depends on the first input and so on. 
The output is\n # the same size as the input, as if the convolution used SAME padding.\n padding_algorithm = 'VALID'\n # The effective spatial filter width for dilated convolutions is\n # (kernel_width - 1) * dilation_rate + 1 as according to\n # https://www.tensorflow.org/api_docs/python/tf/nn/convolution.\n causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]\n inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])\n\n filter_w = self._GetWeight(theta)\n return tf.nn.convolution(\n inputs,\n filter_w,\n strides=p.filter_stride,\n dilation_rate=p.dilation_rate,\n data_format='NHWC',\n padding=padding_algorithm)\n\n\nclass DepthwiseConv2DLayer(BaseConv2DLayerWithPadding):\n \"\"\"Depthwise conv 2D layer.\n\n paper: https://arxiv.org/abs/1610.02357\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super(DepthwiseConv2DLayer, cls).Params()\n # Redefine 'filter_shape' since the semantic of shape elements is different\n # from regular Conv2D.\n p.Delete('filter_shape')\n p.Define(\n 'filter_shape', (0, 0, 0, 0),\n 'Filter shape. Must be a sequence of length 4. Elements are in'\n ' the order of height (time), width (frequency), in_channel,'\n ' channel_multipliers. ')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n super(DepthwiseConv2DLayer, self).__init__(params)\n p = self.params\n assert p.name\n w_pc = py_utils.WeightParams(\n shape=p.filter_shape,\n init=p.params_init,\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars'])\n\n with tf.variable_scope(p.name):\n self.CreateVariable('w', w_pc)\n if p.weight_norm:\n self.CreateVariable(\n 'g',\n py_utils.WeightParams(\n shape=[p.filter_shape[2], p.filter_shape[3]],\n init=py_utils.WeightInit.Constant(0.0),\n dtype=p.dtype,\n collections=[self.__class__.__name__ + '_vars']))\n\n @property\n def output_channels(self):\n \"\"\"The number of output channels for this conv layer.\"\"\"\n p = self.params\n # Depthwise convolution filter shape is:\n # [..., in_channels, channel_multiplier].\n return p.filter_shape[2] * p.filter_shape[3]\n\n def _GetWeight(self, theta):\n p = self.params\n if p.weight_norm:\n # Normalize along the last two dims.\n filter_w = tf.nn.l2_normalize(theta.w, [0, 1]) * tf.reshape(\n (theta.g + 1.0), [1, 1, p.filter_shape[2], p.filter_shape[3]])\n else:\n filter_w = theta.w\n return filter_w\n\n def _EvaluateConvKernel(self, theta, inputs):\n \"\"\"Apply convolution to inputs.\"\"\"\n p = self.params\n filter_w = self._GetWeight(theta)\n return tf.nn.depthwise_conv2d(\n inputs,\n filter_w,\n strides=[1, p.filter_stride[0], p.filter_stride[1], 1],\n rate=p.dilation_rate,\n data_format='NHWC',\n padding='SAME')\n\n\nclass CausalDepthwiseConv2DLayer(DepthwiseConv2DLayer):\n \"\"\"Depthwise conv layer with causal dependency on the time axis.\"\"\"\n\n @base_layer.initializer\n def __init__(self, params):\n super(CausalDepthwiseConv2DLayer, self).__init__(params)\n p = self.params\n assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.'\n\n def _EvaluateConvKernel(self, theta, inputs):\n \"\"\"Apply convolution to inputs.\"\"\"\n p = self.params\n assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.'\n # Use VALID padding and shift the inputs to the right to ensure that the\n # first output only depends on the first input and so on. 
The output is\n # the same size as the input, as if the convolution used SAME padding.\n padding_algorithm = 'VALID'\n # The effective spatial filter width for dilated convolutions is\n # (kernel_width - 1) * dilation_rate + 1 as according to\n # https://www.tensorflow.org/api_docs/python/tf/nn/convolution.\n causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]\n inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])\n filter_w = self._GetWeight(theta)\n return tf.nn.depthwise_conv2d(\n inputs,\n filter_w,\n strides=[1, p.filter_stride[0], p.filter_stride[1], 1],\n rate=p.dilation_rate,\n data_format='NHWC',\n padding=padding_algorithm)\n\n\nclass NormalizedDepthwiseConv2DLayer(DepthwiseConv2DLayer):\n \"\"\"DepthwiseConv2DLayer where weights are normalized over the time dim.\n\n https://arxiv.org/abs/1901.10430\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super(NormalizedDepthwiseConv2DLayer, cls).Params()\n p.Define('dropconnect_prob', 0.0,\n 'Prob at which DropConnect regularization is performed.')\n p.Define('deterministic_dropout', False, 'Use determnisitc dropout or not.')\n p.Define('temperature', 1.0,\n 'Temperature for the softmax normalization of the weights.')\n p.Define('weight_tiling_factor', 1,\n 'Number of times weights are tiled over the input channels.')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n super(NormalizedDepthwiseConv2DLayer, self).__init__(params)\n p = self.params\n assert p.filter_shape[1] == 1, 'Only 1d convolution is supported.'\n assert p.temperature > 0.0, 'Absolute zero temperature is not possible.'\n\n @property\n def output_channels(self):\n \"\"\"The number of output channels for this conv layer.\"\"\"\n p = self.params\n # Depthwise convolution filter shape is:\n # [kernel_size, 1, in_channels, channel_multiplier].\n return p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor\n\n @property\n def input_channels(self):\n \"\"\"The number of output channels for this conv layer.\"\"\"\n p = self.params\n return p.filter_shape[2] * p.weight_tiling_factor\n\n def _GetWeight(self, theta):\n p = self.params\n filter_w = theta.w\n\n # First normalize filter_w over the temporal dimension here.\n filter_w = tf.nn.softmax(filter_w / p.temperature, axis=0)\n\n # Add dropconnect on the weights for regularization.\n if p.dropconnect_prob > 0.0 and not p.is_eval:\n if p.deterministic_dropout:\n filter_w = py_utils.DeterministicDropout(\n filter_w, 1.0 - p.dropconnect_prob,\n py_utils.GenerateStepSeedPair(p, theta.global_step))\n else:\n filter_w = tf.nn.dropout(\n filter_w, 1.0 - p.dropconnect_prob, seed=p.random_seed)\n\n # Tie the parameters of every subsequent number of weight_tiling_factor\n # channels.\n filter_w = tf.tile(filter_w, [1, 1, p.weight_tiling_factor, 1])\n return filter_w\n\n @classmethod\n def FPropMeta(cls, p, inputs, paddings):\n py_utils.CheckShapes((inputs, paddings))\n b, t, f, ic = inputs\n assert f == 1\n oc = p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor\n outputs = tshape.Shape([b, t, f, oc])\n flops = b * t * f * p.filter_shape[0] * ic * oc * 5\n return py_utils.NestedMap(flops=flops, out_shapes=(outputs, paddings))\n\n\nclass CausalNormalizedDepthwiseConv2DLayer(NormalizedDepthwiseConv2DLayer):\n \"\"\"Depthwise conv layer with causal dependency on the time axis.\"\"\"\n\n def _EvaluateConvKernel(self, theta, inputs):\n \"\"\"Apply convolution to inputs.\"\"\"\n # Same as CausalDepthwiseConv2DLayer.\n p = self.params\n assert p.filter_shape[1] == 1, 
'Only 1D causal convolutions supported.'\n padding_algorithm = 'VALID'\n causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0]\n inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])\n filter_w = self._GetWeight(theta)\n return tf.nn.depthwise_conv2d(\n inputs,\n filter_w,\n strides=[1, p.filter_stride[0], p.filter_stride[1], 1],\n rate=p.dilation_rate,\n data_format='NHWC',\n padding=padding_algorithm)\n\n\nclass ConvBatchNormLayer(bn_layers.BatchNormLayer):\n \"\"\"A wrapper around regular BatchNormLayer that pass around the ...\n\n paddings layers.\n \"\"\"\n\n def FProp(self, theta, inputs, paddings):\n paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1)\n bned = super(ConvBatchNormLayer, self).FProp(\n theta, inputs, paddings_expanded)\n return bned, paddings\n\n\n# Supported activation functions.\n_ACTIVATIONS = {\n 'RELU': tf.nn.relu,\n 'RELU6': tf.nn.relu6,\n 'SIGMOID': tf.sigmoid,\n 'TANH': tf.tanh,\n 'SWISH': tf.nn.swish,\n 'NONE': tf.identity,\n}\n\n\nclass ActivationLayer(base_layer.BaseLayer):\n \"\"\"Applies activation function to the inputs.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super(ActivationLayer, cls).Params()\n p.Define('activation', 'RELU',\n 'The activation function to apply')\n return p\n\n def FProp(self, theta, inputs, paddings):\n p = self.params\n out = _ACTIVATIONS[p.activation](inputs)\n return out, paddings\n\n\nclass PaddingLayer(base_layer.BaseLayer):\n \"\"\"Zeros out padded positions.\"\"\"\n\n def FProp(self, theta, inputs, paddings):\n paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1)\n return inputs * (1.0 - paddings_expanded), paddings\n",
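The causal conv layers above left-pad the time axis by (filter_shape[0] - 1) * dilation_rate[0] and then run a VALID convolution, so the output keeps the input length and the first output frame depends only on the first input frame. A minimal NumPy sketch of that padding arithmetic (illustrative only, not the lingvo kernel; causal_conv1d is a made-up helper):

import numpy as np


def causal_conv1d(x, kernel, dilation=1):
  # Left-pad by (kernel_size - 1) * dilation, then apply a VALID dilated
  # convolution: the output has the same length as the input, and out[0]
  # only sees x[0] plus the zero padding (the causal property).
  k = len(kernel)
  pad = (k - 1) * dilation
  xp = np.concatenate([np.zeros(pad), np.asarray(x, dtype=float)])
  out_len = len(xp) - (k - 1) * dilation  # == len(x)
  return np.array([
      sum(kernel[j] * xp[i + j * dilation] for j in range(k))
      for i in range(out_len)
  ])


x = [1., 2., 3., 4.]
print(causal_conv1d(x, kernel=[0.5, 0.5]))               # [0.5 1.5 2.5 3.5]
print(causal_conv1d(x, kernel=[0.5, 0.5], dilation=2))   # [0.5 1.  2.  3. ], still length 4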
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport six\nfrom six.moves import range\nimport tensorflow as tf\n\nfrom lingvo.core import base_input_generator\nfrom lingvo.core import base_layer\nfrom lingvo.core import build_data\nfrom lingvo.core import cluster_factory\nfrom lingvo.core import early_stop\nfrom lingvo.core import hyperparams\nfrom lingvo.core import learner\nfrom lingvo.core import optimizer\nfrom lingvo.core import py_utils\nfrom lingvo.core import schedule\nfrom lingvo.core import summary_utils\nfrom lingvo.core import task_scheduler\n\n\ndef CreateTaskGlobalStep(task_name):\n \"\"\"Create if needed and return the global_step.\"\"\"\n with tf.name_scope(None), tf.variable_scope(py_utils.global_variable_scope):\n graph_collections = [tf.GraphKeys.GLOBAL_VARIABLES, 'TASK_GLOBAL_STEP']\n _, v = py_utils.CreateVariable(\n name=task_name + '_global_step',\n params=py_utils.WeightParams([], py_utils.WeightInit.Constant(0),\n tf.int64),\n trainable=False,\n collections=graph_collections)\n summary_utils.scalar(v.name, v)\n return v\n\n\nclass StatsCounter(object):\n \"\"\"A single counter in TF.\"\"\"\n\n def __init__(self, name):\n self._name = name\n _, self._var = py_utils.CreateVariable(\n name=name,\n params=py_utils.WeightParams([], py_utils.WeightInit.Constant(0),\n tf.int64),\n trainable=False)\n self._value = self._var.value() + 0 # Makes a copy.\n\n def Value(self):\n \"\"\"Returns the current counter value.\"\"\"\n return self._value\n\n def IncBy(self, params, delta):\n \"\"\"Increment the counter by delta and return the new value.\"\"\"\n # NOTE: We must ensure _value is computed (_var + 0) before\n # updating _var with delta.\n delta = tf.to_int64(delta)\n with tf.control_dependencies([self._value]):\n summary_utils.scalar(self._name, self._value)\n return tf.identity(tf.assign_add(self._var, delta))\n\n\nclass BaseTask(base_layer.BaseLayer):\n \"\"\"A single encoder/decoder task.\n\n One task usually consists of one InputGenerator, one train_op,\n a list of eval_metrics, etc.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super(BaseTask, cls).Params()\n p.Define('input', None, 'Input generator Params.')\n p.Define('encoder', None, 'Encoder Params.')\n p.Define('online_encoder', None, 'Online Encoder Params.')\n p.Define('decoder', None, 'Decoder Params.')\n p.Define('train', hyperparams.Params(),\n 'Params to control how this task should be trained.')\n\n tp = p.train\n tp.Define(\n 'start_up_delay_steps', 200, 'i-th replica starts training after '\n 'i*(i+1)/2*start_up_delay_steps steps')\n tp.Define('max_steps', 4 * 10**6, 'Maximum number of training steps.')\n tp.Define('tpu_steps_per_loop', 100, 'The number of training steps per 
'\n 'training loop for TPUs.')\n tp.Define(\n 'vn_start_step', 200000000,\n 'Step starting from which variational noise is added to '\n 'params values during training.')\n tp.Define('vn_std', 0.0, 'Std of the variational noise.')\n tp.Define('early_stop', early_stop.EarlyStop.Params(),\n 'Early stopping based on dev-set performance.')\n tp.Define(\n 'ema_decay', 0.0,\n 'If > 0, enable ExponentialMovingAverage during training '\n 'with the give decay. '\n 'Must be < 1. Disabled if <= 0.')\n tp.Define(\n 'init_from_checkpoint_rules', {},\n 'If not None, a dictionary with keys corresponding to a checkpoint '\n 'path and values corresponding to variable loading rules is expected. '\n 'Each key is expected to be a path to a checkpoint from which to '\n 'initialize part of the model. Variables are only loaded from this '\n 'path during initialization and will override values provided by '\n 'initialization.'\n 'The corresponding values (loading_rules) are expected to be a tuple '\n 'consisting of two list: loading rules, and ignore rules, respectively.'\n 'The first list (loading rules) contains the list of variables '\n 'which should be initialized from the checkpoint: each element in the '\n 'list is a pair of strings. The first element is a regex and the '\n 'second is a python format string. If a variable in the model matches '\n 'a regex, we rename using the format string to determine the '\n 'corresponding var in the checkpoint. Note that, it is an error if a '\n 'model variable matches multiple loading rules, for the same '\n 'checkpoint or across checkpoints.'\n 'The second list (ignore rules) is a list of regexes which specify '\n 'variables in the model which should not be initialized using the '\n 'loading rules. Thus, if a variable in the model to be trained matches '\n 'one of the rules in the loading rules, as well as one of the regular '\n 'expressions in the ignore rules, the variable will not be initialized '\n 'from the checkpoint, but will instead be initialized from the '\n 'variable initalizer defined in the graph.'\n 'Examples:'\n '{\"checkpoint_path\": ([(\"(.*)\", \"%s\")], [])} will initialize all the '\n 'model parameters from the checkpoint_path.')\n tp.Define(\n 'pruning_hparams_dict', None, 'Pruning related hyperparameters. A dict '\n 'with hyperparameter: value pairs. See tf.contrib.model_pruning.')\n tp.Define(\n 'enqueue_max_steps', -1, 'Max enqueue steps. -1 meaning no limit.'\n ' This flag should be set for unit-test only.')\n tp.Define('save_interval_seconds', 60 * 10,\n 'Generates a checkpoint roughly once every this many seconds.')\n tp.Define('save_max_to_keep', 100,\n 'Maximum number of recent checkpoints to keep.')\n tp.Define('save_keep_checkpoint_every_n_hours', 0.5,\n 'How often to keep a checkpoint.')\n\n tp.Define('summary_interval_steps', 100,\n 'Generates a checkpoint roughly once every this many steps.')\n # The following params must mirror those in Learner.Params().\n # TODO(rpang): migrate existing params to use learner and\n # delete legacy params.\n # LINT.IfChange\n tp.Define(\n 'learner', None, 'One or a list of optimization programs. '\n 'If None, uses a Learner created from the legacy params '\n 'defined below: learning_rate, lr_schedule, optimizer, etc.')\n tp.Define(\n 'l2_regularizer_weight', None,\n 'If not None, L2 regularization to apply to the weights. '\n 'Otherwise, disable L2 regularization.')\n tp.Define(\n 'l1_regularizer_weight', None,\n 'If not None, L1 regularization to apply to the weights. 
'\n 'Otherwise, disable L1 regularization.')\n tp.Define('learning_rate', 0.0, 'learning rate to use.')\n tp.Define(\n 'clip_gradient_norm_to_value', 0.0,\n 'Clip gradient by global norm to this value. This is similar to '\n 'the bahaviour of tf.clip_by_global_norm, if you are looking for '\n 'tf.clip_by_norm refer to clip_gradient_single_norm_to_value. Note '\n 'these are mutually exclusive.')\n tp.Define(\n 'clip_gradient_single_norm_to_value', 0.0,\n 'Clip gradient by single tensor norm to this value. This is '\n 'similar to the bahaviour of tf.clip_by_norm. Note this is mutually '\n 'exlusive to using clip_gradient_norm_to_value.')\n tp.Define('grad_norm_to_clip_to_zero', 0.0,\n 'Clip gradient to 0 if its norm exceeds this value.')\n tp.Define('grad_norm_tracker', None, 'Params for GradNormTracker.')\n tp.Define('optimizer', optimizer.Adam.Params(), 'Params for the optimizer.')\n tp.Define('lr_schedule', schedule.ContinuousLearningRateSchedule.Params(),\n 'Learning rate decay schedule.')\n tp.Define(\n 'bprop_variable_filter', None,\n 'If set, only backprop variables whose names partially match '\n 'this regexp (re.search).')\n tp.Define(\n 'grad_aggregation_method', tf.AggregationMethod.EXPERIMENTAL_TREE,\n 'Specifies the method used to combine gradient terms. Accepted '\n 'values are constants defined in the class AggregationMethod.')\n tp.Define(\n 'gate_gradients', False,\n 'If True, add a tuple around the gradients returned for an '\n 'operations. This avoids some race conditions.')\n tp.Define('colocate_gradients_with_ops', True,\n 'If True, try colocating gradients with the corresponding op.')\n # LINT.ThenChange(learner.py)\n p.Define('eval', hyperparams.Params(),\n 'Params to control how this task should be evaled.')\n ep = p.eval\n ep.Define(\n 'samples_per_summary', 1000,\n 'If > 0, generates one summary after this many samples, at most. '\n 'If == 0 or the dataset has fewer examples, evaluate the whole set.')\n ep.Define(\n 'decoder_samples_per_summary', 0,\n 'If > 0, each decoder summary will contain at most this many samples. '\n 'If == 0, defaults to `samples_per_summary` for '\n 'backwards compatibility.')\n ep.Define(\n 'load_checkpoint_from', None,\n 'If not None, specifies a location for the checkpoint that '\n 'should be used for eval. One example format is a '\n 'checkpoint directory of a training run.')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, BaseTask)\n # Ensure global_step exists before calling super.\n py_utils.GetOrCreateGlobalStepVar()\n super(BaseTask, self).__init__(params)\n\n p = self.params\n\n if p.input:\n # TODO(zhifengc): Consider a simpler way to ensure the input\n # generator stops after one epoch.\n if p.is_eval and p.eval:\n seq_inp = issubclass(p.input.cls,\n base_input_generator.BaseInputGeneratorFromFiles)\n if p.input.num_samples == 0:\n # Dataset size is unknown. 
Computes eval summary based on num_samples.\n assert p.eval.samples_per_summary > 0\n elif (p.eval.samples_per_summary == 0) or (p.input.num_samples <\n p.eval.samples_per_summary):\n # If we know the dataset size and we want to evaluate the full\n # set, we need to coordinate the input generator to flush out\n # all samples so the evaler and decoder compute metrics on the\n # whole set for each summary step.\n if seq_inp:\n p.input.flush_every_n = p.input.num_samples\n p.eval.samples_per_summary = p.input.num_samples\n if seq_inp and p.input.num_batcher_threads > 1:\n tf.logging.warning('input.num_batcher_threads > 1 inside eval mode. '\n 'The input generator may not iterate over exactly '\n 'one epoch per run')\n\n input_params = self.cluster.PlaceInput(p.input)\n with py_utils.outside_all_rewrites():\n self.CreateChild('input', input_params)\n\n self._encoder = None\n self._online_encoder = None\n self._decoder = None\n\n self._total_examples = None\n self._total_nans_and_infs = None\n self._loss = None\n self._num_predictions = None\n self._train_op = None\n self._eval_metrics = {}\n self._per_example = {}\n self._trainer_verbose_tensors = {}\n\n # Create the gradient mask,\n self._per_input_gradient_mask = None\n task_global_step_list = tf.get_collection('TASK_GLOBAL_STEP',\n '^%s_global_step' % p.name)\n if len(task_global_step_list) > 1:\n raise ValueError('Found multiple task_global_step for task %s' % p.name)\n self._global_step_var = (\n task_global_step_list[0] if len(task_global_step_list) == 1 else\n py_utils.GetOrCreateGlobalStepVar())\n self._global_step = tf.identity(\n self._global_step_var, name='global_step_tensor')\n tp = p.train\n # p.train can be None if this task is the teacher/student task in a\n # DistillationTask.\n if tp and self.cluster.job in ('worker', 'trainer', 'trainer_client',\n 'controller'):\n self._SetLearnerFromLegacyParams(tp)\n if tp.learner is not None:\n if isinstance(tp.learner, (list, tuple)):\n self.CreateChildren('learners', tp.learner)\n else:\n self.CreateChildren('learners', [tp.learner])\n self._UpdateVnConfig()\n\n def _SetLearnerFromLegacyParams(self, tp):\n \"\"\"Sets tp.learner based on legacy params.\"\"\"\n if tp.learner is not None:\n return\n tp.learner = learner.ExtractLearnerFromLegacyParams(tp)\n\n def ComputePredictions(self, theta, input_batch):\n \"\"\"Computes predictions for `input_batch`.\n\n The output can be in the form of probablistic distributions, e.g., softmax\n logits for discrete outputs, mixture of logistics for continuous values, or\n regression values.\n\n For training/evaluation, the output will be used for computing loss and\n gradient updates, including comparing predicted distributions between\n teacher and student for distillation. 
During inference the output can be\n used to compute final outputs, perhaps with sampling.\n\n Args:\n theta: A `.NestedMap` object containing variable values of this task.\n input_batch: A `.NestedMap` object containing input tensors to this tower.\n\n Returns:\n Predictions, either a single Tensor, a `.NestedMap`, or a namedtuple.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def ComputeLoss(self, theta, input_batch, predictions):\n \"\"\"Computes loss and other metrics for the given predictions.\n\n Args:\n theta: A `.NestedMap` object containing variable values of this task.\n input_batch: A `.NestedMap` object containing input tensors to this tower.\n predictions: The output of `ComputePredictions`.\n\n Returns:\n Two dicts:\n A dict containing str keys and (metric, weight) pairs as values, where\n one of the keys is expected to be 'loss'.\n A dict containing arbitrary tensors describing something about each\n training example, where the first dimension of each tensor is the batch\n index.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def FilterPerExampleTensors(self, per_example):\n \"\"\"Return the per-example tensors ProcessFPropResults needs.\n\n By default we don't send any per-example tensors to ProcessFPropResults\n because some may be expensive to compute. Implement this method to let\n some of them pass through.\n\n Args:\n per_example: A dict of tensors returned as per-example tensors from FProp.\n\n Returns:\n A dict containing a subset of the key/value pairs in per_example.\n \"\"\"\n return {}\n\n def ProcessFPropResults(self, sess, global_step, metrics, per_example):\n \"\"\"Called once for each train loop.\n\n BaseModel.ProcessFPropResults is also called on each loop, so you\n can put your implementation wherever it is most convenient for you.\n\n Args:\n sess: a session.\n global_step: approximate number of model training steps.\n metrics: the metrics dict returned by FPropTower.\n per_example: the per_example dict returned by FPropTower.\n \"\"\"\n pass\n\n def FPropTower(self, theta, input_batch):\n \"\"\"Forward propagation through one tower of the model.\n\n Args:\n theta: A `.NestedMap` object containing variable values of this task\n copied to this tower's devices.\n input_batch: A `.NestedMap` object containing input tensors to this tower.\n\n Returns:\n Two dicts:\n A dict containing str keys and (metric, weight) pairs as values, where\n one of the keys is expected to be 'loss'.\n A dict containing arbitrary tensors describing something about each\n training example, where the first dimension of each tensor is the batch\n index.\n \"\"\"\n predicted = self.ComputePredictions(theta, input_batch)\n return self.ComputeLoss(theta, input_batch, predicted)\n\n def FProp(self, theta, input_batch):\n \"\"\"Forward propagation.\n\n This default `FProp` implementation here supports batch splitting in\n synchronous and asynchronous training when sub-classes implement\n `FPropTower`.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n input_batch: The input batch. A `NestedMap` of tensors. 
Or, if input batch\n spiltting is used, a list of `NestedMap`, one for each split.\n\n Returns:\n Two dicts:\n A dict containing str keys and (metric, weight) pairs as values, where\n one of the keys is expected to be 'loss'.\n A dict containing arbitrary tensors describing something about each\n training example, where the first dimension of each tensor is the batch\n index.\n \"\"\"\n p = self.params\n with tf.name_scope('fprop'), tf.name_scope(p.name):\n # Always reset step seed at the start of a new global_step.\n py_utils.ResetStepSeed()\n if py_utils.use_tpu():\n metrics, per_example = self._FPropTpu(theta, input_batch)\n else:\n metrics, per_example = self._FPropSplitInputBatch(theta, input_batch)\n self._FPropResult(metrics, per_example)\n return metrics, per_example\n\n def _FPropTpu(self, theta, input_batch):\n p = self.params\n with tf.name_scope('fprop'), tf.name_scope(p.name):\n with tf.name_scope('tower_0_0'):\n metrics, per_example = self.FPropTower(theta, input_batch)\n metrics = py_utils.WeightedAvgOfMetrics([metrics])\n return metrics, per_example\n\n def _FPropSplitInputBatch(self, theta, input_batch):\n \"\"\"Splits the input batch on the input device.\"\"\"\n cluster = self.cluster\n num_splits = cluster.num_splits_per_client\n\n if not isinstance(input_batch, list):\n input_batch = [input_batch]\n\n assert len(input_batch) == num_splits, (len(input_batch), num_splits)\n\n # dev_list_per_replica[i][j] is the i-th worker's j-th device.\n dev_list_per_replica = cluster.available_devices.tolist()\n\n # Asserts invariant of the total number of splits w.r.t.,\n # splits per worker.\n splits_per_replica = cluster.num_splits_per_replica\n assert num_splits == splits_per_replica * len(dev_list_per_replica), (\n num_splits, splits_per_replica, len(dev_list_per_replica))\n\n all_metrics = []\n all_per_example_tensors = []\n for w_id, w_devs in enumerate(dev_list_per_replica):\n # Make local copy of the vars, shard on devices for this worker.\n theta_local = py_utils.CreateLocalTheta(\n theta, w_devs, label='worker %d' % w_id)\n\n for s_id in range(splits_per_replica):\n # s_id-th split for the w_id-th worker.\n split_id = splits_per_replica * w_id + s_id\n with py_utils.ModelSplit(split_id):\n with tf.device(cluster.WorkerDeviceInModelSplit(0)):\n with tf.name_scope('tower_%d_%d' % (w_id, s_id)):\n batch = self.input_generator.PreprocessInputBatch(\n input_batch[split_id])\n metrics, per_example = self.FPropTower(theta_local, batch)\n all_metrics.append(metrics)\n all_per_example_tensors.append(per_example)\n\n return py_utils.WeightedAvgOfMetrics(\n all_metrics), py_utils.ConcatPerExampleTensors(all_per_example_tensors)\n\n def _FPropResult(self, metrics, per_example):\n # Adds stats about the input batch.\n metrics['num_samples_in_batch'] = (tf.convert_to_tensor(\n self.input_generator.GlobalBatchSize()), tf.constant(1.0))\n # Generates summaries.\n for name, (value, weight) in six.iteritems(metrics):\n self.AddEvalMetric(name, value, weight)\n per_example = self.FilterPerExampleTensors(per_example)\n for name, value in six.iteritems(per_example):\n self.AddPerExampleTensor(name, value)\n # Loss.\n self._loss, self._num_predictions = metrics['loss']\n self._loss = py_utils.CheckNumerics(self._loss)\n self._metrics = metrics\n summary_utils.scalar('num_predictions', self._num_predictions)\n\n def GetInputBatch(self):\n \"\"\"Returns input batch from input_generator.\"\"\"\n if py_utils.use_tpu():\n return self.input_generator.CreateTpuFeeds()\n else:\n return 
self.input_generator.SplitInputBatch(\n self.cluster.num_splits_per_client)\n\n def FPropDefaultTheta(self, input_batch=None):\n \"\"\"Calls `FProp` with this layer's parameters.\"\"\"\n if input_batch is None:\n input_batch = self.GetInputBatch()\n return self.FProp(self.theta, input_batch)\n\n def AdjustGradients(self, vars_gradients):\n \"\"\"Allow for custom gradient manipulation prior to clipping.\"\"\"\n tf.logging.info('BaseTask.AdjustGradients')\n return vars_gradients\n\n def BProp(self):\n self._BPropForVariables(self.vars)\n\n def _BPropForVariables(self, vmap):\n \"\"\"Constructs the backward graph.\"\"\"\n bprop_variable_filters = self.input_generator.GetBpropVariableFilters()\n # Only compute the mask if the variable filters are not empty.\n if bprop_variable_filters != [''] * len(bprop_variable_filters):\n self._ComputeGradientMask(bprop_variable_filters)\n train_ops = {} # mapping from op name to op.\n train_ops['total_samples'] = self.IncrementTotalSamples()\n gradient_mask = None\n if self._per_input_gradient_mask:\n # TODO(neerajgaur): Change this to use source_selected from input_batch.\n onehot = self.input_generator.GetInputSourceOneHot()\n gradient_mask = {\n k: tf.tensordot(v, onehot, 1)\n for k, v in six.iteritems(self._per_input_gradient_mask)\n }\n all_losses = []\n for optimization in self.learners:\n loss_name = optimization.params.name\n metric = self._metrics.get(loss_name, None)\n if metric is None:\n raise ValueError('Loss %s not found in metrics %s' %\n (loss_name, list(self._metrics.keys())))\n loss = metric[0]\n all_losses.append(loss)\n train_ops['train/%s' % loss_name], stats = optimization.Apply(\n loss,\n vmap,\n gradient_mask=gradient_mask,\n gradient_adjuster=self.AdjustGradients)\n train_ops['stats/%s' % loss_name] = self.IncrementTotalNans(\n tf.to_int32(stats.has_nan_or_inf))\n for key, (value, weight) in six.iteritems(stats.eval_metrics):\n self.AddEvalMetric(key + '/' + loss_name, value, weight)\n\n relevant_bn_updates, _ = py_utils.FindRelevantBatchNormUpdates(\n all_losses, tf.get_collection(py_utils.BATCH_NORM_UPDATES))\n train_ops['bn_updates'] = relevant_bn_updates\n\n # Get the op to update the weight masks and thresholds\n train_ops['mask_updates'] = self._GetMaskUpdateOp()\n\n # Post training step update.\n train_ops['post_step'] = self.PostTrainingStepUpdate(self.global_step)\n\n with tf.control_dependencies(tf.nest.flatten(train_ops)):\n true_global_step = py_utils.GetOrCreateGlobalStepVar()\n with tf.colocate_with(true_global_step):\n increment_global_steps = tf.assign_add(true_global_step, 1)\n if self._global_step_var != true_global_step:\n with tf.colocate_with(self._global_step_var):\n increment_global_steps = tf.group(\n increment_global_steps, tf.assign_add(self._global_step_var, 1))\n train_ops['global_step'] = increment_global_steps\n\n # If we are using Tpu Embeddings, generate the monolithic send\n # gradient op.\n tpu_embedding_activations = tf.get_collection(\n py_utils.TPU_EMBEDDING_ACTIVATIONS)\n if tpu_embedding_activations:\n tpu_embedding_activations_dict = tpu_embedding_activations[0]\n tpu_embedding = tf.get_collection(py_utils.TPU_EMBEDDING)[0]\n tpu_embedding_send_gradient_op = py_utils.ComputeTpuEmbeddingGradients(\n self.loss, tpu_embedding_activations_dict, tpu_embedding)\n train_ops['tpu_embedding'] = tpu_embedding_send_gradient_op\n\n for op_name, op in six.iteritems(train_ops):\n assert op is not None, op_name\n\n # TODO(rpang): try to structure _train_op as:\n # tf.cond(skip_step, <only update skip 
stats>, <all updates>)\n # so that we skip all other updates when a step is skipped.\n self._train_op = tf.group(*tf.nest.flatten(train_ops), name='bprop')\n\n def _ComputeGradientMask(self, bprop_variable_filters):\n \"\"\"Compute gradient mask for each variable and bprop_variable_filters.\n\n Note that per_input_gradient_mask[var][i] will be 1 if var matches\n bprop_variable_filter[i], 0 otherwise.\n\n Args:\n bprop_variable_filters: A list of regex bprop_variable_filters for each\n file pattern.\n \"\"\"\n self._per_input_gradient_mask = py_utils.NestedMap()\n all_vars = set(self.vars.Flatten())\n for var in all_vars:\n self._per_input_gradient_mask[var.name] = (\n tf.zeros(len(bprop_variable_filters), dtype=tf.float32))\n for i in range(len(bprop_variable_filters)):\n if re.search(bprop_variable_filters[i], var.name):\n self._per_input_gradient_mask[var.name] += (\n tf.one_hot(i, len(bprop_variable_filters), dtype=tf.float32))\n\n def ApplyExponentialMovingAverage(self, ema):\n \"\"\"Wraps `self.train_op` with an op updating exponential moving average.\"\"\"\n # We need to apply EMA to trainable and moving average variable of this\n # Task, not just bprop vars, so that we create a shadow\n # '/ExponentialMovingAverage' variable for every trainable and moving\n # average variable.\n all_vars = set(tf.trainable_variables()) | set(\n tf.moving_average_variables())\n all_vars &= set(self.vars.Flatten())\n for var in all_vars:\n tf.logging.debug('ApplyExponentialMovingAverage: %s', var.name)\n with tf.control_dependencies([self._train_op\n ]), tf.name_scope('moving_average'):\n self._train_op = ema.apply(all_vars)\n\n def Decode(self, input_batch):\n \"\"\"Constructs the inference graph for eval decoding.\n\n Args:\n input_batch: The input batch. A `NestedMap` of tensors. Or, if input batch\n spiltting is used, a list of `NestedMap`, one for each split.\n\n Returns:\n a dict of Tensors as decoder output.\n \"\"\"\n return {}\n\n def Inference(self):\n \"\"\"Constructs the inference graph.\n\n Each subgraph represents a public API for a part of the graph which can\n be operated independently. By convention, the subgraph named 'default'\n should perform end to end inference via the input generator.\n\n Note that having distinct subgraphs (e.g. 'encoder', 'decoder') is\n not just a space optimization: when driving the graph externally in an\n online fashion, evaluation often needs to be broken into pieces. In this\n case, the graph will be constructed with only those pieces.\n\n Returns:\n An `inference_graph_pb2.InferenceGraph` message.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def CreateDecoderMetrics(self):\n \"\"\"Creates a dict of decoder metrics for `PostProcessDecodeOut` to update.\n\n Returns a dict mapping from string keys to `.BaseMetric` objects.\n \"\"\"\n pass\n\n def PostProcessDecodeOut(self, decode_out_dict, decode_metrics_dict):\n \"\"\"Post-processes decoder out and updates contents of `decode_metrics_dict`.\n\n Args:\n decode_out_dict: A dictionary of Tensors fetched.\n decode_metrics_dict: A dict mapping from string key to `.BaseMetric`\n object as created by `CreateDecoderMetrics`.\n\n Returns:\n output_key_value_pairs - a list of (key, value) pairs that can be saved\n (i.e. of type str, bytes, or unicode).\n \"\"\"\n pass\n\n @property\n def loss(self):\n assert self._loss is not None, ('No loss is defined. Call FProp first.')\n return self._loss\n\n @property\n def train_op(self):\n assert self._train_op is not None, (\n 'No train op is defined. 
Call BProp first.')\n return self._train_op\n\n @property\n def global_step(self):\n assert self._global_step is not None, ('No global_step is defined.')\n return self._global_step\n\n @property\n def input_generator(self):\n return self.input\n\n @property\n def eval_metrics(self):\n \"\"\"Returns the evaluation metrics.\n\n Returns:\n A map from metric name (a python string) to a tuple (value, weight).\n Both value and weight are scalar Tensors.\n \"\"\"\n return self._eval_metrics\n\n @property\n def per_example_tensors(self):\n \"\"\"Returns per-example outputs.\n\n Returns:\n A map from tensor name (a python string) to a tensor, where the\n first dimension is the batch index of the training example corresponding\n to this output.\n \"\"\"\n return self._per_example\n\n def AddEvalMetric(self, name, value, weight):\n \"\"\"Adds a metric to the eval metrics.\n\n Args:\n name: A python string. The name of the metric.\n value: A scalar Tensor.\n weight: A scalar Tensor.\n\n Raises:\n ValueError: if `name` is already defined.\n\n \"\"\"\n if name in self._eval_metrics:\n raise ValueError('Metric %s has already been defined.' % name)\n self._eval_metrics[name] = (value, weight)\n\n def AddPerExampleTensor(self, name, value):\n if name in self._per_example:\n raise ValueError('Metric %s has already been defined.' % name)\n self._per_example[name] = value\n\n @property\n def total_examples(self):\n \"\"\"Returns the total number of training examples processed so far.\"\"\"\n return self._total_examples.Value()\n\n @property\n def trainer_verbose_tensors(self):\n \"\"\"Return the dict of verbose tensors to eval in the training loop.\"\"\"\n return self._trainer_verbose_tensors\n\n def AddTrainerVerboseTensor(self, name, target):\n \"\"\"Add a (set of) tensors to be evaluated in the training loop.\n\n Args:\n name: A python string. The name of the target(s).\n target: A Tensor or a list or dict of Tensors.\n\n Raises:\n ValueError: if `name` is already defined.\n\n \"\"\"\n if name in self._trainer_verbose_tensors:\n raise ValueError('Verbose target %s has already been defined.' 
% name)\n self._trainer_verbose_tensors[name] = target\n\n def IncrementTotalSamples(self, value=None):\n \"\"\"Updates the total number of training examples with the batch size.\"\"\"\n p = self.params\n if self._total_examples is None:\n with tf.variable_scope(p.name):\n self._total_examples = StatsCounter('total_samples')\n if value is None:\n assert self.input_generator is not None, ('No input generator defined')\n value = self.input_generator.GlobalBatchSize()\n return self._total_examples.IncBy(p, value)\n\n def IncrementTotalNans(self, value):\n \"\"\"Updates the total number of NaN/Inf gradients by `value`.\"\"\"\n if self._total_nans_and_infs is None:\n with tf.variable_scope(\n py_utils.global_variable_scope, reuse=tf.AUTO_REUSE):\n self._total_nans_and_infs = StatsCounter('total_nan_gradients')\n return self._total_nans_and_infs.IncBy(self.params, value)\n\n def _UpdateVnConfig(self):\n \"\"\"Update vn config from the various vn flags.\"\"\"\n p = self.params\n tp = p.train\n if tp:\n vn_enabled = ((tp.vn_std > 0) and p.vn and\n (p.vn.global_vn or p.vn.per_step_vn))\n if p.is_eval or (not vn_enabled):\n p.vn = py_utils.VariationalNoiseParams(None, False, False)\n else:\n # vn.scale is dependent on global_step.\n p.vn.scale = tf.cast(self.global_step > tp.vn_start_step,\n py_utils.FPropDtype(p)) * tp.vn_std\n\n def _GetMaskUpdateOp(self):\n \"\"\"Returns op to update masks and threshold variables for model pruning.\"\"\"\n p = self.params\n tp = p.train\n mask_update_op = tf.no_op()\n if tp.pruning_hparams_dict:\n assert isinstance(tp.pruning_hparams_dict, dict)\n pruning_hparams = tf.contrib.model_pruning.get_pruning_hparams(\n ).override_from_dict(tp.pruning_hparams_dict)\n pruning_obj = tf.contrib.model_pruning.Pruning(\n pruning_hparams, global_step=self.global_step)\n pruning_obj.add_pruning_summaries()\n mask_update_op = pruning_obj.conditional_mask_update_op()\n return mask_update_op\n\n\nclass DistillationTask(BaseTask):\n \"\"\"A task to distill knowledge from a teacher task to a student task.\n\n The training parameters (e.g., learning rate) are determined only by\n `DistillationTask.params.train`. Teacher and student task's training and eval\n parameters must be set to None.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super(DistillationTask, cls).Params()\n p.Define('teacher', None, 'The teacher task params.')\n p.Define('student', None, 'The student task params.')\n p.Define(\n 'distillation_loss_weight',\n # Only uses distillation loss by default.\n schedule.ConstantOne.Params(),\n 'A schedule of distillation loss weight. '\n 'The weight determines the fraction of total loss contributed by '\n 'distillation loss, while the rest loss will be computed against '\n 'the ground truth. '\n 'A weight of 0 means to only use ground-truth and ignore teacher '\n 'predictions, while a weight 1 means to only use teacher '\n 'predictions and ignore ground truth. '\n 'The weight is specified as a schedule to allow it to change '\n 'during training.')\n p.Define(\n 'teacher_target_type', 'truth', 'The target type for the teacher. '\n 'Choices are: '\n ' \"truth\": using the ground-truth target labels '\n ' \"beam\": using the 1-best hypothesis from the beam search.')\n p.Define(\n 'beam_search_temperature', 1.0, 'The temperature to scale the'\n 'log-prob of each beam search hypothesis. 
This is used in '\n 'training only')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, DistillationTask)\n super(DistillationTask, self).__init__(params)\n\n p = self.params\n # While student does not need its own input generator for training, it\n # needs an input generator for inference graphs.\n p.student.input = p.input\n # Teacher also might need an input generator, eg. for waveform_processor.\n p.teacher.input = p.input\n with tf.variable_scope(p.name):\n for child in ('teacher', 'student'):\n child_p = getattr(p, child)\n assert issubclass(child_p.cls, BaseTask)\n assert child_p.train is None\n assert child_p.eval is None\n # In theory it's ok for teacher to be a DistillationTask. In practice\n # it probably won't happen.\n assert not issubclass(child_p.cls, DistillationTask)\n child_p.name = child\n self.CreateChild(child, child_p)\n self.CreateChild('distillation_loss_weight', p.distillation_loss_weight)\n\n def ComputePredictions(self, theta, input_batch):\n p = self.params\n with tf.name_scope(p.name):\n if p.teacher_target_type == 'truth':\n teacher_predictions = self.teacher.ComputePredictions(\n theta.teacher, input_batch)\n student_predictions = self.student.ComputePredictions(\n theta.student, input_batch)\n return py_utils.NestedMap(\n teacher=teacher_predictions, student=student_predictions)\n elif p.teacher_target_type == 'beam':\n (teacher_predictions, teacher_input_batch,\n teacher_beam_prob) = self.teacher.ComputeBeamPredictions(\n theta.teacher, input_batch, p.beam_search_temperature)\n # We use 'teacher_input_batch' instead of 'input_batch' for 'student'\n # because the training of student network uses target transcripts for\n # the \"teacher forcing\" mode and here the target transcripts should come\n # from the teacher's beam search.\n student_predictions = self.student.ComputePredictions(\n theta.student, teacher_input_batch)\n return py_utils.NestedMap(\n teacher=teacher_predictions,\n student=student_predictions,\n teacher_beam_prob=teacher_beam_prob)\n else:\n raise ValueError('teacher target type not defined properly: %s' %\n self.p.teacher_target_type)\n\n def ComputeLoss(self, theta, input_batch, predictions):\n per_example = {}\n with tf.name_scope('groundtruth_loss'):\n groundtruth_loss, groundtruth_per_example = self.student.ComputeLoss(\n theta.student, input_batch, predictions.student)\n groundtruth_loss['groundtruth_loss'] = groundtruth_loss['loss']\n per_example.update(groundtruth_per_example)\n\n with tf.name_scope('distillation_loss'):\n distillation_loss, distill_per_example = self.ComputeDistillationLoss(\n theta, input_batch, predictions)\n distillation_loss['distillation_loss'] = distillation_loss['loss']\n per_example.update(distill_per_example)\n\n distillation_loss_weight = self.distillation_loss_weight.FProp(\n theta.distillation_loss_weight, self.global_step)\n metrics = py_utils.CombineMetrics([\n (groundtruth_loss, 1 - distillation_loss_weight),\n (distillation_loss, distillation_loss_weight),\n ])\n return metrics, per_example\n\n def ComputeDistillationLoss(self, theta, input_batch, predictions):\n raise NotImplementedError('Abstract method')\n\n def BProp(self):\n # Only bprop on student variables.\n self._BPropForVariables(self.student.vars)\n\n def Decode(self, input_batch):\n return self.student.Decode(input_batch)\n\n def Inference(self):\n return self.student.Inference()\n\n def CreateDecoderMetrics(self):\n return self.student.CreateDecoderMetrics()\n\n def 
PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict):\n return self.student.PostProcessDecodeOut(dec_out_dict, dec_metrics_dict)\n\n\nclass BaseModel(base_layer.BaseLayer):\n \"\"\"The abstract model class. All models are sub-class of this class.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super(BaseModel, cls).Params()\n p.Define(\n 'model', None, 'Which python function generates the param. It includes '\n 'the file name and lineno where the function is defined.')\n p.Define(\n 'cluster', cluster_factory.Cluster.Params(),\n 'The training cluster. Individual layer may config differently'\n ' based on training cluster it is running under.')\n p.Define('input', None, 'Input generator Params.')\n p.Define('build_data', build_data.BuildData(), 'Build data of this binary.')\n p.Define('train', hyperparams.Params(),\n 'Params to control how this model should be trained.')\n tp = p.train\n tp.Define(\n 'start_up_delay_steps', 200, 'i-th replica starts training after '\n 'i*(i+1)/2*start_up_delay_steps steps')\n tp.Define('max_steps', 4 * 10**6, 'Training max of 4M steps.')\n tp.Define('tpu_steps_per_loop', 100, 'The number of training steps per '\n 'training loop for TPUs.')\n tp.Define(\n 'ema_decay', 0.0,\n 'If > 0, enable ExponentialMovingAverage during training '\n 'with the give decay. '\n 'Must be < 1. Disabled if <= 0.')\n tp.Define('init_from_checkpoint_rules', {},\n 'See BaseTask documentation for details.')\n tp.Define('early_stop', None,\n 'Early stopping based on dev-set performance.')\n tp.Define(\n 'enqueue_max_steps', -1, 'Max enqueue steps. -1 meaning no limit.'\n ' This flag should be set for unit-test only.')\n tp.Define('save_interval_seconds', 60 * 10,\n 'Generates a checkpoint roughly once every this many seconds.')\n tp.Define('save_max_to_keep', 100,\n 'Maximum number of recent checkpoints to keep.')\n tp.Define('save_keep_checkpoint_every_n_hours', 0.5,\n 'How often to keep a checkpoint.')\n tp.Define('summary_interval_steps', 100,\n 'Generates a checkpoint roughly once every this many steps.')\n\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n \"\"\"Initializes this Model.\"\"\"\n assert issubclass(params.cls, BaseModel)\n self._global_step_var = py_utils.GetOrCreateGlobalStepVar()\n self._global_step = tf.identity(\n self._global_step_var, name='global_step_tensor')\n super(BaseModel, self).__init__(params)\n # tasks are not yet instantiated.\n self._total_examples_sum = None\n\n self._ema = None\n tp = self.params.train\n tf.logging.info('Training parameters for %s: %s', params.cls, tp)\n if tp.ema_decay > 0:\n assert tp.ema_decay < 1.0\n self._ema = tf.train.ExponentialMovingAverage(\n decay=tp.ema_decay, num_updates=self.global_step)\n\n @property\n def global_step(self):\n assert self._global_step is not None, ('No global_step is defined.')\n return self._global_step\n\n @property\n def ema(self):\n return self._ema\n\n def ConstructFPropBPropGraph(self):\n raise NotImplementedError('Abstract method')\n\n def ConstructFPropGraph(self):\n raise NotImplementedError('Abstract method')\n\n @property\n def tasks(self):\n \"\"\"Returns a list of all tasks.\"\"\"\n raise NotImplementedError('Abstract method')\n\n def GetTask(self, task_name):\n \"\"\"Return the task associated with 'task_name'.\n\n Args:\n task_name: string, the name of the model task to be returned.\n\n Returns:\n An instance of `BaseTask`.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n @property\n def total_examples(self):\n \"\"\"Returns the total number of 
training examples processed so far.\"\"\"\n if self._total_examples_sum is None:\n self._total_examples_sum = tf.reduce_sum(\n [task.total_examples for task in self.tasks])\n return self._total_examples_sum\n\n def ProcessFPropResults(self, sess, global_step, metrics, per_example):\n \"\"\"Called once for each train loop.\n\n BaseTask.ProcessFPropResults is also called on each loop, so you\n can put your implementation wherever it is most convenient for you.\n\n Be sure to implement BaseTask.FilterPerExampleTensors if you plan to use any\n per-example tensors in this method.\n\n Args:\n sess: a session.\n global_step: approximate number of model training steps.\n metrics: the metrics dict returned by FPropTower.\n per_example: the per_example dict returned by FPropTower.\n \"\"\"\n pass\n\n\nclass SingleTaskModel(BaseModel):\n \"\"\"Model that consists of a single task.\"\"\"\n\n @classmethod\n def Params(cls, task_params=None):\n p = super(SingleTaskModel, cls).Params()\n p.Define(\n 'task', None,\n '`InstantiableParams` object for a `BaseTask` or its derivatives.')\n\n if task_params is not None:\n # Copy over model parameters from the task parameters.\n p.task = task_params\n base_layer.BaseLayer.CopyBaseParams(p.task, p)\n tp = p.train\n tp.start_up_delay_steps = p.task.train.start_up_delay_steps\n tp.max_steps = p.task.train.max_steps\n tp.tpu_steps_per_loop = p.task.train.tpu_steps_per_loop\n tp.ema_decay = p.task.train.ema_decay\n # init_from_checkpoint_rules does not need to be copied.\n tp.early_stop = p.task.train.early_stop\n tp.enqueue_max_steps = p.task.train.enqueue_max_steps\n tp.save_interval_seconds = p.task.train.save_interval_seconds\n tp.save_max_to_keep = p.task.train.save_interval_seconds\n tp.save_keep_checkpoint_every_n_hours = p.task.train.save_keep_checkpoint_every_n_hours\n tp.summary_interval_steps = p.task.train.summary_interval_steps\n\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, SingleTaskModel)\n assert params.task\n p = params.Copy() # Make a copy to avoid modifying the input.\n p.name = p.name or p.task.name\n p.task.name = p.task.name or p.name\n if p.input:\n assert not p.task.input\n p.task.input = p.input\n else:\n assert p.task.input\n p.input = p.task.input\n\n super(SingleTaskModel, self).__init__(p)\n\n p = self.params\n with py_utils.GlobalStepContext(self.global_step):\n self.CreateChild('_task', p.task)\n\n @property\n def tasks(self):\n return [self._task]\n\n def GetTask(self, task_name=None):\n assert not task_name, 'Must not specify >task_name< for single-task model.'\n return self._task\n\n def SampleTask(self, global_step):\n return self._task\n\n def ConstructFPropBPropGraph(self):\n self._task.FPropDefaultTheta()\n self._task.BProp()\n if self.ema:\n tf.logging.info('ApplyExponentialMovingAverage on %s', self._task)\n self._task.ApplyExponentialMovingAverage(self.ema)\n\n def ConstructFPropGraph(self):\n self._task.FPropDefaultTheta()\n\n\nclass MultiTaskModel(BaseModel):\n \"\"\"Model that consists of multiple tasks.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super(MultiTaskModel, cls).Params()\n p.Define(\n 'task_params', hyperparams.Params(),\n 'Params object mapping task name to `BaskTask`(or derivatives) '\n 'Params.')\n p.Define(\n 'task_probs', hyperparams.Params(),\n 'Params object mapping task name to the relative likelihood the '\n 'task will be sampled during training.')\n p.Define('task_schedule', None, 'Task schedule.')\n p.Define(\n 'task_global_step', False,\n 
'Whether or not to use task-specific global steps, which causes each '\n 'task to use its own global_step instead of the true global_step.')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, MultiTaskModel)\n super(MultiTaskModel, self).__init__(params)\n p = self.params\n assert len(p.task_params) > 1\n\n # Pass input params to tasks.\n assert isinstance(p.input, hyperparams.Params)\n assert set(dir(p.input)) == set(dir(p.task_params))\n for k, v in p.task_params.IterParams():\n assert isinstance(v, hyperparams.Params)\n assert not v.input\n v.input = p.input.Get(k)\n\n # For compatibility with older API (with p.task_probs)\n if p.task_schedule is None:\n p.task_schedule = task_scheduler.ConstantScheduler.Params()\n p.task_schedule.task_probs = sorted(list(p.task_probs.IterParams()))\n\n # CreateChild copies over global configs in p to individual task params,\n # which then gets propagated down to all sub-layers during\n # BaseTask._PropagateDownGlobalConfigs(), or through sub-sequent CreateChild\n # or CreateChildren calls.\n with py_utils.GlobalStepContext(self.global_step):\n with tf.name_scope(p.name):\n sorted_task_params = sorted(\n (task_name, task_params)\n for task_name, task_params in p.task_params.IterParams())\n for task_name, task_params in sorted_task_params:\n if p.task_global_step:\n assert task_name == task_params.name\n CreateTaskGlobalStep(task_name)\n # Make sure each task is under its own variable scope.\n with tf.variable_scope(task_name):\n self.CreateChild(task_name, task_params)\n self.CreateChild('task_schedule', p.task_schedule)\n\n @property\n def task_names(self):\n sorted_task_names = sorted(\n task_name for task_name, _ in self.params.task_params.IterParams())\n return sorted_task_names\n\n @property\n def tasks(self):\n return [self.children[name] for name in self.task_names]\n\n def GetTask(self, task_name):\n assert task_name, 'Must specify >task_name< for multi-task model.'\n return self.children[task_name]\n\n def SampleTask(self, global_step):\n \"\"\"Sample a task according self.task_schedule.\n\n `self.task_schedule.cur_probs` will also be updated.\n\n Args:\n global_step: int. Current time step.\n \"\"\"\n sampled_task = self.task_schedule.Sample(global_step)\n tf.logging.info('Sampled task: %s', sampled_task)\n return self.children[sampled_task]\n\n def ConstructFPropBPropGraph(self):\n for task_name in self.task_names:\n with tf.name_scope(task_name):\n task = self.GetTask(task_name)\n task.FPropDefaultTheta()\n task.BProp()\n if self.ema:\n task.ApplyExponentialMovingAverage(self.ema)\n\n def ConstructFPropGraph(self):\n for task_name in self.task_names:\n with tf.name_scope(task_name):\n task = self.GetTask(task_name)\n task.FPropDefaultTheta()\n",
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Layers to construct an ASR frontend.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\n\nimport tensorflow as tf\n\nfrom lingvo.core import base_layer\nfrom lingvo.core import py_utils\n\n\n# AsrFrontendConfig which defines characteristics of the frontend that may\n# be relevant to interfacing code which needs to reason about inputs and\n# outputs.\n# Fields:\n# is_null: Whether this is the NullAsrFrontend.\n# src_type: Interpretation of the src_inputs. Can be one of 'none' or 'pcm'.\n# src_pcm_scale: If src_type is 'pcm', then this is the scale of each sample.\n# If normalized, this should be 1.0. If unnormalized from int16, then it\n# should be 32768.0.\n# src_pcm_sample_rate: Sample rate of the expected src PCM frames.\n# output_dim: Dimension of the output. Typically the number of mel bands\n# or equiv. May be -1 for unknown.\n# input_frame_ratio: Approximate ratio of the number of\n# input_frames / output_frames. Intended to be multiplied by output frames\n# (i.e. as part of bucket_bounds to arrive at input frames to the frontend).\nAsrFrontendConfig = collections.namedtuple('AsrFrontendConfig', [\n 'is_null',\n 'src_type',\n 'src_pcm_scale',\n 'src_pcm_sample_rate',\n 'output_dim',\n 'input_frame_ratio',\n])\n\n\ndef _NextPowerOfTwo(i):\n return math.pow(2, math.ceil(math.log(i, 2)))\n\n\nclass BaseAsrFrontend(base_layer.BaseLayer):\n \"\"\"Base class for ASR frontends.\n\n An ASR frontend is responsible for performing feature extraction from the\n input in the cases where features are not precomputed as part of the\n dataset. In such cases, it would be typical for the input to consist of\n waveform data in some form.\n \"\"\"\n\n @property\n def config(self):\n \"\"\"Returns the AsrFrontendConfig namedtuple for this instance.\"\"\"\n return self.GetConfigFromParams(self.params)\n\n @staticmethod\n def GetConfigFromParams(params):\n \"\"\"Returns an AsrFrontendConfig namedtuple with vital config settings.\"\"\"\n raise NotImplementedError()\n\n def FProp(self, theta, input_batch):\n \"\"\"Generates ASR features for a batch.\n\n Shapes of the input_batch and output are dependent on the implementation\n and should be paired with the model's input format and encoder expectations.\n\n Args:\n theta: A NestedMap object containing weights' values of this layer and its\n children layers.\n input_batch: A NestedMap with fields:\n\n - 'src_inputs' - The inputs tensor,\n compatible with model input. Expected to be of shape\n [batch, time, ...].\n - 'paddings' - The paddings tensor. 
It is expected to be of shape\n [batch, time].\n\n Returns:\n NestedMap of encoder inputs which can be passed directly to a\n compatible encoder and contains:\n\n - 'src_inputs': inputs to the encoder, minimally of shape\n [batch, time, ...].\n - 'paddings': a 0/1 tensor of shape [batch, time].\n \"\"\"\n raise NotImplementedError()\n\n\nclass NullAsrFrontend(BaseAsrFrontend):\n \"\"\"ASR frontend that just returns its input as FProp output.\"\"\"\n\n @staticmethod\n def GetConfigFromParams(params):\n \"\"\"Returns an AsrFrontendConfig namedtuple with vital config settings.\"\"\"\n return AsrFrontendConfig(\n is_null=True,\n src_type='none',\n src_pcm_sample_rate=-1,\n src_pcm_scale=1.0,\n output_dim=-1,\n input_frame_ratio=1.0)\n\n def FProp(self, theta, input_batch):\n return input_batch.DeepCopy()\n\n\nclass MelAsrFrontend(BaseAsrFrontend):\n \"\"\"An AsrFrontend that implements mel feature extraction from PCM frames.\n\n This is expressed in pure TensorFlow and without reference to external\n resources.\n\n The frontend implements the following stages:\n `Framer -> Window -> FFT -> FilterBank -> MeanStdDev`\n\n Also, if stack_left_context > 0, this will further apply:\n `FrameStack -> SubSample(stack_left_context + 1)`\n\n The FProp input to this layer can either have rank 3 or rank 4 shape:\n [batch_size, timestep, packet_size, channel_count]\n [batch_size, timestep * packet_size, channel_count]\n\n For compatibility with existing code, 2D [batch_size, timestep] mono shapes\n are also supported.\n\n In the common case, the packet_size is 1. The 4D variant is accepted for\n glueless interface to input generators that frame their input samples in\n some way. The external framing choice does not influence the operation of\n this instance, but it is accepted.\n\n TODO(laurenzo): Refactor call sites to uniformly use the 4D variant and\n eliminate fallback logic in this class.\n\n Only 1 channel is currently supported.\n TODO(laurenzo): Refactor this class to operate on multi-channel inputs.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super(MelAsrFrontend, cls).Params()\n p.name = 'frontend'\n p.Define('sample_rate', 16000.0, 'Sample rate in Hz')\n p.Define('channel_count', 1, 'Number of channels.')\n p.Define('frame_size_ms', 25.0,\n 'Amount of data grabbed for each frame during analysis')\n p.Define('frame_step_ms', 10.0, 'Number of ms to jump between frames')\n p.Define('num_bins', 80, 'Number of bins in the mel-spectrogram output')\n p.Define('lower_edge_hertz', 125.0,\n 'The lowest frequency of the mel-spectrogram analsis')\n p.Define('upper_edge_hertz', 7600.0,\n 'The highest frequency of the mel-spectrogram analsis')\n p.Define('preemph', 0.97,\n 'The first-order filter coefficient used for preemphasis')\n p.Define('noise_scale', 8.0,\n 'The amount of noise (in 16-bit LSB units) to add')\n p.Define(\n 'window_fn', 'HANNING',\n 'Window function to apply (valid values are \"HANNING\", '\n 'and None)')\n p.Define(\n 'pad_end', False,\n 'Whether to pad the end of `signals` with zeros when the provided '\n 'frame length and step produces a frame that lies partially past '\n 'its end.')\n p.Define(\n 'per_bin_mean', None,\n 'Per-bin (num_bins) means for normalizing the spectrograms. '\n 'Defaults to zeros.')\n p.Define('per_bin_stddev', None, 'Per-bin (num_bins) standard deviations. 
'\n 'Defaults to ones.')\n p.Define('stack_left_context', 0, 'Number of left context frames to stack.')\n return p\n\n @staticmethod\n def GetConfigFromParams(params):\n \"\"\"Returns an AsrFrontendConfig namedtuple with vital config settings.\"\"\"\n subsample_factor = params.num_bins * (params.stack_left_context + 1)\n frame_step = round(params.sample_rate * params.frame_step_ms / 1000.0)\n return AsrFrontendConfig(\n is_null=False,\n src_type='pcm',\n src_pcm_scale=32768.0,\n src_pcm_sample_rate=16000.0,\n output_dim=subsample_factor,\n input_frame_ratio=frame_step * subsample_factor)\n\n @base_layer.initializer\n def __init__(self, params):\n super(MelAsrFrontend, self).__init__(params)\n p = self.params\n assert p.channel_count == 1, 'Only 1 channel currently supported.'\n # Make sure key params are in floating point.\n p.sample_rate = float(p.sample_rate)\n p.frame_step_ms = float(p.frame_step_ms)\n p.frame_size_ms = float(p.frame_size_ms)\n p.lower_edge_hertz = float(p.lower_edge_hertz)\n p.upper_edge_hertz = float(p.upper_edge_hertz)\n\n self._frame_step = int(round(p.sample_rate * p.frame_step_ms / 1000.0))\n self._frame_size = (int(round(p.sample_rate * p.frame_size_ms / 1000.0)) + 1\n ) # +1 for the preemph\n # Overdrive means double FFT size.\n # Note: 2* because of overdrive\n self._fft_size = 2 * int(max(512, _NextPowerOfTwo(self._frame_size)))\n\n self._CreateWindowFunction()\n\n # Mean/stddev.\n if p.per_bin_mean is None:\n p.per_bin_mean = [0.0] * p.num_bins\n if p.per_bin_stddev is None:\n p.per_bin_stddev = [1.0] * p.num_bins\n assert len(p.per_bin_mean) == p.num_bins\n assert len(p.per_bin_stddev) == p.num_bins\n\n def _CreateWindowFunction(self):\n p = self.params\n if p.window_fn is None:\n self._window_fn = None\n elif p.window_fn == 'HANNING':\n\n def _HanningWindow(frame_size, dtype):\n return tf.signal.hann_window(frame_size, dtype=dtype)\n\n self._window_fn = _HanningWindow\n else:\n raise ValueError('Illegal value %r for window_fn param' % (p.window_fn,))\n\n @property\n def window_frame_size(self):\n return self._frame_size\n\n @property\n def window_frame_step(self):\n return self._frame_step\n\n def _RemoveChannelDim(self, pcm_audio_data):\n if pcm_audio_data.shape.rank == 3:\n pcm_audio_data = tf.squeeze(pcm_audio_data, 2)\n assert pcm_audio_data.shape.rank == 2, (\n 'MelAsrFrontend only supports one channel')\n return pcm_audio_data\n\n def _ReshapeToMono2D(self, pcm_audio_data, paddings):\n \"\"\"Reshapes a 3D or 4D input to 2D.\n\n Since the input to FProp can be 3D or 4D (see class comments), this will\n collapse it back to a 2D, mono shape for internal processing.\n\n Args:\n pcm_audio_data: 2D, 3D or 4D audio input. See class comments. 
Must have a\n rank.\n paddings: Original paddings shaped to the first two dims of\n pcm_audio_data.\n\n Returns:\n Tuple of 2D [batch_size, timestep] mono audio data, new paddings.\n \"\"\"\n shape = py_utils.GetShape(pcm_audio_data)\n rank = len(shape)\n if rank == 2:\n return pcm_audio_data, paddings\n elif rank == 3:\n # [batch, time, channel]\n with tf.control_dependencies([tf.assert_equal(shape[2], 1)]):\n return tf.squeeze(pcm_audio_data, axis=2), paddings\n elif rank == 4:\n # [batch, time, packet, channel]\n batch_size, orig_time, orig_packet_size, channel = shape\n time = orig_time * orig_packet_size\n with tf.control_dependencies([tf.assert_equal(channel, 1)]):\n pcm_audio_data = tf.reshape(pcm_audio_data, (batch_size, time))\n # Transform paddings into the new time base with a padding per time\n # step vs per packet by duplicating each packet.\n paddings = tf.reshape(\n tf.tile(tf.expand_dims(paddings, axis=2), [1, 1, orig_packet_size]),\n (batch_size, time))\n return pcm_audio_data, paddings\n else:\n raise ValueError('Illegal pcm_audio_data shape')\n\n def FProp(self, theta, input_batch):\n \"\"\"Perform signal processing on a sequence of PCM data.\n\n NOTE: This implementation does not currently support paddings, and they\n are accepted for compatibility with the super-class.\n\n TODO(laurenzo): Rework this to support paddings.\n\n Args:\n theta: Layer theta.\n input_batch: PCM input map:\n\n - 'src_inputs': int16 or float32 tensor of PCM audio data, scaled to\n +/-32768 (versus [-1..1)!). See class comments for supported input\n shapes.\n - 'paddings': per frame 0/1 paddings. Shaped: [batch, frame].\n Returns:\n NestedMap of encoder inputs which can be passed directly to a\n compatible encoder and contains:\n\n - 'src_inputs': inputs to the encoder, minimally of shape\n [batch, time, ...].\n - 'paddings': a 0/1 tensor of shape [batch, time].\n \"\"\"\n\n pcm_audio_data, pcm_audio_paddings = self._ReshapeToMono2D(\n input_batch.src_inputs, input_batch.paddings)\n\n mel_spectrogram, mel_spectrogram_paddings = self._FPropChunk(\n theta, pcm_audio_data, pcm_audio_paddings)\n\n mel_spectrogram, mel_spectrogram_paddings = self._PadAndReshapeSpec(\n mel_spectrogram, mel_spectrogram_paddings)\n\n return py_utils.NestedMap(\n src_inputs=mel_spectrogram, paddings=mel_spectrogram_paddings)\n\n def _PadAndReshapeSpec(self, mel_spectrogram, mel_spectrogram_paddings):\n p = self.params\n batch_size = py_utils.GetShape(mel_spectrogram)[0]\n # Stack and sub-sample. Only subsampling with a stride of the stack size\n # is supported.\n if p.stack_left_context > 0:\n # Since left context is leading, pad the left by duplicating the first\n # frame.\n stack_size = 1 + p.stack_left_context\n mel_spectrogram = tf.concat(\n [mel_spectrogram[:, 0:1, :]] * p.stack_left_context +\n [mel_spectrogram],\n axis=1)\n mel_spectrogram_paddings = tf.concat(\n [mel_spectrogram_paddings[:, 0:1]] * p.stack_left_context +\n [mel_spectrogram_paddings],\n axis=1)\n\n # Note that this is the maximum number of frames. 
Actual frame count\n # depends on padding.\n stacked_frame_dim = tf.shape(mel_spectrogram)[1] // stack_size\n mel_spectrogram = tf.reshape(\n mel_spectrogram[:, 0:(stack_size) * stacked_frame_dim, :],\n [batch_size, stacked_frame_dim, stack_size * p.num_bins])\n # After stacking paddings, pad if any source frame was padded.\n # Stacks into [batch_size, stacked_frame_dim, stack_size] like the\n # spectrogram stacking above, and then reduces the stack_size dim\n # to the max (effectively, making padding = 1.0 if any of the pre-stacked\n # frames were 1.0). Final shape is [batch_size, stacked_frame_dim].\n mel_spectrogram_paddings = tf.reshape(\n mel_spectrogram_paddings[:, 0:(stack_size) * stacked_frame_dim],\n [batch_size, stacked_frame_dim, stack_size])\n mel_spectrogram_paddings = tf.reduce_max(mel_spectrogram_paddings, axis=2)\n\n # Add feature dim. Shape = [batch, time, features, 1]\n mel_spectrogram = tf.expand_dims(mel_spectrogram, -1)\n return mel_spectrogram, mel_spectrogram_paddings\n\n def _ApplyPreemphasis(self, framed_signal):\n p = self.params\n preemphasized = (\n framed_signal[:, :, 1:] - p.preemph * framed_signal[:, :, 0:-1])\n return preemphasized\n\n def _GetMelPadding(self, pcm_audio_paddings):\n p = self.params\n # shape: [batch, time, _frame_size]\n framed_paddings = tf.signal.frame(pcm_audio_paddings, self._frame_size,\n self._frame_step, p.pad_end)\n # Pad spectrograms that have any padded frames.\n mel_spectrogram_paddings = tf.reduce_max(framed_paddings, axis=2)\n return mel_spectrogram_paddings\n\n def _FPropChunk(self, theta, pcm_audio_chunk, pcm_audio_paddings):\n p = self.params\n pcm_audio_chunk = tf.cast(pcm_audio_chunk, tf.float32)\n # shape: [batch, time, _frame_size]\n framed_signal = tf.signal.frame(pcm_audio_chunk, self._frame_size,\n self._frame_step, p.pad_end)\n\n # Pre-emphasis.\n if p.preemph != 1.0:\n preemphasized = self._ApplyPreemphasis(framed_signal)\n else:\n preemphasized = framed_signal[:-1]\n\n # Noise.\n if p.noise_scale > 0.0:\n noise_signal = tf.random_normal(\n tf.shape(preemphasized),\n stddev=p.noise_scale,\n mean=0.0,\n seed=p.random_seed)\n else:\n noise_signal = 0.0\n\n # Apply window fn.\n windowed_signal = preemphasized + noise_signal\n if self._window_fn is not None:\n window = self._window_fn(self._frame_size - 1, framed_signal.dtype)\n windowed_signal *= window\n\n mel_spectrogram = self._MelSpectrogram(windowed_signal)\n\n output_floor = 1.0\n mel_spectrogram_log = tf.log(\n tf.maximum(float(output_floor), mel_spectrogram))\n\n # Mean and stddev.\n mel_spectrogram_norm = (\n (mel_spectrogram_log - tf.convert_to_tensor(p.per_bin_mean)) /\n tf.convert_to_tensor(p.per_bin_stddev))\n return mel_spectrogram_norm, self._GetMelPadding(pcm_audio_paddings)\n\n def _MelSpectrogram(self, signal):\n \"\"\"Computes the mel spectrogram from a waveform signal.\n\n Args:\n signal: f32 Tensor, shaped [batch_size, num_samples]\n\n Returns:\n features: f32 Tensor, shaped [batch_size, num_frames, mel_channels]\n \"\"\"\n p = self.params\n # FFT.\n real_frequency_spectrogram = tf.signal.rfft(signal, [self._fft_size])\n magnitude_spectrogram = tf.abs(real_frequency_spectrogram)\n\n # Shape of magnitude_spectrogram is num_frames x (fft_size/2+1)\n # Mel_weight is [num_spectrogram_bins, num_mel_bins]\n mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(\n num_mel_bins=p.num_bins,\n num_spectrogram_bins=self._fft_size // 2 + 1,\n sample_rate=p.sample_rate,\n lower_edge_hertz=p.lower_edge_hertz,\n upper_edge_hertz=p.upper_edge_hertz,\n 
dtype=tf.float32)\n # Weight matrix implemented in the magnitude domain.\n batch_size, num_frames, fft_channels = py_utils.GetShape(\n magnitude_spectrogram, 3)\n mel_spectrogram = tf.matmul(\n tf.reshape(magnitude_spectrogram,\n [batch_size * num_frames, fft_channels]), mel_weight_matrix)\n mel_spectrogram = tf.reshape(mel_spectrogram,\n [batch_size, num_frames, p.num_bins])\n\n return mel_spectrogram\n"
] | [
[
"tensorflow.test.main"
],
[
"tensorflow.Graph",
"tensorflow.all_variables",
"tensorflow.test.main",
"tensorflow.trainable_variables",
"tensorflow.global_variables_initializer",
"tensorflow.set_random_seed"
],
[
"tensorflow.nn.l2_normalize",
"tensorflow.nn.convolution",
"tensorflow.nn.softmax",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.tile",
"tensorflow.name_scope",
"tensorflow.pad",
"tensorflow.variable_scope",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.nn.dropout"
],
[
"tensorflow.logging.warning",
"tensorflow.control_dependencies",
"tensorflow.colocate_with",
"tensorflow.logging.debug",
"tensorflow.reduce_sum",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.moving_average_variables",
"tensorflow.nest.flatten",
"tensorflow.to_int32",
"tensorflow.to_int64",
"tensorflow.assign_add",
"tensorflow.get_collection",
"tensorflow.contrib.model_pruning.get_pruning_hparams",
"tensorflow.name_scope",
"tensorflow.trainable_variables",
"tensorflow.tensordot",
"tensorflow.identity",
"tensorflow.logging.info",
"tensorflow.no_op",
"tensorflow.constant",
"tensorflow.contrib.model_pruning.Pruning",
"tensorflow.variable_scope"
],
[
"tensorflow.signal.frame",
"tensorflow.signal.linear_to_mel_weight_matrix",
"tensorflow.reduce_max",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.signal.hann_window",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.assert_equal",
"tensorflow.signal.rfft",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
garaytc/reinforcement | [
"e6af258bf2ac3b45c20e0ed3d2f58ca7bc2b232f"
] | [
"tests/agents/test_agent_interface.py"
] | [
"import pytest\nimport torch\nfrom gym.spaces import Discrete, MultiDiscrete, MultiBinary, Dict, Tuple, Box\n\nfrom blobrl.agents import AgentInterface\n\n\nclass MOCKAgentInterface(AgentInterface):\n def __init__(self, observation_space, action_space, device):\n super().__init__(observation_space, action_space, device)\n\n def get_action(self, observation):\n pass\n\n def enable_exploration(self):\n pass\n\n def disable_exploration(self):\n pass\n\n def learn(self, observation, action, reward, next_observation, done) -> None:\n pass\n\n def episode_finished(self) -> None:\n pass\n\n def save(self, file_name, dire_name=\".\"):\n pass\n\n @classmethod\n def load(cls, file_name, dire_name=\".\", device=None):\n pass\n\n def __str__(self):\n return \"\"\n\n\nclass TestAgentInterface:\n __test__ = True\n\n agent = MOCKAgentInterface\n\n list_work = [\n [Discrete(3), Discrete(1)],\n [Discrete(3), Discrete(3)],\n [Discrete(10), Discrete(50)],\n [MultiDiscrete([3]), MultiDiscrete([1])],\n [MultiDiscrete([3, 3]), MultiDiscrete([3, 3])],\n [MultiDiscrete([4, 4, 4]), MultiDiscrete([50, 4, 4])],\n [MultiDiscrete([[100, 3], [3, 5]]), MultiDiscrete([[100, 3], [3, 5]])],\n [MultiDiscrete([[[100, 3], [3, 5]], [[100, 3], [3, 5]]]),\n MultiDiscrete([[[100, 3], [3, 5]], [[100, 3], [3, 5]]])],\n [MultiBinary(1), MultiBinary(1)],\n [MultiBinary(3), MultiBinary(3)],\n # [MultiBinary([3, 2]), MultiBinary([3, 2])], # Don't work yet because gym don't implemented this\n [Box(low=0, high=10, shape=[1]), Box(low=0, high=10, shape=[1])],\n [Box(low=0, high=10, shape=[2, 2]), Box(low=0, high=10, shape=[2, 2])],\n [Box(low=0, high=10, shape=[2, 2, 2]), Box(low=0, high=10, shape=[2, 2, 2])],\n\n [Tuple([Discrete(1), MultiDiscrete([1, 1])]), Tuple([Discrete(1), MultiDiscrete([1, 1])])],\n [Dict({\"first\": Discrete(1), \"second\": MultiDiscrete([1, 1])}),\n Dict({\"first\": Discrete(1), \"second\": MultiDiscrete([1, 1])})],\n\n ]\n list_fail = [\n [None, None],\n [\"dedrfe\", \"qdzq\"],\n [1215.4154, 157.48],\n [\"zdzd\", (Discrete(1))],\n [Discrete(1), \"zdzd\"],\n [\"zdzd\", (1, 4, 7)],\n [(1, 4, 7), \"zdzd\"],\n [152, 485]\n ]\n\n def test_init(self):\n for o, a in self.list_work:\n with pytest.raises(TypeError):\n self.agent(o, a, \"cpu\")\n\n for o, a in self.list_fail:\n with pytest.raises(TypeError):\n self.agent(o, a, \"cpu\")\n\n def test_device(self):\n for o, a in self.list_work:\n device = torch.device(\"cpu\")\n assert device == self.agent(o, a, device).device\n\n device = None\n assert torch.device(\"cpu\") == self.agent(o, a, device).device\n\n for device in [\"dzeqdzqd\", 1512, object(), 151.515]:\n with pytest.raises(TypeError):\n self.agent(o, a, device)\n\n if torch.cuda.is_available():\n self.agent(o, a, torch.device(\"cuda\"))\n\n def test__str__(self):\n\n pass\n"
] | [
[
"torch.device",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ivary43/pandas | [
"46adc5b1c2aacb312d72729af72bc0ad600917c0",
"46adc5b1c2aacb312d72729af72bc0ad600917c0",
"46adc5b1c2aacb312d72729af72bc0ad600917c0"
] | [
"pandas/tests/series/test_alter_axes.py",
"pandas/tests/plotting/common.py",
"pandas/tests/plotting/test_hist_method.py"
] | [
"from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Index, MultiIndex, RangeIndex, Series\nimport pandas.util.testing as tm\n\n\nclass TestSeriesAlterAxes:\n\n def test_setindex(self, string_series):\n # wrong type\n msg = (r\"Index\\(\\.\\.\\.\\) must be called with a collection of some\"\n r\" kind, None was passed\")\n with pytest.raises(TypeError, match=msg):\n string_series.index = None\n\n # wrong length\n msg = (\"Length mismatch: Expected axis has 30 elements, new\"\n \" values have 29 elements\")\n with pytest.raises(ValueError, match=msg):\n string_series.index = np.arange(len(string_series) - 1)\n\n # works\n string_series.index = np.arange(len(string_series))\n assert isinstance(string_series.index, Index)\n\n # Renaming\n\n def test_rename(self, datetime_series):\n ts = datetime_series\n renamer = lambda x: x.strftime('%Y%m%d')\n renamed = ts.rename(renamer)\n assert renamed.index[0] == renamer(ts.index[0])\n\n # dict\n rename_dict = dict(zip(ts.index, renamed.index))\n renamed2 = ts.rename(rename_dict)\n tm.assert_series_equal(renamed, renamed2)\n\n # partial dict\n s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')\n renamed = s.rename({'b': 'foo', 'd': 'bar'})\n tm.assert_index_equal(renamed.index, Index(['a', 'foo', 'c', 'bar']))\n\n # index with name\n renamer = Series(np.arange(4),\n index=Index(['a', 'b', 'c', 'd'], name='name'),\n dtype='int64')\n renamed = renamer.rename({})\n assert renamed.index.name == renamer.index.name\n\n def test_rename_by_series(self):\n s = Series(range(5), name='foo')\n renamer = Series({1: 10, 2: 20})\n result = s.rename(renamer)\n expected = Series(range(5), index=[0, 10, 20, 3, 4], name='foo')\n tm.assert_series_equal(result, expected)\n\n def test_rename_set_name(self):\n s = Series(range(4), index=list('abcd'))\n for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:\n result = s.rename(name)\n assert result.name == name\n tm.assert_numpy_array_equal(result.index.values, s.index.values)\n assert s.name is None\n\n def test_rename_set_name_inplace(self):\n s = Series(range(3), index=list('abc'))\n for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:\n s.rename(name, inplace=True)\n assert s.name == name\n\n exp = np.array(['a', 'b', 'c'], dtype=np.object_)\n tm.assert_numpy_array_equal(s.index.values, exp)\n\n def test_rename_axis_supported(self):\n # Supporting axis for compatibility, detailed in GH-18589\n s = Series(range(5))\n s.rename({}, axis=0)\n s.rename({}, axis='index')\n with pytest.raises(ValueError, match='No axis named 5'):\n s.rename({}, axis=5)\n\n def test_set_name_attribute(self):\n s = Series([1, 2, 3])\n s2 = Series([1, 2, 3], name='bar')\n for name in [7, 7., 'name', datetime(2001, 1, 1), (1,), \"\\u05D0\"]:\n s.name = name\n assert s.name == name\n s2.name = name\n assert s2.name == name\n\n def test_set_name(self):\n s = Series([1, 2, 3])\n s2 = s._set_name('foo')\n assert s2.name == 'foo'\n assert s.name is None\n assert s is not s2\n\n def test_rename_inplace(self, datetime_series):\n renamer = lambda x: x.strftime('%Y%m%d')\n expected = renamer(datetime_series.index[0])\n\n datetime_series.rename(renamer, inplace=True)\n assert datetime_series.index[0] == expected\n\n def test_set_index_makes_timeseries(self):\n idx = tm.makeDateIndex(10)\n\n s = Series(range(10))\n s.index = idx\n assert s.index.is_all_dates\n\n def test_reset_index(self):\n df = tm.makeDataFrame()[:5]\n ser = df.stack()\n ser.index.names = ['hash', 
'category']\n\n ser.name = 'value'\n df = ser.reset_index()\n assert 'value' in df\n\n df = ser.reset_index(name='value2')\n assert 'value2' in df\n\n # check inplace\n s = ser.reset_index(drop=True)\n s2 = ser\n s2.reset_index(drop=True, inplace=True)\n tm.assert_series_equal(s, s2)\n\n # level\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n s = Series(np.random.randn(6), index=index)\n rs = s.reset_index(level=1)\n assert len(rs.columns) == 2\n\n rs = s.reset_index(level=[0, 2], drop=True)\n tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))\n assert isinstance(rs, Series)\n\n def test_reset_index_name(self):\n s = Series([1, 2, 3], index=Index(range(3), name='x'))\n assert s.reset_index().index.name is None\n assert s.reset_index(drop=True).index.name is None\n\n def test_reset_index_level(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]],\n columns=['A', 'B', 'C'])\n\n for levels in ['A', 'B'], [0, 1]:\n # With MultiIndex\n s = df.set_index(['A', 'B'])['C']\n\n result = s.reset_index(level=levels[0])\n tm.assert_frame_equal(result, df.set_index('B'))\n\n result = s.reset_index(level=levels[:1])\n tm.assert_frame_equal(result, df.set_index('B'))\n\n result = s.reset_index(level=levels)\n tm.assert_frame_equal(result, df)\n\n result = df.set_index(['A', 'B']).reset_index(level=levels,\n drop=True)\n tm.assert_frame_equal(result, df[['C']])\n\n with pytest.raises(KeyError, match='Level E '):\n s.reset_index(level=['A', 'E'])\n\n # With single-level Index\n s = df.set_index('A')['B']\n\n result = s.reset_index(level=levels[0])\n tm.assert_frame_equal(result, df[['A', 'B']])\n\n result = s.reset_index(level=levels[:1])\n tm.assert_frame_equal(result, df[['A', 'B']])\n\n result = s.reset_index(level=levels[0], drop=True)\n tm.assert_series_equal(result, df['B'])\n\n with pytest.raises(IndexError, match='Too many levels'):\n s.reset_index(level=[0, 1, 2])\n\n # Check that .reset_index([],drop=True) doesn't fail\n result = Series(range(4)).reset_index([], drop=True)\n expected = Series(range(4))\n tm.assert_series_equal(result, expected)\n\n def test_reset_index_range(self):\n # GH 12071\n s = Series(range(2), name='A', dtype='int64')\n series_result = s.reset_index()\n assert isinstance(series_result.index, RangeIndex)\n series_expected = DataFrame([[0, 0], [1, 1]],\n columns=['index', 'A'],\n index=RangeIndex(stop=2))\n tm.assert_frame_equal(series_result, series_expected)\n\n def test_reorder_levels(self):\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]],\n names=['L0', 'L1', 'L2'])\n s = Series(np.arange(6), index=index)\n\n # no change, position\n result = s.reorder_levels([0, 1, 2])\n tm.assert_series_equal(s, result)\n\n # no change, labels\n result = s.reorder_levels(['L0', 'L1', 'L2'])\n tm.assert_series_equal(s, result)\n\n # rotate, position\n result = s.reorder_levels([1, 2, 0])\n e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],\n codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0]],\n names=['L1', 'L2', 'L0'])\n expected = Series(np.arange(6), index=e_idx)\n tm.assert_series_equal(result, expected)\n\n def test_rename_axis_mapper(self):\n # GH 19978\n mi = MultiIndex.from_product([['a', 'b', 'c'], [1, 2]],\n names=['ll', 'nn'])\n s = Series([i for i in range(len(mi))], index=mi)\n\n result = s.rename_axis(index={'ll': 'foo'})\n assert 
result.index.names == ['foo', 'nn']\n\n result = s.rename_axis(index=str.upper, axis=0)\n assert result.index.names == ['LL', 'NN']\n\n result = s.rename_axis(index=['foo', 'goo'])\n assert result.index.names == ['foo', 'goo']\n\n with pytest.raises(TypeError, match='unexpected'):\n s.rename_axis(columns='wrong')\n\n def test_rename_axis_inplace(self, datetime_series):\n # GH 15704\n expected = datetime_series.rename_axis('foo')\n result = datetime_series\n no_return = result.rename_axis('foo', inplace=True)\n\n assert no_return is None\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize('kwargs', [{'mapper': None}, {'index': None}, {}])\n def test_rename_axis_none(self, kwargs):\n # GH 25034\n index = Index(list('abc'), name='foo')\n df = Series([1, 2, 3], index=index)\n\n result = df.rename_axis(**kwargs)\n expected_index = index.rename(None) if kwargs else index\n expected = Series([1, 2, 3], index=expected_index)\n tm.assert_series_equal(result, expected)\n\n def test_set_axis_inplace_axes(self, axis_series):\n # GH14636\n ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')\n\n expected = ser.copy()\n expected.index = list('abcd')\n\n # inplace=True\n # The FutureWarning comes from the fact that we would like to have\n # inplace default to False some day\n for inplace, warn in [(None, FutureWarning), (True, None)]:\n result = ser.copy()\n kwargs = {'inplace': inplace}\n with tm.assert_produces_warning(warn):\n result.set_axis(list('abcd'), axis=axis_series, **kwargs)\n tm.assert_series_equal(result, expected)\n\n def test_set_axis_inplace(self):\n # GH14636\n\n s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')\n\n expected = s.copy()\n expected.index = list('abcd')\n\n # inplace=False\n result = s.set_axis(list('abcd'), axis=0, inplace=False)\n tm.assert_series_equal(expected, result)\n\n # omitting the \"axis\" parameter\n with tm.assert_produces_warning(None):\n result = s.set_axis(list('abcd'), inplace=False)\n tm.assert_series_equal(result, expected)\n\n # wrong values for the \"axis\" parameter\n for axis in [2, 'foo']:\n with pytest.raises(ValueError, match='No axis named'):\n s.set_axis(list('abcd'), axis=axis, inplace=False)\n\n def test_set_axis_prior_to_deprecation_signature(self):\n s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')\n\n expected = s.copy()\n expected.index = list('abcd')\n\n for axis in [0, 'index']:\n with tm.assert_produces_warning(FutureWarning):\n result = s.set_axis(0, list('abcd'), inplace=False)\n tm.assert_series_equal(result, expected)\n\n def test_reset_index_drop_errors(self):\n # GH 20925\n\n # KeyError raised for series index when passed level name is missing\n s = Series(range(4))\n with pytest.raises(KeyError, match='must be same as name'):\n s.reset_index('wrong', drop=True)\n with pytest.raises(KeyError, match='must be same as name'):\n s.reset_index('wrong')\n\n # KeyError raised for series when level to be dropped is missing\n s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2))\n with pytest.raises(KeyError, match='not found'):\n s.reset_index('wrong', drop=True)\n\n def test_droplevel(self):\n # GH20342\n ser = Series([1, 2, 3, 4])\n ser.index = MultiIndex.from_arrays([(1, 2, 3, 4), (5, 6, 7, 8)],\n names=['a', 'b'])\n expected = ser.reset_index('b', drop=True)\n result = ser.droplevel('b', axis='index')\n tm.assert_series_equal(result, expected)\n # test that droplevel raises ValueError on axis != 0\n with pytest.raises(ValueError):\n ser.droplevel(1, axis='columns')\n",
"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport warnings\n\nimport numpy as np\nfrom numpy import random\n\nfrom pandas.util._decorators import cache_readonly\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.api import is_list_like\n\nfrom pandas import DataFrame, Series\nimport pandas.util.testing as tm\nfrom pandas.util.testing import (\n assert_is_valid_plot_return_object, ensure_clean)\n\n\n\"\"\"\nThis is a common base class used for various plotting tests\n\"\"\"\n\n\[email protected]_if_no_mpl\nclass TestPlotBase:\n\n def setup_method(self, method):\n\n import matplotlib as mpl\n from pandas.plotting._matplotlib import compat\n mpl.rcdefaults()\n\n self.mpl_ge_2_2_3 = compat._mpl_ge_2_2_3()\n self.mpl_ge_3_0_0 = compat._mpl_ge_3_0_0()\n self.mpl_ge_3_1_0 = compat._mpl_ge_3_1_0()\n\n self.bp_n_objects = 7\n self.polycollection_factor = 2\n self.default_figsize = (6.4, 4.8)\n self.default_tick_position = 'left'\n\n n = 100\n with tm.RNGContext(42):\n gender = np.random.choice(['Male', 'Female'], size=n)\n classroom = np.random.choice(['A', 'B', 'C'], size=n)\n\n self.hist_df = DataFrame({'gender': gender,\n 'classroom': classroom,\n 'height': random.normal(66, 4, size=n),\n 'weight': random.normal(161, 32, size=n),\n 'category': random.randint(4, size=n)})\n\n self.tdf = tm.makeTimeDataFrame()\n self.hexbin_df = DataFrame({\"A\": np.random.uniform(size=20),\n \"B\": np.random.uniform(size=20),\n \"C\": np.arange(20) + np.random.uniform(\n size=20)})\n\n def teardown_method(self, method):\n tm.close()\n\n @cache_readonly\n def plt(self):\n import matplotlib.pyplot as plt\n return plt\n\n @cache_readonly\n def colorconverter(self):\n import matplotlib.colors as colors\n return colors.colorConverter\n\n def _check_legend_labels(self, axes, labels=None, visible=True):\n \"\"\"\n Check each axes has expected legend labels\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n labels : list-like\n expected legend labels\n visible : bool\n expected legend visibility. 
labels are checked only when visible is\n True\n \"\"\"\n\n if visible and (labels is None):\n raise ValueError('labels must be specified when visible is True')\n axes = self._flatten_visible(axes)\n for ax in axes:\n if visible:\n assert ax.get_legend() is not None\n self._check_text_labels(ax.get_legend().get_texts(), labels)\n else:\n assert ax.get_legend() is None\n\n def _check_data(self, xp, rs):\n \"\"\"\n Check each axes has identical lines\n\n Parameters\n ----------\n xp : matplotlib Axes object\n rs : matplotlib Axes object\n \"\"\"\n xp_lines = xp.get_lines()\n rs_lines = rs.get_lines()\n\n def check_line(xpl, rsl):\n xpdata = xpl.get_xydata()\n rsdata = rsl.get_xydata()\n tm.assert_almost_equal(xpdata, rsdata)\n\n assert len(xp_lines) == len(rs_lines)\n [check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]\n tm.close()\n\n def _check_visible(self, collections, visible=True):\n \"\"\"\n Check each artist is visible or not\n\n Parameters\n ----------\n collections : matplotlib Artist or its list-like\n target Artist or its list or collection\n visible : bool\n expected visibility\n \"\"\"\n from matplotlib.collections import Collection\n if not isinstance(collections,\n Collection) and not is_list_like(collections):\n collections = [collections]\n\n for patch in collections:\n assert patch.get_visible() == visible\n\n def _get_colors_mapped(self, series, colors):\n unique = series.unique()\n # unique and colors length can be differed\n # depending on slice value\n mapped = dict(zip(unique, colors))\n return [mapped[v] for v in series.values]\n\n def _check_colors(self, collections, linecolors=None, facecolors=None,\n mapping=None):\n \"\"\"\n Check each artist has expected line colors and face colors\n\n Parameters\n ----------\n collections : list-like\n list or collection of target artist\n linecolors : list-like which has the same length as collections\n list of expected line colors\n facecolors : list-like which has the same length as collections\n list of expected face colors\n mapping : Series\n Series used for color grouping key\n used for andrew_curves, parallel_coordinates, radviz test\n \"\"\"\n\n from matplotlib.lines import Line2D\n from matplotlib.collections import (\n Collection, PolyCollection, LineCollection\n )\n conv = self.colorconverter\n if linecolors is not None:\n\n if mapping is not None:\n linecolors = self._get_colors_mapped(mapping, linecolors)\n linecolors = linecolors[:len(collections)]\n\n assert len(collections) == len(linecolors)\n for patch, color in zip(collections, linecolors):\n if isinstance(patch, Line2D):\n result = patch.get_color()\n # Line2D may contains string color expression\n result = conv.to_rgba(result)\n elif isinstance(patch, (PolyCollection, LineCollection)):\n result = tuple(patch.get_edgecolor()[0])\n else:\n result = patch.get_edgecolor()\n\n expected = conv.to_rgba(color)\n assert result == expected\n\n if facecolors is not None:\n\n if mapping is not None:\n facecolors = self._get_colors_mapped(mapping, facecolors)\n facecolors = facecolors[:len(collections)]\n\n assert len(collections) == len(facecolors)\n for patch, color in zip(collections, facecolors):\n if isinstance(patch, Collection):\n # returned as list of np.array\n result = patch.get_facecolor()[0]\n else:\n result = patch.get_facecolor()\n\n if isinstance(result, np.ndarray):\n result = tuple(result)\n\n expected = conv.to_rgba(color)\n assert result == expected\n\n def _check_text_labels(self, texts, expected):\n \"\"\"\n Check each text has expected 
labels\n\n Parameters\n ----------\n texts : matplotlib Text object, or its list-like\n target text, or its list\n expected : str or list-like which has the same length as texts\n expected text label, or its list\n \"\"\"\n if not is_list_like(texts):\n assert texts.get_text() == expected\n else:\n labels = [t.get_text() for t in texts]\n assert len(labels) == len(expected)\n for label, e in zip(labels, expected):\n assert label == e\n\n def _check_ticks_props(self, axes, xlabelsize=None, xrot=None,\n ylabelsize=None, yrot=None):\n \"\"\"\n Check each axes has expected tick properties\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n xlabelsize : number\n expected xticks font size\n xrot : number\n expected xticks rotation\n ylabelsize : number\n expected yticks font size\n yrot : number\n expected yticks rotation\n \"\"\"\n from matplotlib.ticker import NullFormatter\n axes = self._flatten_visible(axes)\n for ax in axes:\n if xlabelsize or xrot:\n if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):\n # If minor ticks has NullFormatter, rot / fontsize are not\n # retained\n labels = ax.get_xticklabels()\n else:\n labels = ax.get_xticklabels() + ax.get_xticklabels(\n minor=True)\n\n for label in labels:\n if xlabelsize is not None:\n tm.assert_almost_equal(label.get_fontsize(),\n xlabelsize)\n if xrot is not None:\n tm.assert_almost_equal(label.get_rotation(), xrot)\n\n if ylabelsize or yrot:\n if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):\n labels = ax.get_yticklabels()\n else:\n labels = ax.get_yticklabels() + ax.get_yticklabels(\n minor=True)\n\n for label in labels:\n if ylabelsize is not None:\n tm.assert_almost_equal(label.get_fontsize(),\n ylabelsize)\n if yrot is not None:\n tm.assert_almost_equal(label.get_rotation(), yrot)\n\n def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'):\n \"\"\"\n Check each axes has expected scales\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n xaxis : {'linear', 'log'}\n expected xaxis scale\n yaxis : {'linear', 'log'}\n expected yaxis scale\n \"\"\"\n axes = self._flatten_visible(axes)\n for ax in axes:\n assert ax.xaxis.get_scale() == xaxis\n assert ax.yaxis.get_scale() == yaxis\n\n def _check_axes_shape(self, axes, axes_num=None, layout=None,\n figsize=None):\n \"\"\"\n Check expected number of axes is drawn in expected layout\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n axes_num : number\n expected number of axes. Unnecessary axes should be set to\n invisible.\n layout : tuple\n expected layout, (expected number of rows , columns)\n figsize : tuple\n expected figsize. 
default is matplotlib default\n \"\"\"\n from pandas.plotting._matplotlib.tools import _flatten\n\n if figsize is None:\n figsize = self.default_figsize\n visible_axes = self._flatten_visible(axes)\n\n if axes_num is not None:\n assert len(visible_axes) == axes_num\n for ax in visible_axes:\n # check something drawn on visible axes\n assert len(ax.get_children()) > 0\n\n if layout is not None:\n result = self._get_axes_layout(_flatten(axes))\n assert result == layout\n\n tm.assert_numpy_array_equal(\n visible_axes[0].figure.get_size_inches(),\n np.array(figsize, dtype=np.float64))\n\n def _get_axes_layout(self, axes):\n x_set = set()\n y_set = set()\n for ax in axes:\n # check axes coordinates to estimate layout\n points = ax.get_position().get_points()\n x_set.add(points[0][0])\n y_set.add(points[0][1])\n return (len(y_set), len(x_set))\n\n def _flatten_visible(self, axes):\n \"\"\"\n Flatten axes, and filter only visible\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n\n \"\"\"\n from pandas.plotting._matplotlib.tools import _flatten\n\n axes = _flatten(axes)\n axes = [ax for ax in axes if ax.get_visible()]\n return axes\n\n def _check_has_errorbars(self, axes, xerr=0, yerr=0):\n \"\"\"\n Check axes has expected number of errorbars\n\n Parameters\n ----------\n axes : matplotlib Axes object, or its list-like\n xerr : number\n expected number of x errorbar\n yerr : number\n expected number of y errorbar\n \"\"\"\n axes = self._flatten_visible(axes)\n for ax in axes:\n containers = ax.containers\n xerr_count = 0\n yerr_count = 0\n for c in containers:\n has_xerr = getattr(c, 'has_xerr', False)\n has_yerr = getattr(c, 'has_yerr', False)\n if has_xerr:\n xerr_count += 1\n if has_yerr:\n yerr_count += 1\n assert xerr == xerr_count\n assert yerr == yerr_count\n\n def _check_box_return_type(self, returned, return_type, expected_keys=None,\n check_ax_title=True):\n \"\"\"\n Check box returned type is correct\n\n Parameters\n ----------\n returned : object to be tested, returned from boxplot\n return_type : str\n return_type passed to boxplot\n expected_keys : list-like, optional\n group labels in subplot case. 
If not passed,\n the function checks assuming boxplot uses single ax\n check_ax_title : bool\n Whether to check the ax.title is the same as expected_key\n Intended to be checked by calling from ``boxplot``.\n Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.\n \"\"\"\n from matplotlib.axes import Axes\n types = {'dict': dict, 'axes': Axes, 'both': tuple}\n if expected_keys is None:\n # should be fixed when the returning default is changed\n if return_type is None:\n return_type = 'dict'\n\n assert isinstance(returned, types[return_type])\n if return_type == 'both':\n assert isinstance(returned.ax, Axes)\n assert isinstance(returned.lines, dict)\n else:\n # should be fixed when the returning default is changed\n if return_type is None:\n for r in self._flatten_visible(returned):\n assert isinstance(r, Axes)\n return\n\n assert isinstance(returned, Series)\n\n assert sorted(returned.keys()) == sorted(expected_keys)\n for key, value in returned.items():\n assert isinstance(value, types[return_type])\n # check returned dict has correct mapping\n if return_type == 'axes':\n if check_ax_title:\n assert value.get_title() == key\n elif return_type == 'both':\n if check_ax_title:\n assert value.ax.get_title() == key\n assert isinstance(value.ax, Axes)\n assert isinstance(value.lines, dict)\n elif return_type == 'dict':\n line = value['medians'][0]\n axes = line.axes\n if check_ax_title:\n assert axes.get_title() == key\n else:\n raise AssertionError\n\n def _check_grid_settings(self, obj, kinds, kws={}):\n # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792\n\n import matplotlib as mpl\n\n def is_grid_on():\n xticks = self.plt.gca().xaxis.get_major_ticks()\n yticks = self.plt.gca().yaxis.get_major_ticks()\n # for mpl 2.2.2, gridOn and gridline.get_visible disagree.\n # for new MPL, they are the same.\n\n if self.mpl_ge_3_1_0:\n xoff = all(not g.gridline.get_visible() for g in xticks)\n yoff = all(not g.gridline.get_visible() for g in yticks)\n else:\n xoff = all(not g.gridOn for g in xticks)\n yoff = all(not g.gridOn for g in yticks)\n\n return not (xoff and yoff)\n\n spndx = 1\n for kind in kinds:\n\n self.plt.subplot(1, 4 * len(kinds), spndx)\n spndx += 1\n mpl.rc('axes', grid=False)\n obj.plot(kind=kind, **kws)\n assert not is_grid_on()\n\n self.plt.subplot(1, 4 * len(kinds), spndx)\n spndx += 1\n mpl.rc('axes', grid=True)\n obj.plot(kind=kind, grid=False, **kws)\n assert not is_grid_on()\n\n if kind != 'pie':\n self.plt.subplot(1, 4 * len(kinds), spndx)\n spndx += 1\n mpl.rc('axes', grid=True)\n obj.plot(kind=kind, **kws)\n assert is_grid_on()\n\n self.plt.subplot(1, 4 * len(kinds), spndx)\n spndx += 1\n mpl.rc('axes', grid=False)\n obj.plot(kind=kind, grid=True, **kws)\n assert is_grid_on()\n\n def _unpack_cycler(self, rcParams, field='color'):\n \"\"\"\n Auxiliary function for correctly unpacking cycler after MPL >= 1.5\n \"\"\"\n return [v[field] for v in rcParams['axes.prop_cycle']]\n\n\ndef _check_plot_works(f, filterwarnings='always', **kwargs):\n import matplotlib.pyplot as plt\n ret = None\n with warnings.catch_warnings():\n warnings.simplefilter(filterwarnings)\n try:\n try:\n fig = kwargs['figure']\n except KeyError:\n fig = plt.gcf()\n\n plt.clf()\n\n ax = kwargs.get('ax', fig.add_subplot(211)) # noqa\n ret = f(**kwargs)\n\n assert_is_valid_plot_return_object(ret)\n\n try:\n kwargs['ax'] = fig.add_subplot(212)\n ret = f(**kwargs)\n except Exception:\n pass\n else:\n assert_is_valid_plot_return_object(ret)\n\n with ensure_clean(return_filelike=True) as 
path:\n plt.savefig(path)\n finally:\n tm.close(fig)\n\n return ret\n\n\ndef curpath():\n pth, _ = os.path.split(os.path.abspath(__file__))\n return pth\n",
"# coding: utf-8\n\n\"\"\" Test cases for .hist method \"\"\"\n\nimport numpy as np\nfrom numpy.random import randn\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import DataFrame, Series\nfrom pandas.tests.plotting.common import TestPlotBase, _check_plot_works\nimport pandas.util.testing as tm\n\n\[email protected]_if_no_mpl\nclass TestSeriesPlots(TestPlotBase):\n\n def setup_method(self, method):\n TestPlotBase.setup_method(self, method)\n import matplotlib as mpl\n mpl.rcdefaults()\n\n self.ts = tm.makeTimeSeries()\n self.ts.name = 'ts'\n\n @pytest.mark.slow\n def test_hist_legacy(self):\n _check_plot_works(self.ts.hist)\n _check_plot_works(self.ts.hist, grid=False)\n _check_plot_works(self.ts.hist, figsize=(8, 10))\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(self.ts.hist, by=self.ts.index.month)\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)\n\n fig, ax = self.plt.subplots(1, 1)\n _check_plot_works(self.ts.hist, ax=ax)\n _check_plot_works(self.ts.hist, ax=ax, figure=fig)\n _check_plot_works(self.ts.hist, figure=fig)\n tm.close()\n\n fig, (ax1, ax2) = self.plt.subplots(1, 2)\n _check_plot_works(self.ts.hist, figure=fig, ax=ax1)\n _check_plot_works(self.ts.hist, figure=fig, ax=ax2)\n\n with pytest.raises(ValueError):\n self.ts.hist(by=self.ts.index, figure=fig)\n\n @pytest.mark.slow\n def test_hist_bins_legacy(self):\n df = DataFrame(np.random.randn(10, 2))\n ax = df.hist(bins=2)[0][0]\n assert len(ax.patches) == 2\n\n @pytest.mark.slow\n def test_hist_layout(self):\n df = self.hist_df\n with pytest.raises(ValueError):\n df.height.hist(layout=(1, 1))\n\n with pytest.raises(ValueError):\n df.height.hist(layout=[1, 1])\n\n @pytest.mark.slow\n def test_hist_layout_with_by(self):\n df = self.hist_df\n\n # _check_plot_works adds an `ax` kwarg to the method call\n # so we get a warning about an axis being cleared, even\n # though we don't explicing pass one, see GH #13188\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.gender,\n layout=(2, 1))\n self._check_axes_shape(axes, axes_num=2, layout=(2, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.gender,\n layout=(3, -1))\n self._check_axes_shape(axes, axes_num=2, layout=(3, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.height.hist, by=df.category,\n layout=(4, 1))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.height.hist, by=df.category, layout=(2, -1))\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.height.hist, by=df.category, layout=(3, -1))\n self._check_axes_shape(axes, axes_num=4, layout=(3, 2))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.height.hist, by=df.category, layout=(-1, 4))\n self._check_axes_shape(axes, axes_num=4, layout=(1, 4))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.height.hist, by=df.classroom, layout=(2, 2))\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))\n self._check_axes_shape(\n axes, axes_num=4, layout=(4, 2), figsize=(12, 7))\n\n @pytest.mark.slow\n 
def test_hist_no_overlap(self):\n from matplotlib.pyplot import subplot, gcf\n x = Series(randn(2))\n y = Series(randn(2))\n subplot(121)\n x.hist()\n subplot(122)\n y.hist()\n fig = gcf()\n axes = fig.axes\n assert len(axes) == 2\n\n @pytest.mark.slow\n def test_hist_by_no_extra_plots(self):\n df = self.hist_df\n axes = df.height.hist(by=df.gender) # noqa\n assert len(self.plt.get_fignums()) == 1\n\n @pytest.mark.slow\n def test_plot_fails_when_ax_differs_from_figure(self):\n from pylab import figure\n fig1 = figure()\n fig2 = figure()\n ax1 = fig1.add_subplot(111)\n with pytest.raises(AssertionError):\n self.ts.hist(ax=ax1, figure=fig2)\n\n\[email protected]_if_no_mpl\nclass TestDataFramePlots(TestPlotBase):\n\n @pytest.mark.slow\n def test_hist_df_legacy(self):\n from matplotlib.patches import Rectangle\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(self.hist_df.hist)\n\n # make sure layout is handled\n df = DataFrame(randn(100, 3))\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, grid=False)\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n assert not axes[1, 1].get_visible()\n\n df = DataFrame(randn(100, 1))\n _check_plot_works(df.hist)\n\n # make sure layout is handled\n df = DataFrame(randn(100, 6))\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, layout=(4, 2))\n self._check_axes_shape(axes, axes_num=6, layout=(4, 2))\n\n # make sure sharex, sharey is handled\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.hist, sharex=True, sharey=True)\n\n # handle figsize arg\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.hist, figsize=(8, 10))\n\n # check bins argument\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.hist, bins=5)\n\n # make sure xlabelsize and xrot are handled\n ser = df[0]\n xf, yf = 20, 18\n xrot, yrot = 30, 40\n axes = ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)\n self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,\n ylabelsize=yf, yrot=yrot)\n\n xf, yf = 20, 18\n xrot, yrot = 30, 40\n axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)\n self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,\n ylabelsize=yf, yrot=yrot)\n\n tm.close()\n\n ax = ser.hist(cumulative=True, bins=4, density=True)\n # height of last bin (index 5) must be 1.0\n rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]\n tm.assert_almost_equal(rects[-1].get_height(), 1.0)\n\n tm.close()\n ax = ser.hist(log=True)\n # scale of y must be 'log'\n self._check_ax_scales(ax, yaxis='log')\n\n tm.close()\n\n # propagate attr exception from matplotlib.Axes.hist\n with pytest.raises(AttributeError):\n ser.hist(foo='bar')\n\n @pytest.mark.slow\n def test_hist_non_numerical_raises(self):\n # gh-10444\n df = DataFrame(np.random.rand(10, 2))\n df_o = df.astype(np.object)\n\n msg = \"hist method requires numerical columns, nothing to plot.\"\n with pytest.raises(ValueError, match=msg):\n df_o.hist()\n\n @pytest.mark.slow\n def test_hist_layout(self):\n df = DataFrame(randn(100, 3))\n\n layout_to_expected_size = (\n {'layout': None, 'expected_size': (2, 2)}, # default is 2x2\n {'layout': (2, 2), 'expected_size': (2, 2)},\n {'layout': (4, 1), 'expected_size': (4, 1)},\n {'layout': (1, 4), 'expected_size': (1, 4)},\n {'layout': (3, 3), 'expected_size': (3, 3)},\n {'layout': (-1, 4), 'expected_size': (1, 4)},\n {'layout': (4, -1), 'expected_size': (4, 1)},\n {'layout': (-1, 2), 'expected_size': (2, 2)},\n 
{'layout': (2, -1), 'expected_size': (2, 2)}\n )\n\n for layout_test in layout_to_expected_size:\n axes = df.hist(layout=layout_test['layout'])\n expected = layout_test['expected_size']\n self._check_axes_shape(axes, axes_num=3, layout=expected)\n\n # layout too small for all 4 plots\n with pytest.raises(ValueError):\n df.hist(layout=(1, 1))\n\n # invalid format for layout\n with pytest.raises(ValueError):\n df.hist(layout=(1,))\n with pytest.raises(ValueError):\n df.hist(layout=(-1, -1))\n\n @pytest.mark.slow\n # GH 9351\n def test_tight_layout(self):\n df = DataFrame(randn(100, 3))\n _check_plot_works(df.hist)\n self.plt.tight_layout()\n\n tm.close()\n\n\[email protected]_if_no_mpl\nclass TestDataFrameGroupByPlots(TestPlotBase):\n\n @pytest.mark.slow\n def test_grouped_hist_legacy(self):\n from matplotlib.patches import Rectangle\n from pandas.plotting._matplotlib.hist import _grouped_hist\n\n df = DataFrame(randn(500, 2), columns=['A', 'B'])\n df['C'] = np.random.randint(0, 4, 500)\n df['D'] = ['X'] * 500\n\n axes = _grouped_hist(df.A, by=df.C)\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n tm.close()\n axes = df.hist(by=df.C)\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\n\n tm.close()\n # group by a key with single value\n axes = df.hist(by='D', rot=30)\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n self._check_ticks_props(axes, xrot=30)\n\n tm.close()\n # make sure kwargs to hist are handled\n xf, yf = 20, 18\n xrot, yrot = 30, 40\n\n axes = _grouped_hist(df.A, by=df.C, cumulative=True,\n bins=4, xlabelsize=xf, xrot=xrot,\n ylabelsize=yf, yrot=yrot, density=True)\n # height of last bin (index 5) must be 1.0\n for ax in axes.ravel():\n rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]\n height = rects[-1].get_height()\n tm.assert_almost_equal(height, 1.0)\n self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,\n ylabelsize=yf, yrot=yrot)\n\n tm.close()\n axes = _grouped_hist(df.A, by=df.C, log=True)\n # scale of y must be 'log'\n self._check_ax_scales(axes, yaxis='log')\n\n tm.close()\n # propagate attr exception from matplotlib.Axes.hist\n with pytest.raises(AttributeError):\n _grouped_hist(df.A, by=df.C, foo='bar')\n\n with tm.assert_produces_warning(FutureWarning):\n df.hist(by='C', figsize='default')\n\n @pytest.mark.slow\n def test_grouped_hist_legacy2(self):\n n = 10\n weight = Series(np.random.normal(166, 20, size=n))\n height = Series(np.random.normal(60, 10, size=n))\n with tm.RNGContext(42):\n gender_int = np.random.choice([0, 1], size=n)\n df_int = DataFrame({'height': height, 'weight': weight,\n 'gender': gender_int})\n gb = df_int.groupby('gender')\n axes = gb.hist()\n assert len(axes) == 2\n assert len(self.plt.get_fignums()) == 2\n tm.close()\n\n @pytest.mark.slow\n def test_grouped_hist_layout(self):\n df = self.hist_df\n msg = \"Layout of 1x1 must be larger than required size 2\"\n with pytest.raises(ValueError, match=msg):\n df.hist(column='weight', by=df.gender, layout=(1, 1))\n\n msg = \"Layout of 1x3 must be larger than required size 4\"\n with pytest.raises(ValueError, match=msg):\n df.hist(column='height', by=df.category, layout=(1, 3))\n\n msg = \"At least one dimension of layout must be positive\"\n with pytest.raises(ValueError, match=msg):\n df.hist(column='height', by=df.category, layout=(-1, -1))\n\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, column='height', by=df.gender,\n layout=(2, 1))\n self._check_axes_shape(axes, axes_num=2, layout=(2, 1))\n\n with 
tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, column='height', by=df.gender,\n layout=(2, -1))\n self._check_axes_shape(axes, axes_num=2, layout=(2, 1))\n\n axes = df.hist(column='height', by=df.category, layout=(4, 1))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n axes = df.hist(column='height', by=df.category, layout=(-1, 1))\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\n\n axes = df.hist(column='height', by=df.category,\n layout=(4, 2), figsize=(12, 8))\n self._check_axes_shape(\n axes, axes_num=4, layout=(4, 2), figsize=(12, 8))\n tm.close()\n\n # GH 6769\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(\n df.hist, column='height', by='classroom', layout=(2, 2))\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n # without column\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(df.hist, by='classroom')\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n axes = df.hist(by='gender', layout=(3, 5))\n self._check_axes_shape(axes, axes_num=2, layout=(3, 5))\n\n axes = df.hist(column=['height', 'weight', 'category'])\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\n\n @pytest.mark.slow\n def test_grouped_hist_multiple_axes(self):\n # GH 6970, GH 7069\n df = self.hist_df\n\n fig, axes = self.plt.subplots(2, 3)\n returned = df.hist(column=['height', 'weight', 'category'], ax=axes[0])\n self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n tm.assert_numpy_array_equal(returned, axes[0])\n assert returned[0].figure is fig\n returned = df.hist(by='classroom', ax=axes[1])\n self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n tm.assert_numpy_array_equal(returned, axes[1])\n assert returned[0].figure is fig\n\n with pytest.raises(ValueError):\n fig, axes = self.plt.subplots(2, 3)\n # pass different number of axes from required\n axes = df.hist(column='height', ax=axes)\n\n @pytest.mark.slow\n def test_axis_share_x(self):\n df = self.hist_df\n # GH4089\n ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True)\n\n # share x\n assert ax1._shared_x_axes.joined(ax1, ax2)\n assert ax2._shared_x_axes.joined(ax1, ax2)\n\n # don't share y\n assert not ax1._shared_y_axes.joined(ax1, ax2)\n assert not ax2._shared_y_axes.joined(ax1, ax2)\n\n @pytest.mark.slow\n def test_axis_share_y(self):\n df = self.hist_df\n ax1, ax2 = df.hist(column='height', by=df.gender, sharey=True)\n\n # share y\n assert ax1._shared_y_axes.joined(ax1, ax2)\n assert ax2._shared_y_axes.joined(ax1, ax2)\n\n # don't share x\n assert not ax1._shared_x_axes.joined(ax1, ax2)\n assert not ax2._shared_x_axes.joined(ax1, ax2)\n\n @pytest.mark.slow\n def test_axis_share_xy(self):\n df = self.hist_df\n ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True,\n sharey=True)\n\n # share both x and y\n assert ax1._shared_x_axes.joined(ax1, ax2)\n assert ax2._shared_x_axes.joined(ax1, ax2)\n\n assert ax1._shared_y_axes.joined(ax1, ax2)\n assert ax2._shared_y_axes.joined(ax1, ax2)\n"
] | [
[
"pandas.util.testing.assert_numpy_array_equal",
"pandas.Series",
"pandas.MultiIndex",
"pandas.RangeIndex",
"numpy.arange",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_series_equal",
"pandas.DataFrame",
"pandas.MultiIndex.from_arrays",
"pandas.util.testing.assert_frame_equal",
"pandas.Index",
"pandas.util.testing.makeDateIndex",
"pandas.MultiIndex.from_product",
"numpy.random.randn",
"numpy.array",
"pandas.util.testing.makeDataFrame"
],
[
"pandas.plotting._matplotlib.compat._mpl_ge_2_2_3",
"pandas.util.testing.ensure_clean",
"matplotlib.rcdefaults",
"pandas.util.testing.assert_is_valid_plot_return_object",
"pandas.core.dtypes.api.is_list_like",
"pandas.plotting._matplotlib.compat._mpl_ge_3_1_0",
"numpy.random.randint",
"pandas.util.testing.makeTimeDataFrame",
"numpy.arange",
"pandas.plotting._matplotlib.compat._mpl_ge_3_0_0",
"matplotlib.pyplot.gcf",
"pandas.util.testing.close",
"pandas.plotting._matplotlib.tools._flatten",
"numpy.random.choice",
"pandas.util.testing.assert_almost_equal",
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.rc",
"pandas.util.testing.RNGContext",
"numpy.random.normal",
"matplotlib.pyplot.clf",
"numpy.random.uniform"
],
[
"pandas.util.testing.close",
"pandas.util.testing.RNGContext",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.util.testing.makeTimeSeries",
"numpy.random.choice",
"pandas.plotting._matplotlib.hist._grouped_hist",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_almost_equal",
"pandas.DataFrame",
"matplotlib.pyplot.gcf",
"pandas.tests.plotting.common.TestPlotBase.setup_method",
"matplotlib.rcdefaults",
"numpy.random.normal",
"matplotlib.pyplot.subplot",
"numpy.random.randn",
"numpy.random.rand",
"pandas.tests.plotting.common._check_plot_works",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Hemankita/refarch-kc-container-ms | [
"c2e85eacabe8a194782835b04f3410c2d7956a9b"
] | [
"tools/generateData_sensor_malfunction.py"
] | [
"import csv\nimport json\nfrom random import gauss\nimport random\nimport datetime\nimport numpy as np\nimport sys\nimport pandas as pd\n\ndf = pd.DataFrame(columns=['Timestamp', 'ID', 'Temperature(celsius)', 'Target_Temperature(celsius)', 'Amp', 'CumulativePowerConsumption', 'ContentType', 'Humidity', 'CO2', 'Time_Door_Open', \n'Maintainence_Required', 'Defrost_Cycle'])\n\ndef buildJSON():\n \n #faulty sensor data\n id = random.randint(1001,2000)\n Today= datetime.datetime.today()\n date_list = [Today + datetime.timedelta(minutes=15*x) for x in range(0, 1000)]\n range_list=np.linspace(1,2,1000)\n index=0\n for i in range_list:\n\n timestamp = date_list[index].strftime('%Y-%m-%d T%H:%M Z')\n df.loc[i] = [timestamp, id, gauss(5.0, 2.0), 4.4, gauss(2.5,1.0), gauss(10.0,2.0), random.randint(1,5),gauss(10.5, 5.5), gauss(10.5, 5.0), gauss(8.0, 2.0), 1, 6]\n index=index+1\n\n d = [dict([\n (colname, row[i]) \n for i,colname in enumerate(df.columns)]) for row in df.values]\n return json.dumps(d)\n\n\n"
] | [
[
"pandas.DataFrame",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
xvinay28x/cat_dog_classifier_library | [
"4d56f90f9d3e91051dba71dcdea78930c4ac0e52"
] | [
"animal-classifier/__init__.py"
] | [
"from tensorflow import keras\n\ndef classify(path):\n model = keras.models.load_model(\"Cat_Dog_Classification.h5\")\n load_image = keras.preprocessing.image.load_image(path,target_size=(200,200))\n image_array = keras.preprocessing.image.img_to_array(load_image)\n reshape_array = image_array.reshape(1,200,200,3)\n array_normalize = reshape_array/255\n result = model.predict(array_normalize)\n if result >= 0.5:\n return 1\n else:\n return 0\n "
] | [
[
"tensorflow.keras.models.load_model",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.preprocessing.image.load_image"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ericlearning/style-transfer | [
"f387515b4ffe441c4677400a65b9e7fdb50c979f"
] | [
"FastStyleTransfer/utils.py"
] | [
"import os\nimport glob\nimport torch\nimport pandas as pd\nimport seaborn as sn\nimport torch.nn as nn\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom sklearn.metrics import confusion_matrix\nfrom PIL import Image\n\ndef set_lr(optimizer, lrs):\n\tif(len(lrs) == 1):\n\t\tfor param in optimizer.param_groups:\n\t\t\tparam['lr'] = lrs[0]\n\telse:\n\t\tfor i, param in enumerate(optimizer.param_groups):\n\t\t\tparam['lr'] = lrs[i]\n\ndef set_base_lr(optimizer, lrs):\n\tif(len(lrs) == 1):\n\t\tfor param in optimizer.param_groups:\n\t\t\tparam['initial_lr'] = lrs[0]\n\telse:\n\t\tfor i, param in enumerate(optimizer.param_groups):\n\t\t\tparam['initial_lr'] = lrs[i]\n\ndef get_lr(optimizer):\n\toptim_param_groups = optimizer.param_groups\n\tif(len(optim_param_groups) == 1):\n\t\treturn optim_param_groups[0]['lr']\n\telse:\n\t\tlrs = []\n\t\tfor param in optim_param_groups:\n\t\t\tlrs.append(param['lr'])\n\t\treturn lrs\n\ndef get_children_groups(model_children, param_places):\n\tcur_place = 0\n\tchildren_groups = []\n\n\tfor param_place in param_places:\n\t\tchildren_groups.append(model_children[cur_place:param_place])\n\t\tcur_place = param_place\n\n\treturn children_groups\n\ndef get_params(children):\n\tparams_use_grad = []\n\tfor child in children:\n\t\tfor param in child.parameters():\n\t\t\tif(param.requires_grad == True):\n\t\t\t\tparams_use_grad.append(param)\n\n\treturn params_use_grad\n\ndef get_optimizer(model, lrs, param_places):\n\tmodel_children = list(model.children())\n\n\t# only 1 learning rate\n\tif(len(lrs) == 1):\n\t\t# from the model's childrens, only get the parameters that use grad\n\t\tparam_use_grad = get_params(model_children)\n\n\t\t# set an Adam optimizer with the params that use grad, and the lr\n\t\toptimizer = optim.Adam(param_use_grad, lrs[0])\n\n\t# multiple learning rates\n\telse:\n\t\t# from the param_places, get chunks of children from model_children\n\t\t# children_groups is a list, and each item will be a list of children\n\t\tchildren_groups = get_children_groups(model_children, param_places)\n\n\t\t# from children_groups, get each of its children group's grad using params\n\t\t# param_groups_use_grad is a list, and each item will be a list of params that use grad\n\t\tparam_groups_use_grad = []\n\n\t\tfor children_group in children_groups:\n\t\t\tparam_group_use_grad = get_params(children_group)\n\t\t\tparam_groups_use_grad.append(param_group_use_grad)\n\n\t\t# zip param_groups_use_grad together with lrs\n\t\t# in order to feed in the corresponding lr to a given param_group\n\t\tparam_groups_use_grad_with_lrs = zip(param_groups_use_grad, lrs)\n\t\toptimizer = optim.Adam([{'params' : p, 'lr' : l}\n\t\t\tfor p, l in param_groups_use_grad_with_lrs])\n\n\treturn optimizer\n\ndef freeze_until(model, idx):\n\tfor i, child in enumerate(model.children()):\n\t\tif(i <= idx):\n\t\t\tfor param in child.parameters():\n\t\t\t\tparam.requires_grad = False\n\t\telse:\n\t\t\tfor param in child.parameters():\n\t\t\t\tparam.requires_grad = True\n\ndef histogram_sizes(img_dir, h_lim = None, w_lim = None):\n\ths, ws = [], []\n\tfor file in glob.iglob(os.path.join(img_dir, '**/*.*')):\n\t\ttry:\n\t\t\twith Image.open(file) as im:\n\t\t\t\th, w = im.size\n\t\t\t\ths.append(h)\n\t\t\t\tws.append(w)\n\t\texcept:\n\t\t\tprint('Not an Image file')\n\n\tif(h_lim is not None and w_lim is not None):\n\t\ths = [h for h in hs if h<h_lim]\n\t\tws = [w for w in ws if 
w<w_lim]\n\n\tplt.figure('Height')\n\tplt.hist(hs)\n\n\tplt.figure('Width')\n\tplt.hist(ws)\n\n\tplt.show()\n\n\treturn hs, ws\n\ndef plot_confusion_matrix(model, dl, names, classes_count, device, figsize):\n\ttrue_label = []\n\tpredicted_label = []\n\n\tfor batch in dl:\n\t\t(images, labels) = batch\n\t\ty_real = list(labels.data.cpu().numpy())\n\t\ty_pred = list(torch.argmax(model(images.to(device)), dim=1).data.cpu().numpy())\n\t\t\n\t\ttrue_label.extend(y_real)\n\t\tpredicted_label.extend(y_pred)\n\n\tcm = confusion_matrix(true_label, predicted_label)\n\tnames_with_cnt = [str(name) + ' : ' + str(cnt) for name, cnt in zip(names, classes_count)]\n\tdf = pd.DataFrame(cm, index = names_with_cnt, columns = names_with_cnt)\n\n\tplt.figure(figsize = figsize)\n\tax = plt.subplot(111)\n\tsn.heatmap(df, annot = True, ax = ax, fmt='g')\n\t\n\tplt.show()\n\ndef freeze_cur_bn(module):\n\tclassname = module.__class__.__name__\n\tif(classname.find('BatchNorm') != -1):\n\t\tmodule.eval()\n\ndef freeze_bn(model):\n\tmodel.apply(freeze_cur_bn)\n\nclass Normalize(nn.Module):\n\tdef __init__(self, mean, variance):\n\t\tsuper(Normalize, self).__init__()\n\t\tself.mean = mean.view(-1, 1, 1)\n\t\tself.variance = variance.view(-1, 1, 1)\n\n\tdef forward(self, x):\n\t\treturn (x - mean) / variance"
] | [
[
"torch.optim.Adam",
"sklearn.metrics.confusion_matrix",
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
chathurawidanage/cylon | [
"ac61b7a50880138fe67de21adee208016a94979a"
] | [
"cpp/src/experiments/generate_csv.py"
] | [
"##\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##\n\nimport numpy as np\nimport pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser(description='generate random data')\nparser.add_argument('-o', dest='output', type=str, help='output file', default='/tmp/csv.csv')\nparser.add_argument('-r', dest='rows', type=int, help='number of rows', default=10)\nparser.add_argument('-c', dest='cols', type=int, help='number of cols', default=4)\nparser.add_argument('-k', dest='idx_cols', type=int, nargs='+', help='index columns', default=[0])\nparser.add_argument('--krange', nargs=2, type=int, help='key range', default=(0, 10))\nparser.add_argument('--vrange', nargs=2, type=float, help='val range', default=(0., 1.))\nparser.add_argument('--no_header', action='store_true', help='exclude header')\n\n\ndef generate_file(output='/tmp/csv.csv', rows=10, cols=4, idx_cols=None, vrange=(0., 1.),\n krange=(0, 10), no_header=False):\n if idx_cols is None:\n idx_cols = [0]\n\n df = pd.DataFrame(np.random.rand(rows, cols) * (vrange[1] - vrange[0]) + vrange[0],\n columns=list(range(cols)))\n\n for i in idx_cols:\n assert cols > i >= 0\n df[i] = df[i].map(lambda x: int(\n krange[0] + (x - vrange[0]) * (krange[1] - krange[0]) / (vrange[1] - vrange[0])))\n\n df.to_csv(output, header=not no_header, index=False, float_format='%.3f')\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n args = vars(args)\n\n print(\"generate csv :\", args, flush=True)\n generate_file(**args)\n"
] | [
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZoRoronoa/Camera-Aware-Proxy | [
"352f900bbae330f18c2bfe2b3f2516fb4e31adea",
"352f900bbae330f18c2bfe2b3f2516fb4e31adea",
"352f900bbae330f18c2bfe2b3f2516fb4e31adea"
] | [
"reid/utils/evaluation_metrics/retrieval.py",
"reid/utils/evaluation_metrics/ranking.py",
"CAP-newCluster/reid/utils/clustering.py"
] | [
"import numpy as np\nfrom sklearn import metrics as sk_metrics\nimport torch\n\nclass PersonReIDMAP:\n '''\n Compute Rank@k and mean Average Precision (mAP) scores\n Used for Person ReID\n Test on MarKet and Duke\n '''\n\n def __init__(self, query_feature, query_cam, query_label, gallery_feature, gallery_cam, gallery_label, dist):\n '''\n :param query_feature: np.array, bs * feature_dim\n :param query_cam: np.array, 1d\n :param query_label: np.array, 1d\n :param gallery_feature: np.array, gallery_size * feature_dim\n :param gallery_cam: np.array, 1d\n :param gallery_label: np.array, 1d\n '''\n\n self.query_feature = query_feature\n self.query_cam = query_cam\n self.query_label = query_label\n self.gallery_feature = gallery_feature\n self.gallery_cam = gallery_cam\n self.gallery_label = gallery_label\n\n assert dist in ['cosine', 'euclidean']\n self.dist = dist\n\n # normalize feature for fast cosine computation\n if self.dist == 'cosine':\n self.query_feature = self.normalize(self.query_feature)\n self.gallery_feature = self.normalize(self.gallery_feature)\n\n APs = []\n CMC = []\n for i in range(len(query_label)):\n AP, cmc = self.evaluate(self.query_feature[i], self.query_cam[i], self.query_label[i],\n self.gallery_feature, self.gallery_cam, self.gallery_label)\n APs.append(AP)\n CMC.append(cmc)\n # print('{}/{}'.format(i, len(query_label)))\n\n self.APs = np.array(APs)\n self.mAP = np.mean(self.APs)\n\n min_len = 99999999\n for cmc in CMC:\n if len(cmc) < min_len:\n min_len = len(cmc)\n for i, cmc in enumerate(CMC):\n CMC[i] = cmc[0: min_len]\n self.CMC = np.mean(np.array(CMC), axis=0)\n\n def compute_AP(self, index, good_index):\n '''\n :param index: np.array, 1d\n :param good_index: np.array, 1d\n :return:\n '''\n\n num_good = len(good_index)\n hit = np.in1d(index, good_index)\n index_hit = np.argwhere(hit == True).flatten()\n\n if len(index_hit) == 0:\n AP = 0\n cmc = np.zeros([len(index)])\n else:\n precision = []\n for i in range(num_good):\n precision.append(float(i+1) / float((index_hit[i]+1)))\n AP = np.mean(np.array(precision))\n cmc = np.zeros([len(index)])\n cmc[index_hit[0]: ] = 1\n\n return AP, cmc\n\n def evaluate(self, query_feature, query_cam, query_label, gallery_feature, gallery_cam, gallery_label, rerank=False):\n '''\n :param query_feature: np.array, 1d\n :param query_cam: int\n :param query_label: int\n :param gallery_feature: np.array, 2d, gallerys_size * feature_dim\n :param gallery_cam: np.array, 1d\n :param gallery_label: np.array, 1d\n :return:\n '''\n\n # cosine score\n if self.dist is 'cosine':\n # feature has been normalize during intialization\n score = np.matmul(query_feature, gallery_feature.transpose())\n index = np.argsort(score)[::-1]\n elif self.dist is 'euclidean':\n #score = self.l2(query_feature.reshape([1, -1]), gallery_feature)\n #print('query_feature shape= {}, gallery_feature shape= {}'.format(query_feature.shape, gallery_feature.shape))\n score = self.l2(query_feature.reshape([1,-1]), gallery_feature)\n index = np.argsort(score.reshape([-1]))\n\n junk_index_1 = self.in1d(np.argwhere(query_label == gallery_label), np.argwhere(query_cam == gallery_cam))\n junk_index_2 = np.argwhere(gallery_label == -1)\n junk_index = np.append(junk_index_1, junk_index_2)\n\n good_index = self.in1d(np.argwhere(query_label == gallery_label), np.argwhere(query_cam != gallery_cam))\n index_wo_junk = self.notin1d(index, junk_index)\n\n return self.compute_AP(index_wo_junk, good_index)\n\n def in1d(self, array1, array2, invert=False):\n '''\n :param set1: np.array, 
1d\n :param set2: np.array, 1d\n :return:\n '''\n\n mask = np.in1d(array1, array2, invert=invert)\n return array1[mask]\n\n def notin1d(self, array1, array2):\n\n return self.in1d(array1, array2, invert=True)\n\n def normalize(self, x):\n norm = np.tile(np.sqrt(np.sum(np.square(x), axis=1, keepdims=True)), [1, x.shape[1]])\n return x / norm\n\n def cosine_dist(self, x, y):\n return sk_metrics.pairwise.cosine_distances(x, y)\n\n def euclidean_dist(self, x, y):\n return sk_metrics.pairwise.euclidean_distances(x, y)\n\n def l2(self, x, y):\n x = torch.from_numpy(x)\n y = torch.from_numpy(y)\n\n m, n = x.size(0), y.size(0)\n x = x.view(m, -1)\n y = y.view(n, -1)\n\n dist = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n dist.addmm_(1, -2, x, y.t())\n # We use clamp to keep numerical stability\n dist = torch.clamp(dist, 1e-8, np.inf)\n return dist.numpy()\n\n",
"from __future__ import absolute_import\nfrom collections import defaultdict\n\nimport numpy as np\nfrom sklearn.metrics.base import _average_binary_score\nfrom sklearn.metrics import precision_recall_curve, auc\n# from sklearn.metrics import average_precision_score\nimport torch\n\n\ndef to_numpy(tensor):\n if torch.is_tensor(tensor):\n return tensor.cpu().numpy()\n elif type(tensor).__module__ != 'numpy':\n raise ValueError(\"Cannot convert {} to numpy array\"\n .format(type(tensor)))\n return tensor\n\n\ndef _unique_sample(ids_dict, num):\n mask = np.zeros(num, dtype=np.bool)\n for _, indices in ids_dict.items():\n i = np.random.choice(indices)\n mask[i] = True\n return mask\n\n\ndef average_precision_score(y_true, y_score, average=\"macro\",\n sample_weight=None):\n def _binary_average_precision(y_true, y_score, sample_weight=None):\n precision, recall, thresholds = precision_recall_curve(\n y_true, y_score, sample_weight=sample_weight)\n return auc(recall, precision)\n\n return _average_binary_score(_binary_average_precision, y_true, y_score,\n average, sample_weight=sample_weight)\n\n\ndef cmc(distmat, query_ids=None, gallery_ids=None,\n query_cams=None, gallery_cams=None, topk=100,\n separate_camera_set=False,\n single_gallery_shot=False,\n first_match_break=False):\n distmat = to_numpy(distmat)\n m, n = distmat.shape\n # Fill up default values\n if query_ids is None:\n query_ids = np.arange(m)\n if gallery_ids is None:\n gallery_ids = np.arange(n)\n if query_cams is None:\n query_cams = np.zeros(m).astype(np.int32)\n if gallery_cams is None:\n gallery_cams = np.ones(n).astype(np.int32)\n # Ensure numpy array\n query_ids = np.asarray(query_ids)\n gallery_ids = np.asarray(gallery_ids)\n query_cams = np.asarray(query_cams)\n gallery_cams = np.asarray(gallery_cams)\n # Sort and find correct matches\n indices = np.argsort(distmat, axis=1)\n matches = (gallery_ids[indices] == query_ids[:, np.newaxis])\n # Compute CMC for each query\n ret = np.zeros(topk)\n num_valid_queries = 0\n for i in range(m):\n # Filter out the same id and same camera\n valid = ((gallery_ids[indices[i]] != query_ids[i]) |\n (gallery_cams[indices[i]] != query_cams[i]))\n if separate_camera_set:\n # Filter out samples from same camera\n valid &= (gallery_cams[indices[i]] != query_cams[i])\n if not np.any(matches[i, valid]): continue\n if single_gallery_shot:\n repeat = 10\n gids = gallery_ids[indices[i][valid]]\n inds = np.where(valid)[0]\n ids_dict = defaultdict(list)\n for j, x in zip(inds, gids):\n ids_dict[x].append(j)\n else:\n repeat = 1\n for _ in range(repeat):\n if single_gallery_shot:\n # Randomly choose one instance for each id\n sampled = (valid & _unique_sample(ids_dict, len(valid)))\n index = np.nonzero(matches[i, sampled])[0]\n else:\n index = np.nonzero(matches[i, valid])[0]\n delta = 1. 
/ (len(index) * repeat)\n for j, k in enumerate(index):\n if k - j >= topk: break\n if first_match_break:\n ret[k - j] += 1\n break\n ret[k - j] += delta\n num_valid_queries += 1\n if num_valid_queries == 0:\n raise RuntimeError(\"No valid query\")\n return ret.cumsum() / num_valid_queries\n\n\ndef mean_ap(distmat, query_ids=None, gallery_ids=None,\n query_cams=None, gallery_cams=None):\n distmat = to_numpy(distmat)\n m, n = distmat.shape\n # Fill up default values\n if query_ids is None:\n query_ids = np.arange(m)\n if gallery_ids is None:\n gallery_ids = np.arange(n)\n if query_cams is None:\n query_cams = np.zeros(m).astype(np.int32)\n if gallery_cams is None:\n gallery_cams = np.ones(n).astype(np.int32)\n # Ensure numpy array\n query_ids = np.asarray(query_ids)\n gallery_ids = np.asarray(gallery_ids)\n query_cams = np.asarray(query_cams)\n gallery_cams = np.asarray(gallery_cams)\n # Sort and find correct matches\n indices = np.argsort(distmat, axis=1)\n matches = (gallery_ids[indices] == query_ids[:, np.newaxis])\n # Compute AP for each query\n aps = []\n for i in range(m):\n # Filter out the same id and same camera\n valid = ((gallery_ids[indices[i]] != query_ids[i]) |\n (gallery_cams[indices[i]] != query_cams[i]))\n y_true = matches[i, valid]\n y_score = -distmat[i][indices[i]][valid]\n if not np.any(y_true): continue\n aps.append(average_precision_score(y_true, y_score))\n if len(aps) == 0:\n raise RuntimeError(\"No valid query\")\n return np.mean(aps)\n\n\n",
"from re import U\nfrom PIL.Image import new\nimport numpy\nfrom numpy.lib.arraysetops import unique\nimport torch\nfrom sklearn.cluster.dbscan_ import dbscan\nimport math\nimport faiss\nfrom reid.utils.faiss_utils import search_index_pytorch, search_raw_array_pytorch, \\\n index_init_gpu, index_init_cpu\n# torch.from_numpy(numpy.array(new_ca_features))\nres = faiss.StandardGpuResources()\nres.setDefaultNullStreamAllDevices()\n\ndef cal_distance(a, b):\n sum = 0.0\n for i in range(len(a)):\n sum += math.sqrt((a[i] - b[i]) * (a[i] - b[i]))\n return sum\n\n\ndef cluster_label(new_features, new_cams):\n from reid.utils.faiss_rerank import faiss_compute_jaccard_dist\n new_ca_features = []\n new_ir_features = []\n ca_idx_to_full_idx = []\n ir_idx_to_full_idx = []\n for i, item in enumerate(new_features):\n if new_cams[i] in [0, 1, 3, 4]:\n new_ca_features.append(item)\n ca_idx_to_full_idx.append(i)\n elif new_cams[i] in [2, 5]:\n new_ir_features.append(item)\n ir_idx_to_full_idx.append(i)\n\n W_ca = faiss_compute_jaccard_dist(torch.from_numpy(numpy.array(new_ca_features)))\n W_ir = faiss_compute_jaccard_dist(torch.from_numpy(numpy.array(new_ir_features)))\n _, updated_ca_label = dbscan(W_ca, eps=0.5, min_samples=4, metric='precomputed', n_jobs=8)\n _, updated_ir_label = dbscan(W_ir, eps=0.5, min_samples=4, metric='precomputed', n_jobs=8)\n # TODO \n for i, item in enumerate(updated_ir_label):\n if item != -1:\n updated_ir_label[i] += len(numpy.unique(updated_ca_label)) - 1\n\n ca_center_idx = {}\n ca_label_to_center = {}\n ca_center_features = []\n ca_center_to_label = []\n for i in numpy.unique(updated_ca_label):\n idx = numpy.where(updated_ca_label == i)[0]\n new_center_features = numpy.mean(numpy.array(new_ca_features)[idx], axis=0)\n ca_label_to_center[i] = len(ca_center_features)\n ca_center_to_label.append(i)\n ca_center_features.append(new_center_features)\n ca_center_idx[i] = idx\n\n ir_center_idx = {}\n ir_center_features = []\n ir_label_to_center = {}\n ir_center_to_label = []\n for i in numpy.unique(updated_ir_label):\n idx = numpy.where(updated_ir_label == i)[0]\n new_center_features = numpy.mean(numpy.array(new_ir_features)[idx], axis=0)\n ir_label_to_center[i] = len(ir_center_features)\n ir_center_to_label.append(i)\n ir_center_features.append(new_center_features)\n ir_center_idx[i] = idx\n \n cnt = 0\n ca_len = numpy.unique(updated_ca_label)\n ir_len = numpy.unique(updated_ir_label)\n\n for i in ca_len: \n if i == -1:\n continue\n tmp = torch.unsqueeze(torch.from_numpy(numpy.array(ca_center_features[ca_label_to_center[i]])), dim=0)\n _, initial_rank = search_raw_array_pytorch(res, torch.from_numpy(numpy.array(ir_center_features)), tmp, 1)\n initial_rank = initial_rank.cpu().numpy()\n for j in initial_rank[0]:\n tmp_j = torch.unsqueeze(torch.from_numpy(numpy.array(ir_center_features[j])), dim=0)\n _, initial_rank_j = search_raw_array_pytorch(res, torch.from_numpy(numpy.array(ca_center_features)), tmp_j, 1)\n initial_rank_j = initial_rank_j.cpu().numpy()\n if ca_label_to_center[i] in initial_rank_j[0]:\n cnt += 1\n idx = ir_center_idx[ir_center_to_label[j]]\n for item in idx:\n updated_ir_label[item] = i\n\n # minj = -1\n # minD = 50\n # for j in ir_len:\n # if j == -1:\n # continue\n # now_dist = cal_distance(ca_center_features[ca_label_to_center[i]], ir_center_features[ir_label_to_center[j]])\n # if(now_dist < minD):\n # minD = now_dist\n # minj = j\n # \n # minD = 50\n # mink = -1\n # for k in ca_len:\n # if k == -1:\n # continue\n # now_dist = 
cal_distance(ir_center_features[ir_label_to_center[minj]], ca_center_features[ca_label_to_center[k]])\n # if now_dist < minD:\n # minD = now_dist\n # mink = k\n # if mink == i:\n # cnt += 1\n # idx = ir_center_idx[minj]\n # for item in idx:\n # updated_ir_label[item] = i\n print(str(cnt) + \" classes in IR classes have been renewed to RGB\")\n updated_label = numpy.append(updated_ca_label, updated_ir_label) \n for i, item in enumerate(updated_ca_label):\n updated_label[ca_idx_to_full_idx[i]] = item\n for i, item in enumerate(updated_ir_label):\n updated_label[ir_idx_to_full_idx[i]] = item\n \n return len(updated_label[updated_label >= 0 ]), updated_label\n\n # all_center_features = ca_center_features + ir_center_features\n\n # for i in range(len(ca_center_features)):\n # if i != 0:\n # minj = -1\n # minD = 50\n # for j in range(len(ca_center_features) + 1, len(all_center_features)):\n # if cal_distance(all_center_features[i], all_center_features[j]) < minD:\n # minD = cal_distance(all_center_features[i], all_center_features[j])\n # minj = j\n # minD = 50\n # mink = -1\n # for k in range(1, len(ca_center_features)):\n # if cal_distance(all_center_features[minj], all_center_features[k]) < minD:\n # minD = cal_distance(all_center_features[minj], all_center_features[k])\n # mink = k\n # if mink == i:\n # ca_label = updated_ca_label[ca_center_idx[i][0]]\n # ca_label = ca_center_idx[i][0]\n # idx = ir_center_idx[minj]\n # for w in idx:\n # updated_ir_label[w] = ca_label\n\n # updated_label = numpy.append(updated_ca_label, updated_ir_label)\n # return len(updated_label[updated_label >= 0 ]), updated_label"
] | [
[
"numpy.square",
"numpy.in1d",
"torch.from_numpy",
"numpy.argwhere",
"sklearn.metrics.pairwise.euclidean_distances",
"numpy.append",
"sklearn.metrics.pairwise.cosine_distances",
"numpy.mean",
"numpy.argsort",
"torch.clamp",
"numpy.array",
"torch.pow"
],
[
"numpy.nonzero",
"numpy.random.choice",
"numpy.asarray",
"sklearn.metrics.auc",
"numpy.arange",
"torch.is_tensor",
"sklearn.metrics.precision_recall_curve",
"numpy.ones",
"sklearn.metrics.base._average_binary_score",
"numpy.mean",
"numpy.any",
"numpy.argsort",
"numpy.zeros",
"numpy.where"
],
[
"sklearn.cluster.dbscan_.dbscan",
"numpy.unique",
"numpy.append",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
miketrumpis/lfp_scroller | [
"ce4dbf85bb4d31f2eacfb5d68a5049499637722c"
] | [
"fast_scroller/h5data.py"
] | [
"import numpy as np\nfrom scipy.linalg import LinAlgError\nfrom scipy.signal import lfilter, lfilter_zi, hilbert\nfrom scipy.interpolate import interp1d\nimport h5py\nfrom tqdm import tqdm\nfrom ecogdata.util import input_as_2d\nfrom ecogdata.util import nextpow2\n\n\ndef h5mean(array, axis, rowmask=(), start=0, stop=None):\n \"\"\"Compute mean of a 2D HDF5 array in blocks\"\"\"\n\n shape = array.shape\n if axis < 0:\n axis += len(shape)\n if stop is None:\n stop = shape[1]\n if axis==1:\n if len(rowmask):\n mn_size = rowmask.sum()\n else:\n mn_size = shape[0]\n else:\n mn_size = shape[1 - axis]\n mn = np.zeros(mn_size, 'd')\n # For averaging in both dimensions, still iterate chunks in time\n # If averaging over channels:\n # * fill in the chunk averages along the way\n # If averaging over time\n # * accumulate the samples (scaled by 1/N)\n itr = H5Chunks(array, axis=1, slices=True)\n for n, sl in tqdm(enumerate(itr), desc='Computing mean', leave=True, total=itr.n_blocks):\n t_sl = sl[1]\n # just pass until t_sl.start < requested start < t_sl.stop\n if start >= t_sl.stop:\n print('Continuing')\n continue\n # now modify first good slice\n elif start > t_sl.start:\n t_sl = slice(start, t_sl.stop)\n sl = (sl[0], t_sl)\n # break loops if stop < t_sl.start\n if stop < t_sl.start:\n break\n # now modify lsat good slice\n elif stop < t_sl.stop:\n t_sl = slice(t_sl.start, stop)\n sl = (sl[0], t_sl)\n x_sl = array[sl]\n if len(rowmask):\n x_sl = x_sl[rowmask]\n \n if axis == 0:\n mn[sl[1]] = x_sl.mean(0)\n else:\n mn[:] += x_sl.sum(1) / float(array.shape[1])\n return mn\n\n\ndef h5stat(array, fn, rowmask=()):\n \"\"\"Compute timeseries of a channel-wise statistic for a 2D HDF5 array in blocks\"\"\"\n\n shape = array.shape\n T = shape[1]\n series = np.zeros(T, 'd')\n itr = H5Chunks(array, axis=1, slices=True)\n for n, sl in tqdm(enumerate(itr), desc='Computing series',\n leave=True, total=itr.n_blocks):\n x_sl = array[sl]\n if len(rowmask):\n x_sl = x_sl[rowmask]\n series[sl[1]] = fn(x_sl)\n return series\n\n\nclass ReadCache(object):\n # TODO -- enable catch for full slicing\n \"\"\"\n Buffers row indexes from memmap or hdf5 file.\n\n For cases where array[0, m:n], array[1, m:n], array[2, m:n] are\n accessed sequentially, this object buffers the C x (n-m)\n submatrix before yielding individual rows.\n\n Access such as array[p:q, m:n] is handled by the underlying\n array's __getitem__ method.\n \"\"\"\n \n def __init__(self, array):\n self._array = array\n self._current_slice = None\n self._current_seg = ()\n self.dtype = array.dtype\n self.shape = array.shape\n\n def __len__(self):\n return len(self._array)\n\n @property\n def file_array(self):\n return self._array\n\n def __getitem__(self, sl):\n indx, srange = sl\n # Only access diretly if the first part of the slice is also a slice.\n # In other cases, slice all first and then use numpy indexing\n if isinstance(indx, slice):\n return self._array[sl].copy()\n if self._current_slice != srange:\n all_sl = (slice(None), srange)\n self._current_seg = self._array[all_sl]\n self._current_slice = srange\n # always return the full range after slicing with possibly\n # complex original range\n new_range = slice(None)\n new_sl = (indx, new_range)\n return self._current_seg[new_sl].copy()\n\n\nclass CommonReferenceReadCache(ReadCache):\n \"\"\"Returns common-average re-referenced blocks\"\"\"\n\n def __getitem__(self, sl):\n indx, srange = sl\n if isinstance(indx, slice):\n # This returns without CAR?\n return self._array[sl].copy()\n if self._current_slice 
!= srange:\n all_sl = (slice(None), srange)\n if self.dtype in np.sctypes['int']:\n self._current_seg = self._array[all_sl].astype('d')\n else:\n self._current_seg = self._array[all_sl].copy()\n self._current_seg -= self._current_seg.mean(0)\n self._current_slice = srange\n # always return the full range after slicing with possibly\n # complex original range\n new_range = slice(None)\n new_sl = (indx, new_range)\n return self._current_seg[new_sl].copy()\n\n\nclass FilteredReadCache(ReadCache):\n \"\"\"\n Apply row-by-row filters to a ReadCache\n \"\"\"\n\n def __init__(self, array, filters):\n if not isinstance(filters, (tuple, list)):\n f = filters\n filters = [ f ] * len(array)\n self.filters = filters\n super(FilteredReadCache, self).__init__(array)\n\n def __getitem__(self, sl):\n idx = sl[0]\n x = super(FilteredReadCache, self).__getitem__(sl)\n if isinstance(idx, int):\n return self.filters[idx]( x )\n y = np.empty_like(x)\n for x_, y_, f in zip(x[idx], y[idx], self.filters[idx]):\n y_[:] = f(x_)\n return y\n\n\ndef _make_subtract(z):\n def _f(x):\n return x - z\n return _f\n \n\nclass DCOffsetReadCache(FilteredReadCache):\n \"\"\"\n A filtered read cache with a simple offset subtraction.\n \"\"\"\n\n def __init__(self, array, offsets):\n #filters = [lambda x: x - off for off in offsets]\n filters = [_make_subtract(off) for off in offsets]\n super(DCOffsetReadCache, self).__init__(array, filters)\n self.offsets = offsets\n\n\nclass H5Chunks(object):\n \"\"\"Iterates an HDF5 over \"chunks\" with ndarray-like access\"\"\"\n\n def __init__(self, h5array, out=None, axis=1, min_chunk=None, slices=False, reverse=False):\n \"\"\"\n Efficient block iterator for HDF5 arrays (streams over chunking sizes to read whole blocks at a time).\n\n Parameters\n ----------\n h5array: h5py.Dataset\n Vector timeseries (chan x time) or (time x chan)\n out: h5py.Dataset\n Output array for write-back. May be equal to h5array for read/write arrays. 
Write-back disabled if None\n axis: int\n Axis to iterate over\n min_chunk: int\n Ensure the output blocks are greater than this size\n slices: bool\n Return array slicing rather than data\n reverse: bool\n Yield reverse-sequence data\n\n \"\"\"\n chunk = h5array.chunks\n if len(chunk) > 2:\n raise ValueError('Only iterates for 2D arrays')\n self.h5array = h5array\n while axis < 0:\n axis += len(chunk)\n if chunk[axis] < chunk[1-axis]:\n print('chunk size larger in other dimension!')\n\n self.axis = axis\n self.size = h5array.shape[axis]\n self.chunk = chunk[axis]\n if min_chunk is not None:\n while self.chunk < min_chunk:\n self.chunk += chunk[axis]\n self.n_blocks = self.size // self.chunk\n if self.n_blocks * self.chunk < self.size:\n self.n_blocks += 1\n self.__it = self.n_blocks - 1 if reverse else 0\n self.reverse = reverse\n self.slices = slices\n self._output_source = out\n\n def write_out(self, data):\n if self._output_source is None:\n print('No output defined!')\n return\n if self.reverse:\n # data is reversed\n data = data[:, ::-1] if self.axis == 1 else data[::-1, :]\n self._output_source[self._current_sl] = data\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.__it >= self.n_blocks or self.__it < 0:\n raise StopIteration()\n n = self.__it\n rng = slice(n * self.chunk, min(self.size, (n + 1) * self.chunk))\n self._current_sl = (slice(None), rng) if self.axis else (rng, slice(None))\n if self.reverse:\n self.__it -= 1\n else:\n self.__it += 1\n if self.slices:\n return self._current_sl\n arr = self.h5array[self._current_sl]\n if self.reverse:\n return arr[:, ::-1] if self.axis == 1 else arr[::-1, :]\n return arr\n\n\nclass HandOffIter:\n \"\"\"\n Iterates over several 2D HDF5 arrays with hand-off between files. Hand-off procedure includes attemping to match\n the DC offsets between signals around the end and beginning of recording edges.\n\n Presently iterates over axis=1.\n\n Also supports write-back to the currently visible buffer within an iteration.\n\n \"\"\"\n\n # TODO: support reverse iteration\n\n def __init__(self, arrays, out=None, min_chunk=None, chans=None, blank_points=10):\n \"\"\"\n Construct hand-off iterator from HDF5 files.\n\n Parameters\n ----------\n arrays: sequence\n sequence of h5py.Datasets\n out: h5py.Dataset, str\n out may be a pre-created Dataset of the correct size or the path of output file. If output_file=='same',\n then write-back to the same input files. If None, then there is no output source.\n min_chunk: int\n Ensure the output blocks are greater than this size\n chans: list\n channels to expose on iteration (all by default)\n blank_points: int\n Blank these many points when handing off between files. 
Fill in +/- blank region with linear\n interpolation between valid points.\n\n \"\"\"\n hdf_files = [array.file.filename for array in arrays]\n self.files = hdf_files\n self.arrays = arrays\n rec_lengths = [array.shape[1] for array in arrays]\n chunk_sizes = []\n num_blocks = 0\n if min_chunk is None:\n # todo: fix dumb 2000 pts hard coding\n min_chunk = blank_points + 2000\n else:\n min_chunk = max(blank_points + 2000, min_chunk)\n for array in arrays:\n size = array.chunks[1]\n if min_chunk is not None:\n while size < min_chunk:\n size += array.chunks[1]\n if size > array.shape[1]:\n raise ValueError('Minimum chunk size {} is greater than the length of >=1 arrays'.format(min_chunk))\n chunk_sizes.append(size)\n # todo: is this +1 count correct?\n num_blocks += array.shape[1] // size + 1\n n_chan = arrays[0].shape[0]\n self.n_blocks = num_blocks\n if chans is None:\n chans = slice(None)\n else:\n if not np.iterable(chans):\n chans = (chans,)\n n_chan = len(chans)\n self.total_length = np.sum(rec_lengths)\n self.rec_lengths = rec_lengths\n self.chunk_sizes = chunk_sizes\n # Output status will be checked through the value of self._output_file:\n # if None, do nothing\n # if 'same', write back to input sources\n # else write to self._output_source defined here\n if isinstance(out, str):\n self._output_file = out\n if self._output_file.lower() != 'same':\n hdf = h5py.File(self._output_file, 'w')\n array_name = arrays[0].name.strip('/')\n out = hdf.create_dataset(array_name, shape=(n_chan, self.total_length), dtype='f', chunks=True)\n hdf.create_dataset('break_points', data=np.cumsum(rec_lengths[:-1], dtype='i'))\n self._output_source = out\n self._closing_output = True\n elif out is not None:\n self._output_source = out\n self._output_file = out.file.filename\n self._closing_output = False\n else:\n self._output_file = None\n self._closing_output = False\n self.chans = chans\n self._current_source = 0\n self._current_offset = None\n self._blanking_slice = False\n self._blank_points = blank_points\n\n def __iter__(self):\n # set up initial offset as the mean(s) in the first file\n self._current_source = self.arrays[0]\n means = self._slice_source(np.s_[self._blank_points:self._blank_points + 2000], offset=False).mean(axis=1)\n if self._output_file == 'same':\n self._output_source = self.arrays[0]\n self._current_source_num = 0\n self._current_offset = means[:, None]\n self._current_step = self.chunk_sizes[0]\n self._input_point = 0\n self._output_point = 0\n # starting on a blanking slice\n self._blanking_slice = True\n self._end_of_iter = False\n return self\n\n def _slice_source(self, time_slice, offset=True):\n if isinstance(self.chans, slice):\n arr = self._current_source[self.chans, time_slice]\n else:\n arr = np.array([self._current_source[c, time_slice] for c in self.chans])\n return arr - self._current_offset if offset else arr\n\n def _hand_off(self, start):\n # Right now the current step size will run off the end of the current source.\n # So grab the remainder of this source and hand-off to the next source.\n # Also reset the offset level to the average of the last few points\n # array_name = self.array_name\n end_point = self._current_source.shape[1]\n remainder = self._slice_source(np.s_[start:])\n old_mean = remainder.mean(1)[:, None]\n # Actually... 
use more points if the remainder is short\n if self._current_source.shape[1] - start < 100:\n longer_tail = self._slice_source(np.s[-100:])\n old_mean = longer_tail.mean(1)[:, None]\n # self._current_source.file.close()\n self._current_source_num += 1\n if self._current_source_num >= len(self.files):\n # do not change source or step size, just signal that the end is nigh\n self._end_of_iter = True\n else:\n self._current_source = self.arrays[self._current_source_num]\n self._current_step = self.chunk_sizes[self._current_source_num]\n self._blanking_slice = True\n self._break_point = self._output_point + (end_point - start)\n # get the mean of the first few points in the new source\n new_mean = self._slice_source(np.s_[self._blank_points:self._blank_points + 2000], offset=False).mean(1)\n # new_mean = np.array([self._current_source[c, self._blank_points:200].mean() for c in self.chans])\n # this is the offset to move the new mean to the old mean\n self._current_offset = new_mean[:, None] - old_mean\n return remainder\n\n def write_out(self, data):\n if self._output_file is None:\n print('No output file defined!')\n return\n elif self._output_file == 'same':\n # this condition means that data came from two sources in a hand-off\n if data.shape[1] > self._input_point:\n # last part is from current source\n self._current_source[:, :self._input_point] = data[:, -self._input_point:]\n # first part is from previous source\n n_prev = data.shape[1] - self._input_point\n prev_source = self.arrays[self._current_source_num - 1]\n prev_source[:, -n_prev:] = data[:, :n_prev]\n else:\n max_n = self._current_source.shape[1]\n start_pt = self._input_point - self._current_step\n stop_pt = min(max_n, self._input_point)\n this_slice = np.s_[:, start_pt:stop_pt]\n self._current_source[this_slice] = data\n return\n # Write this data into the output array.\n # If this is a blanking slice (b/c of hand-off) then ???\n a = self._output_point\n b = a + data.shape[1]\n self._output_source[:, a:b] = data\n self._output_source.flush()\n self._output_point = b\n\n\n def __next__(self):\n if self._end_of_iter:\n if self._closing_output:\n self._output_source.file.close()\n raise StopIteration\n start = self._input_point\n stop = start + self._current_step\n if stop > self._current_source.shape[1]:\n # print('hand off slice: {}-{}, file length {}'.format(start, stop, self._current_source.shape[1]))\n remainder = self._hand_off(start)\n # if the hand-off logic has found end-of-files then simply return the last bit and raise StopIteration\n # next time around\n if self._end_of_iter:\n # advance the input array point counter so that it can be rewound as needed in write_out\n self._input_point += self._current_step\n return remainder\n next_strip = self._slice_source(np.s_[:self._current_step])\n # Need to handle blanking!\n r_weight = np.linspace(0, 1, self._blank_points)\n left_point = remainder[:, -1][:, None]\n right_point = next_strip[:, self._blank_points][:, None]\n next_strip[:, :self._blank_points] = r_weight * right_point + (1 - r_weight) * left_point\n arr_slice = np.c_[remainder, next_strip]\n # next input is 1X the current step\n self._input_point = self._current_step\n # print('new input point: {}, file length {}'.format(self._input_point, self._current_source.shape[1]))\n return arr_slice\n else:\n # easy case!\n arr_slice = self._slice_source(np.s_[start:stop])\n self._input_point += self._current_step\n if start == 0 and self._current_source_num == 0:\n # just blank the initial points to zero\n arr_slice[:, 
:self._blank_points] = 0\n return arr_slice\n\n\ndef block_itr_factory(x, **kwargs):\n if isinstance(x, (tuple, list)):\n if 'axis' in kwargs and kwargs['axis'] == 1:\n # just drop this since it works right anyway\n kwargs.pop('axis')\n args = set(kwargs.keys())\n extra_args = args - {'out', 'min_chunks', 'chans', 'blank_points'}\n if len(extra_args):\n print('Dropping arguments not (yet) supported for HandOffIter: {}'.format(extra_args))\n supported_args = args - extra_args\n kwargs = dict((k, kwargs[k]) for k in supported_args)\n return HandOffIter(x, **kwargs)\n else:\n return H5Chunks(x, **kwargs)\n\n\ndef bfilter(b, a, x, axis=-1, out=None, filtfilt=False):\n \"\"\"\n Apply linear filter inplace over array x streaming from disk.\n\n Parameters\n ----------\n b: ndarray\n polynomial coefs for transfer function denominator\n a: ndarray\n polynomial coefs for transfer function numerator\n x: h5py.Dataset, list\n Either a single or multiple datasets. If multiple, then a HandOffIter will be used to iterate. In this mode,\n if out is given as a string then the full output will be concatenated to a single HDF5 file. Otherwise output\n will be written back to each individual file.\n axis: int\n Array axis to apply filter\n out: h5py.Dataset, str\n Output array (or file name, see details above). If multiple inputs are given, a value of None will be\n converted to 'same'\n filtfilt: bool\n If True, perform zero-phase filtering with the forward-reverse technique\n\n Returns\n -------\n out: h5py.Dataset\n Output array. Not well defined if using HandOffIter in 'same' output mode\n\n \"\"\"\n try:\n zii = lfilter_zi(b, a)\n except LinAlgError:\n # the integrating filter doesn't have valid zi\n zii = np.array([0.0])\n\n zi_sl = np.s_[None, :] if axis in (-1, 1) else np.s_[:, None]\n xc_sl = np.s_[:, :1] if axis in (-1, 1) else np.s_[:1, :]\n fir_size = len(b)\n if out is None:\n if isinstance(x, (list, tuple)):\n out = 'same'\n else:\n out = x\n itr = block_itr_factory(x, axis=axis, out=out, min_chunk=fir_size)\n for n, xc in tqdm(enumerate(itr), desc='Blockwise filtering',\n leave=True, total=itr.n_blocks):\n if n == 0:\n zi = zii[zi_sl] * xc[xc_sl]\n xcf, zi = lfilter(b, a, xc, axis=axis, zi=zi)\n itr.write_out(xcf)\n\n # presently hand off iteration only goes forward so can't filt-filt\n if isinstance(itr, HandOffIter) or not filtfilt:\n out = itr._output_source\n del xc\n del xcf\n return out\n\n # Now read and write to the same out array (however it was earlier defined)\n itr = H5Chunks(out, axis=axis, min_chunk=fir_size, out=out, reverse=True)\n for n, xc in tqdm(enumerate(itr), desc='Blockwise filtering (reverse)',\n leave=True, total=itr.n_blocks):\n if n == 0:\n zi = zii[zi_sl] * xc[xc_sl]\n xcf, zi = lfilter(b, a, xc, axis=axis, zi=zi)\n itr.write_out(xcf)\n del xc\n del xcf\n return out\n\n\ndef passthrough(x, y):\n itr = block_itr_factory(x, axis=1, out=y)\n for xc in tqdm(itr, desc='Copying to output', leave=True, total=itr.n_blocks):\n itr.write_out(xc)\n\n\n@input_as_2d(in_arr=(0, 1))\ndef interpolate_blanked(x, mask, inplace=False, kind='linear'):\n if inplace:\n y = x\n else:\n y = x.copy()\n a = np.arange(x.shape[1])\n for row_x, row_y, row_m in zip(x, y, mask):\n fv = row_x[~row_m].mean()\n f = interp1d(a[~row_m], row_x[~row_m], kind=kind,\n bounds_error=False, fill_value=fv)\n #row_y[~row_m] = row_x[~row_m]\n row_y[row_m] = f( a[row_m] )\n return y\n \n\ndef block_nan_filter(x, y, kind='linear'):\n itr = block_itr_factory(x, axis=1, out=y)\n for xc in tqdm(itr, desc='NaN 
Filtering', leave=True, total=itr.n_blocks):\n # xc = x[sl]\n nan_mask = np.isnan(xc)\n if not nan_mask.any():\n # y[sl] = xc\n itr.write_out(xc)\n continue\n xc = interpolate_blanked(xc, nan_mask, inplace=True, kind=kind)\n # y[sl] = xc\n itr.write_out(xc)\n \n\ndef square_filter(x, y):\n itr = block_itr_factory(x, axis=1, out=y)\n for xc in tqdm(itr, desc='Squaring', leave=True, total=itr.n_blocks):\n # xc = x[sl]\n # y[sl] = xc ** 2\n itr.write_out(xc ** 2)\n\n\ndef abs_filter(x, y):\n itr = block_itr_factory(x, axis=1, out=y)\n for xc in tqdm(itr, desc='Rectifying', leave=True, total=itr.n_blocks):\n # xc = x[sl]\n # y[sl] = np.abs(xc)\n itr.write_out(np.abs(xc))\n\n\ndef hilbert_envelope_filter(x, y):\n itr = block_itr_factory(x, axis=1, out=y)\n for xc in tqdm(itr, desc='Hilbert Transform', leave=True, total=itr.n_blocks):\n # xc = x[sl]\n n = xc.shape[1]\n nfft = nextpow2(n)\n\n # if n is closer to the previous power of 2, then split this block into two computations\n if (nfft - n) > (n - nfft / 2):\n n1 = int(n / 2)\n nfft = int(nfft / 2)\n y1 = hilbert(xc[..., :n1], N=nfft)[..., :n1]\n y2 = hilbert(xc[..., n1:], N=nfft)[..., :n - n1]\n # y[sl] = np.hstack((np.abs(y1), np.abs(y2)))\n itr.write_out(np.hstack((np.abs(y1), np.abs(y2))))\n else:\n y1 = hilbert(xc, N=nfft)[..., :n]\n # y[sl] = np.abs(y1)\n itr.write_out(np.abs(y1))\n"
] | [
[
"numpy.abs",
"numpy.linspace",
"numpy.isnan",
"numpy.arange",
"numpy.empty_like",
"scipy.signal.lfilter_zi",
"numpy.cumsum",
"scipy.interpolate.interp1d",
"numpy.iterable",
"scipy.signal.lfilter",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"scipy.signal.hilbert"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
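The blockwise-filtering module in the row above streams data through scipy.signal.lfilter while carrying the filter state (zi) from one chunk to the next, optionally making a second reverse pass for zero-phase (filtfilt-style) output. The following is a minimal in-memory sketch of that state-carrying idea only; it does not reproduce the H5Chunks/HandOffIter disk iteration, and the filter design, signal and chunk size are illustrative assumptions, not values taken from the module.

import numpy as np
from scipy.signal import butter, lfilter, lfilter_zi

b, a = butter(4, 0.1)                    # example low-pass filter (assumption)
x = np.random.randn(1_000_000)           # stands in for a signal streamed from disk
chunk = 65_536

zi = lfilter_zi(b, a) * x[0]             # initial state scaled by the first sample
out = np.empty_like(x)
for start in range(0, x.size, chunk):
    seg = x[start:start + chunk]
    # lfilter returns the filtered chunk plus the final state, which seeds the next chunk
    out[start:start + chunk], zi = lfilter(b, a, seg, zi=zi)

# Because lfilter is a linear recurrence, carrying zi between chunks makes `out`
# identical (up to floating-point noise) to filtering the whole array in one call:
# lfilter(b, a, x, zi=lfilter_zi(b, a) * x[0])[0]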
fakecoinbase/TheCyberHeadslashCyberHead | [
"b1c5d8c157ff5bb976778ff5f7901d82e41d7d3e"
] | [
"cyberhead/modules/brokers/coinbase/Coinbase.py"
] | [
"import cbpro\nimport pandas as pd\nfrom base64 import b64encode\n\nclass Coinbase:\n\tdef __init__(self, API_KEY, API_SECRET, API_PASS, ENV_URL=\"https://api-public.sandbox.pro.coinbase.com\"):\n\t\tself.API_KEY = API_KEY\n\t\tself.API_SECRET = API_SECRET\n\t\tself.API_PASS = API_PASS\n\t\tself.ENV_URL = ENV_URL\n\t\tself.client = cbpro.AuthenticatedClient(self.API_KEY, self.API_SECRET, self.API_PASS, api_url=self.ENV_URL)\n\n\tdef auth(self):\n\t\tprint('Authenticating Coinbase')\n\n\tdef place_market(self, action, ticker, amount):\n\t\torder = self.client.place_market_order(\n\t\t\t\tproduct_id=ticker,\n\t\t\t\tside=action,\n\t\t\t\tfunds=amount\n\t\t\t)\n\t\treturn place_market\n\n\tdef place_limit_order(self, action, ticker, entry_price, size):\n\t\tentry_order = self.client.place_limit_order(product_id=ticker,\n\t\t\t\t\t\t\tside=action,\n\t\t\t\t\t\t\tprice=entry_price,\n\t\t\t\t\t\t\tsize=size)\n\t\tprint(entry_order)\n\t\treturn entry_order\n\n\tdef get_accounts(self):\n\t\treturn self.client.get_accounts()\n\n\tdef orders(self):\n\t\treturn self.client.get_orders()\n\n\tdef fills(self):\n\t\treturn self.client.get_fills()\n\n\tdef historical_rates(self, ticker: str):\n\t\trates = self.client.get_product_historic_rates(ticker, granularity=86400)\n\t\tdf = pd.DataFrame(rates, columns=[\"time\",\"low\",\"high\",\"open\",\"close\",\"volume\"])\n\t\treturn df\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jdlaubrie/shell-elem | [
"f87cb9ca9179533d3a645a494e7ef4d39666ddc6"
] | [
"3rd_check/surgery/penalty.py"
] | [
"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nNbrOfNodes = 35\r\nkeygnra = ' TIME: GANDRA STEP: 80.000 FRAME: 1.000'\r\nkeystent = ' TIME: STENT STEP: 1.000 FRAME: 1.000'\r\nkeygnrb = ' TIME: GANDRB STEP: 100.000 FRAME: 1.000'\r\n# File for gain parameter 01\r\n#--------------------------------------------------------------------------\r\n#--------------------------------------------------------------------------\r\nfile_g01 = open('surgery_p7.rsn', 'r')\r\ngain01 = file_g01.readlines()\r\ng01 = pd.Series(gain01)\r\ng01 = g01.replace(r'\\n','', regex=True)\r\ng01 = g01.replace(r'\\r\\n','', regex=True)\r\ng01 = g01.replace(r'\\r','', regex=True)\r\nindex_Time_g01 = g01[g01.str.contains('TIME', case=False, regex=False)]\r\nindex_TimeValues_g01 = index_Time_g01.index.values\r\n#--------------------------------------------------------------------------\r\nG01 = {}\r\nfor idx in index_Time_g01.index.values:\r\n index_start = idx + 1\r\n index_end = index_start + NbrOfNodes\r\n tmp_df = g01[index_start:index_end].str.strip()\r\n tmp_df = tmp_df.str.split(' ',expand=True)\r\n np.array(tmp_df.values, dtype=float)\r\n G01[g01[idx]]=np.array(tmp_df.values, dtype=float)\r\n#every mesh along time\r\nData_g01 = np.array([], dtype=np.int64)\r\nData_g01.shape = (-1, 7)\r\nfor key in sorted(G01.keys()):\r\n Data_g01 = np.append(Data_g01,[G01[key][0,:]], axis=0)\r\n#mesh for this particular key GNRA\r\nData_g01_gnra = np.array([], dtype=np.int64)\r\nData_g01_gnra.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g01_gnra = np.append(Data_g01_gnra,[G01[keygnra][node,:]], axis=0)\r\n#mesh for this particular key STENT\r\nData_g01_stent = np.array([], dtype=np.int64)\r\nData_g01_stent.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g01_stent = np.append(Data_g01_stent,[G01[keystent][node,:]], axis=0)\r\n#mesh for this particular key GNRB\r\nData_g01_gnrb = np.array([], dtype=np.int64)\r\nData_g01_gnrb.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g01_gnrb = np.append(Data_g01_gnrb,[G01[keygnrb][node,:]], axis=0)\r\n\r\nData_g01=Data_g01[np.argsort(Data_g01[:,0])]\r\n#--------------------------------------------------------------------------\r\n# File for gain parameter 02\r\n#--------------------------------------------------------------------------\r\nfile_g02 = open('surgery_ref.rsn', 'r')\r\ngain02 = file_g02.readlines()\r\ng02 = pd.Series(gain02)\r\ng02 = g02.replace(r'\\n','', regex=True)\r\ng02 = g02.replace(r'\\r\\n','', regex=True)\r\ng02 = g02.replace(r'\\r','', regex=True)\r\nindex_Time_g02 = g02[g02.str.contains('TIME', case=False, regex=False)]\r\nindex_TimeValues_g02 = index_Time_g02.index.values\r\n#--------------------------------------------------------------------------\r\nG02 = {}\r\nfor idx in index_Time_g02.index.values:\r\n index_start = idx + 1\r\n index_end = index_start + NbrOfNodes\r\n tmp_df = g02[index_start:index_end].str.strip()\r\n tmp_df = tmp_df.str.split(' ',expand=True)\r\n np.array(tmp_df.values, dtype=float)\r\n G02[g02[idx]]=np.array(tmp_df.values, dtype=float)\r\n#every mesh along time\r\nData_g02 = np.array([], dtype=np.int64)\r\nData_g02.shape = (-1, 7)\r\nfor key in sorted(G02.keys()):\r\n Data_g02 = np.append(Data_g02,[G02[key][0,:]], axis=0)\r\n#mesh for this particular key GNRA\r\nData_g02_gnra = np.array([], dtype=np.int64)\r\nData_g02_gnra.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g02_gnra = np.append(Data_g02_gnra,[G02[keygnra][node,:]], 
axis=0)\r\n#mesh for this particular key STENT\r\nData_g02_stent = np.array([], dtype=np.int64)\r\nData_g02_stent.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g02_stent = np.append(Data_g02_stent,[G02[keystent][node,:]], axis=0)\r\n#mesh for this particular key GNRB\r\nData_g02_gnrb = np.array([], dtype=np.int64)\r\nData_g02_gnrb.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g02_gnrb = np.append(Data_g02_gnrb,[G02[keygnrb][node,:]], axis=0)\r\n\r\nData_g02=Data_g02[np.argsort(Data_g02[:,0])]\r\n#--------------------------------------------------------------------------\r\n# File for gain parameter 03\r\n#--------------------------------------------------------------------------\r\nfile_g03 = open('surgery_p9.rsn', 'r')\r\ngain03 = file_g03.readlines()\r\ng03 = pd.Series(gain03)\r\ng03 = g03.replace(r'\\n','', regex=True)\r\ng03 = g03.replace(r'\\r\\n','', regex=True)\r\ng03 = g03.replace(r'\\r','', regex=True)\r\nindex_Time_g03 = g03[g03.str.contains('TIME', case=False, regex=False)]\r\nindex_TimeValues_g03 = index_Time_g03.index.values\r\n#--------------------------------------------------------------------------\r\nG03 = {}\r\nfor idx in index_Time_g03.index.values:\r\n index_start = idx + 1\r\n index_end = index_start + NbrOfNodes\r\n tmp_df = g03[index_start:index_end].str.strip()\r\n tmp_df = tmp_df.str.split(' ',expand=True)\r\n np.array(tmp_df.values, dtype=float)\r\n G03[g03[idx]]=np.array(tmp_df.values, dtype=float)\r\n#every mesh along time\r\nData_g03 = np.array([], dtype=np.int64)\r\nData_g03.shape = (-1, 7)\r\nfor key in sorted(G03.keys()):\r\n Data_g03 = np.append(Data_g03,[G03[key][0,:]], axis=0)\r\n#mesh for this particular key GNRA\r\nData_g03_gnra = np.array([], dtype=np.int64)\r\nData_g03_gnra.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g03_gnra = np.append(Data_g03_gnra,[G03[keygnra][node,:]], axis=0)\r\n#mesh for this particular key STENT\r\nData_g03_stent = np.array([], dtype=np.int64)\r\nData_g03_stent.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g03_stent = np.append(Data_g03_stent,[G03[keystent][node,:]], axis=0)\r\n#mesh for this particular key GNRB\r\nData_g03_gnrb = np.array([], dtype=np.int64)\r\nData_g03_gnrb.shape = (-1, 7)\r\nfor node in range(NbrOfNodes):\r\n Data_g03_gnrb = np.append(Data_g03_gnrb,[G03[keygnrb][node,:]], axis=0)\r\n\r\nData_g03=Data_g03[np.argsort(Data_g03[:,0])]\r\n#--------------------------------------------------------------------------\r\n\r\nfig = plt.figure()\r\nplt.rcParams.update({'font.size': 5})\r\nplt.rc('text', usetex=False)\r\n\r\nplt.subplot(4,3,1)\r\nplt.plot(Data_g01[:,0],Data_g01[:,4]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02[:,0],Data_g02[:,4]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03[:,0],Data_g03[:,4]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Time [months]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Axial Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'a',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n 
ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,180,0,150])\r\n\r\nplt.subplot(4,3,2)\r\nplt.plot(Data_g01[:,0],Data_g01[:,5]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02[:,0],Data_g02[:,5]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03[:,0],Data_g03[:,5]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Time [months]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Circumferential Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'b',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.legend(loc='center right')\r\nplt.axis([0,180,0,350])\r\n\r\nplt.subplot(4,3,3)\r\nplt.plot(Data_g01[:,0],Data_g01[:,3]*1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02[:,0],Data_g02[:,3]*1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03[:,0],Data_g03[:,3]*1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Time [months]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Radius [mm]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'c',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,180,10,13])\r\n\r\nplt.subplot(4,3,4)\r\nplt.plot(Data_g01_gnra[:,2]*1000.0,Data_g01_gnra[:,4]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnra[:,2]*1000.0,Data_g02_gnra[:,4]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnra[:,2]*1000.0,Data_g03_gnra[:,4]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Axial Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'd',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,150])\r\n\r\nplt.subplot(4,3,5)\r\nplt.plot(Data_g01_gnra[:,2]*1000.0,Data_g01_gnra[:,5]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnra[:,2]*1000.0,Data_g02_gnra[:,5]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnra[:,2]*1000.0,Data_g03_gnra[:,5]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Circumferential Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'e',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n 
ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,350])\r\n\r\nplt.subplot(4,3,6)\r\nplt.plot(Data_g01_gnra[:,2]*1000.0,Data_g01_gnra[:,3]*1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnra[:,2]*1000.0,Data_g02_gnra[:,3]*1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnra[:,2]*1000.0,Data_g03_gnra[:,3]*1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Radius [mm]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'f',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,10,13])\r\n\r\nplt.subplot(4,3,7)\r\nplt.plot(Data_g01_stent[:,2]*1000.0,Data_g01_stent[:,4]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_stent[:,2]*1000.0,Data_g02_stent[:,4]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_stent[:,2]*1000.0,Data_g03_stent[:,4]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Axial Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'g',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,150])\r\n\r\nplt.subplot(4,3,8)\r\nplt.plot(Data_g01_stent[:,2]*1000.0,Data_g01_stent[:,5]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_stent[:,2]*1000.0,Data_g02_stent[:,5]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_stent[:,2]*1000.0,Data_g03_stent[:,5]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Circumferential Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'h',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,350])\r\n\r\nplt.subplot(4,3,9)\r\nplt.plot(Data_g01_stent[:,2]*1000.0,Data_g01_stent[:,3]*1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_stent[:,2]*1000.0,Data_g02_stent[:,3]*1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_stent[:,2]*1000.0,Data_g03_stent[:,3]*1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Radius [mm]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'i',{'color': 'k', 'fontsize': 
6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,10,13])\r\n\r\nplt.subplot(4,3,10)\r\nplt.plot(Data_g01_gnrb[:,2]*1000.0,Data_g01_gnrb[:,4]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnrb[:,2]*1000.0,Data_g02_gnrb[:,4]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnrb[:,2]*1000.0,Data_g03_gnrb[:,4]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Axial Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'j',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,150])\r\n\r\nplt.subplot(4,3,11)\r\nplt.plot(Data_g01_gnrb[:,2]*1000.0,Data_g01_gnrb[:,5]/1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnrb[:,2]*1000.0,Data_g02_gnrb[:,5]/1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnrb[:,2]*1000.0,Data_g03_gnrb[:,5]/1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Circumferential Stress [kPa]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'k',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,0,350])\r\n\r\nplt.subplot(4,3,12)\r\nplt.plot(Data_g01_gnrb[:,2]*1000.0,Data_g01_gnrb[:,3]*1000.0,'b',label='Penalty=1*10^7',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g02_gnrb[:,2]*1000.0,Data_g02_gnrb[:,3]*1000.0,'r',label='Penalty=1*10^5',linewidth=1.0,markersize=10)\r\nplt.plot(Data_g03_gnrb[:,2]*1000.0,Data_g03_gnrb[:,3]*1000.0,'g',label='Penalty=1*10^9',linewidth=1.0,markersize=10)\r\nplt.text(0.5,0.05,r'Axial position [mm]', {'color': 'k', 'fontsize': 6},\r\n ha='center',va='center',clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.5, r'Radius [mm]',{'color': 'k', 'fontsize': 6,},\r\n ha='left',va='center',rotation=90,clip_on=False,transform=plt.gca().transAxes)\r\nplt.text(0.95, 0.95, r'l',{'color': 'k', 'fontsize': 6,\r\n 'bbox': dict(boxstyle=\"round\", fc=\"w\", ec=\"k\", pad=0.2)},\r\n ha='right',va='top',transform=plt.gca().transAxes)\r\nplt.axis([0,100,10,13])\r\n\r\nfig.tight_layout()\r\nplt.show\r\n\r\nFIGURENAME = 'penalty.eps'\r\nplt.savefig(FIGURENAME)\r\nplt.savefig(fname=FIGURENAME,\r\n dpi=None,\r\n facecolor='w',\r\n edgecolor='w',\r\n orientation='portrait',\r\n format=None,\r\n transparent=False,\r\n bbox_inches=None,\r\n pad_inches=0.1,\r\n frameon=None,\r\n metadata=None)\r\n\r\nplt.close('all')\r\n\"\"\"\r\n#--------------------------------------------------------------------------\r\n\r\nradii = (Data_g02[-1,3]*1000.0, Data_g01[-1,3]*1000.0, Data_g03[-1,3]*1000.0)\r\n\r\nfig, ax = plt.subplots()\r\n\r\nindex = np.arange(3)\r\nbar_width = 0.45\r\n\r\nopacity = 0.4\r\nerror_config = {'ecolor': '0.3'}\r\n\r\nrects1 = 
ax.bar(index, radii, bar_width,\r\n alpha=opacity, color='b',\r\n error_kw=error_config, label='Penalty')\r\n\r\nax.set_xlabel('Penalty')\r\nax.set_ylabel('Radius [mm]')\r\nax.set_xticks(index + bar_width / 2)\r\nax.set_xticklabels(('1e5', '1e7', '1e9'))\r\nplt.axis([-0.25,2.7,0,20])\r\n\r\nfig.tight_layout()\r\nplt.show\r\n\r\nFIGURENAME = 'sensitivity_penalty.eps'\r\nplt.savefig(FIGURENAME)\r\nplt.savefig(fname=FIGURENAME,\r\n dpi=None,\r\n facecolor='w',\r\n edgecolor='w',\r\n orientation='portrait',\r\n format=None,\r\n transparent=False,\r\n bbox_inches=None,\r\n pad_inches=0.1,\r\n frameon=None,\r\n metadata=None)\r\n\r\nplt.close('all')\r\n\"\"\"\r\n#--------------------------------------------------------------------------\r\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"pandas.Series",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.append",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.argsort",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
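The plotting script in the row above repeats a nearly identical plot/text/axis block for each of its twelve subplots and invokes plt.show without parentheses (a no-op); the same panels can be driven from a loop so the figure layout lives in one place. A minimal sketch with synthetic data and a hypothetical column layout that mirrors the columns used above (2: axial position, 3: radius, 4: axial stress, 5: circumferential stress):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
# Synthetic stand-ins for the three result files: 35 nodes x 7 columns, as in the script.
runs = {'Penalty=1e5': ('r', rng.random((35, 7))),
        'Penalty=1e7': ('b', rng.random((35, 7))),
        'Penalty=1e9': ('g', rng.random((35, 7)))}
# (column index, scale factor, y-label) for each panel.
panels = [(4, 1e-3, 'Axial Stress [kPa]'),
          (5, 1e-3, 'Circumferential Stress [kPa]'),
          (3, 1e3, 'Radius [mm]')]

fig, axes = plt.subplots(1, 3, figsize=(9, 3))
for ax, (col, scale, ylabel) in zip(axes, panels):
    for label, (color, data) in runs.items():
        ax.plot(data[:, 2] * 1000.0, data[:, col] * scale, color, label=label, linewidth=1.0)
    ax.set_xlabel('Axial position [mm]', fontsize=6)
    ax.set_ylabel(ylabel, fontsize=6)
axes[0].legend(loc='best', fontsize=5)
fig.tight_layout()
plt.savefig('penalty_sketch.eps')   # plt.show() would be needed for interactive display
plt.close(fig)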
NRuf77/proset | [
"101d491e05c2423faddca31029232982f46d8831",
"101d491e05c2423faddca31029232982f46d8831",
"101d491e05c2423faddca31029232982f46d8831",
"101d491e05c2423faddca31029232982f46d8831",
"101d491e05c2423faddca31029232982f46d8831",
"101d491e05c2423faddca31029232982f46d8831",
"101d491e05c2423faddca31029232982f46d8831",
"101d491e05c2423faddca31029232982f46d8831"
] | [
"scripts/wine/wine_explain.py",
"scripts/checker/checker_knn_fit.py",
"proset/utility/other.py",
"scripts/cancer/cancer_prepare_data.py",
"scripts/iris_2f/iris_2f_xgb_diagnostics.py",
"scripts/xor_6_6f/xor_6_6f_xgb_diagnostics.py",
"scripts/checker_rot/checker_rot_xgb_fit.py",
"scripts/wine/wine_knn_diagnostics.py"
] | [
"\"\"\"Explain proset classifier trained on wine classification data.\r\n\r\nCopyright by Nikolaus Ruf\r\nReleased under the MIT license - see LICENSE file for details\r\n\"\"\"\r\n\r\nfrom copy import deepcopy\r\nimport gzip\r\nimport os\r\nimport pickle\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport shap\r\n\r\nimport proset.utility as utility\r\n\r\n\r\nprint(\"* Apply user settings\")\r\ninput_path = \"scripts/results\"\r\noutput_path = \"scripts/reports\"\r\ninput_files = [\r\n \"wine_2d_05_model.gz\",\r\n \"wine_2d_50_model.gz\",\r\n \"wine_2d_95_model.gz\",\r\n \"wine_1d_model.gz\",\r\n \"wine_fix_model.gz\",\r\n \"wine_fix_opt_model.gz\"\r\n]\r\nprint(\" Select input file:\")\r\nfor i, file_name in enumerate(input_files):\r\n print(\" {} - {}\".format(i, file_name))\r\nchoice = int(input())\r\ninput_file = input_files[choice]\r\nexport_file = input_file.replace(\".gz\", \"_explain.xlsx\")\r\nmodel_name = input_file.replace(\".gz\", \"\")\r\n\r\nprint(\"* Load model fit results\")\r\nwith gzip.open(os.path.join(input_path, input_file), mode=\"rb\") as file:\r\n result = pickle.load(file)\r\n\r\nprint(\"* Determine reference point\")\r\nscale = np.sqrt(result[\"model\"][\"transform\"].var_)\r\noffset = result[\"model\"][\"transform\"].mean_\r\ntrain_features = result[\"model\"][\"transform\"].transform(result[\"data\"][\"X_train\"])\r\ntrain_labels = result[\"data\"][\"y_train\"]\r\nreference = utility.choose_reference_point(\r\n features=train_features,\r\n model=result[\"model\"][\"model\"],\r\n scale=scale,\r\n offset=offset\r\n)\r\nutility.print_point_report(\r\n reference=reference,\r\n feature_names=result[\"data\"][\"feature_names\"],\r\n target_names=result[\"model\"].classes_\r\n)\r\n\r\nprint(\"* Show global results\")\r\ntest_features = result[\"model\"][\"transform\"].transform(result[\"data\"][\"X_test\"])\r\ntest_labels = result[\"data\"][\"y_test\"]\r\nprediction, familiarity = result[\"model\"][\"model\"].predict(X=test_features, compute_familiarity=True)\r\nmisclassified = prediction != test_labels\r\nplotter = utility.ClassifierPlots(\r\n model=result[\"model\"][\"model\"],\r\n model_name=model_name,\r\n feature_names=result[\"data\"][\"feature_names\"],\r\n scale=scale,\r\n offset=offset\r\n)\r\nx_range, y_range = plotter.plot_batch_map(\r\n batch=1,\r\n features=test_features,\r\n target=test_labels,\r\n comment=\"test samples\",\r\n highlight=misclassified,\r\n highlight_name=\"misclassified\",\r\n reference=reference[\"features_raw\"]\r\n)\r\nplotter.plot_features(\r\n batch=1,\r\n features=test_features,\r\n target=test_labels,\r\n comment=\"test samples\",\r\n highlight=misclassified,\r\n highlight_name=\"misclassified\",\r\n reference=reference[\"features_raw\"],\r\n show_index=False\r\n)\r\n\r\nprint(\"* Compute global SHAP values\")\r\nshrunk_model = deepcopy(result[\"model\"][\"model\"])\r\nshrunk_model.shrink()\r\nactive_features = reference[\"active_features\"]\r\nactive_feature_names = result[\"data\"][\"feature_names\"][active_features]\r\nexplainer = shap.Explainer(\r\n model=shrunk_model.predict_proba,\r\n masker=reference[\"features_raw\"][0:1, active_features],\r\n feature_names=active_feature_names\r\n)\r\nshap_values = explainer(test_features[:, active_features])\r\nfor i, label in enumerate(result[\"model\"].classes_):\r\n plt.figure()\r\n shap.plots.bar(shap_values[:, :, i])\r\n plt.title(\"Average SHAP values for class {} prediction\".format(label))\r\n\r\nprint(\"* Find single point with worst classification 
result\")\r\nproba = result[\"model\"][\"model\"].predict_proba(test_features)\r\ntruth_int = result[\"model\"][\"model\"].label_encoder_.transform(test_labels)\r\nworst_ix = np.argmin(proba[np.arange(test_labels.shape[0]), truth_int])\r\nworst_features = test_features[worst_ix:(worst_ix + 1), :]\r\nworst_label = test_labels[worst_ix]\r\nworst_label_int = truth_int[worst_ix]\r\nworst_point = {\r\n \"index\": worst_ix,\r\n \"features_raw\": worst_features,\r\n \"features_processed\": worst_features[:, active_features] * scale[active_features] + offset[active_features],\r\n \"prediction\": proba[worst_ix, :],\r\n \"num_features\": test_features.shape[1],\r\n \"active_features\": active_features\r\n} # use active_features here to ensure same order of content as reference\r\nprint(\" True class = '{}'\".format(test_labels[worst_ix]))\r\nutility.print_point_report(\r\n reference=worst_point,\r\n feature_names=result[\"data\"][\"feature_names\"],\r\n target_names=result[\"model\"].classes_\r\n)\r\n\r\nprint(\"* Generate explanation report\")\r\nexplain = result[\"model\"][\"model\"].explain(\r\n X=worst_point[\"features_raw\"],\r\n y=worst_label,\r\n familiarity=familiarity,\r\n sample_name=\"test sample {}\".format(worst_ix),\r\n feature_names=result[\"data\"][\"feature_names\"],\r\n scale=scale,\r\n offset=offset\r\n)\r\nutility.write_report(file_path=os.path.join(output_path, export_file), report=explain)\r\n\r\nprint(\"* Show results for single point\")\r\nplotter.plot_batch_map(\r\n batch=1,\r\n features=train_features,\r\n target=train_labels,\r\n comment=\"training samples\",\r\n reference=reference[\"features_raw\"],\r\n explain_features=worst_point[\"features_raw\"],\r\n explain_target=worst_label,\r\n x_range=x_range,\r\n y_range=y_range\r\n)\r\nplotter.plot_batch_map(\r\n batch=1,\r\n reference=reference[\"features_raw\"],\r\n explain_features=worst_point[\"features_raw\"],\r\n explain_target=worst_label,\r\n x_range=x_range,\r\n y_range=y_range\r\n)\r\nplotter.plot_features(\r\n batch=1,\r\n features=train_features,\r\n target=train_labels,\r\n comment=\"training samples\",\r\n reference=reference[\"features_raw\"],\r\n explain_features=worst_point[\"features_raw\"],\r\n explain_target=worst_label,\r\n show_index=False\r\n)\r\n\r\nprint(\"* Compute SHAP values for single point\")\r\nfor i in range(proba.shape[1]):\r\n explain = shap_values[worst_ix, :, i]\r\n shap.plots.force(\r\n base_value=explain.base_values,\r\n shap_values=explain.values,\r\n features=test_features[worst_ix:(worst_ix + 1), active_features],\r\n feature_names=active_feature_names,\r\n matplotlib=True\r\n )\r\n plt.gca().set_position([0.1, -0.25, 0.8, 0.8]) # force plot messes up the axes position within the figure\r\n plt.suptitle(\"SHAP force plot: probability for class '{}' is {:0.2f}, true class is '{}'\".format(\r\n result[\"model\"].classes_[i], proba[worst_ix, i], worst_label\r\n ))\r\n\r\nprint(\"* Show cross-sections of decision surface\")\r\nimportance = np.mean(np.abs(shap_values[:, :, worst_label_int].values), axis=0)\r\ntop_two = active_features[np.argsort(importance)[-1:-3:-1]]\r\nplotter.plot_surface(\r\n features=test_features,\r\n target=None, # suppress sample plot, features only used to determine plot ranges\r\n baseline=worst_point[\"features_raw\"],\r\n plot_index=top_two,\r\n comment=\"globally most important features\",\r\n reference=reference[\"features_raw\"],\r\n explain_features=worst_point[\"features_raw\"],\r\n explain_target=worst_label,\r\n familiarity=familiarity,\r\n 
quantiles=(0.01, 0.05),\r\n use_proba=True\r\n)\r\nimportance = np.abs(shap_values[worst_ix, :, worst_label_int].values)\r\ntop_two = active_features[np.argsort(importance)[-1:-3:-1]]\r\nplotter.plot_surface(\r\n features=test_features,\r\n target=None, # suppress sample plot, features only used to determine plot ranges\r\n baseline=worst_point[\"features_raw\"],\r\n plot_index=top_two,\r\n comment=\"most important features for single point\",\r\n reference=reference[\"features_raw\"],\r\n explain_features=worst_point[\"features_raw\"],\r\n explain_target=worst_label,\r\n familiarity=familiarity,\r\n quantiles=(0.01, 0.05),\r\n use_proba=True\r\n)\r\n\r\nprint(\"* Done\")\r\n",
"\"\"\"Train KNN classifier on a deterministic checkerboard pattern.\r\n\r\nCopyright by Nikolaus Ruf\r\nReleased under the MIT license - see LICENSE file for details\r\n\"\"\"\r\n\r\nimport gzip\r\nimport os\r\nimport pickle\r\n\r\nimport numpy as np\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nfrom proset.benchmarks import fit_knn_classifier\r\n\r\n\r\nprint(\"* Apply user settings\")\r\nrandom_state = np.random.RandomState(12345)\r\nworking_path = \"scripts/results\"\r\ndata_file = \"checker_data.gz\"\r\noutput_file = \"checker_knn_model.gz\"\r\n\r\nprint(\"* Load data\")\r\nwith gzip.open(os.path.join(working_path, data_file), mode=\"rb\") as file:\r\n data = pickle.load(file)\r\n\r\nprint(\"* Select hyperparameters via cross-validation\")\r\nresult = fit_knn_classifier(\r\n features=data[\"X_train\"],\r\n labels=data[\"y_train\"],\r\n transform=StandardScaler(),\r\n num_folds=5,\r\n random_state=random_state\r\n)\r\n\r\nprint(\"* Save results\")\r\nresult[\"data\"] = data\r\nwith gzip.open(os.path.join(working_path, output_file), mode=\"wb\") as file:\r\n pickle.dump(result, file)\r\n\r\nprint(\"* Done\")\r\n",
"\"\"\"Various helper functions for working with proset models.\r\n\r\nCopyright by Nikolaus Ruf\r\nReleased under the MIT license - see LICENSE file for details\r\n\"\"\"\r\n\r\nfrom copy import deepcopy\r\n\r\nimport numpy as np\r\nfrom sklearn.base import is_classifier\r\nfrom sklearn.metrics import pairwise_distances\r\n\r\nfrom proset.shared import check_feature_names, check_scale_offset, LOG_OFFSET\r\n\r\n\r\ndef print_hyperparameter_report(result): # pragma: no cover\r\n \"\"\"Print report for hyperparameter selection.\r\n\r\n :param result: as return value of select_hyperparameters()\r\n :return: no return value; results printed to console\r\n \"\"\"\r\n print(\"{:9s} {:8s} {:8s} {:8s}\".format(\"stage 1\", \"lambda_v\", \"lambda_w\", \"log-loss\"))\r\n print(\"{:9s} {:8.1e} {:8.1e} {:8.2f}\".format(\r\n \"optimal\",\r\n result[\"stage_1\"][\"lambda_grid\"][result[\"stage_1\"][\"best_index\"], 0],\r\n result[\"stage_1\"][\"lambda_grid\"][result[\"stage_1\"][\"best_index\"], 1],\r\n -result[\"stage_1\"][\"scores\"][result[\"stage_1\"][\"best_index\"]]\r\n ))\r\n print(\"{:9s} {:8s} {:8s} {:8.2f}\".format(\"threshold\", \"\", \"\", -result[\"stage_1\"][\"threshold\"]))\r\n print(\"{:9s} {:8.1e} {:8.1e} {:8.2f}\\n\".format(\r\n \"selected\",\r\n result[\"stage_1\"][\"lambda_grid\"][result[\"stage_1\"][\"selected_index\"], 0],\r\n result[\"stage_1\"][\"lambda_grid\"][result[\"stage_1\"][\"selected_index\"], 1],\r\n -result[\"stage_1\"][\"scores\"][result[\"stage_1\"][\"selected_index\"]]\r\n ))\r\n print(\"{:9s} {:8s} {:8s} {:8s}\".format(\"stage 2\", \"batches\", \"\", \"log-loss\"))\r\n print(\"{:9s} {:8d} {:8s} {:8.2f}\".format(\r\n \"optimal\",\r\n result[\"stage_2\"][\"num_batch_grid\"][result[\"stage_2\"][\"best_index\"]],\r\n \"\",\r\n -result[\"stage_2\"][\"scores\"][result[\"stage_2\"][\"best_index\"]]\r\n ))\r\n print(\"{:9s} {:8s} {:8s} {:8.2f}\".format(\"threshold\", \"\", \"\", -result[\"stage_2\"][\"threshold\"]))\r\n print(\"{:9s} {:8d} {:8s} {:8.2f}\\n\".format(\r\n \"selected\",\r\n result[\"stage_2\"][\"num_batch_grid\"][result[\"stage_2\"][\"selected_index\"]],\r\n \"\",\r\n -result[\"stage_2\"][\"scores\"][result[\"stage_2\"][\"selected_index\"]]\r\n ))\r\n\r\n\r\ndef print_feature_report(model, feature_names=None): # pragma: no cover\r\n \"\"\"Print summary of selected features per batch with weights.\r\n\r\n :param model: a fitted proset model\r\n :param feature_names: list of strings or None; feature names; pass None to use X0, X1, etc.\r\n :return: no return value; results printed to console\r\n \"\"\"\r\n report = model.set_manager_.get_feature_weights()\r\n feature_names = check_feature_names(\r\n num_features=model.n_features_in_,\r\n feature_names=feature_names,\r\n active_features=report[\"feature_index\"]\r\n )\r\n max_length = max(np.max([len(name) for name in feature_names]), len(\"feature\"))\r\n base_format = \"{:\" + str(max_length) + \"s} \"\r\n header = base_format.format(\"feature\") + \" \".join([\r\n \"{:>8s}\".format(\"batch \" + str(i + 1)) for i in range(report[\"weight_matrix\"].shape[0])\r\n ])\r\n line_format = base_format + \" \".join([\"{:8.2f}\"] * report[\"weight_matrix\"].shape[0])\r\n print(header)\r\n for i in range(report[\"weight_matrix\"].shape[1]):\r\n print(line_format.format(feature_names[i], *report[\"weight_matrix\"][:, i]))\r\n print(\"\")\r\n\r\n\r\ndef choose_reference_point(features, model, scale=None, offset=None):\r\n \"\"\"Choose a 'typical' sample as reference point for model explanation.\r\n\r\n :param features: 2D 
numpy float array; feature matrix\r\n :param model: fitted proset model\r\n :param scale: 1D numpy array of positive floats or None; scale for transforming prototype features back to their\r\n original values; pass None for no transform\r\n :param offset: 1D numpy array of floats or None; offset for transforming prototype features back to their original\r\n values; pass None for no transform\r\n :return: dict with the following fields:\r\n - index: non-negative integer; row index for selected point in features\r\n - features_raw: the corresponding row of features as a 2D array\r\n - features_processed: as above but reduced to active feature and transformed back to original values if scale or\r\n offset are given\r\n - prediction: for a classifier, the predicted probabilities for each class belonging to the selected point; for\r\n a regressor, the predicted target value\r\n - num_features: positive integer; original number of features\r\n - active_features: 1D numpy array of non-negative integers; active features for the model\r\n \"\"\"\r\n scale, offset = check_scale_offset(num_features=features.shape[1], scale=scale, offset=offset)\r\n num_features = features.shape[1]\r\n if is_classifier(model):\r\n prediction = model.predict_proba(features)\r\n else: # pragma: no cover\r\n raise NotImplementedError(\"Function choose_reference_point() does not handle regressors yet.\")\r\n active_features = model.set_manager_.get_active_features()\r\n index = _find_best_point(\r\n features=features[:, active_features],\r\n prediction=prediction,\r\n is_classifier_=is_classifier(model)\r\n )\r\n return {\r\n \"index\": index,\r\n \"features_raw\": features[index:(index + 1), :].copy(),\r\n \"features_processed\":\r\n features[index:(index + 1), active_features] * scale[active_features] + offset[active_features],\r\n \"prediction\": prediction[index],\r\n \"num_features\": num_features,\r\n \"active_features\": active_features\r\n }\r\n\r\n\r\ndef _find_best_point(features, prediction, is_classifier_):\r\n \"\"\"Identify the best reference point for model explanation.\r\n\r\n :param features: 2D numpy float array; feature matrix\r\n :param prediction: numpy array; to explain a classifier, pass the matrix of predicted probabilities corresponding to\r\n the features; regressors are not supported yet\r\n :param is_classifier_: boolean; whether the model to be explained is a classifier or regressor\r\n :return: non-negative integer; row index\r\n \"\"\"\r\n if not is_classifier_: # pragma: no cover\r\n raise NotImplementedError(\"Function choose_reference_point() does not handle regressors yet.\")\r\n points = _compute_borda_points(np.mean(pairwise_distances(prediction), axis=1))\r\n if features.shape[1] > 0: # safeguard in case the model has no active features\r\n points += _compute_borda_points(np.mean(pairwise_distances(features), axis=1))\r\n candidates = np.nonzero(points == np.max(points))[0]\r\n if candidates.shape[0] > 1:\r\n entropy = -np.sum(np.log(prediction[candidates] * LOG_OFFSET) * prediction[candidates], axis=1)\r\n # this formulation is for classifiers only\r\n return candidates[np.argmax(entropy)]\r\n return candidates[0]\r\n\r\n\r\ndef _compute_borda_points(metric):\r\n \"\"\"Compute points for Borda voting with ties from value of a metric that needs to be minimized.\r\n\r\n Each sample is assigned a full point for every other sample that has a strictly larger value of the metric and a\r\n half point for each sample with the same value.\r\n\r\n :param metric: 1D numpy float array; metric 
values - lower is better\r\n :return: 1D numpy float array of the same length as metric; points assigned to each score\r\n \"\"\"\r\n inverse, counts = np.unique(metric, return_inverse=True, return_counts=True)[1:]\r\n points = (counts - 1) / 2.0 + np.hstack([0, np.cumsum(counts[-1:0:-1])])[-1::-1]\r\n return points[inverse]\r\n\r\n\r\ndef print_point_report(reference, feature_names=None, feature_format=\"1f\", target_names=None): # pragma: no cover\r\n \"\"\"Print information on selected point.\r\n\r\n :param reference: as return value of choose_reference_point()\r\n :param feature_names: list of strings or None; feature names; pass None to use X0, X1, etc.\r\n :param feature_format: string; format specifier for feature values converted to string; provide only decimals and\r\n convention ('f' for float, 'e' for scientific, etc.)\r\n :param target_names: string, list of strings, or None; for a regression problem, pass a single string or None to use\r\n no name; for a classification problem, pass a list of class labels or None to use integer labels\r\n :return: no return value; results printed to console\r\n \"\"\"\r\n feature_names, target_names, is_classifier_ = _check_point_input(\r\n reference=reference,\r\n feature_names=feature_names,\r\n target_names=target_names\r\n )\r\n print(\"Properties of point with sample index {}:\".format(reference[\"index\"]))\r\n max_length = np.max([len(name) for name in feature_names] + [len(\"feature\")])\r\n base_format = \"{:>\" + str(max_length) + \"s} \"\r\n header_format = base_format + \"{:>8s}\"\r\n row_format = base_format + \"{:8.\" + feature_format + \"}\"\r\n print(header_format.format(\"feature\", \"value\"))\r\n for i, name in enumerate(feature_names):\r\n print(row_format.format(name, reference[\"features_processed\"][0, i]))\r\n print(\"Prediction for point with sample index {}:\".format(reference[\"index\"]))\r\n if is_classifier_:\r\n max_length = np.max([len(name) for name in target_names] + [len(\"class\")])\r\n base_format = \"{:>\" + str(max_length) + \"s} \"\r\n header_format = base_format + \"{:>10s}\"\r\n row_format = base_format + \"{:11.2f}\"\r\n print(header_format.format(\"class\", \"probability\"))\r\n for i, name in enumerate(target_names):\r\n print(row_format.format(name, reference[\"prediction\"][i]))\r\n else:\r\n print(\"The predicted target value {} is {}.\".format(target_names, reference[\"prediction\"]))\r\n\r\n\r\ndef _check_point_input(reference, feature_names, target_names):\r\n \"\"\"Check input for print_point_report() for consistency and apply defaults where required.\r\n\r\n :param reference: see docstring of print_point_report() for details\r\n :param feature_names: see docstring of print_point_report() for details\r\n :param target_names: see docstring of print_point_report() for details\r\n :return: three return values:\r\n - list of strings: feature names; feature names for active features\r\n - string or list of strings: target names; as input or defaults if input is None\r\n - boolean: True if reference belongs to a classifier, False if it belongs to a regressor\r\n \"\"\"\r\n feature_names = check_feature_names(\r\n num_features=reference[\"num_features\"],\r\n feature_names=feature_names,\r\n active_features=reference[\"active_features\"]\r\n )\r\n is_classifier_ = isinstance(reference[\"prediction\"], np.ndarray)\r\n if is_classifier_:\r\n if isinstance(target_names, str):\r\n raise TypeError(\r\n \"Parameter target_names must be a list of strings or None if reference belongs to a classifier.\"\r\n 
)\r\n num_classes = reference[\"prediction\"].shape[0]\r\n if target_names is not None:\r\n if len(target_names) != num_classes:\r\n raise ValueError(\" \".join([\r\n \"Parameter target_names must have one element per class if passing a list\",\r\n \"and reference belongs to a classifier.\"\r\n ]))\r\n target_names = deepcopy(target_names)\r\n else:\r\n target_names = [str(i) for i in range(num_classes)]\r\n else:\r\n if isinstance(target_names, list):\r\n raise TypeError(\"Parameter target_names must be a string or None if reference belongs to a regressor.\")\r\n if target_names is None:\r\n target_names = \"value\"\r\n return feature_names, target_names, is_classifier_\r\n",
"\"\"\"Prepare breast cancer data as benchmark case.\r\n\r\nCopyright by Nikolaus Ruf\r\nReleased under the MIT license - see LICENSE file for details\r\n\"\"\"\r\n\r\nimport gzip\r\nimport os\r\nimport pickle\r\n\r\nimport numpy as np\r\nfrom sklearn.datasets import load_breast_cancer\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\nprint(\"* Apply user settings\")\r\nrandom_state = np.random.RandomState(12345)\r\noutput_path = \"scripts/results\"\r\noutput_file = \"cancer_data.gz\"\r\n\r\nprint(\"* Load and format data\")\r\ndata = load_breast_cancer()\r\nX = data[\"data\"]\r\nfeature_names = data[\"feature_names\"]\r\ny = data[\"target_names\"][data[\"target\"]] # use string target\r\n\r\nprint(\"* Make train-test split\")\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=random_state, stratify=y)\r\n\r\nprint(\"* Save data\")\r\ndata = {\r\n \"X_train\": X_train,\r\n \"X_test\": X_test,\r\n \"y_train\": y_train,\r\n \"y_test\": y_test,\r\n \"feature_names\": feature_names\r\n}\r\nwith gzip.open(os.path.join(output_path, output_file), mode=\"wb\") as file:\r\n pickle.dump(data, file)\r\n\r\nprint(\"* Done\")\r\n",
"\"\"\"Score XGBoost classifier trained on two features of Fisher's iris data.\r\n\r\nCopyright by Nikolaus Ruf\r\nReleased under the MIT license - see LICENSE file for details\r\n\"\"\"\r\n\r\nimport gzip\r\nimport os\r\nimport pickle\r\n\r\nimport numpy as np\r\nfrom sklearn.metrics import classification_report, log_loss, roc_auc_score\r\n\r\nimport proset.utility as utility\r\nfrom proset.benchmarks import print_xgb_classifier_report\r\n\r\n\r\nprint(\"* Apply user settings\")\r\ninput_path = \"scripts/results\"\r\ninput_file = \"iris_2f_xgb_model.gz\"\r\nmodel_name = input_file.replace(\".gz\", \"\")\r\n\r\nprint(\"* Load model fit results\")\r\nwith gzip.open(os.path.join(input_path, input_file), mode=\"rb\") as file:\r\n result = pickle.load(file)\r\n\r\nprint(\"* Show results\")\r\ntest_features = result[\"data\"][\"X_test\"]\r\ntest_labels = result[\"data\"][\"y_test\"]\r\ntest_labels_int = result[\"encoder\"].transform(test_labels)\r\nprediction = result[\"encoder\"].inverse_transform(result[\"model\"].predict(test_features))\r\nprobabilities = result[\"model\"].predict_proba(test_features)\r\nprint(\"- Hyperparameter selection\")\r\nprint_xgb_classifier_report(result)\r\nprint(\"- Final model\")\r\nprint(\"active features = {}\".format(np.sum(result[\"model\"].feature_importances_ > 0.0)))\r\nprint(\"log-loss = {:.2f}\".format(log_loss(y_true=test_labels, y_pred=probabilities)))\r\nprint(\"roc-auc = {:.2f}\".format(roc_auc_score(y_true=test_labels, y_score=probabilities, multi_class=\"ovo\")))\r\nprint(\"- Classification report\")\r\nprint(classification_report(y_true=test_labels, y_pred=prediction))\r\nplotter = utility.ClassifierPlots(\r\n model=result[\"model\"],\r\n model_name=model_name,\r\n feature_names=result[\"data\"][\"feature_names\"]\r\n)\r\ntrain_features = result[\"data\"][\"X_train\"]\r\ntrain_labels_int = result[\"encoder\"].transform(result[\"data\"][\"y_train\"])\r\nmisclassified = prediction != test_labels\r\nx_range, y_range = plotter.plot_surface(\r\n features=train_features,\r\n target=train_labels_int,\r\n comment=\"training samples\",\r\n use_proba=True\r\n)\r\nplotter.plot_surface(\r\n features=test_features,\r\n target=test_labels_int,\r\n comment=\"test samples\",\r\n highlight=misclassified,\r\n highlight_name=\"misclassified\",\r\n x_range=x_range,\r\n y_range=y_range,\r\n use_proba=True\r\n)\r\nplotter.plot_surface(\r\n features=test_features,\r\n target=test_labels_int,\r\n comment=\"test samples\",\r\n highlight=misclassified,\r\n highlight_name=\"misclassified\",\r\n x_range=x_range,\r\n y_range=y_range,\r\n)\r\n\r\nprint(\"* Done\")\r\n",
"\"\"\"Score XGBoost classifier trained on the 'continuous XOR' problem with 6 relevant and 6 irrelevant features.\r\n\r\nCopyright by Nikolaus Ruf\r\nReleased under the MIT license - see LICENSE file for details\r\n\"\"\"\r\n\r\nimport gzip\r\nimport os\r\nimport pickle\r\n\r\nimport numpy as np\r\nfrom sklearn.metrics import classification_report, log_loss, roc_auc_score\r\n\r\nimport proset.utility as utility\r\nfrom proset.benchmarks import print_xgb_classifier_report\r\n\r\n\r\nprint(\"* Apply user settings\")\r\ninput_path = \"scripts/results\"\r\ninput_file = \"xor_6_6f_xgb_model.gz\"\r\nmodel_name = input_file.replace(\".gz\", \"\")\r\n\r\nprint(\"* Load model fit results\")\r\nwith gzip.open(os.path.join(input_path, input_file), mode=\"rb\") as file:\r\n result = pickle.load(file)\r\n\r\nprint(\"* Show results\")\r\ntrain_features = result[\"data\"][\"X_train\"]\r\ntrain_labels = result[\"data\"][\"y_train\"]\r\ntest_features = result[\"data\"][\"X_test\"]\r\ntest_labels = result[\"data\"][\"y_test\"]\r\nprediction = result[\"model\"].predict(test_features)\r\nprobabilities = result[\"model\"].predict_proba(test_features)\r\nmisclassified = prediction != test_labels\r\nprint(\"- Hyperparameter selection\")\r\nprint_xgb_classifier_report(result)\r\nprint(\"- Final model\")\r\nprint(\"active features = {}\".format(np.sum(result[\"model\"].feature_importances_ > 0.0)))\r\nprint(\"log-loss = {:.2f}\".format(log_loss(y_true=test_labels, y_pred=probabilities)))\r\nprint(\"roc-auc = {:.2f}\".format(roc_auc_score(y_true=test_labels, y_score=probabilities[:, 1])))\r\nprint(\"- Classification report\")\r\nprint(classification_report(y_true=test_labels, y_pred=prediction))\r\nplotter = utility.ClassifierPlots(\r\n model=result[\"model\"],\r\n model_name=model_name,\r\n feature_names=result[\"data\"][\"feature_names\"]\r\n)\r\nix = np.prod(result[\"data\"][\"X_train\"][:, 2:6], axis=1) >= 0\r\n# select test samples which have identical class based on the first two features\r\nx_range, y_range = plotter.plot_surface(\r\n features=train_features[ix, :],\r\n target=train_labels[ix],\r\n baseline=np.ones((1, 12)) * 0.5, # fix remaining features to positive sign\r\n plot_index=np.array([0, 1]),\r\n comment=\"training samples\",\r\n use_proba=True\r\n)\r\nix = np.prod(result[\"data\"][\"X_test\"][:, 2:6], axis=1) >= 0\r\n# noinspection PyUnresolvedReferences\r\nplotter.plot_surface(\r\n features=test_features[ix, :],\r\n target=test_labels[ix],\r\n baseline=np.ones((1, 12)) * 0.5, # fix remaining features to positive sign\r\n plot_index=np.array([0, 1]),\r\n comment=\"test samples\",\r\n highlight=misclassified[ix],\r\n highlight_name=\"misclassified\",\r\n x_range=x_range,\r\n y_range=y_range\r\n)\r\n\r\nprint(\"* Done\")\r\n",
"\"\"\"Train XGBoost classifier on the rotated checkerboard pattern.\r\n\r\nCopyright by Nikolaus Ruf\r\nReleased under the MIT license - see LICENSE file for details\r\n\"\"\"\r\n\r\nimport gzip\r\nimport os\r\nimport pickle\r\n\r\nimport numpy as np\r\n\r\nfrom proset.benchmarks import fit_xgb_classifier\r\n\r\n\r\nprint(\"* Apply user settings\")\r\nrandom_state = np.random.RandomState(12345)\r\nworking_path = \"scripts/results\"\r\ndata_file = \"checker_rot_data.gz\"\r\noutput_file = \"checker_rot_xgb_model.gz\"\r\n\r\nprint(\"* Load data\")\r\nwith gzip.open(os.path.join(working_path, data_file), mode=\"rb\") as file:\r\n data = pickle.load(file)\r\n\r\nprint(\"* Select hyperparameters via cross-validation\")\r\nresult = fit_xgb_classifier(\r\n features=data[\"X_train\"],\r\n labels=data[\"y_train\"],\r\n max_depth=30, # default of 20 means depth 19 is selected\r\n colsample_range=(0.1, 0.9),\r\n subsample_range=(0.1, 0.9),\r\n num_folds=5,\r\n random_state=random_state\r\n)\r\n\r\nprint(\"* Save results\")\r\nresult[\"data\"] = data\r\nwith gzip.open(os.path.join(working_path, output_file), mode=\"wb\") as file:\r\n pickle.dump(result, file)\r\n\r\nprint(\"* Done\")\r\n",
"\"\"\"Score k-nearest neighbor classifier trained on wine classification data.\r\n\r\nCopyright by Nikolaus Ruf\r\nReleased under the MIT license - see LICENSE file for details\r\n\"\"\"\r\n\r\nimport gzip\r\nimport os\r\nimport pickle\r\n\r\nfrom sklearn.metrics import classification_report, log_loss, roc_auc_score\r\n\r\n\r\nprint(\"* Apply user settings\")\r\ninput_path = \"scripts/results\"\r\ninput_file = \"wine_knn_model.gz\"\r\n\r\nprint(\"* Load model fit results\")\r\nwith gzip.open(os.path.join(input_path, input_file), mode=\"rb\") as file:\r\n result = pickle.load(file)\r\n\r\nprint(\"* Show results\")\r\ntest_labels = result[\"data\"][\"y_test\"]\r\nprediction = result[\"model\"].predict(result[\"data\"][\"X_test\"])\r\nprobabilities = result[\"model\"].predict_proba(result[\"data\"][\"X_test\"])\r\nprint(\"- Hyperparameter selection\")\r\nprint(\"optimal k = {}\".format(result[\"info\"][\"k_grid\"][result[\"info\"][\"best_index\"]]))\r\nprint(\"optimal log-loss = {:.2f}\".format(result[\"info\"][\"scores\"][result[\"info\"][\"best_index\"]]))\r\nprint(\"threshold = {:.2f}\".format(result[\"info\"][\"threshold\"]))\r\nprint(\"selected k = {}\".format(result[\"info\"][\"k_grid\"][result[\"info\"][\"selected_index\"]]))\r\nprint(\"selected log-loss = {:.2f}\".format(result[\"info\"][\"scores\"][result[\"info\"][\"selected_index\"]]))\r\nprint(\"- Final model\")\r\nprint(\"log-loss = {:.2f}\".format(log_loss(y_true=test_labels, y_pred=probabilities)))\r\nprint(\"roc-auc = {:.2f}\".format(roc_auc_score(y_true=test_labels, y_score=probabilities, multi_class=\"ovo\")))\r\nprint(\"- Classification report\")\r\nprint(classification_report(y_true=test_labels, y_pred=prediction))\r\n\r\nprint(\"* Done\")\r\n"
] | [
[
"matplotlib.pyplot.gca",
"numpy.abs",
"numpy.sqrt",
"numpy.arange",
"numpy.argsort",
"matplotlib.pyplot.figure"
],
[
"sklearn.preprocessing.StandardScaler",
"numpy.random.RandomState"
],
[
"sklearn.metrics.pairwise_distances",
"numpy.log",
"numpy.unique",
"numpy.cumsum",
"numpy.max",
"numpy.argmax",
"sklearn.base.is_classifier"
],
[
"numpy.random.RandomState",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_breast_cancer"
],
[
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.log_loss",
"sklearn.metrics.classification_report",
"numpy.sum"
],
[
"sklearn.metrics.roc_auc_score",
"numpy.ones",
"sklearn.metrics.log_loss",
"numpy.prod",
"numpy.array",
"sklearn.metrics.classification_report",
"numpy.sum"
],
[
"numpy.random.RandomState"
],
[
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.log_loss",
"sklearn.metrics.classification_report"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
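The proset utility module in the row above selects a reference point with _compute_borda_points(), which converts a metric to be minimized into Borda-count points: each sample earns one point per sample with a strictly larger (worse) metric and half a point per tie. A small worked example; the scoring function repeats the module's own formula, while the input array is made up for illustration.

import numpy as np

def compute_borda_points(metric):
    # Same computation as proset's _compute_borda_points(): rank by unique value,
    # award one point per strictly worse sample and half a point per tied sample.
    inverse, counts = np.unique(metric, return_inverse=True, return_counts=True)[1:]
    points = (counts - 1) / 2.0 + np.hstack([0, np.cumsum(counts[-1:0:-1])])[-1::-1]
    return points[inverse]

metric = np.array([3.0, 1.0, 2.0, 1.0])   # lower is better; the two 1.0 entries are tied
print(compute_borda_points(metric))       # -> [0.  2.5 1.  2.5]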
joelnmdyer/SignatuRE | [
"085a9d727e504bd25bbebdebaa58867211a52c8d",
"085a9d727e504bd25bbebdebaa58867211a52c8d"
] | [
"signature/train_and_sample.py",
"signature/utils/compute_metrics.py"
] | [
"import argparse\nimport logging\nimport numpy as np\nimport os\nimport sbi.utils as utils\nfrom sbi.inference.base import infer\nfrom sbi import analysis as analysis\nfrom sbi.inference import SMCABC, SNRE_A, simulate_for_sbi, prepare_for_sbi\nfrom sklearn.linear_model import LinearRegression\nimport statsmodels.api as sm\nimport time\nimport torch\n\n# Custom scripts/modules/packages\nfrom signature.inference import kernel_methods\nfrom signature.utils import networks\nfrom signature.utils import io, sampling\n\n\ndef train_clf(task, method, L, K=2, n_components_raw=100, seed=0):\n\n\t\"\"\"\n\tTrains a binary classifier with method <method> to distinguish between\n\tsamples (x, theta) from the joint distribution p(x, theta) and from the\n\tproduct of the marginals p(x)p(theta) associated with <task>.\n\n\tInput:\n\t- task:\t\t\tstr, name of model to run inference on, must be recognised\n\t\t\t\t\tby function get_task above.\n\t- method:\t\tstr, name of classifier to use, either \"signature\" or\n\t\t\t\t\t\"gru-resnet\"\n\t- L:\t\t\tint, number of training examples (simulations) to generate\n\t- K:\t\t\tint, number of contrasting examples. Only used when\n\t\t\t\t\tmethod == \"signature\"\n\t- seed:\t\t\tint, seed for random number generator\n\t\"\"\"\n\n\tprior, sbi_prior, obs, simulator = io.get_task(task)\n\n\tif method in [\"signature\", \"k2\"]:\n\n\t\tclf, x0, _, inn_prods, theta_kern = kernel_methods.train_kernel_classifier(prior,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t simulator,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t obs,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t L,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t K,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t n_components_raw,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t task,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t method)\n\n\telif method[:10] == \"gru-resnet\":\n\t\n\t\tIDIM = 1\n\t\tdef sbi_simulator(x):\n\t\t\treturn simulator(x)\n\t\tif task == \"GSE\":\n\t\t\tobs = obs[:, :-1]\n\t\t\tIDIM = 2\n\t\t\t# Remove time indices from GSE output\n\t\t\tdef sbi_simulator(x):\n\t\t\t\treturn simulator(x)[:,:-1]\n\t\tODIM = 3\n\t\tif method != \"gru-resnet\":\n\t\t\tODIM = eval(method[10:])\n\t\tsimulator_wrapper, _prior = prepare_for_sbi(sbi_simulator, sbi_prior)\n\n\t\t# Instantiate the neural density ratio estimator\n\t\tembedding_net = networks.GRU(input_dim=IDIM, hidden_dim=32, num_layers=2,\n\t\t\t\t\t\t\toutput_dim=ODIM)\n\t\tn_pars_embedding = sum(p.numel() for p in embedding_net.parameters() if p.requires_grad)\n\t\tlogging.info(\"Embedding net has {0} parameters\".format(n_pars_embedding))\n\t\tclassifier = utils.get_nn_models.classifier_nn('resnet',\n\t\t\t\t\t\t\t\t\t\t\t\t\t embedding_net_x=embedding_net)\n\n\t\t# Setup the inference procedure with the SNRE-A procedure\n\t\tinference = SNRE_A(prior=_prior, classifier=classifier)\n\n\t\t# Run the inference procedure on one round and L simulated data points\n\t\ttheta, x = simulate_for_sbi(simulator_wrapper, _prior, num_simulations=L)\n\t\tif task not in [\"GSE\"]:\n\t\t\tx = x.unsqueeze(-1)\n\t\telif task == \"GSE\":\n\t\t\t# Print this out to see that it gives you everything in the right place\n\t\t\tx = x.reshape(L, -1, 2)\n\t\tdensity_estimator = inference.append_simulations(theta, x).train()\n\t\tposterior = inference.build_posterior(density_estimator)\n\t\tposterior.set_default_x(obs.reshape(1,-1,IDIM))\n\n\t\tclf = posterior\n\t\tinn_prods = None\n\t\ttheta_kern = None\n\t\tx0 = obs\n\t\tprior = _prior\n\n\telif method in [\"hc\", \"smcabc\"]:\n\n\t\tdef 
slope_intercept(data):\n\t\t\treg = LinearRegression().fit(data[:-1].reshape(-1,1), data[1:].reshape(-1,1))\n\t\t\tslope = reg.coef_\n\t\t\tintercept = reg.intercept_\n\t\t\treturn slope, intercept\n\n\t\tif task == \"OU\":\t\t\t\n\t\t\tdef summarise(data):\t\n\t\t\t\tslope, intercept = slope_intercept(data)\n\t\t\t\tsummary = np.array([np.mean(data), slope[0,0], intercept[0]])\n\t\t\t\treturn summary\n\n\t\telif task == \"MA2\":\n\t\t\tdef summarise(data):\n\t\t\t\tvar = np.var(data)\n\t\t\t\trhos = sm.tsa.acf(data, nlags=2)[1:]\n\t\t\t\treturn np.array([var, rhos[0], rhos[1]])\n\t\n\t\telif task == \"GSE\":\n\t\t\tdef summarise(data):\n\t\t\t\tdata = data[:, :-1]\n\t\t\t\tN = data.shape[0]\n\t\t\t\tx, y = data[:,0], data[:,1]\n\t\t\t\txmean = np.mean(x)\n\t\t\t\tymean = np.mean(y)\n\t\t\t\txvar = np.var(x, ddof=1)\n\t\t\t\tyvar = np.var(y, ddof=1)\n\t\t\t\tif xvar == 0.:\n\t\t\t\t\txvar = 1e-30\n\t\t\t\tif yvar == 0.:\n\t\t\t\t\tyvar = 1e-30\n\t\t\t\tx, y = (x - xmean)/np.sqrt(xvar), (y - ymean)/np.sqrt(yvar)\n\t\t\t\tacx, acy = [], []\n\t\t\t\tfor lag in [1,2]:\n\t\t\t\t\tacx.append(np.dot(x[:-lag], x[lag:]) / (N - 1))\n\t\t\t\t\tacy.append(np.dot(y[:-lag], y[lag:]) / (N - 1))\n\t\t\t\tccxy = np.dot(x, y)/(N-1)\n\t\t\t\tsummary = np.array([xmean, ymean, np.log(xvar + 1), np.log(yvar+1), ccxy] + acx + acy)\n\t\t\t\treturn summary\n\n\t\tdef sbi_simulator(x):\n\t\t\tdata = simulator(x)\n\t\t\treturn summarise(data)\n\n\n\t\tif method == \"hc\":\n\n\t\t\tx0 = summarise(obs)\n\t\t\tsimulator_wrapper, _prior = prepare_for_sbi(sbi_simulator, sbi_prior)\n\t\t\t# Instantiate the neural density ratio estimator\n\t\t\tclassifier = utils.get_nn_models.classifier_nn('resnet')\n\n\t\t\t# Setup the inference procedure with the SNRE-A procedure\n\t\t\tinference = SNRE_A(prior=_prior, classifier=classifier)\n\n\t\t\t# Run the inference procedure on one round and L simulated data points\n\t\t\ttheta, x = simulate_for_sbi(simulator_wrapper, _prior, num_simulations=L)\n\t\t\tdensity_estimator = inference.append_simulations(theta, x).train()\n\t\t\tposterior = inference.build_posterior(density_estimator)\n\t\t\tposterior.set_default_x(x0)\n\n\t\t\tclf = posterior\n\n\t\telif method == \"smcabc\":\n\n\t\t\tdef _simulator(theta):\n\t\t\t\treturn simulator(theta)[:, :-1].reshape(-1)\n\n\t\t\tprint(_simulator(prior.sample()))\n\n\t\t\tsimulator_wrapper, _prior = prepare_for_sbi(_simulator, sbi_prior)\n\t\t\tinference = SMCABC(simulator_wrapper, _prior, num_workers=20)\n\t\t\tclf = inference\n\t\t\tx0 = obs[:, :-1].reshape(-1)\n\n\t\tprint(x0)\n\t\tinn_prods = None\n\t\ttheta_kern = None\n\t\tprior = _prior\n\n\treturn clf, x0, prior, inn_prods, theta_kern\n\n\ndef sample(method, clf, x0, start, sampling_method, n_samples=[50_000, 100_000], prior=None,\n\t\t inn_prods=None, theta_kern=None):\n\n\t\"\"\"\n\tUses a density ratio estimator clf to sample from the posterior for x0\n\tand prior.\n\n\tInputs:\n\t- method:\t\tstr, either \"signature\" or \"gru-resnet\" depending on which\n\t\t\t\t\tclassifier is being used\n\t- clf:\t\t\tthe density ratio estimator\n\t- x0:\t\t\tthe preprocessed observation\n\t- start:\t\tnp.array consisting of the start point for MCMC. Recommend\n\t\t\t\t\tusing true parameter value that generated x0 for this\n\t- n_samples:\tlist of length 2 consisting of ints > 0. 
Trial run of MCMC\n\t\t\t\t\tuses n_samples[0] steps to estimate covariance matrix of\n\t\t\t\t\tGaussian proposal density; proper run uses n_samples[1]\n\t- prior:\t\tprior distribution, only used if method == \"signature\",\n\t\t\t\t\totherwise ignored. Default None\n\t\"\"\"\n\n\tif method in [\"signature\", \"k2\"]:\n\n\t\tif prior is None:\n\t\t\traise ValueError(\"Must provide prior for kernel classifier\")\n\n\t\tdef create_log_ratio_estimator(clf, x):\n\t\t\t\"Create a ratio estimator from the signature-based classifier\"\n\t\t\tX_test = inn_prods(x)\n\t\t\tclf.set_xkern(X_test.reshape(-1,1))\n\n\t\t\tlr = clf.lr\n\t\t\tcoefficients = lr.coef_.T\n\t\t\tintercept = lr.intercept_\n\t\t\tvector = (clf._mapping).dot(coefficients)\n\n\t\t\tdef log_ratio_estimator(theta):\n\t\t\t\tT_test = theta_kern(theta)\n\t\t\t\treturn T_test.dot(vector) + intercept\n\t\t\t\n\t\t\treturn log_ratio_estimator\n\n\t\tcustom_log_ratio_estimator = create_log_ratio_estimator(clf, x0)\n\t\tcustom_ratio_estimator = lambda theta: np.exp(custom_log_ratio_estimator(theta))\n\n\t\tdef kernel_posterior(theta):\n\t\t\t\"\"\"\n\t\t\tFunction to evaluate estimation of posterior density for\n\t\t\tkernel-based classifier.\n\t\t\t\"\"\"\n\t\t\tprior_logpdf = prior.log_prob(theta)\n\t\t\tif prior_logpdf == -float(\"inf\"):\n\t\t\t\treturn prior_logpdf\n\t\t\telse:\n\t\t\t\tlog_weight = custom_log_ratio_estimator(theta)\n\t\t\t\treturn log_weight + prior_logpdf\n\n\t\tlog_post_prob = kernel_posterior\n\n\telif (method[:10] == \"gru-resnet\") or (method == \"hc\"):\n\n\t\tdef log_post_prob(th):\n\t\t\t# Convert th to torch.tensor\n\t\t\tth = torch.as_tensor(th).float()\n\t\t\treturn clf.log_prob(th)\n\n\t\t# For sampling importance resampling\n\t\tcustom_ratio_estimator = lambda th: float(torch.exp(clf.log_prob(th) - prior.log_prob(th)))\n\n\telif method == \"smcabc\":\n\n\t\tsamples = clf(x0, 1_000, 1_000, int(1e7), 0.8)\n\t\treturn samples\n\n\tif sampling_method == \"mh\":\n\t\t# Pilot run to estimate covariance matrix of Gaussian proposal density\n\t\tsamples = sampling.mh(log_post_prob, len(start), start, method,\n\t\t\t\t\t\t\t n_samples=n_samples[0])\n\t\tcov = np.cov(samples.T)\n\t\t# Proper run\n\t\tsamples = sampling.mh(log_post_prob, len(start), start, method,\n\t\t\t\t\t\t\t n_samples=n_samples[1], cov=cov)\n\t\tsamples = samples[::100]\n\telif sampling_method == \"sir\":\n\t\t# SIR\n\t\tsamples = sampling.sir(prior, custom_ratio_estimator, 50_000,\n\t\t\t\t\t\t\t 1_000)\n\n\treturn samples\n\n\ndef train_inference(task, method, start, L, fname, K=2, sampling_method=\"mh\",\n\t\t\t\t\tn_samples=[50_000, 100_000], seed=0, n_components_raw=100, start_time=0):\n\n\tprint(\"Training classifier...\")\n\tclf, x0, prior, s_kernel, t_kernel = train_clf(task, method, L, K=K,\n\t\t\t\t\t\t\t\t\t\t\t\t n_components_raw=n_components_raw, seed=seed)\n\tlogging.info(\"Training CPU time = {0}\".format(time.process_time() - start_time))\n\tprint(\"Sampling from posterior...\")\n\tsamples = sample(method, clf, x0, start, sampling_method, n_samples=n_samples, prior=prior,\n\t\t\t\t\t inn_prods=s_kernel, theta_kern=t_kernel)\n\tprint(\"Saving samples...\")\n\tnp.savetxt(fname, samples)\n\tprint(\"Done.\")\n\n\nif __name__ == \"__main__\":\n\n\tparser = argparse.ArgumentParser(description='Ratio estimation')\n\tparser.add_argument('--task', type=str,\n\t\t\t\t\t\thelp='Name of task (simulator) to experiment with.')\n\tparser.add_argument('--method', type=str,\n\t\t\t\t\t\thelp='Name of classification pipelines to 
use.')\n\tparser.add_argument('--L', type=int, nargs='+',\n\t\t\t\t\t\thelp='Number of training simulations to use.')\n\tparser.add_argument('--K', type=int, default=1,\n\t\t\t\t\t\thelp='Number of contrasting examples per simulation.')\n\tparser.add_argument('--s', type=str, default='mh',\n\t\t\t\t\t\thelp=\"Sampling method in ['mh', 'sir'].\")\n\tparser.add_argument('--n', type=int, default=100,\n\t\t\t\t\t\thelp=\"Number of components retained in Nystrom DIVIDED BY (K+1).\")\n\tparser.add_argument('--seed', type=int, nargs='+', help='Seeds for RNG.')\n\targs = parser.parse_args()\n\n\tif args.method == \"sre\":\n\t\tmethod = \"signature\"\n\telse:\n\t\tmethod = args.method\n\n\tif args.task == \"OU\":\n\t\tstart = np.array([0.5, 1.])\n\telif args.task == \"MA2\":\n\t\tstart = np.array([0.6, 0.2])\n\telif args.task == \"GSE\":\n\t\tstart = np.array([1e-2, 1e-1])\n\n\tfor L in args.L:\n\n\t\tfor seed in args.seed:\n\n\t\t\t# Setup for saving output\n\t\t\tdirectory = \"./{0}/{1}/\".format(args.task, seed)\n\t\t\tif not os.path.exists(directory):\n\t\t\t\tos.makedirs(directory)\n\t\t\tif method in [\"signature\", \"k2\"]:\n\t\t\t\tfname = os.path.join(directory, \"{0}_{1}_{2}_{3}_samples.txt\".format(method, L, args.K, args.n))\n\t\t\t\tlogging.basicConfig(filename=os.path.join(directory,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"{0}_{1}_{2}.log\".format(method, L, args.K)),\n\t\t\t\t\t\t\t\t\tfilemode=\"w\", format=\"%(name)s - %(levelname)s - %(message)s\",\n\t\t\t\t\t\t\t\t\tlevel=logging.INFO)\n\t\t\telse:\t\n\t\t\t\tfname = os.path.join(directory, \"{0}_{1}_samples.txt\".format(method, L))\n\t\t\t\tlogging.basicConfig(filename=os.path.join(directory,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"{0}_{1}.log\".format(method, L)),\n\t\t\t\t\t\t\t\t\tfilemode=\"w\", format=\"%(name)s - %(levelname)s - %(message)s\",\n\t\t\t\t\t\t\t\t\tlevel=logging.INFO)\n\t\t\tlogging.info(args)\n\t\t\tlogging.info(\"Seed = {0}\".format(seed))\n\n\t\t\t# Run script\n\t\t\tstart_time = time.process_time()\n\t\t\ttrain_inference(args.task, method, start, L, fname, sampling_method=args.s,\n\t\t\t\t\t\t\tK=args.K, seed=seed, n_components_raw=args.n, start_time=start_time)\n\t\t\tlogging.info(\"Total CPU time = {0}\".format(time.process_time() - start_time))\n",
"import numpy as np\nimport ot\nimport ot.sliced\n\nMETRIC = \"euclidean\"\n\ndef metrics(ns, seeds, location_template, true_samples, thin=1, sliced=True):\n\n\t\"\"\"\n\tns and seeds are iterables containing the budgets and seeds to compute\n\tmetrics for.\n\n\tlocation_template should be a string which will be formatted with {0} and\n\t{1} corresponding to the seed and budget, respectively.\n\n\ttrue_samples should be of shape (n_samples, dim)\n\t\"\"\"\n\n\tswds, meandists = dict(), dict()\n\ttrue_mean = np.mean(true_samples, axis=0)\n\n\tfor n in ns:\n\t\tfor seed in seeds:\n\t\t\tprint()\n\t\t\tprint(n, seed)\n\t\t\ttry:\n\t\t\t\tsamples = np.loadtxt(location_template.format(seed, n))\n\t\t\t\tif thin != 1:\n\t\t\t\t\tsamples = samples[::thin]\n\t\t\t\tprint(\"Sample shape: \", samples.shape)\n\t\t\t\tif sliced:\n\t\t\t\t\tswd = ot.sliced.sliced_wasserstein_distance(samples, true_samples, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tn_projections=2000)\n\t\t\t\telse:\n\t\t\t\t\tM = ot.dist(samples, true_samples, metric=METRIC)\n\t\t\t\t\tgamma, log = ot.emd([], [], M, log=True)\n\t\t\t\t\tswd = log[\"cost\"]\n\t\t\t\ttry:\n\t\t\t\t\tswds[n].append(swd)\n\t\t\t\texcept KeyError:\n\t\t\t\t\tswds[n] = [swd]\n\t\t\t\tmeandist = np.sum((np.mean(samples, axis=0) - true_mean)**2)\n\t\t\t\ttry:\n\t\t\t\t\tmeandists[n].append(meandist)\n\t\t\t\texcept KeyError:\n\t\t\t\t\tmeandists[n] = [meandist]\n\t\t\t\tprint(swd, meandist)\n\t\t\texcept FileNotFoundError:\n\t\t\t\tprint(\"Missing file at seed\", seed, \"and budget\", n)\n\n\treturn swds, ns, meandists\n"
] | [
[
"numpy.dot",
"numpy.log",
"numpy.sqrt",
"numpy.var",
"numpy.cov",
"numpy.mean",
"sklearn.linear_model.LinearRegression",
"numpy.savetxt",
"numpy.array",
"torch.as_tensor"
],
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
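The metrics code in the row above scores posterior samples against reference samples with a sliced Wasserstein distance (POT's `ot.sliced.sliced_wasserstein_distance`, or an exact `ot.emd` plan when `sliced=False`) plus the squared distance between sample means. Below is a minimal NumPy-only sketch of the same two quantities; the Monte Carlo projection estimator is an assumed stand-in for the POT call, it requires equal sample counts, and the Gaussian test arrays are synthetic placeholders for the loaded sample files.

```python
import numpy as np

def sliced_wasserstein(x, y, n_projections=2000, seed=0):
    """Monte Carlo estimate of the sliced 2-Wasserstein distance.

    x, y: arrays of shape (n_samples, dim) with the same number of rows.
    Each random direction reduces the problem to a 1-D transport, which
    for equal-size empirical measures is solved exactly by sorting.
    """
    rng = np.random.default_rng(seed)
    dim = x.shape[1]
    # Random unit vectors defining the 1-D projections
    dirs = rng.normal(size=(n_projections, dim))
    dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)
    # Project both sample sets, sort along the sample axis and compare
    px = np.sort(x @ dirs.T, axis=0)
    py = np.sort(y @ dirs.T, axis=0)
    return np.sqrt(np.mean((px - py) ** 2))

def mean_discrepancy(x, y):
    """Squared Euclidean distance between the two sample means."""
    return float(np.sum((x.mean(axis=0) - y.mean(axis=0)) ** 2))

if __name__ == "__main__":
    rng = np.random.default_rng(1)
    approx = rng.normal(0.1, 1.0, size=(5000, 2))   # stand-in for loaded samples
    truth = rng.normal(0.0, 1.0, size=(5000, 2))    # stand-in for true_samples
    print(sliced_wasserstein(approx, truth), mean_discrepancy(approx, truth))
```

Sorting the projected samples solves each 1-D transport exactly, which is what makes the sliced estimator cheap compared to computing the full transport plan; POT's implementation is more general (it accepts sample weights), which the sketch trades away for having no dependency beyond NumPy.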
tlunet/pySDC | [
"209e0015a46f861e3658691b7f8724cb1b36c97e",
"209e0015a46f861e3658691b7f8724cb1b36c97e",
"209e0015a46f861e3658691b7f8724cb1b36c97e"
] | [
"pySDC/playgrounds/fft/AllenCahn_contracting_circle_FFT.py",
"pySDC/projects/RDC/equidistant_RDC.py",
"pySDC/playgrounds/deprecated/Dedalus/playground_datatypes.py"
] | [
"import os\n\nimport dill\nimport matplotlib.ticker as ticker\nimport numpy as np\n\nimport pySDC.helpers.plot_helper as plt_helper\nfrom pySDC.helpers.stats_helper import filter_stats, sort_stats\nfrom pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right\nfrom pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI\nfrom pySDC.implementations.problem_classes.AllenCahn_2D_FFT import allencahn2d_imex, allencahn2d_imex_stab\nfrom pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order\nfrom pySDC.implementations.transfer_classes.TransferMesh_FFT2D import mesh_to_mesh_fft2d\nfrom pySDC.projects.TOMS.AllenCahn_monitor import monitor\n\n\n# http://www.personal.psu.edu/qud2/Res/Pre/dz09sisc.pdf\n\n\ndef setup_parameters():\n \"\"\"\n Helper routine to fill in all relevant parameters\n\n Note that this file will be used for all versions of SDC, containing more than necessary for each individual run\n\n Returns:\n description (dict)\n controller_params (dict)\n \"\"\"\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1E-08\n level_params['dt'] = 1E-03\n level_params['nsweeps'] = [3, 1]\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['collocation_class'] = CollGaussRadau_Right\n sweeper_params['num_nodes'] = [3]\n sweeper_params['QI'] = ['LU']\n sweeper_params['QE'] = ['EE']\n sweeper_params['initial_guess'] = 'zero'\n\n # This comes as read-in for the problem class\n problem_params = dict()\n problem_params['nu'] = 2\n problem_params['L'] = 1.0\n problem_params['nvars'] = [(256, 256), (64, 64)]\n problem_params['eps'] = [0.04, 0.16]\n problem_params['radius'] = 0.25\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 20\n controller_params['hook_class'] = monitor\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = None # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = None # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh_fft2d\n\n return description, controller_params\n\n\ndef run_SDC_variant(variant=None):\n \"\"\"\n Routine to run particular SDC variant\n\n Args:\n variant (str): string describing the variant\n\n Returns:\n timing (float)\n niter (float)\n \"\"\"\n\n # load (incomplete) default parameters\n description, controller_params = setup_parameters()\n\n # add stuff based on variant\n if variant == 'semi-implicit':\n description['problem_class'] = allencahn2d_imex\n description['sweeper_class'] = imex_1st_order\n elif variant == 'semi-implicit-stab':\n description['problem_class'] = allencahn2d_imex_stab\n description['sweeper_class'] = imex_1st_order\n else:\n raise NotImplemented('Wrong variant specified, got %s' % variant)\n\n # setup parameters \"in time\"\n t0 = 0\n Tend = 0.032\n\n # instantiate controller\n controller = controller_nonMPI(num_procs=8, controller_params=controller_params, description=description)\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # 
plt_helper.plt.imshow(uinit)\n # plt_helper.plt.show()\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # plt_helper.plt.imshow(uend)\n # plt_helper.plt.show()\n\n # filter statistics by variant (number of iterations)\n filtered_stats = filter_stats(stats, type='niter')\n\n # convert filtered statistics to list of iterations count, sorted by process\n iter_counts = sort_stats(filtered_stats, sortby='time')\n\n # compute and print statistics\n niters = np.array([item[1] for item in iter_counts])\n out = ' Mean number of iterations: %4.2f' % np.mean(niters)\n print(out)\n out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)\n print(out)\n out = ' Position of max/min number of iterations: %2i -- %2i' % \\\n (int(np.argmax(niters)), int(np.argmin(niters)))\n print(out)\n out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))\n print(out)\n\n timing = sort_stats(filter_stats(stats, type='timing_run'), sortby='time')\n\n print('Time to solution: %6.4f sec.' % timing[0][1])\n print()\n\n return stats\n\n\ndef show_results(fname, cwd=''):\n \"\"\"\n Plotting routine\n\n Args:\n fname (str): file name to read in and name plots\n cwd (str): current working directory\n \"\"\"\n\n file = open(cwd + fname + '.pkl', 'rb')\n results = dill.load(file)\n file.close()\n\n # plt_helper.mpl.style.use('classic')\n plt_helper.setup_mpl()\n\n # set up plot for timings\n fig, ax1 = plt_helper.newfig(textwidth=238.96, scale=1.5, ratio=0.4)\n\n timings = {}\n niters = {}\n for key, item in results.items():\n timings[key] = sort_stats(filter_stats(item, type='timing_run'), sortby='time')[0][1]\n iter_counts = sort_stats(filter_stats(item, type='niter'), sortby='time')\n niters[key] = np.mean(np.array([item[1] for item in iter_counts]))\n\n xcoords = [i for i in range(len(timings))]\n sorted_timings = sorted([(key, timings[key]) for key in timings], reverse=True, key=lambda tup: tup[1])\n sorted_niters = [(k, niters[k]) for k in [key[0] for key in sorted_timings]]\n heights_timings = [item[1] for item in sorted_timings]\n heights_niters = [item[1] for item in sorted_niters]\n keys = [(item[0][1] + ' ' + item[0][0]).replace('-', '\\n').replace('_v2', ' mod.') for item in sorted_timings]\n\n ax1.bar(xcoords, heights_timings, align='edge', width=-0.3, label='timings (left axis)')\n ax1.set_ylabel('time (sec)')\n\n ax2 = ax1.twinx()\n ax2.bar(xcoords, heights_niters, color='r', align='edge', width=0.3, label='iterations (right axis)')\n ax2.set_ylabel('mean number of iterations')\n\n ax1.set_xticks(xcoords)\n ax1.set_xticklabels(keys, rotation=90, ha='center')\n\n # ask matplotlib for the plotted objects and their labels\n lines, labels = ax1.get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n ax2.legend(lines + lines2, labels + labels2, loc=0)\n\n # save plot, beautify\n f = fname + '_timings'\n plt_helper.savefig(f)\n\n assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'\n assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'\n assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'\n\n # set up plot for radii\n fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)\n\n exact_radii = []\n for key, item in results.items():\n computed_radii = sort_stats(filter_stats(item, type='computed_radius'), sortby='time')\n\n xcoords = [item0[0] for item0 in computed_radii]\n radii = [item0[1] for 
item0 in computed_radii]\n if key[0] + ' ' + key[1] == 'semi-implicit-stab exact':\n ax.plot(xcoords, radii, label=(key[0] + ' ' + key[1]).replace('_v2', ' mod.'))\n\n exact_radii = sort_stats(filter_stats(item, type='exact_radius'), sortby='time')\n\n # diff = np.array([abs(item0[1] - item1[1]) for item0, item1 in zip(exact_radii, computed_radii)])\n # max_pos = int(np.argmax(diff))\n # assert max(diff) < 0.07, 'ERROR: computed radius is too far away from exact radius, got %s' % max(diff)\n # assert 0.028 < computed_radii[max_pos][0] < 0.03, \\\n # 'ERROR: largest difference is at wrong time, got %s' % computed_radii[max_pos][0]\n\n xcoords = [item[0] for item in exact_radii]\n radii = [item[1] for item in exact_radii]\n ax.plot(xcoords, radii, color='k', linestyle='--', linewidth=1, label='exact')\n\n ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))\n ax.set_ylabel('radius')\n ax.set_xlabel('time')\n ax.grid()\n ax.legend(loc=3)\n\n # save plot, beautify\n f = fname + '_radii'\n plt_helper.savefig(f)\n\n assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'\n assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'\n assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'\n\n # set up plot for interface width\n fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)\n\n interface_width = []\n for key, item in results.items():\n interface_width = sort_stats(filter_stats(item, type='interface_width'), sortby='time')\n xcoords = [item[0] for item in interface_width]\n width = [item[1] for item in interface_width]\n if key[0] + ' ' + key[1] == 'fully-implicit exact':\n ax.plot(xcoords, width, label=key[0] + ' ' + key[1])\n\n xcoords = [item[0] for item in interface_width]\n init_width = [interface_width[0][1]] * len(xcoords)\n ax.plot(xcoords, init_width, color='k', linestyle='--', linewidth=1, label='exact')\n\n ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))\n ax.set_ylabel(r'interface width ($\\epsilon$)')\n ax.set_xlabel('time')\n ax.grid()\n ax.legend(loc=3)\n\n # save plot, beautify\n f = fname + '_interface'\n plt_helper.savefig(f)\n\n assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'\n assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'\n assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'\n\n return None\n\n\ndef main(cwd=''):\n \"\"\"\n Main driver\n\n Args:\n cwd (str): current working directory (need this for testing)\n \"\"\"\n\n # Loop over variants, exact and inexact solves\n results = {}\n for variant in ['semi-implicit-stab']:\n\n results[(variant, 'exact')] = run_SDC_variant(variant=variant)\n\n # dump result\n fname = 'data/results_SDC_variants_AllenCahn_1E-03'\n file = open(cwd + fname + '.pkl', 'wb')\n dill.dump(results, file)\n file.close()\n assert os.path.isfile(cwd + fname + '.pkl'), 'ERROR: dill did not create file'\n\n # visualize\n show_results(fname, cwd=cwd)\n\n\nif __name__ == \"__main__\":\n main()\n",
"import numpy as np\nfrom scipy.special.orthogonal import roots_legendre\nfrom scipy.interpolate import BarycentricInterpolator\n\nfrom pySDC.core.Errors import CollocationError, ParameterError\nfrom pySDC.implementations.collocation_classes.equidistant import Equidistant\n\n\nclass MyBarycentricInterpolator(BarycentricInterpolator):\n \"\"\"\n Overwrite BarycentricInterolator to inject custom weights\n \"\"\"\n def __init__(self, xi, yi=None, weights=None, axis=0):\n super(MyBarycentricInterpolator, self).__init__(xi, yi, axis)\n self.wi = weights\n\n\nclass Equidistant_RDC(Equidistant):\n \"\"\"\n Implements equidistant nodes with blended barycentric interpolation\n\n Attributes:\n fh_weights: blended FH weights for barycentric interpolation\n \"\"\"\n\n def __init__(self, num_nodes, tleft, tright):\n \"\"\"\n Initialization\n\n Args:\n num_nodes: number of nodes\n tleft (float): left interval boundary (usually 0)\n tright (float): right interval boundary (usually 1)\n \"\"\"\n\n if type(num_nodes) is int:\n max_d = 15\n nnodes = num_nodes\n else:\n if type(num_nodes) is not tuple:\n raise ParameterError('Expecting int or tuple for num_nodes parameter, got %s' % type(num_nodes))\n if len(num_nodes) != 2:\n raise ParameterError('Expecting 1 or 2 arguments for num_nodes, got %s' % num_nodes)\n if type(num_nodes[0]) is not int:\n raise ParameterError('Expecting int type for first num_nodes argument, got %s' % type(num_nodes[0]))\n if type(num_nodes[1]) is not int:\n raise ParameterError('Expecting int type for second num_nodes argument, got %s' % type(num_nodes[1]))\n max_d = num_nodes[1]\n nnodes = num_nodes[0]\n\n if nnodes < 2:\n raise CollocationError(\"Number of nodes should be at least 2 for equidistant, but is %d\" % num_nodes)\n\n super(Equidistant, self).__init__(nnodes, tleft, tright)\n\n self.order = self.num_nodes\n self.nodes = self._getNodes\n\n d = min(self.num_nodes - 1, max_d)\n self.fh_weights = self._getFHWeights(d)\n self.weights = self._getWeights(tleft, tright)\n\n self.Qmat = self._gen_Qmatrix\n self.Smat = self._gen_Smatrix\n self.delta_m = self._gen_deltas\n self.left_is_node = True\n self.right_is_node = True\n\n def _getFHWeights(self, d):\n \"\"\"\n Computes blended FH weights for barycentric interpolation\n\n This method is ported from Georges Klein's matlab function\n\n Args:\n d (int): blending parameter\n\n Returns:\n numpy.ndarray: weights\n \"\"\"\n\n n = self.num_nodes - 1\n w = np.zeros(n + 1)\n\n for k in range(0, n + 1):\n ji = max(k - d, 0)\n jf = min(k, n - d)\n sumcoeff = []\n for i in range(ji, jf + 1):\n prodterm = []\n for j in range(i, i + d + 1):\n if j == k:\n prodterm.append(1)\n else:\n prodterm.append(self.nodes[k] - self.nodes[j])\n product = 1.0 / np.prod(prodterm)\n sumcoeff.append((-1) ** (i - 1) * product)\n y = sorted(sumcoeff, key=abs)\n w[k] = np.sum(y)\n\n return w\n\n def _getWeights(self, a, b):\n \"\"\"\n Computes weights using custom barycentric interpolation\n\n Args:\n a (float): left interval boundary\n b (float): right interval boundary\n\n Returns:\n numpy.ndarray: weights of the collocation formula given by the nodes\n \"\"\"\n if self.nodes is None:\n raise CollocationError(\"Need nodes before computing weights, got %s\" % self.nodes)\n\n circ_one = np.zeros(self.num_nodes)\n circ_one[0] = 1.0\n tcks = []\n for i in range(self.num_nodes):\n tcks.append(MyBarycentricInterpolator(self.nodes, np.roll(circ_one, i), self.fh_weights))\n\n # Generate evaluation points for quadrature\n tau, omega = 
roots_legendre(self.num_nodes)\n phi = (b - a) / 2 * tau + (b + a) / 2\n\n weights = [np.sum((b - a) / 2 * omega * p(phi)) for p in tcks]\n weights = np.array(weights)\n\n return weights\n\n @property\n def _gen_Qmatrix(self):\n \"\"\"\n Compute tleft-to-node integration matrix for later use in collocation formulation\n\n Returns:\n numpy.ndarray: matrix containing the weights for tleft to node\n \"\"\"\n if self.nodes is None:\n raise CollocationError(f\"Need nodes before computing weights, got {self.nodes}\")\n M = self.num_nodes\n Q = np.zeros([M + 1, M + 1])\n\n # Generate Lagrange polynomials associated to the nodes\n circ_one = np.zeros(self.num_nodes)\n circ_one[0] = 1.0\n tcks = []\n for i in range(M):\n tcks.append(MyBarycentricInterpolator(self.nodes, np.roll(circ_one, i), self.fh_weights))\n\n # Generate evaluation points for quadrature\n a, b = self.tleft, self.nodes[:, None]\n tau, omega = roots_legendre(self.num_nodes)\n tau, omega = tau[None, :], omega[None, :]\n phi = (b - a) / 2 * tau + (b + a) / 2\n\n # Compute quadrature\n intQ = np.array([np.sum((b - a) / 2 * omega * p(phi), axis=-1) for p in tcks])\n\n # Store into Q matrix\n Q[1:, 1:] = intQ.T\n\n return Q\n",
"\nfrom dedalus import public as de\nfrom dedalus import core\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport time\n\nfrom pySDC.playgrounds.Dedalus.dedalus_field import dedalus_field\n\nclass wrapper(core.field.Field):\n\n def __abs__(self):\n\n abs = super(wrapper, self).__abs__()\n while hasattr(abs, 'evaluate'):\n abs = abs.evaluate()\n print(type(abs), self)\n return np.amax(abs['g'])\n\n\nmytype = core.field.Field\n\nde.logging_setup.rootlogger.setLevel('INFO')\n\nxbasis = de.Fourier('x', 1024, interval=(0,1), dealias=1)\n\ndomain = de.Domain([xbasis], grid_dtype=np.float64, comm=None)\n\nprint(domain.global_grid_shape(), domain.local_grid_shape())\n\nx = domain.grid(0, scales=1)\n\nd1 = dedalus_field(domain)\nd1.values['g'] = np.sin(2*np.pi*x)\nd2 = dedalus_field(domain)\nd2.values['g'] = np.sin(2*np.pi*x)\n\nprint((d1 + d2).values['g'])\n\n\nd1 = mytype(domain)\nd1['g'] = np.sin(2*np.pi*x)\nd2 = mytype(domain)\nd2['g'] = np.sin(2*np.pi*x)\n\nprint((d1 + d2).evaluate()['g'])\nprint((d1 - d2).evaluate()['g'])\nprint((2.0*d2).evaluate()['g'])\nprint(np.amax(abs(d2).evaluate()['g']))\n\n\nd1 = wrapper(domain)\nd1['g'] = np.sin(2*np.pi*x)\nd2 = wrapper(domain)\nd2['g'] = np.sin(2*np.pi*x)\n\nprint((d1 + d2).evaluate()['g'])\nprint((d1 - d2).evaluate()['g'])\nprint((2.0*d2).evaluate()['g'])\nprint(abs(d2))\nprint(np.amax(abs(d1+d2).evaluate()['g']))\n# print(np.amax(abs(d2).evaluate()['g']))\nexit()\n\n\ng = domain.new_field()\ng['g'] = np.sin(2*np.pi*x)\n\nt0 = time.time()\nfor i in range(10000):\n f = domain.new_field()\n f['g'] = g['g'] + g['g']\n # f['c'][:] = g['c'][:]\nt1 = time.time()\nprint(t1-t0)\n\nt0 = time.time()\nfor i in range(10000):\n f = (g + g).evaluate()\n # f['c'][:] = g['c'][:]\nt1 = time.time()\nprint(t1-t0)\n\nt0 = time.time()\nfor i in range(10000):\n f = np.zeros(tuple(domain.global_grid_shape()))\n f = g['g'] + g['g']\n # f['c'] = g['c']\nt1 = time.time()\nprint(t1-t0)\n\nt0 = time.time()\nfor i in range(10000):\n # f = wrapper(domain)\n f = g+g\n # f['c'] = g['c']\nt1 = time.time()\nprint(t1-t0)\n\n\n\n# fxx = xbasis.Differentiate(f)\n#\n# g = de.operators.FieldCopyField(f).evaluate()\n#\n# print((f + g).evaluate())\n#\n#\n# f['g'][:] = 1.0\n# print(f['g'], g['g'])\n# print(f, g)\n\n\n\n\n\n"
] | [
[
"numpy.ptp",
"numpy.std",
"numpy.argmax",
"numpy.mean",
"numpy.argmin",
"numpy.var",
"matplotlib.ticker.FormatStrFormatter",
"numpy.array"
],
[
"scipy.special.orthogonal.roots_legendre",
"numpy.prod",
"numpy.roll",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.amax",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
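In the `Equidistant_RDC` class above, the collocation weights are obtained by evaluating each barycentric cardinal polynomial at Gauss–Legendre points and summing, i.e. approximating w_j = ∫_a^b l_j(t) dt by quadrature. The sketch below illustrates only that quadrature step for plain (unblended) Lagrange cardinal functions on equidistant nodes; it uses `numpy.polynomial.legendre.leggauss` instead of SciPy's `roots_legendre` and omits the Floater–Hormann blending, so it is an illustration under those simplifying assumptions, not the repository's blended construction.

```python
import numpy as np

def lagrange_cardinal(nodes, j, t):
    """Evaluate the j-th Lagrange cardinal polynomial l_j at points t."""
    t = np.asarray(t, dtype=float)
    result = np.ones_like(t)
    for m, xm in enumerate(nodes):
        if m != j:
            result *= (t - xm) / (nodes[j] - xm)
    return result

def collocation_weights(nodes, a=0.0, b=1.0):
    """Quadrature weights w_j = int_a^b l_j(t) dt via Gauss-Legendre."""
    tau, omega = np.polynomial.legendre.leggauss(len(nodes))
    # Map Gauss-Legendre points from [-1, 1] to [a, b]
    phi = 0.5 * (b - a) * tau + 0.5 * (b + a)
    return np.array([np.sum(0.5 * (b - a) * omega * lagrange_cardinal(nodes, j, phi))
                     for j in range(len(nodes))])

if __name__ == "__main__":
    nodes = np.linspace(0.0, 1.0, 5)   # equidistant nodes including both ends
    w = collocation_weights(nodes)
    # Interpolatory weights reproduce integrals of polynomials up to degree M-1
    for k in range(len(nodes)):
        assert abs(np.dot(w, nodes**k) - 1.0 / (k + 1)) < 1e-12
    print(w)
```

With M nodes the inner Gauss–Legendre rule is exact for the degree-(M-1) cardinal polynomials, so the resulting weights form an interpolatory quadrature rule; the blended FH weights in the row above change how l_j is evaluated, not this integration step.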
Shuai-Xie/LP-DeepSSL | [
"9389c6cb0b83c7ca509ce284c4d86b600ca44a9b"
] | [
"mean_teacher/losses.py"
] | [
"# Copyright (c) 2018, Curious AI Ltd. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\n\"\"\"Custom loss functions\"\"\"\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nimport pdb\nimport numpy as np\n\ndef softmax_mse_loss(input_logits, target_logits):\n \"\"\"Takes softmax on both sides and returns MSE loss\n\n Note:\n - Returns the sum over all examples. Divide by num_classes\n Divide by the batch size afterwards if you want the mean.\n - Sends gradients to inputs but not the targets.\n \"\"\"\n assert input_logits.size() == target_logits.size()\n input_softmax = F.softmax(input_logits, dim=1)\n target_softmax = F.softmax(target_logits, dim=1)\n num_classes = input_logits.size()[1]\n return F.mse_loss(input_softmax, target_softmax, size_average=False) / num_classes\n\n\ndef softmax_kl_loss(input_logits, target_logits):\n \"\"\"Takes softmax on both sides and returns KL divergence\n\n Note:\n - Returns the sum over all examples. Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to inputs but not the targets.\n \"\"\"\n assert input_logits.size() == target_logits.size()\n input_log_softmax = F.log_softmax(input_logits, dim=1) # log(q)\n target_softmax = F.softmax(target_logits, dim=1) # p\n return F.kl_div(input_log_softmax, target_softmax, size_average=False)\n\n\ndef symmetric_mse_loss(input1, input2):\n \"\"\"Like F.mse_loss but sends gradients to both directions.\n cuz input1/input2 are tensors with grad, while target in F.mse_loss has no grad.\n\n Note:\n - Returns the sum over all examples. Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to both input1 and input2.\n \"\"\"\n assert input1.size() == input2.size()\n num_classes = input1.size()[1]\n return torch.sum((input1 - input2)**2) / num_classes"
] | [
[
"torch.nn.functional.kl_div",
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"torch.sum",
"torch.nn.functional.mse_loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
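The `softmax_mse_loss` in the row above sums the squared probability differences over the whole batch and divides by the number of classes, sending gradients only to the inputs; the `size_average=False` keyword it uses is deprecated in recent PyTorch in favour of `reduction='sum'`. Below is a minimal sketch of the same consistency term under that assumption, with an explicit `.detach()` on the teacher logits as one common way to enforce "no gradients to the targets" — the detach and the random toy tensors are assumptions about how the loss is called, not code from the row above.

```python
import torch
import torch.nn.functional as F

def softmax_mse_consistency(student_logits, teacher_logits):
    """Sum-reduced MSE between class probabilities, divided by num_classes.

    Gradients flow into student_logits only; the teacher side is detached.
    """
    assert student_logits.size() == teacher_logits.size()
    num_classes = student_logits.size(1)
    student_probs = F.softmax(student_logits, dim=1)
    teacher_probs = F.softmax(teacher_logits.detach(), dim=1)
    return F.mse_loss(student_probs, teacher_probs, reduction="sum") / num_classes

if __name__ == "__main__":
    student = torch.randn(8, 10, requires_grad=True)   # batch of 8, 10 classes
    teacher = torch.randn(8, 10)                       # e.g. EMA teacher outputs
    loss = softmax_mse_consistency(student, teacher)
    loss.backward()
    print(float(loss), student.grad.shape)
```

Dividing by the batch size afterwards, as the original docstring notes, turns the sum into a per-example mean.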
balrajmarimuthu/CarND-Capstone | [
"bc3e52c5e940e3da51efad219ab89fb3580fb717"
] | [
"ros/src/tl_detector/tl_detector.py"
] | [
"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport cv2\nimport yaml\nfrom scipy.spatial import KDTree\n\nSTATE_COUNT_THRESHOLD = 3\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n self.pose = None\n self.waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.camera_image = None\n self.lights = []\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n self.bridge = CvBridge()\n self.light_classifier = TLClassifier()\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[w.pose.pose.position.x, w.pose.pose.position.y] for w in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n self.has_image = True\n self.camera_image = msg\n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == TrafficLight.RED else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n\n def get_closest_waypoint(self, x , y):\n \"\"\"Identifies the closest path waypoint to the given position\n https://en.wikipedia.org/wiki/Closest_pair_of_points_problem\n Args:\n pose (Pose): position to match a waypoint to\n\n Returns:\n int: index of the closest waypoint in self.waypoints\n\n \"\"\"\n #TODO implement\n closest_idx = self.waypoint_tree.query([x,y], 1)[1]\n return closest_idx\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n# if(not self.has_image):\n# self.prev_light_loc = None\n# return False\n\n# cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n# #Get classification\n# return self.light_classifier.get_classification(cv_image)\n return light.state\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n #light = None\n closest_light = None\n line_wp_idx = None\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n if(self.pose):\n #car_position = self.get_closest_waypoint(self.pose.pose)\n car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)\n \n #TODO find the closest visible traffic light (if one exists)\n diff = len(self.waypoints.waypoints)\n for i, light in enumerate(self.lights):\n #Get stop line waypoint index\n line = stop_line_positions[i]\n temp_wp_idx = self.get_closest_waypoint(line[0], line[1])\n #Find closest stop line waypoint index\n d = temp_wp_idx - car_wp_idx\n if d>=0 and d<diff:\n diff = d\n closest_light = light\n line_wp_idx = temp_wp_idx\n \n if closest_light:\n state = self.get_light_state(closest_light)\n return line_wp_idx, state\n \n return -1, TrafficLight.UNKNOWN\n \n\n if light:\n state = self.get_light_state(light)\n return light_wp, state\n self.waypoints = None\n return -1, TrafficLight.UNKNOWN\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n"
] | [
[
"scipy.spatial.KDTree"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
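The traffic-light detector above builds a `scipy.spatial.KDTree` once over the 2-D waypoint coordinates and then answers nearest-waypoint queries per pose with `query([x, y], 1)[1]`. The following is a minimal sketch of that lookup pattern with synthetic waypoints (no ROS messages); the toy coordinates are assumptions for illustration only.

```python
import numpy as np
from scipy.spatial import KDTree

# Synthetic 2-D waypoints standing in for the /base_waypoints message
waypoints_2d = np.array([[0.0, 0.0], [1.0, 0.5], [2.0, 1.5], [3.0, 3.0]])

# Build the tree once (as waypoints_cb does), then query per incoming pose
waypoint_tree = KDTree(waypoints_2d)

def get_closest_waypoint(x, y):
    """Return the index of the waypoint closest to (x, y)."""
    # query() returns (distance, index); only the index is needed here
    return waypoint_tree.query([x, y], 1)[1]

print(get_closest_waypoint(1.9, 1.4))   # -> 2
```

Building the tree in the waypoint callback and reusing it keeps each camera-frame lookup at O(log n) instead of a linear scan over all waypoints.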
uunal/adapter-transformers | [
"73a95a75f803e8fd243fc3d55ff3a9d557891377"
] | [
"src/transformers/adapters/models/distilbert.py"
] | [
"from typing import Union\n\nimport torch\nfrom torch import nn\n\nfrom ..composition import AdapterCompositionBlock, parse_composition\nfrom ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin\nfrom .bert import BertEncoderAdaptersMixin, BertModelHeadsMixin, BertOutputAdaptersMixin, BertSelfOutputAdaptersMixin\n\n\nclass DistilBertSelfAttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):\n \"\"\"Adds attention adapters to the Transformer module of DistilBert.\"\"\"\n\n def __init__(self, parent):\n super().__init__()\n # keep a reference to the parent module without registering as a submodule\n object.__setattr__(self, \"parent\", parent)\n self.config = parent.config\n\n @property\n def transformer_layer_norm(self):\n return self.parent.sa_layer_norm\n\n\nclass DistilBertOutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):\n \"\"\"Adds output adapters to the Transformer module of DistilBert.\"\"\"\n\n def __init__(self, parent):\n super().__init__()\n # keep a reference to the parent module without registering as a submodule\n object.__setattr__(self, \"parent\", parent)\n self.config = parent.config\n\n @property\n def transformer_layer_norm(self):\n return self.parent.output_layer_norm\n\n\nclass DistilBertTransfomerBlockAdaptersMixin:\n \"\"\"Adds adapters to the TransformerBlock module of DistilBert.\"\"\"\n\n def _init_adapter_modules(self):\n self.attention_adapters = DistilBertSelfAttentionAdaptersModule(self)\n self.output_adapters = DistilBertOutputAdaptersModule(self)\n self.attention_adapters._init_adapter_modules()\n self.output_adapters._init_adapter_modules()\n self.register_forward_pre_hook(self._adapter_block_pre_hook)\n\n def add_fusion_layer(self, adapter_names):\n self.attention_adapters.add_fusion_layer(adapter_names)\n self.output_adapters.add_fusion_layer(adapter_names)\n\n def add_adapter(self, adapter_name: str, layer_idx: int):\n self.attention_adapters.add_adapter(adapter_name, layer_idx)\n self.output_adapters.add_adapter(adapter_name, layer_idx)\n\n def delete_adapter(self, adapter_name):\n self.attention_adapters.delete_adapter(adapter_name)\n self.output_adapters.delete_adapter(adapter_name)\n\n def delete_fusion_layer(self, adapter_names):\n self.attention_adapters.delete_fusion_layer(adapter_names)\n self.output_adapters.delete_fusion_layer(adapter_names)\n\n def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):\n self.attention_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)\n self.output_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)\n\n # Makes sure the \"parent\" reference always points to the correct module.\n # This is especially relevant when using torch data parallelism.\n @staticmethod\n def _adapter_block_pre_hook(module, input_tensors):\n object.__setattr__(module.attention_adapters, \"parent\", module)\n object.__setattr__(module.output_adapters, \"parent\", module)\n\n\nclass DistilBertTransformerAdaptersMixin(BertEncoderAdaptersMixin):\n \"\"\"Adds adapters to the Transformer module of DistilBert.\"\"\"\n\n pass\n\n\nclass DistilBertModelAdaptersMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):\n \"\"\"Adds adapters to the DistilBert module.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock]):\n \"\"\"Sets the model into mode for training the given adapters.\"\"\"\n self.train()\n 
self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.transformer.enable_adapters(adapter_setup, True, False)\n self.enable_invertible_adapters(adapter_setup.flatten())\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"Sets the model into mode for training of adapter fusion determined by a list of adapter names.\"\"\"\n self.train()\n self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.transformer.enable_adapters(adapter_setup, unfreeze_adapters, True)\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n\n def _add_adapter(self, adapter_name):\n self.transformer.add_adapter(adapter_name)\n self.add_invertible_adapter(adapter_name)\n\n def _add_fusion_layer(self, adapter_names):\n self.transformer.add_fusion_layer(adapter_names)\n\n def _delete_adapter(self, adapter_name: str):\n self.transformer.delete_adapter(adapter_name)\n self.delete_invertible_adapter(adapter_name)\n\n def _delete_fusion_layer(self, adapter_names):\n self.transformer.delete_fusion_layer(adapter_names)\n\n def get_fusion_regularization_loss(self):\n reg_loss = 0.0\n target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)\n for _, v in self.transformer.layer._modules.items():\n\n for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():\n if hasattr(layer_fusion, \"value\"):\n reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()\n\n for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():\n if hasattr(layer_fusion, \"value\"):\n reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()\n\n return reg_loss\n\n def get_adapter(self, name):\n return_adapters = {}\n for idx, layer in enumerate(self.transformer.layer):\n adapters = {\n \"attention\": layer.attention_adapters.adapters,\n \"output\": layer.output_adapters.adapters,\n }\n for key, adapt in adapters.items():\n if hasattr(adapt, name):\n if idx not in return_adapters:\n return_adapters[idx] = {}\n return_adapters[idx][key] = getattr(adapt, name)\n\n return return_adapters\n\n\nclass DistilBertModelHeadsMixin(BertModelHeadsMixin):\n \"\"\"Adds heads to a DistilBert model.\"\"\"\n\n pass\n"
] | [
[
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
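`get_fusion_regularization_loss` in the row above pulls every adapter-fusion "value" projection toward the identity matrix with a 0.01-weighted squared Frobenius penalty, building the identity target via `torch.zeros(...).fill_diagonal_(1.0)`. Below is a minimal sketch of that single penalty term for one toy projection; the `nn.Linear` stands in for `layer_fusion.value` and the hidden size is an arbitrary assumption.

```python
import torch
from torch import nn

hidden_size = 4

# Stand-in for one adapter-fusion "value" projection (layer_fusion.value)
value_proj = nn.Linear(hidden_size, hidden_size, bias=False)

# Identity target built the same way as in the mixin above
target = torch.zeros((hidden_size, hidden_size)).fill_diagonal_(1.0)

# Penalty pulling the value projection toward the identity map
reg_loss = 0.01 * (target - value_proj.weight).pow(2).sum()
reg_loss.backward()

print(float(reg_loss), value_proj.weight.grad.shape)
```

In the full mixin the same term is accumulated over the attention and output fusion layers of every transformer block, which keeps the fused representation close to a pass-through when the fusion weights are untrained.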
gsyqax/pandas | [
"cb35d8a938c9222d903482d2f66c62fece5a7aae",
"cb35d8a938c9222d903482d2f66c62fece5a7aae",
"148f9fd74fc71cb7509c0898883036316efc6f89",
"cb35d8a938c9222d903482d2f66c62fece5a7aae",
"cb35d8a938c9222d903482d2f66c62fece5a7aae"
] | [
"pandas/tests/arrays/boolean/test_construction.py",
"pandas/core/missing.py",
"pandas/tests/frame/test_subclass.py",
"asv_bench/benchmarks/arithmetic.py",
"pandas/tests/tseries/frequencies/test_freq_code.py"
] | [
"import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.arrays import BooleanArray\nfrom pandas.core.arrays.boolean import coerce_to_array\n\n\[email protected]\ndef data():\n return pd.array(\n [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False],\n dtype=\"boolean\",\n )\n\n\ndef test_boolean_array_constructor():\n values = np.array([True, False, True, False], dtype=\"bool\")\n mask = np.array([False, False, False, True], dtype=\"bool\")\n\n result = BooleanArray(values, mask)\n expected = pd.array([True, False, True, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n with pytest.raises(TypeError, match=\"values should be boolean numpy array\"):\n BooleanArray(values.tolist(), mask)\n\n with pytest.raises(TypeError, match=\"mask should be boolean numpy array\"):\n BooleanArray(values, mask.tolist())\n\n with pytest.raises(TypeError, match=\"values should be boolean numpy array\"):\n BooleanArray(values.astype(int), mask)\n\n with pytest.raises(TypeError, match=\"mask should be boolean numpy array\"):\n BooleanArray(values, None)\n\n with pytest.raises(ValueError, match=\"values must be a 1D array\"):\n BooleanArray(values.reshape(1, -1), mask)\n\n with pytest.raises(ValueError, match=\"mask must be a 1D array\"):\n BooleanArray(values, mask.reshape(1, -1))\n\n\ndef test_boolean_array_constructor_copy():\n values = np.array([True, False, True, False], dtype=\"bool\")\n mask = np.array([False, False, False, True], dtype=\"bool\")\n\n result = BooleanArray(values, mask)\n assert result._data is values\n assert result._mask is mask\n\n result = BooleanArray(values, mask, copy=True)\n assert result._data is not values\n assert result._mask is not mask\n\n\ndef test_to_boolean_array():\n expected = BooleanArray(\n np.array([True, False, True]), np.array([False, False, False])\n )\n\n result = pd.array([True, False, True], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([True, False, True]), dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([True, False, True], dtype=object), dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n expected = BooleanArray(\n np.array([True, False, True]), np.array([False, False, True])\n )\n\n result = pd.array([True, False, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([True, False, None], dtype=object), dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_all_none():\n expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True]))\n\n result = pd.array([None, None, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n result = pd.array(np.array([None, None, None], dtype=object), dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\[email protected](\n \"a, b\",\n [\n ([True, False, None, np.nan, pd.NA], [True, False, None, None, None]),\n ([True, np.nan], [True, None]),\n ([True, pd.NA], [True, None]),\n ([np.nan, np.nan], [None, None]),\n (np.array([np.nan, np.nan], dtype=float), [None, None]),\n ],\n)\ndef test_to_boolean_array_missing_indicators(a, b):\n result = pd.array(a, dtype=\"boolean\")\n expected = pd.array(b, dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\[email protected](\n \"values\",\n [\n [\"foo\", 
\"bar\"],\n [\"1\", \"2\"],\n # \"foo\",\n [1, 2],\n [1.0, 2.0],\n pd.date_range(\"20130101\", periods=2),\n np.array([\"foo\"]),\n np.array([1, 2]),\n np.array([1.0, 2.0]),\n [np.nan, {\"a\": 1}],\n ],\n)\ndef test_to_boolean_array_error(values):\n # error in converting existing arrays to BooleanArray\n msg = \"Need to pass bool-like value\"\n with pytest.raises(TypeError, match=msg):\n pd.array(values, dtype=\"boolean\")\n\n\ndef test_to_boolean_array_from_integer_array():\n result = pd.array(np.array([1, 0, 1, 0]), dtype=\"boolean\")\n expected = pd.array([True, False, True, False], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n result = pd.array(np.array([1, 0, 1, None]), dtype=\"boolean\")\n expected = pd.array([True, False, True, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_from_float_array():\n result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype=\"boolean\")\n expected = pd.array([True, False, True, False], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype=\"boolean\")\n expected = pd.array([True, False, True, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_integer_like():\n # integers of 0's and 1's\n result = pd.array([1, 0, 1, 0], dtype=\"boolean\")\n expected = pd.array([True, False, True, False], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n # with missing values\n result = pd.array([1, 0, 1, None], dtype=\"boolean\")\n expected = pd.array([True, False, True, None], dtype=\"boolean\")\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_coerce_to_array():\n # TODO this is currently not public API\n values = np.array([True, False, True, False], dtype=\"bool\")\n mask = np.array([False, False, False, True], dtype=\"bool\")\n result = BooleanArray(*coerce_to_array(values, mask=mask))\n expected = BooleanArray(values, mask)\n tm.assert_extension_array_equal(result, expected)\n assert result._data is values\n assert result._mask is mask\n result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True))\n expected = BooleanArray(values, mask)\n tm.assert_extension_array_equal(result, expected)\n assert result._data is not values\n assert result._mask is not mask\n\n # mixed missing from values and mask\n values = [True, False, None, False]\n mask = np.array([False, False, False, True], dtype=\"bool\")\n result = BooleanArray(*coerce_to_array(values, mask=mask))\n expected = BooleanArray(\n np.array([True, False, True, True]), np.array([False, False, True, True])\n )\n tm.assert_extension_array_equal(result, expected)\n result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask))\n tm.assert_extension_array_equal(result, expected)\n result = BooleanArray(*coerce_to_array(values, mask=mask.tolist()))\n tm.assert_extension_array_equal(result, expected)\n\n # raise errors for wrong dimension\n values = np.array([True, False, True, False], dtype=\"bool\")\n mask = np.array([False, False, False, True], dtype=\"bool\")\n\n with pytest.raises(ValueError, match=\"values must be a 1D list-like\"):\n coerce_to_array(values.reshape(1, -1))\n\n with pytest.raises(ValueError, match=\"mask must be a 1D list-like\"):\n coerce_to_array(values, mask=mask.reshape(1, -1))\n\n\ndef test_coerce_to_array_from_boolean_array():\n # passing BooleanArray to 
coerce_to_array\n values = np.array([True, False, True, False], dtype=\"bool\")\n mask = np.array([False, False, False, True], dtype=\"bool\")\n arr = BooleanArray(values, mask)\n result = BooleanArray(*coerce_to_array(arr))\n tm.assert_extension_array_equal(result, arr)\n # no copy\n assert result._data is arr._data\n assert result._mask is arr._mask\n\n result = BooleanArray(*coerce_to_array(arr), copy=True)\n tm.assert_extension_array_equal(result, arr)\n assert result._data is not arr._data\n assert result._mask is not arr._mask\n\n with pytest.raises(ValueError, match=\"cannot pass mask for BooleanArray input\"):\n coerce_to_array(arr, mask=mask)\n\n\ndef test_coerce_to_numpy_array():\n # with missing values -> object dtype\n arr = pd.array([True, False, None], dtype=\"boolean\")\n result = np.array(arr)\n expected = np.array([True, False, pd.NA], dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n # also with no missing values -> object dtype\n arr = pd.array([True, False, True], dtype=\"boolean\")\n result = np.array(arr)\n expected = np.array([True, False, True], dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n # force bool dtype\n result = np.array(arr, dtype=\"bool\")\n expected = np.array([True, False, True], dtype=\"bool\")\n tm.assert_numpy_array_equal(result, expected)\n # with missing values will raise error\n arr = pd.array([True, False, None], dtype=\"boolean\")\n msg = (\n \"cannot convert to 'bool'-dtype NumPy array with missing values. \"\n \"Specify an appropriate 'na_value' for this dtype.\"\n )\n with pytest.raises(ValueError, match=msg):\n np.array(arr, dtype=\"bool\")\n\n\ndef test_to_boolean_array_from_strings():\n result = BooleanArray._from_sequence_of_strings(\n np.array([\"True\", \"False\", np.nan], dtype=object)\n )\n expected = BooleanArray(\n np.array([True, False, False]), np.array([False, False, True])\n )\n\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_boolean_array_from_strings_invalid_string():\n with pytest.raises(ValueError, match=\"cannot be cast\"):\n BooleanArray._from_sequence_of_strings([\"donkey\"])\n\n\[email protected](\"box\", [True, False], ids=[\"series\", \"array\"])\ndef test_to_numpy(box):\n con = pd.Series if box else pd.array\n # default (with or without missing values) -> object dtype\n arr = con([True, False, True], dtype=\"boolean\")\n result = arr.to_numpy()\n expected = np.array([True, False, True], dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n arr = con([True, False, None], dtype=\"boolean\")\n result = arr.to_numpy()\n expected = np.array([True, False, pd.NA], dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n arr = con([True, False, None], dtype=\"boolean\")\n result = arr.to_numpy(dtype=\"str\")\n expected = np.array([True, False, pd.NA], dtype=\"<U5\")\n tm.assert_numpy_array_equal(result, expected)\n\n # no missing values -> can convert to bool, otherwise raises\n arr = con([True, False, True], dtype=\"boolean\")\n result = arr.to_numpy(dtype=\"bool\")\n expected = np.array([True, False, True], dtype=\"bool\")\n tm.assert_numpy_array_equal(result, expected)\n\n arr = con([True, False, None], dtype=\"boolean\")\n with pytest.raises(ValueError, match=\"cannot convert to 'bool'-dtype\"):\n result = arr.to_numpy(dtype=\"bool\")\n\n # specify dtype and na_value\n arr = con([True, False, None], dtype=\"boolean\")\n result = arr.to_numpy(dtype=object, na_value=None)\n expected = np.array([True, False, None], 
dtype=\"object\")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.to_numpy(dtype=bool, na_value=False)\n expected = np.array([True, False, False], dtype=\"bool\")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.to_numpy(dtype=\"int64\", na_value=-99)\n expected = np.array([1, 0, -99], dtype=\"int64\")\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.to_numpy(dtype=\"float64\", na_value=np.nan)\n expected = np.array([1, 0, np.nan], dtype=\"float64\")\n tm.assert_numpy_array_equal(result, expected)\n\n # converting to int or float without specifying na_value raises\n with pytest.raises(ValueError, match=\"cannot convert to 'int64'-dtype\"):\n arr.to_numpy(dtype=\"int64\")\n with pytest.raises(ValueError, match=\"cannot convert to 'float64'-dtype\"):\n arr.to_numpy(dtype=\"float64\")\n\n\ndef test_to_numpy_copy():\n # to_numpy can be zero-copy if no missing values\n arr = pd.array([True, False, True], dtype=\"boolean\")\n result = arr.to_numpy(dtype=bool)\n result[0] = False\n tm.assert_extension_array_equal(\n arr, pd.array([False, False, True], dtype=\"boolean\")\n )\n\n arr = pd.array([True, False, True], dtype=\"boolean\")\n result = arr.to_numpy(dtype=bool, copy=True)\n result[0] = False\n tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype=\"boolean\"))\n\n\n# FIXME: don't leave commented out\n# TODO when BooleanArray coerces to object dtype numpy array, need to do conversion\n# manually in the indexing code\n# def test_indexing_boolean_mask():\n# arr = pd.array([1, 2, 3, 4], dtype=\"Int64\")\n# mask = pd.array([True, False, True, False], dtype=\"boolean\")\n# result = arr[mask]\n# expected = pd.array([1, 3], dtype=\"Int64\")\n# tm.assert_extension_array_equal(result, expected)\n\n# # missing values -> error\n# mask = pd.array([True, False, True, None], dtype=\"boolean\")\n# with pytest.raises(IndexError):\n# result = arr[mask]\n",
"\"\"\"\nRoutines for filling missing data.\n\"\"\"\n\nimport numpy as np\n\nfrom pandas._libs import algos, lib\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.core.dtypes.cast import infer_dtype_from_array\nfrom pandas.core.dtypes.common import (\n ensure_float64,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_integer_dtype,\n is_numeric_v_string_like,\n is_scalar,\n is_timedelta64_dtype,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.missing import isna\n\n\ndef mask_missing(arr, values_to_mask):\n \"\"\"\n Return a masking array of same size/shape as arr\n with entries equaling any member of values_to_mask set to True\n \"\"\"\n dtype, values_to_mask = infer_dtype_from_array(values_to_mask)\n\n try:\n values_to_mask = np.array(values_to_mask, dtype=dtype)\n\n except Exception:\n values_to_mask = np.array(values_to_mask, dtype=object)\n\n na_mask = isna(values_to_mask)\n nonna = values_to_mask[~na_mask]\n\n mask = None\n for x in nonna:\n if mask is None:\n if is_numeric_v_string_like(arr, x):\n # GH#29553 prevent numpy deprecation warnings\n mask = False\n else:\n mask = arr == x\n\n # if x is a string and arr is not, then we get False and we must\n # expand the mask to size arr.shape\n if is_scalar(mask):\n mask = np.zeros(arr.shape, dtype=bool)\n else:\n if is_numeric_v_string_like(arr, x):\n # GH#29553 prevent numpy deprecation warnings\n mask |= False\n else:\n mask |= arr == x\n\n if na_mask.any():\n if mask is None:\n mask = isna(arr)\n else:\n mask |= isna(arr)\n\n # GH 21977\n if mask is None:\n mask = np.zeros(arr.shape, dtype=bool)\n\n return mask\n\n\ndef clean_fill_method(method, allow_nearest=False):\n # asfreq is compat for resampling\n if method in [None, \"asfreq\"]:\n return None\n\n if isinstance(method, str):\n method = method.lower()\n if method == \"ffill\":\n method = \"pad\"\n elif method == \"bfill\":\n method = \"backfill\"\n\n valid_methods = [\"pad\", \"backfill\"]\n expecting = \"pad (ffill) or backfill (bfill)\"\n if allow_nearest:\n valid_methods.append(\"nearest\")\n expecting = \"pad (ffill), backfill (bfill) or nearest\"\n if method not in valid_methods:\n raise ValueError(f\"Invalid fill method. Expecting {expecting}. Got {method}\")\n return method\n\n\ndef clean_interp_method(method, **kwargs):\n order = kwargs.get(\"order\")\n valid = [\n \"linear\",\n \"time\",\n \"index\",\n \"values\",\n \"nearest\",\n \"zero\",\n \"slinear\",\n \"quadratic\",\n \"cubic\",\n \"barycentric\",\n \"polynomial\",\n \"krogh\",\n \"piecewise_polynomial\",\n \"pchip\",\n \"akima\",\n \"spline\",\n \"from_derivatives\",\n \"cubicspline\",\n ]\n if method in (\"spline\", \"polynomial\") and order is None:\n raise ValueError(\"You must specify the order of the spline or polynomial.\")\n if method not in valid:\n raise ValueError(f\"method must be one of {valid}. 
Got '{method}' instead.\")\n\n return method\n\n\ndef find_valid_index(values, how: str):\n \"\"\"\n Retrieves the index of the first valid value.\n\n Parameters\n ----------\n values : ndarray or ExtensionArray\n how : {'first', 'last'}\n Use this parameter to change between the first or last valid index.\n\n Returns\n -------\n int or None\n \"\"\"\n assert how in [\"first\", \"last\"]\n\n if len(values) == 0: # early stop\n return None\n\n is_valid = ~isna(values)\n\n if values.ndim == 2:\n is_valid = is_valid.any(1) # reduce axis 1\n\n if how == \"first\":\n idxpos = is_valid[::].argmax()\n\n if how == \"last\":\n idxpos = len(values) - 1 - is_valid[::-1].argmax()\n\n chk_notna = is_valid[idxpos]\n\n if not chk_notna:\n return None\n return idxpos\n\n\ndef interpolate_1d(\n xvalues,\n yvalues,\n method=\"linear\",\n limit=None,\n limit_direction=\"forward\",\n limit_area=None,\n fill_value=None,\n bounds_error=False,\n order=None,\n **kwargs,\n):\n \"\"\"\n Logic for the 1-d interpolation. The result should be 1-d, inputs\n xvalues and yvalues will each be 1-d arrays of the same length.\n\n Bounds_error is currently hardcoded to False since non-scipy ones don't\n take it as an argument.\n \"\"\"\n # Treat the original, non-scipy methods first.\n\n invalid = isna(yvalues)\n valid = ~invalid\n\n if not valid.any():\n # have to call np.asarray(xvalues) since xvalues could be an Index\n # which can't be mutated\n result = np.empty_like(np.asarray(xvalues), dtype=np.float64)\n result.fill(np.nan)\n return result\n\n if valid.all():\n return yvalues\n\n if method == \"time\":\n if not getattr(xvalues, \"is_all_dates\", None):\n # if not issubclass(xvalues.dtype.type, np.datetime64):\n raise ValueError(\n \"time-weighted interpolation only works \"\n \"on Series or DataFrames with a \"\n \"DatetimeIndex\"\n )\n method = \"values\"\n\n valid_limit_directions = [\"forward\", \"backward\", \"both\"]\n limit_direction = limit_direction.lower()\n if limit_direction not in valid_limit_directions:\n raise ValueError(\n \"Invalid limit_direction: expecting one of \"\n f\"{valid_limit_directions}, got '{limit_direction}'.\"\n )\n\n if limit_area is not None:\n valid_limit_areas = [\"inside\", \"outside\"]\n limit_area = limit_area.lower()\n if limit_area not in valid_limit_areas:\n raise ValueError(\n f\"Invalid limit_area: expecting one of {valid_limit_areas}, got \"\n f\"{limit_area}.\"\n )\n\n # default limit is unlimited GH #16282\n limit = algos._validate_limit(nobs=None, limit=limit)\n\n # These are sets of index pointers to invalid values... i.e. {0, 1, etc...\n all_nans = set(np.flatnonzero(invalid))\n start_nans = set(range(find_valid_index(yvalues, \"first\")))\n end_nans = set(range(1 + find_valid_index(yvalues, \"last\"), len(valid)))\n mid_nans = all_nans - start_nans - end_nans\n\n # Like the sets above, preserve_nans contains indices of invalid values,\n # but in this case, it is the final set of indices that need to be\n # preserved as NaN after the interpolation.\n\n # For example if limit_direction='forward' then preserve_nans will\n # contain indices of NaNs at the beginning of the series, and NaNs that\n # are more than'limit' away from the prior non-NaN.\n\n # set preserve_nans based on direction using _interp_limit\n if limit_direction == \"forward\":\n preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))\n elif limit_direction == \"backward\":\n preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))\n else:\n # both directions... 
just use _interp_limit\n preserve_nans = set(_interp_limit(invalid, limit, limit))\n\n # if limit_area is set, add either mid or outside indices\n # to preserve_nans GH #16284\n if limit_area == \"inside\":\n # preserve NaNs on the outside\n preserve_nans |= start_nans | end_nans\n elif limit_area == \"outside\":\n # preserve NaNs on the inside\n preserve_nans |= mid_nans\n\n # sort preserve_nans and covert to list\n preserve_nans = sorted(preserve_nans)\n\n xvalues = getattr(xvalues, \"values\", xvalues)\n yvalues = getattr(yvalues, \"values\", yvalues)\n result = yvalues.copy()\n\n if method in [\"linear\", \"time\", \"index\", \"values\"]:\n if method in (\"values\", \"index\"):\n inds = np.asarray(xvalues)\n # hack for DatetimeIndex, #1646\n if needs_i8_conversion(inds.dtype):\n inds = inds.view(np.int64)\n if inds.dtype == np.object_:\n inds = lib.maybe_convert_objects(inds)\n else:\n inds = xvalues\n # np.interp requires sorted X values, #21037\n indexer = np.argsort(inds[valid])\n result[invalid] = np.interp(\n inds[invalid], inds[valid][indexer], yvalues[valid][indexer]\n )\n result[preserve_nans] = np.nan\n return result\n\n sp_methods = [\n \"nearest\",\n \"zero\",\n \"slinear\",\n \"quadratic\",\n \"cubic\",\n \"barycentric\",\n \"krogh\",\n \"spline\",\n \"polynomial\",\n \"from_derivatives\",\n \"piecewise_polynomial\",\n \"pchip\",\n \"akima\",\n \"cubicspline\",\n ]\n\n if method in sp_methods:\n inds = np.asarray(xvalues)\n # hack for DatetimeIndex, #1646\n if issubclass(inds.dtype.type, np.datetime64):\n inds = inds.view(np.int64)\n result[invalid] = _interpolate_scipy_wrapper(\n inds[valid],\n yvalues[valid],\n inds[invalid],\n method=method,\n fill_value=fill_value,\n bounds_error=bounds_error,\n order=order,\n **kwargs,\n )\n result[preserve_nans] = np.nan\n return result\n\n\ndef _interpolate_scipy_wrapper(\n x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs\n):\n \"\"\"\n Passed off to scipy.interpolate.interp1d. method is scipy's kind.\n Returns an array interpolated at new_x. 
Add any new methods to\n the list in _clean_interp_method.\n \"\"\"\n extra = f\"{method} interpolation requires SciPy.\"\n import_optional_dependency(\"scipy\", extra=extra)\n from scipy import interpolate\n\n new_x = np.asarray(new_x)\n\n # ignores some kwargs that could be passed along.\n alt_methods = {\n \"barycentric\": interpolate.barycentric_interpolate,\n \"krogh\": interpolate.krogh_interpolate,\n \"from_derivatives\": _from_derivatives,\n \"piecewise_polynomial\": _from_derivatives,\n }\n\n if getattr(x, \"is_all_dates\", False):\n # GH 5975, scipy.interp1d can't handle datetime64s\n x, new_x = x._values.astype(\"i8\"), new_x.astype(\"i8\")\n\n if method == \"pchip\":\n alt_methods[\"pchip\"] = interpolate.pchip_interpolate\n elif method == \"akima\":\n alt_methods[\"akima\"] = _akima_interpolate\n elif method == \"cubicspline\":\n alt_methods[\"cubicspline\"] = _cubicspline_interpolate\n\n interp1d_methods = [\n \"nearest\",\n \"zero\",\n \"slinear\",\n \"quadratic\",\n \"cubic\",\n \"polynomial\",\n ]\n if method in interp1d_methods:\n if method == \"polynomial\":\n method = order\n terp = interpolate.interp1d(\n x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error\n )\n new_y = terp(new_x)\n elif method == \"spline\":\n # GH #10633, #24014\n if isna(order) or (order <= 0):\n raise ValueError(\n f\"order needs to be specified and greater than 0; got order: {order}\"\n )\n terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)\n new_y = terp(new_x)\n else:\n # GH 7295: need to be able to write for some reason\n # in some circumstances: check all three\n if not x.flags.writeable:\n x = x.copy()\n if not y.flags.writeable:\n y = y.copy()\n if not new_x.flags.writeable:\n new_x = new_x.copy()\n method = alt_methods[method]\n new_y = method(x, y, new_x, **kwargs)\n return new_y\n\n\ndef _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):\n \"\"\"\n Convenience function for interpolate.BPoly.from_derivatives.\n\n Construct a piecewise polynomial in the Bernstein basis, compatible\n with the specified values and derivatives at breakpoints.\n\n Parameters\n ----------\n xi : array_like\n sorted 1D array of x-coordinates\n yi : array_like or list of array-likes\n yi[i][j] is the j-th derivative known at xi[i]\n order: None or int or array_like of ints. Default: None.\n Specifies the degree of local polynomials. If not None, some\n derivatives are ignored.\n der : int or list\n How many derivatives to extract; None for all potentially nonzero\n derivatives (that is a number equal to the number of points), or a\n list of derivatives to extract. This number includes the function\n value as 0th derivative.\n extrapolate : bool, optional\n Whether to extrapolate to ouf-of-bounds points based on first and last\n intervals, or to return NaNs. 
Default: True.\n\n See Also\n --------\n scipy.interpolate.BPoly.from_derivatives\n\n Returns\n -------\n y : scalar or array_like\n The result, of length R or length M or M by R.\n \"\"\"\n from scipy import interpolate\n\n # return the method for compat with scipy version & backwards compat\n method = interpolate.BPoly.from_derivatives\n m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate)\n\n return m(x)\n\n\ndef _akima_interpolate(xi, yi, x, der=0, axis=0):\n \"\"\"\n Convenience function for akima interpolation.\n xi and yi are arrays of values used to approximate some function f,\n with ``yi = f(xi)``.\n\n See `Akima1DInterpolator` for details.\n\n Parameters\n ----------\n xi : array_like\n A sorted list of x-coordinates, of length N.\n yi : array_like\n A 1-D array of real values. `yi`'s length along the interpolation\n axis must be equal to the length of `xi`. If N-D array, use axis\n parameter to select correct axis.\n x : scalar or array_like\n Of length M.\n der : int, optional\n How many derivatives to extract; None for all potentially\n nonzero derivatives (that is a number equal to the number\n of points), or a list of derivatives to extract. This number\n includes the function value as 0th derivative.\n axis : int, optional\n Axis in the yi array corresponding to the x-coordinate values.\n\n See Also\n --------\n scipy.interpolate.Akima1DInterpolator\n\n Returns\n -------\n y : scalar or array_like\n The result, of length R or length M or M by R,\n\n \"\"\"\n from scipy import interpolate\n\n P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)\n\n return P(x, nu=der)\n\n\ndef _cubicspline_interpolate(xi, yi, x, axis=0, bc_type=\"not-a-knot\", extrapolate=None):\n \"\"\"\n Convenience function for cubic spline data interpolator.\n\n See `scipy.interpolate.CubicSpline` for details.\n\n Parameters\n ----------\n xi : array_like, shape (n,)\n 1-d array containing values of the independent variable.\n Values must be real, finite and in strictly increasing order.\n yi : array_like\n Array containing values of the dependent variable. It can have\n arbitrary number of dimensions, but the length along ``axis``\n (see below) must match the length of ``x``. Values must be finite.\n x : scalar or array_like, shape (m,)\n axis : int, optional\n Axis along which `y` is assumed to be varying. Meaning that for\n ``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.\n Default is 0.\n bc_type : string or 2-tuple, optional\n Boundary condition type. Two additional equations, given by the\n boundary conditions, are required to determine all coefficients of\n polynomials on each segment [2]_.\n If `bc_type` is a string, then the specified condition will be applied\n at both ends of a spline. Available conditions are:\n * 'not-a-knot' (default): The first and second segment at a curve end\n are the same polynomial. It is a good default when there is no\n information on boundary conditions.\n * 'periodic': The interpolated functions is assumed to be periodic\n of period ``x[-1] - x[0]``. The first and last value of `y` must be\n identical: ``y[0] == y[-1]``. This boundary condition will result in\n ``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.\n * 'clamped': The first derivative at curves ends are zero. Assuming\n a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.\n * 'natural': The second derivative at curve ends are zero. 
Assuming\n a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.\n If `bc_type` is a 2-tuple, the first and the second value will be\n applied at the curve start and end respectively. The tuple values can\n be one of the previously mentioned strings (except 'periodic') or a\n tuple `(order, deriv_values)` allowing to specify arbitrary\n derivatives at curve ends:\n * `order`: the derivative order, 1 or 2.\n * `deriv_value`: array_like containing derivative values, shape must\n be the same as `y`, excluding ``axis`` dimension. For example, if\n `y` is 1D, then `deriv_value` must be a scalar. If `y` is 3D with\n the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D\n and have the shape (n0, n1).\n extrapolate : {bool, 'periodic', None}, optional\n If bool, determines whether to extrapolate to out-of-bounds points\n based on first and last intervals, or to return NaNs. If 'periodic',\n periodic extrapolation is used. If None (default), ``extrapolate`` is\n set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.\n\n See Also\n --------\n scipy.interpolate.CubicHermiteSpline\n\n Returns\n -------\n y : scalar or array_like\n The result, of shape (m,)\n\n References\n ----------\n .. [1] `Cubic Spline Interpolation\n <https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_\n on Wikiversity.\n .. [2] Carl de Boor, \"A Practical Guide to Splines\", Springer-Verlag, 1978.\n \"\"\"\n from scipy import interpolate\n\n P = interpolate.CubicSpline(\n xi, yi, axis=axis, bc_type=bc_type, extrapolate=extrapolate\n )\n\n return P(x)\n\n\ndef interpolate_2d(\n values, method=\"pad\", axis=0, limit=None, fill_value=None, dtype=None\n):\n \"\"\"\n Perform an actual interpolation of values, values will be make 2-d if\n needed fills inplace, returns the result.\n \"\"\"\n orig_values = values\n\n transf = (lambda x: x) if axis == 0 else (lambda x: x.T)\n\n # reshape a 1 dim if needed\n ndim = values.ndim\n if values.ndim == 1:\n if axis != 0: # pragma: no cover\n raise AssertionError(\"cannot interpolate on a ndim == 1 with axis != 0\")\n values = values.reshape(tuple((1,) + values.shape))\n\n if fill_value is None:\n mask = None\n else: # todo create faster fill func without masking\n mask = mask_missing(transf(values), fill_value)\n\n method = clean_fill_method(method)\n if method == \"pad\":\n values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype))\n else:\n values = transf(\n backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype)\n )\n\n # reshape back\n if ndim == 1:\n values = values[0]\n\n if orig_values.dtype.kind == \"M\":\n # convert float back to datetime64\n values = values.astype(orig_values.dtype)\n\n return values\n\n\ndef _cast_values_for_fillna(values, dtype):\n \"\"\"\n Cast values to a dtype that algos.pad and algos.backfill can handle.\n \"\"\"\n # TODO: for int-dtypes we make a copy, but for everything else this\n # alters the values in-place. 
Is this intentional?\n\n if (\n is_datetime64_dtype(dtype)\n or is_datetime64tz_dtype(dtype)\n or is_timedelta64_dtype(dtype)\n ):\n values = values.view(np.int64)\n\n elif is_integer_dtype(values):\n # NB: this check needs to come after the datetime64 check above\n values = ensure_float64(values)\n\n return values\n\n\ndef _fillna_prep(values, mask=None, dtype=None):\n # boilerplate for pad_1d, backfill_1d, pad_2d, backfill_2d\n if dtype is None:\n dtype = values.dtype\n\n if mask is None:\n # This needs to occur before datetime/timedeltas are cast to int64\n mask = isna(values)\n\n values = _cast_values_for_fillna(values, dtype)\n\n mask = mask.view(np.uint8)\n return values, mask\n\n\ndef pad_1d(values, limit=None, mask=None, dtype=None):\n values, mask = _fillna_prep(values, mask, dtype)\n algos.pad_inplace(values, mask, limit=limit)\n return values\n\n\ndef backfill_1d(values, limit=None, mask=None, dtype=None):\n values, mask = _fillna_prep(values, mask, dtype)\n algos.backfill_inplace(values, mask, limit=limit)\n return values\n\n\ndef pad_2d(values, limit=None, mask=None, dtype=None):\n values, mask = _fillna_prep(values, mask, dtype)\n\n if np.all(values.shape):\n algos.pad_2d_inplace(values, mask, limit=limit)\n else:\n # for test coverage\n pass\n return values\n\n\ndef backfill_2d(values, limit=None, mask=None, dtype=None):\n values, mask = _fillna_prep(values, mask, dtype)\n\n if np.all(values.shape):\n algos.backfill_2d_inplace(values, mask, limit=limit)\n else:\n # for test coverage\n pass\n return values\n\n\n_fill_methods = {\"pad\": pad_1d, \"backfill\": backfill_1d}\n\n\ndef get_fill_func(method):\n method = clean_fill_method(method)\n return _fill_methods[method]\n\n\ndef clean_reindex_fill_method(method):\n return clean_fill_method(method, allow_nearest=True)\n\n\ndef _interp_limit(invalid, fw_limit, bw_limit):\n \"\"\"\n Get indexers of values that won't be filled\n because they exceed the limits.\n\n Parameters\n ----------\n invalid : boolean ndarray\n fw_limit : int or None\n forward limit to index\n bw_limit : int or None\n backward limit to index\n\n Returns\n -------\n set of indexers\n\n Notes\n -----\n This is equivalent to the more readable, but slower\n\n .. code-block:: python\n\n def _interp_limit(invalid, fw_limit, bw_limit):\n for x in np.where(invalid)[0]:\n if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():\n yield x\n \"\"\"\n # handle forward first; the backward direction is the same except\n # 1. operate on the reversed array\n # 2. 
subtract the returned indices from N - 1\n N = len(invalid)\n f_idx = set()\n b_idx = set()\n\n def inner(invalid, limit):\n limit = min(limit, N)\n windowed = _rolling_window(invalid, limit + 1).all(1)\n idx = set(np.where(windowed)[0] + limit) | set(\n np.where((~invalid[: limit + 1]).cumsum() == 0)[0]\n )\n return idx\n\n if fw_limit is not None:\n\n if fw_limit == 0:\n f_idx = set(np.where(invalid)[0])\n else:\n f_idx = inner(invalid, fw_limit)\n\n if bw_limit is not None:\n\n if bw_limit == 0:\n # then we don't even need to care about backwards\n # just use forwards\n return f_idx\n else:\n b_idx = list(inner(invalid[::-1], bw_limit))\n b_idx = set(N - 1 - np.asarray(b_idx))\n if fw_limit == 0:\n return b_idx\n\n return f_idx & b_idx\n\n\ndef _rolling_window(a, window):\n \"\"\"\n [True, True, False, True, False], 2 ->\n\n [\n [True, True],\n [True, False],\n [False, True],\n [True, False],\n ]\n \"\"\"\n # https://stackoverflow.com/a/6811241\n shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n strides = a.strides + (a.strides[-1],)\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n",
"import numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import DataFrame, Index, MultiIndex, Series\nimport pandas._testing as tm\n\n\nclass TestDataFrameSubclassing:\n def test_frame_subclassing_and_slicing(self):\n # Subclass frame and ensure it returns the right class on slicing it\n # In reference to PR 9632\n\n class CustomSeries(Series):\n @property\n def _constructor(self):\n return CustomSeries\n\n def custom_series_function(self):\n return \"OK\"\n\n class CustomDataFrame(DataFrame):\n \"\"\"\n Subclasses pandas DF, fills DF with simulation results, adds some\n custom plotting functions.\n \"\"\"\n\n def __init__(self, *args, **kw):\n super().__init__(*args, **kw)\n\n @property\n def _constructor(self):\n return CustomDataFrame\n\n _constructor_sliced = CustomSeries\n\n def custom_frame_function(self):\n return \"OK\"\n\n data = {\"col1\": range(10), \"col2\": range(10)}\n cdf = CustomDataFrame(data)\n\n # Did we get back our own DF class?\n assert isinstance(cdf, CustomDataFrame)\n\n # Do we get back our own Series class after selecting a column?\n cdf_series = cdf.col1\n assert isinstance(cdf_series, CustomSeries)\n assert cdf_series.custom_series_function() == \"OK\"\n\n # Do we get back our own DF class after slicing row-wise?\n cdf_rows = cdf[1:5]\n assert isinstance(cdf_rows, CustomDataFrame)\n assert cdf_rows.custom_frame_function() == \"OK\"\n\n # Make sure sliced part of multi-index frame is custom class\n mcol = pd.MultiIndex.from_tuples([(\"A\", \"A\"), (\"A\", \"B\")])\n cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)\n assert isinstance(cdf_multi[\"A\"], CustomDataFrame)\n\n mcol = pd.MultiIndex.from_tuples([(\"A\", \"\"), (\"B\", \"\")])\n cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)\n assert isinstance(cdf_multi2[\"A\"], CustomSeries)\n\n def test_dataframe_metadata(self):\n df = tm.SubclassedDataFrame(\n {\"X\": [1, 2, 3], \"Y\": [1, 2, 3]}, index=[\"a\", \"b\", \"c\"]\n )\n df.testattr = \"XXX\"\n\n assert df.testattr == \"XXX\"\n assert df[[\"X\"]].testattr == \"XXX\"\n assert df.loc[[\"a\", \"b\"], :].testattr == \"XXX\"\n assert df.iloc[[0, 1], :].testattr == \"XXX\"\n\n # see gh-9776\n assert df.iloc[0:1, :].testattr == \"XXX\"\n\n # see gh-10553\n unpickled = tm.round_trip_pickle(df)\n tm.assert_frame_equal(df, unpickled)\n assert df._metadata == unpickled._metadata\n assert df.testattr == unpickled.testattr\n\n def test_indexing_sliced(self):\n # GH 11559\n df = tm.SubclassedDataFrame(\n {\"X\": [1, 2, 3], \"Y\": [4, 5, 6], \"Z\": [7, 8, 9]}, index=[\"a\", \"b\", \"c\"]\n )\n res = df.loc[:, \"X\"]\n exp = tm.SubclassedSeries([1, 2, 3], index=list(\"abc\"), name=\"X\")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n res = df.iloc[:, 1]\n exp = tm.SubclassedSeries([4, 5, 6], index=list(\"abc\"), name=\"Y\")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n res = df.loc[:, \"Z\"]\n exp = tm.SubclassedSeries([7, 8, 9], index=list(\"abc\"), name=\"Z\")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n res = df.loc[\"a\", :]\n exp = tm.SubclassedSeries([1, 4, 7], index=list(\"XYZ\"), name=\"a\")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n res = df.iloc[1, :]\n exp = tm.SubclassedSeries([2, 5, 8], index=list(\"XYZ\"), name=\"b\")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n res = df.loc[\"c\", 
:]\n exp = tm.SubclassedSeries([3, 6, 9], index=list(\"XYZ\"), name=\"c\")\n tm.assert_series_equal(res, exp)\n assert isinstance(res, tm.SubclassedSeries)\n\n def test_subclass_attr_err_propagation(self):\n # GH 11808\n class A(DataFrame):\n @property\n def bar(self):\n return self.i_dont_exist\n\n with pytest.raises(AttributeError, match=\".*i_dont_exist.*\"):\n A().bar\n\n def test_subclass_align(self):\n # GH 12983\n df1 = tm.SubclassedDataFrame(\n {\"a\": [1, 3, 5], \"b\": [1, 3, 5]}, index=list(\"ACE\")\n )\n df2 = tm.SubclassedDataFrame(\n {\"c\": [1, 2, 4], \"d\": [1, 2, 4]}, index=list(\"ABD\")\n )\n\n res1, res2 = df1.align(df2, axis=0)\n exp1 = tm.SubclassedDataFrame(\n {\"a\": [1, np.nan, 3, np.nan, 5], \"b\": [1, np.nan, 3, np.nan, 5]},\n index=list(\"ABCDE\"),\n )\n exp2 = tm.SubclassedDataFrame(\n {\"c\": [1, 2, np.nan, 4, np.nan], \"d\": [1, 2, np.nan, 4, np.nan]},\n index=list(\"ABCDE\"),\n )\n assert isinstance(res1, tm.SubclassedDataFrame)\n tm.assert_frame_equal(res1, exp1)\n assert isinstance(res2, tm.SubclassedDataFrame)\n tm.assert_frame_equal(res2, exp2)\n\n res1, res2 = df1.a.align(df2.c)\n assert isinstance(res1, tm.SubclassedSeries)\n tm.assert_series_equal(res1, exp1.a)\n assert isinstance(res2, tm.SubclassedSeries)\n tm.assert_series_equal(res2, exp2.c)\n\n def test_subclass_align_combinations(self):\n # GH 12983\n df = tm.SubclassedDataFrame({\"a\": [1, 3, 5], \"b\": [1, 3, 5]}, index=list(\"ACE\"))\n s = tm.SubclassedSeries([1, 2, 4], index=list(\"ABD\"), name=\"x\")\n\n # frame + series\n res1, res2 = df.align(s, axis=0)\n exp1 = tm.SubclassedDataFrame(\n {\"a\": [1, np.nan, 3, np.nan, 5], \"b\": [1, np.nan, 3, np.nan, 5]},\n index=list(\"ABCDE\"),\n )\n # name is lost when\n exp2 = tm.SubclassedSeries(\n [1, 2, np.nan, 4, np.nan], index=list(\"ABCDE\"), name=\"x\"\n )\n\n assert isinstance(res1, tm.SubclassedDataFrame)\n tm.assert_frame_equal(res1, exp1)\n assert isinstance(res2, tm.SubclassedSeries)\n tm.assert_series_equal(res2, exp2)\n\n # series + frame\n res1, res2 = s.align(df)\n assert isinstance(res1, tm.SubclassedSeries)\n tm.assert_series_equal(res1, exp2)\n assert isinstance(res2, tm.SubclassedDataFrame)\n tm.assert_frame_equal(res2, exp1)\n\n def test_subclass_iterrows(self):\n # GH 13977\n df = tm.SubclassedDataFrame({\"a\": [1]})\n for i, row in df.iterrows():\n assert isinstance(row, tm.SubclassedSeries)\n tm.assert_series_equal(row, df.loc[i])\n\n def test_subclass_stack(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n index=[\"a\", \"b\", \"c\"],\n columns=[\"X\", \"Y\", \"Z\"],\n )\n\n res = df.stack()\n exp = tm.SubclassedSeries(\n [1, 2, 3, 4, 5, 6, 7, 8, 9], index=[list(\"aaabbbccc\"), list(\"XYZXYZXYZ\")]\n )\n\n tm.assert_series_equal(res, exp)\n\n def test_subclass_stack_multi(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],\n index=MultiIndex.from_tuples(\n list(zip(list(\"AABB\"), list(\"cdcd\"))), names=[\"aaa\", \"ccc\"]\n ),\n columns=MultiIndex.from_tuples(\n list(zip(list(\"WWXX\"), list(\"yzyz\"))), names=[\"www\", \"yyy\"]\n ),\n )\n\n exp = tm.SubclassedDataFrame(\n [\n [10, 12],\n [11, 13],\n [20, 22],\n [21, 23],\n [30, 32],\n [31, 33],\n [40, 42],\n [41, 43],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list(\"AAAABBBB\"), list(\"ccddccdd\"), list(\"yzyzyzyz\"))),\n names=[\"aaa\", \"ccc\", \"yyy\"],\n ),\n columns=Index([\"W\", \"X\"], name=\"www\"),\n )\n\n res = df.stack()\n 
tm.assert_frame_equal(res, exp)\n\n res = df.stack(\"yyy\")\n tm.assert_frame_equal(res, exp)\n\n exp = tm.SubclassedDataFrame(\n [\n [10, 11],\n [12, 13],\n [20, 21],\n [22, 23],\n [30, 31],\n [32, 33],\n [40, 41],\n [42, 43],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list(\"AAAABBBB\"), list(\"ccddccdd\"), list(\"WXWXWXWX\"))),\n names=[\"aaa\", \"ccc\", \"www\"],\n ),\n columns=Index([\"y\", \"z\"], name=\"yyy\"),\n )\n\n res = df.stack(\"www\")\n tm.assert_frame_equal(res, exp)\n\n def test_subclass_stack_multi_mixed(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [\n [10, 11, 12.0, 13.0],\n [20, 21, 22.0, 23.0],\n [30, 31, 32.0, 33.0],\n [40, 41, 42.0, 43.0],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list(\"AABB\"), list(\"cdcd\"))), names=[\"aaa\", \"ccc\"]\n ),\n columns=MultiIndex.from_tuples(\n list(zip(list(\"WWXX\"), list(\"yzyz\"))), names=[\"www\", \"yyy\"]\n ),\n )\n\n exp = tm.SubclassedDataFrame(\n [\n [10, 12.0],\n [11, 13.0],\n [20, 22.0],\n [21, 23.0],\n [30, 32.0],\n [31, 33.0],\n [40, 42.0],\n [41, 43.0],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list(\"AAAABBBB\"), list(\"ccddccdd\"), list(\"yzyzyzyz\"))),\n names=[\"aaa\", \"ccc\", \"yyy\"],\n ),\n columns=Index([\"W\", \"X\"], name=\"www\"),\n )\n\n res = df.stack()\n tm.assert_frame_equal(res, exp)\n\n res = df.stack(\"yyy\")\n tm.assert_frame_equal(res, exp)\n\n exp = tm.SubclassedDataFrame(\n [\n [10.0, 11.0],\n [12.0, 13.0],\n [20.0, 21.0],\n [22.0, 23.0],\n [30.0, 31.0],\n [32.0, 33.0],\n [40.0, 41.0],\n [42.0, 43.0],\n ],\n index=MultiIndex.from_tuples(\n list(zip(list(\"AAAABBBB\"), list(\"ccddccdd\"), list(\"WXWXWXWX\"))),\n names=[\"aaa\", \"ccc\", \"www\"],\n ),\n columns=Index([\"y\", \"z\"], name=\"yyy\"),\n )\n\n res = df.stack(\"www\")\n tm.assert_frame_equal(res, exp)\n\n def test_subclass_unstack(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n index=[\"a\", \"b\", \"c\"],\n columns=[\"X\", \"Y\", \"Z\"],\n )\n\n res = df.unstack()\n exp = tm.SubclassedSeries(\n [1, 4, 7, 2, 5, 8, 3, 6, 9], index=[list(\"XXXYYYZZZ\"), list(\"abcabcabc\")]\n )\n\n tm.assert_series_equal(res, exp)\n\n def test_subclass_unstack_multi(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],\n index=MultiIndex.from_tuples(\n list(zip(list(\"AABB\"), list(\"cdcd\"))), names=[\"aaa\", \"ccc\"]\n ),\n columns=MultiIndex.from_tuples(\n list(zip(list(\"WWXX\"), list(\"yzyz\"))), names=[\"www\", \"yyy\"]\n ),\n )\n\n exp = tm.SubclassedDataFrame(\n [[10, 20, 11, 21, 12, 22, 13, 23], [30, 40, 31, 41, 32, 42, 33, 43]],\n index=Index([\"A\", \"B\"], name=\"aaa\"),\n columns=MultiIndex.from_tuples(\n list(zip(list(\"WWWWXXXX\"), list(\"yyzzyyzz\"), list(\"cdcdcdcd\"))),\n names=[\"www\", \"yyy\", \"ccc\"],\n ),\n )\n\n res = df.unstack()\n tm.assert_frame_equal(res, exp)\n\n res = df.unstack(\"ccc\")\n tm.assert_frame_equal(res, exp)\n\n exp = tm.SubclassedDataFrame(\n [[10, 30, 11, 31, 12, 32, 13, 33], [20, 40, 21, 41, 22, 42, 23, 43]],\n index=Index([\"c\", \"d\"], name=\"ccc\"),\n columns=MultiIndex.from_tuples(\n list(zip(list(\"WWWWXXXX\"), list(\"yyzzyyzz\"), list(\"ABABABAB\"))),\n names=[\"www\", \"yyy\", \"aaa\"],\n ),\n )\n\n res = df.unstack(\"aaa\")\n tm.assert_frame_equal(res, exp)\n\n def test_subclass_unstack_multi_mixed(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n [\n [10, 11, 12.0, 13.0],\n [20, 21, 22.0, 23.0],\n [30, 31, 32.0, 33.0],\n [40, 41, 42.0, 43.0],\n ],\n 
index=MultiIndex.from_tuples(\n list(zip(list(\"AABB\"), list(\"cdcd\"))), names=[\"aaa\", \"ccc\"]\n ),\n columns=MultiIndex.from_tuples(\n list(zip(list(\"WWXX\"), list(\"yzyz\"))), names=[\"www\", \"yyy\"]\n ),\n )\n\n exp = tm.SubclassedDataFrame(\n [\n [10, 20, 11, 21, 12.0, 22.0, 13.0, 23.0],\n [30, 40, 31, 41, 32.0, 42.0, 33.0, 43.0],\n ],\n index=Index([\"A\", \"B\"], name=\"aaa\"),\n columns=MultiIndex.from_tuples(\n list(zip(list(\"WWWWXXXX\"), list(\"yyzzyyzz\"), list(\"cdcdcdcd\"))),\n names=[\"www\", \"yyy\", \"ccc\"],\n ),\n )\n\n res = df.unstack()\n tm.assert_frame_equal(res, exp)\n\n res = df.unstack(\"ccc\")\n tm.assert_frame_equal(res, exp)\n\n exp = tm.SubclassedDataFrame(\n [\n [10, 30, 11, 31, 12.0, 32.0, 13.0, 33.0],\n [20, 40, 21, 41, 22.0, 42.0, 23.0, 43.0],\n ],\n index=Index([\"c\", \"d\"], name=\"ccc\"),\n columns=MultiIndex.from_tuples(\n list(zip(list(\"WWWWXXXX\"), list(\"yyzzyyzz\"), list(\"ABABABAB\"))),\n names=[\"www\", \"yyy\", \"aaa\"],\n ),\n )\n\n res = df.unstack(\"aaa\")\n tm.assert_frame_equal(res, exp)\n\n def test_subclass_pivot(self):\n # GH 15564\n df = tm.SubclassedDataFrame(\n {\n \"index\": [\"A\", \"B\", \"C\", \"C\", \"B\", \"A\"],\n \"columns\": [\"One\", \"One\", \"One\", \"Two\", \"Two\", \"Two\"],\n \"values\": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],\n }\n )\n\n pivoted = df.pivot(index=\"index\", columns=\"columns\", values=\"values\")\n\n expected = tm.SubclassedDataFrame(\n {\n \"One\": {\"A\": 1.0, \"B\": 2.0, \"C\": 3.0},\n \"Two\": {\"A\": 1.0, \"B\": 2.0, \"C\": 3.0},\n }\n )\n\n expected.index.name, expected.columns.name = \"index\", \"columns\"\n\n tm.assert_frame_equal(pivoted, expected)\n\n def test_subclassed_melt(self):\n # GH 15564\n cheese = tm.SubclassedDataFrame(\n {\n \"first\": [\"John\", \"Mary\"],\n \"last\": [\"Doe\", \"Bo\"],\n \"height\": [5.5, 6.0],\n \"weight\": [130, 150],\n }\n )\n\n melted = pd.melt(cheese, id_vars=[\"first\", \"last\"])\n\n expected = tm.SubclassedDataFrame(\n [\n [\"John\", \"Doe\", \"height\", 5.5],\n [\"Mary\", \"Bo\", \"height\", 6.0],\n [\"John\", \"Doe\", \"weight\", 130],\n [\"Mary\", \"Bo\", \"weight\", 150],\n ],\n columns=[\"first\", \"last\", \"variable\", \"value\"],\n )\n\n tm.assert_frame_equal(melted, expected)\n\n def test_subclassed_wide_to_long(self):\n # GH 9762\n\n np.random.seed(123)\n x = np.random.randn(3)\n df = tm.SubclassedDataFrame(\n {\n \"A1970\": {0: \"a\", 1: \"b\", 2: \"c\"},\n \"A1980\": {0: \"d\", 1: \"e\", 2: \"f\"},\n \"B1970\": {0: 2.5, 1: 1.2, 2: 0.7},\n \"B1980\": {0: 3.2, 1: 1.3, 2: 0.1},\n \"X\": dict(zip(range(3), x)),\n }\n )\n\n df[\"id\"] = df.index\n exp_data = {\n \"X\": x.tolist() + x.tolist(),\n \"A\": [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"],\n \"B\": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],\n \"year\": [1970, 1970, 1970, 1980, 1980, 1980],\n \"id\": [0, 1, 2, 0, 1, 2],\n }\n expected = tm.SubclassedDataFrame(exp_data)\n expected = expected.set_index([\"id\", \"year\"])[[\"X\", \"A\", \"B\"]]\n long_frame = pd.wide_to_long(df, [\"A\", \"B\"], i=\"id\", j=\"year\")\n\n tm.assert_frame_equal(long_frame, expected)\n\n def test_subclassed_apply(self):\n # GH 19822\n\n def check_row_subclass(row):\n assert isinstance(row, tm.SubclassedSeries)\n\n def strech(row):\n if row[\"variable\"] == \"height\":\n row[\"value\"] += 0.5\n return row\n\n df = tm.SubclassedDataFrame(\n [\n [\"John\", \"Doe\", \"height\", 5.5],\n [\"Mary\", \"Bo\", \"height\", 6.0],\n [\"John\", \"Doe\", \"weight\", 130],\n [\"Mary\", \"Bo\", \"weight\", 150],\n ],\n columns=[\"first\", 
\"last\", \"variable\", \"value\"],\n )\n\n df.apply(lambda x: check_row_subclass(x))\n df.apply(lambda x: check_row_subclass(x), axis=1)\n\n expected = tm.SubclassedDataFrame(\n [\n [\"John\", \"Doe\", \"height\", 6.0],\n [\"Mary\", \"Bo\", \"height\", 6.5],\n [\"John\", \"Doe\", \"weight\", 130],\n [\"Mary\", \"Bo\", \"weight\", 150],\n ],\n columns=[\"first\", \"last\", \"variable\", \"value\"],\n )\n\n result = df.apply(lambda x: strech(x), axis=1)\n assert isinstance(result, tm.SubclassedDataFrame)\n tm.assert_frame_equal(result, expected)\n\n expected = tm.SubclassedDataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])\n\n result = df.apply(lambda x: tm.SubclassedSeries([1, 2, 3]), axis=1)\n assert isinstance(result, tm.SubclassedDataFrame)\n tm.assert_frame_equal(result, expected)\n\n result = df.apply(lambda x: [1, 2, 3], axis=1, result_type=\"expand\")\n assert isinstance(result, tm.SubclassedDataFrame)\n tm.assert_frame_equal(result, expected)\n\n expected = tm.SubclassedSeries([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])\n\n result = df.apply(lambda x: [1, 2, 3], axis=1)\n assert not isinstance(result, tm.SubclassedDataFrame)\n tm.assert_series_equal(result, expected)\n\n def test_subclassed_reductions(self, all_reductions):\n # GH 25596\n\n df = tm.SubclassedDataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n result = getattr(df, all_reductions)()\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_subclassed_count(self):\n\n df = tm.SubclassedDataFrame(\n {\n \"Person\": [\"John\", \"Myla\", \"Lewis\", \"John\", \"Myla\"],\n \"Age\": [24.0, np.nan, 21.0, 33, 26],\n \"Single\": [False, True, True, True, False],\n }\n )\n result = df.count()\n assert isinstance(result, tm.SubclassedSeries)\n\n df = tm.SubclassedDataFrame({\"A\": [1, 0, 3], \"B\": [0, 5, 6], \"C\": [7, 8, 0]})\n result = df.count()\n assert isinstance(result, tm.SubclassedSeries)\n\n df = tm.SubclassedDataFrame(\n [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]],\n index=MultiIndex.from_tuples(\n list(zip(list(\"AABB\"), list(\"cdcd\"))), names=[\"aaa\", \"ccc\"]\n ),\n columns=MultiIndex.from_tuples(\n list(zip(list(\"WWXX\"), list(\"yzyz\"))), names=[\"www\", \"yyy\"]\n ),\n )\n result = df.count(level=1)\n assert isinstance(result, tm.SubclassedDataFrame)\n\n df = tm.SubclassedDataFrame()\n result = df.count()\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_isin(self):\n\n df = tm.SubclassedDataFrame(\n {\"num_legs\": [2, 4], \"num_wings\": [2, 0]}, index=[\"falcon\", \"dog\"]\n )\n result = df.isin([0, 2])\n assert isinstance(result, tm.SubclassedDataFrame)\n\n def test_duplicated(self):\n\n df = tm.SubclassedDataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n result = df.duplicated()\n assert isinstance(result, tm.SubclassedSeries)\n\n df = tm.SubclassedDataFrame()\n result = df.duplicated()\n assert isinstance(result, tm.SubclassedSeries)\n\n @pytest.mark.parametrize(\"idx_method\", [\"idxmax\", \"idxmin\"])\n def test_idx(self, idx_method):\n\n df = tm.SubclassedDataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n result = getattr(df, idx_method)()\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_dot(self):\n\n df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n s = tm.SubclassedSeries([1, 1, 2, 1])\n result = df.dot(s)\n assert isinstance(result, tm.SubclassedSeries)\n\n df = tm.SubclassedDataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n s = tm.SubclassedDataFrame([1, 1, 2, 1])\n result = 
df.dot(s)\n assert isinstance(result, tm.SubclassedDataFrame)\n\n def test_memory_usage(self):\n\n df = tm.SubclassedDataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n result = df.memory_usage()\n assert isinstance(result, tm.SubclassedSeries)\n\n result = df.memory_usage(index=False)\n assert isinstance(result, tm.SubclassedSeries)\n\n @td.skip_if_no_scipy\n def test_corrwith(self):\n index = [\"a\", \"b\", \"c\", \"d\", \"e\"]\n columns = [\"one\", \"two\", \"three\", \"four\"]\n df1 = tm.SubclassedDataFrame(\n np.random.randn(5, 4), index=index, columns=columns\n )\n df2 = tm.SubclassedDataFrame(\n np.random.randn(4, 4), index=index[:4], columns=columns\n )\n correls = df1.corrwith(df2, axis=1, drop=True, method=\"kendall\")\n\n assert isinstance(correls, (tm.SubclassedSeries))\n\n def test_asof(self):\n\n N = 3\n rng = pd.date_range(\"1/1/1990\", periods=N, freq=\"53s\")\n df = tm.SubclassedDataFrame(\n {\n \"A\": [np.nan, np.nan, np.nan],\n \"B\": [np.nan, np.nan, np.nan],\n \"C\": [np.nan, np.nan, np.nan],\n },\n index=rng,\n )\n\n result = df.asof(rng[-2:])\n assert isinstance(result, tm.SubclassedDataFrame)\n\n result = df.asof(rng[-2])\n assert isinstance(result, tm.SubclassedSeries)\n\n result = df.asof(\"1989-12-31\")\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_idxmin_preserves_subclass(self):\n # GH 28330\n\n df = tm.SubclassedDataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n result = df.idxmin()\n assert isinstance(result, tm.SubclassedSeries)\n\n def test_idxmax_preserves_subclass(self):\n # GH 28330\n\n df = tm.SubclassedDataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n result = df.idxmax()\n assert isinstance(result, tm.SubclassedSeries)\n",
"import operator\nimport warnings\n\nimport numpy as np\n\nimport pandas as pd\nfrom pandas import DataFrame, Series, Timestamp, date_range, to_timedelta\nimport pandas._testing as tm\nfrom pandas.core.algorithms import checked_add_with_arr\n\nfrom .pandas_vb_common import numeric_dtypes\n\ntry:\n import pandas.core.computation.expressions as expr\nexcept ImportError:\n import pandas.computation.expressions as expr\ntry:\n import pandas.tseries.holiday\nexcept ImportError:\n pass\n\n\nclass IntFrameWithScalar:\n params = [\n [np.float64, np.int64],\n [2, 3.0, np.int32(4), np.float64(5)],\n [\n operator.add,\n operator.sub,\n operator.mul,\n operator.truediv,\n operator.floordiv,\n operator.pow,\n operator.mod,\n operator.eq,\n operator.ne,\n operator.gt,\n operator.ge,\n operator.lt,\n operator.le,\n ],\n ]\n param_names = [\"dtype\", \"scalar\", \"op\"]\n\n def setup(self, dtype, scalar, op):\n arr = np.random.randn(20000, 100)\n self.df = DataFrame(arr.astype(dtype))\n\n def time_frame_op_with_scalar(self, dtype, scalar, op):\n op(self.df, scalar)\n\n\nclass OpWithFillValue:\n def setup(self):\n # GH#31300\n arr = np.arange(10 ** 6)\n df = DataFrame({\"A\": arr})\n ser = df[\"A\"]\n\n self.df = df\n self.ser = ser\n\n def time_frame_op_with_fill_value_no_nas(self):\n self.df.add(self.df, fill_value=4)\n\n def time_series_op_with_fill_value_no_nas(self):\n self.ser.add(self.ser, fill_value=4)\n\n\nclass MixedFrameWithSeriesAxis:\n params = [\n [\n \"eq\",\n \"ne\",\n \"lt\",\n \"le\",\n \"ge\",\n \"gt\",\n \"add\",\n \"sub\",\n \"truediv\",\n \"floordiv\",\n \"mul\",\n \"pow\",\n ]\n ]\n param_names = [\"opname\"]\n\n def setup(self, opname):\n arr = np.arange(10 ** 6).reshape(1000, -1)\n df = DataFrame(arr)\n df[\"C\"] = 1.0\n self.df = df\n self.ser = df[0]\n self.row = df.iloc[0]\n\n def time_frame_op_with_series_axis0(self, opname):\n getattr(self.df, opname)(self.ser, axis=0)\n\n def time_frame_op_with_series_axis1(self, opname):\n getattr(operator, opname)(self.df, self.ser)\n\n\nclass FrameWithFrameWide:\n # Many-columns, mixed dtypes\n\n params = [\n [\n # GH#32779 has discussion of which operators are included here\n operator.add,\n operator.floordiv,\n operator.gt,\n ]\n ]\n param_names = [\"op\"]\n\n def setup(self, op):\n # we choose dtypes so as to make the blocks\n # a) not perfectly match between right and left\n # b) appreciably bigger than single columns\n n_cols = 2000\n n_rows = 500\n\n # construct dataframe with 2 blocks\n arr1 = np.random.randn(n_rows, int(n_cols / 2)).astype(\"f8\")\n arr2 = np.random.randn(n_rows, int(n_cols / 2)).astype(\"f4\")\n df = pd.concat(\n [pd.DataFrame(arr1), pd.DataFrame(arr2)], axis=1, ignore_index=True,\n )\n # should already be the case, but just to be sure\n df._consolidate_inplace()\n\n # TODO: GH#33198 the setting here shoudlnt need two steps\n arr1 = np.random.randn(n_rows, int(n_cols / 4)).astype(\"f8\")\n arr2 = np.random.randn(n_rows, int(n_cols / 2)).astype(\"i8\")\n arr3 = np.random.randn(n_rows, int(n_cols / 4)).astype(\"f8\")\n df2 = pd.concat(\n [pd.DataFrame(arr1), pd.DataFrame(arr2), pd.DataFrame(arr3)],\n axis=1,\n ignore_index=True,\n )\n # should already be the case, but just to be sure\n df2._consolidate_inplace()\n\n self.left = df\n self.right = df2\n\n def time_op_different_blocks(self, op):\n # blocks (and dtypes) are not aligned\n op(self.left, self.right)\n\n def time_op_same_blocks(self, op):\n # blocks (and dtypes) are aligned\n op(self.left, self.left)\n\n\nclass Ops:\n\n params = [[True, False], 
[\"default\", 1]]\n param_names = [\"use_numexpr\", \"threads\"]\n\n def setup(self, use_numexpr, threads):\n self.df = DataFrame(np.random.randn(20000, 100))\n self.df2 = DataFrame(np.random.randn(20000, 100))\n\n if threads != \"default\":\n expr.set_numexpr_threads(threads)\n if not use_numexpr:\n expr.set_use_numexpr(False)\n\n def time_frame_add(self, use_numexpr, threads):\n self.df + self.df2\n\n def time_frame_mult(self, use_numexpr, threads):\n self.df * self.df2\n\n def time_frame_multi_and(self, use_numexpr, threads):\n self.df[(self.df > 0) & (self.df2 > 0)]\n\n def time_frame_comparison(self, use_numexpr, threads):\n self.df > self.df2\n\n def teardown(self, use_numexpr, threads):\n expr.set_use_numexpr(True)\n expr.set_numexpr_threads()\n\n\nclass Ops2:\n def setup(self):\n N = 10 ** 3\n self.df = DataFrame(np.random.randn(N, N))\n self.df2 = DataFrame(np.random.randn(N, N))\n\n self.df_int = DataFrame(\n np.random.randint(\n np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(N, N)\n )\n )\n self.df2_int = DataFrame(\n np.random.randint(\n np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(N, N)\n )\n )\n\n self.s = Series(np.random.randn(N))\n\n # Division\n\n def time_frame_float_div(self):\n self.df // self.df2\n\n def time_frame_float_div_by_zero(self):\n self.df / 0\n\n def time_frame_float_floor_by_zero(self):\n self.df // 0\n\n def time_frame_int_div_by_zero(self):\n self.df_int / 0\n\n # Modulo\n\n def time_frame_int_mod(self):\n self.df_int % self.df2_int\n\n def time_frame_float_mod(self):\n self.df % self.df2\n\n # Dot product\n\n def time_frame_dot(self):\n self.df.dot(self.df2)\n\n def time_series_dot(self):\n self.s.dot(self.s)\n\n def time_frame_series_dot(self):\n self.df.dot(self.s)\n\n\nclass Timeseries:\n\n params = [None, \"US/Eastern\"]\n param_names = [\"tz\"]\n\n def setup(self, tz):\n N = 10 ** 6\n halfway = (N // 2) - 1\n self.s = Series(date_range(\"20010101\", periods=N, freq=\"T\", tz=tz))\n self.ts = self.s[halfway]\n\n self.s2 = Series(date_range(\"20010101\", periods=N, freq=\"s\", tz=tz))\n\n def time_series_timestamp_compare(self, tz):\n self.s <= self.ts\n\n def time_timestamp_series_compare(self, tz):\n self.ts >= self.s\n\n def time_timestamp_ops_diff(self, tz):\n self.s2.diff()\n\n def time_timestamp_ops_diff_with_shift(self, tz):\n self.s - self.s.shift()\n\n\nclass IrregularOps:\n def setup(self):\n N = 10 ** 5\n idx = date_range(start=\"1/1/2000\", periods=N, freq=\"s\")\n s = Series(np.random.randn(N), index=idx)\n self.left = s.sample(frac=1)\n self.right = s.sample(frac=1)\n\n def time_add(self):\n self.left + self.right\n\n\nclass TimedeltaOps:\n def setup(self):\n self.td = to_timedelta(np.arange(1000000))\n self.ts = Timestamp(\"2000\")\n\n def time_add_td_ts(self):\n self.td + self.ts\n\n\nclass CategoricalComparisons:\n params = [\"__lt__\", \"__le__\", \"__eq__\", \"__ne__\", \"__ge__\", \"__gt__\"]\n param_names = [\"op\"]\n\n def setup(self, op):\n N = 10 ** 5\n self.cat = pd.Categorical(list(\"aabbcd\") * N, ordered=True)\n\n def time_categorical_op(self, op):\n getattr(self.cat, op)(\"b\")\n\n\nclass IndexArithmetic:\n\n params = [\"float\", \"int\"]\n param_names = [\"dtype\"]\n\n def setup(self, dtype):\n N = 10 ** 6\n indexes = {\"int\": \"makeIntIndex\", \"float\": \"makeFloatIndex\"}\n self.index = getattr(tm, indexes[dtype])(N)\n\n def time_add(self, dtype):\n self.index + 2\n\n def time_subtract(self, dtype):\n self.index - 2\n\n def time_multiply(self, dtype):\n self.index * 2\n\n def time_divide(self, 
dtype):\n self.index / 2\n\n def time_modulo(self, dtype):\n self.index % 2\n\n\nclass NumericInferOps:\n # from GH 7332\n params = numeric_dtypes\n param_names = [\"dtype\"]\n\n def setup(self, dtype):\n N = 5 * 10 ** 5\n self.df = DataFrame(\n {\"A\": np.arange(N).astype(dtype), \"B\": np.arange(N).astype(dtype)}\n )\n\n def time_add(self, dtype):\n self.df[\"A\"] + self.df[\"B\"]\n\n def time_subtract(self, dtype):\n self.df[\"A\"] - self.df[\"B\"]\n\n def time_multiply(self, dtype):\n self.df[\"A\"] * self.df[\"B\"]\n\n def time_divide(self, dtype):\n self.df[\"A\"] / self.df[\"B\"]\n\n def time_modulo(self, dtype):\n self.df[\"A\"] % self.df[\"B\"]\n\n\nclass DateInferOps:\n # from GH 7332\n def setup_cache(self):\n N = 5 * 10 ** 5\n df = DataFrame({\"datetime64\": np.arange(N).astype(\"datetime64[ms]\")})\n df[\"timedelta\"] = df[\"datetime64\"] - df[\"datetime64\"]\n return df\n\n def time_subtract_datetimes(self, df):\n df[\"datetime64\"] - df[\"datetime64\"]\n\n def time_timedelta_plus_datetime(self, df):\n df[\"timedelta\"] + df[\"datetime64\"]\n\n def time_add_timedeltas(self, df):\n df[\"timedelta\"] + df[\"timedelta\"]\n\n\nclass AddOverflowScalar:\n\n params = [1, -1, 0]\n param_names = [\"scalar\"]\n\n def setup(self, scalar):\n N = 10 ** 6\n self.arr = np.arange(N)\n\n def time_add_overflow_scalar(self, scalar):\n checked_add_with_arr(self.arr, scalar)\n\n\nclass AddOverflowArray:\n def setup(self):\n N = 10 ** 6\n self.arr = np.arange(N)\n self.arr_rev = np.arange(-N, 0)\n self.arr_mixed = np.array([1, -1]).repeat(N / 2)\n self.arr_nan_1 = np.random.choice([True, False], size=N)\n self.arr_nan_2 = np.random.choice([True, False], size=N)\n\n def time_add_overflow_arr_rev(self):\n checked_add_with_arr(self.arr, self.arr_rev)\n\n def time_add_overflow_arr_mask_nan(self):\n checked_add_with_arr(self.arr, self.arr_mixed, arr_mask=self.arr_nan_1)\n\n def time_add_overflow_b_mask_nan(self):\n checked_add_with_arr(self.arr, self.arr_mixed, b_mask=self.arr_nan_1)\n\n def time_add_overflow_both_arg_nan(self):\n checked_add_with_arr(\n self.arr, self.arr_mixed, arr_mask=self.arr_nan_1, b_mask=self.arr_nan_2\n )\n\n\nhcal = pd.tseries.holiday.USFederalHolidayCalendar()\n# These offsets currently raise a NotImplimentedError with .apply_index()\nnon_apply = [\n pd.offsets.Day(),\n pd.offsets.BYearEnd(),\n pd.offsets.BYearBegin(),\n pd.offsets.BQuarterEnd(),\n pd.offsets.BQuarterBegin(),\n pd.offsets.BMonthEnd(),\n pd.offsets.BMonthBegin(),\n pd.offsets.CustomBusinessDay(),\n pd.offsets.CustomBusinessDay(calendar=hcal),\n pd.offsets.CustomBusinessMonthBegin(calendar=hcal),\n pd.offsets.CustomBusinessMonthEnd(calendar=hcal),\n pd.offsets.CustomBusinessMonthEnd(calendar=hcal),\n]\nother_offsets = [\n pd.offsets.YearEnd(),\n pd.offsets.YearBegin(),\n pd.offsets.QuarterEnd(),\n pd.offsets.QuarterBegin(),\n pd.offsets.MonthEnd(),\n pd.offsets.MonthBegin(),\n pd.offsets.DateOffset(months=2, days=2),\n pd.offsets.BusinessDay(),\n pd.offsets.SemiMonthEnd(),\n pd.offsets.SemiMonthBegin(),\n]\noffsets = non_apply + other_offsets\n\n\nclass OffsetArrayArithmetic:\n\n params = offsets\n param_names = [\"offset\"]\n\n def setup(self, offset):\n N = 10000\n rng = pd.date_range(start=\"1/1/2000\", periods=N, freq=\"T\")\n self.rng = rng\n self.ser = pd.Series(rng)\n\n def time_add_series_offset(self, offset):\n with warnings.catch_warnings(record=True):\n self.ser + offset\n\n def time_add_dti_offset(self, offset):\n with warnings.catch_warnings(record=True):\n self.rng + offset\n\n\nclass 
ApplyIndex:\n params = other_offsets\n param_names = [\"offset\"]\n\n def setup(self, offset):\n N = 10000\n rng = pd.date_range(start=\"1/1/2000\", periods=N, freq=\"T\")\n self.rng = rng\n\n def time_apply_index(self, offset):\n offset.apply_index(self.rng)\n\n\nfrom .pandas_vb_common import setup # noqa: F401 isort:skip\n",
"import pytest\n\nfrom pandas._libs.tslibs.frequencies import (\n FreqGroup,\n _attrname_to_abbrevs,\n _period_code_map,\n get_freq_code,\n get_freq_group,\n get_to_timestamp_base,\n)\nfrom pandas._libs.tslibs.resolution import Resolution as _reso\n\nimport pandas.tseries.offsets as offsets\n\n\[email protected](params=list(_period_code_map.items()))\ndef period_code_item(request):\n return request.param\n\n\[email protected](\n \"freqstr,expected\",\n [\n (\"A\", 1000),\n (\"3A\", 1000),\n (\"-1A\", 1000),\n (\"Y\", 1000),\n (\"3Y\", 1000),\n (\"-1Y\", 1000),\n (\"W\", 4000),\n (\"W-MON\", 4001),\n (\"W-FRI\", 4005),\n ],\n)\ndef test_freq_code(freqstr, expected):\n assert get_freq_code(freqstr)[0] == expected\n\n\ndef test_freq_code_match(period_code_item):\n freqstr, code = period_code_item\n assert get_freq_code(freqstr)[0] == code\n\n\[email protected](\n \"freqstr,expected\",\n [\n (\"A\", 1000),\n (\"3A\", 1000),\n (\"-1A\", 1000),\n (\"A-JAN\", 1000),\n (\"A-MAY\", 1000),\n (\"Y\", 1000),\n (\"3Y\", 1000),\n (\"-1Y\", 1000),\n (\"Y-JAN\", 1000),\n (\"Y-MAY\", 1000),\n (offsets.YearEnd(), 1000),\n (offsets.YearEnd(month=1), 1000),\n (offsets.YearEnd(month=5), 1000),\n (\"W\", 4000),\n (\"W-MON\", 4000),\n (\"W-FRI\", 4000),\n (offsets.Week(), 4000),\n (offsets.Week(weekday=1), 4000),\n (offsets.Week(weekday=5), 4000),\n (\"T\", FreqGroup.FR_MIN),\n ],\n)\ndef test_freq_group(freqstr, expected):\n assert get_freq_group(freqstr) == expected\n\n\ndef test_freq_group_match(period_code_item):\n freqstr, code = period_code_item\n\n str_group = get_freq_group(freqstr)\n code_group = get_freq_group(code)\n\n assert str_group == code_group == code // 1000 * 1000\n\n\[email protected](\n \"freqstr,exp_freqstr\",\n [(\"D\", \"D\"), (\"W\", \"D\"), (\"M\", \"D\"), (\"S\", \"S\"), (\"T\", \"S\"), (\"H\", \"S\")],\n)\ndef test_get_to_timestamp_base(freqstr, exp_freqstr):\n tsb = get_to_timestamp_base\n\n assert tsb(get_freq_code(freqstr)[0]) == get_freq_code(exp_freqstr)[0]\n\n\[email protected](\n \"freqstr,expected\",\n [\n (\"A\", \"year\"),\n (\"Q\", \"quarter\"),\n (\"M\", \"month\"),\n (\"D\", \"day\"),\n (\"H\", \"hour\"),\n (\"T\", \"minute\"),\n (\"S\", \"second\"),\n (\"L\", \"millisecond\"),\n (\"U\", \"microsecond\"),\n (\"N\", \"nanosecond\"),\n ],\n)\ndef test_get_str_from_freq(freqstr, expected):\n assert _reso.get_str_from_freq(freqstr) == expected\n\n\[email protected](\"freq\", [\"A\", \"Q\", \"M\", \"D\", \"H\", \"T\", \"S\", \"L\", \"U\", \"N\"])\ndef test_get_freq_roundtrip(freq):\n result = _attrname_to_abbrevs[_reso.get_str_from_freq(freq)]\n assert freq == result\n\n\[email protected](\"freq\", [\"D\", \"H\", \"T\", \"S\", \"L\", \"U\"])\ndef test_get_freq_roundtrip2(freq):\n result = _attrname_to_abbrevs[_reso.get_str(_reso.get_reso_from_freq(freq))]\n assert freq == result\n\n\[email protected](\n \"args,expected\",\n [\n ((1.5, \"T\"), (90, \"S\")),\n ((62.4, \"T\"), (3744, \"S\")),\n ((1.04, \"H\"), (3744, \"S\")),\n ((1, \"D\"), (1, \"D\")),\n ((0.342931, \"H\"), (1234551600, \"U\")),\n ((1.2345, \"D\"), (106660800, \"L\")),\n ],\n)\ndef test_resolution_bumping(args, expected):\n # see gh-14378\n assert _reso.get_stride_from_decimal(*args) == expected\n\n\[email protected](\n \"args\",\n [\n (0.5, \"N\"),\n # Too much precision in the input can prevent.\n (0.3429324798798269273987982, \"H\"),\n ],\n)\ndef test_cat(args):\n msg = \"Could not convert to integer offset at any resolution\"\n\n with pytest.raises(ValueError, match=msg):\n 
_reso.get_stride_from_decimal(*args)\n\n\[email protected](\n \"freq_input,expected\",\n [\n # Frequency string.\n (\"A\", (get_freq_code(\"A\")[0], 1)),\n (\"3D\", (get_freq_code(\"D\")[0], 3)),\n (\"-2M\", (get_freq_code(\"M\")[0], -2)),\n # Tuple.\n ((\"D\", 1), (get_freq_code(\"D\")[0], 1)),\n ((\"A\", 3), (get_freq_code(\"A\")[0], 3)),\n ((\"M\", -2), (get_freq_code(\"M\")[0], -2)),\n ((5, \"T\"), (FreqGroup.FR_MIN, 5)),\n # Numeric Tuple.\n ((1000, 1), (1000, 1)),\n # Offsets.\n (offsets.Day(), (get_freq_code(\"D\")[0], 1)),\n (offsets.Day(3), (get_freq_code(\"D\")[0], 3)),\n (offsets.Day(-2), (get_freq_code(\"D\")[0], -2)),\n (offsets.MonthEnd(), (get_freq_code(\"M\")[0], 1)),\n (offsets.MonthEnd(3), (get_freq_code(\"M\")[0], 3)),\n (offsets.MonthEnd(-2), (get_freq_code(\"M\")[0], -2)),\n (offsets.Week(), (get_freq_code(\"W\")[0], 1)),\n (offsets.Week(3), (get_freq_code(\"W\")[0], 3)),\n (offsets.Week(-2), (get_freq_code(\"W\")[0], -2)),\n (offsets.Hour(), (FreqGroup.FR_HR, 1)),\n # Monday is weekday=0.\n (offsets.Week(weekday=1), (get_freq_code(\"W-TUE\")[0], 1)),\n (offsets.Week(3, weekday=0), (get_freq_code(\"W-MON\")[0], 3)),\n (offsets.Week(-2, weekday=4), (get_freq_code(\"W-FRI\")[0], -2)),\n ],\n)\ndef test_get_freq_code(freq_input, expected):\n assert get_freq_code(freq_input) == expected\n\n\ndef test_get_code_invalid():\n with pytest.raises(ValueError, match=\"Invalid frequency\"):\n get_freq_code((5, \"baz\"))\n"
] | [
[
"pandas._testing.assert_numpy_array_equal",
"pandas.core.arrays.boolean.coerce_to_array",
"pandas.arrays.BooleanArray._from_sequence_of_strings",
"pandas.array",
"pandas.arrays.BooleanArray",
"pandas._testing.assert_extension_array_equal",
"pandas.date_range",
"numpy.array"
],
[
"pandas.core.dtypes.common.is_numeric_v_string_like",
"numpy.asarray",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.all",
"numpy.lib.stride_tricks.as_strided",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas._libs.algos.pad_inplace",
"numpy.where",
"scipy.interpolate.UnivariateSpline",
"pandas.core.dtypes.common.ensure_float64",
"pandas._libs.algos._validate_limit",
"scipy.interpolate.Akima1DInterpolator",
"pandas.compat._optional.import_optional_dependency",
"numpy.flatnonzero",
"scipy.interpolate.interp1d",
"scipy.interpolate.CubicSpline",
"numpy.interp",
"numpy.zeros",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas._libs.algos.backfill_inplace",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"numpy.argsort",
"numpy.array",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.dtypes.cast.infer_dtype_from_array",
"pandas.core.dtypes.common.is_scalar",
"pandas._libs.algos.backfill_2d_inplace",
"pandas._libs.algos.pad_2d_inplace",
"pandas.core.dtypes.missing.isna",
"pandas._libs.lib.maybe_convert_objects"
],
[
"numpy.random.seed",
"pandas.wide_to_long",
"pandas.MultiIndex.from_tuples",
"pandas.Index",
"pandas._testing.SubclassedSeries",
"numpy.random.randn",
"pandas._testing.SubclassedDataFrame",
"pandas._testing.round_trip_pickle",
"pandas.date_range",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_frame_equal",
"pandas.melt"
],
[
"pandas.Series",
"pandas.offsets.Day",
"pandas.offsets.BQuarterEnd",
"pandas.offsets.DateOffset",
"pandas.DataFrame",
"pandas.offsets.CustomBusinessMonthBegin",
"numpy.random.randn",
"numpy.iinfo",
"pandas.offsets.QuarterBegin",
"pandas.offsets.CustomBusinessDay",
"pandas.Timestamp",
"pandas.offsets.MonthBegin",
"numpy.arange",
"pandas.offsets.BYearEnd",
"pandas.offsets.QuarterEnd",
"pandas.offsets.MonthEnd",
"pandas.offsets.BMonthBegin",
"pandas.computation.expressions.set_use_numexpr",
"pandas.offsets.BMonthEnd",
"pandas.offsets.BQuarterBegin",
"pandas.offsets.SemiMonthEnd",
"numpy.random.choice",
"pandas.core.algorithms.checked_add_with_arr",
"pandas.offsets.YearBegin",
"pandas.tseries.holiday.USFederalHolidayCalendar",
"pandas.date_range",
"numpy.array",
"pandas.offsets.YearEnd",
"pandas.computation.expressions.set_numexpr_threads",
"pandas.offsets.SemiMonthBegin",
"numpy.int32",
"pandas.offsets.CustomBusinessMonthEnd",
"numpy.float64",
"pandas.offsets.BYearBegin",
"pandas.offsets.BusinessDay"
],
[
"pandas.tseries.offsets.Hour",
"pandas.tseries.offsets.Day",
"pandas._libs.tslibs.resolution.Resolution.get_stride_from_decimal",
"pandas.tseries.offsets.YearEnd",
"pandas._libs.tslibs.frequencies.get_freq_group",
"pandas._libs.tslibs.resolution.Resolution.get_str_from_freq",
"pandas.tseries.offsets.Week",
"pandas._libs.tslibs.resolution.Resolution.get_reso_from_freq",
"pandas._libs.tslibs.frequencies.get_freq_code",
"pandas.tseries.offsets.MonthEnd",
"pandas._libs.tslibs.frequencies._period_code_map.items"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
VitoRazor/Lidar_RGB_detector | [
"5308ba24a90d6e8d73940be4b40d31eccb4df94b"
] | [
"second/pytorch/train.py"
] | [
"import copy\nimport json\nimport os\nfrom pathlib import Path\nimport pickle\nimport shutil\nimport time\nimport re \nimport fire\nimport numpy as np\nimport torch\nfrom google.protobuf import text_format\n\nimport second.data.kitti_common as kitti\nimport torchplus\nfrom second.builder import target_assigner_builder, voxel_builder\nfrom second.core import box_np_ops\nfrom second.data.preprocess import merge_second_batch, merge_second_batch_multigpu\nfrom second.protos import pipeline_pb2\nfrom second.pytorch.builder import (box_coder_builder, input_reader_builder,\n lr_scheduler_builder, optimizer_builder,\n second_builder)\nfrom second.utils.log_tool import SimpleModelLog\nfrom second.utils.progress_bar import ProgressBar\nimport psutil\n\ndef example_convert_to_torch(example, dtype=torch.float32,\n device=None) -> dict:\n device = device or torch.device(\"cuda:0\")\n example_torch = {}\n float_names = [\n \"voxels\", \"anchors\", \"reg_targets\", \"reg_weights\", \"bev_map\", \"importance\"\n ]\n for k, v in example.items():\n if k in float_names:\n # slow when directly provide fp32 data with dtype=torch.half\n example_torch[k] = torch.tensor(\n v, dtype=torch.float32, device=device).to(dtype)\n elif k in [\"coordinates\", \"labels\", \"num_points\"]:\n example_torch[k] = torch.tensor(\n v, dtype=torch.int32, device=device)\n elif k in [\"anchors_mask\"]:\n example_torch[k] = torch.tensor(\n v, dtype=torch.uint8, device=device)\n elif k == \"calib\":\n calib = {}\n for k1, v1 in v.items():\n calib[k1] = torch.tensor(\n v1, dtype=dtype, device=device).to(dtype)\n example_torch[k] = calib\n elif k == \"num_voxels\":\n example_torch[k] = torch.tensor(v)\n else:\n example_torch[k] = v\n return example_torch\n\n\ndef build_network(model_cfg, measure_time=False, KL=False):\n voxel_generator = voxel_builder.build(model_cfg.voxel_generator)\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n box_coder = box_coder_builder.build(model_cfg.box_coder)\n target_assigner_cfg = model_cfg.target_assigner\n target_assigner = target_assigner_builder.build(target_assigner_cfg,\n bv_range, box_coder)\n box_coder.custom_ndim = target_assigner._anchor_generators[0].custom_ndim\n print(KL)\n net = second_builder.build(\n model_cfg, voxel_generator, target_assigner, measure_time=measure_time, KL = KL )\n return net\n\ndef _worker_init_fn(worker_id):\n time_seed = np.array(time.time(), dtype=np.int32)\n np.random.seed(time_seed + worker_id)\n print(f\"WORKER {worker_id} seed:\", np.random.get_state()[1][0])\n\ndef freeze_params(params: dict, include: str=None, exclude: str=None):\n assert isinstance(params, dict)\n include_re = None\n if include is not None:\n include_re = re.compile(include)\n exclude_re = None\n if exclude is not None:\n exclude_re = re.compile(exclude)\n remain_params = []\n for k, p in params.items():\n if include_re is not None:\n if include_re.match(k) is not None:\n continue \n if exclude_re is not None:\n if exclude_re.match(k) is None:\n continue \n remain_params.append(p)\n return remain_params\n\ndef freeze_params_v2(params: dict, include: str=None, exclude: str=None):\n assert isinstance(params, dict)\n include_re = None\n if include is not None:\n include_re = re.compile(include)\n exclude_re = None\n if exclude is not None:\n exclude_re = re.compile(exclude)\n for k, p in params.items():\n if include_re is not None:\n if include_re.match(k) is not None:\n p.requires_grad = False\n if exclude_re is not None:\n if exclude_re.match(k) is None:\n p.requires_grad = 
False\n\ndef filter_param_dict(state_dict: dict, include: str=None, exclude: str=None):\n assert isinstance(state_dict, dict)\n include_re = None\n if include is not None:\n include_re = re.compile(include)\n exclude_re = None\n if exclude is not None:\n exclude_re = re.compile(exclude)\n res_dict = {}\n for k, p in state_dict.items():\n if include_re is not None:\n if include_re.match(k) is None:\n continue\n if exclude_re is not None:\n if exclude_re.match(k) is not None:\n continue \n res_dict[k] = p\n return res_dict\n\n\ndef train(config_path,\n model_dir,\n KL = False,\n result_path=None,\n create_folder=False,\n display_step=50,\n summary_step=5,\n pretrained_path=None,\n pretrained_include=None,\n pretrained_exclude=None,\n freeze_include=None,\n freeze_exclude=None,\n multi_gpu=False,\n measure_time=False,\n resume=False):\n \"\"\"train a VoxelNet model specified by a config file.\n \"\"\"\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n model_dir = str(Path(model_dir).resolve())\n if create_folder:\n if Path(model_dir).exists():\n model_dir = torchplus.train.create_folder(model_dir)\n model_dir = Path(model_dir)\n if not resume and model_dir.exists():\n raise ValueError(\"model dir exists and you don't specify resume.\")\n model_dir.mkdir(parents=True, exist_ok=True)\n if result_path is None:\n result_path = model_dir / 'results'\n config_file_bkp = \"pipeline.config\"\n if isinstance(config_path, str):\n # directly provide a config object. this usually used\n # when you want to train with several different parameters in\n # one script.\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n else:\n config = config_path\n proto_str = text_format.MessageToString(config, indent=2)\n with (model_dir / config_file_bkp).open(\"w\") as f:\n f.write(proto_str)\n\n input_cfg = config.train_input_reader\n eval_input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n if model_cfg.rpn.module_class_name == \"RPN_KL\":\n KL = True\n else:\n KL = False\n print(KL)\n net = build_network(model_cfg, measure_time,KL).to(device)\n # if train_cfg.enable_mixed_precision:\n # net.half()\n # net.metrics_to_float()\n # net.convert_norm_to_float(net)\n target_assigner = net.target_assigner\n voxel_generator = net.voxel_generator\n print(\"num parameters:\", len(list(net.parameters())))\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n if pretrained_path is not None:\n model_dict = net.state_dict()\n pretrained_dict = torch.load(pretrained_path)\n pretrained_dict = filter_param_dict(pretrained_dict, pretrained_include, pretrained_exclude)\n new_pretrained_dict = {}\n for k, v in pretrained_dict.items():\n if k in model_dict and v.shape == model_dict[k].shape:\n new_pretrained_dict[k] = v \n print(\"Load pretrained parameters:\")\n for k, v in new_pretrained_dict.items():\n print(k, v.shape)\n model_dict.update(new_pretrained_dict) \n net.load_state_dict(model_dict)\n freeze_params_v2(dict(net.named_parameters()), freeze_include, freeze_exclude)\n net.clear_global_step()\n net.clear_metrics()\n if multi_gpu:\n net_parallel = torch.nn.DataParallel(net)\n else:\n net_parallel = net\n optimizer_cfg = train_cfg.optimizer\n loss_scale = train_cfg.loss_scale_factor\n fastai_optimizer = optimizer_builder.build(\n optimizer_cfg,\n net,\n mixed=False,\n loss_scale=loss_scale)\n if loss_scale < 0:\n loss_scale = 
\"dynamic\"\n if train_cfg.enable_mixed_precision:\n max_num_voxels = input_cfg.preprocess.max_number_of_voxels * input_cfg.batch_size\n assert max_num_voxels < 65535, \"spconv fp16 training only support this\"\n from apex import amp\n net, amp_optimizer = amp.initialize(net, fastai_optimizer,\n opt_level=\"O2\",\n keep_batchnorm_fp32=True,\n loss_scale=loss_scale\n )\n net.metrics_to_float()\n else:\n amp_optimizer = fastai_optimizer\n torchplus.train.try_restore_latest_checkpoints(model_dir,\n [fastai_optimizer])\n lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, amp_optimizer,\n train_cfg.steps)\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n else:\n float_dtype = torch.float32\n\n if multi_gpu:\n num_gpu = torch.cuda.device_count()\n print(f\"MULTI-GPU: use {num_gpu} gpu\")\n collate_fn = merge_second_batch_multigpu\n else:\n collate_fn = merge_second_batch\n num_gpu = 1\n\n ######################\n # PREPARE INPUT\n ######################\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner,\n multi_gpu=multi_gpu)\n eval_dataset = input_reader_builder.build(\n eval_input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner)\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size * num_gpu,\n shuffle=True,\n num_workers=input_cfg.preprocess.num_workers * num_gpu,\n pin_memory=False,\n collate_fn=collate_fn,\n worker_init_fn=_worker_init_fn,\n drop_last=not multi_gpu)\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=eval_input_cfg.batch_size, # only support multi-gpu train\n shuffle=False,\n num_workers=eval_input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch)\n\n ######################\n # TRAINING\n ######################\n model_logging = SimpleModelLog(model_dir)\n model_logging.open()\n model_logging.log_text(proto_str + \"\\n\", 0, tag=\"config\")\n start_step = net.get_global_step()\n total_step = train_cfg.steps\n t = time.time()\n steps_per_eval = train_cfg.steps_per_eval\n clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch\n\n amp_optimizer.zero_grad()\n step_times = []\n step = start_step\n try:\n while True:\n if clear_metrics_every_epoch:\n net.clear_metrics()\n for example in dataloader:\n lr_scheduler.step(net.get_global_step())\n time_metrics = example[\"metrics\"]\n example.pop(\"metrics\")\n example_torch = example_convert_to_torch(example, float_dtype)\n batch_size = example[\"anchors\"].shape[0]\n # print(\"num_points:\",max(example_torch['num_points']))\n # print(\"num_voxels:\",example_torch['num_voxels'].shape)\n # print(\"anchors:\",example_torch['anchors'].shape)\n # print(\"voxels:\",example_torch['voxels'].shape)\n # print(example_torch['voxels'][0:3])\n # print(\"coordinates:\",example_torch['coordinates'].shape)\n # exit()\n ret_dict = net_parallel(example_torch)\n cls_preds = ret_dict[\"cls_preds\"]\n loss = ret_dict[\"loss\"].mean()\n cls_loss_reduced = ret_dict[\"cls_loss_reduced\"].mean()\n loc_loss_reduced = ret_dict[\"loc_loss_reduced\"].mean()\n cls_pos_loss = ret_dict[\"cls_pos_loss\"].mean()\n cls_neg_loss = ret_dict[\"cls_neg_loss\"].mean()\n loc_loss = ret_dict[\"loc_loss\"]\n cls_loss = ret_dict[\"cls_loss\"]\n \n cared = ret_dict[\"cared\"]\n labels = example_torch[\"labels\"]\n if train_cfg.enable_mixed_precision:\n with amp.scale_loss(loss, amp_optimizer) 
as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)\n amp_optimizer.step()\n amp_optimizer.zero_grad()\n net.update_global_step()\n net_metrics = net.update_metrics(cls_loss_reduced,\n loc_loss_reduced, cls_preds,\n labels, cared)\n\n step_time = (time.time() - t)\n step_times.append(step_time)\n t = time.time()\n metrics = {}\n num_pos = int((labels > 0)[0].float().sum().cpu().numpy())\n num_neg = int((labels == 0)[0].float().sum().cpu().numpy())\n if 'anchors_mask' not in example_torch:\n num_anchors = example_torch['anchors'].shape[1]\n else:\n num_anchors = int(example_torch['anchors_mask'][0].sum())\n global_step = net.get_global_step()\n\n if global_step % display_step == 0:\n if measure_time:\n for name, val in net.get_avg_time_dict().items():\n print(f\"avg {name} time = {val * 1000:.3f} ms\")\n\n loc_loss_elem = [\n float(loc_loss[:, :, i].sum().detach().cpu().numpy() /\n batch_size) for i in range(loc_loss.shape[-1])\n ]\n metrics[\"runtime\"] = {\n \"step\": global_step,\n \"steptime\": np.mean(step_times),\n }\n metrics[\"runtime\"].update(time_metrics[0])\n step_times = []\n metrics.update(net_metrics)\n metrics[\"loss\"][\"loc_elem\"] = loc_loss_elem\n metrics[\"loss\"][\"cls_pos_rt\"] = float(\n cls_pos_loss.detach().cpu().numpy())\n metrics[\"loss\"][\"cls_neg_rt\"] = float(\n cls_neg_loss.detach().cpu().numpy())\n if model_cfg.use_direction_classifier:\n dir_loss_reduced = ret_dict[\"dir_loss_reduced\"].mean()\n metrics[\"loss\"][\"dir_rt\"] = float(\n dir_loss_reduced.detach().cpu().numpy())\n\n metrics[\"misc\"] = {\n \"num_vox\": int(example_torch[\"voxels\"].shape[0]),\n \"num_pos\": int(num_pos),\n \"num_neg\": int(num_neg),\n \"num_anchors\": int(num_anchors),\n \"lr\": float(amp_optimizer.lr),\n \"mem_usage\": psutil.virtual_memory().percent,\n }\n model_logging.log_metrics(metrics, global_step)\n\n if global_step % steps_per_eval == 0:\n torchplus.train.save_models(model_dir, [net, amp_optimizer],\n net.get_global_step())\n net.eval()\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n model_logging.log_text(\"#################################\",\n global_step)\n model_logging.log_text(\"# EVAL\", global_step)\n model_logging.log_text(\"#################################\",\n global_step)\n model_logging.log_text(\"Generate output labels...\", global_step)\n t = time.time()\n detections = []\n prog_bar = ProgressBar()\n net.clear_timer()\n prog_bar.start((len(eval_dataset) + eval_input_cfg.batch_size - 1)\n // eval_input_cfg.batch_size)\n for example in iter(eval_dataloader):\n example = example_convert_to_torch(example, float_dtype)\n detections += net(example)\n prog_bar.print_bar()\n\n sec_per_ex = len(eval_dataset) / (time.time() - t)\n model_logging.log_text(\n f'generate label finished({sec_per_ex:.2f}/s). 
start eval:',\n global_step)\n result_dict = eval_dataset.dataset.evaluation(\n detections, str(result_path_step))\n for k, v in result_dict[\"results\"].items():\n model_logging.log_text(\"Evaluation {}\".format(k), global_step)\n model_logging.log_text(v, global_step)\n model_logging.log_metrics(result_dict[\"detail\"], global_step)\n with open(result_path_step / \"result.pkl\", 'wb') as f:\n pickle.dump(detections, f)\n net.train()\n step += 1\n if step >= total_step:\n break\n if step >= total_step:\n break\n except Exception as e:\n print(json.dumps(example[\"metadata\"], indent=2))\n model_logging.log_text(str(e), step)\n model_logging.log_text(json.dumps(example[\"metadata\"], indent=2), step)\n torchplus.train.save_models(model_dir, [net, amp_optimizer],\n step)\n raise e\n finally:\n model_logging.close()\n torchplus.train.save_models(model_dir, [net, amp_optimizer],\n net.get_global_step())\n\n\ndef evaluate(config_path,\n model_dir=None,\n result_path=None,\n ckpt_path=None,\n measure_time=False,\n batch_size=None,\n **kwargs):\n \"\"\"Don't support pickle_result anymore. if you want to generate kitti label file,\n please use kitti_anno_to_label_file and convert_detection_to_kitti_annos\n in second.data.kitti_dataset.\n \"\"\"\n assert len(kwargs) == 0\n model_dir = str(Path(model_dir).resolve())\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n result_name = 'eval_results'\n if result_path is None:\n model_dir = Path(model_dir)\n result_path = model_dir / result_name\n else:\n result_path = Path(result_path)\n if isinstance(config_path, str):\n # directly provide a config object. this usually used\n # when you want to eval with several different parameters in\n # one script.\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n else:\n config = config_path\n\n input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n\n net = build_network(model_cfg, measure_time=measure_time).to(device)\n if train_cfg.enable_mixed_precision:\n net.half()\n print(\"half inference!\")\n net.metrics_to_float()\n net.convert_norm_to_float(net)\n target_assigner = net.target_assigner\n voxel_generator = net.voxel_generator\n\n if ckpt_path is None:\n assert model_dir is not None\n torchplus.train.try_restore_latest_checkpoints(model_dir, [net])\n else:\n torchplus.train.restore(ckpt_path, net)\n batch_size = batch_size or input_cfg.batch_size\n eval_dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=False,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner)\n eval_dataloader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch)\n\n if train_cfg.enable_mixed_precision:\n float_dtype = torch.float16\n else:\n float_dtype = torch.float32\n\n net.eval()\n result_path_step = result_path / f\"step_{net.get_global_step()}\"\n result_path_step.mkdir(parents=True, exist_ok=True)\n t = time.time()\n detections = []\n print(\"Generate output labels...\")\n bar = ProgressBar()\n bar.start((len(eval_dataset) + batch_size - 1) // batch_size)\n prep_example_times = []\n prep_times = []\n t2 = time.time()\n\n for example in iter(eval_dataloader):\n if measure_time:\n prep_times.append(time.time() - t2)\n torch.cuda.synchronize()\n t1 = time.time()\n example = 
example_convert_to_torch(example, float_dtype)\n if measure_time:\n torch.cuda.synchronize()\n prep_example_times.append(time.time() - t1)\n with torch.no_grad():\n detections += net(example)\n bar.print_bar()\n if measure_time:\n t2 = time.time()\n\n sec_per_example = len(eval_dataset) / (time.time() - t)\n print(f'generate label finished({sec_per_example:.2f}/s). start eval:')\n if measure_time:\n print(\n f\"avg example to torch time: {np.mean(prep_example_times) * 1000:.3f} ms\"\n )\n print(f\"avg prep time: {np.mean(prep_times) * 1000:.3f} ms\")\n for name, val in net.get_avg_time_dict().items():\n print(f\"avg {name} time = {val * 1000:.3f} ms\")\n with open(result_path_step / \"result.pkl\", 'wb') as f:\n pickle.dump(detections, f)\n result_dict = eval_dataset.dataset.evaluation(detections,\n str(result_path_step))\n if result_dict is not None:\n for k, v in result_dict[\"results\"].items():\n print(\"Evaluation {}\".format(k))\n print(v)\n\ndef helper_tune_target_assigner(config_path, target_rate=None, update_freq=200, update_delta=0.01, num_tune_epoch=5):\n \"\"\"get information of target assign to tune thresholds in anchor generator.\n \"\"\" \n if isinstance(config_path, str):\n # directly provide a config object. this usually used\n # when you want to train with several different parameters in\n # one script.\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n else:\n config = config_path\n proto_str = text_format.MessageToString(config, indent=2)\n\n input_cfg = config.train_input_reader\n eval_input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n\n net = build_network(model_cfg, False, KL)\n # if train_cfg.enable_mixed_precision:\n # net.half()\n # net.metrics_to_float()\n # net.convert_norm_to_float(net)\n target_assigner = net.target_assigner\n voxel_generator = net.voxel_generator\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=voxel_generator,\n target_assigner=target_assigner,\n multi_gpu=False)\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=1,\n shuffle=False,\n num_workers=0,\n pin_memory=False,\n collate_fn=merge_second_batch,\n worker_init_fn=_worker_init_fn,\n drop_last=False)\n \n class_count = {}\n anchor_count = {}\n class_count_tune = {}\n anchor_count_tune = {}\n for c in target_assigner.classes:\n class_count[c] = 0\n anchor_count[c] = 0\n class_count_tune[c] = 0\n anchor_count_tune[c] = 0\n\n\n step = 0\n classes = target_assigner.classes\n if target_rate is None:\n num_tune_epoch = 0\n for epoch in range(num_tune_epoch):\n for example in dataloader:\n gt_names = example[\"gt_names\"]\n for name in gt_names:\n class_count_tune[name] += 1\n \n labels = example['labels']\n for i in range(1, len(classes) + 1):\n anchor_count_tune[classes[i - 1]] += int(np.sum(labels == i))\n if target_rate is not None:\n for name, rate in target_rate.items():\n if class_count_tune[name] > update_freq:\n # calc rate\n current_rate = anchor_count_tune[name] / class_count_tune[name]\n if current_rate > rate:\n target_assigner._anchor_generators[classes.index(name)].match_threshold += update_delta\n target_assigner._anchor_generators[classes.index(name)].unmatch_threshold += update_delta\n else:\n target_assigner._anchor_generators[classes.index(name)].match_threshold -= update_delta\n target_assigner._anchor_generators[classes.index(name)].unmatch_threshold 
-= update_delta\n anchor_count_tune[name] = 0\n class_count_tune[name] = 0\n step += 1\n for c in target_assigner.classes:\n class_count[c] = 0\n anchor_count[c] = 0\n total_voxel_gene_time = 0\n count = 0\n\n for example in dataloader:\n gt_names = example[\"gt_names\"]\n total_voxel_gene_time += example[\"metrics\"][0][\"voxel_gene_time\"]\n count += 1\n\n for name in gt_names:\n class_count[name] += 1\n \n labels = example['labels']\n for i in range(1, len(classes) + 1):\n anchor_count[classes[i - 1]] += int(np.sum(labels == i))\n print(\"avg voxel gene time\", total_voxel_gene_time / count)\n\n print(json.dumps(class_count, indent=2))\n print(json.dumps(anchor_count, indent=2))\n if target_rate is not None:\n for ag in target_assigner._anchor_generators:\n if ag.class_name in target_rate:\n print(ag.class_name, ag.match_threshold, ag.unmatch_threshold)\n\ndef mcnms_parameters_search(config_path,\n model_dir,\n preds_path):\n pass\n\n\nif __name__ == '__main__':\n fire.Fire()\n"
] | [
[
"numpy.random.get_state",
"torch.cuda.synchronize",
"numpy.random.seed",
"torch.load",
"torch.cuda.device_count",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.no_grad",
"numpy.mean",
"torch.cuda.is_available",
"torch.device",
"torch.nn.DataParallel",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KyunghoWon-GIST/PyRiemann-with-OpenViBE | [
"2a070fdadb040ce6edad81aef497d054ddd70130"
] | [
"python-Riemann-online.py"
] | [
"import pickle\r\nimport numpy as np\r\nimport pyriemann\r\nimport sklearn\r\nimport scipy\r\nimport matplotlib as mpl\r\nmpl.use('Qt5Agg') # for using pyplot (pip install pyqt5)\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import signal\r\nfrom scipy.signal import butter, filtfilt, sosfiltfilt\r\n\r\n# Pyriemann with OV Python scripting plugin --------------------------------------------------- written by Kyungho Won\r\n#\r\n# Step\r\n# 1. Loads covariance matrices estimated using calibration EEG at the beginning and fits MDM (__init__)\r\n# 2. During test scenario, python scripting module receives the segmented EEG from OpenViBE every epoch (input: signal)\r\n# 3. In Python scripting plugin, the segmented EEG is band-pass filtered and transformed to a covariance matrix\r\n# 4. The Fitted MDM predicts the current label with the covariance matrix\r\n# 5. Python scripting plugin sends stimulution (predicted labels) as an output (output: stimulation)\r\n\r\n# 6. Ohter external modules could be added\r\n\r\ndef butter_bandpass_filter(data, lowcut, highcut, fs, order):\r\n\tnyq = fs/2\r\n\tlow = lowcut/nyq\r\n\thigh = highcut/nyq\r\n\tsos = butter(order, [low, high], btype='band', output='sos')\r\n\t# demean before filtering\r\n\tmeandat = np.mean(data, axis=1)\r\n\tdata = data - meandat[:, np.newaxis]\r\n\ty = sosfiltfilt(sos, data) # zero-phase filter # data: [ch x time]\r\n\t# specify pandlen to make the result the same as Matlab filtfilt()\r\n\treturn y\r\n\r\ndef draw_feedback(nth, nClass):\r\n\tlabels_arr = ['LEFT','RIGHT','UP','DOWN']\r\n\tmpl.rcParams['toolbar'] = 'None' # Remove tool bar (upper bar)\r\n\r\n\tplt.clf()\r\n\tplt.plot(0,0)\r\n\tax = plt.gca()\r\n\tax.set_facecolor('black')\r\n\tplt.xlim([-10, 10])\r\n\tplt.ylim([-10, 10])\r\n\tplt.axis('off')\r\n\tplt.title('%02d Predicted: %s' %(nth, labels_arr[int(nClass)-1]))\r\n\r\n\tif nClass == 1: # left\r\n\t\tplt.arrow(0,0, -4, 0, width=1)\r\n\telif nClass == 2: # right\r\n\t\tplt.arrow(0,0, 4, 0, width=1)\r\n\telif nClass == 3: # up\r\n\t\tplt.arrow(0,0, 0, 4, width=1)\r\n\telif nClass == 4: # down\r\n\t\tplt.arrow(0,0, 0, -4, width=1)\r\n\r\n\r\nclass MyOVBox(OVBox):\r\n\tdef __init__(self):\r\n\t\tOVBox.__init__(self)\r\n\t\tself.signalHeader = None\r\n\t\tself.nth_trial = 0\r\n\r\n\tdef initialize(self):\r\n\t\t# Append to the box output a stimulation header. 
\r\n\t\tself.output[0].append(OVStimulationHeader(0., 0.))\r\n\r\n\t\t# Load covariance matrices estimated from the calibrated EEG\r\n\t\tload_file = open(self.setting['Trained model path'], 'rb')\r\n\t\ttrained = pickle.load(load_file)\r\n\t\tself.mdm = pyriemann.classification.MDM()\r\n\t\tself.mdm.metric = 'Riemann'\r\n\t\tself.mdm.fit(trained['COV'], trained['Labels'])\t\r\n\t\tprint('Training accuracy is', np.sum(self.mdm.predict(trained['COV'])==trained['Labels'])/len(trained['Labels']))\r\n\t\tprint('== Trained COV:', trained['COV'].shape)\r\n\t\tprint('==', self.mdm)\r\n\t\tprint('\\n\\n')\r\n\r\n\t\t# User defined parameters\r\n\t\tself.lowbp = int(self.setting['low bp'])\r\n\t\tself.highbp = int(self.setting['high bp'])\r\n\t\tself.filterorder = int(self.setting['filter order'])\r\n\t\tself.sampling = int(self.setting['sampling rate'])\r\n\t\tself.isfeedback = self.setting['Feedback']\r\n\t\tself.ans_mi = [769, 770, 780, 774] # left right up down\r\n\r\n\t\tplt.ion()\r\n\r\n\tdef process(self):\r\n\t\tfor chunkIdx in range( len(self.input[0]) ):\r\n\t\t\t# borrowed from python-signal-average.py\r\n\t\t\tif(type(self.input[0][chunkIdx]) == OVSignalHeader): # called only once\r\n\t\t\t\tself.signalHeader = self.input[0].pop()\r\n\r\n\t\t\telif(type(self.input[0][chunkIdx]) == OVSignalBuffer): # called every epoch\r\n\t\t\t\tchunk = self.input[0].pop()\r\n\t\t\t\tnumpyBuffer = np.array(chunk, dtype=np.float64).reshape(tuple(self.signalHeader.dimensionSizes))\r\n\t\t\t\t# numpyBuffer has [ch x time]\r\n\t\t\t\tnumpyBuffer = butter_bandpass_filter(numpyBuffer, self.lowbp, self.highbp, self.sampling, self.filterorder)\r\n\r\n\t\t\t\t# Pyriemann only accpets 3D inputs with [nMatrices, nCh, nTime]\r\n\t\t\t\tcur_input = np.expand_dims(numpyBuffer, axis=0) # now (1, nCh, nTime)\r\n\t\t\t\tCOV_cur = pyriemann.estimation.Covariances().fit_transform(cur_input)\r\n\t\t\t\tpredict_class = self.mdm.predict(COV_cur) # among [1, 2, 3, 4]\r\n\t\t\t\tprint(predict_class)\r\n\r\n\t\t\t\t# send stimulation (classified results)\r\n\t\t\t\tstimSet = OVStimulationSet(self.getCurrentTime(), self.getCurrentTime()+1./self.getClock())\r\n\t\t\t\tstimSet.append(OVStimulation(self.ans_mi[int(predict_class)-1], self.getCurrentTime(), 0.))\r\n\t\t\t\tself.output[0].append(stimSet)\r\n\t\t\t\tself.nth_trial = self.nth_trial + 1\r\n\r\n\t\t\t\tif self.isfeedback == 'True':\r\n\t\t\t\t\tdraw_feedback(self.nth_trial, predict_class)\r\n\t\t\t\t\t\t\t\t\r\n\tdef uninitialize(self):\r\n\t\tend = self.getCurrentTime()\r\n\t\tself.output[0].append(OVStimulationEnd(end,end))\r\n\t\tprint('uninitialize')\r\n\t\tplt.ioff()\r\n\t\tplt.close()\r\n\r\nbox = MyOVBox()\t# When it ends (the last call)\r\n"
] | [
[
"matplotlib.pyplot.gca",
"numpy.expand_dims",
"matplotlib.use",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ioff",
"scipy.signal.butter",
"matplotlib.pyplot.clf",
"numpy.mean",
"matplotlib.pyplot.xlim",
"scipy.signal.sosfiltfilt",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.arrow"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
yage99/tensorflow | [
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"c7fa71b32a3635eb25596ae80d007b41007769c4",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd"
] | [
"tensorflow/python/data/experimental/service/server_lib_test.py",
"tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/exported_python_args.py",
"tensorflow/python/kernel_tests/signal/spectral_ops_test.py",
"tensorflow/python/kernel_tests/confusion_matrix_test.py",
"tensorflow/python/keras/layers/einsum_dense_test.py",
"tensorflow/lite/tools/signature/signature_def_utils_test.py",
"tensorflow/examples/speech_commands/wav_to_features_test.py",
"tensorflow/python/keras/distribute/multi_worker_tutorial_test.py",
"tensorflow/python/grappler/arithmetic_optimizer_test.py",
"tensorflow/python/data/kernel_tests/tf_record_dataset_test.py",
"tensorflow/python/data/experimental/ops/counter.py",
"tensorflow/python/distribute/client/metric_utils_test.py",
"tensorflow/lite/python/util.py",
"tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py",
"tensorflow/python/keras/tests/saver_test.py",
"tensorflow/lite/testing/model_coverage/model_coverage_lib_test.py",
"tensorflow/python/distribute/values.py",
"tensorflow/python/tools/selective_registration_header_lib.py",
"tensorflow/lite/micro/testing/generate_test_models.py",
"tensorflow/python/kernel_tests/conv_ops_3d_test.py",
"tensorflow/python/autograph/lang/special_functions_test.py",
"tensorflow/python/ops/ragged/ragged_operators_test.py",
"tensorflow/python/framework/memory_checker.py",
"tensorflow/python/compiler/tensorrt/test/combined_nms_test.py",
"tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py",
"tensorflow/python/kernel_tests/matrix_inverse_op_test.py",
"tensorflow/compiler/xla/python_api/types.py",
"tensorflow/python/eager/context_test.py",
"tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver_test.py",
"tensorflow/compiler/tests/data_format_ops_test.py",
"tensorflow/python/keras/combinations.py",
"tensorflow/lite/tools/convert_image_to_csv.py",
"tensorflow/python/eager/benchmarks_test_base.py",
"tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py",
"tensorflow/python/keras/engine/sequential_test.py",
"tensorflow/python/keras/preprocessing/dataset_utils.py",
"tensorflow/python/keras/layers/preprocessing/preprocessing_stage_test.py",
"tensorflow/python/keras/layers/preprocessing/category_crossing_distribution_test.py",
"tensorflow/python/ops/ragged/ragged_concat_op_test.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.data service server lib.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.data.experimental.service import server_lib\n\nfrom tensorflow.python.platform import test\n\n\nclass ServerLibTest(test.TestCase):\n\n def testStartDispatcher(self):\n dispatcher = server_lib.DispatchServer(0, start=False)\n dispatcher.start()\n\n def testMultipleStartDispatcher(self):\n dispatcher = server_lib.DispatchServer(0, start=True)\n dispatcher.start()\n\n def testStartWorker(self):\n dispatcher = server_lib.DispatchServer(0)\n worker = server_lib.WorkerServer(0, dispatcher._address, start=False)\n worker.start()\n\n def testMultipleStartWorker(self):\n dispatcher = server_lib.DispatchServer(0)\n worker = server_lib.WorkerServer(0, dispatcher._address, start=True)\n worker.start()\n\n def testStopDispatcher(self):\n dispatcher = server_lib.DispatchServer(0)\n dispatcher._stop()\n dispatcher._stop()\n\n def testStopWorker(self):\n dispatcher = server_lib.DispatchServer(0)\n worker = server_lib.WorkerServer(0, dispatcher._address)\n worker._stop()\n worker._stop()\n\n def testStopStartDispatcher(self):\n dispatcher = server_lib.DispatchServer(0)\n dispatcher._stop()\n with self.assertRaisesRegex(\n RuntimeError, \"Server cannot be started after it has been stopped\"):\n dispatcher.start()\n\n def testStopStartWorker(self):\n dispatcher = server_lib.DispatchServer(0)\n worker = server_lib.WorkerServer(0, dispatcher._address)\n worker._stop()\n with self.assertRaisesRegex(\n RuntimeError, \"Server cannot be started after it has been stopped\"):\n worker.start()\n\n def testJoinDispatcher(self):\n dispatcher = server_lib.DispatchServer(0)\n dispatcher._stop()\n dispatcher.join()\n\n def testJoinWorker(self):\n dispatcher = server_lib.DispatchServer(0)\n worker = server_lib.WorkerServer(0, dispatcher._address)\n worker._stop()\n worker.join()\n\n def testDispatcherNumWorkers(self):\n dispatcher = server_lib.DispatchServer(0)\n self.assertEqual(0, dispatcher._num_workers())\n worker1 = server_lib.WorkerServer(0, dispatcher._address) # pylint: disable=unused-variable\n self.assertEqual(1, dispatcher._num_workers())\n worker2 = server_lib.WorkerServer(0, dispatcher._address) # pylint: disable=unused-variable\n self.assertEqual(2, dispatcher._num_workers())\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# RUN: (! %p/exported_python_args 2>&1) | FileCheck %s\n\n# pylint: disable=missing-docstring,line-too-long,dangerous-default-value\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common\n\n\nclass TestModule(tf.Module):\n\n @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])\n def some_function(self, x):\n return self.callee(x)\n\n # CHECK: While importing SavedModel function 'callee': in input signature:\n # CHECK-SAME: Unhandled structured value kind {{.*}} at index path: <value>.1.foo\n @tf.function\n def callee(self, x, n={'foo': 42}):\n return x\n\n\nif __name__ == '__main__':\n common.do_test(TestModule)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for spectral_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker_v2\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.signal import spectral_ops\nfrom tensorflow.python.ops.signal import window_ops\nfrom tensorflow.python.platform import test\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SpectralOpsTest(test.TestCase, parameterized.TestCase):\n\n @staticmethod\n def _np_hann_periodic_window(length):\n if length == 1:\n return np.ones(1)\n odd = length % 2\n if not odd:\n length += 1\n window = 0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))\n if not odd:\n window = window[:-1]\n return window\n\n @staticmethod\n def _np_frame(data, window_length, hop_length):\n num_frames = 1 + int(np.floor((len(data) - window_length) // hop_length))\n shape = (num_frames, window_length)\n strides = (data.strides[0] * hop_length, data.strides[0])\n return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)\n\n @staticmethod\n def _np_stft(data, fft_length, hop_length, window_length):\n frames = SpectralOpsTest._np_frame(data, window_length, hop_length)\n window = SpectralOpsTest._np_hann_periodic_window(window_length)\n return np.fft.rfft(frames * window, fft_length)\n\n @staticmethod\n def _np_inverse_stft(stft, fft_length, hop_length, window_length):\n frames = np.fft.irfft(stft, fft_length)\n # Pad or truncate frames's inner dimension to window_length.\n frames = frames[..., :window_length]\n frames = np.pad(frames, [[0, 0]] * (frames.ndim - 1) +\n [[0, max(0, window_length - frames.shape[-1])]], \"constant\")\n window = SpectralOpsTest._np_hann_periodic_window(window_length)\n return SpectralOpsTest._np_overlap_add(frames * window, hop_length)\n\n @staticmethod\n def _np_overlap_add(stft, hop_length):\n num_frames, window_length = np.shape(stft)\n # Output length will be one complete window, plus another hop_length's\n # worth of points for each additional window.\n output_length = window_length + (num_frames - 1) * hop_length\n output = np.zeros(output_length)\n for i in range(num_frames):\n output[i * hop_length:i * hop_length + window_length] += stft[i,]\n return output\n\n def _compare(self, signal, frame_length, frame_step, fft_length, tol):\n actual_stft = spectral_ops.stft(\n signal, frame_length, frame_step, fft_length, pad_end=False)\n signal_ph = 
array_ops.placeholder_with_default(signal, shape=signal.shape)\n actual_stft_from_ph = spectral_ops.stft(\n signal_ph, frame_length, frame_step, fft_length, pad_end=False)\n\n actual_inverse_stft = spectral_ops.inverse_stft(\n actual_stft, frame_length, frame_step, fft_length)\n\n actual_stft, actual_stft_from_ph, actual_inverse_stft = self.evaluate(\n [actual_stft, actual_stft_from_ph, actual_inverse_stft])\n\n actual_stft_ph = array_ops.placeholder_with_default(\n actual_stft, shape=actual_stft.shape)\n actual_inverse_stft_from_ph = self.evaluate(\n spectral_ops.inverse_stft(\n actual_stft_ph, frame_length, frame_step, fft_length))\n\n # Confirm that there is no difference in output when shape/rank is fully\n # unknown or known.\n self.assertAllClose(actual_stft, actual_stft_from_ph)\n self.assertAllClose(actual_inverse_stft, actual_inverse_stft_from_ph)\n\n expected_stft = SpectralOpsTest._np_stft(\n signal, fft_length, frame_step, frame_length)\n self.assertAllClose(expected_stft, actual_stft, rtol=tol, atol=tol)\n\n expected_inverse_stft = SpectralOpsTest._np_inverse_stft(\n expected_stft, fft_length, frame_step, frame_length)\n self.assertAllClose(\n expected_inverse_stft, actual_inverse_stft, rtol=tol, atol=tol)\n\n def test_shapes(self):\n signal = np.zeros((512,)).astype(np.float32)\n\n # If fft_length is not provided, the smallest enclosing power of 2 of\n # frame_length (8) is used.\n stft = spectral_ops.stft(signal, frame_length=7, frame_step=8,\n pad_end=True)\n self.assertAllEqual([64, 5], stft.shape.as_list())\n self.assertAllEqual([64, 5], self.evaluate(stft).shape)\n\n stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,\n pad_end=True)\n self.assertAllEqual([64, 5], stft.shape.as_list())\n self.assertAllEqual([64, 5], self.evaluate(stft).shape)\n\n stft = spectral_ops.stft(signal, frame_length=8, frame_step=8,\n fft_length=16, pad_end=True)\n self.assertAllEqual([64, 9], stft.shape.as_list())\n self.assertAllEqual([64, 9], self.evaluate(stft).shape)\n\n stft = spectral_ops.stft(signal, frame_length=16, frame_step=8,\n fft_length=8, pad_end=True)\n self.assertAllEqual([64, 5], stft.shape.as_list())\n self.assertAllEqual([64, 5], self.evaluate(stft).shape)\n\n stft = np.zeros((32, 9)).astype(np.complex64)\n\n inverse_stft = spectral_ops.inverse_stft(stft, frame_length=8,\n fft_length=16, frame_step=8)\n expected_length = (stft.shape[0] - 1) * 8 + 8\n self.assertAllEqual([256], inverse_stft.shape.as_list())\n self.assertAllEqual([expected_length], self.evaluate(inverse_stft).shape)\n\n @parameterized.parameters(\n (512, 64, 32, 64, np.float32, 1e-4),\n (512, 64, 32, 64, np.float64, 1e-8),\n (512, 64, 64, 64, np.float32, 1e-4),\n (512, 64, 64, 64, np.float64, 1e-8),\n (512, 72, 64, 64, np.float32, 1e-4),\n (512, 72, 64, 64, np.float64, 1e-8),\n (512, 64, 25, 64, np.float32, 1e-4),\n (512, 64, 25, 64, np.float64, 1e-8),\n (512, 25, 15, 36, np.float32, 1e-4),\n (512, 25, 15, 36, np.float64, 1e-8),\n (123, 23, 5, 42, np.float32, 1e-4),\n (123, 23, 5, 42, np.float64, 1e-8))\n def test_stft_and_inverse_stft(self, signal_length, frame_length,\n frame_step, fft_length, np_rtype, tol):\n \"\"\"Test that spectral_ops.stft/inverse_stft match a NumPy implementation.\"\"\"\n signal = np.random.random(signal_length).astype(np_rtype)\n self._compare(signal, frame_length, frame_step, fft_length, tol)\n\n @parameterized.parameters(\n # 87.5% overlap.\n (4096, 256, 32, 256, np.float32, 1e-5, 1e-6),\n (4096, 256, 32, 256, np.float64, 1e-8, 1e-8),\n # 75% overlap.\n (4096, 256, 64, 
256, np.float32, 1e-5, 1e-6),\n (4096, 256, 64, 256, np.float64, 1e-8, 1e-8),\n # Odd frame hop.\n (4096, 128, 25, 128, np.float32, 1e-3, 1e-6),\n (4096, 128, 25, 128, np.float64, 5e-4, 1e-8),\n # Odd frame length.\n (4096, 127, 32, 128, np.float32, 1e-3, 1e-6),\n (4096, 127, 32, 128, np.float64, 1e-3, 1e-8),\n # 50% overlap.\n (4096, 128, 64, 128, np.float32, 0.4, 1e-6),\n (4096, 128, 64, 128, np.float64, 0.4, 1e-8))\n def test_stft_round_trip(self, signal_length, frame_length, frame_step,\n fft_length, np_rtype, threshold,\n corrected_threshold):\n # Generate a random white Gaussian signal.\n signal = np.random.normal(size=signal_length).astype(np_rtype)\n\n stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length,\n pad_end=False)\n inverse_stft = spectral_ops.inverse_stft(stft, frame_length, frame_step,\n fft_length)\n inverse_stft_corrected = spectral_ops.inverse_stft(\n stft, frame_length, frame_step, fft_length,\n window_fn=spectral_ops.inverse_stft_window_fn(frame_step))\n inverse_stft, inverse_stft_corrected = self.evaluate(\n [inverse_stft, inverse_stft_corrected])\n\n # Truncate signal to the size of inverse stft.\n signal = signal[:inverse_stft.shape[0]]\n\n # Ignore the frame_length samples at either edge.\n signal = signal[frame_length:-frame_length]\n inverse_stft = inverse_stft[frame_length:-frame_length]\n inverse_stft_corrected = inverse_stft_corrected[\n frame_length:-frame_length]\n\n # Check that the inverse and original signal are close up to a scale\n # factor.\n inverse_stft_scaled = inverse_stft / np.mean(np.abs(inverse_stft))\n signal_scaled = signal / np.mean(np.abs(signal))\n self.assertLess(np.std(inverse_stft_scaled - signal_scaled), threshold)\n\n # Check that the inverse with correction and original signal are close.\n self.assertLess(np.std(inverse_stft_corrected - signal),\n corrected_threshold)\n\n @parameterized.parameters(\n (256, 32),\n (256, 64),\n (128, 25),\n (127, 32),\n (128, 64))\n def test_inverse_stft_window_fn(self, frame_length, frame_step):\n \"\"\"Test that inverse_stft_window_fn has unit gain at each window phase.\"\"\"\n hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)\n inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)\n inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)\n hann_window, inverse_window = self.evaluate([hann_window, inverse_window])\n\n # Expect unit gain at each phase of the window.\n product_window = hann_window * inverse_window\n for i in range(frame_step):\n self.assertAllClose(1.0, np.sum(product_window[i::frame_step]))\n\n @parameterized.parameters((256, 64), (128, 32))\n def test_inverse_stft_window_fn_special_case(self, frame_length, frame_step):\n \"\"\"Test inverse_stft_window_fn in special overlap = 3/4 case.\"\"\"\n # Cases in which frame_length is an integer multiple of 4 * frame_step are\n # special because they allow exact reproduction of the waveform with a\n # squared Hann window (Hann window in both forward and reverse transforms).\n # In the case where frame_length = 4 * frame_step, that combination\n # produces a constant gain of 1.5, and so the corrected window will be the\n # Hann window / 1.5.\n hann_window = window_ops.hann_window(frame_length, dtype=dtypes.float32)\n inverse_window_fn = spectral_ops.inverse_stft_window_fn(frame_step)\n inverse_window = inverse_window_fn(frame_length, dtype=dtypes.float32)\n self.assertAllClose(hann_window, inverse_window * 1.5)\n\n @staticmethod\n def _compute_stft_gradient(signal, 
frame_length=32, frame_step=16,\n fft_length=32):\n \"\"\"Computes the gradient of the STFT with respect to `signal`.\"\"\"\n stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)\n magnitude_stft = math_ops.abs(stft)\n loss = math_ops.reduce_sum(magnitude_stft)\n return gradients_impl.gradients([loss], [signal])[0]\n\n def test_gradients(self):\n \"\"\"Test that spectral_ops.stft has a working gradient.\"\"\"\n # TODO(rjryan): Update gradient tests for Eager.\n if context.executing_eagerly():\n return\n with self.session(use_gpu=True) as sess:\n signal_length = 512\n\n # An all-zero signal has all zero gradients with respect to the sum of the\n # magnitude STFT.\n empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)\n empty_signal_gradient = sess.run(\n self._compute_stft_gradient(empty_signal))\n self.assertTrue((empty_signal_gradient == 0.0).all())\n\n # A sinusoid will have non-zero components of its gradient with respect to\n # the sum of the magnitude STFT.\n sinusoid = math_ops.sin(\n 2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))\n sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))\n self.assertFalse((sinusoid_gradient == 0.0).all())\n\n @parameterized.parameters(\n (64, 16, 8, 16, np.float32, 2e-3, 5e-4),\n (64, 16, 8, 16, np.float64, 1e-8, 1e-8),\n (64, 16, 16, 16, np.float32, 2e-3, 5e-4),\n (64, 16, 16, 16, np.float64, 1e-8, 1e-8),\n (64, 16, 7, 16, np.float32, 2e-3, 5e-4),\n (64, 16, 7, 16, np.float64, 1e-8, 1e-8),\n (64, 7, 4, 9, np.float32, 2e-3, 5e-4),\n (64, 7, 4, 9, np.float64, 1e-8, 1e-8),\n (29, 5, 1, 10, np.float32, 2e-3, 5e-4),\n (29, 5, 1, 10, np.float64, 1e-8, 1e-8))\n def test_gradients_numerical(self, signal_length, frame_length, frame_step,\n fft_length, np_rtype, forward_tol, backward_tol):\n # TODO(rjryan): Investigate why STFT gradient error is so high.\n signal = np.random.rand(signal_length).astype(np_rtype) * 2 - 1\n\n def forward(signal):\n return spectral_ops.stft(\n signal, frame_length, frame_step, fft_length, pad_end=False)\n ((f_jacob_t,), (f_jacob_n,)) = gradient_checker_v2.compute_gradient(\n forward, [signal])\n self.assertAllClose(f_jacob_t, f_jacob_n,\n rtol=forward_tol, atol=forward_tol)\n\n def backward(stft):\n return spectral_ops.inverse_stft(\n stft, frame_length, frame_step, fft_length)\n\n stft = forward(signal)\n ((b_jacob_t,), (b_jacob_n,)) = gradient_checker_v2.compute_gradient(\n backward, [stft])\n self.assertAllClose(b_jacob_t, b_jacob_n,\n rtol=backward_tol, atol=backward_tol)\n\n @parameterized.parameters(\n itertools.product(\n (4000,),\n (256,),\n (np.float32, np.float64),\n (\"ortho\", None),\n (\"vorbis\", \"kaiser_bessel_derived\", None),\n (False, True)))\n def test_mdct_round_trip(self, signal_length, frame_length, np_rtype,\n norm, window_type, pad_end):\n if np_rtype == np.float32:\n tol = 1e-5\n else:\n if window_type == \"kaiser_bessel_derived\":\n tol = 1e-6\n else:\n tol = 1e-8\n # Generate a random white Gaussian signal.\n signal = np.random.normal(size=signal_length).astype(np_rtype)\n if window_type == \"vorbis\":\n window_fn = window_ops.vorbis_window\n elif window_type == \"kaiser_bessel_derived\":\n window_fn = window_ops.kaiser_bessel_derived_window\n elif window_type is None:\n window_fn = None\n mdct = spectral_ops.mdct(signal, frame_length, norm=norm,\n window_fn=window_fn, pad_end=pad_end)\n inverse_mdct = spectral_ops.inverse_mdct(mdct, norm=norm,\n window_fn=window_fn)\n inverse_mdct = self.evaluate(inverse_mdct)\n\n # Truncate signal and 
inverse_mdct to their minimum length.\n min_length = np.minimum(signal.shape[0], inverse_mdct.shape[0])\n # Ignore the half_len samples at either edge.\n half_len = frame_length // 2\n signal = signal[half_len:min_length-half_len]\n inverse_mdct = inverse_mdct[half_len:min_length-half_len]\n\n # Check that the inverse and original signal are close.\n self.assertAllClose(inverse_mdct, signal, atol=tol, rtol=tol)\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for confusion_matrix_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import confusion_matrix\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.platform import test\n\n\nclass ConfusionMatrixTest(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes\n def testExample(self):\n \"\"\"This is a test of the example provided in pydoc.\"\"\"\n with self.cached_session():\n self.assertAllEqual([\n [0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1]\n ], self.evaluate(confusion_matrix.confusion_matrix(\n labels=[1, 2, 4], predictions=[2, 2, 4])))\n\n def _testConfMatrix(self, labels, predictions, truth, weights=None,\n num_classes=None):\n with self.cached_session():\n dtype = predictions.dtype\n ans = confusion_matrix.confusion_matrix(\n labels, predictions, dtype=dtype, weights=weights,\n num_classes=num_classes).eval()\n self.assertAllClose(truth, ans, atol=1e-10)\n self.assertEqual(ans.dtype, dtype)\n\n def _testBasic(self, dtype):\n labels = np.arange(5, dtype=dtype)\n predictions = np.arange(5, dtype=dtype)\n\n truth = np.asarray(\n [[1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1]],\n dtype=dtype)\n\n self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)\n\n @test_util.run_deprecated_v1\n def testInt32Basic(self):\n self._testBasic(dtype=np.int32)\n\n @test_util.run_deprecated_v1\n def testInt64Basic(self):\n self._testBasic(dtype=np.int64)\n\n def _testConfMatrixOnTensors(self, tf_dtype, np_dtype):\n with self.cached_session() as sess:\n m_neg = array_ops.placeholder(dtype=dtypes.float32)\n m_pos = array_ops.placeholder(dtype=dtypes.float32)\n s = array_ops.placeholder(dtype=dtypes.float32)\n\n neg = random_ops.random_normal(\n [20], mean=m_neg, stddev=s, dtype=dtypes.float32)\n pos = random_ops.random_normal(\n [20], mean=m_pos, stddev=s, dtype=dtypes.float32)\n\n data = array_ops.concat([neg, pos], 0)\n data = math_ops.cast(math_ops.round(data), tf_dtype)\n data = math_ops.minimum(math_ops.maximum(data, 0), 1)\n lab = array_ops.concat(\n [\n array_ops.zeros(\n [20], dtype=tf_dtype), array_ops.ones(\n [20], dtype=tf_dtype)\n ],\n 0)\n\n cm = confusion_matrix.confusion_matrix(\n lab, data, dtype=tf_dtype, num_classes=2)\n\n d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0, m_pos: 1.0, s: 1.0})\n\n truth = np.zeros([2, 2], 
dtype=np_dtype)\n for i in xrange(len(d)):\n truth[l[i], d[i]] += 1\n\n self.assertEqual(cm_out.dtype, np_dtype)\n self.assertAllClose(cm_out, truth, atol=1e-10)\n\n @test_util.run_deprecated_v1\n def testOnTensors_int32(self):\n self._testConfMatrixOnTensors(dtypes.int32, np.int32)\n\n @test_util.run_deprecated_v1\n def testOnTensors_int64(self):\n self._testConfMatrixOnTensors(dtypes.int64, np.int64)\n\n def _testDifferentLabelsInPredictionAndTarget(self, dtype):\n labels = np.asarray([4, 5, 6], dtype=dtype)\n predictions = np.asarray([1, 2, 3], dtype=dtype)\n\n truth = np.asarray(\n [[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0]],\n dtype=dtype)\n\n self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)\n\n @test_util.run_deprecated_v1\n def testInt32DifferentLabels(self, dtype=np.int32):\n self._testDifferentLabelsInPredictionAndTarget(dtype)\n\n @test_util.run_deprecated_v1\n def testInt64DifferentLabels(self, dtype=np.int64):\n self._testDifferentLabelsInPredictionAndTarget(dtype)\n\n def _testMultipleLabels(self, dtype):\n labels = np.asarray([1, 1, 2, 3, 5, 1, 3, 6, 3, 1], dtype=dtype)\n predictions = np.asarray([1, 1, 2, 3, 5, 6, 1, 2, 3, 4], dtype=dtype)\n\n truth = np.asarray(\n [[0, 0, 0, 0, 0, 0, 0],\n [0, 2, 0, 0, 1, 0, 1],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 1, 0, 2, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 0, 0]],\n dtype=dtype)\n\n self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)\n\n @test_util.run_deprecated_v1\n def testInt32MultipleLabels(self, dtype=np.int32):\n self._testMultipleLabels(dtype)\n\n @test_util.run_deprecated_v1\n def testInt64MultipleLabels(self, dtype=np.int64):\n self._testMultipleLabels(dtype)\n\n @test_util.run_deprecated_v1\n def testWeighted(self):\n labels = np.arange(5, dtype=np.int32)\n predictions = np.arange(5, dtype=np.int32)\n weights = np.arange(5, dtype=np.int32)\n\n truth = np.asarray(\n [[0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 2, 0, 0],\n [0, 0, 0, 3, 0],\n [0, 0, 0, 0, 4]],\n dtype=np.int32)\n\n self._testConfMatrix(\n labels=labels, predictions=predictions, weights=weights, truth=truth)\n\n @test_util.run_deprecated_v1\n def testLabelsTooLarge(self):\n labels = np.asarray([1, 1, 0, 3, 5], dtype=np.int32)\n predictions = np.asarray([2, 1, 0, 2, 2], dtype=np.int32)\n with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,\n \"`labels`.*out of bound\"):\n self._testConfMatrix(\n labels=labels, predictions=predictions, num_classes=3, truth=None)\n\n def testLabelsNegative(self):\n labels = np.asarray([1, 1, 0, -1, -1], dtype=np.int32)\n predictions = np.asarray([2, 1, 0, 2, 2], dtype=np.int32)\n with self.assertRaisesOpError(\"`labels`.*negative values\"):\n self._testConfMatrix(\n labels=labels, predictions=predictions, num_classes=3, truth=None)\n\n @test_util.run_deprecated_v1\n def testPredictionsTooLarge(self):\n labels = np.asarray([1, 1, 0, 2, 2], dtype=np.int32)\n predictions = np.asarray([2, 1, 0, 3, 5], dtype=np.int32)\n with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,\n \"`predictions`.*out of bound\"):\n self._testConfMatrix(\n labels=labels, predictions=predictions, num_classes=3, truth=None)\n\n def testPredictionsNegative(self):\n labels = np.asarray([1, 1, 0, 2, 2], dtype=np.int32)\n predictions = np.asarray([2, 1, 0, -1, -1], dtype=np.int32)\n with 
self.assertRaisesOpError(\"`predictions`.*negative values\"):\n self._testConfMatrix(\n labels=labels, predictions=predictions, num_classes=3, truth=None)\n\n @test_util.run_deprecated_v1\n def testInputDifferentSize(self):\n labels = np.asarray([1, 2])\n predictions = np.asarray([1, 2, 3])\n self.assertRaisesRegex(ValueError, \"must be equal\",\n confusion_matrix.confusion_matrix, predictions,\n labels)\n\n def testOutputIsInt32(self):\n labels = np.arange(2)\n predictions = np.arange(2)\n with self.cached_session():\n cm = confusion_matrix.confusion_matrix(\n labels, predictions, dtype=dtypes.int32)\n tf_cm = self.evaluate(cm)\n self.assertEqual(tf_cm.dtype, np.int32)\n\n def testOutputIsInt64(self):\n labels = np.arange(2)\n predictions = np.arange(2)\n with self.cached_session():\n cm = confusion_matrix.confusion_matrix(\n labels, predictions, dtype=dtypes.int64)\n tf_cm = self.evaluate(cm)\n self.assertEqual(tf_cm.dtype, np.int64)\n\n\nclass RemoveSqueezableDimensionsTest(test.TestCase):\n\n @test_util.run_deprecated_v1\n def testBothScalarShape(self):\n label_values = 1.0\n prediction_values = 0.0\n static_labels, static_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n label_values, prediction_values))\n\n labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n predictions_placeholder = array_ops.placeholder(dtype=dtypes.float32)\n dynamic_labels, dynamic_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n labels_placeholder, predictions_placeholder))\n\n with self.cached_session():\n self.assertAllEqual(label_values, self.evaluate(static_labels))\n self.assertAllEqual(prediction_values, self.evaluate(static_predictions))\n feed_dict = {\n labels_placeholder: label_values,\n predictions_placeholder: prediction_values\n }\n self.assertAllEqual(\n label_values, dynamic_labels.eval(feed_dict=feed_dict))\n self.assertAllEqual(\n prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))\n\n @test_util.run_deprecated_v1\n def testSameShape(self):\n label_values = np.ones(shape=(2, 3, 1))\n prediction_values = np.zeros_like(label_values)\n static_labels, static_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n label_values, prediction_values))\n\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n dynamic_labels, dynamic_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n labels_placeholder, predictions_placeholder))\n\n with self.cached_session():\n self.assertAllEqual(label_values, self.evaluate(static_labels))\n self.assertAllEqual(prediction_values, self.evaluate(static_predictions))\n feed_dict = {\n labels_placeholder: label_values,\n predictions_placeholder: prediction_values\n }\n self.assertAllEqual(\n label_values, dynamic_labels.eval(feed_dict=feed_dict))\n self.assertAllEqual(\n prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))\n\n @test_util.run_deprecated_v1\n def testSameShapeExpectedRankDiff0(self):\n label_values = np.ones(shape=(2, 3, 1))\n prediction_values = np.zeros_like(label_values)\n static_labels, static_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n label_values, prediction_values, expected_rank_diff=0))\n\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n dynamic_labels, dynamic_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n labels_placeholder, 
predictions_placeholder, expected_rank_diff=0))\n\n with self.cached_session():\n self.assertAllEqual(label_values, self.evaluate(static_labels))\n self.assertAllEqual(prediction_values, self.evaluate(static_predictions))\n feed_dict = {\n labels_placeholder: label_values,\n predictions_placeholder: prediction_values\n }\n self.assertAllEqual(\n label_values, dynamic_labels.eval(feed_dict=feed_dict))\n self.assertAllEqual(\n prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))\n\n @test_util.run_deprecated_v1\n def testSqueezableLabels(self):\n label_values = np.ones(shape=(2, 3, 1))\n prediction_values = np.zeros(shape=(2, 3))\n static_labels, static_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n label_values, prediction_values))\n\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n dynamic_labels, dynamic_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n labels_placeholder, predictions_placeholder))\n\n expected_label_values = np.reshape(label_values, newshape=(2, 3))\n with self.cached_session():\n self.assertAllEqual(expected_label_values, self.evaluate(static_labels))\n self.assertAllEqual(prediction_values, self.evaluate(static_predictions))\n feed_dict = {\n labels_placeholder: label_values,\n predictions_placeholder: prediction_values\n }\n self.assertAllEqual(\n expected_label_values, dynamic_labels.eval(feed_dict=feed_dict))\n self.assertAllEqual(\n prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))\n\n @test_util.run_deprecated_v1\n def testSqueezableLabelsExpectedRankDiffPlus1(self):\n label_values = np.ones(shape=(2, 3, 1))\n prediction_values = np.zeros(shape=(2, 3, 5))\n static_labels, static_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n label_values, prediction_values, expected_rank_diff=1))\n\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n dynamic_labels, dynamic_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n labels_placeholder, predictions_placeholder, expected_rank_diff=1))\n\n expected_label_values = np.reshape(label_values, newshape=(2, 3))\n with self.cached_session():\n self.assertAllEqual(expected_label_values, self.evaluate(static_labels))\n self.assertAllEqual(prediction_values, self.evaluate(static_predictions))\n feed_dict = {\n labels_placeholder: label_values,\n predictions_placeholder: prediction_values\n }\n self.assertAllEqual(\n expected_label_values, dynamic_labels.eval(feed_dict=feed_dict))\n self.assertAllEqual(\n prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))\n\n @test_util.run_deprecated_v1\n def testSqueezablePredictions(self):\n label_values = np.ones(shape=(2, 3))\n prediction_values = np.zeros(shape=(2, 3, 1))\n static_labels, static_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n label_values, prediction_values))\n\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n dynamic_labels, dynamic_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n labels_placeholder, predictions_placeholder))\n\n expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))\n with self.cached_session():\n self.assertAllEqual(label_values, self.evaluate(static_labels))\n self.assertAllEqual(expected_prediction_values,\n 
self.evaluate(static_predictions))\n feed_dict = {\n labels_placeholder: label_values,\n predictions_placeholder: prediction_values\n }\n self.assertAllEqual(\n label_values, dynamic_labels.eval(feed_dict=feed_dict))\n self.assertAllEqual(\n expected_prediction_values,\n dynamic_predictions.eval(feed_dict=feed_dict))\n\n @test_util.run_deprecated_v1\n def testSqueezablePredictionsExpectedRankDiffMinus1(self):\n label_values = np.ones(shape=(2, 3, 5))\n prediction_values = np.zeros(shape=(2, 3, 1))\n static_labels, static_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n label_values, prediction_values, expected_rank_diff=-1))\n\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n dynamic_labels, dynamic_predictions = (\n confusion_matrix.remove_squeezable_dimensions(\n labels_placeholder, predictions_placeholder, expected_rank_diff=-1))\n\n expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))\n with self.cached_session():\n self.assertAllEqual(label_values, self.evaluate(static_labels))\n self.assertAllEqual(expected_prediction_values,\n self.evaluate(static_predictions))\n feed_dict = {\n labels_placeholder: label_values,\n predictions_placeholder: prediction_values\n }\n self.assertAllEqual(\n label_values, dynamic_labels.eval(feed_dict=feed_dict))\n self.assertAllEqual(\n expected_prediction_values,\n dynamic_predictions.eval(feed_dict=feed_dict))\n\n @test_util.run_deprecated_v1\n def testUnsqueezableLabels(self):\n label_values = np.ones(shape=(2, 3, 2))\n prediction_values = np.zeros(shape=(2, 3))\n\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n _, dynamic_predictions = (\n confusion_matrix.remove_squeezable_dimensions(labels_placeholder,\n predictions_placeholder))\n\n with self.cached_session():\n feed_dict = {\n labels_placeholder: label_values,\n predictions_placeholder: prediction_values\n }\n self.assertAllEqual(\n prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))\n\n @test_util.run_deprecated_v1\n def testUnsqueezablePredictions(self):\n label_values = np.ones(shape=(2, 3))\n prediction_values = np.zeros(shape=(2, 3, 2))\n\n labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)\n dynamic_labels, _ = (\n confusion_matrix.remove_squeezable_dimensions(labels_placeholder,\n predictions_placeholder))\n\n with self.cached_session():\n feed_dict = {\n labels_placeholder: label_values,\n predictions_placeholder: prediction_values\n }\n self.assertAllEqual(\n label_values, dynamic_labels.eval(feed_dict=feed_dict))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
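For reference, the public entry point exercised by the file above is tf.math.confusion_matrix; a minimal usage sketch mirroring the pydoc example checked in testExample:

import tensorflow as tf

labels = tf.constant([1, 2, 4])
predictions = tf.constant([2, 2, 4])
# Entry [i, j] counts examples with label i that were predicted as class j; with
# num_classes unset, the matrix is (max id + 1) x (max id + 1), here 5 x 5.
cm = tf.math.confusion_matrix(labels, predictions)
print(cm.numpy())  # ones at (1, 2), (2, 2) and (4, 4), zeros elsewhere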
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras-based einsum dense layer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nimport numpy as np\n\nfrom tensorflow.python import keras\n\nfrom tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.layers import einsum_dense\nfrom tensorflow.python.platform import test\n\n\n@keras_parameterized.run_all_keras_modes\[email protected]_parameters(\n {\n \"testcase_name\": \"_1d_end_weight\",\n \"equation\": \"ab,b->a\",\n \"bias_axes\": None,\n \"input_shape\": (None, 32),\n \"output_shape\": [],\n \"expected_weight_shape\": [32],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None,)\n }, {\n \"testcase_name\": \"_2d_middle_weight\",\n \"equation\": \"ab,bc->ac\",\n \"bias_axes\": None,\n \"input_shape\": (None, 32),\n \"output_shape\": (64),\n \"expected_weight_shape\": [32, 64],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, 64)\n }, {\n \"testcase_name\": \"_3d_bert\",\n \"equation\": \"abc,cde->abde\",\n \"bias_axes\": None,\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (1, 3, 4),\n \"expected_weight_shape\": [2, 3, 4],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, 1, 3, 4)\n }, {\n \"testcase_name\": \"_3d_3_bias\",\n \"equation\": \"abc,cde->abde\",\n \"bias_axes\": \"e\",\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (1, 3, 4),\n \"expected_weight_shape\": [2, 3, 4],\n \"expected_bias_shape\": [4],\n \"expected_output_shape\": (None, 1, 3, 4)\n }, {\n \"testcase_name\": \"_3d_2_bias\",\n \"equation\": \"abc,cde->abde\",\n \"bias_axes\": \"d\",\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (1, 3, 4),\n \"expected_weight_shape\": [2, 3, 4],\n \"expected_bias_shape\": [3, 1],\n \"expected_output_shape\": (None, 1, 3, 4)\n }, {\n \"testcase_name\": \"_3d_1_3_bias\",\n \"equation\": \"abc,cde->abde\",\n \"bias_axes\": \"be\",\n \"input_shape\": (None, 7, 2),\n \"output_shape\": (7, 3, 4),\n \"expected_weight_shape\": [2, 3, 4],\n \"expected_bias_shape\": [7, 1, 4],\n \"expected_output_shape\": (None, 7, 3, 4)\n }, {\n \"testcase_name\": \"_3d_bert_projection\",\n \"equation\": \"BFNH,NHD->BFD\",\n \"bias_axes\": None,\n \"input_shape\": (None, 1, 2, 3),\n \"output_shape\": (1, 4),\n \"expected_weight_shape\": [2, 3, 4],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, 1, 4)\n }, {\n \"testcase_name\": \"_2d_bert\",\n \"equation\": \"abc,cd->abd\",\n \"bias_axes\": None,\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (1, 4),\n \"expected_weight_shape\": [2, 4],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, 1, 4)\n }, {\n 
\"testcase_name\": \"_embedding_1d\",\n \"equation\": \"i,d->id\",\n \"bias_axes\": None,\n \"input_shape\": (None,),\n \"output_shape\": (2),\n \"expected_weight_shape\": [2],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, 2)\n }, {\n \"testcase_name\": \"_xlnet_lm\",\n \"equation\": \"ibd,nd->ibn\",\n \"bias_axes\": None,\n \"input_shape\": (None, None, 1),\n \"output_shape\": (None, 2),\n \"expected_weight_shape\": [2, 1],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, None, 2)\n }, {\n \"testcase_name\": \"_2d_precast\",\n \"equation\": \"...b,bc->...c\",\n \"bias_axes\": None,\n \"input_shape\": (None, 32),\n \"output_shape\": (64),\n \"expected_weight_shape\": [32, 64],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, 64)\n }, {\n \"testcase_name\": \"_2d_precast_multiple_elided_dims\",\n \"equation\": \"...b,bc->...c\",\n \"bias_axes\": None,\n \"input_shape\": (None, None, 32),\n \"output_shape\": (64),\n \"expected_weight_shape\": [32, 64],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, None, 64)\n }, {\n \"testcase_name\": \"_3d_precast\",\n \"equation\": \"...c,cde->...de\",\n \"bias_axes\": None,\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (3, 4),\n \"expected_weight_shape\": [2, 3, 4],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, 1, 3, 4)\n }, {\n \"testcase_name\": \"_3d_precast_3_bias\",\n \"equation\": \"...c,cde->...de\",\n \"bias_axes\": \"e\",\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (3, 4),\n \"expected_weight_shape\": [2, 3, 4],\n \"expected_bias_shape\": [4],\n \"expected_output_shape\": (None, 1, 3, 4)\n }, {\n \"testcase_name\": \"_3d_precast_2_bias\",\n \"equation\": \"...c,cde->...de\",\n \"bias_axes\": \"d\",\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (3, 4),\n \"expected_weight_shape\": [2, 3, 4],\n \"expected_bias_shape\": [3, 1],\n \"expected_output_shape\": (None, 1, 3, 4)\n }, {\n \"testcase_name\": \"_3d_precast_2_3_bias\",\n \"equation\": \"...c,cde->...de\",\n \"bias_axes\": \"de\",\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (3, 4),\n \"expected_weight_shape\": [2, 3, 4],\n \"expected_bias_shape\": [3, 4],\n \"expected_output_shape\": (None, 1, 3, 4)\n }, {\n \"testcase_name\": \"_2d_postcast\",\n \"equation\": \"bc...,cd->bd...\",\n \"bias_axes\": None,\n \"input_shape\": (None, 1, 2, 3),\n \"output_shape\": (4),\n \"expected_weight_shape\": [1, 4],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, 4, 2, 3)\n }, {\n \"testcase_name\": \"_3d_postcast\",\n \"equation\": \"bc...,cde->bde...\",\n \"bias_axes\": None,\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (3, 4),\n \"expected_weight_shape\": [1, 3, 4],\n \"expected_bias_shape\": None,\n \"expected_output_shape\": (None, 3, 4, 2)\n }, {\n \"testcase_name\": \"_3d_postcast_1_bias\",\n \"equation\": \"bc...,cde->bde...\",\n \"bias_axes\": \"d\",\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (3, 4),\n \"expected_weight_shape\": [1, 3, 4],\n \"expected_bias_shape\": [3, 1, 1],\n \"expected_output_shape\": (None, 3, 4, 2)\n }, {\n \"testcase_name\": \"_3d_postcast_2_bias\",\n \"equation\": \"bc...,cde->bde...\",\n \"bias_axes\": \"e\",\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (3, 4),\n \"expected_weight_shape\": [1, 3, 4],\n \"expected_bias_shape\": [4, 1],\n \"expected_output_shape\": (None, 3, 4, 2)\n }, {\n \"testcase_name\": \"_3d_postcast_1_2_bias\",\n \"equation\": \"bc...,cde->bde...\",\n \"bias_axes\": 
\"de\",\n \"input_shape\": (None, 1, 2),\n \"output_shape\": (3, 4),\n \"expected_weight_shape\": [1, 3, 4],\n \"expected_bias_shape\": [3, 4, 1],\n \"expected_output_shape\": (None, 3, 4, 2)\n })\nclass TestEinsumDenseLayer(keras_parameterized.TestCase):\n\n def test_weight_shapes(self, equation, bias_axes, input_shape, output_shape,\n expected_weight_shape, expected_bias_shape,\n expected_output_shape):\n del expected_output_shape # Not used in this test.\n\n weight_shape, bias_shape, _ = einsum_dense._analyze_einsum_string(\n equation, bias_axes, input_shape, output_shape)\n\n self.assertAllEqual(expected_weight_shape, weight_shape)\n self.assertAllEqual(expected_bias_shape, bias_shape)\n\n def test_layer_creation(self, equation, bias_axes, input_shape, output_shape,\n expected_weight_shape, expected_bias_shape,\n expected_output_shape):\n # Keras elides the 0-dimension of the input shape when constructing inputs.\n non_batch_input_shape = list(input_shape)[1:]\n\n input_tensor = keras.Input(shape=non_batch_input_shape)\n layer = einsum_dense.EinsumDense(\n equation=equation, output_shape=output_shape, bias_axes=bias_axes)\n output_tensor = layer(input_tensor)\n\n self.assertAllEqual(expected_weight_shape, layer.kernel.shape.as_list())\n if expected_bias_shape is None:\n self.assertIsNone(layer.bias)\n else:\n self.assertAllEqual(expected_bias_shape, layer.bias.shape.as_list())\n self.assertAllEqual(expected_output_shape, output_tensor.shape.as_list())\n\n\n@keras_parameterized.run_all_keras_modes\nclass TestEinsumLayerAPI(keras_parameterized.TestCase):\n\n def test_layer_api(self):\n input_data = np.array([[1.0, 2.0], [3.0, 4.0]])\n kwargs = {\n \"equation\": \"...b,bc->...c\",\n \"bias_axes\": \"c\",\n \"output_shape\": 4,\n \"bias_initializer\": keras.initializers.constant(0.03),\n \"kernel_initializer\": keras.initializers.constant(0.5),\n \"dtype\": input_data.dtype\n }\n expected_output = np.array([[1.53, 1.53, 1.53, 1.53],\n [3.53, 3.53, 3.53, 3.53]])\n\n output_data = testing_utils.layer_test(\n einsum_dense.EinsumDense,\n kwargs=kwargs,\n input_shape=(None, 2),\n input_data=input_data)\n\n self.assertAllClose(expected_output, output_data)\n\n def test_unspecified_bias_dim_fails(self):\n input_tensor = keras.Input(shape=(32,))\n layer = einsum_dense.EinsumDense(\n equation=\"ab,bc->ac\", output_shape=64, bias_axes=\"y\")\n with self.assertRaisesRegex(\n ValueError, \".*is not a part of the output specification.*\"):\n _ = layer(input_tensor)\n\n def test_incompatible_input_output_shape_fails(self):\n input_tensor = keras.Input(shape=(32, 64))\n layer = einsum_dense.EinsumDense(\n equation=\"abc,cd->abd\", output_shape=(10, 96))\n with self.assertRaisesRegex(\n ValueError, \".*Input shape and output shape do not match at shared \"\n \"dimension 'b'.*\"):\n _ = layer(input_tensor)\n\n def test_unspecified_output_dim_fails(self):\n input_tensor = keras.Input(shape=(32,))\n layer = einsum_dense.EinsumDense(equation=\"ab,bc->cd\", output_shape=64)\n with self.assertRaisesRegex(\n ValueError, \".*Dimension 'd' was specified in the output 'cd' but has \"\n \"no corresponding dim.*\"):\n _ = layer(input_tensor)\n\n def test_unspecified_weight_dim_fails(self):\n input_tensor = keras.Input(shape=(32,))\n layer = einsum_dense.EinsumDense(equation=\"ab,zd->ad\", output_shape=64)\n with self.assertRaisesRegex(ValueError,\n \".*Weight dimension 'z' did not have a match \"):\n _ = layer(input_tensor)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for signature_def_util.py.\n\n - Tests adding a SignatureDef to TFLite metadata.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nfrom tensorflow.core.protobuf import meta_graph_pb2\nfrom tensorflow.lite.tools.signature import signature_def_utils\n\n\nclass SignatureDefUtilsTest(tf.test.TestCase):\n\n def testAddSignatureDefToFlatbufferMetadata(self):\n \"\"\"Test a SavedModel conversion has correct Metadata.\"\"\"\n filename = tf.compat.v1.resource_loader.get_path_to_datafile(\n '../../testdata/add.bin')\n if not os.path.exists(filename):\n raise IOError('File \"{0}\" does not exist in {1}.'.format(\n filename,\n tf.compat.v1.resource_loader.get_root_dir_with_all_resources()))\n\n with tf.io.gfile.GFile(filename, 'rb') as fp:\n tflite_model = bytearray(fp.read())\n\n self.assertIsNotNone(tflite_model, 'TFLite model is none')\n sig_input_tensor = meta_graph_pb2.TensorInfo(\n dtype=tf.as_dtype(tf.float32).as_datatype_enum,\n tensor_shape=tf.TensorShape([1, 8, 8, 3]).as_proto())\n sig_input_tensor_signature = {'x': sig_input_tensor}\n sig_output_tensor = meta_graph_pb2.TensorInfo(\n dtype=tf.as_dtype(tf.float32).as_datatype_enum,\n tensor_shape=tf.TensorShape([1, 8, 8, 3]).as_proto())\n sig_output_tensor_signature = {'y': sig_output_tensor}\n predict_signature_def = (\n tf.compat.v1.saved_model.build_signature_def(\n sig_input_tensor_signature, sig_output_tensor_signature,\n tf.saved_model.PREDICT_METHOD_NAME))\n serving_key = tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n signature_def_map = {serving_key: predict_signature_def}\n tflite_model = signature_def_utils.set_signature_defs(\n tflite_model, signature_def_map)\n saved_signature_def_map = signature_def_utils.get_signature_defs(\n tflite_model)\n signature_def = saved_signature_def_map.get(serving_key)\n self.assertIsNotNone(signature_def, 'SignatureDef not found')\n self.assertEqual(signature_def.SerializeToString(),\n predict_signature_def.SerializeToString())\n remove_tflite_model = (\n signature_def_utils.clear_signature_defs(tflite_model))\n signature_def_map = signature_def_utils.get_signature_defs(\n remove_tflite_model)\n self.assertIsNone(signature_def_map.get(serving_key),\n 'SignatureDef found, but should be missing')\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for data input for speech commands.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\n\nfrom tensorflow.examples.speech_commands import wav_to_features\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import test\n\n\nclass WavToFeaturesTest(test.TestCase):\n\n def _getWavData(self):\n with self.cached_session():\n sample_data = tf.zeros([32000, 2])\n wav_encoder = tf.audio.encode_wav(sample_data, 16000)\n wav_data = self.evaluate(wav_encoder)\n return wav_data\n\n def _saveTestWavFile(self, filename, wav_data):\n with open(filename, \"wb\") as f:\n f.write(wav_data)\n\n def _saveWavFolders(self, root_dir, labels, how_many):\n wav_data = self._getWavData()\n for label in labels:\n dir_name = os.path.join(root_dir, label)\n os.mkdir(dir_name)\n for i in range(how_many):\n file_path = os.path.join(dir_name, \"some_audio_%d.wav\" % i)\n self._saveTestWavFile(file_path, wav_data)\n\n @test_util.run_deprecated_v1\n def testWavToFeatures(self):\n tmp_dir = self.get_temp_dir()\n wav_dir = os.path.join(tmp_dir, \"wavs\")\n os.mkdir(wav_dir)\n self._saveWavFolders(wav_dir, [\"a\", \"b\", \"c\"], 100)\n input_file_path = os.path.join(tmp_dir, \"input.wav\")\n output_file_path = os.path.join(tmp_dir, \"output.c\")\n wav_data = self._getWavData()\n self._saveTestWavFile(input_file_path, wav_data)\n wav_to_features.wav_to_features(16000, 1000, 10, 10, 40, True, \"average\",\n input_file_path, output_file_path)\n with open(output_file_path, \"rb\") as f:\n content = f.read()\n self.assertIn(b\"const unsigned char g_input_data\", content)\n\n @test_util.run_deprecated_v1\n def testWavToFeaturesMicro(self):\n tmp_dir = self.get_temp_dir()\n wav_dir = os.path.join(tmp_dir, \"wavs\")\n os.mkdir(wav_dir)\n self._saveWavFolders(wav_dir, [\"a\", \"b\", \"c\"], 100)\n input_file_path = os.path.join(tmp_dir, \"input.wav\")\n output_file_path = os.path.join(tmp_dir, \"output.c\")\n wav_data = self._getWavData()\n self._saveTestWavFile(input_file_path, wav_data)\n wav_to_features.wav_to_features(16000, 1000, 10, 10, 40, True, \"micro\",\n input_file_path, output_file_path)\n with open(output_file_path, \"rb\") as f:\n content = f.read()\n self.assertIn(b\"const unsigned char g_input_data\", content)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test for multi-worker training tutorial.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport contextlib\nimport os\nimport re\nimport zipfile\nfrom absl import logging\nfrom absl.testing import parameterized\nimport numpy as np\nfrom tensorflow.python import keras\nfrom tensorflow.python.data.experimental.ops import distribute_options\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import collective_all_reduce_strategy\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import multi_process_runner\nfrom tensorflow.python.distribute import multi_worker_test_base\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras.datasets import mnist\nfrom tensorflow.python.keras.optimizer_v2 import gradient_descent\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training.tracking import util as tracking_util\nfrom tensorflow.python.util import nest\n\n\nclass MultiWorkerTutorialTest(parameterized.TestCase, test.TestCase):\n \"\"\"Test multi-worker training flow demo'ed in go/multi-worker-with-keras.\"\"\"\n\n @contextlib.contextmanager\n def skip_fetch_failure_exception(self):\n try:\n yield\n except zipfile.BadZipfile as e:\n self.skipTest('Data loading error: Bad magic number for file header.')\n except Exception as e: # pylint: disable=broad-except\n if 'URL fetch failure' in str(e):\n self.skipTest('URL fetch error not considered failure of the test.')\n else:\n raise\n\n @combinations.generate(\n combinations.combine(\n mode=['eager'],\n shard_policy=[None] + list(distribute_options.AutoShardPolicy)))\n def testMultiWorkerTutorial(self, mode, shard_policy):\n \"\"\"Test multi-worker training flow demo'ed in go/multi-worker-with-keras.\n\n This test should be kept in sync with the code samples in\n go/multi-worker-with-keras.\n\n Args:\n mode: Runtime mode.\n shard_policy: None or any of tf.data.experimental.AutoShardPolicy for\n testing.\n \"\"\"\n if shard_policy is distribute_options.AutoShardPolicy.FILE:\n self.skipTest('TensorSliceDataset is not shardable with FILE policy.')\n\n def mnist_dataset(batch_size):\n with self.skip_fetch_failure_exception():\n (x_train, y_train), _ = mnist.load_data()\n # The `x` arrays are in uint8 and have values in the range [0, 255].\n # We need to convert them to float32 with values in the range [0, 1]\n x_train = x_train / np.float32(255)\n y_train = y_train.astype(np.int64)\n train_dataset = dataset_ops.DatasetV2.from_tensor_slices(\n (x_train, y_train)).shuffle(60000).repeat().batch(batch_size)\n return 
train_dataset\n\n def build_and_compile_cnn_model():\n model = keras.Sequential([\n keras.layers.Input(shape=(28, 28)),\n keras.layers.Reshape(target_shape=(28, 28, 1)),\n keras.layers.Conv2D(32, 3, activation='relu'),\n keras.layers.Flatten(),\n keras.layers.Dense(128, activation='relu'),\n keras.layers.Dense(10)\n ])\n model.compile(\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=gradient_descent.SGD(learning_rate=0.001),\n metrics=['accuracy'])\n return model\n\n per_worker_batch_size = 64\n\n single_worker_dataset = mnist_dataset(per_worker_batch_size)\n single_worker_model = build_and_compile_cnn_model()\n single_worker_model.fit(single_worker_dataset, epochs=3, steps_per_epoch=70)\n\n num_workers = 4\n\n def proc_func(model_path, checkpoint_dir):\n global_batch_size = per_worker_batch_size * num_workers\n strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()\n with strategy.scope():\n multi_worker_model = build_and_compile_cnn_model()\n\n callbacks = [\n keras.callbacks.ModelCheckpoint(\n filepath=os.path.join(self.get_temp_dir(), 'checkpoint'))\n ]\n\n multi_worker_dataset = mnist_dataset(global_batch_size)\n if shard_policy:\n options = dataset_ops.Options()\n options.experimental_distribute.auto_shard_policy = shard_policy\n multi_worker_dataset = multi_worker_dataset.with_options(options)\n\n multi_worker_model.fit(\n multi_worker_dataset,\n epochs=2,\n steps_per_epoch=20,\n callbacks=callbacks)\n\n def _is_chief(task_type, task_id):\n return task_type is None or task_type == 'chief' or (\n task_type == 'worker' and task_id == 0)\n\n def _get_temp_dir(dirpath, task_id):\n base_dirpath = 'workertemp_' + str(task_id)\n temp_dir = os.path.join(dirpath, base_dirpath)\n file_io.recursive_create_dir_v2(temp_dir)\n return temp_dir\n\n def write_filepath(filepath, task_type, task_id):\n dirpath = os.path.dirname(filepath)\n base = os.path.basename(filepath)\n if not _is_chief(task_type, task_id):\n dirpath = _get_temp_dir(dirpath, task_id)\n return os.path.join(dirpath, base)\n\n task_type, task_id = (strategy.cluster_resolver.task_type,\n strategy.cluster_resolver.task_id)\n write_model_path = write_filepath(model_path, task_type, task_id)\n\n multi_worker_model.save(write_model_path)\n if not _is_chief(task_type, task_id):\n file_io.delete_recursively_v2(os.path.dirname(write_model_path))\n\n # Make sure chief finishes saving before non-chief's assertions.\n multi_process_runner.barrier().wait()\n\n if not file_io.file_exists(model_path):\n raise RuntimeError()\n if file_io.file_exists(write_model_path) != _is_chief(task_type, task_id):\n raise RuntimeError()\n\n loaded_model = keras.saving.save.load_model(model_path)\n loaded_model.fit(multi_worker_dataset, epochs=2, steps_per_epoch=20)\n\n checkpoint = tracking_util.Checkpoint(model=multi_worker_model)\n write_checkpoint_dir = write_filepath(checkpoint_dir, task_type, task_id)\n checkpoint_manager = checkpoint_management.CheckpointManager(\n checkpoint, directory=write_checkpoint_dir, max_to_keep=1)\n\n checkpoint_manager.save()\n if not _is_chief(task_type, task_id):\n file_io.delete_recursively_v2(write_checkpoint_dir)\n\n # Make sure chief finishes saving before non-chief's assertions.\n multi_process_runner.barrier().wait()\n\n if not file_io.file_exists(checkpoint_dir):\n raise RuntimeError()\n if file_io.file_exists(write_checkpoint_dir) != _is_chief(\n task_type, task_id):\n raise RuntimeError()\n\n latest_checkpoint = checkpoint_management.latest_checkpoint(\n 
checkpoint_dir)\n checkpoint.restore(latest_checkpoint)\n multi_worker_model.fit(multi_worker_dataset, epochs=2, steps_per_epoch=20)\n\n logging.info('testMultiWorkerTutorial successfully ends')\n\n model_path = os.path.join(self.get_temp_dir(), 'model.tf')\n checkpoint_dir = os.path.join(self.get_temp_dir(), 'ckpt')\n with test_util.skip_if_error(self, errors_impl.UnavailableError):\n mpr_result = multi_process_runner.run(\n proc_func,\n multi_worker_test_base.create_cluster_spec(num_workers=num_workers),\n args=(model_path, checkpoint_dir),\n list_stdout=True)\n\n self.assertTrue(\n any([\n 'testMultiWorkerTutorial successfully ends' in msg\n for msg in mpr_result.stdout\n ]))\n\n def extract_accuracy(worker_id, input_string):\n match = re.match(\n r'\\[worker\\-{}\\].*accuracy: (\\d+\\.\\d+).*'.format(worker_id),\n input_string)\n return None if match is None else float(match.group(1))\n\n for worker_id in range(num_workers):\n accu_result = nest.map_structure(\n lambda x: extract_accuracy(worker_id, x), # pylint: disable=cell-var-from-loop\n mpr_result.stdout)\n self.assertTrue(\n any(accu_result), 'Every worker is supposed to have accuracy result.')\n\n\nif __name__ == '__main__':\n multi_process_runner.test_main()\n",
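The tutorial test above packages the same steps a user would write against the public API. A condensed sketch, assuming tf.distribute.experimental.MultiWorkerMirroredStrategy and a TF_CONFIG environment variable set on each worker:

import tensorflow as tf

strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
with strategy.scope():
  # Build and compile inside the scope so variables are created under the strategy.
  model = tf.keras.Sequential([
      tf.keras.layers.Flatten(input_shape=(28, 28)),
      tf.keras.layers.Dense(128, activation='relu'),
      tf.keras.layers.Dense(10),
  ])
  model.compile(
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
      metrics=['accuracy'])
# model.fit(...) then runs with a global batch size of
# per_worker_batch_size * num_workers, as in proc_func above.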
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Grappler Arithmetic Optimizer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass ArithmeticOptimizerTest(test.TestCase):\n\n # See b/146524878.\n def testFunctionArgShapeInference(self):\n\n @def_function.function\n def f(x, y):\n return math_ops.matmul(\n x, array_ops.reshape(array_ops.transpose(y), [384, 1536]))\n\n with context.eager_mode():\n x = array_ops.ones((1, 384))\n y = array_ops.ones((1536, 384))\n with context.collect_graphs(optimized=True) as graphs:\n f(x, y).numpy()\n self.assertLen(graphs, 1)\n self.assertLen(graphs[0].node, 4)\n self.assertEqual(graphs[0].node[2].name,\n 'ArithmeticOptimizer/FoldTransposeIntoMatMul_MatMul')\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.TFRecordDataset`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport os\nimport zlib\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import readers\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.lib.io import python_io\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import compat\n\n\nclass TFRecordDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n def setUp(self):\n super(TFRecordDatasetTest, self).setUp()\n self._num_files = 2\n self._num_records = 7\n self.test_filenames = self._createFiles()\n\n def _dataset_factory(self,\n filenames,\n compression_type=\"\",\n num_epochs=1,\n batch_size=None):\n\n repeat_dataset = readers.TFRecordDataset(\n filenames, compression_type).repeat(num_epochs)\n if batch_size:\n return repeat_dataset.batch(batch_size)\n return repeat_dataset\n\n def _record(self, f, r):\n return compat.as_bytes(\"Record %d of file %d\" % (r, f))\n\n def _createFiles(self):\n filenames = []\n for i in range(self._num_files):\n fn = os.path.join(self.get_temp_dir(), \"tf_record.%d.txt\" % i)\n filenames.append(fn)\n writer = python_io.TFRecordWriter(fn)\n for j in range(self._num_records):\n writer.write(self._record(i, j))\n writer.close()\n return filenames\n\n @combinations.generate(test_base.default_test_combinations())\n def testTFRecordDatasetConstructorErrorsTensorInput(self):\n with self.assertRaisesRegex(TypeError,\n \"filenames.*must be.*Tensor.*string\"):\n readers.TFRecordDataset([1, 2, 3])\n with self.assertRaisesRegex(TypeError,\n \"filenames.*must be.*Tensor.*string\"):\n readers.TFRecordDataset(constant_op.constant([1, 2, 3]))\n # convert_to_tensor raises different errors in graph and eager\n with self.assertRaises(Exception):\n readers.TFRecordDataset(object())\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadOneEpoch(self):\n # Basic test: read from file 0.\n dataset = self._dataset_factory(self.test_filenames[0])\n self.assertDatasetProduces(\n dataset,\n expected_output=[self._record(0, i) for i in range(self._num_records)])\n\n # Basic test: read from file 1.\n dataset = self._dataset_factory(self.test_filenames[1])\n self.assertDatasetProduces(\n dataset,\n expected_output=[self._record(1, i) for i in range(self._num_records)])\n\n # Basic test: read from both files.\n dataset = self._dataset_factory(self.test_filenames)\n expected_output = []\n for j in range(self._num_files):\n expected_output.extend(\n [self._record(j, i) for i in 
range(self._num_records)])\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadTenEpochs(self):\n dataset = self._dataset_factory(self.test_filenames, num_epochs=10)\n expected_output = []\n for j in range(self._num_files):\n expected_output.extend(\n [self._record(j, i) for i in range(self._num_records)])\n self.assertDatasetProduces(dataset, expected_output=expected_output * 10)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadTenEpochsOfBatches(self):\n dataset = self._dataset_factory(\n self.test_filenames, num_epochs=10, batch_size=self._num_records)\n expected_output = []\n for j in range(self._num_files):\n expected_output.append(\n [self._record(j, i) for i in range(self._num_records)])\n self.assertDatasetProduces(dataset, expected_output=expected_output * 10)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadZlibFiles(self):\n zlib_files = []\n for i, fn in enumerate(self.test_filenames):\n with open(fn, \"rb\") as f:\n cdata = zlib.compress(f.read())\n\n zfn = os.path.join(self.get_temp_dir(), \"tfrecord_%s.z\" % i)\n with open(zfn, \"wb\") as f:\n f.write(cdata)\n zlib_files.append(zfn)\n expected_output = []\n for j in range(self._num_files):\n expected_output.extend(\n [self._record(j, i) for i in range(self._num_records)])\n dataset = self._dataset_factory(zlib_files, compression_type=\"ZLIB\")\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadGzipFiles(self):\n gzip_files = []\n for i, fn in enumerate(self.test_filenames):\n with open(fn, \"rb\") as f:\n gzfn = os.path.join(self.get_temp_dir(), \"tfrecord_%s.gz\" % i)\n with gzip.GzipFile(gzfn, \"wb\") as gzf:\n gzf.write(f.read())\n gzip_files.append(gzfn)\n expected_output = []\n for j in range(self._num_files):\n expected_output.extend(\n [self._record(j, i) for i in range(self._num_records)])\n dataset = self._dataset_factory(gzip_files, compression_type=\"GZIP\")\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadWithBuffer(self):\n one_mebibyte = 2**20\n dataset = readers.TFRecordDataset(\n self.test_filenames, buffer_size=one_mebibyte)\n expected_output = []\n for j in range(self._num_files):\n expected_output.extend(\n [self._record(j, i) for i in range(self._num_records)])\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadFromDatasetOfFiles(self):\n files = dataset_ops.Dataset.from_tensor_slices(self.test_filenames)\n expected_output = []\n for j in range(self._num_files):\n expected_output.extend(\n [self._record(j, i) for i in range(self._num_records)])\n dataset = readers.TFRecordDataset(files)\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadTenEpochsFromDatasetOfFilesInParallel(self):\n files = dataset_ops.Dataset.from_tensor_slices(\n self.test_filenames).repeat(10)\n expected_output = []\n for j in range(self._num_files):\n expected_output.extend(\n [self._record(j, i) for i in range(self._num_records)])\n dataset = readers.TFRecordDataset(files, num_parallel_reads=4)\n self.assertDatasetProduces(\n dataset, expected_output=expected_output * 10, 
assert_items_equal=True)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
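A hedged usage sketch of the reader these tests cover, using only the public tf.data surface; the file names below are placeholders:

import tensorflow as tf

dataset = tf.data.TFRecordDataset(
    ["part-0.tfrecord", "part-1.tfrecord"],  # hypothetical file names
    compression_type="",                     # "" (none), "ZLIB" or "GZIP"
    buffer_size=2 ** 20,                     # optional read-ahead buffer, in bytes
    num_parallel_reads=4)                    # interleave records from several files
for raw_record in dataset.take(3):
  print(raw_record.numpy())                  # serialized record bytes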
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The Counter Dataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.data.experimental.ops import scan_ops\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"data.experimental.Counter\", v1=[])\ndef CounterV2(start=0, step=1, dtype=dtypes.int64):\n \"\"\"Creates a `Dataset` that counts from `start` in steps of size `step`.\n\n For example:\n\n ```python\n Dataset.count() == [0, 1, 2, ...)\n Dataset.count(2) == [2, 3, ...)\n Dataset.count(2, 5) == [2, 7, 12, ...)\n Dataset.count(0, -1) == [0, -1, -2, ...)\n Dataset.count(10, -1) == [10, 9, ...)\n ```\n\n Args:\n start: (Optional.) The starting value for the counter. Defaults to 0.\n step: (Optional.) The step size for the counter. Defaults to 1.\n dtype: (Optional.) The data type for counter elements. Defaults to\n `tf.int64`.\n\n Returns:\n A `Dataset` of scalar `dtype` elements.\n \"\"\"\n with ops.name_scope(\"counter\"):\n start = ops.convert_to_tensor(start, dtype=dtype, name=\"start\")\n step = ops.convert_to_tensor(step, dtype=dtype, name=\"step\")\n return dataset_ops.Dataset.from_tensors(0).repeat(None).apply(\n scan_ops.scan(start, lambda state, _: (state + step, state)))\n\n\n@tf_export(v1=[\"data.experimental.Counter\"])\ndef CounterV1(start=0, step=1, dtype=dtypes.int64):\n return dataset_ops.DatasetV1Adapter(CounterV2(start, step, dtype))\nCounterV1.__doc__ = CounterV2.__doc__\n\nif tf2.enabled():\n Counter = CounterV2\nelse:\n Counter = CounterV1\n",
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for metrics collecting in client.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nfrom tensorflow.python.distribute import multi_worker_test_base\nfrom tensorflow.python.distribute.client import client\nfrom tensorflow.python.distribute.client import metric_utils\nfrom tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.training.server_lib import ClusterSpec\n\n\nclass MetricUtilsTest(test.TestCase):\n\n def testClientMetrics(self):\n metric_utils.enable_metrics = True\n\n cluster_def = multi_worker_test_base.create_in_process_cluster(\n num_workers=1, num_ps=1, rpc_layer='grpc')\n cluster_def['chief'] = [\n 'localhost:%d' % multi_worker_test_base.pick_unused_port()\n ]\n cluster_resolver = SimpleClusterResolver(\n ClusterSpec(cluster_def), rpc_layer='grpc')\n cluster = client.Cluster(cluster_resolver)\n\n @def_function.function\n def func():\n time.sleep(0.5)\n return 3\n\n result = cluster.schedule(func, args=None, kwargs=None)\n result = cluster.schedule(func, args=None, kwargs=None)\n cluster.join()\n self.assertEqual(result._get_value().numpy(), 3)\n\n # Tracing, closure execution, and remote_value fetching should be executed\n # exactly once for running this function.\n metric_tracing = metric_utils.get_metric_summary('function_tracing')\n self.assertEqual(metric_tracing['num'], 1)\n # Tracing time should be longer than the sleep time in Python function.\n self.assertGreater(metric_tracing['sum'], 0.5)\n metric_closure = metric_utils.get_metric_summary('closure_execution')\n self.assertEqual(metric_closure['num'], 2)\n metric_remote_value = metric_utils.get_metric_summary('remote_value_fetch')\n self.assertEqual(metric_remote_value['num'], 2)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions used by multiple converter files.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport datetime\nimport sys\n\nfrom absl import logging\nimport six\nfrom six.moves import range\n\nimport flatbuffers\nfrom tensorflow.core.protobuf import config_pb2 as _config_pb2\nfrom tensorflow.core.protobuf import graph_debug_info_pb2\nfrom tensorflow.core.protobuf import meta_graph_pb2 as _meta_graph_pb2\nfrom tensorflow.lite.python import lite_constants as _lite_constants\nfrom tensorflow.lite.python import schema_py_generated as schema_fb\nfrom tensorflow.lite.python.op_hint import convert_op_hints_to_stubs\nfrom tensorflow.lite.python.op_hint import find_all_hinted_output_nodes\nfrom tensorflow.lite.toco import types_pb2 as _types_pb2\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import convert_to_constants as _convert_to_constants\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import error_interpolation as _error_interpolation\nfrom tensorflow.python.framework import graph_util as tf_graph_util\nfrom tensorflow.python.grappler import tf_optimizer\nfrom tensorflow.python.training.saver import export_meta_graph as _export_meta_graph\n\n# Map of tf.dtypes to TFLite types_flag_pb2.\n_MAP_TF_TO_TFLITE_TYPES = {\n dtypes.float32: _types_pb2.FLOAT,\n dtypes.float16: _types_pb2.FLOAT16,\n dtypes.int32: _types_pb2.INT32,\n dtypes.uint8: _types_pb2.QUANTIZED_UINT8,\n dtypes.int64: _types_pb2.INT64,\n dtypes.string: _types_pb2.STRING,\n dtypes.bool: _types_pb2.BOOL,\n dtypes.int16: _types_pb2.QUANTIZED_INT16,\n dtypes.complex64: _types_pb2.COMPLEX64,\n dtypes.int8: _types_pb2.INT8,\n dtypes.float64: _types_pb2.FLOAT64,\n dtypes.complex128: _types_pb2.COMPLEX128,\n}\n\n_MAP_TFLITE_ENUM_TO_TF_TYPES = {\n 0: dtypes.float32,\n 1: dtypes.float16,\n 2: dtypes.int32,\n 3: dtypes.uint8,\n 4: dtypes.int64,\n 5: dtypes.string,\n 6: dtypes.bool,\n 7: dtypes.int16,\n 8: dtypes.complex64,\n 9: dtypes.int8,\n 10: dtypes.float64,\n 11: dtypes.complex128,\n}\n\n_TFLITE_FILE_IDENTIFIER = b\"TFL3\"\n\n_TFLITE_MODEL_INPUT_OUTPUT_TYPES = (_lite_constants.FLOAT, _lite_constants.INT8,\n _lite_constants.QUANTIZED_UINT8)\n\n\ndef convert_dtype_to_tflite_type(tf_dtype):\n \"\"\"Converts tf.dtype to TFLite proto type.\n\n Args:\n tf_dtype: tf.dtype\n\n Raises:\n ValueError: Unsupported tf.dtype.\n\n Returns:\n types_flag_pb2.\n \"\"\"\n result = _MAP_TF_TO_TFLITE_TYPES.get(tf_dtype)\n if result is None:\n raise ValueError(\"Unsupported tf.dtype {0}\".format(tf_dtype))\n return result\n\n\ndef _convert_tflite_enum_type_to_tf_type(tflite_enum_type):\n \"\"\"Converts tflite enum type (eg: 0) to tf type (eg: tf.float32).\n\n Args:\n 
tflite_enum_type: tflite enum type (eg: 0, that corresponds to float32)\n\n Raises:\n ValueError: If an invalid tflite enum type is provided.\n\n Returns:\n tf type (eg: tf.float32)\n \"\"\"\n tf_type = _MAP_TFLITE_ENUM_TO_TF_TYPES.get(tflite_enum_type)\n if tf_type is None:\n raise ValueError(\n \"Unsupported enum {}. The valid map of enum to tf types is : {}\"\n .format(tflite_enum_type, _MAP_TFLITE_ENUM_TO_TF_TYPES))\n return tf_type\n\n\ndef _get_dtype_name(tf_type):\n \"\"\"Converts tf.dtype (eg: tf.float32) to str (eg: \"tf.float32\").\"\"\"\n return \"tf.\" + tf_type.name\n\n\ndef get_tensor_name(tensor):\n \"\"\"Returns name of the input tensor.\n\n Args:\n tensor: tf.Tensor\n\n Returns:\n str\n \"\"\"\n parts = six.ensure_str(tensor.name).split(\":\")\n if len(parts) > 2:\n raise ValueError(\"Tensor name invalid. Expect 0 or 1 colon, got {0}\".format(\n len(parts) - 1))\n\n # To be consistent with the tensor naming scheme in tensorflow, we need\n # drop the ':0' suffix for the first tensor.\n if len(parts) > 1 and parts[1] != \"0\":\n return tensor.name\n return parts[0]\n\n\ndef get_tensors_from_tensor_names(graph, tensor_names):\n \"\"\"Gets the Tensors associated with the `tensor_names` in the provided graph.\n\n Args:\n graph: TensorFlow Graph.\n tensor_names: List of strings that represent names of tensors in the graph.\n\n Returns:\n A list of Tensor objects in the same order the names are provided.\n\n Raises:\n ValueError:\n tensor_names contains an invalid tensor name.\n \"\"\"\n # Get the list of all of the tensors.\n tensor_name_to_tensor = {}\n for op in graph.get_operations():\n for tensor in op.values():\n tensor_name_to_tensor[get_tensor_name(tensor)] = tensor\n\n # Get the tensors associated with tensor_names.\n tensors = []\n invalid_tensors = []\n for name in tensor_names:\n if not isinstance(name, six.string_types):\n raise ValueError(\"Invalid type for a tensor name in the provided graph. \"\n \"Expected type for a tensor name is 'str', instead got \"\n \"type '{}' for tensor name '{}'\".format(\n type(name), name))\n\n tensor = tensor_name_to_tensor.get(name)\n if tensor is None:\n invalid_tensors.append(name)\n else:\n tensors.append(tensor)\n\n # Throw ValueError if any user input names are not valid tensors.\n if invalid_tensors:\n raise ValueError(\"Invalid tensors '{}' were found.\".format(\n \",\".join(invalid_tensors)))\n return tensors\n\n\ndef set_tensor_shapes(tensors, shapes):\n \"\"\"Sets Tensor shape for each tensor if the shape is defined.\n\n Args:\n tensors: TensorFlow ops.Tensor.\n shapes: Dict of strings representing input tensor names to list of\n integers representing input shapes (e.g., {\"foo\": : [1, 16, 16, 3]}).\n\n Raises:\n ValueError:\n `shapes` contains an invalid tensor.\n `shapes` contains an invalid shape for a valid tensor.\n \"\"\"\n if shapes:\n tensor_names_to_tensor = {\n get_tensor_name(tensor): tensor for tensor in tensors\n }\n for name, shape in shapes.items():\n if name not in tensor_names_to_tensor:\n raise ValueError(\"Invalid tensor \\'{}\\' found in tensor shapes \"\n \"map.\".format(name))\n if shape is not None:\n tensor = tensor_names_to_tensor[name]\n try:\n tensor.set_shape(shape)\n except ValueError as error:\n message = (\"The shape of tensor '{0}' cannot be changed from {1} to \"\n \"{2}. 
{3}\".format(name, tensor.shape, shape, str(error)))\n raise ValueError(message)\n\n\ndef get_grappler_config(optimizers_list):\n \"\"\"Creates a tf.compat.v1.ConfigProto for configuring Grappler.\n\n Args:\n optimizers_list: List of strings that represents the list of optimizers.\n\n Returns:\n tf.ConfigProto.\n \"\"\"\n config = _config_pb2.ConfigProto()\n rewrite_options = config.graph_options.rewrite_options\n for optimizer in optimizers_list:\n rewrite_options.optimizers.append(optimizer)\n return config\n\n\ndef run_graph_optimizations(graph_def,\n input_arrays,\n output_arrays,\n config,\n graph=None):\n \"\"\"Apply standard TensorFlow optimizations to the graph_def.\n\n Args:\n graph_def: Frozen GraphDef to be optimized.\n input_arrays: List of arrays that are considered inputs of the graph.\n output_arrays: List of arrays that are considered outputs of the graph.\n config: tf.ConfigProto.\n graph: TensorFlow Graph. Required when Eager mode is enabled. (default None)\n\n Returns:\n A new, optimized GraphDef.\n \"\"\"\n meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)\n\n signature = _meta_graph_pb2.SignatureDef()\n for array in input_arrays:\n signature.inputs[array.name].name = array.name\n signature.inputs[array.name].dtype = array.dtype.as_datatype_enum\n signature.inputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())\n\n for array in output_arrays:\n signature.outputs[array.name].name = array.name\n signature.outputs[array.name].dtype = array.dtype.as_datatype_enum\n signature.outputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())\n\n meta_graph.signature_def[\"not_used_key\"].CopyFrom(signature)\n\n # We need to add a collection called 'train_op' so that grappler\n # knows what the outputs are.\n fetch_collection = _meta_graph_pb2.CollectionDef()\n for array in input_arrays + output_arrays:\n fetch_collection.node_list.value.append(array.name)\n meta_graph.collection_def[\"train_op\"].CopyFrom(fetch_collection)\n\n return tf_optimizer.OptimizeGraph(config, meta_graph)\n\n\ndef _convert_op_hints_if_present(sess, graph_def, output_tensors,\n hinted_outputs_nodes):\n if is_frozen_graph(sess):\n raise ValueError(\"Try to convert op hints, needs unfrozen graph.\")\n output_arrays = [get_tensor_name(tensor) for tensor in output_tensors]\n graph_def = tf_graph_util.convert_variables_to_constants(\n sess, graph_def, output_arrays + hinted_outputs_nodes)\n graph_def = convert_op_hints_to_stubs(graph_def=graph_def)\n return graph_def\n\n\ndef freeze_graph(sess, input_tensors, output_tensors):\n \"\"\"Returns a frozen GraphDef.\n\n Runs a Grappler pass and freezes a graph with Variables in it. Otherwise the\n existing GraphDef is returned. 
The Grappler pass is only run on models that\n are frozen in order to inline the functions in the graph.\n If OpHints is present, it will try to convert the OpHint graph.\n\n Args:\n sess: TensorFlow Session.\n input_tensors: List of input tensors.\n output_tensors: List of output tensors (only .name is used from this).\n\n Returns:\n Frozen GraphDef.\n \"\"\"\n # Runs a Grappler pass in order to inline any functions in the graph.\n # Asides from inlining any simple function, Grappler will also try to lower\n # while loop into switch merge representation which is undesired for Ophints,\n # so we simply remove those attributes to prevent Grappler from doing so.\n graph_def = _convert_to_constants.disable_lower_using_switch_merge(\n sess.graph_def)\n config = get_grappler_config([\"function\"])\n graph_def = run_graph_optimizations(\n graph_def, input_tensors, output_tensors, config, graph=sess.graph)\n\n # If ophints are present, just convert them.\n hinted_outputs_nodes = find_all_hinted_output_nodes(sess)\n if hinted_outputs_nodes:\n return _convert_op_hints_if_present(sess, graph_def, output_tensors,\n hinted_outputs_nodes)\n\n if not is_frozen_graph(sess):\n output_node_names = [tensor.name.split(\":\")[0] for tensor in output_tensors]\n return tf_graph_util.convert_variables_to_constants(sess, graph_def,\n output_node_names)\n else:\n return sess.graph_def\n\n\ndef is_frozen_graph(sess):\n \"\"\"Determines if the graph is frozen.\n\n Determines if a graph has previously been frozen by checking for any\n operations of type Variable*. If variables are found, the graph is not frozen.\n\n Args:\n sess: TensorFlow Session.\n\n Returns:\n Bool.\n \"\"\"\n for op in sess.graph.get_operations():\n if six.ensure_str(op.type).startswith(\"Variable\") or six.ensure_str(\n op.type).endswith(\"VariableOp\"):\n return False\n return True\n\n\ndef build_debug_info_func(original_graph):\n \"\"\"Returns a method to retrieve the `GraphDebugInfo` from the original graph.\n\n Args:\n original_graph: The original `Graph` containing all the op stack traces.\n\n Returns:\n A function which retrieves the stack traces from the original graph and\n converts them to a `GraphDebugInfo` for a given set of nodes.\n \"\"\"\n\n def f(original_nodes):\n \"\"\"Function to create `GraphDebugInfo` for the given `original_nodes`.\"\"\"\n if not original_graph:\n return None\n # For the given nodes, gets all the op definitions in the original graph.\n useful_ops = []\n for func, name in original_nodes:\n try:\n if not func:\n useful_ops.append((func, original_graph.get_operation_by_name(name)))\n else:\n sub_func = original_graph._get_function(func) # pylint: disable=protected-access\n if isinstance(sub_func, function._EagerDefinedFunction): # pylint: disable=protected-access\n useful_ops.append(\n (func, sub_func.graph.get_operation_by_name(name)))\n else:\n sys.stderr.write(\n \"Use '@tf.function' or '@defun' to decorate the function.\")\n continue\n except KeyError:\n # New node created by graph optimizer. 
No stack trace from source code.\n continue\n # Convert all the op definitions to stack traces in terms of GraphDebugInfo.\n return _error_interpolation.create_graph_debug_info_def(useful_ops)\n\n return f\n\n\ndef convert_debug_info_func(saved_debug_info):\n \"\"\"Returns a method to retrieve the `GraphDebugInfo` from the original graph.\n\n Args:\n saved_debug_info: The `GraphDebugInfo` containing all the debug info.\n\n Returns:\n A function which retrieves the stack traces from the original graph and\n converts them to a `GraphDebugInfo` for a given set of nodes.\n \"\"\"\n\n def f(original_nodes):\n \"\"\"Function to create `GraphDebugInfo` for the given `original_nodes`.\"\"\"\n if not saved_debug_info:\n return None\n\n output_debug_info = graph_debug_info_pb2.GraphDebugInfo()\n # All the files are copied over, so the index wouldn't be changed.\n output_debug_info.files[:] = saved_debug_info.files\n # We only copy over the debug info for the input nodes\n for func, node in original_nodes:\n debug_key = node + \"@\" + func\n output_debug_info.traces[debug_key].CopyFrom(\n saved_debug_info.traces[debug_key])\n return output_debug_info\n\n return f\n\n\ndef get_debug_info(nodes_to_debug_info_func, converted_graph):\n \"\"\"Returns the debug info for the original nodes in the `converted_graph`.\n\n Args:\n nodes_to_debug_info_func: The method to collect the op debug info for the\n nodes.\n converted_graph: A `GraphDef` after optimization and transformation.\n\n Returns:\n `GraphDebugInfo` for all the original nodes in `converted_graph`.\n \"\"\"\n if not nodes_to_debug_info_func:\n return None\n\n # Collect all the debug info nodes from the converted_graph\n original_nodes = set()\n for node in converted_graph.node:\n debug_nodes = node.experimental_debug_info.original_node_names\n debug_funcs = node.experimental_debug_info.original_func_names\n # If the `original_node_names` are empty, uses the node name directly.\n if not debug_nodes:\n original_nodes.add((\"\", node.name))\n else:\n for i in range(len(debug_nodes)):\n debug_func = \"\" if i >= len(debug_funcs) else debug_funcs[i]\n original_nodes.add((debug_func, debug_nodes[i]))\n\n # Convert the nodes to the debug info proto object.\n return nodes_to_debug_info_func(original_nodes)\n\n\ndef convert_bytes_to_c_source(data,\n array_name,\n max_line_width=80,\n include_guard=None,\n include_path=None,\n use_tensorflow_license=False):\n \"\"\"Returns strings representing a C constant array containing `data`.\n\n Args:\n data: Byte array that will be converted into a C constant.\n array_name: String to use as the variable name for the constant array.\n max_line_width: The longest line length, for formatting purposes.\n include_guard: Name to use for the include guard macro definition.\n include_path: Optional path to include in the source file.\n use_tensorflow_license: Whether to include the standard TensorFlow Apache2\n license in the generated files.\n\n Returns:\n Text that can be compiled as a C source file to link in the data as a\n literal array of values.\n Text that can be used as a C header file to reference the literal array.\n \"\"\"\n\n starting_pad = \" \"\n array_lines = []\n array_line = starting_pad\n for value in bytearray(data):\n if (len(array_line) + 4) > max_line_width:\n array_lines.append(array_line + \"\\n\")\n array_line = starting_pad\n array_line += \" 0x%02x,\" % (value)\n if len(array_line) > len(starting_pad):\n array_lines.append(array_line + \"\\n\")\n array_values = \"\".join(array_lines)\n\n if 
include_guard is None:\n include_guard = \"TENSORFLOW_LITE_UTIL_\" + array_name.upper() + \"_DATA_H_\"\n\n if include_path is not None:\n include_line = \"#include \\\"{include_path}\\\"\\n\".format(\n include_path=include_path)\n else:\n include_line = \"\"\n\n if use_tensorflow_license:\n license_text = \"\"\"\n/* Copyright {year} The TensorFlow Authors. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n==============================================================================*/\n\"\"\".format(year=datetime.date.today().year)\n else:\n license_text = \"\"\n\n source_template = \"\"\"{license_text}\n// This is a TensorFlow Lite model file that has been converted into a C data\n// array using the tensorflow.lite.util.convert_bytes_to_c_source() function.\n// This form is useful for compiling into a binary for devices that don't have a\n// file system.\n\n{include_line}\n// We need to keep the data array aligned on some architectures.\n#ifdef __has_attribute\n#define HAVE_ATTRIBUTE(x) __has_attribute(x)\n#else\n#define HAVE_ATTRIBUTE(x) 0\n#endif\n#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))\n#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4)))\n#else\n#define DATA_ALIGN_ATTRIBUTE\n#endif\n\nconst unsigned char {array_name}[] DATA_ALIGN_ATTRIBUTE = {{\n{array_values}}};\nconst int {array_name}_len = {array_length};\n\"\"\"\n\n source_text = source_template.format(\n array_name=array_name,\n array_length=len(data),\n array_values=array_values,\n license_text=license_text,\n include_line=include_line)\n\n header_template = \"\"\"\n{license_text}\n\n// This is a TensorFlow Lite model file that has been converted into a C data\n// array using the tensorflow.lite.util.convert_bytes_to_c_source() function.\n// This form is useful for compiling into a binary for devices that don't have a\n// file system.\n\n#ifndef {include_guard}\n#define {include_guard}\n\nextern const unsigned char {array_name}[];\nextern const int {array_name}_len;\n\n#endif // {include_guard}\n\"\"\"\n\n header_text = header_template.format(\n array_name=array_name,\n include_guard=include_guard,\n license_text=license_text)\n\n return source_text, header_text\n\n\ndef _convert_model_from_bytearray_to_object(model_bytearray):\n \"\"\"Converts a tflite model from a bytearray into a parsable object.\"\"\"\n model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)\n model_object = schema_fb.ModelT.InitFromObj(model_object)\n model_object = copy.deepcopy(model_object)\n model_object.subgraphs[0].inputs[0] = model_object.subgraphs[0].inputs[0]\n return model_object\n\n\ndef _convert_model_from_object_to_bytearray(model_object):\n \"\"\"Converts a tflite model from a parsable object into a bytearray.\"\"\"\n # Initial size of the buffer, which will grow automatically if needed\n builder = flatbuffers.Builder(1024)\n model_offset = model_object.Pack(builder)\n builder.Finish(model_offset, file_identifier=_TFLITE_FILE_IDENTIFIER)\n return bytes(builder.Output())\n\n\ndef 
_remove_tensors_from_model(model, remove_tensors_idxs):\n \"\"\"Remove tensors from model.\"\"\"\n if not remove_tensors_idxs:\n return\n if len(model.subgraphs) > 1:\n raise ValueError(\"Model must only have one subgraph. Instead, it has \"\n \"{} subgraphs.\".format(len(model.subgraphs)))\n subgraph = model.subgraphs[0]\n tensors = subgraph.tensors\n operators = subgraph.operators\n\n logging.debug(\"Removing tensors at indices : %s\", remove_tensors_idxs)\n # An optimized check to validate if \"remove_tensors_idxs\" (eg: [4,5,6]) is an\n # exact subset, with ordering, of \"tensors\" indices (eg: [0,1,2,3,4,5,6]).\n if min(remove_tensors_idxs) == len(tensors) - len(remove_tensors_idxs):\n logging.debug(\"Removing tensors only at the end of the tensor list\")\n del tensors[min(remove_tensors_idxs):]\n else:\n logging.debug(\"Removing tensors requires updating the model\")\n # Map the old tensor indices to new tensor indices\n d_old_to_new_tensors = {}\n left_shift_by = 0\n for idx in range(len(tensors)):\n if idx in remove_tensors_idxs:\n left_shift_by += 1\n else:\n d_old_to_new_tensors[idx] = idx - left_shift_by\n logging.debug(\"Old to new tensors map: %s\", d_old_to_new_tensors.__str__())\n # Update tensor indices referenced throughout the model\n def update_tensors(tensor_idxs):\n for i, ti in enumerate(tensor_idxs):\n tensor_idxs[i] = d_old_to_new_tensors.get(ti, -1)\n update_tensors(subgraph.inputs)\n update_tensors(subgraph.outputs)\n for op in operators:\n update_tensors(op.inputs)\n update_tensors(op.outputs)\n # Delete the tensors\n for idx in sorted(remove_tensors_idxs, reverse=True):\n tensors.pop(idx)\n logging.debug(\"Removed tensors marked for deletion\")\n\n\ndef _validate_and_find_int8_quantized_inputs_outputs(model):\n \"\"\"Validate that model input is quantized and output is dequantized.\"\"\"\n if len(model.subgraphs) > 1:\n raise ValueError(\"Model must only have one subgraph. Instead, it has \"\n \"{} subgraphs.\".format(len(model.subgraphs)))\n subgraph = model.subgraphs[0]\n tensors = subgraph.tensors\n operators = subgraph.operators\n\n # Ensure model has atleast one quantize and dequantize operator\n quant_opcode_idx, dequant_opcode_idx = None, None\n for idx, opcode in enumerate(model.operatorCodes):\n if opcode.builtinCode == schema_fb.BuiltinOperator.QUANTIZE:\n quant_opcode_idx = idx\n elif opcode.builtinCode == schema_fb.BuiltinOperator.DEQUANTIZE:\n dequant_opcode_idx = idx\n if quant_opcode_idx is not None and dequant_opcode_idx is not None:\n break\n if quant_opcode_idx is None and dequant_opcode_idx is None:\n raise ValueError(\"Model is not integer quantized as it does not \"\n \"contain quantize/dequantize operators.\")\n\n # Ensure model inputs and outputs are integer quantized\n input_quant_ops, output_dequant_ops = [], []\n for op in operators:\n # Find input quantize operator\n if op.opcodeIndex == quant_opcode_idx and op.inputs[0] in subgraph.inputs:\n pos, float_tensor, int_tensor = \\\n \"input\", tensors[op.inputs[0]], tensors[op.outputs[0]]\n input_quant_ops.append(op)\n # Find output dequantize operator\n elif op.opcodeIndex == dequant_opcode_idx and \\\n op.outputs[0] in subgraph.outputs:\n pos, float_tensor, int_tensor = \\\n \"output\", tensors[op.outputs[0]], tensors[op.inputs[0]]\n output_dequant_ops.append(op)\n # Otherwise, ignore\n else:\n continue\n # If found, validate the input/output tensor type\n if float_tensor.type != schema_fb.TensorType.FLOAT32:\n raise ValueError(\n \"Model {} type must be tf.float32. 
Expected type for tensor with \"\n \"name '{}' is tf.float32, instead type is tf.{}\".format(\n pos, float_tensor.name,\n _convert_tflite_enum_type_to_tf_type(float_tensor.type).name))\n if int_tensor.type != schema_fb.TensorType.INT8:\n raise ValueError(\n \"Model is not integer quantized. Expected type for tensor with \"\n \"name '{}' is tf.int8, instead type is tf.{}\".format(\n int_tensor.name,\n _convert_tflite_enum_type_to_tf_type(int_tensor.type).name))\n\n return input_quant_ops, output_dequant_ops\n\n\ndef modify_integer_quantized_model_io_type(\n model, inference_input_type=_lite_constants.FLOAT,\n inference_output_type=_lite_constants.FLOAT):\n \"\"\"Modify the float input/output type of an integer quantized model.\n\n Args:\n model: An int8 quantized tflite model with float input and output.\n inference_input_type: tf.DType representing final input type.\n (default tf.float32)\n inference_output_type: tf.DType representing final output type.\n (default tf.float32)\n\n Returns:\n An int8 quantized tflite model with modified input and/or output type.\n\n Raises:\n ValueError: If the model is not int8 quantized or the inference_input_type\n and/or inference_input_type is unsupported.\n RuntimeError: If the modification was unsuccessful.\n\n \"\"\"\n # Return if input and output types default to float\n if inference_input_type == _lite_constants.FLOAT and \\\n inference_output_type == _lite_constants.FLOAT:\n return model\n\n # Validate input and output types\n if inference_input_type not in _TFLITE_MODEL_INPUT_OUTPUT_TYPES:\n raise ValueError(\"The `inference_input_type` should be in {}\".format(\n tuple(_get_dtype_name(t) for t in _TFLITE_MODEL_INPUT_OUTPUT_TYPES)))\n if inference_output_type not in _TFLITE_MODEL_INPUT_OUTPUT_TYPES:\n raise ValueError(\"The `inference_output_type` should be in {}\".format(\n tuple(_get_dtype_name(t) for t in _TFLITE_MODEL_INPUT_OUTPUT_TYPES)))\n\n logging.debug((\"Attempting to modify the model input from tf.float32 to %s \"\n \"and output from tf.float32 to %s\"),\n _get_dtype_name(inference_input_type),\n _get_dtype_name(inference_output_type))\n # Convert the model to an object\n model = _convert_model_from_bytearray_to_object(model)\n\n # Validate the integer quantized model\n input_quant_ops, output_dequant_ops = \\\n _validate_and_find_int8_quantized_inputs_outputs(model)\n\n # Initialize references and variables\n if len(model.subgraphs) > 1:\n raise ValueError(\"Model must only have one subgraph. 
Instead, it has \"\n \"{} subgraphs.\".format(len(model.subgraphs)))\n subgraph = model.subgraphs[0]\n tensors = subgraph.tensors\n operators = subgraph.operators\n remove_tensors_idxs = set()\n\n # Modify model input type\n if inference_input_type == _lite_constants.QUANTIZED_UINT8:\n # Change quant op (float to int8) to quant op (uint8 to int8)\n for op in input_quant_ops:\n int8_quantization = tensors[op.outputs[0]].quantization\n uint8_quantization = schema_fb.QuantizationParametersT()\n uint8_quantization.scale = [int8_quantization.scale[0]]\n uint8_quantization.zeroPoint = [int8_quantization.zeroPoint[0] + 128]\n tensors[op.inputs[0]].quantization = uint8_quantization\n tensors[op.inputs[0]].type = schema_fb.TensorType.UINT8\n elif inference_input_type == _lite_constants.INT8:\n # Remove the inputs and the quant operator\n for op in input_quant_ops:\n subgraph.inputs[subgraph.inputs == op.inputs[0]] = op.outputs[0]\n remove_tensors_idxs.add(op.inputs[0])\n operators.remove(op)\n\n # Modify model output type\n if inference_output_type == _lite_constants.QUANTIZED_UINT8:\n # Change dequant op (int8 to float) to quant op (int8 to uint8)\n for op in output_dequant_ops:\n op.opcodeIndex = input_quant_ops[0].opcodeIndex\n int8_quantization = tensors[op.inputs[0]].quantization\n uint8_quantization = schema_fb.QuantizationParametersT()\n uint8_quantization.scale = [int8_quantization.scale[0]]\n uint8_quantization.zeroPoint = [int8_quantization.zeroPoint[0] + 128]\n tensors[op.outputs[0]].quantization = uint8_quantization\n tensors[op.outputs[0]].type = schema_fb.TensorType.UINT8\n elif inference_output_type == _lite_constants.INT8:\n # Remove the outputs and the dequant operator\n for op in output_dequant_ops:\n subgraph.outputs[subgraph.outputs == op.outputs[0]] = op.inputs[0]\n remove_tensors_idxs.add(op.outputs[0])\n operators.remove(op)\n\n # Remove tensors marked for deletion.\n _remove_tensors_from_model(model, remove_tensors_idxs)\n\n # Convert the model to a bytearray\n model = _convert_model_from_object_to_bytearray(model)\n\n return model\n",
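A minimal, dependency-free sketch of the tensor-name convention used by get_tensor_name() in the converter utilities above: the default ":0" output suffix is dropped, any other explicit output index is kept, and names with more than one colon are rejected. This is an illustrative re-implementation for readers without TensorFlow installed, not the TensorFlow Lite API itself.

def tensor_name_sketch(name):
  # Mirrors get_tensor_name() above, but takes a plain string instead of a tf.Tensor.
  parts = name.split(":")
  if len(parts) > 2:
    raise ValueError("Tensor name invalid. Expect 0 or 1 colon, got {0}".format(
        len(parts) - 1))
  # Keep an explicit non-zero output index; drop the default ":0" suffix.
  if len(parts) > 1 and parts[1] != "0":
    return name
  return parts[0]

assert tensor_name_sketch("dense/BiasAdd:0") == "dense/BiasAdd"
assert tensor_name_sketch("dense/BiasAdd:1") == "dense/BiasAdd:1"
assert tensor_name_sketch("Placeholder") == "Placeholder"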
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for sparse_ops.sparse_tensor_dense_matmul.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport time\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import test\n\n\ndef _maybe_complex(x):\n if x.dtype.kind == \"c\": # complex\n return (x + 1j * x) / 2\n return x\n\n\nclass SparseTensorDenseMatMulTest(test.TestCase):\n\n def _testMatmul(self,\n x,\n y,\n adjoint_a=False,\n adjoint_b=False,\n indices_dtype=np.int64):\n x_mat = np.matrix(x)\n if adjoint_a:\n x_mat = x_mat.H\n y_mat = np.matrix(y)\n if adjoint_b:\n y_mat = y_mat.H\n\n np_ans = x_mat * y_mat\n\n x_indices = np.vstack(np.where(x)).astype(indices_dtype).T\n x_values = x[np.where(x)]\n x_shape = x.shape\n\n with self.cached_session(use_gpu=True):\n sp_x_value = sparse_tensor.SparseTensorValue(\n indices=x_indices, values=x_values, dense_shape=x_shape)\n tf_value_ans = sparse_ops.sparse_tensor_dense_matmul(\n sp_x_value, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)\n tf_tensor_ans = sparse_ops.sparse_tensor_dense_matmul(\n sparse_tensor.SparseTensor.from_value(sp_x_value),\n y,\n adjoint_a=adjoint_a,\n adjoint_b=adjoint_b)\n\n # Ensure that the RHS shape is known at least.\n self.assertEqual(tf_value_ans.get_shape()[1], np_ans.shape[1])\n self.assertEqual(tf_tensor_ans.get_shape()[1], np_ans.shape[1])\n\n for out in (self.evaluate(tf_value_ans), self.evaluate(tf_tensor_ans)):\n if x.dtype == np.float32:\n self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-4)\n elif x.dtype == np.float64:\n self.assertAllClose(np_ans, out, rtol=1e-6, atol=1e-6)\n else:\n self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-4)\n\n def _testBasic(self, value_dtype, indices_dtype=np.int64):\n x = _maybe_complex(np.random.rand(10, 10).astype(value_dtype))\n x[np.abs(x) < 0.5] = 0 # Make it sparse\n\n y = _maybe_complex(np.random.randn(10, 20).astype(value_dtype))\n\n self._testMatmul(x, y, indices_dtype=indices_dtype)\n\n @test_util.run_deprecated_v1\n def testBasic(self):\n np.random.seed(127) # Repeatable results\n self._testBasic(np.int32)\n self._testBasic(np.float32)\n self._testBasic(np.float64)\n self._testBasic(np.complex64)\n 
self._testBasic(np.complex128)\n self._testBasic(np.int32, indices_dtype=np.int32)\n self._testBasic(np.float32, indices_dtype=np.int32)\n\n @test_util.run_deprecated_v1\n def testShapeInference(self):\n x = np.random.rand(10, 10)\n x[np.abs(x) < 0.5] = 0 # Make it sparse\n y = np.random.randn(10, 20)\n x_indices = np.vstack(np.where(x)).astype(np.int64).T\n x_values = x[np.where(x)]\n x_shape = x.shape\n x_st = sparse_tensor.SparseTensor(x_indices, x_values, x_shape)\n result = sparse_ops.sparse_tensor_dense_matmul(x_st, y)\n self.assertEqual(result.get_shape(), (10, 20))\n\n x_shape_unknown = array_ops.placeholder(dtype=dtypes.int64, shape=None)\n x_st_shape_unknown = sparse_tensor.SparseTensor(x_indices, x_values,\n x_shape_unknown)\n result_left_shape_unknown = sparse_ops.sparse_tensor_dense_matmul(\n x_st_shape_unknown, y)\n self.assertEqual(result_left_shape_unknown.get_shape().as_list(),\n [None, 20])\n\n x_shape_inconsistent = [10, 15]\n x_st_shape_inconsistent = sparse_tensor.SparseTensor(x_indices, x_values,\n x_shape_inconsistent)\n with self.assertRaisesRegex(ValueError, \"Dimensions must be equal\"):\n sparse_ops.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)\n\n @test_util.deprecated_graph_mode_only\n def testInvalidIndicesForSparseTensorDenseMatmul(self):\n # Note: use_gpu=False because nice errors are only returned from CPU kernel.\n with self.session(use_gpu=False):\n indices = np.matrix([[1, 10]]).astype(np.int64)\n values = np.array([10]).astype(np.float32)\n shape = [3, 2]\n sparse_t = sparse_tensor.SparseTensor(indices, values, shape)\n\n # Test multiplying by both a small and large dense matrix, to hit\n # both cases in the kernel.\n dense_t = np.matrix([[1] * 5, [2] * 5], dtype=np.float32)\n with self.assertRaisesOpError(\n \"k .10. from index.0,1. out of bounds .>=2.\"):\n self.evaluate(sparse_ops.sparse_tensor_dense_matmul(sparse_t, dense_t))\n dense_t = np.matrix([[1] * 500, [2] * 500], dtype=np.float32)\n with self.assertRaisesOpError(\n \"k .10. from index.0,1. out of bounds .>=2.\"):\n self.evaluate(sparse_ops.sparse_tensor_dense_matmul(sparse_t, dense_t))\n\n # Repeat with adjoint_a, to get a different error.\n dense_t = np.matrix([[1] * 5, [2] * 5, [3] * 5], dtype=np.float32)\n with self.assertRaisesOpError(\n \"m .10. from index.0,1. out of bounds .>=2.\"):\n self.evaluate(\n sparse_ops.sparse_tensor_dense_matmul(\n sparse_t, dense_t, adjoint_a=True))\n dense_t = np.matrix([[1] * 500, [2] * 500, [3] * 500], dtype=np.float32)\n with self.assertRaisesOpError(\n \"m .10. from index.0,1. 
out of bounds .>=2.\"):\n self.evaluate(\n sparse_ops.sparse_tensor_dense_matmul(\n sparse_t, dense_t, adjoint_a=True))\n\n def testInvalidIndicesForSparseTensorDenseMatmulOnGPU(self):\n # Note: use_gpu=False because nice errors are only returned from CPU kerne\n if not test.is_gpu_available():\n return\n with self.session(use_gpu=True):\n indices = np.array([[1, 10]]).astype(np.int64)\n values = np.array([10]).astype(np.float32)\n shape = [3, 2]\n sparse_t = sparse_tensor.SparseTensor(indices, values, shape)\n\n # Test multiplying by both a small and large dense matrix, to hit\n # both cases in the kernel.\n dense_t = np.matrix([[1] * 5, [2] * 5], dtype=np.float32)\n expected_t = np.array([[0] * 5, [np.nan] * 5, [0] * 5], dtype=np.float32)\n self.assertAllClose(expected_t,\n sparse_ops.sparse_tensor_dense_matmul(\n sparse_t, dense_t))\n dense_t = np.matrix([[1] * 500, [2] * 500], dtype=np.float32)\n expected_t = np.array(\n [[0] * 500, [np.nan] * 500, [0] * 500], dtype=np.float32)\n self.assertAllClose(expected_t,\n sparse_ops.sparse_tensor_dense_matmul(\n sparse_t, dense_t))\n\n # Repeat with adjoint_a, now the error is that the sparse index\n # is OOO w.r.t. the output. The GPU kernel can't do much here,\n # so it just doesn't accumulate.\n\n dense_t = np.matrix([[1] * 5, [2] * 5, [3] * 5], dtype=np.float32)\n expected_t = np.array([[0] * 5, [0] * 5], dtype=np.float32)\n self.assertAllClose(expected_t,\n sparse_ops.sparse_tensor_dense_matmul(\n sparse_t, dense_t, adjoint_a=True))\n\n dense_t = np.matrix([[1] * 500, [2] * 500, [3] * 500], dtype=np.float32)\n expected_t = np.array([[0] * 500, [0] * 500], dtype=np.float32)\n self.assertAllClose(expected_t,\n sparse_ops.sparse_tensor_dense_matmul(\n sparse_t, dense_t, adjoint_a=True))\n\n # Tests setting one dimension to be a high value.\n def _testLarge(self, np_dtype):\n r1 = np.random.randint(6000, 20000)\n r2 = np.random.randint(1, 10)\n r3 = np.random.randint(1, 10)\n\n for m, k, n in [(r1, r2, r3),\n (r2, r1, r3),\n (r2, r3, r1)]:\n x = _maybe_complex(np.random.rand(m, k).astype(np_dtype))\n x[np.abs(x) < 0.8] = 0\n\n y = _maybe_complex(np.random.randn(k, n).astype(np_dtype))\n\n self._testMatmul(x, y, adjoint_a=False, adjoint_b=False)\n self._testMatmul(x.transpose(), y, adjoint_a=True, adjoint_b=False)\n self._testMatmul(x, y.transpose(), adjoint_a=False, adjoint_b=True)\n self._testMatmul(\n x.transpose(), y.transpose(), adjoint_a=True, adjoint_b=True)\n\n np.random.seed(127) # Repeatable results\n self._testLarge(np.float32)\n self._testLarge(np.float64)\n self._testLarge(np.complex64)\n self._testLarge(np.complex128)\n\n # Tests random sized matrices.\n @test_util.run_deprecated_v1\n def testFloatRandom(self):\n np.random.seed(127) # Repeatable results\n for _ in range(8):\n for adjoint_a in [True, False]:\n for adjoint_b in [True, False]:\n for thresh in [0.0, 0.2, 0.8, 1.0]:\n n, k, m = np.random.randint(1, 100, size=3)\n x = np.random.rand(n, k).astype(np.float32)\n x[x < thresh] = 0 # Make it sparse\n y = np.random.randn(k, m).astype(np.float32)\n x = x.transpose() if adjoint_a else x\n y = y.transpose() if adjoint_b else y\n self._testMatmul(x, y, adjoint_a, adjoint_b)\n\n\ndef _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(x, y, adjoint_a,\n adjoint_b):\n\n def body(t, prev):\n with ops.control_dependencies([prev]):\n return (t + 1, math_ops.matmul(\n x,\n y,\n transpose_a=adjoint_a,\n transpose_b=adjoint_b,\n a_is_sparse=True,\n b_is_sparse=False))\n\n t0 = constant_op.constant(0)\n v0 = constant_op.constant(0.0)\n\n 
def _timeit(iterations, _):\n (_, final) = control_flow_ops.while_loop(\n lambda t, _: t < iterations,\n body, (t0, v0),\n parallel_iterations=1,\n back_prop=False,\n shape_invariants=(tensor_shape.TensorShape(()),\n tensor_shape.TensorShape(None)))\n return [final]\n\n return _timeit\n\n\ndef _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(x_ind, x_val, x_shape,\n y, adjoint_a,\n adjoint_b):\n sp_x = sparse_tensor.SparseTensor(\n indices=x_ind, values=x_val, dense_shape=x_shape)\n\n def body(t, prev):\n with ops.control_dependencies([prev]):\n return (t + 1, sparse_ops.sparse_tensor_dense_matmul(\n sp_x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b))\n\n t0 = constant_op.constant(0)\n v0 = constant_op.constant(0.0)\n\n def _timeit(iterations, _):\n (_, final) = control_flow_ops.while_loop(\n lambda t, _: t < iterations,\n body, (t0, v0),\n parallel_iterations=1,\n back_prop=False,\n shape_invariants=(tensor_shape.TensorShape(()),\n tensor_shape.TensorShape(None)))\n return [final]\n\n return _timeit\n\n\ndef sparse_tensor_dense_vs_dense_matmul_benchmark(thresh,\n m,\n k,\n n,\n adjoint_a,\n adjoint_b,\n use_gpu,\n skip_dense=False):\n config = config_pb2.ConfigProto()\n config.allow_soft_placement = True\n\n # Configurable for benchmarking:\n # config.intra_op_parallelism_threads = 100\n # config.gpu_options.per_process_gpu_memory_fraction = 0.3\n\n np.random.seed([6, 117]) # Reproducibility\n x = np.random.rand(m, k).astype(np.float32)\n x[x < thresh] = 0\n y = np.random.randn(k, n).astype(np.float32)\n if adjoint_a:\n x = x.T\n if adjoint_b:\n y = y.T\n\n def _timer(sess, ops_fn, iterations):\n # Warm in\n sess.run(ops_fn(10, sess))\n\n # Timing run\n start = time.time()\n sess.run(ops_fn(iterations, sess))\n end = time.time()\n\n return (end - start) / (1.0 * iterations) # Average runtime per iteration\n\n # Using regular matmul, marking one of the matrices as dense.\n if skip_dense:\n delta_dense = float(\"nan\")\n else:\n with session.Session(config=config, graph=ops.Graph()) as sess:\n if not use_gpu:\n with ops.device(\"/cpu:0\"):\n x_t = constant_op.constant(x)\n y_t = constant_op.constant(y)\n ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(\n x_t, y_t, adjoint_a, adjoint_b)\n else:\n with ops.device(\"/device:GPU:0\"):\n x_t = constant_op.constant(x)\n y_t = constant_op.constant(y)\n ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(\n x_t, y_t, adjoint_a, adjoint_b)\n delta_dense = _timer(sess, ops_fn, 200)\n\n # Using sparse_tensor_dense_matmul.\n with session.Session(\"\", config=config, graph=ops.Graph()) as sess:\n if not use_gpu:\n with ops.device(\"/cpu:0\"):\n x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)\n x_val = constant_op.constant(x[np.where(x)])\n x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))\n y_t = constant_op.constant(y)\n ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(\n x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)\n else:\n with ops.device(\"/device:GPU:0\"):\n x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)\n x_val = constant_op.constant(x[np.where(x)])\n x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))\n y_t = constant_op.constant(y)\n ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(\n x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)\n delta_sparse = _timer(sess, ops_fn, 200)\n\n print(\"%g \\t %d \\t %s \\t %d \\t %d \\t %g \\t %g \\t %g\" %\n (1 - thresh, n, use_gpu, m, k, delta_dense, delta_sparse,\n 
delta_sparse / delta_dense))\n\n\ndef main(_):\n print(\"DenseDense MatMul (w/ Sparse Flag) vs. SparseTensorDense MatMul\")\n print(\"Matrix sizes:\")\n print(\" A sparse [m, k] with % nonzero values between 1% and 80%\")\n print(\" B dense [k, n]\")\n print(\"\")\n print(\"% nnz \\t n \\t gpu \\t m \\t k \\t dt(dense) \\t dt(sparse) \"\n \"\\t dt(sparse)/dt(dense)\")\n\n for thresh in (0.99, 0.8, 0.5, 0.2):\n for n in (50, 100):\n for use_gpu in (True, False):\n for m in (100, 1000):\n for k in (100, 1000):\n sparse_tensor_dense_vs_dense_matmul_benchmark(\n thresh, m, k, n, False, False, use_gpu=use_gpu)\n\n # Enable for large scale benchmarks, these ones take a long time to run.\n #\n # for use_gpu in (True, False):\n # sparse_tensor_dense_vs_dense_matmul_benchmark(\n # thresh=0.99, m=1000000, k=1000, n=100, adjoint_a=False,\n # adjoint_b=False, use_gpu=use_gpu, skip_dense=True)\n\n\nif __name__ == \"__main__\":\n if \"--benchmarks\" in sys.argv:\n sys.argv.remove(\"--benchmarks\")\n app.run()\n else:\n test.main()\n",
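For orientation, the following NumPy-only sketch shows the operation the tests above exercise: a sparse matrix given in COO form (indices, values, dense_shape) multiplied against a dense matrix, which matches the result of sparse_ops.sparse_tensor_dense_matmul on the same data. The index/value construction mirrors _testMatmul above; this is an illustration, not the TensorFlow kernel.

import numpy as np

def coo_dense_matmul(indices, values, dense_shape, y):
  # Densify the COO representation and multiply; equivalent in result to
  # sparse_tensor_dense_matmul on the same inputs (without its memory savings).
  x = np.zeros(dense_shape, dtype=y.dtype)
  x[indices[:, 0], indices[:, 1]] = values
  return x @ y

np.random.seed(127)  # Repeatable results, as in the tests above
x = np.random.rand(4, 3)
x[x < 0.5] = 0  # Make it sparse
y = np.random.randn(3, 2)
x_indices = np.vstack(np.where(x)).astype(np.int64).T  # same construction as the tests
x_values = x[np.where(x)]
np.testing.assert_allclose(coo_dense_matmul(x_indices, x_values, x.shape, y), x @ y)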
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Tests for tensorflow.python.training.saver.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops as ops_lib\nfrom tensorflow.python.keras.engine import training\nfrom tensorflow.python.keras.layers import core\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import adam\nfrom tensorflow.python.training import saver as saver_module\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.training.tracking import util as trackable_utils\n\n\nclass NonLayerTrackable(module.Module):\n\n def __init__(self):\n super(NonLayerTrackable, self).__init__()\n self.a_variable = trackable_utils.add_variable(\n self, name=\"a_variable\", shape=[])\n\n\nclass MyModel(training.Model):\n \"\"\"A concrete Model for testing.\"\"\"\n\n def __init__(self):\n super(MyModel, self).__init__()\n self._named_dense = core.Dense(1, use_bias=True)\n self._second = core.Dense(1, use_bias=False)\n # We can still track Trackables which aren't Layers.\n self._non_layer = NonLayerTrackable()\n\n def call(self, values):\n ret = self._second(self._named_dense(values))\n return ret\n\n\nclass TrackableCompatibilityTests(test.TestCase):\n\n def _initialized_model(self):\n input_value = constant_op.constant([[3.]])\n model = MyModel()\n optimizer = adam.AdamOptimizer(0.001)\n optimizer_step = training_util.get_or_create_global_step()\n root_trackable = trackable_utils.Checkpoint(\n optimizer=optimizer, model=model, optimizer_step=optimizer_step)\n train_op = optimizer.minimize(\n functools.partial(model, input_value),\n global_step=optimizer_step)\n self.evaluate(trackable_utils.gather_initializers(\n root_trackable))\n self.evaluate(train_op)\n # A regular variable, a slot variable, and a non-slot Optimizer variable\n # with known values to check when loading.\n self.evaluate(model._named_dense.bias.assign([1.]))\n self.evaluate(optimizer.get_slot(\n var=model._named_dense.bias, name=\"m\").assign([2.]))\n beta1_power, _ = optimizer._get_beta_accumulators()\n self.evaluate(beta1_power.assign(3.))\n return root_trackable\n\n def _set_sentinels(self, root_trackable):\n self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))\n self.evaluate(\n root_trackable.optimizer.get_slot(\n var=root_trackable.model._named_dense.bias, name=\"m\")\n .assign([102.]))\n beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()\n self.evaluate(beta1_power.assign(103.))\n\n def _check_sentinels(self, 
root_trackable):\n self.assertAllEqual(\n [1.], self.evaluate(root_trackable.model._named_dense.bias))\n self.assertAllEqual([2.], self.evaluate(\n root_trackable.optimizer.get_slot(\n var=root_trackable.model._named_dense.bias, name=\"m\")))\n beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()\n self.assertAllEqual(3., self.evaluate(beta1_power))\n\n def testLoadFromObjectBasedGraph(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n save_graph = ops_lib.Graph()\n with save_graph.as_default(), self.session(graph=save_graph) as sess:\n root = self._initialized_model()\n object_saver = trackable_utils.Checkpoint(root=root)\n save_path = object_saver.save(file_prefix=checkpoint_prefix)\n\n # An incompatible object-based checkpoint to check error messages\n var = variables.Variable(1., name=\"a\")\n self.evaluate(var.initializer)\n second_saver = trackable_utils.Checkpoint(v=var)\n second_path = second_saver.save(file_prefix=os.path.join(\n checkpoint_directory, \"second\"))\n\n restore_graph = ops_lib.Graph()\n with restore_graph.as_default(), self.session(\n graph=restore_graph) as sess:\n root = self._initialized_model()\n self._set_sentinels(root)\n saver = saver_module.Saver()\n saver.restore(sess=sess, save_path=save_path)\n self._check_sentinels(root)\n before_second_restore_ops = restore_graph.get_operations()\n # Test that multiple restores do not pollute the graph\n saver.restore(sess=sess, save_path=save_path)\n self.assertEqual(before_second_restore_ops,\n restore_graph.get_operations())\n with self.assertRaisesRegex(errors.NotFoundError,\n \"Could not find some variables\"):\n saver.restore(sess=sess, save_path=second_path)\n\n def testLoadFromObjectBasedEager(self):\n checkpoint_directory = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_directory, \"ckpt\")\n\n save_graph = ops_lib.Graph()\n with save_graph.as_default(), self.session(graph=save_graph):\n root = self._initialized_model()\n object_saver = trackable_utils.Checkpoint(root=root)\n save_path = object_saver.save(file_prefix=checkpoint_prefix)\n\n with context.eager_mode():\n root = self._initialized_model()\n self._set_sentinels(root)\n saver = saver_module.Saver(\n root.model.variables + root.optimizer.variables())\n saver.restore(sess=None, save_path=save_path)\n self._check_sentinels(root)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
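As a quick reference for the object-based checkpoints these tests restore through a name-based Saver, here is a hedged, self-contained sketch using the public tf.train.Checkpoint API. It assumes TensorFlow 2.x with eager execution and is not the internal test fixture above.

import os
import tempfile

import tensorflow as tf

v = tf.Variable(1., name="a")
ckpt = tf.train.Checkpoint(v=v)
prefix = os.path.join(tempfile.mkdtemp(), "ckpt")
save_path = ckpt.save(file_prefix=prefix)

v.assign(101.)  # Clobber the value, in the spirit of _set_sentinels above
status = ckpt.restore(save_path)
status.assert_consumed()  # Every saved object was matched and restored
assert v.numpy() == 1.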
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for model_coverage_lib.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nimport numpy as np\n\nfrom tensorflow.lite.python import lite\nfrom tensorflow.lite.testing.model_coverage import model_coverage_lib as model_coverage\nfrom tensorflow.python import keras\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import saved_model\nfrom tensorflow.python.training.training_util import write_graph\n\n\nclass EvaluateFrozenGraph(test.TestCase):\n\n def _saveFrozenGraph(self, sess):\n graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')\n write_graph(sess.graph_def, '', graph_def_file, False)\n return graph_def_file\n\n def testFloat(self):\n with ops.Graph().as_default():\n with session.Session().as_default() as sess:\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + in_tensor\n\n filename = self._saveFrozenGraph(sess)\n model_coverage.test_frozen_graph(filename, ['Placeholder'], ['add'])\n\n def testInputWithRange(self):\n with ops.Graph().as_default():\n with session.Session().as_default() as sess:\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + in_tensor\n\n filename = self._saveFrozenGraph(sess)\n model_coverage.test_frozen_graph(\n filename, ['Placeholder'], ['add'],\n input_data_range={'Placeholder': (0, 10)})\n\n def testMultipleOutputs(self):\n with ops.Graph().as_default():\n with session.Session().as_default() as sess:\n in_tensor_1 = array_ops.placeholder(\n shape=[1, 16], dtype=dtypes.float32, name='inputA')\n in_tensor_2 = array_ops.placeholder(\n shape=[1, 16], dtype=dtypes.float32, name='inputB')\n\n weight = constant_op.constant(-1.0, shape=[16, 16])\n bias = constant_op.constant(-1.0, shape=[16])\n layer = math_ops.matmul(in_tensor_1, weight) + bias\n _ = math_ops.reduce_mean(math_ops.square(layer - in_tensor_2))\n\n filename = self._saveFrozenGraph(sess)\n model_coverage.test_frozen_graph(filename, ['inputA', 'inputB'],\n ['add', 'Mean'])\n\n def testFunctions(self):\n \"\"\"Tests functions.\"\"\"\n\n @def_function.function\n def plus_placeholder(x, placeholder):\n return x + placeholder\n\n with ops.Graph().as_default():\n placeholder = array_ops.placeholder(\n dtype=dtypes.float32, shape=[1], name='input')\n variable_node = constant_op.constant(1.0, 
name='variable_node')\n defun_node = plus_placeholder(variable_node, placeholder)\n _ = math_ops.multiply(defun_node, 2.0, name='output_node')\n\n # Initialize variables in the model.\n sess = session.Session()\n\n filename = self._saveFrozenGraph(sess)\n model_coverage.test_frozen_graph(filename, ['input'], ['output_node'])\n\n def _getQuantizedModel(self):\n np.random.seed(0)\n with ops.Graph().as_default():\n with session.Session().as_default() as sess:\n # The tensor needs to have more than 1024 elements for quantize_weights\n # to kick in. Thus, the [33, 33] shape.\n in_tensor_1 = array_ops.placeholder(\n shape=[33, 33], dtype=dtypes.float32, name='inputA')\n in_tensor_2 = constant_op.constant(\n np.random.uniform(low=-10., high=10., size=(33, 33)),\n shape=[33, 33],\n dtype=dtypes.float32,\n name='inputB')\n _ = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')\n\n filename = self._saveFrozenGraph(sess)\n return filename\n\n def testQuantized(self):\n filename = self._getQuantizedModel()\n model_coverage.test_frozen_graph_quant(filename, ['inputA'], ['output'])\n\n def testQuantizedInputShapes(self):\n filename = self._getQuantizedModel()\n model_coverage.test_frozen_graph_quant(\n filename, ['inputA'], ['output'], input_shapes={'inputA': [33, 33]})\n\n def testQuantizedFlexAll(self):\n filename = self._getQuantizedModel()\n model_coverage.test_frozen_graph_quant(\n filename, ['inputA'], ['output'],\n target_ops=set([lite.OpsSet.SELECT_TF_OPS]))\n\n\nclass EvaluateSavedModel(test.TestCase):\n\n def testFloat(self):\n saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')\n with ops.Graph().as_default():\n with session.Session().as_default() as sess:\n in_tensor_1 = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')\n in_tensor_2 = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')\n out_tensor = in_tensor_1 + in_tensor_2\n\n inputs = {'x': in_tensor_1, 'y': in_tensor_2}\n outputs = {'z': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n model_coverage.test_saved_model(saved_model_dir)\n\n\nclass EvaluateKerasModel(test.TestCase):\n\n def _getSingleInputKerasModel(self):\n \"\"\"Returns single input Sequential tf.keras model.\"\"\"\n keras.backend.clear_session()\n\n xs = [-1, 0, 1, 2, 3, 4]\n ys = [-3, -1, 1, 3, 5, 7]\n\n model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.train_on_batch(xs, ys)\n return model\n\n def _saveKerasModel(self, model):\n try:\n fd, keras_file = tempfile.mkstemp('.h5')\n keras.models.save_model(model, keras_file)\n finally:\n os.close(fd)\n return keras_file\n\n @test_util.run_v1_only('Keras test fails under v2, see b/157266669')\n def testFloat(self):\n model = self._getSingleInputKerasModel()\n keras_file = self._saveKerasModel(model)\n\n model_coverage.test_keras_model(keras_file)\n\n @test_util.run_v1_only('Keras test fails under v2, see b/157266669')\n def testPostTrainingQuantize(self):\n model = self._getSingleInputKerasModel()\n keras_file = self._saveKerasModel(model)\n\n model_coverage.test_keras_model(keras_file, post_training_quantize=True)\n\n @test_util.run_v1_only('Keras test fails under v2, see b/157266669')\n def testTargetOps(self):\n model = self._getSingleInputKerasModel()\n keras_file = self._saveKerasModel(model)\n\n model_coverage.test_keras_model(\n keras_file,\n target_ops=set([lite.OpsSet.TFLITE_BUILTINS,\n 
lite.OpsSet.SELECT_TF_OPS]))\n\n\nif __name__ == '__main__':\n test.main()\n",
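The model_coverage tests above go through internal helpers; a hedged sketch of the equivalent end-to-end check using only public TF 2.x APIs is shown below: train a tiny Keras model (mirroring _getSingleInputKerasModel), convert it with tf.lite.TFLiteConverter, and run one inference through tf.lite.Interpreter. This assumes TensorFlow 2.x is installed and is an illustration, not the model_coverage library itself.

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
xs = np.array([-1, 0, 1, 2, 3, 4], dtype=np.float32).reshape(-1, 1)
ys = np.array([-3, -1, 1, 3, 5, 7], dtype=np.float32).reshape(-1, 1)
model.train_on_batch(xs, ys)

# Convert with the public converter (the tests above exercise this via model_coverage).
tflite_model = tf.lite.TFLiteConverter.from_keras_model(model).convert()

# Run a single inference through the TFLite interpreter.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_detail = interpreter.get_input_details()[0]
output_detail = interpreter.get_output_details()[0]
interpreter.set_tensor(input_detail['index'], np.array([[10.]], dtype=np.float32))
interpreter.invoke()
print(interpreter.get_tensor(output_detail['index']))  # TFLite prediction for x = 10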
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Various classes representing distributed values.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.distribute import packed_distributed_variable as packed\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import values_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.saved_model import save_context\nfrom tensorflow.python.training.saving import saveable_object\nfrom tensorflow.python.training.saving import saveable_object_util\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.types import core\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef _on_write_update_replica(var, update_fn, value, **kwargs):\n \"\"\"Updates variables with ON_WRITE synchronization in replica context.\"\"\"\n if var.aggregation == vs.VariableAggregation.NONE:\n return update_fn(var._get_on_device_or_primary(), value, **kwargs) # pylint: disable=protected-access\n\n def merge_fn(strategy, value, **kwargs):\n \"\"\"Aggregate values and update all variables in cross replica context.\"\"\"\n # Don't allow MEAN with non float dtype, since it may cause unexpected\n # precision loss. Python3 and NumPy automatically upcast integers to\n # float in division, but we should always preserve the type.\n #\n # Note that to be backward compatible we allow the case when the value\n # is *always* the same on each replica. I.E. value is not a\n # PerReplica. Refer to regroup() to see how values are grouped.\n if var.aggregation == vs.VariableAggregation.MEAN and (\n not var.dtype.is_floating) and isinstance(value, PerReplica):\n raise ValueError(\n \"Cannot update non-float variables with \"\n \"tf.VariableAggregation.MEAN aggregation in replica context. 
\"\n \"Either change the variable dtype to float or update it in \"\n \"cross-replica context.\")\n\n assert strategy == var.distribute_strategy\n v = values_util.apply_aggregation(strategy, value, var.aggregation, var)\n return var._update_cross_replica(update_fn, v, **kwargs) # pylint: disable=protected-access\n\n return ds_context.get_replica_context().merge_call(\n merge_fn, args=(value,), kwargs=kwargs)\n\n\n@tf_export(\"distribute.DistributedValues\", v1=[])\nclass DistributedValues(object):\n \"\"\"Base class for representing distributed values.\n\n A subclass instance of `tf.distribute.DistributedValues` is created when\n creating variables within a distribution strategy, iterating a\n `tf.distribute.DistributedDataset` or through `tf.distribute.Strategy.run`.\n This base class should never be instantiated directly.\n `tf.distribute.DistributedValues` contains a value per replica. Depending on\n the subclass, the values could either be synced on update, synced on demand,\n or never synced.\n\n `tf.distribute.DistributedValues` can be reduced to obtain single value across\n replicas, as input into `tf.distribute.Strategy.run` or the per-replica values\n inspected using `tf.distribute.Strategy.experimental_local_results`.\n\n Example usage:\n\n 1. Created from a `tf.distribute.DistributedDataset`:\n\n >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"])\n >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)\n >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))\n >>> distributed_values = next(dataset_iterator)\n\n 2. Returned by `run`:\n\n >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"])\n >>> @tf.function\n ... def run():\n ... ctx = tf.distribute.get_replica_context()\n ... return ctx.replica_id_in_sync_group\n >>> distributed_values = strategy.run(run)\n\n 3. As input into `run`:\n\n >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"])\n >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)\n >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))\n >>> distributed_values = next(dataset_iterator)\n >>> @tf.function\n ... def run(input):\n ... return input + 1.0\n >>> updated_value = strategy.run(run, args=(distributed_values,))\n\n 4. Reduce value:\n\n >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"])\n >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)\n >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))\n >>> distributed_values = next(dataset_iterator)\n >>> reduced_value = strategy.reduce(tf.distribute.ReduceOp.SUM,\n ... distributed_values,\n ... axis = 0)\n\n 5. Inspect local replica values:\n\n >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"])\n >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)\n >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))\n >>> per_replica_values = strategy.experimental_local_results(\n ... 
distributed_values)\n >>> per_replica_values\n (<tf.Tensor: shape=(1,), dtype=float32, numpy=array([5.], dtype=float32)>,\n <tf.Tensor: shape=(1,), dtype=float32, numpy=array([6.], dtype=float32)>)\n\n \"\"\"\n\n def __init__(self, values):\n \"\"\"Should only be called by subclass __init__.\"\"\"\n self._values = tuple(values)\n\n def _get(self):\n \"\"\"Returns the value for the current device or raises a ValueError.\"\"\"\n replica_id = values_util.get_current_replica_id_as_int()\n if replica_id is None:\n return self._get_cross_replica()\n else:\n return self._values[replica_id]\n\n def _get_cross_replica(self):\n raise NotImplementedError(\n \"This method should be overridden by sub-classes which support cross-\"\n \"replica accesses.\")\n\n def _get_on_device_or_primary(self):\n \"\"\"Returns value in same replica or device if possible, else the _primary.\"\"\"\n replica_id = values_util.get_current_replica_id_as_int()\n if replica_id is None:\n # Try to find a value on the current device.\n current_device = device_util.canonicalize(device_util.current())\n for value in self._values:\n if device_util.canonicalize(value.device) == current_device:\n return value\n return self._primary\n else:\n return self._values[replica_id]\n\n @property\n def _primary(self):\n \"\"\"Returns a representative component.\"\"\"\n return self._values[0]\n\n @property\n def _devices(self):\n return tuple(v.device for v in self._values)\n\n def __str__(self):\n debug_str = \",\\n\".join(\n \" %d: %s\" % (i, v) for i, v in enumerate(self._values))\n return \"%s:{\\n%s\\n}\" % (self.__class__.__name__, debug_str)\n\n def __repr__(self):\n debug_repr = \",\\n\".join(\n \" %d: %r\" % (i, v) for i, v in enumerate(self._values))\n return \"%s:{\\n%s\\n}\" % (self.__class__.__name__, debug_repr)\n\n\n# NOTE(josh11b,apassos): It would be great if we could inspect the values this was\n# initialized with and use that to generate the overloaded operators here.\n# Unfortunately, Python's rules for special methods don't allow this, see\n# https://docs.python.org/3/reference/datamodel.html#special-method-names\n# \"if a class defines a method named __getitem__(), and x is an instance of\n# this class, then x[i] is roughly equivalent to type(x).__getitem__(x, i).\"\n# In particular, these special methods don't go through __getattr__, and\n# it will only use those methods if they are defined in the class, not the\n# object.\nclass DistributedDelegate(DistributedValues):\n \"\"\"A map from device to values; acts as the same type as the values.\"\"\"\n\n def __getattr__(self, name):\n # The '_use_resource_variables' and the attrs starts with '_self' are used\n # for restoring the saved_model proto, and '_attribute_sentinel' is used for\n # Layer tracking. At the point these attrs are queried, the variable has not\n # been initialized. Thus it should not query those of the underlying\n # components.\n if name.startswith(\"_self_\") or name in (\"_use_resource_variables\",\n \"_attribute_sentinel\",\n \"_distributed_container\"):\n return super(DistributedDelegate, self).__getattr__(name)\n\n # This allows copy.copy(DistributedDelegate). When copying an object,\n # copy.copy doesn't invoke its __init__ method, instead it makes a new\n # empty object, then copies the attributes over. copy.copy looks for\n # attributes like \"__getstate__\" in case the object implements its custom\n # copying. 
Since DistributedDelegate doesn't have those attributes defined,\n # __getattr__ will be invoked, which tries to access \"_values\" attributes,\n # but that doesn't exist either because this is an empty object, and again\n # __getattr__ is invoked, leading to an infinite recursion.\n if name == \"_values\":\n raise AttributeError()\n\n # TODO(priyag): This needs to be made robust against pitfalls from mix use\n # __getattr__ and @property. See b/120402273.\n return getattr(self._get(), name)\n\n @property\n def values(self):\n \"\"\"Returns the per replica values.\"\"\"\n return self._values\n\n def _get_as_operand(self):\n \"\"\"Returns the value for operations for the current device.\n\n Some implementations, e.g. `TPUMirroredVariable`, are not able to return the\n value type within a replica context. They can, however, return a value that\n can be used by the operations below.\n \"\"\"\n return self._get()\n\n # pylint: disable=multiple-statements\n def __add__(self, o):\n return self._get_as_operand() + o\n\n def __radd__(self, o):\n return o + self._get_as_operand()\n\n def __sub__(self, o):\n return self._get_as_operand() - o\n\n def __rsub__(self, o):\n return o - self._get_as_operand()\n\n def __mul__(self, o):\n return self._get_as_operand() * o\n\n def __rmul__(self, o):\n return o * self._get_as_operand()\n\n def __truediv__(self, o):\n return self._get_as_operand() / o\n\n def __rtruediv__(self, o):\n return o / self._get_as_operand()\n\n def __floordiv__(self, o):\n return self._get_as_operand() // o\n\n def __rfloordiv__(self, o):\n return o // self._get_as_operand()\n\n def __mod__(self, o):\n return self._get_as_operand() % o\n\n def __rmod__(self, o):\n return o % self._get_as_operand()\n\n def __lt__(self, o):\n return self._get_as_operand() < o\n\n def __le__(self, o):\n return self._get_as_operand() <= o\n\n def __gt__(self, o):\n return self._get_as_operand() > o\n\n def __ge__(self, o):\n return self._get_as_operand() >= o\n\n def __and__(self, o):\n return self._get_as_operand() & o\n\n def __rand__(self, o):\n return o & self._get_as_operand()\n\n def __or__(self, o):\n return self._get_as_operand() | o\n\n def __ror__(self, o):\n return o | self._get_as_operand()\n\n def __xor__(self, o):\n return self._get_as_operand() ^ o\n\n def __rxor__(self, o):\n return o ^ self._get_as_operand()\n\n def __getitem__(self, o):\n return self._get_as_operand()[o]\n\n def __pow__(self, o, modulo=None):\n return pow(self._get_as_operand(), o, modulo)\n\n def __rpow__(self, o):\n return pow(o, self._get_as_operand())\n\n def __invert__(self):\n return ~self._get_as_operand()\n\n def __neg__(self):\n return -self._get_as_operand()\n\n def __abs__(self):\n return abs(self._get_as_operand())\n\n def __div__(self, o):\n try:\n return self._get_as_operand().__div__(o)\n except AttributeError:\n # See https://docs.python.org/3/library/constants.html#NotImplemented\n return NotImplemented\n\n def __rdiv__(self, o):\n try:\n return self._get_as_operand().__rdiv__(o)\n except AttributeError:\n # See https://docs.python.org/3/library/constants.html#NotImplemented\n return NotImplemented\n\n def __matmul__(self, o):\n try:\n return self._get_as_operand().__matmul__(o)\n except AttributeError:\n # See https://docs.python.org/3/library/constants.html#NotImplemented\n return NotImplemented\n\n def __rmatmul__(self, o):\n try:\n return self._get_as_operand().__rmatmul__(o)\n except AttributeError:\n # See https://docs.python.org/3/library/constants.html#NotImplemented\n return NotImplemented\n\n 
# TODO(josh11b): Even more operator overloads.\n\n\nclass PerReplica(DistributedValues, composite_tensor.CompositeTensor):\n \"\"\"Holds a map from replica to unsynchronized values.\"\"\"\n\n @property\n def _type_spec(self):\n return PerReplicaSpec(\n *(type_spec.type_spec_from_value(v) for v in self._values))\n\n @property\n def values(self):\n \"\"\"Returns the per replica values.\"\"\"\n return self._values\n\n\nclass PerReplicaSpec(type_spec.TypeSpec):\n \"\"\"Type specification for a `PerReplica`.\"\"\"\n\n __slots__ = [\"_value_specs\"]\n\n value_type = property(lambda self: PerReplica)\n\n def __init__(self, *value_specs):\n self._value_specs = tuple(value_specs)\n\n def _serialize(self):\n return self._value_specs\n\n @property\n def _component_specs(self):\n return self._value_specs\n\n def _to_components(self, value):\n replica_context = ds_context.get_replica_context()\n if replica_context is not None and replica_context.num_replicas_in_sync > 1:\n raise ValueError(\n \"Flattening a PerReplica to components is not supported in replica \"\n \"context.\")\n return value._values # pylint: disable=protected-access\n\n def _from_components(self, tensor_list):\n return PerReplica(tensor_list)\n\n\n# Note that unlike PerReplica, Mirrored values inherit from\n# DistributedDelegate and so can be used directly in cross-replica mode.\n# TODO(tomhennigan) Should this extend CompositeTensor?\nclass Mirrored(DistributedDelegate):\n \"\"\"Holds a map from replica to values which are kept in sync.\"\"\"\n\n def _get_cross_replica(self):\n return self._get_on_device_or_primary()\n\n def _as_graph_element(self):\n obj = self._get()\n conv_fn = getattr(obj, \"_as_graph_element\", None)\n if conv_fn and callable(conv_fn):\n return conv_fn()\n return obj\n\n\nclass DistributedVarOp(object):\n \"\"\"A class that looks like `tf.Operation`.\"\"\"\n\n def __init__(self, name, graph, traceback, typ):\n self.name = name\n self.graph = graph\n self.traceback = traceback\n self.type = typ\n\n def __eq__(self, o):\n if not isinstance(o, self.__class__):\n raise NotImplementedError\n return (self.name == o.name and self.graph == o.graph and\n self.traceback == o.traceback and self.type == o.type)\n\n def __hash__(self):\n return hash((self.name, self.graph, self.traceback, self.type))\n\n\nclass DistributedVariable(DistributedDelegate, variables_lib.Variable,\n core.Tensor):\n \"\"\"Holds a map from replica to variables.\"\"\"\n\n def __init__(self, strategy, values, aggregation, var_policy=None):\n self._distribute_strategy = strategy\n self._aggregation = aggregation\n super(DistributedVariable, self).__init__(values)\n self._common_name = self._primary.name.split(\":\")[0]\n\n # Packed variable is used to reduce the overhead of function execution.\n # For a DistributedVariable, only one variable handle is captured into a\n # function graph. It's only supported in eager mode.\n if ops.executing_eagerly_outside_functions() and getattr(\n strategy, \"_enable_packed_variable_in_eager_mode\", False):\n name = \"%s/packed/\" % self._common_name\n self._packed_var = packed.PackedDistributedVariable(values, name=name)\n else:\n self._packed_var = None\n\n # tf.keras keeps track of variables initialized using this attribute. 
When\n # tf.keras gets the default session, it initializes all uninitialized vars.\n # We need to make _keras_initialized a member of DistributedVariable because\n # without this it will use `__getattr__` which will delegate to a component\n # variable.\n self._keras_initialized = False\n # Typically, a `DistributedVariable`'s initializer is composed of the\n # initializers of the components variables. However, in some cases, such as\n # when restoring from a checkpoint, we may set the _initializer_op\n # property on the entire `DistributedVariable`.\n self._initializer_op = None\n # Set a VariablePolicy which decides how we replicate/aggregate the given\n # variable.\n self._policy = var_policy\n\n def _use_packed_variable(self):\n # Don't use packed variable when under a SaveContext to avoid explicit\n # device placement on variable consuming ops.\n return self._packed_var is not None and not save_context.in_save_context()\n\n def is_initialized(self, name=None):\n \"\"\"Identifies if all the component variables are initialized.\n\n Args:\n name: Name of the final `logical_and` op.\n\n Returns:\n The op that evaluates to True or False depending on if all the\n component variables are initialized.\n \"\"\"\n if values_util.is_saving_non_distributed():\n return self._primary.is_initialized()\n if self._use_packed_variable():\n return self._packed_var.is_initialized()\n result = self._primary.is_initialized()\n # We iterate through the list of values except the last one to allow us to\n # name the final `logical_and` op the same name that is passed by the user\n # to the `is_initialized` op. For distributed variables, the\n # `is_initialized` op is a `logical_and` op.\n for v in self._values[1:-1]:\n result = math_ops.logical_and(result, v.is_initialized())\n result = math_ops.logical_and(\n result, self._values[-1].is_initialized(), name=name)\n return result\n\n @property\n def initializer(self):\n if values_util.is_saving_non_distributed():\n return self._primary.initializer\n if self._initializer_op:\n init_op = self._initializer_op\n else:\n # return grouped ops of all the var initializations of component values of\n # the mirrored variable\n init_op = control_flow_ops.group(\n tuple(v.initializer for v in self._values))\n return init_op\n\n def initialized_value(self):\n return self._get_on_device_or_primary().initialized_value()\n\n @property\n def initial_value(self):\n return self._get_on_device_or_primary().initial_value\n\n @property\n def constraint(self):\n return self._primary.constraint\n\n @property\n def graph(self):\n return self._primary.graph\n\n @property\n def _shared_name(self):\n return self._common_name\n\n @property\n def _unique_id(self):\n return self._primary._unique_id # pylint: disable=protected-access\n\n @property\n def _graph_key(self):\n \"\"\"Lets Optimizers know which graph this variable is from.\"\"\"\n return self._primary._graph_key # pylint: disable=protected-access\n\n @property\n def name(self):\n return self._primary.name\n\n @property\n def dtype(self):\n return self._primary.dtype\n\n @property\n def shape(self):\n return self._primary.shape\n\n @property\n def synchronization(self):\n return self._primary.synchronization\n\n @property\n def aggregation(self):\n return self._aggregation\n\n @property\n def _packed_variable(self):\n if self._use_packed_variable():\n return self._packed_var\n return None\n\n @property\n def handle(self):\n if values_util.is_saving_non_distributed():\n return self._primary.handle\n replica_id = 
values_util.get_current_replica_id_as_int()\n if replica_id is None:\n raise ValueError(\"`handle` is not available outside the replica context\"\n \" or a `tf.distribute.Strategy.update()` call.\")\n else:\n if self._use_packed_variable():\n return self._packed_var.handle\n return self._values[replica_id].handle\n\n def eval(self, session=None):\n return self._get_on_device_or_primary().eval(session)\n\n @property\n def _save_slice_info(self):\n return self._primary._save_slice_info # pylint: disable=protected-access\n\n def _get_save_slice_info(self):\n return self._primary._get_save_slice_info() # pylint: disable=protected-access\n\n def _set_save_slice_info(self, save_slice_info):\n for v in self._values:\n v._set_save_slice_info(save_slice_info) # pylint: disable=protected-access\n\n @property\n def device(self):\n return self._get_on_device_or_primary().device\n\n @property\n def trainable(self):\n return self._primary.trainable\n\n @property\n def distribute_strategy(self):\n return self._distribute_strategy\n\n def get_shape(self):\n return self._primary.get_shape()\n\n def to_proto(self, export_scope=None):\n return self._primary.to_proto(export_scope=export_scope)\n\n @property\n def op(self):\n if values_util.is_saving_non_distributed():\n return self._primary.op\n # We want cross-replica code that does some var.op.X calls\n # to work (even if the current device isn't in self._devices), but\n # other uses of var.op in a cross-replica context to fail.\n if ds_context.in_cross_replica_context():\n return DistributedVarOp(self._primary.op.name, self._primary.op.graph,\n self._primary.op.traceback, self._primary.op.type)\n return self._get().op\n\n @property\n def _in_graph_mode(self):\n return self._primary._in_graph_mode # pylint: disable=protected-access\n\n def _get_replica(self, replica_id):\n \"\"\"Returns the value on a device with the given replica_id.\"\"\"\n if self._use_packed_variable():\n return self._packed_var.on_device(self._devices[replica_id])\n return self._values[replica_id]\n\n def _get(self):\n \"\"\"Returns the value for the current device or raises a ValueError.\"\"\"\n if values_util.is_saving_non_distributed():\n return self._primary\n replica_id = values_util.get_current_replica_id_as_int()\n if replica_id is None:\n return self._get_cross_replica()\n else:\n return self._get_replica(replica_id)\n\n def _get_on_device_or_primary(self):\n \"\"\"Returns value in same replica or device if possible, else the _primary.\"\"\"\n if values_util.is_saving_non_distributed():\n return self._primary\n replica_id = values_util.get_current_replica_id_as_int()\n if replica_id is None:\n # Try to find a value on the current device.\n current_device = device_util.canonicalize(device_util.current())\n for i, value in enumerate(self._values):\n if device_util.canonicalize(value.device) == current_device:\n return self._get_replica(i)\n return self._get_replica(0)\n else:\n return self._get_replica(replica_id)\n\n def read_value(self):\n if values_util.is_saving_non_distributed():\n return self._primary.read_value()\n with ds_context.enter_or_assert_strategy(self._distribute_strategy):\n return array_ops.identity(self._get())\n\n def value(self):\n if values_util.is_saving_non_distributed():\n return self._primary.value()\n if self._policy:\n return self._policy.value(self)\n return self._get_on_device_or_primary().value()\n\n def numpy(self):\n if context.executing_eagerly():\n return self.read_value().numpy()\n else:\n raise NotImplementedError(\n \"numpy() is only 
available when eager execution is enabled.\")\n\n def assign_sub(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign_sub(value, use_locking, name, read_value)\n if self._policy:\n return self._policy.assign_sub(\n self,\n value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return values_util.on_write_assign_sub(\n self, value, use_locking=use_locking, name=name, read_value=read_value)\n\n def assign_add(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign_add(value, use_locking, name, read_value)\n if self._policy:\n return self._policy.assign_add(\n self,\n value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return values_util.on_write_assign_add(\n self, value, use_locking=use_locking, name=name, read_value=read_value)\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign(value, use_locking, name, read_value)\n if self._policy:\n return self._policy.assign(\n self,\n value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return values_util.on_write_assign(\n self, value, use_locking=use_locking, name=name, read_value=read_value)\n\n def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_sub(sparse_delta, use_locking, name)\n if self._policy:\n return self._policy.scatter_sub(\n self, sparse_delta, use_locking=use_locking, name=name)\n return values_util.scatter_sub(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_add(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_add(sparse_delta, use_locking, name)\n if self._policy:\n return self._policy.scatter_add(\n self, sparse_delta, use_locking=use_locking, name=name)\n return values_util.scatter_add(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_mul(sparse_delta, use_locking, name)\n if self._policy:\n return self._policy.scatter_mul(\n self, sparse_delta, use_locking=use_locking, name=name)\n return values_util.scatter_mul(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_div(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_div(sparse_delta, use_locking, name)\n if self._policy:\n return self._policy.scatter_div(\n self, sparse_delta, use_locking=use_locking, name=name)\n return values_util.scatter_div(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_min(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_min(sparse_delta, use_locking, name)\n if self._policy:\n return self._policy.scatter_min(\n self, sparse_delta, use_locking=use_locking, name=name)\n return values_util.scatter_min(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_max(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_max(sparse_delta, use_locking, name)\n if self._policy:\n return 
self._policy.scatter_max(\n self, sparse_delta, use_locking=use_locking, name=name)\n return values_util.scatter_max(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_update(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_update(sparse_delta, use_locking, name)\n if self._policy:\n return self._policy.scatter_update(\n self, sparse_delta, use_locking=use_locking, name=name)\n return values_util.scatter_update(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def _gather_saveables_for_checkpoint(self):\n \"\"\"Overrides Trackable method.\n\n This allows both name-based and object-based save and restore of\n DistributedVariables.\n\n Returns:\n A dictionary mapping attribute names to `SaveableObject` factories.\n \"\"\"\n\n def _saveable_factory(name=self._common_name):\n return _DistributedVariableSaveable(self, self._primary, name)\n\n return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}\n\n def _as_graph_element(self):\n if values_util.is_saving_non_distributed():\n return self._primary._as_graph_element() # pylint: disable=protected-access\n if self._policy:\n return self._policy._as_graph_element(self) # pylint: disable=protected-access\n\n raise NotImplementedError(\"No policy set for calling _as_graph_element.\")\n\n def _get_cross_replica(self):\n if values_util.is_saving_non_distributed():\n return self._primary\n if self._policy:\n return self._policy._get_cross_replica(self) # pylint: disable=protected-access\n\n raise NotImplementedError(\n \"This method should be overridden by sub-classes which support cross-\"\n \"replica accesses.\")\n\n def _update_cross_replica(self, update_fn, value, **kwargs):\n \"\"\"Applies updates across replicas.\n\n Args:\n update_fn: A callable to pass to `strategy.extended.update` to update the\n variable. It should has the same signature as `Variable.assign()`.\n value: value to be passed to `update_fn`.\n **kwargs: remaining arguments to `update_fn`.\n\n Returns:\n Updated variable or `tf.Operation`.\n \"\"\"\n return self.distribute_strategy.extended.update(\n self, update_fn, args=(value,), kwargs=kwargs, group=True)\n\n def _update_replica(self, update_fn, value, **kwargs):\n \"\"\"Applies updates in one replica.\n\n Args:\n update_fn: A callable to update the variable. It should has the same\n signature as `Variable.assign()`.\n value: value to be passed to `update_fn`.\n **kwargs: remaining arguments to `update_fn`.\n\n Returns:\n Updated variable or `tf.Operation`.\n \"\"\"\n if self._policy:\n return self._policy._update_replica(self, update_fn, value, **kwargs) # pylint: disable=protected-access\n raise NotImplementedError(\"should be implemented by subclass.\")\n\n def _update(self, update_fn, value, **kwargs):\n \"\"\"Applies updates depending on the context.\n\n The method calls `_update_replica` in replica context,\n `_update_cross_replica` in cross replica context, and `update_fn` in update\n context.\n\n If `read_value` is True, the method returns the updated Variable. If\n `read_value` is False, the method returns the update `tf.Operation`.\n\n Args:\n update_fn: A callable to pass to `strategy.extended.update` to update the\n variable. 
It should have the same signature as `Variable.assign()`.\n value: value to be passed to `update_fn`.\n **kwargs: keyword arguments to `update_fn`.\n\n Returns:\n Updated variable or `tf.Operation`.\n\n \"\"\"\n if values_util.is_saving_non_distributed():\n return update_fn(self._primary, value, **kwargs)\n with ds_context.enter_or_assert_strategy(self.distribute_strategy):\n if ds_context.in_cross_replica_context():\n update_replica_id = distribute_lib.get_update_replica_id()\n if update_replica_id is not None:\n replica_value = self._get_replica(update_replica_id)\n return update_fn(replica_value, value, **kwargs)\n return self._update_cross_replica(update_fn, value, **kwargs)\n else:\n values_util.assert_replica_context(self.distribute_strategy)\n return self._update_replica(update_fn, value, **kwargs)\n\n def _should_act_as_resource_variable(self):\n \"\"\"Pass resource_variable_ops.is_resource_variable check.\"\"\"\n pass\n\n def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n \"\"\"Converts a variable to a tensor.\"\"\"\n if values_util.is_saving_non_distributed():\n return ops.convert_to_tensor(\n self._primary, dtype=dtype, name=name, as_ref=as_ref)\n with ds_context.enter_or_assert_strategy(self._distribute_strategy):\n return ops.convert_to_tensor(\n self._get(), dtype=dtype, name=name, as_ref=as_ref)\n\n def _map_resources(self, save_options):\n \"\"\"For implementing `Trackable`.\"\"\"\n # Initialize for self._primary first, so that obj_map[self._primary] and\n # resource_map[self._primary.handle] contain mapped values.\n obj_map, resource_map = self._primary._map_resources(save_options) # pylint:disable=protected-access\n for v in [v for v in self._values if v != self._primary]:\n\n if (save_options.experimental_variable_policy # pylint:disable=protected-access\n ._expand_distributed_variables()):\n v_obj_map, v_resource_map = v._map_resources(save_options) # pylint:disable=protected-access\n obj_map.update(v_obj_map)\n resource_map.update(v_resource_map)\n else:\n obj_map[v] = obj_map[self._primary]\n resource_map[v.handle] = resource_map[self._primary.handle]\n obj_map[self] = obj_map[self._primary]\n resource_map[self] = resource_map[self._primary.handle]\n if self._packed_var is not None:\n resource_map[self._packed_var.packed_handle] = resource_map[\n self._primary.handle]\n return obj_map, resource_map\n\n\nclass _DistributedVariableSaveable(saveable_object.SaveableObject):\n \"\"\"Class for defining how to restore a DistributedVariable.\"\"\"\n\n def __init__(self, distributed_variable, primary_variable, name):\n self._distributed_variable = distributed_variable\n if not self._distributed_variable._policy:\n raise ValueError(\"VariablePolicy has not been set for the distributed \"\n \"variable.\")\n tensor, spec = distributed_variable._policy.get_saveable(\n distributed_variable, primary_variable, name)\n super(_DistributedVariableSaveable, self).__init__(tensor, spec, name)\n\n def restore(self, restored_tensors, restored_shapes):\n \"\"\"Restore the same value into all variables.\"\"\"\n tensor, = restored_tensors\n return self._distributed_variable._policy.get_restore_ops( # pylint: disable=protected-access\n self._distributed_variable, tensor)\n\n\nclass _MirroredSaveable(saveable_object_util.ResourceVariableSaveable):\n \"\"\"Class for defining how to restore a MirroredVariable.\"\"\"\n\n def __init__(self, mirrored_variable, primary_variable, name):\n self._mirrored_variable = mirrored_variable\n super(_MirroredSaveable, 
self).__init__(primary_variable, \"\", name)\n\n def restore(self, restored_tensors, restored_shapes):\n \"\"\"Restore the same value into all variables.\"\"\"\n tensor, = restored_tensors\n packed_var = self._mirrored_variable._packed_variable # pylint: disable=protected-access\n if packed_var is not None:\n return control_flow_ops.group(\n tuple(\n values_util.assign_on_device(d, packed_var, tensor)\n for d in packed_var.devices))\n return control_flow_ops.group(\n tuple(\n values_util.assign_on_device(v.device, v, tensor)\n for v in self._mirrored_variable.values))\n\n\nclass MirroredVariable(DistributedVariable, Mirrored):\n \"\"\"Holds a map from replica to variables whose values are kept in sync.\"\"\"\n\n def _update_replica(self, update_fn, value, **kwargs):\n return _on_write_update_replica(self, update_fn, value, **kwargs)\n\n def scatter_min(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_min(*args, **kwargs)\n if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and\n self._aggregation != vs.VariableAggregation.NONE):\n raise NotImplementedError(values_util.scatter_error_msg.format(\n op_name=\"scatter_min\", aggregation=self._aggregation))\n return super(MirroredVariable, self).scatter_min(*args, **kwargs)\n\n def scatter_max(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_max(*args, **kwargs)\n if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and\n self._aggregation != vs.VariableAggregation.NONE):\n raise NotImplementedError(values_util.scatter_error_msg.format(\n op_name=\"scatter_max\", aggregation=self._aggregation))\n return super(MirroredVariable, self).scatter_max(*args, **kwargs)\n\n def scatter_update(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_update(*args, **kwargs)\n if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and\n self._aggregation != vs.VariableAggregation.NONE):\n raise NotImplementedError(values_util.scatter_error_msg.format(\n op_name=\"scatter_update\", aggregation=self._aggregation))\n return super(MirroredVariable, self).scatter_update(*args, **kwargs)\n\n def _get_cross_replica(self):\n if values_util.is_saving_non_distributed():\n return self._primary.read_value()\n # Return identity, to avoid directly exposing the variable to the user and\n # allowing it to be modified by mistake.\n return array_ops.identity(Mirrored._get_cross_replica(self))\n\n def _as_graph_element(self):\n return self._get_on_device_or_primary()._as_graph_element() # pylint: disable=protected-access\n\n def _gather_saveables_for_checkpoint(self):\n \"\"\"Overrides Trackable method.\n\n This allows both name-based and object-based save and restore of\n MirroredVariables.\n\n Returns:\n A dictionary mapping attribute names to `SaveableObject` factories.\n \"\"\"\n\n def _saveable_factory(name=self._common_name):\n return _MirroredSaveable(self, self._primary, name)\n\n return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}\n\n def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n \"\"\"Converts a variable to a tensor.\"\"\"\n # TODO(b/154017756): Make _dense_var_to_tensor consistent between ON_READ\n # and ON_WRITE.\n # Try to avoid assignments to and other mutations of MirroredVariable\n # state except through a DistributionStrategy.extended.update() call.\n if as_ref:\n # A TF 1.x case where the variable is a boolean variable and used like:\n # 
tf.cond(v, true_fn, false_fn).\n raise ValueError(\n \"You may be using variable created under distribute strategy in TF \"\n \"1.x control flows. Try explicitly converting the variable to Tensor \"\n \"using variable.read_value(), or switch to TF 2.x.\")\n return ops.convert_to_tensor(\n self._get(), dtype=dtype, name=name, as_ref=as_ref)\n\n\nclass _SyncOnReadSaveable(saveable_object.SaveableObject):\n \"\"\"Class for defining how to restore a SyncOnReadVariable.\"\"\"\n\n def __init__(self, sync_on_read_variable, name):\n self._sync_on_read_variable = sync_on_read_variable\n\n # We use a callable so that we don't have to evaluate this expression\n # in the case where we are trying to restore instead of save.\n def tensor():\n strategy = sync_on_read_variable._distribute_strategy # pylint: disable=protected-access\n return strategy.extended.read_var(sync_on_read_variable)\n\n spec = saveable_object.SaveSpec(\n tensor=tensor,\n slice_spec=\"\",\n name=name,\n dtype=sync_on_read_variable.dtype,\n device=sync_on_read_variable._primary.device) # pylint: disable=protected-access\n\n super(_SyncOnReadSaveable, self).__init__(tensor, [spec], name)\n\n def restore(self, restored_tensors, restored_shapes):\n \"\"\"Restore the same value into all variables.\"\"\"\n # To preserve the sum across save and restore, we have to divide the\n # total across all devices when restoring a variable that was summed\n # when saving.\n tensor, = restored_tensors\n if self._sync_on_read_variable.aggregation == vs.VariableAggregation.SUM:\n # pylint: disable=protected-access\n strategy = self._sync_on_read_variable._distribute_strategy\n tensor = math_ops.cast(tensor / strategy.num_replicas_in_sync,\n self._sync_on_read_variable.dtype)\n # pylint: enable=protected-access\n return control_flow_ops.group(\n tuple(\n values_util.assign_on_device(v.device, v, tensor)\n for v in self._sync_on_read_variable.values))\n\n\nclass SyncOnReadVariable(DistributedVariable):\n \"\"\"Holds a map from replica to variables whose values are reduced on save.\"\"\"\n\n def _update_replica(self, update_fn, value, **kwargs):\n return update_fn(self._get_on_device_or_primary(), value, **kwargs)\n\n # TODO(b/154017756): Make assign behaivor in cross replica context consistent\n # with MirroredVariable.\n def assign_sub(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign_sub(value, use_locking, name, read_value)\n with ds_context.enter_or_assert_strategy(self._distribute_strategy):\n if ds_context.in_cross_replica_context() and not _in_update_replica():\n return values_util.on_read_assign_sub_cross_replica(\n self, value, read_value=read_value)\n else:\n return super(SyncOnReadVariable,\n self).assign_sub(value, use_locking, name, read_value)\n\n def assign_add(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign_add(value, use_locking, name, read_value)\n with ds_context.enter_or_assert_strategy(self._distribute_strategy):\n if ds_context.in_cross_replica_context() and not _in_update_replica():\n return values_util.on_read_assign_add_cross_replica(\n self, value, read_value=read_value)\n else:\n return super(SyncOnReadVariable,\n self).assign_add(value, use_locking, name, read_value)\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign(value, use_locking, name, read_value)\n 
with ds_context.enter_or_assert_strategy(self._distribute_strategy):\n if ds_context.in_cross_replica_context() and not _in_update_replica():\n return values_util.on_read_assign_cross_replica(\n self, value, read_value=read_value)\n else:\n return super(SyncOnReadVariable,\n self).assign(value, use_locking, name, read_value)\n\n def _scatter_not_implemented(self, method):\n raise NotImplementedError(\n \"Variables with `synchronization=ON_READ` doesn't support `%s`\" %\n method)\n\n def scatter_sub(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_sub(*args, **kwargs)\n self._scatter_not_implemented(\"scatter_sub\")\n\n def scatter_add(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_add(*args, **kwargs)\n self._scatter_not_implemented(\"scatter_add\")\n\n def scatter_mul(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_mul(*args, **kwargs)\n self._scatter_not_implemented(\"scatter_mul\")\n\n def scatter_div(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_div(*args, **kwargs)\n self._scatter_not_implemented(\"scatter_div\")\n\n def scatter_min(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_min(*args, **kwargs)\n self._scatter_not_implemented(\"scatter_min\")\n\n def scatter_max(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_max(*args, **kwargs)\n self._scatter_not_implemented(\"scatter_max\")\n\n def scatter_update(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_update(*args, **kwargs)\n self._scatter_not_implemented(\"scatter_update\")\n\n def value(self):\n if values_util.is_saving_non_distributed():\n return self._primary.value()\n with ds_context.enter_or_assert_strategy(self._distribute_strategy):\n if ds_context.in_cross_replica_context() and not _in_update_replica():\n if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:\n return self._get_replica(0).value()\n return self._get_cross_replica()\n else:\n # _get_on_device_or_primary() returns a Variable.\n return self._get_on_device_or_primary().value()\n\n def _get_cross_replica(self):\n if values_util.is_saving_non_distributed():\n return self._primary.read_value()\n if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:\n # Consider returning a tensor value here to make the return value of\n # _get_cross_replica consistent.\n return self._get_replica(0)\n\n with ds_context.enter_or_assert_strategy(self._distribute_strategy):\n return self._distribute_strategy.reduce(\n reduce_util.ReduceOp.from_variable_aggregation(self._aggregation),\n self,\n axis=None)\n\n def _as_graph_element(self):\n if values_util.is_saving_non_distributed():\n return self._primary._as_graph_element() # pylint: disable=protected-access\n # pylint: disable=protected-access\n with ds_context.enter_or_assert_strategy(self._distribute_strategy):\n if ds_context.in_cross_replica_context():\n return ops.convert_to_tensor(self._get_cross_replica())\n return self._get()._as_graph_element()\n\n def _gather_saveables_for_checkpoint(self):\n \"\"\"Overrides Trackable method.\n\n This allows both name-based and object-based save and restore of\n `SyncOnReadVariable`s.\n\n Returns:\n A dictionary mapping attribute names to `SaveableObject` factories.\n \"\"\"\n\n def 
_saveable_factory(name=self._common_name):\n return _SyncOnReadSaveable(self, name)\n\n return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}\n\n\n# Register a conversion functions which reads the value of the variable,\n# allowing instances of the class to be used as tensors.\n# DistributedVariable\ndef _tensor_conversion_distributed_var(var, dtype=None, name=None,\n as_ref=False):\n return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access\n\n\nops.register_tensor_conversion_function(DistributedVariable,\n _tensor_conversion_distributed_var)\n\n\n# MirroredVariables\ndef _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):\n return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access\n\n\nops.register_tensor_conversion_function(MirroredVariable,\n _tensor_conversion_mirrored)\n\n\n# Mirrored Values\ndef _tensor_conversion_mirrored_val(value, dtype=None, name=None, as_ref=False):\n return ops.convert_to_tensor(\n value._get(), dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access\n\n\nops.register_tensor_conversion_function(Mirrored,\n _tensor_conversion_mirrored_val)\n\n\n# SyncOnReadVariables\ndef _tensor_conversion_sync_on_read(var, dtype=None, name=None, as_ref=False):\n return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access\n\n\nops.register_tensor_conversion_function(SyncOnReadVariable,\n _tensor_conversion_sync_on_read)\n\n\nclass VariablePolicy(object):\n \"\"\"Policy defining synchronization and aggregation of a distributed variable.\n\n Given `synchronization` and `aggregation` parameters set on a `tf.Variable`\n during variable creation within `tf.distribute` scope, `tf.distribute` creates\n an appropriate policy object and assigns it to the distributed variable. 
All\n variable operations are delegated to the respective policy object.\n \"\"\"\n\n def __init__(self, aggregation):\n self._aggregation = aggregation\n\n def value(self):\n raise NotImplementedError(\n \"This method should be overridden by sub-classes.\")\n\n def _is_mirrored(self):\n raise NotImplementedError(\n \"This method should be overridden by sub-classes.\")\n\n def _as_graph_element(self, _):\n raise NotImplementedError(\n \"This method should be overridden by sub-classes.\")\n\n def _get_cross_replica(self, var):\n raise NotImplementedError(\n \"This method should be overridden by sub-classes.\")\n\n def _update_replica(self, var, update_fn, value, **kwargs):\n raise NotImplementedError(\n \"This method should be overridden by sub-classes.\")\n\n\nclass OnReadPolicy(VariablePolicy):\n \"\"\"Policy defined for `tf.VariableSynchronization.ON_READ` synchronization.\n\n This policy is created when `synchronization` is set to\n `tf.VariableSynchronization.ON_READ` and `aggregation` is set to any of the\n values allowed by the `tf.VariableAggregation` enum such as `NONE`, `SUM`,\n `MEAN` or `ONLY_FIRST_REPLICA`when creating a `tf.Variable` in `tf.distribute`\n scope.\n \"\"\"\n\n def _is_mirrored(self):\n return False\n\n def value(self, var):\n with ds_context.enter_or_assert_strategy(var.distribute_strategy):\n if ds_context.in_cross_replica_context():\n return var._get_cross_replica() # pylint: disable=protected-access\n else:\n return var._get_on_device_or_primary().value() # pylint: disable=protected-access\n\n def _as_graph_element(self, var):\n with ds_context.enter_or_assert_strategy(var.distribute_strategy):\n if ds_context.in_cross_replica_context():\n return ops.convert_to_tensor(var._get_cross_replica()) # pylint: disable=protected-access\n return var._get()._as_graph_element() # pylint: disable=protected-access\n\n def _get_cross_replica(self, var):\n if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:\n return var._primary # pylint: disable=protected-access\n\n with ds_context.enter_or_assert_strategy(var.distribute_strategy):\n return var.distribute_strategy.reduce(\n reduce_util.ReduceOp.from_variable_aggregation(self._aggregation),\n var,\n axis=None)\n\n def _update_replica(self, var, update_fn, value, **kwargs):\n return update_fn(var._get_on_device_or_primary(), value, **kwargs) # pylint: disable=protected-access\n\n def _scatter_not_implemented(self, method):\n raise NotImplementedError(\n \"ON_READ variables doesn't support `%s` in cross replica context\" %\n method)\n\n def assign_sub(self, var, value, use_locking=False, name=None,\n read_value=True):\n \"\"\"Subtracts a value from this variable.\"\"\"\n with ds_context.enter_or_assert_strategy(var.distribute_strategy):\n if ds_context.in_cross_replica_context():\n return values_util.on_read_assign_sub_cross_replica(\n var, value, read_value=read_value)\n else:\n return values_util.on_write_assign_sub(\n var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign_add(self, var, value, use_locking=False, name=None,\n read_value=True):\n \"\"\"Adds a value to this variable.\"\"\"\n with ds_context.enter_or_assert_strategy(var.distribute_strategy):\n if ds_context.in_cross_replica_context():\n return values_util.on_read_assign_add_cross_replica(\n var, value, read_value=read_value)\n else:\n return values_util.on_write_assign_add(\n var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign(self, var, value, use_locking=False, name=None, 
read_value=True):\n with ds_context.enter_or_assert_strategy(var.distribute_strategy):\n if ds_context.in_cross_replica_context():\n return values_util.on_read_assign_cross_replica(var, value,\n read_value=read_value)\n else:\n return values_util.on_write_assign(var, value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n\n def scatter_sub(self, *args, **kwargs):\n del args, kwargs\n self._scatter_not_implemented(\"scatter_sub\")\n\n def scatter_add(self, *args, **kwargs):\n del args, kwargs\n self._scatter_not_implemented(\"scatter_add\")\n\n def scatter_mul(self, *args, **kwargs):\n del args, kwargs\n self._scatter_not_implemented(\"scatter_mul\")\n\n def scatter_div(self, *args, **kwargs):\n del args, kwargs\n self._scatter_not_implemented(\"scatter_div\")\n\n def scatter_min(self, *args, **kwargs):\n del args, kwargs\n self._scatter_not_implemented(\"scatter_min\")\n\n def scatter_max(self, *args, **kwargs):\n del args, kwargs\n self._scatter_not_implemented(\"scatter_max\")\n\n def scatter_update(self, *args, **kwargs):\n del args, kwargs\n self._scatter_not_implemented(\"scatter_update\")\n\n def get_saveable(self, var, primary_var, name):\n \"\"\"Create a saveable object for the given variable.\"\"\"\n\n # We use a callable so that we don't have to evaluate this expression\n # in the case where we are trying to restore instead of save.\n def tensor():\n strategy = var.distribute_strategy\n return strategy.extended.read_var(var)\n\n spec = saveable_object.SaveSpec(\n tensor=tensor,\n slice_spec=\"\",\n name=name,\n dtype=var.dtype,\n device=primary_var.device)\n\n return tensor, [spec]\n\n def get_restore_ops(self, var, tensor):\n \"\"\"Restore the same value into all variables.\"\"\"\n # To preserve the sum across save and restore, we have to divide the\n # total across all devices when restoring a variable that was summed\n # when saving.\n if self._aggregation == vs.VariableAggregation.SUM:\n strategy = var._distribute_strategy # pylint: disable=protected-access\n num_replicas_in_sync = strategy.num_replicas_in_sync\n tensor = math_ops.cast(tensor / num_replicas_in_sync, var.dtype)\n return control_flow_ops.group(\n tuple(\n values_util.assign_on_device(v.device, v, tensor)\n for v in var.values))\n\n\nclass AutoPolicy(VariablePolicy):\n \"\"\"Policy defined for `tf.VariableSynchronization.AUTO` synchronization.\n\n This policy is created when `synchronization` is set to\n `tf.VariableSynchronization.AUTO` and `aggregation` is set to\n `tf.VariableAggregation.NONE` when creating a `tf.Variable` in `tf.distribute`\n scope.\n \"\"\"\n\n def _is_mirrored(self):\n return True\n\n def value(self, var):\n return var._get_on_device_or_primary().value() # pylint: disable=protected-access\n\n def _as_graph_element(self, var):\n return var._get_on_device_or_primary()._as_graph_element() # pylint: disable=protected-access\n\n def _get_cross_replica(self, var):\n # Return identity, to avoid directly exposing the variable to the user and\n # allowing it to be modified by mistake.\n return array_ops.identity(Mirrored._get_cross_replica(var)) # pylint: disable=protected-access\n\n def _update_replica(self, var, update_fn, value, **kwargs):\n return update_fn(var._get_on_device_or_primary(), value, **kwargs) # pylint: disable=protected-access\n\n def assign(self, var, value, use_locking=False, name=None, read_value=True):\n return values_util.on_write_assign(var, value, use_locking=use_locking,\n name=name, read_value=read_value)\n\n def assign_add(self, var, value, 
use_locking=False, name=None,\n read_value=True):\n return values_util.on_write_assign_add(var, value, use_locking=use_locking,\n name=name, read_value=read_value)\n\n def assign_sub(self, var, value, use_locking=False, name=None,\n read_value=True):\n return values_util.on_write_assign_sub(var, value, use_locking=use_locking,\n name=name, read_value=read_value)\n\n def scatter_sub(self, var, sparse_delta, use_locking=False, name=None):\n return values_util.scatter_sub(var, sparse_delta, use_locking=use_locking,\n name=name)\n\n def scatter_add(self, var, sparse_delta, use_locking=False, name=None):\n return values_util.scatter_add(var, sparse_delta, use_locking=use_locking,\n name=name)\n\n def scatter_mul(self, var, sparse_delta, use_locking=False, name=None):\n return values_util.scatter_mul(var, sparse_delta, use_locking=use_locking,\n name=name)\n\n def scatter_div(self, var, sparse_delta, use_locking=False, name=None):\n return values_util.scatter_div(var, sparse_delta, use_locking=use_locking,\n name=name)\n\n def scatter_min(self, var, sparse_delta, use_locking=False, name=None):\n if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and\n self._aggregation != vs.VariableAggregation.NONE):\n raise NotImplementedError(values_util.scatter_error_msg.format(\n op_name=\"scatter_min\", aggregation=self._aggregation))\n return values_util.scatter_min(var, sparse_delta, use_locking=use_locking,\n name=name)\n\n def scatter_max(self, var, sparse_delta, use_locking=False, name=None):\n if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and\n self._aggregation != vs.VariableAggregation.NONE):\n raise NotImplementedError(values_util.scatter_error_msg.format(\n op_name=\"scatter_max\", aggregation=self._aggregation))\n return values_util.scatter_max(var, sparse_delta, use_locking=use_locking,\n name=name)\n\n def scatter_update(self, var, sparse_delta, use_locking=False, name=None):\n if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and\n self._aggregation != vs.VariableAggregation.NONE):\n raise NotImplementedError(values_util.scatter_error_msg.format(\n op_name=\"scatter_update\", aggregation=self._aggregation))\n return values_util.scatter_update(var, sparse_delta,\n use_locking=use_locking,\n name=name)\n\n def get_saveable(self, var, primary_var, name):\n del var, name\n return primary_var, \"\"\n\n def get_restore_ops(self, var, tensor):\n return control_flow_ops.group(\n tuple(\n values_util.assign_on_device(v.device, v, tensor)\n for v in var.values))\n\n\nclass OnWritePolicy(AutoPolicy):\n \"\"\"Policy defined for `tf.VariableSynchronization.ON_WRITE` synchronization.\n\n This policy is created when the following `synchronization` and\n `aggregation` parameters are specified when creating a `tf.Variable` in\n `tf.distribute` scope:\n * `synchronization` is equal to `tf.VariableSynchronization.AUTO` and\n aggregation can be any of the following `tf.VariableAggregation` enum\n values such as `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`.\n * `synchronization` is equal to `tf.VariableSynchronization.ON_WRITE` and\n aggregation can be any of the following `tf.VariableAggregation` enum\n values such as `NONE`, `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`.\n \"\"\"\n\n def _update_replica(self, var, update_fn, value, **kwargs):\n return _on_write_update_replica(var, update_fn, value, **kwargs)\n\n\n# Utility functions\n# Return True if the Value is Mirrored or the Variable is replicated and kept in\n# sync.\ndef _is_mirrored(val):\n if isinstance(val, 
DistributedVariable):\n if val._policy: # pylint: disable=protected-access\n return val._policy._is_mirrored() # pylint: disable=protected-access\n return isinstance(val, Mirrored)\n\n\ndef _is_sync_on_read(val):\n if isinstance(val, DistributedVariable):\n if val._policy: # pylint: disable=protected-access\n return not val._policy._is_mirrored() # pylint: disable=protected-access\n return not isinstance(val, Mirrored)\n\n\ndef _in_update_replica():\n return distribute_lib.get_update_replica_id() is not None\n",
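The `DistributedValues` docstring in the file above walks through creating per-replica values from a distributed dataset, passing them into `Strategy.run`, reducing them, and inspecting local results. A minimal end-to-end sketch of that documented flow, assuming TF 2.x and the same two-GPU `MirroredStrategy` used in the doctests (the `step` function name and variable names are illustrative, not taken from the row above):

import tensorflow as tf

# Two logical replicas, as in the class docstring; adjust to available devices.
strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])

# A distributed dataset yields one PerReplica (DistributedValues) batch per step.
dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
distributed_values = next(iter(strategy.experimental_distribute_dataset(dataset)))

@tf.function
def step(x):
  # Runs once per replica; each replica sees only its own component tensor.
  return x + 1.0

per_replica = strategy.run(step, args=(distributed_values,))

# Either reduce across replicas or inspect the per-replica components.
total = strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=0)
local = strategy.experimental_local_results(per_replica)

The reduce and local-results calls correspond to usage examples 4 and 5 in the class docstring above.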
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Computes a header file to be used with SELECTIVE_REGISTRATION.\n\nSee the executable wrapper, print_selective_registration_header.py, for more\ninformation.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nimport sys\n\nfrom google.protobuf import text_format\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.python import _pywrap_kernel_registry\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging\n\n# Usually, we use each graph node to induce registration of an op and\n# corresponding kernel; nodes without a corresponding kernel (perhaps due to\n# attr types) generate a warning but are otherwise ignored. Ops in this set are\n# registered even if there's no corresponding kernel.\nOPS_WITHOUT_KERNEL_ALLOWLIST = frozenset([\n # AccumulateNV2 is rewritten away by AccumulateNV2RemovePass; see\n # core/common_runtime/accumulate_n_optimizer.cc.\n 'AccumulateNV2'\n])\nFLEX_PREFIX = b'Flex'\nFLEX_PREFIX_LENGTH = len(FLEX_PREFIX)\n\n\ndef _get_ops_from_ops_list(input_file):\n \"\"\"Gets the ops and kernels needed from the ops list file.\"\"\"\n ops = set()\n ops_list_str = gfile.GFile(input_file, 'r').read()\n if not ops_list_str:\n raise Exception('Input file should not be empty')\n ops_list = json.loads(ops_list_str)\n for op, kernel in ops_list:\n op_and_kernel = (op, kernel if kernel else None)\n ops.add(op_and_kernel)\n return ops\n\n\ndef _get_ops_from_graphdef(graph_def):\n \"\"\"Gets the ops and kernels needed from the tensorflow model.\"\"\"\n ops = set()\n for node_def in graph_def.node:\n if not node_def.device:\n node_def.device = '/cpu:0'\n kernel_class = _pywrap_kernel_registry.TryFindKernelClass(\n node_def.SerializeToString())\n op = str(node_def.op)\n if kernel_class or op in OPS_WITHOUT_KERNEL_ALLOWLIST:\n op_and_kernel = (op, str(kernel_class.decode('utf-8'))\n if kernel_class else None)\n ops.add(op_and_kernel)\n else:\n print('Warning: no kernel found for op %s' % node_def.op, file=sys.stderr)\n return ops\n\n\ndef get_ops_and_kernels(proto_fileformat, proto_files, default_ops_str):\n \"\"\"Gets the ops and kernels needed from the model files.\"\"\"\n ops = set()\n\n for proto_file in proto_files:\n tf_logging.info('Loading proto file %s', proto_file)\n # Load ops list file.\n if proto_fileformat == 'ops_list':\n ops = ops.union(_get_ops_from_ops_list(proto_file))\n continue\n\n # Load GraphDef.\n file_data = gfile.GFile(proto_file, 'rb').read()\n if proto_fileformat == 'rawproto':\n graph_def = graph_pb2.GraphDef.FromString(file_data)\n else:\n assert proto_fileformat == 'textproto'\n graph_def = text_format.Parse(file_data, graph_pb2.GraphDef())\n ops = ops.union(_get_ops_from_graphdef(graph_def))\n\n # Add default ops.\n 
if default_ops_str and default_ops_str != 'all':\n for s in default_ops_str.split(','):\n op, kernel = s.split(':')\n op_and_kernel = (op, kernel)\n if op_and_kernel not in ops:\n ops.add(op_and_kernel)\n\n return list(sorted(ops))\n\n\ndef get_header_from_ops_and_kernels(ops_and_kernels,\n include_all_ops_and_kernels):\n \"\"\"Returns a header for use with tensorflow SELECTIVE_REGISTRATION.\n\n Args:\n ops_and_kernels: a set of (op_name, kernel_class_name) pairs to include.\n include_all_ops_and_kernels: if True, ops_and_kernels is ignored and all op\n kernels are included.\n\n Returns:\n the string of the header that should be written as ops_to_register.h.\n \"\"\"\n ops = set(op for op, _ in ops_and_kernels)\n result_list = []\n\n def append(s):\n result_list.append(s)\n\n _, script_name = os.path.split(sys.argv[0])\n append('// This file was autogenerated by %s' % script_name)\n append('#ifndef OPS_TO_REGISTER')\n append('#define OPS_TO_REGISTER')\n\n if include_all_ops_and_kernels:\n append('#define SHOULD_REGISTER_OP(op) true')\n append('#define SHOULD_REGISTER_OP_KERNEL(clz) true')\n append('#define SHOULD_REGISTER_OP_GRADIENT true')\n else:\n line = \"\"\"\n namespace {\n constexpr const char* skip(const char* x) {\n return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x;\n }\n\n constexpr bool isequal(const char* x, const char* y) {\n return (*skip(x) && *skip(y))\n ? (*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1))\n : (!*skip(x) && !*skip(y));\n }\n\n template<int N>\n struct find_in {\n static constexpr bool f(const char* x, const char* const y[N]) {\n return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1);\n }\n };\n\n template<>\n struct find_in<0> {\n static constexpr bool f(const char* x, const char* const y[]) {\n return false;\n }\n };\n } // end namespace\n \"\"\"\n line += 'constexpr const char* kNecessaryOpKernelClasses[] = {\\n'\n for _, kernel_class in ops_and_kernels:\n if kernel_class is None:\n continue\n line += '\"%s\",\\n' % kernel_class\n line += '};'\n append(line)\n append('#define SHOULD_REGISTER_OP_KERNEL(clz) '\n '(find_in<sizeof(kNecessaryOpKernelClasses) '\n '/ sizeof(*kNecessaryOpKernelClasses)>::f(clz, '\n 'kNecessaryOpKernelClasses))')\n append('')\n\n append('constexpr inline bool ShouldRegisterOp(const char op[]) {')\n append(' return false')\n for op in sorted(ops):\n append(' || isequal(op, \"%s\")' % op)\n append(' ;')\n append('}')\n append('#define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op)')\n append('')\n\n append('#define SHOULD_REGISTER_OP_GRADIENT ' +\n ('true' if 'SymbolicGradient' in ops else 'false'))\n\n append('#endif')\n return '\\n'.join(result_list)\n\n\ndef get_header(graphs,\n proto_fileformat='rawproto',\n default_ops='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'):\n \"\"\"Computes a header for use with tensorflow SELECTIVE_REGISTRATION.\n\n Args:\n graphs: a list of paths to GraphDef files to include.\n proto_fileformat: optional format of proto file, either 'textproto',\n 'rawproto' (default) or ops_list. The ops_list is the file contain the\n list of ops in JSON format, Ex: \"[[\"Transpose\", \"TransposeCpuOp\"]]\".\n default_ops: optional comma-separated string of operator:kernel pairs to\n always include implementation for. Pass 'all' to have all operators and\n kernels included. 
Default: 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'.\n\n Returns:\n the string of the header that should be written as ops_to_register.h.\n \"\"\"\n ops_and_kernels = get_ops_and_kernels(proto_fileformat, graphs, default_ops)\n if not ops_and_kernels:\n print('Error reading graph!')\n return 1\n\n return get_header_from_ops_and_kernels(ops_and_kernels, default_ops == 'all')\n",
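The selective-registration module above exposes `get_header(graphs, proto_fileformat, default_ops)`, which returns the text that should be written as `ops_to_register.h`. A hedged usage sketch; the import path `tensorflow.python.tools.selective_registration_header_lib` and the input file `/tmp/frozen_graph.pb` are assumptions for illustration, not taken from the row above:

from tensorflow.python.tools import selective_registration_header_lib  # assumed path

# Build the SELECTIVE_REGISTRATION header from a binary GraphDef ('rawproto');
# 'textproto' and 'ops_list' are the other formats the module documents.
header = selective_registration_header_lib.get_header(
    graphs=["/tmp/frozen_graph.pb"],                    # assumed model file
    proto_fileformat="rawproto",
    default_ops="NoOp:NoOp,_Recv:RecvOp,_Send:SendOp")  # module default

with open("ops_to_register.h", "w") as f:
  f.write(header)

Note that `get_header` returns the integer 1 rather than a header string when no ops could be read from the graphs, so a caller would check the return type before writing the file.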
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python utility script to generate unit test model data.\"\"\"\n\n# Steps to regenerate model test data:\n# TODO(b/158011574): Do these steps in the script here instead of manually.\n# 1.) Run this script\n# 2.) Hexdump the model into a .h/.cc file:\n# xxd -i /tmp/tf_micro_conv_test_model.tflite > /tmp/temp.cc\n# 3.) Copy/replace contents of temp.cc into desired header/source files (e.g.\n# test_conv_model.h/.cc\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\ndef generate_conv_model():\n \"\"\"Creates a basic Keras model and converts to tflite.\n\n This model does not make any relevant classifications. It only exists to\n generate a model that is designed to run on embedded devices.\n \"\"\"\n input_shape = (16, 16, 1)\n\n model = tf.keras.models.Sequential()\n model.add(\n tf.keras.layers.Conv2D(16, 3, activation=\"relu\", input_shape=input_shape))\n model.add(tf.keras.layers.Conv2D(32, 3, activation=\"relu\"))\n model.add(tf.keras.layers.MaxPooling2D(2))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(10))\n model.compile(\n optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n model.summary()\n\n # Test with random data\n data_x = np.random.rand(12, 16, 16, 1)\n data_y = np.random.randint(2, size=(12, 10))\n model.fit(data_x, data_y, epochs=5)\n\n def representative_dataset_gen():\n for _ in range(12):\n yield [np.random.rand(16, 16).reshape(1, 16, 16, 1).astype(np.float32)]\n\n # Now convert to a TFLite model with full int8 quantization:\n converter = tf.lite.TFLiteConverter.from_keras_model(model)\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.int8\n converter.inference_output_type = tf.int8\n converter.representative_dataset = representative_dataset_gen\n\n tflite_model = converter.convert()\n open(\"/tmp/tf_micro_conv_test_model.int8.tflite\", \"wb\").write(tflite_model)\n\n\ndef main(argv):\n del argv # Unused for now\n generate_conv_model()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for 3d convolutional operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import nn_ops\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.eager import context\n\n\ndef GetTestConfigs():\n \"\"\"Get all the valid tests configs to run.\n\n Returns:\n all the valid test configs as tuples of data_format and use_gpu.\n \"\"\"\n test_configs = [(\"NDHWC\", False), (\"NDHWC\", True)]\n if test.is_gpu_available(cuda_only=True):\n # \"NCDHW\" format is only supported on CUDA.\n test_configs += [(\"NCDHW\", True)]\n return test_configs\n\n\nclass Conv3DTest(test.TestCase):\n\n def _DtypesToTest(self, use_gpu):\n # double datatype is currently not supported for convolution ops\n # on the ROCm platform\n optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]\n if use_gpu:\n if not test_util.GpuSupportsHalfMatMulAndConv():\n return optional_float64 + [dtypes.float32]\n else:\n # It is important that float32 comes before float16 here,\n # as we will be using its gradients as reference for fp16 gradients.\n return optional_float64 + [dtypes.float32, dtypes.float16]\n else:\n return optional_float64 + [dtypes.float32, dtypes.float16]\n\n def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,\n padding, data_format, dtype, use_gpu):\n total_size_tensor = np.prod(tensor_in_sizes)\n total_size_filter = np.prod(filter_in_sizes)\n\n # Initializes the input tensor with array containing numbers from 0 to 1.\n # We keep the input tensor values fairly small to avoid overflowing float16\n # during the conv3d.\n x1 = [f * 1.0 / total_size_tensor for f in range(1, total_size_tensor + 1)]\n x2 = [f * 1.0 / total_size_filter for f in range(1, total_size_filter + 1)]\n with self.cached_session(use_gpu=use_gpu):\n t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)\n t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)\n\n if isinstance(stride, collections_abc.Iterable):\n strides = [1] + list(stride) + [1]\n else:\n strides = [1, stride, stride, stride, 1]\n\n if data_format == \"NCDHW\":\n t1 = test_util.NHWCToNCHW(t1)\n strides = test_util.NHWCToNCHW(strides)\n conv = nn_ops.conv3d(t1, t2, strides, padding=padding,\n data_format=data_format)\n if 
data_format == \"NCDHW\":\n conv = test_util.NCHWToNHWC(conv)\n\n return conv\n\n def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,\n expected):\n results = []\n for data_format, use_gpu in GetTestConfigs():\n for dtype in self._DtypesToTest(use_gpu):\n result = self._SetupValuesForDevice(\n tensor_in_sizes,\n filter_in_sizes,\n stride,\n padding,\n data_format,\n dtype,\n use_gpu=use_gpu)\n results.append(result)\n\n with self.cached_session() as sess:\n values = self.evaluate(results)\n for value in values:\n print(\"expected = \", expected)\n print(\"actual = \", value)\n tol = 1e-6\n if value.dtype == np.float16:\n tol = 1e-3\n\n self.assertAllClose(expected, value.flatten(), atol=tol, rtol=tol)\n\n def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,\n stride, dilation, padding, data_format,\n use_gpu):\n total_size_tensor = np.prod(tensor_in_sizes)\n total_size_filter = np.prod(filter_in_sizes)\n\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n x1 = [f * 1.0 for f in range(1, total_size_tensor + 1)]\n x2 = [f * 1.0 for f in range(1, total_size_filter + 1)]\n with self.cached_session(use_gpu=use_gpu):\n t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n t2 = constant_op.constant(x2, shape=filter_in_sizes)\n if isinstance(stride, collections_abc.Iterable):\n strides = list(stride)\n else:\n strides = [stride, stride, stride]\n if data_format == \"NCDHW\":\n t1 = test_util.NHWCToNCHW(t1)\n full_strides = [1, 1] + strides\n full_dilation = [1, 1] + dilation\n else:\n full_strides = [1] + strides + [1]\n full_dilation = [1] + dilation + [1]\n expected = nn_ops.convolution(\n t1,\n t2,\n padding=padding,\n strides=strides,\n dilation_rate=dilation,\n data_format=data_format)\n computed = nn_ops.conv3d(\n t1,\n t2,\n strides=full_strides,\n dilations=full_dilation,\n padding=padding,\n data_format=data_format)\n if data_format == \"NCDHW\":\n expected = test_util.NCHWToNHWC(expected)\n computed = test_util.NCHWToNHWC(computed)\n return expected, computed\n\n def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, stride,\n padding, dilations):\n expected_results = []\n computed_results = []\n default_dilations = (\n dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1)\n for data_format, use_gpu in GetTestConfigs():\n # If any dilation rate is larger than 1, only do test on the GPU\n # because we currently do not have a CPU implementation for arbitrary\n # dilation rates.\n if default_dilations or use_gpu:\n expected, computed = self._ComputeReferenceDilatedConv(\n tensor_in_sizes, filter_in_sizes, stride, dilations, padding,\n data_format, use_gpu)\n expected_results.append(expected)\n computed_results.append(computed)\n tolerance = 1e-2 if use_gpu else 1e-5\n with self.cached_session() as sess:\n expected_values = self.evaluate(expected_results)\n computed_values = self.evaluate(computed_results)\n for e_value, c_value in zip(expected_values, computed_values):\n print(\"expected = \", e_value)\n print(\"actual = \", c_value)\n self.assertAllClose(\n e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=1e-6)\n\n def _CreateNumpyTensor(self, sizes):\n return np.asarray([f * 1.0\n for f in range(1,\n np.prod(sizes) + 1)]).reshape(sizes)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv3DExpandedBatch(self):\n tensor_in_sizes_batch = [10, 2, 3, 1, 3]\n tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 1, 3]\n filter_in_sizes = [1, 1, 1, 3, 3]\n filter_in = 
self._CreateNumpyTensor(filter_in_sizes)\n x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)\n x2 = x1.reshape(tensor_in_sizes_expanded_batch)\n conv1 = nn_ops.conv3d_v2(\n x1, filter_in, strides=[1, 1, 1, 1, 1], padding=\"VALID\")\n conv2 = nn_ops.conv3d_v2(\n x2, filter_in, strides=[1, 1, 1, 1, 1], padding=\"VALID\")\n self.assertEqual(conv1.shape, tensor_in_sizes_batch)\n self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)\n self.assertAllEqual(conv1, self.evaluate(conv2).reshape(conv1.shape))\n\n @test_util.run_in_graph_and_eager_modes\n def testConvolutionClass3DExpandedBatch(self):\n tensor_in_sizes_batch = [10, 2, 3, 1, 3]\n tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 1, 3]\n filter_in_sizes = [1, 1, 1, 3, 3]\n filter_in = self._CreateNumpyTensor(filter_in_sizes)\n x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)\n x2 = x1.reshape(tensor_in_sizes_expanded_batch)\n convolver1 = nn_ops.Convolution(\n input_shape=x1.shape,\n filter_shape=filter_in.shape,\n strides=[1, 1, 1],\n padding=\"VALID\")\n self.assertEqual(convolver1.num_batch_dims, 1)\n convolver2 = nn_ops.Convolution(\n input_shape=x2.shape,\n filter_shape=filter_in.shape,\n strides=[1, 1, 1],\n padding=\"VALID\")\n self.assertEqual(convolver2.num_batch_dims, 2)\n conv1 = convolver1(x1, filter_in)\n conv2 = convolver2(x2, filter_in)\n self.assertEqual(conv1.shape, tensor_in_sizes_batch)\n self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)\n self.assertAllEqual(conv1, self.evaluate(conv2).reshape(conv1.shape))\n\n @test_util.run_in_graph_and_eager_modes\n def testConvolutionWith2SpatialDimensionsAndExpandedBatch(self):\n tensor_in_sizes_batch = [10, 2, 3, 1, 3]\n tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 1, 3]\n filter_in_sizes = [1, 1, 1, 3, 3]\n filter_in = self._CreateNumpyTensor(filter_in_sizes)\n x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)\n x2 = x1.reshape(tensor_in_sizes_expanded_batch)\n conv1 = nn_ops.convolution(\n x1, filter_in, strides=[1, 1, 1], padding=\"VALID\")\n conv2 = nn_ops.convolution(\n x2, filter_in, strides=[1, 1, 1], padding=\"VALID\")\n self.assertEqual(conv1.shape, tensor_in_sizes_batch)\n self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)\n self.assertAllEqual(conv1, self.evaluate(conv2).reshape(conv1.shape))\n\n def testConv3D1x1x1Filter(self):\n expected_output = [\n 0.18518519, 0.22222222, 0.25925926, 0.40740741, 0.5, 0.59259259,\n 0.62962963, 0.77777778, 0.92592593, 0.85185185, 1.05555556, 1.25925926,\n 1.07407407, 1.33333333, 1.59259259, 1.2962963, 1.61111111, 1.92592593\n ]\n\n # These are equivalent to the Conv2D1x1 case.\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 1, 3],\n filter_in_sizes=[1, 1, 1, 3, 3],\n stride=1,\n padding=\"VALID\",\n expected=expected_output)\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 1, 3, 3],\n filter_in_sizes=[1, 1, 1, 3, 3],\n stride=1,\n padding=\"VALID\",\n expected=expected_output)\n self._VerifyValues(\n tensor_in_sizes=[1, 1, 2, 3, 3],\n filter_in_sizes=[1, 1, 1, 3, 3],\n stride=1,\n padding=\"VALID\",\n expected=expected_output)\n\n def testConv3D1x1x1Filter2x1x1Dilation(self):\n ctx = context.context()\n is_eager = ctx is not None and ctx.executing_eagerly()\n if test.is_gpu_available(cuda_only=True) or \\\n (test_util.IsMklEnabled() and is_eager is False):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[1, 3, 6, 1, 1],\n filter_in_sizes=[1, 1, 1, 1, 1],\n stride=1,\n padding=\"VALID\",\n dilations=[2, 1, 1])\n\n # Expected values computed using scipy's correlate function.\n def 
testConv3D2x2x2Filter(self):\n expected_output = [\n 3.77199074, 3.85069444, 3.92939815, 4.2650463, 4.35763889, 4.45023148,\n 6.73032407, 6.89236111, 7.05439815, 7.22337963, 7.39930556, 7.57523148,\n 9.68865741, 9.93402778, 10.17939815, 10.18171296, 10.44097222,\n 10.70023148\n ]\n # expected_shape = [1, 3, 1, 2, 5]\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 2, 3, 3], # b, z, y, x, fin\n filter_in_sizes=[2, 2, 2, 3, 3], # z, y, x, fin, fout\n stride=1,\n padding=\"VALID\",\n expected=expected_output)\n\n def testConv3D2x2x2Filter1x2x1Dilation(self):\n ctx = context.context()\n is_eager = ctx is not None and ctx.executing_eagerly()\n if test.is_gpu_available(cuda_only=True) or \\\n (test_util.IsMklEnabled() and is_eager is False):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[1, 4, 6, 3, 1],\n filter_in_sizes=[2, 2, 2, 1, 1],\n stride=1,\n padding=\"VALID\",\n dilations=[1, 2, 1])\n\n def testConv3DStrides(self):\n expected_output = [\n 0.06071429, 0.08988095, 0.10238095, 0.11488095, 0.12738095, 0.13988095,\n 0.08452381, 0.26071429, 0.35238095, 0.36488095, 0.37738095, 0.38988095,\n 0.40238095, 0.23452381, 0.46071429, 0.61488095, 0.62738095, 0.63988095,\n 0.65238095, 0.66488095, 0.38452381, 1.12738095, 1.48988095, 1.50238095,\n 1.51488095, 1.52738095, 1.53988095, 0.88452381, 1.32738095, 1.75238095,\n 1.76488095, 1.77738095, 1.78988095, 1.80238095, 1.03452381, 1.52738095,\n 2.01488095, 2.02738095, 2.03988095, 2.05238095, 2.06488095, 1.18452381,\n 2.19404762, 2.88988095, 2.90238095, 2.91488095, 2.92738095, 2.93988095,\n 1.68452381, 2.39404762, 3.15238095, 3.16488095, 3.17738095, 3.18988095,\n 3.20238095, 1.83452381, 2.59404762, 3.41488095, 3.42738095, 3.43988095,\n 3.45238095, 3.46488095, 1.98452381\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 5, 8, 7, 1],\n filter_in_sizes=[1, 2, 3, 1, 1],\n stride=[2, 3, 1], # different stride for each spatial dimension\n padding=\"SAME\",\n expected=expected_output)\n\n def testConv3D2x2x2FilterStride2(self):\n expected_output = [\n 3.77199074, 3.85069444, 3.92939815, 9.68865741, 9.93402778, 10.17939815\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 2, 3, 3],\n filter_in_sizes=[2, 2, 2, 3, 3],\n stride=2,\n padding=\"VALID\",\n expected=expected_output)\n\n def testConv3DStride3(self):\n expected_output = [\n 1.51140873, 1.57167659, 1.63194444, 1.56349206, 1.62673611, 1.68998016,\n 1.6155754, 1.68179563, 1.74801587, 1.9280754, 2.01215278, 2.09623016,\n 1.98015873, 2.0672123, 2.15426587, 2.03224206, 2.12227183, 2.21230159,\n 4.4280754, 4.65500992, 4.88194444, 4.48015873, 4.71006944, 4.93998016,\n 4.53224206, 4.76512897, 4.99801587, 4.84474206, 5.09548611, 5.34623016,\n 4.8968254, 5.15054563, 5.40426587, 4.94890873, 5.20560516, 5.46230159\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 6, 7, 8, 2],\n filter_in_sizes=[3, 2, 1, 2, 3],\n stride=3,\n padding=\"VALID\",\n expected=expected_output)\n\n def testConv3D2x2x2FilterStride2Same(self):\n expected_output = [\n 3.77199074, 3.85069444, 3.92939815, 2.0162037, 2.06597222, 2.11574074,\n 9.68865741, 9.93402778, 10.17939815, 4.59953704, 4.73263889, 4.86574074\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 2, 3, 3],\n filter_in_sizes=[2, 2, 2, 3, 3],\n stride=2,\n padding=\"SAME\",\n expected=expected_output)\n\n def _TestConv3DEmptyTensorOutputShape(self):\n \"\"\"Verifies the output shape of the Conv3D op when output tensor is empty.\n\n Args: none\n \"\"\"\n input_shape = [0, 112, 112, 112, 32]\n filter_shape = [3, 3, 3, 32, 64]\n\n output_shape = [0, 112, 112, 112, 64]\n input_data = 1\n 
filter_data = 1\n for data_type in self._DtypesToTest(False):\n input_tensor = constant_op.constant(\n input_data, shape=input_shape, dtype=data_type, name=\"input\")\n filter_tensor = constant_op.constant(\n filter_data, shape=filter_shape, dtype=data_type, name=\"filter\")\n conv = nn_ops.conv3d(\n input_tensor,\n filter_tensor,\n strides=[1, 1, 1, 1, 1],\n dilations=[1, 1, 1, 1, 1],\n padding=\"SAME\",\n data_format=\"NDHWC\",\n name=\"conv\")\n values = self.evaluate(conv)\n self.assertEqual(values.shape, tensor_shape.TensorShape(output_shape))\n\n def testKernelSmallerThanStride(self):\n expected_output = [\n 0.03703704, 0.11111111, 0.25925926, 0.33333333, 0.7037037, 0.77777778,\n 0.92592593, 1.\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 3, 3, 3, 1],\n filter_in_sizes=[1, 1, 1, 1, 1],\n stride=2,\n padding=\"SAME\",\n expected=expected_output)\n self._VerifyValues(\n tensor_in_sizes=[1, 3, 3, 3, 1],\n filter_in_sizes=[1, 1, 1, 1, 1],\n stride=2,\n padding=\"VALID\",\n expected=expected_output)\n\n expected_output = [\n 0.54081633, 0.58017493, 0.28061224, 0.81632653, 0.85568513, 0.40306122,\n 0.41873178, 0.4340379, 0.19642857, 2.46938776, 2.50874636, 1.1377551,\n 2.74489796, 2.78425656, 1.26020408, 1.16873178, 1.1840379, 0.51785714,\n 1.09511662, 1.10604956, 0.44642857, 1.17164723, 1.18258017, 0.47704082,\n 0.3691691, 0.37244898, 0.125\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 7, 7, 7, 1],\n filter_in_sizes=[2, 2, 2, 1, 1],\n stride=3,\n padding=\"SAME\",\n expected=expected_output)\n\n expected_output = [\n 0.540816, 0.580175, 0.816327, 0.855685, 2.469388, 2.508746, 2.744898,\n 2.784257\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 7, 7, 7, 1],\n filter_in_sizes=[2, 2, 2, 1, 1],\n stride=3,\n padding=\"VALID\",\n expected=expected_output)\n\n def testKernelSizeMatchesInputSize(self):\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 1, 2, 1],\n filter_in_sizes=[2, 1, 2, 1, 2],\n stride=1,\n padding=\"VALID\",\n expected=[1.5625, 1.875])\n\n def _ConstructAndTestGradientForConfig(\n self, batch, input_shape, filter_shape, in_depth, out_depth, stride,\n padding, test_input, data_format, use_gpu):\n\n input_planes, input_rows, input_cols = input_shape\n filter_planes, filter_rows, filter_cols = filter_shape\n\n input_shape = [batch, input_planes, input_rows, input_cols, in_depth]\n filter_shape = [\n filter_planes, filter_rows, filter_cols, in_depth, out_depth\n ]\n\n if isinstance(stride, collections_abc.Iterable):\n strides = [1] + list(stride) + [1]\n else:\n strides = [1, stride, stride, stride, 1]\n\n if padding == \"VALID\":\n output_planes = int(\n math.ceil((input_planes - filter_planes + 1.0) / strides[1]))\n output_rows = int(\n math.ceil((input_rows - filter_rows + 1.0) / strides[2]))\n output_cols = int(\n math.ceil((input_cols - filter_cols + 1.0) / strides[3]))\n else:\n output_planes = int(math.ceil(float(input_planes) / strides[1]))\n output_rows = int(math.ceil(float(input_rows) / strides[2]))\n output_cols = int(math.ceil(float(input_cols) / strides[3]))\n output_shape = [batch, output_planes, output_rows, output_cols, out_depth]\n input_size = 1\n for x in input_shape:\n input_size *= x\n filter_size = 1\n for x in filter_shape:\n filter_size *= x\n input_data = [x * 1.0 / input_size for x in range(0, input_size)]\n filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]\n\n for data_type in self._DtypesToTest(use_gpu=use_gpu):\n # TODO(mjanusz): Modify gradient_checker to also provide max relative\n # error and synchronize the tolerance levels 
between the tests for forward\n # and backward computations.\n if data_type == dtypes.float64:\n tolerance = 1e-8\n elif data_type == dtypes.float32:\n tolerance = 5e-3\n elif data_type == dtypes.float16:\n tolerance = 1e-3\n\n with self.cached_session(use_gpu=use_gpu):\n orig_input_tensor = constant_op.constant(\n input_data, shape=input_shape, dtype=data_type, name=\"input\")\n filter_tensor = constant_op.constant(\n filter_data, shape=filter_shape, dtype=data_type, name=\"filter\")\n\n if data_format == \"NCDHW\":\n input_tensor = test_util.NHWCToNCHW(orig_input_tensor)\n new_strides = test_util.NHWCToNCHW(strides)\n else:\n input_tensor = orig_input_tensor\n new_strides = strides\n\n conv = nn_ops.conv3d(\n input_tensor,\n filter_tensor,\n new_strides,\n padding,\n data_format=data_format,\n name=\"conv\")\n\n if data_format == \"NCDHW\":\n conv = test_util.NCHWToNHWC(conv)\n\n self.assertEqual(conv.shape, tensor_shape.TensorShape(output_shape))\n\n if test_input:\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n orig_input_tensor, input_shape, conv, output_shape)\n else:\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n filter_tensor, filter_shape, conv, output_shape)\n\n if data_type != dtypes.float16:\n reference_jacob_t = jacob_t\n err = np.fabs(jacob_t - jacob_n).max()\n else:\n # Compare fp16 theoretical gradients to fp32 theoretical gradients,\n # since fp16 numerical gradients are too imprecise.\n err = np.fabs(jacob_t - reference_jacob_t).max()\n\n print(\"conv3d gradient error = \", err)\n self.assertLess(err, tolerance)\n\n def ConstructAndTestGradient(self, **kwargs):\n for data_format, use_gpu in GetTestConfigs():\n self._ConstructAndTestGradientForConfig(data_format=data_format,\n use_gpu=use_gpu, **kwargs)\n\n @test_util.run_deprecated_v1\n def testInputGradientValidPaddingStrideOne(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(3, 5, 4),\n filter_shape=(3, 3, 3),\n in_depth=2,\n out_depth=3,\n stride=1,\n padding=\"VALID\",\n test_input=True)\n\n @test_util.run_deprecated_v1\n def testFilterGradientValidPaddingStrideOne(self):\n self.ConstructAndTestGradient(\n batch=4,\n input_shape=(4, 6, 5),\n filter_shape=(2, 2, 2),\n in_depth=2,\n out_depth=3,\n stride=1,\n padding=\"VALID\",\n test_input=False)\n\n @test_util.run_deprecated_v1\n def testInputGradientValidPaddingStrideTwo(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(6, 3, 5),\n filter_shape=(3, 3, 3),\n in_depth=2,\n out_depth=3,\n stride=2,\n padding=\"VALID\",\n test_input=True)\n\n @test_util.run_deprecated_v1\n def testFilterGradientValidPaddingStrideTwo(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(7, 6, 5),\n filter_shape=(2, 2, 2),\n in_depth=2,\n out_depth=3,\n stride=2,\n padding=\"VALID\",\n test_input=False)\n\n @test_util.run_deprecated_v1\n def testInputGradientValidPaddingStrideThree(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(3, 7, 6),\n filter_shape=(3, 3, 3),\n in_depth=2,\n out_depth=3,\n stride=3,\n padding=\"VALID\",\n test_input=True)\n\n @test_util.run_deprecated_v1\n def testFilterGradientValidPaddingStrideThree(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(4, 4, 7),\n filter_shape=(4, 4, 4),\n in_depth=2,\n out_depth=3,\n stride=3,\n padding=\"VALID\",\n test_input=False)\n\n @test_util.run_deprecated_v1\n def testInputGradientSamePaddingStrideOne(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(3, 2, 2),\n filter_shape=(3, 2, 1),\n in_depth=2,\n 
out_depth=1,\n stride=1,\n padding=\"SAME\",\n test_input=True)\n\n @test_util.run_deprecated_v1\n def testFilterGradientSamePaddingStrideOne(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(3, 6, 5),\n filter_shape=(2, 2, 2),\n in_depth=2,\n out_depth=3,\n stride=1,\n padding=\"SAME\",\n test_input=False)\n\n @test_util.run_deprecated_v1\n def testInputGradientSamePaddingStrideTwo(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(6, 3, 4),\n filter_shape=(3, 3, 3),\n in_depth=2,\n out_depth=3,\n stride=2,\n padding=\"SAME\",\n test_input=True)\n\n @test_util.run_deprecated_v1\n def testFilterGradientSamePaddingStrideTwo(self):\n self.ConstructAndTestGradient(\n batch=4,\n input_shape=(7, 3, 5),\n filter_shape=(2, 2, 2),\n in_depth=2,\n out_depth=3,\n stride=2,\n padding=\"SAME\",\n test_input=False)\n\n @test_util.run_deprecated_v1\n def testInputGradientSamePaddingStrideThree(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(9, 3, 6),\n filter_shape=(3, 3, 3),\n in_depth=2,\n out_depth=3,\n stride=3,\n padding=\"SAME\",\n test_input=True)\n\n @test_util.run_deprecated_v1\n def testFilterGradientSamePaddingStrideThree(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(9, 4, 7),\n filter_shape=(4, 4, 4),\n in_depth=2,\n out_depth=3,\n stride=3,\n padding=\"SAME\",\n test_input=False)\n\n @test_util.run_deprecated_v1\n def testInputGradientSamePaddingDifferentStrides(self):\n self.ConstructAndTestGradient(\n batch=1,\n input_shape=(5, 8, 7),\n filter_shape=(1, 2, 3),\n in_depth=2,\n out_depth=3,\n stride=[2, 3, 1],\n padding=\"SAME\",\n test_input=True)\n\n @test_util.run_deprecated_v1\n def testFilterGradientKernelSizeMatchesInputSize(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(5, 4, 3),\n filter_shape=(5, 4, 3),\n in_depth=2,\n out_depth=3,\n stride=1,\n padding=\"VALID\",\n test_input=False)\n\n @test_util.run_deprecated_v1\n def testInputGradientKernelSizeMatchesInputSize(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(5, 4, 3),\n filter_shape=(5, 4, 3),\n in_depth=2,\n out_depth=3,\n stride=1,\n padding=\"VALID\",\n test_input=True)\n\n def disabledtestFilterGradientSamePaddingDifferentStrides(self):\n self.ConstructAndTestGradient(\n batch=1,\n input_shape=(5, 8, 7),\n filter_shape=(1, 2, 3),\n in_depth=2,\n out_depth=3,\n stride=[2, 3, 1],\n padding=\"SAME\",\n test_input=False)\n\n # Test the fast path in gemm_pack_rhs/gemm_pack_colmajor_block, when channel\n # dimension is a multiple of packet size.\n @test_util.run_deprecated_v1\n def testInputGradientValidPaddingStrideOneFastPath(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(3, 5, 4),\n filter_shape=(2, 2, 2),\n in_depth=8,\n out_depth=2,\n stride=1,\n padding=\"VALID\",\n test_input=True)\n\n @test_util.run_deprecated_v1\n def testFilterGradientValidPaddingStrideOneFastPath(self):\n self.ConstructAndTestGradient(\n batch=2,\n input_shape=(4, 6, 5),\n filter_shape=(2, 2, 2),\n in_depth=8,\n out_depth=2,\n stride=1,\n padding=\"VALID\",\n test_input=False)\n\n # Testing for backprops\n def _RunAndVerifyBackprop(self, input_sizes, filter_sizes, output_sizes,\n strides, dilations, padding, data_format, use_gpu,\n err, mode):\n total_input_size = 1\n total_filter_size = 1\n for s in input_sizes:\n total_input_size *= s\n for s in filter_sizes:\n total_filter_size *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n x1 = [f * 1.0 for f in range(1, total_input_size + 1)]\n 
x2 = [f * 1.0 for f in range(1, total_filter_size + 1)]\n default_dilations = (\n dilations[0] == 1 and dilations[1] == 1 and dilations[2] == 1)\n\n # If any dilation rate is larger than 1, only do test on the GPU\n # because we currently do not have a CPU implementation for arbitrary\n # dilation rates.\n if default_dilations or use_gpu:\n with self.cached_session(use_gpu=use_gpu) as sess:\n if data_format == \"NCDHW\":\n input_sizes = test_util.NHWCToNCHW(input_sizes)\n t1 = constant_op.constant(x1, shape=input_sizes)\n t2 = constant_op.constant(x2, shape=filter_sizes)\n full_strides = [1] + strides + [1]\n full_dilations = [1] + dilations + [1]\n if data_format == \"NCDHW\":\n full_strides = test_util.NHWCToNCHW(full_strides)\n full_dilations = test_util.NHWCToNCHW(full_dilations)\n actual = nn_ops.conv3d(\n t1,\n t2,\n strides=full_strides,\n dilations=full_dilations,\n padding=padding,\n data_format=data_format)\n expected = nn_ops.convolution(\n t1,\n t2,\n padding=padding,\n strides=strides,\n dilation_rate=dilations,\n data_format=data_format)\n if data_format == \"NCDHW\":\n actual = test_util.NCHWToNHWC(actual)\n expected = test_util.NCHWToNHWC(expected)\n actual_grad = gradients_impl.gradients(actual, t1\n if mode == \"input\" else t2)[0]\n expected_grad = gradients_impl.gradients(expected, t1\n if mode == \"input\" else t2)[0]\n # \"values\" consists of two tensors for two backprops\n actual_value = self.evaluate(actual_grad)\n expected_value = self.evaluate(expected_grad)\n self.assertShapeEqual(actual_value, actual_grad)\n self.assertShapeEqual(expected_value, expected_grad)\n print(\"expected = \", expected_value)\n print(\"actual = \", actual_value)\n self.assertArrayNear(expected_value.flatten(), actual_value.flatten(),\n err)\n\n @test_util.run_deprecated_v1\n def testConv3D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):\n if test.is_gpu_available(cuda_only=True):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackprop(\n input_sizes=[1, 3, 6, 1, 1],\n filter_sizes=[2, 2, 1, 1, 1],\n output_sizes=[1, 1, 5, 1, 1],\n strides=[1, 1, 1],\n dilations=[2, 1, 1],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5,\n mode=\"filter\")\n\n @test_util.run_deprecated_v1\n def testConv3D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):\n if test.is_gpu_available(cuda_only=True):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackprop(\n input_sizes=[1, 3, 6, 1, 1],\n filter_sizes=[2, 2, 1, 1, 1],\n output_sizes=[1, 1, 5, 1, 1],\n strides=[1, 1, 1],\n dilations=[2, 1, 1],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5,\n mode=\"input\")\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for special_functions module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.autograph.lang import special_functions\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import list_ops\nfrom tensorflow.python.platform import test\n\n\nclass SpecialFunctionsTest(test.TestCase):\n\n def test_match_staging_level(self):\n some_tensor = constant_op.constant(0)\n tensor_one = special_functions.match_staging_level(1, some_tensor)\n python_one = special_functions.match_staging_level(1, 1)\n with self.cached_session() as sess:\n self.assertTrue(tensor_util.is_tensor(tensor_one))\n self.assertAllEqual(self.evaluate(tensor_one), 1)\n self.assertEqual(python_one, 1)\n\n def test_tensor_list_empty_list(self):\n l = special_functions.tensor_list([],\n element_dtype=dtypes.int32,\n element_shape=())\n sl = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)\n with self.cached_session() as sess:\n self.assertAllEqual(self.evaluate(sl), [])\n\n l = special_functions.tensor_list((),\n element_dtype=dtypes.int32,\n element_shape=())\n sl = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)\n with self.cached_session() as sess:\n self.assertAllEqual(self.evaluate(sl), [])\n\n def test_tensor_list_tensor(self):\n l = special_functions.tensor_list(\n constant_op.constant([], dtype=dtypes.int32))\n sl = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)\n with self.cached_session() as sess:\n self.assertAllEqual(self.evaluate(sl), [])\n\n def test_tensor_list_unsupported_initializer(self):\n with self.assertRaisesRegex(ValueError, 'unknown type'):\n special_functions.tensor_list(np.array([1, 2, 3]))\n\n def test_tensor_list_empty_list_no_type(self):\n with self.assertRaisesRegex(ValueError,\n 'element_dtype and element_shape are required'):\n special_functions.tensor_list([])\n\n def test_tensor_list_from_elements(self):\n elements = [constant_op.constant([1, 2]), constant_op.constant([3, 4])]\n\n l = special_functions.tensor_list(elements)\n sl = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)\n with self.cached_session() as sess:\n self.assertAllEqual(self.evaluate(sl), [[1, 2], [3, 4]])\n\n def test_tensor_list_array_from_elements(self):\n elements = [constant_op.constant([1, 2]), constant_op.constant([3, 4])]\n\n l = special_functions.tensor_list(elements, use_tensor_array=True)\n sl = l.stack()\n with self.cached_session() as sess:\n self.assertAllEqual(self.evaluate(sl), [[1, 2], [3, 4]])\n\n def test_stack(self):\n self.assertEqual(special_functions.stack(1, strict=False), 1)\n self.assertListEqual(\n 
special_functions.stack([1, 2, 3], strict=False), [1, 2, 3])\n # TODO(mdan): This should probably forward to tf.stack.\n self.assertTrue(\n isinstance(\n special_functions.stack(\n [constant_op.constant(1),\n constant_op.constant(2)], strict=False), list))\n\n with self.assertRaises(ValueError):\n special_functions.stack([1, 2, 3])\n\n t = constant_op.constant([1.0, 2.0])\n l = list_ops.tensor_list_from_tensor(\n t, element_shape=constant_op.constant([], dtype=dtypes.int32))\n self.assertTrue(\n tensor_util.is_tensor(\n special_functions.stack(l, element_dtype=dtypes.float32)))\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for overloaded RaggedTensor operators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.platform import googletest\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass RaggedElementwiseOpsTest(test_util.TensorFlowTestCase):\n\n def testOrderingOperators(self):\n x = ragged_factory_ops.constant([[1, 5], [3]])\n y = ragged_factory_ops.constant([[4, 5], [1]])\n self.assertAllEqual((x > y), [[False, False], [True]])\n self.assertAllEqual((x >= y), [[False, True], [True]])\n self.assertAllEqual((x < y), [[True, False], [False]])\n self.assertAllEqual((x <= y), [[True, True], [False]])\n\n def testArithmeticOperators(self):\n x = ragged_factory_ops.constant([[1.0, -2.0], [8.0]])\n y = ragged_factory_ops.constant([[4.0, 4.0], [2.0]])\n self.assertAllEqual(abs(x), [[1.0, 2.0], [8.0]])\n\n self.assertAllEqual((-x), [[-1.0, 2.0], [-8.0]])\n\n self.assertAllEqual((x + y), [[5.0, 2.0], [10.0]])\n self.assertAllEqual((3.0 + y), [[7.0, 7.0], [5.0]])\n self.assertAllEqual((x + 3.0), [[4.0, 1.0], [11.0]])\n\n self.assertAllEqual((x - y), [[-3.0, -6.0], [6.0]])\n self.assertAllEqual((3.0 - y), [[-1.0, -1.0], [1.0]])\n self.assertAllEqual((x + 3.0), [[4.0, 1.0], [11.0]])\n\n self.assertAllEqual((x * y), [[4.0, -8.0], [16.0]])\n self.assertAllEqual((3.0 * y), [[12.0, 12.0], [6.0]])\n self.assertAllEqual((x * 3.0), [[3.0, -6.0], [24.0]])\n\n self.assertAllEqual((x / y), [[0.25, -0.5], [4.0]])\n self.assertAllEqual((y / x), [[4.0, -2.0], [0.25]])\n self.assertAllEqual((2.0 / y), [[0.5, 0.5], [1.0]])\n self.assertAllEqual((x / 2.0), [[0.5, -1.0], [4.0]])\n\n self.assertAllEqual((x // y), [[0.0, -1.0], [4.0]])\n self.assertAllEqual((y // x), [[4.0, -2.0], [0.0]])\n self.assertAllEqual((2.0 // y), [[0.0, 0.0], [1.0]])\n self.assertAllEqual((x // 2.0), [[0.0, -1.0], [4.0]])\n\n self.assertAllEqual((x % y), [[1.0, 2.0], [0.0]])\n self.assertAllEqual((y % x), [[0.0, -0.0], [2.0]])\n self.assertAllEqual((2.0 % y), [[2.0, 2.0], [0.0]])\n self.assertAllEqual((x % 2.0), [[1.0, 0.0], [0.0]])\n\n def testLogicalOperators(self):\n a = ragged_factory_ops.constant([[True, True], [False]])\n b = ragged_factory_ops.constant([[True, False], [False]])\n self.assertAllEqual((~a), [[False, False], [True]])\n\n self.assertAllEqual((a & b), [[True, False], [False]])\n self.assertAllEqual((a & True), [[True, True], [False]])\n self.assertAllEqual((True & b), [[True, False], [False]])\n\n self.assertAllEqual((a | b), [[True, True], [False]])\n self.assertAllEqual((a | False), [[True, True], [False]])\n self.assertAllEqual((False | b), [[True, False], [False]])\n\n self.assertAllEqual((a ^ b), [[False, True], [False]])\n 
self.assertAllEqual((a ^ True), [[False, False], [True]])\n self.assertAllEqual((True ^ b), [[False, True], [True]])\n\n def testDummyOperators(self):\n a = ragged_factory_ops.constant([[True, True], [False]])\n with self.assertRaisesRegex(TypeError,\n 'RaggedTensor may not be used as a boolean.'):\n bool(a)\n with self.assertRaisesRegex(TypeError,\n 'RaggedTensor may not be used as a boolean.'):\n if a:\n pass\n\n\nif __name__ == '__main__':\n googletest.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Memory leak detection utility.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework.python_memory_checker import _PythonMemoryChecker\nfrom tensorflow.python.profiler.traceme import TraceMe\nfrom tensorflow.python.profiler.traceme import traceme_wrapper\nfrom tensorflow.python.util import tf_inspect\n\ntry:\n from tensorflow.python.platform.cpp_memory_checker import _CppMemoryChecker # pylint:disable=g-import-not-at-top\nexcept ImportError:\n _CppMemoryChecker = None\n\n\ndef _get_test_name_best_effort():\n \"\"\"If available, return the current test name. Otherwise, `None`.\"\"\"\n for stack in tf_inspect.stack():\n function_name = stack[3]\n if function_name.startswith('test'):\n try:\n class_name = stack[0].f_locals['self'].__class__.__name__\n return class_name + '.' + function_name\n except: # pylint:disable=bare-except\n pass\n\n return None\n\n\n# TODO(kkb): Also create decorator versions for convenience.\nclass MemoryChecker(object):\n \"\"\"Memory leak detection class.\n\n This is a utility class to detect Python and C++ memory leaks. It's intended\n for both testing and debugging. Basic usage:\n\n >>> # MemoryChecker() context manager tracks memory status inside its scope.\n >>> with MemoryChecker() as memory_checker:\n >>> tensors = []\n >>> for _ in range(10):\n >>> # Simulating `tf.constant(1)` object leak every iteration.\n >>> tensors.append(tf.constant(1))\n >>>\n >>> # Take a memory snapshot for later analysis.\n >>> memory_checker.record_snapshot()\n >>>\n >>> # `report()` generates a html graph file showing allocations over\n >>> # snapshots per every stack trace.\n >>> memory_checker.report()\n >>>\n >>> # This assertion will detect `tf.constant(1)` object leak.\n >>> memory_checker.assert_no_leak_if_all_possibly_except_one()\n\n `record_snapshot()` must be called once every iteration at the same location.\n This is because the detection algorithm relies on the assumption that if there\n is a leak, it's happening similarly on every snapshot.\n \"\"\"\n\n @traceme_wrapper\n def __enter__(self):\n self._trace_me = TraceMe('with MemoryChecker():')\n self._trace_me.__enter__()\n self._python_memory_checker = _PythonMemoryChecker()\n if _CppMemoryChecker:\n self._cpp_memory_checker = _CppMemoryChecker(_get_test_name_best_effort())\n return self\n\n @traceme_wrapper\n def __exit__(self, exc_type, exc_value, traceback):\n if _CppMemoryChecker:\n self._cpp_memory_checker.stop()\n self._trace_me.__exit__(exc_type, exc_value, traceback)\n\n @traceme_wrapper\n def record_snapshot(self):\n \"\"\"Take a memory snapshot for later analysis.\n\n `record_snapshot()` must be called once every iteration at the same\n location. 
This is because the detection algorithm relies on the assumption\n that if there is a leak, it's happening similarly on every snapshot.\n\n The recommended number of `record_snapshot()` calls depends on the testing\n code complexity and the allocation pattern.\n \"\"\"\n self._python_memory_checker.record_snapshot()\n if _CppMemoryChecker:\n self._cpp_memory_checker.record_snapshot()\n\n @traceme_wrapper\n def report(self):\n \"\"\"Generates an HTML graph file showing allocations over snapshots.\n\n It creates a temporary directory and puts all the output files there.\n If this is running under Google internal testing infra, it will use the\n directory provided by the infra instead.\n \"\"\"\n self._python_memory_checker.report()\n if _CppMemoryChecker:\n self._cpp_memory_checker.report()\n\n @traceme_wrapper\n def assert_no_leak_if_all_possibly_except_one(self):\n \"\"\"Raises an exception if a leak is detected.\n\n This algorithm classifies a series of allocations as a leak if it's the same\n type (Python) or it happens at the same stack trace (C++) at every snapshot,\n except possibly for one snapshot.\n \"\"\"\n\n self._python_memory_checker.assert_no_leak_if_all_possibly_except_one()\n if _CppMemoryChecker:\n self._cpp_memory_checker.assert_no_leak_if_all_possibly_except_one()\n\n @traceme_wrapper\n def assert_no_new_python_objects(self, threshold=None):\n \"\"\"Raises an exception if there are new Python objects created.\n\n It computes the number of new Python objects per type using the first and\n the last snapshots.\n\n Args:\n threshold: A dictionary of [Type name string], [count] pairs. It won't\n raise an exception if the new Python objects are under this threshold.\n \"\"\"\n self._python_memory_checker.assert_no_new_objects(threshold=threshold)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Script to test TF-TensorRT conversion of CombinedNMS op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.compiler.tf2tensorrt._pywrap_py_utils import get_linked_tensorrt_version\nfrom tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import image_ops_impl\nfrom tensorflow.python.platform import test\n\n\nclass CombinedNmsTest(trt_test.TfTrtIntegrationTestBase):\n \"\"\"Test for CombinedNMS op in TF-TRT.\"\"\"\n\n def GraphFn(self, boxes, scores):\n max_output_size_per_class = 3\n max_total_size = 3\n score_threshold = 0.1\n iou_threshold = 0.5\n # Shapes\n max_output_size_per_class_tensor = constant_op.constant(\n max_output_size_per_class,\n dtype=dtypes.int32,\n name='max_output_size_per_class')\n max_total_size_tensor = constant_op.constant(\n max_total_size, dtype=dtypes.int32, name='max_total_size')\n iou_threshold_tensor = constant_op.constant(\n iou_threshold, dtype=dtypes.float32, name='iou_threshold')\n score_threshold_tensor = constant_op.constant(\n score_threshold, dtype=dtypes.float32, name='score_threshold')\n nms_output = image_ops_impl.combined_non_max_suppression(\n boxes,\n scores,\n max_output_size_per_class_tensor,\n max_total_size_tensor,\n iou_threshold_tensor,\n score_threshold_tensor,\n name='combined_nms')\n return [\n array_ops.identity(output, name=('output_%d' % i))\n for i, output in enumerate(nms_output)\n ]\n\n def GetParams(self):\n # Parameters\n q = 1\n batch_size = 1\n num_boxes = 200\n num_classes = 2\n max_total_size = 3\n\n boxes_shape = [batch_size, num_boxes, q, 4]\n scores_shape = [batch_size, num_boxes, num_classes]\n nmsed_boxes_shape = [batch_size, max_total_size, 4]\n nmsed_scores_shape = [batch_size, max_total_size]\n nmsed_classes_shape = [batch_size, max_total_size]\n valid_detections_shape = [batch_size]\n return self.BuildParams(self.GraphFn, dtypes.float32,\n [boxes_shape, scores_shape], [\n nmsed_boxes_shape, nmsed_scores_shape,\n nmsed_classes_shape, valid_detections_shape\n ])\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return {\n 'TRTEngineOp_0': [\n 'combined_nms/CombinedNonMaxSuppression',\n 'max_output_size_per_class', 'max_total_size', 'iou_threshold',\n 'score_threshold'\n ]\n }\n\n def ShouldRunTest(self, run_params):\n # There is no CombinedNonMaxSuppression op for GPU at the moment, so\n # calibration will fail.\n # TODO(laigd): fix this.\n # Only run for TRT 5.1 and above.\n ver = get_linked_tensorrt_version()\n return (ver[0] > 5 or\n (ver[0] == 5 and ver[1] >= 1)) and not 
trt_test.IsQuantizationMode(\n run_params.precision_mode), 'test >=TRT5.1 and non-INT8'\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# RUN: %p/keras | FileCheck %s\n\n# pylint: disable=missing-docstring,line-too-long\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common\n\n\ndef mnist_model():\n \"\"\"Creates a MNIST model.\"\"\"\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation='relu'))\n model.add(tf.keras.layers.Dense(10, activation='softmax'))\n return model\n\n\nclass TestModule(tf.Module):\n\n def __init__(self):\n super(TestModule, self).__init__()\n self.model = mnist_model()\n\n # CHECK: func {{@[a-zA-Z_0-9]+}}(%arg0: tensor<1x28x28x1xf32> {tf._user_specified_name = \"x\", tf_saved_model.index_path = [0]}\n # CHECK: attributes {{.*}} tf_saved_model.exported_names = [\"my_predict\"]\n @tf.function(input_signature=[\n tf.TensorSpec([1, 28, 28, 1], tf.float32),\n ])\n def my_predict(self, x):\n return self.model(x)\n\n\nif __name__ == '__main__':\n common.do_test(TestModule, exported_names=['my_predict'])\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.math_ops.matrix_inverse.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import benchmark\nfrom tensorflow.python.platform import test\n\n\nclass InverseOpTest(test.TestCase):\n\n def _verifyInverse(self, x, np_type):\n for adjoint in False, True:\n y = x.astype(np_type)\n with self.cached_session(use_gpu=True):\n # Verify that x^{-1} * x == Identity matrix.\n inv = linalg_ops.matrix_inverse(y, adjoint=adjoint)\n tf_ans = math_ops.matmul(inv, y, adjoint_b=adjoint)\n np_ans = np.identity(y.shape[-1])\n if x.ndim > 2:\n tiling = list(y.shape)\n tiling[-2:] = [1, 1]\n np_ans = np.tile(np_ans, tiling)\n out = self.evaluate(tf_ans)\n self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-3)\n self.assertShapeEqual(y, tf_ans)\n\n def _verifyInverseReal(self, x):\n for np_type in [np.float32, np.float64]:\n self._verifyInverse(x, np_type)\n\n def _verifyInverseComplex(self, x):\n for np_type in [np.complex64, np.complex128]:\n self._verifyInverse(x, np_type)\n\n def _makeBatch(self, matrix1, matrix2):\n matrix_batch = np.concatenate(\n [np.expand_dims(matrix1, 0),\n np.expand_dims(matrix2, 0)])\n matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])\n return matrix_batch\n\n def testNonsymmetric(self):\n # 2x2 matrices\n matrix1 = np.array([[1., 2.], [3., 4.]])\n matrix2 = np.array([[1., 3.], [3., 5.]])\n self._verifyInverseReal(matrix1)\n self._verifyInverseReal(matrix2)\n # A multidimensional batch of 2x2 matrices\n self._verifyInverseReal(self._makeBatch(matrix1, matrix2))\n matrix1 = matrix1.astype(np.complex64)\n matrix1 += 1j * matrix1\n matrix2 = matrix2.astype(np.complex64)\n matrix2 += 1j * matrix2\n self._verifyInverseComplex(matrix1)\n self._verifyInverseComplex(matrix2)\n # Complex batch\n self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))\n\n def testSymmetricPositiveDefinite(self):\n # 2x2 matrices\n matrix1 = np.array([[2., 1.], [1., 2.]])\n matrix2 = np.array([[3., -1.], [-1., 3.]])\n self._verifyInverseReal(matrix1)\n self._verifyInverseReal(matrix2)\n # A multidimensional batch of 2x2 matrices\n self._verifyInverseReal(self._makeBatch(matrix1, matrix2))\n matrix1 = matrix1.astype(np.complex64)\n matrix1 += 1j * matrix1\n matrix2 = matrix2.astype(np.complex64)\n matrix2 += 1j * matrix2\n 
self._verifyInverseComplex(matrix1)\n self._verifyInverseComplex(matrix2)\n # Complex batch\n self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))\n\n @test_util.deprecated_graph_mode_only\n def testNonSquareMatrix(self):\n # When the inverse of a non-square matrix is attempted we should return\n # an error\n with self.assertRaises(ValueError):\n linalg_ops.matrix_inverse(np.array([[1., 2., 3.], [3., 4., 5.]]))\n\n @test_util.deprecated_graph_mode_only\n def testWrongDimensions(self):\n # The input to the inverse should be at least a 2-dimensional tensor.\n tensor3 = constant_op.constant([1., 2.])\n with self.assertRaises(ValueError):\n linalg_ops.matrix_inverse(tensor3)\n\n def testNotInvertible(self):\n # The input should be invertible.\n with self.cached_session():\n with self.assertRaisesOpError(\"Input is not invertible.\"):\n # All rows of the matrix below add to zero.\n tensor3 = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],\n [0., -1., 1.]])\n linalg_ops.matrix_inverse(tensor3).eval()\n\n def testEmpty(self):\n self._verifyInverseReal(np.empty([0, 2, 2]))\n self._verifyInverseReal(np.empty([2, 0, 0]))\n\n def testRandomSmallAndLarge(self):\n np.random.seed(42)\n for dtype in np.float32, np.float64, np.complex64, np.complex128:\n for batch_dims in [(), (1,), (3,), (2, 2)]:\n for size in 8, 31, 32:\n shape = batch_dims + (size, size)\n matrix = np.random.uniform(\n low=-1.0, high=1.0,\n size=np.prod(shape)).reshape(shape).astype(dtype)\n self._verifyInverseReal(matrix)\n\n @test_util.deprecated_graph_mode_only\n def testConcurrentExecutesWithoutError(self):\n with self.session(use_gpu=True) as sess:\n all_ops = []\n for adjoint_ in True, False:\n matrix1 = random_ops.random_normal([5, 5], seed=42)\n matrix2 = random_ops.random_normal([5, 5], seed=42)\n inv1 = linalg_ops.matrix_inverse(matrix1, adjoint=adjoint_)\n inv2 = linalg_ops.matrix_inverse(matrix2, adjoint=adjoint_)\n all_ops += [inv1, inv2]\n inv = self.evaluate(all_ops)\n self.assertAllEqual(inv[0], inv[1])\n self.assertAllEqual(inv[2], inv[3])\n\n\nclass MatrixInverseBenchmark(test.Benchmark):\n\n shapes = [\n (4, 4),\n (10, 10),\n (16, 16),\n (101, 101),\n (256, 256),\n (1000, 1000),\n (1024, 1024),\n (2048, 2048),\n (513, 4, 4),\n (513, 16, 16),\n (513, 256, 256),\n ]\n\n def _GenerateMatrix(self, shape):\n batch_shape = shape[:-2]\n shape = shape[-2:]\n assert shape[0] == shape[1]\n n = shape[0]\n matrix = np.ones(shape).astype(np.float32) / (\n 2.0 * n) + np.diag(np.ones(n).astype(np.float32))\n return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))\n\n def benchmarkMatrixInverseOp(self):\n for adjoint in False, True:\n for shape in self.shapes:\n with ops.Graph().as_default(), \\\n session.Session(config=benchmark.benchmark_config()) as sess, \\\n ops.device(\"/cpu:0\"):\n matrix = self._GenerateMatrix(shape)\n inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)\n self.evaluate(variables.global_variables_initializer())\n self.run_op_benchmark(\n sess,\n control_flow_ops.group(inv),\n min_iters=25,\n name=\"matrix_inverse_cpu_{shape}_adjoint_{adjoint}\".format(\n shape=shape, adjoint=adjoint))\n\n if test.is_gpu_available(True):\n with ops.Graph().as_default(), \\\n session.Session(config=benchmark.benchmark_config()) as sess, \\\n ops.device(\"/gpu:0\"):\n matrix = self._GenerateMatrix(shape)\n inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)\n self.evaluate(variables.global_variables_initializer())\n self.run_op_benchmark(\n sess,\n control_flow_ops.group(inv),\n min_iters=25,\n 
name=\"matrix_inverse_gpu_{shape}_adjoint_{adjoint}\".format(\n shape=shape, adjoint=adjoint))\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ======================================\n\"\"\"Utilities for XLA-specific Python types.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as _np # Avoids becoming a part of public Tensorflow API.\n\nfrom tensorflow.compiler.xla import xla_data_pb2\nfrom tensorflow.python.framework import dtypes\n\n# Records correspondence between a XLA primitive type and Python/Numpy types.\n#\n# primitive_type: value of type xla_data_pb2.PrimitiveType\n# numpy_dtype: corresponding Numpy \"dtype\" (like np.float32)\n# literal_field_name: name of the field in the LiteralProto message elements\n# of this type go into.\n# literal_field_type: type of the field named 'literal_field_name'.\n#\n# TODO(eliben): figure out how to avoid knowing the extra Python type and the\n# astype cast when writing into Literals.\nTypeConversionRecord = collections.namedtuple('TypeConversionRecord', [\n 'primitive_type', 'numpy_dtype', 'literal_field_name', 'literal_field_type'\n])\n\n# Maps from XLA primitive types to TypeConversionRecord.\nMAP_XLA_TYPE_TO_RECORD = {\n xla_data_pb2.BF16:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.BF16,\n numpy_dtype=dtypes.bfloat16.as_numpy_dtype,\n literal_field_name='bf16s',\n literal_field_type=float),\n xla_data_pb2.F16:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.F16,\n numpy_dtype=_np.float16,\n literal_field_name='f16s',\n literal_field_type=float),\n xla_data_pb2.F32:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.F32,\n numpy_dtype=_np.float32,\n literal_field_name='f32s',\n literal_field_type=float),\n xla_data_pb2.F64:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.F64,\n numpy_dtype=_np.float64,\n literal_field_name='f64s',\n literal_field_type=float),\n xla_data_pb2.S8:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.S8,\n numpy_dtype=_np.int8,\n literal_field_name='s8s',\n literal_field_type=int),\n xla_data_pb2.S16:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.S16,\n numpy_dtype=_np.int16,\n literal_field_name='s16s',\n literal_field_type=int),\n xla_data_pb2.S32:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.S32,\n numpy_dtype=_np.int32,\n literal_field_name='s32s',\n literal_field_type=int),\n xla_data_pb2.S64:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.S64,\n numpy_dtype=_np.int64,\n literal_field_name='s64s',\n literal_field_type=int),\n xla_data_pb2.U8:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.U8,\n numpy_dtype=_np.uint8,\n literal_field_name='s8s',\n literal_field_type=int),\n xla_data_pb2.U16:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.U16,\n numpy_dtype=_np.uint16,\n literal_field_name='s16s',\n literal_field_type=int),\n xla_data_pb2.U32:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.U32,\n numpy_dtype=_np.uint32,\n literal_field_name='s32s',\n 
literal_field_type=int),\n xla_data_pb2.U64:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.U64,\n numpy_dtype=_np.uint64,\n literal_field_name='s64s',\n literal_field_type=int),\n xla_data_pb2.PRED:\n TypeConversionRecord(\n primitive_type=xla_data_pb2.PRED,\n numpy_dtype=_np.bool,\n literal_field_name='preds',\n literal_field_type=bool)\n}\n\n# Maps from Numpy dtypes to TypeConversionRecord.\n# Note the conversion on the key. Numpy has a known issue wherein dtype hashing\n# doesn't work as expected (https://github.com/numpy/numpy/issues/7242). Thus,\n# when keying by dtype in this dict, we use the string form of dtypes.\nMAP_DTYPE_TO_RECORD = {\n str(_np.dtype(record.numpy_dtype)): record\n for record in MAP_XLA_TYPE_TO_RECORD.values()\n}\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport weakref\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\nclass ContextTest(test.TestCase):\n\n def testSetGlobalSeed(self):\n c = context.Context()\n c._set_global_seed(123)\n for t in [np.int32, np.int64, np.uint32, np.uint64]:\n c._set_global_seed(t(123))\n c._set_global_seed(np.array(123, dtype=t))\n c._set_global_seed(ops.convert_to_tensor(123, dtype=t))\n\n def testContextIsDestroyedAfterTensors(self):\n # Create a new context\n new_context = context.Context()\n weak_c = weakref.ref(new_context)\n new_context.ensure_initialized()\n\n # Create a tensor with the new context as default.\n # Make sure to restore the original context.\n original_context = context.context()\n try:\n context._set_context(new_context)\n # Use a 2D tensor so that it is not cached.\n tensor1 = constant_op.constant([[3.]])\n # Produce a tensor as an operation output. 
This uses a different code path\n # from tensors created from Python.\n tensor2 = tensor1 * tensor1\n context._set_context(original_context)\n except:\n context._set_context(original_context)\n raise\n\n # Deleting our context reference should not delete the underlying object.\n del new_context\n self.assertIsNot(weak_c(), None)\n\n # Deleting the first tensor should not delete the context since there is\n # another tensor.\n del tensor1\n self.assertIsNot(weak_c(), None)\n\n # Deleting the last tensor should result in deleting its context.\n del tensor2\n self.assertIs(weak_c(), None)\n\n def testSimpleGraphCollection(self):\n\n @def_function.function\n def f(x):\n return x + constant_op.constant(1.)\n\n with context.collect_graphs() as graphs:\n with ops.device('CPU:0'):\n f(constant_op.constant(1.))\n\n self.assertLen(graphs, 1)\n graph, = graphs\n self.assertIn('CPU:0', graph.node[0].device)\n\n def testGetFunctionDef(self):\n\n @def_function.function\n def f():\n return constant_op.constant(1.)\n\n concrete = f.get_concrete_function()\n function_def = context.get_function_def(concrete.name)\n\n self.assertIsNot(function_def, None)\n\n found_const_node = False\n for node_def in function_def.node_def:\n if node_def.op == 'Const':\n found_const_node = True\n break\n self.assertTrue(found_const_node)\n\n with self.assertRaises(errors.NotFoundError):\n _ = context.get_function_def('this_should_not_be_found')\n\n @test_util.run_gpu_only\n def testGetMemoryUsage(self):\n array_ops.zeros([10]) # Allocate some memory on the GPU.\n self.assertGreater(\n context.context().get_total_memory_usage('GPU:0'), 0)\n\n def testGetMemoryUsageCPU(self):\n with self.assertRaisesRegex(ValueError, 'CPU does not support'):\n context.context().get_total_memory_usage('CPU:0')\n\n def testGetMemoryUsageUnknownDevice(self):\n with self.assertRaisesRegex(ValueError, 'Failed parsing device name'):\n context.context().get_total_memory_usage('unknown_device')\n\n @test_util.run_gpu_only\n def testGetMemoryUsageAmbiguousDevice(self):\n if len(context.context().list_physical_devices('GPU')) < 2:\n self.skipTest('Need at least 2 GPUs')\n with self.assertRaisesRegex(ValueError, 'Multiple devices'):\n context.context().get_total_memory_usage('GPU')\n\n\nif __name__ == '__main__':\n ops.enable_eager_execution()\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for TFCONFIGClusterResolver.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.python import framework\nfrom tensorflow.python.client import session\nfrom tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver import TFConfigClusterResolver\nfrom tensorflow.python.eager.context import LogicalDevice\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import server_lib\n\nmock = test.mock\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass TFConfigClusterResolverTest(test.TestCase):\n\n def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):\n self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())\n self.assertProtoEquals(\n expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())\n self.assertProtoEquals(\n expected_proto,\n server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())\n self.assertProtoEquals(\n expected_proto,\n server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())\n\n def testNormalClusterSpecRead(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]\n },\n \"task\": {\n \"type\": \"ps\",\n \"index\": 0\n }\n }\n \"\"\"\n\n cluster_resolver = TFConfigClusterResolver()\n expected_proto = \"\"\"\n job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }\n tasks { key: 1 value: 'ps1:2222' } }\n job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }\n tasks { key: 1 value: 'worker1:2222' }\n tasks { key: 2 value: 'worker2:2222' } }\n \"\"\"\n actual_cluster_spec = cluster_resolver.cluster_spec()\n self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)\n\n def testAutomaticMasterRead(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]\n },\n \"task\": {\n \"type\": \"ps\",\n \"index\": 0\n }\n }\n \"\"\"\n\n cluster_resolver = TFConfigClusterResolver()\n self.assertEqual('ps0:2222', cluster_resolver.master())\n\n def testSpecifiedTaskTypeAndIndexMasterRead(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]\n },\n \"task\": {\n \"type\": \"ps\",\n \"index\": 0\n }\n }\n \"\"\"\n\n cluster_resolver = TFConfigClusterResolver()\n self.assertEqual('worker1:2222', cluster_resolver.master('worker', 1))\n\n def testSessionMasterRead(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": 
[\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]\n },\n \"session_master\": \"sessionmaster:2222\",\n \"task\": {\n \"type\": \"ps\",\n \"index\": 0\n }\n }\n \"\"\"\n\n cluster_resolver = TFConfigClusterResolver()\n self.assertEqual('sessionmaster:2222', cluster_resolver.master())\n\n def testRpcLayerRead(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]\n },\n \"rpc_layer\": \"grpc\",\n \"task\": {\n \"type\": \"ps\",\n \"index\": 0\n }\n }\n \"\"\"\n\n cluster_resolver = TFConfigClusterResolver()\n self.assertEqual('grpc://ps0:2222', cluster_resolver.master())\n\n def testTaskTypeIndexRpcRead(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]\n },\n \"rpc_layer\": \"grpc\",\n \"task\": {\n \"type\": \"ps\",\n \"index\": 0\n }\n }\n \"\"\"\n\n cluster_resolver = TFConfigClusterResolver()\n self.assertEqual('ps', cluster_resolver.task_type)\n self.assertEqual(0, cluster_resolver.task_id)\n self.assertEqual('grpc', cluster_resolver.rpc_layer)\n\n def testParameterOverrides(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]\n },\n \"rpc_layer\": \"grpc\",\n \"task\": {\n \"type\": \"ps\",\n \"index\": 1\n }\n }\n \"\"\"\n\n cluster_resolver = TFConfigClusterResolver(task_type='ps', task_id=0)\n\n self.assertEqual('grpc://ps0:2222', cluster_resolver.master())\n self.assertEqual('ps', cluster_resolver.task_type)\n self.assertEqual(0, cluster_resolver.task_id)\n\n cluster_resolver.task_type = 'worker'\n cluster_resolver.task_id = 1\n cluster_resolver.rpc_layer = 'test'\n\n self.assertEqual('test://worker1:2222', cluster_resolver.master())\n self.assertEqual('worker', cluster_resolver.task_type)\n self.assertEqual(1, cluster_resolver.task_id)\n self.assertEqual('test', cluster_resolver.rpc_layer)\n\n def testTaskTypeCastToString(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"123456\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]\n },\n \"rpc_layer\": \"grpc\",\n \"task\": {\n \"type\": 123456,\n \"index\": 0\n }\n }\n \"\"\"\n cluster_resolver = TFConfigClusterResolver()\n self.assertEqual('123456', cluster_resolver.task_type)\n\n def testTaskIndexCastToInteger(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]\n },\n \"rpc_layer\": \"grpc\",\n \"task\": {\n \"type\": \"ps\",\n \"index\": \"1\"\n }\n }\n \"\"\"\n cluster_resolver = TFConfigClusterResolver()\n self.assertEqual(1, cluster_resolver.task_id)\n\n def testTaskIndexOverride(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"worker\": [\"worker0:2222\", \"worker1:2222\"]\n },\n \"task\": {\n \"type\": \"worker\",\n \"index\": \"0\"\n }\n }\n \"\"\"\n cluster_resolver = TFConfigClusterResolver(task_id=1)\n self.assertEqual(1, cluster_resolver.task_id)\n\n def testZeroItemsInClusterSpecMasterRead(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {}\n \"\"\"\n\n cluster_resolver = TFConfigClusterResolver()\n self.assertEqual('', cluster_resolver.master())\n\n def testOneItemInClusterSpecMasterRead(self):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"worker\": 
[\"worker0:2222\"]\n }\n }\n \"\"\"\n\n cluster_resolver = TFConfigClusterResolver()\n self.assertEqual('', cluster_resolver.master())\n\n @mock.patch.object(framework.config, 'list_logical_devices')\n @mock.patch.object(session.BaseSession, 'list_devices')\n def testNumAcceleratorsFilterTasksByEnvVar(self, mock_list_devices,\n mock_eager_list_devices):\n os.environ['TF_CONFIG'] = \"\"\"\n {\n \"cluster\": {\n \"worker1\": [\"w10:2222\"],\n \"worker2\": [\"w21:2222\", \"w22:2222\", \"w23:2222\", \"w24:2222\"]\n },\n \"rpc_layer\": \"grpc\",\n \"task\": {\n \"type\": \"worker1\",\n \"index\": \"0\"\n }\n }\n \"\"\"\n\n devices = [\n LogicalDevice('/job:worker1/task:0/device:TPU:0', 'TPU'),\n LogicalDevice('/job:worker1/task:0/device:TPU:1', 'TPU'),\n LogicalDevice('/job:worker1/task:0/device:GPU:0', 'GPU'),\n LogicalDevice('/job:worker1/task:0/device:GPU:1', 'GPU'),\n LogicalDevice('/job:worker2/task:1/device:TPU:2', 'TPU'),\n LogicalDevice('/job:worker2/task:2/device:TPU:3', 'TPU'),\n LogicalDevice('/job:worker2/task:3/device:GPU:2', 'GPU'),\n LogicalDevice('/job:worker2/task:4/device:GPU:3', 'GPU'),\n ]\n device_list = [\n session._DeviceAttributes(d.name, d.device_type, 1024, 0)\n for d in devices\n ]\n mock_eager_list_devices.return_value = devices\n mock_list_devices.return_value = device_list\n\n resolver = TFConfigClusterResolver()\n\n # By default we read from TF_CONFIG\n self.assertEqual(resolver.num_accelerators(), {'TPU': 2, 'GPU': 2})\n\n # Override still works when we want it to\n self.assertEqual(resolver.num_accelerators(task_type='worker2', task_id=3),\n {'GPU': 1})\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the DataFormatVecPermute operator.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.compiler.tests import xla_test\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.platform import test\n\n\nclass XlaDataFormatDimMapTest(xla_test.XLATestCase):\n\n def _test(self, input_data, src_format, dst_format, expected):\n for dtype in {np.int32, np.int64}:\n x = np.array(input_data, dtype=dtype)\n with self.session() as session:\n with self.test_scope():\n placeholder = array_ops.placeholder(dtypes.as_dtype(x.dtype), x.shape)\n param = {placeholder: x}\n output = nn_ops.data_format_dim_map(\n placeholder, src_format=src_format, dst_format=dst_format)\n result = session.run(output, param)\n self.assertAllEqual(result, expected)\n\n def test(self):\n self._test(0, \"NHWC\", \"NCHW\", 0)\n self._test(1, \"NHWC\", \"NCHW\", 2)\n self._test(2, \"NHWC\", \"NCHW\", 3)\n self._test(3, \"NHWC\", \"NCHW\", 1)\n self._test(-1, \"NHWC\", \"NCHW\", 1)\n self._test(-2, \"NHWC\", \"NCHW\", 3)\n self._test(-3, \"NHWC\", \"NCHW\", 2)\n self._test(-4, \"NHWC\", \"NCHW\", 0)\n self._test([1, 3], \"NHWC\", \"NCHW\", [2, 1])\n self._test([1, 3, -2], \"NHWC\", \"NCHW\", [2, 1, 3])\n self._test([1, -3, -2], \"NHWC\", \"NCHW\", [2, 2, 3])\n self._test([[1, -3], [1, -1]], \"NHWC\", \"NCHW\", [[2, 2], [2, 1]])\n\n self._test([1, -3, -2], \"NHWC\", \"NCHW\", [2, 2, 3])\n self._test([-4, -3, -2, -1, 0, 1, 2, 3], \"NHWC\", \"HWNC\",\n [2, 0, 1, 3, 2, 0, 1, 3])\n self._test([-4, -3, -2, -1, 0, 1, 2, 3], \"NHWC\", \"WHCN\",\n [3, 1, 0, 2, 3, 1, 0, 2])\n self._test([-4, -3, -2, -1, 0, 1, 2, 3], \"qwer\", \"rewq\",\n [3, 2, 1, 0, 3, 2, 1, 0])\n\n\nclass XlaPermuteOpTest(xla_test.XLATestCase):\n\n def _runPermuteAndCompare(self, x, src_format, dst_format, expected):\n with self.session() as session:\n with self.test_scope():\n placeholder = array_ops.placeholder(dtypes.as_dtype(x.dtype), x.shape)\n param = {placeholder: x}\n output = nn_ops.data_format_vec_permute(\n placeholder, src_format=src_format, dst_format=dst_format)\n result = session.run(output, param)\n self.assertAllEqual(result, expected)\n\n def testNHWCToNCHW(self):\n for dtype in {np.int32, np.int64}:\n x = np.array([7, 4, 9, 3], dtype=dtype)\n self._runPermuteAndCompare(x, \"NHWC\", \"NCHW\", [7, 3, 4, 9])\n\n def testNHWCToNCHW_Size2(self):\n for dtype in {np.int32, np.int64}:\n x = np.array([4, 9], dtype=dtype)\n self._runPermuteAndCompare(x, \"NHWC\", \"NCHW\", [4, 9])\n\n def testNCHWToNHWC(self):\n for dtype in {np.int32, np.int64}:\n x = np.array([7, 4, 9, 3], dtype=dtype)\n self._runPermuteAndCompare(x, \"NCHW\", \"NHWC\", [7, 9, 3, 
4])\n\n def testNCHWToNHWC_Size2(self):\n for dtype in {np.int32, np.int64}:\n x = np.array([9, 3], dtype=dtype)\n self._runPermuteAndCompare(x, \"NCHW\", \"NHWC\", [9, 3])\n\n def testNHWCToHWNC(self):\n for dtype in {np.int32, np.int64}:\n x = np.array([7, 4, 9, 3], dtype=dtype)\n self._runPermuteAndCompare(x, \"NHWC\", \"HWNC\", [4, 9, 7, 3])\n\n def testHWNCToNHWC(self):\n for dtype in {np.int32, np.int64}:\n x = np.array([7, 4, 9, 3], dtype=dtype)\n self._runPermuteAndCompare(x, \"HWNC\", \"NHWC\", [9, 7, 4, 3])\n\n def testNHWCToNCHW2D(self):\n for dtype in {np.int32, np.int64}:\n x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)\n self._runPermuteAndCompare(x, \"NHWC\", \"NCHW\",\n [[7, 4], [5, 1], [9, 3], [4, 5]])\n\n def testNHWCToHWNC2D(self):\n for dtype in {np.int32, np.int64}:\n x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)\n self._runPermuteAndCompare(x, \"NHWC\", \"HWNC\",\n [[9, 3], [4, 5], [7, 4], [5, 1]])\n\n def testHWNCToNHWC2D(self):\n for dtype in {np.int32, np.int64}:\n x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)\n self._runPermuteAndCompare(x, \"HWNC\", \"NHWC\",\n [[4, 5], [7, 4], [9, 3], [5, 1]])\n\n def testNCHWToNHWC2D(self):\n for dtype in {np.int32, np.int64}:\n x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)\n self._runPermuteAndCompare(x, \"NCHW\", \"NHWC\",\n [[7, 4], [4, 5], [5, 1], [9, 3]])\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"This module customizes `test_combinations` for `tf.keras` related tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import test_combinations\nfrom tensorflow.python.keras import testing_utils\n\nKERAS_MODEL_TYPES = ['functional', 'subclass', 'sequential']\n\n\ndef keras_mode_combinations(mode=None, run_eagerly=None):\n \"\"\"Returns the default test combinations for tf.keras tests.\n\n Note that if tf2 is enabled, then v1 session test will be skipped.\n\n Args:\n mode: List of modes to run the tests. The valid options are 'graph' and\n 'eager'. Default to ['graph', 'eager'] if not specified. If a empty list\n is provide, then the test will run under the context based on tf's\n version, eg graph for v1 and eager for v2.\n run_eagerly: List of `run_eagerly` value to be run with the tests.\n Default to [True, False] if not specified. Note that for `graph` mode,\n run_eagerly value will only be False.\n\n Returns:\n A list contains all the combinations to be used to generate test cases.\n \"\"\"\n if mode is None:\n mode = ['eager'] if tf2.enabled() else ['graph', 'eager']\n if run_eagerly is None:\n run_eagerly = [True, False]\n result = []\n if 'eager' in mode:\n result += combinations.combine(mode=['eager'], run_eagerly=run_eagerly)\n if 'graph' in mode:\n result += combinations.combine(mode=['graph'], run_eagerly=[False])\n return result\n\n\ndef keras_model_type_combinations():\n return combinations.combine(model_type=KERAS_MODEL_TYPES)\n\n\ndef keras_tensor_combinations():\n return combinations.combine(use_keras_tensors=['True', 'False'])\n\n\nclass KerasModeCombination(test_combinations.TestCombination):\n \"\"\"Combination for Keras test mode.\n\n It by default includes v1_session, v2_eager and v2_tf_function.\n \"\"\"\n\n def context_managers(self, kwargs):\n run_eagerly = kwargs.pop('run_eagerly', None)\n\n if run_eagerly is not None:\n return [testing_utils.run_eagerly_scope(run_eagerly)]\n else:\n return []\n\n def parameter_modifiers(self):\n return [test_combinations.OptionalParameter('run_eagerly')]\n\n\nclass KerasModelTypeCombination(test_combinations.TestCombination):\n \"\"\"Combination for Keras model types when doing model test.\n\n It by default includes 'functional', 'subclass', 'sequential'.\n\n Various methods in `testing_utils` to get models will auto-generate a model\n of the currently active Keras model type. 
This allows unittests to confirm\n the equivalence between different Keras models.\n \"\"\"\n\n def context_managers(self, kwargs):\n model_type = kwargs.pop('model_type', None)\n if model_type in KERAS_MODEL_TYPES:\n return [testing_utils.model_type_scope(model_type)]\n else:\n return []\n\n def parameter_modifiers(self):\n return [test_combinations.OptionalParameter('model_type')]\n\n\nclass KerasTensorCombination(test_combinations.TestCombination):\n \"\"\"Combination for whether KerasTensors are being used or not.\n\n It by default includes `True` and `False`:\n running Keras's functional API with KerasTensors\n as the inputs, and without.\n \"\"\"\n\n def context_managers(self, kwargs):\n use_keras_tensors = kwargs.pop('use_keras_tensors', None)\n\n if use_keras_tensors is not None:\n return [testing_utils.use_keras_tensors_scope(use_keras_tensors)]\n else:\n return []\n\n def parameter_modifiers(self):\n return [test_combinations.OptionalParameter('use_keras_tensors')]\n\n\n_defaults = combinations.generate.keywords['test_combinations']\ngenerate = functools.partial(\n combinations.generate,\n test_combinations=_defaults +\n (KerasModeCombination(), KerasModelTypeCombination(),\n KerasTensorCombination()))\ncombine = test_combinations.combine\ntimes = test_combinations.times\nNamedObject = test_combinations.NamedObject\n",
"# Lint as: python2, python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"This tool converts an image file into a CSV data array.\n\nDesigned to help create test inputs that can be shared between Python and\non-device test cases to investigate accuracy issues.\n\nExample usage:\n\npython convert_image_to_csv.py some_image.jpg --width=16 --height=20 \\\n --want_grayscale\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework.errors_impl import NotFoundError\nfrom tensorflow.python.ops import image_ops\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.platform import app\n\n\ndef get_image(width, height, want_grayscale, filepath):\n \"\"\"Returns an image loaded into an np.ndarray with dims [height, width, (3 or 1)].\n\n Args:\n width: Width to rescale the image to.\n height: Height to rescale the image to.\n want_grayscale: Whether the result should be converted to grayscale.\n filepath: Path of the image file..\n\n Returns:\n np.ndarray of shape (height, width, channels) where channels is 1 if\n want_grayscale is true, otherwise 3.\n \"\"\"\n with ops.Graph().as_default():\n with session.Session():\n file_data = io_ops.read_file(filepath)\n channels = 1 if want_grayscale else 3\n image_tensor = image_ops.decode_image(file_data,\n channels=channels).eval()\n resized_tensor = image_ops.resize_images_v2(\n image_tensor, (height, width)).eval()\n return resized_tensor\n\n\ndef array_to_int_csv(array_data):\n \"\"\"Converts all elements in a numerical array to a comma-separated string.\n\n Args:\n array_data: Numerical array to convert.\n\n Returns:\n String containing array values as integers, separated by commas.\n \"\"\"\n flattened_array = array_data.flatten()\n array_as_strings = [item.astype(int).astype(str) for item in flattened_array]\n return ','.join(array_as_strings)\n\n\ndef run_main(_):\n \"\"\"Application run loop.\"\"\"\n parser = argparse.ArgumentParser(\n description='Loads JPEG or PNG input files, resizes them, optionally'\n ' converts to grayscale, and writes out as comma-separated variables,'\n ' one image per row.')\n parser.add_argument(\n 'image_file_names',\n type=str,\n nargs='+',\n help='List of paths to the input images.')\n parser.add_argument(\n '--width', type=int, default=96, help='Width to scale images to.')\n parser.add_argument(\n '--height', type=int, default=96, help='Height to scale images to.')\n parser.add_argument(\n '--want_grayscale',\n action='store_true',\n help='Whether to convert the image to monochrome.')\n args = parser.parse_args()\n\n for image_file_name in args.image_file_names:\n try:\n image_data = get_image(args.width, args.height, args.want_grayscale,\n 
image_file_name)\n print(array_to_int_csv(image_data))\n except NotFoundError:\n sys.stderr.write('Image file not found at {0}\\n'.format(image_file_name))\n sys.exit(1)\n\n\ndef main():\n app.run(main=run_main, argv=sys.argv[:1])\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Benchmark base to run and report benchmark results.\"\"\"\n\nfrom __future__ import absolute_import as _absolute_import\nfrom __future__ import division as _division\nfrom __future__ import print_function as _print_function\n\nimport os\nimport uuid\n\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.profiler import profiler_v2 as profiler\n\nflags.DEFINE_bool(\"xprof\", False, \"Run and report benchmarks with xprof on\")\nflags.DEFINE_string(\"logdir\", \"/tmp/xprof/\", \"Directory to store xprof data\")\n\n\nclass MicroBenchmarksBase(test.Benchmark):\n \"\"\"Run and report benchmark results.\n\n The first run is without any profilng.\n Second run is with xprof and python trace. Third run is with xprof without\n python trace. Note: xprof runs are with fewer iterations.\n \"\"\"\n\n def run_with_xprof(self, enable_python_trace, run_benchmark, func,\n num_iters_xprof, execution_mode, suid):\n if enable_python_trace:\n options = profiler.ProfilerOptions(python_tracer_level=1)\n logdir = os.path.join(flags.FLAGS.logdir, suid + \"_with_python\")\n else:\n options = profiler.ProfilerOptions(python_tracer_level=0)\n logdir = os.path.join(flags.FLAGS.logdir, suid)\n with profiler.Profile(logdir, options):\n total_time = run_benchmark(func, num_iters_xprof, execution_mode)\n us_per_example = float(\"{0:.3f}\".format(total_time * 1e6 / num_iters_xprof))\n return logdir, us_per_example\n\n def run_report(self, run_benchmark, func, num_iters, execution_mode=None):\n \"\"\"Run and report benchmark results.\"\"\"\n total_time = run_benchmark(func, num_iters, execution_mode)\n mean_us = total_time * 1e6 / num_iters\n extras = {\n \"examples_per_sec\": float(\"{0:.3f}\".format(num_iters / total_time)),\n \"us_per_example\": float(\"{0:.3f}\".format(total_time * 1e6 / num_iters))\n }\n\n if flags.FLAGS.xprof:\n suid = str(uuid.uuid4())\n # Re-run with xprof and python trace.\n num_iters_xprof = min(100, num_iters)\n xprof_link, us_per_example = self.run_with_xprof(True, run_benchmark,\n func, num_iters_xprof,\n execution_mode, suid)\n extras[\"xprof link with python trace\"] = xprof_link\n extras[\"us_per_example with xprof and python\"] = us_per_example\n\n # Re-run with xprof but no python trace.\n xprof_link, us_per_example = self.run_with_xprof(False, run_benchmark,\n func, num_iters_xprof,\n execution_mode, suid)\n extras[\"xprof link\"] = xprof_link\n extras[\"us_per_example with xprof\"] = us_per_example\n\n benchmark_name = self._get_benchmark_name()\n self.report_benchmark(\n iters=num_iters, wall_time=mean_us, extras=extras, name=benchmark_name)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables as variables_module\nfrom tensorflow.python.ops.linalg import linalg as linalg_lib\nfrom tensorflow.python.ops.linalg import linear_operator_test_util\nfrom tensorflow.python.platform import test\n\nlinalg = linalg_lib\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SquareLinearOperatorFullMatrixTest(\n linear_operator_test_util.SquareLinearOperatorDerivedClassTest):\n \"\"\"Most tests done in the base class LinearOperatorDerivedClassTest.\"\"\"\n\n def operator_and_matrix(\n self, build_info, dtype, use_placeholder,\n ensure_self_adjoint_and_pd=False):\n shape = list(build_info.shape)\n\n matrix = linear_operator_test_util.random_positive_definite_matrix(\n shape, dtype)\n\n lin_op_matrix = matrix\n\n if use_placeholder:\n lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)\n\n # Set the hints to none to test non-symmetric PD code paths.\n operator = linalg.LinearOperatorFullMatrix(\n lin_op_matrix,\n is_square=True,\n is_self_adjoint=True if ensure_self_adjoint_and_pd else None,\n is_positive_definite=True if ensure_self_adjoint_and_pd else None)\n\n return operator, matrix\n\n def test_is_x_flags(self):\n # Matrix with two positive eigenvalues.\n matrix = [[1., 0.], [1., 11.]]\n operator = linalg.LinearOperatorFullMatrix(\n matrix,\n is_positive_definite=True,\n is_non_singular=True,\n is_self_adjoint=False)\n self.assertTrue(operator.is_positive_definite)\n self.assertTrue(operator.is_non_singular)\n self.assertFalse(operator.is_self_adjoint)\n # Auto-detected.\n self.assertTrue(operator.is_square)\n\n def test_assert_non_singular_raises_if_cond_too_big_but_finite(self):\n with self.cached_session():\n tril = linear_operator_test_util.random_tril_matrix(\n shape=(50, 50), dtype=np.float32)\n diag = np.logspace(-2, 2, 50).astype(np.float32)\n tril = array_ops.matrix_set_diag(tril, diag)\n matrix = self.evaluate(math_ops.matmul(tril, tril, transpose_b=True))\n operator = linalg.LinearOperatorFullMatrix(matrix)\n with self.assertRaisesOpError(\"Singular matrix\"):\n # Ensure that we have finite condition number...just HUGE.\n cond = np.linalg.cond(matrix)\n self.assertTrue(np.isfinite(cond))\n self.assertGreater(cond, 1e12)\n operator.assert_non_singular().run()\n\n def test_assert_non_singular_raises_if_cond_infinite(self):\n with self.cached_session():\n matrix = [[1., 1.], [1., 1.]]\n # We don't pass the is_self_adjoint hint here, which means we take the\n # 
generic code path.\n operator = linalg.LinearOperatorFullMatrix(matrix)\n with self.assertRaisesOpError(\"Singular matrix\"):\n operator.assert_non_singular().run()\n\n def test_assert_self_adjoint(self):\n matrix = [[0., 1.], [0., 1.]]\n operator = linalg.LinearOperatorFullMatrix(matrix)\n with self.cached_session():\n with self.assertRaisesOpError(\"not equal to its adjoint\"):\n operator.assert_self_adjoint().run()\n\n @test_util.disable_xla(\"Assert statements in kernels not supported in XLA\")\n def test_assert_positive_definite(self):\n matrix = [[1., 1.], [1., 1.]]\n operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=True)\n with self.cached_session():\n with self.assertRaises(errors.InvalidArgumentError):\n operator.assert_positive_definite().run()\n\n def test_tape_safe(self):\n matrix = variables_module.Variable([[2.]])\n operator = linalg.LinearOperatorFullMatrix(matrix)\n self.check_tape_safe(operator)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest(\n linear_operator_test_util.SquareLinearOperatorDerivedClassTest):\n \"\"\"Most tests done in the base class LinearOperatorDerivedClassTest.\n\n In this test, the operator is constructed with hints that invoke the use of\n a Cholesky decomposition for solves/determinant.\n \"\"\"\n\n def setUp(self):\n # Increase from 1e-6 to 1e-5. This reduction in tolerance happens,\n # presumably, because we are taking a different code path in the operator\n # and the matrix. The operator uses a Cholesky, the matrix uses standard\n # solve.\n self._atol[dtypes.float32] = 1e-5\n self._rtol[dtypes.float32] = 1e-5\n self._atol[dtypes.float64] = 1e-10\n self._rtol[dtypes.float64] = 1e-10\n\n @staticmethod\n def dtypes_to_test():\n return [dtypes.float32, dtypes.float64]\n\n def operator_and_matrix(\n self, build_info, dtype, use_placeholder,\n ensure_self_adjoint_and_pd=False):\n\n # Matrix is always symmetric and positive definite in this class.\n del ensure_self_adjoint_and_pd\n\n shape = list(build_info.shape)\n\n matrix = linear_operator_test_util.random_positive_definite_matrix(\n shape, dtype, force_well_conditioned=True)\n\n lin_op_matrix = matrix\n\n if use_placeholder:\n lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)\n\n operator = linalg.LinearOperatorFullMatrix(\n lin_op_matrix,\n is_square=True,\n is_self_adjoint=True,\n is_positive_definite=True)\n\n return operator, matrix\n\n def test_is_x_flags(self):\n # Matrix with two positive eigenvalues.\n matrix = [[1., 0.], [0., 7.]]\n operator = linalg.LinearOperatorFullMatrix(\n matrix, is_positive_definite=True, is_self_adjoint=True)\n\n self.assertTrue(operator.is_positive_definite)\n self.assertTrue(operator.is_self_adjoint)\n\n # Should be auto-set\n self.assertTrue(operator.is_non_singular)\n self.assertTrue(operator._can_use_cholesky)\n self.assertTrue(operator.is_square)\n\n @test_util.disable_xla(\"Assert statements in kernels not supported in XLA\")\n def test_assert_non_singular(self):\n matrix = [[1., 1.], [1., 1.]]\n operator = linalg.LinearOperatorFullMatrix(\n matrix, is_self_adjoint=True, is_positive_definite=True)\n with self.cached_session():\n # Cholesky decomposition may fail, so the error is not specific to\n # non-singular.\n with self.assertRaisesOpError(\"\"):\n operator.assert_non_singular().run()\n\n def test_assert_self_adjoint(self):\n matrix = [[0., 1.], [0., 1.]]\n operator = linalg.LinearOperatorFullMatrix(\n matrix, is_self_adjoint=True, 
is_positive_definite=True)\n with self.cached_session():\n with self.assertRaisesOpError(\"not equal to its adjoint\"):\n operator.assert_self_adjoint().run()\n\n @test_util.disable_xla(\"Assert statements in kernels not supported in XLA\")\n def test_assert_positive_definite(self):\n matrix = [[1., 1.], [1., 1.]]\n operator = linalg.LinearOperatorFullMatrix(\n matrix, is_self_adjoint=True, is_positive_definite=True)\n with self.cached_session():\n # Cholesky decomposition may fail, so the error is not specific to\n # non-singular.\n with self.assertRaisesOpError(\"\"):\n operator.assert_positive_definite().run()\n\n def test_tape_safe(self):\n matrix = variables_module.Variable([[2.]])\n operator = linalg.LinearOperatorFullMatrix(\n matrix, is_self_adjoint=True, is_positive_definite=True)\n self.check_tape_safe(operator)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass NonSquareLinearOperatorFullMatrixTest(\n linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):\n \"\"\"Most tests done in the base class LinearOperatorDerivedClassTest.\"\"\"\n\n def operator_and_matrix(\n self, build_info, dtype, use_placeholder,\n ensure_self_adjoint_and_pd=False):\n del ensure_self_adjoint_and_pd\n shape = list(build_info.shape)\n matrix = linear_operator_test_util.random_normal(shape, dtype=dtype)\n\n lin_op_matrix = matrix\n\n if use_placeholder:\n lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)\n\n operator = linalg.LinearOperatorFullMatrix(lin_op_matrix, is_square=True)\n\n return operator, matrix\n\n def test_is_x_flags(self):\n matrix = [[3., 2., 1.], [1., 1., 1.]]\n operator = linalg.LinearOperatorFullMatrix(\n matrix,\n is_self_adjoint=False)\n self.assertEqual(operator.is_positive_definite, None)\n self.assertEqual(operator.is_non_singular, None)\n self.assertFalse(operator.is_self_adjoint)\n self.assertFalse(operator.is_square)\n\n def test_matrix_must_have_at_least_two_dims_or_raises(self):\n with self.assertRaisesRegex(ValueError, \"at least 2 dimensions\"):\n linalg.LinearOperatorFullMatrix([1.])\n\n def test_tape_safe(self):\n matrix = variables_module.Variable([[2., 1.]])\n operator = linalg.LinearOperatorFullMatrix(matrix)\n self.check_tape_safe(operator)\n\n\nif __name__ == \"__main__\":\n linear_operator_test_util.add_tests(SquareLinearOperatorFullMatrixTest)\n linear_operator_test_util.add_tests(NonSquareLinearOperatorFullMatrixTest)\n linear_operator_test_util.add_tests(\n SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest)\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests specific to `Sequential` model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\nclass TestSequential(keras_parameterized.TestCase):\n \"\"\"Most Sequential model API tests are covered in `training_test.py`.\n \"\"\"\n\n @keras_parameterized.run_all_keras_modes\n def test_basic_methods(self):\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(1, input_dim=2))\n model.add(keras.layers.Dropout(0.3, name='dp'))\n model.add(keras.layers.Dense(2, kernel_regularizer='l2',\n kernel_constraint='max_norm'))\n self.assertEqual(len(model.layers), 3)\n self.assertEqual(len(model.weights), 2 * 2)\n self.assertEqual(model.get_layer(name='dp').name, 'dp')\n\n @keras_parameterized.run_all_keras_modes\n def test_input_defined_first_layer(self):\n model = keras.models.Sequential()\n model.add(keras.Input(shape=(2,), name='input_layer'))\n model.add(keras.layers.Dense(1))\n model.add(keras.layers.Dropout(0.3, name='dp'))\n model.add(keras.layers.Dense(2, kernel_regularizer='l2',\n kernel_constraint='max_norm'))\n self.assertLen(model.layers, 3)\n self.assertLen(model.weights, 2 * 2)\n self.assertEqual(model.get_layer(name='dp').name, 'dp')\n\n @keras_parameterized.run_all_keras_modes\n def test_single_layer_in_init(self):\n model = keras.models.Sequential(keras.layers.Dense(1))\n self.assertLen(model.layers, 1)\n\n @keras_parameterized.run_all_keras_modes\n def test_sequential_pop(self):\n num_hidden = 5\n input_dim = 3\n batch_size = 5\n num_classes = 2\n\n model = testing_utils.get_small_sequential_mlp(\n num_hidden, num_classes, input_dim)\n model.compile(\n loss='mse',\n optimizer='rmsprop',\n run_eagerly=testing_utils.should_run_eagerly())\n x = np.random.random((batch_size, input_dim))\n y = np.random.random((batch_size, num_classes))\n model.fit(x, y, epochs=1)\n model.pop()\n self.assertEqual(len(model.layers), 1)\n self.assertEqual(model.output_shape, (None, num_hidden))\n model.compile(\n loss='mse',\n optimizer='rmsprop',\n run_eagerly=testing_utils.should_run_eagerly())\n y = np.random.random((batch_size, num_hidden))\n model.fit(x, y, epochs=1)\n\n # Test popping single-layer model\n model = keras.models.Sequential()\n 
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))\n model.pop()\n self.assertEqual(model.layers, [])\n self.assertEqual(model.outputs, None)\n\n # Invalid use case\n model = keras.models.Sequential()\n with self.assertRaises(TypeError):\n model.pop()\n\n @keras_parameterized.run_all_keras_modes\n def test_sequential_deferred_build_with_np_arrays(self):\n num_hidden = 5\n input_dim = 3\n batch_size = 5\n num_classes = 2\n\n model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)\n model.compile(\n loss='mse',\n optimizer='rmsprop',\n metrics=[keras.metrics.CategoricalAccuracy()],\n run_eagerly=testing_utils.should_run_eagerly())\n self.assertEqual(len(model.layers), 2)\n with self.assertRaisesRegex(\n ValueError, 'Weights for model .* have not yet been created'):\n len(model.weights)\n self.assertFalse(model.built)\n\n x = np.random.random((batch_size, input_dim))\n y = np.random.random((batch_size, num_classes))\n model.fit(x, y, epochs=1)\n self.assertTrue(model.built)\n self.assertEqual(len(model.weights), 2 * 2)\n\n @keras_parameterized.run_all_keras_modes\n def test_sequential_deferred_build_with_dataset_iterators(self):\n num_hidden = 5\n input_dim = 3\n num_classes = 2\n num_samples = 50\n steps_per_epoch = 10\n\n model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)\n model.compile(\n loss='mse',\n optimizer='rmsprop',\n metrics=[keras.metrics.CategoricalAccuracy()],\n run_eagerly=testing_utils.should_run_eagerly())\n self.assertEqual(len(model.layers), 2)\n with self.assertRaisesRegex(\n ValueError, 'Weights for model .* have not yet been created'):\n len(model.weights)\n self.assertFalse(model.built)\n\n x = array_ops.ones((num_samples, input_dim))\n y = array_ops.zeros((num_samples, num_classes))\n dataset = dataset_ops.Dataset.from_tensor_slices((x, y))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10)\n\n model.fit(dataset, epochs=1, steps_per_epoch=steps_per_epoch)\n self.assertTrue(model.built)\n self.assertEqual(len(model.weights), 2 * 2)\n\n # TODO(kaftan) This test fails w/ run_with_all_keras_modes. 
File ticket\n @parameterized.parameters((True,), (False,))\n def test_training_and_eval_methods_on_symbolic_tensors(self, deferred):\n with ops.Graph().as_default(), self.cached_session():\n\n def get_model():\n if deferred:\n model = testing_utils.get_small_sequential_mlp(10, 4)\n else:\n model = testing_utils.get_small_sequential_mlp(10, 4, input_dim=3)\n model.compile(\n optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return model\n\n inputs = keras.backend.zeros(shape=(10, 3))\n targets = keras.backend.zeros(shape=(10, 4))\n\n model = get_model()\n model.fit(inputs, targets, epochs=10, steps_per_epoch=30)\n\n model = get_model()\n model.evaluate(inputs, targets, steps=2, verbose=0)\n\n model = get_model()\n model.predict(inputs, steps=2)\n\n model = get_model()\n model.train_on_batch(inputs, targets)\n\n model = get_model()\n model.test_on_batch(inputs, targets)\n\n model = get_model()\n model.fit(\n inputs,\n targets,\n epochs=1,\n steps_per_epoch=2,\n verbose=0,\n validation_data=(inputs, targets),\n validation_steps=2)\n\n @keras_parameterized.run_all_keras_modes\n def test_invalid_use_cases(self):\n # Added objects must be layer instances\n with self.assertRaises(TypeError):\n model = keras.models.Sequential()\n model.add(None)\n\n @keras_parameterized.run_all_keras_modes\n def test_nested_sequential_trainability(self):\n input_dim = 20\n num_units = 10\n num_classes = 2\n\n inner_model = keras.models.Sequential()\n inner_model.add(keras.layers.Dense(num_units, input_shape=(input_dim,)))\n\n model = keras.models.Sequential()\n model.add(inner_model)\n model.add(keras.layers.Dense(num_classes))\n\n self.assertEqual(len(model.layers), 2)\n\n self.assertEqual(len(model.trainable_weights), 4)\n inner_model.trainable = False\n self.assertEqual(len(model.trainable_weights), 2)\n inner_model.trainable = True\n self.assertEqual(len(model.trainable_weights), 4)\n\n @keras_parameterized.run_all_keras_modes\n def test_sequential_update_disabling(self):\n val_a = np.random.random((10, 4))\n val_out = np.random.random((10, 4))\n\n model = keras.models.Sequential()\n model.add(keras.layers.BatchNormalization(input_shape=(4,)))\n\n model.trainable = False\n model.compile('sgd', 'mse')\n\n x1 = model.predict(val_a)\n model.train_on_batch(val_a, val_out)\n x2 = model.predict(val_a)\n self.assertAllClose(x1, x2, atol=1e-7)\n\n model.trainable = True\n model.compile('sgd', 'mse')\n\n model.train_on_batch(val_a, val_out)\n x2 = model.predict(val_a)\n assert np.abs(np.sum(x1 - x2)) > 1e-5\n\n @keras_parameterized.run_all_keras_modes\n def test_sequential_deferred_build_serialization(self):\n num_hidden = 5\n input_dim = 3\n batch_size = 5\n num_classes = 2\n\n model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)\n model.compile(\n loss='mse',\n optimizer='rmsprop',\n metrics=[keras.metrics.CategoricalAccuracy()],\n run_eagerly=testing_utils.should_run_eagerly())\n self.assertFalse(model.built)\n\n x = np.random.random((batch_size, input_dim))\n y = np.random.random((batch_size, num_classes))\n model.train_on_batch(x, y)\n self.assertTrue(model.built)\n\n config = model.get_config()\n new_model = keras.models.Sequential.from_config(config)\n new_model.compile(\n loss='mse',\n optimizer='rmsprop',\n metrics=[keras.metrics.CategoricalAccuracy()],\n run_eagerly=testing_utils.should_run_eagerly())\n x = np.random.random((batch_size, input_dim))\n y = np.random.random((batch_size, num_classes))\n new_model.train_on_batch(x, y)\n 
self.assertEqual(len(new_model.layers), 2)\n self.assertEqual(len(new_model.weights), 4)\n\n @keras_parameterized.run_all_keras_modes\n def test_sequential_shape_inference_deferred(self):\n model = testing_utils.get_small_sequential_mlp(4, 5)\n output_shape = model.compute_output_shape((None, 7))\n self.assertEqual(tuple(output_shape.as_list()), (None, 5))\n\n @keras_parameterized.run_all_keras_modes\n def test_sequential_build_deferred(self):\n model = testing_utils.get_small_sequential_mlp(4, 5)\n\n model.build((None, 10))\n self.assertTrue(model.built)\n self.assertEqual(len(model.weights), 4)\n\n # Test with nested model\n model = testing_utils.get_small_sequential_mlp(4, 3)\n inner_model = testing_utils.get_small_sequential_mlp(4, 5)\n model.add(inner_model)\n\n model.build((None, 10))\n self.assertTrue(model.built)\n self.assertEqual(len(model.weights), 8)\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_sequential_deferred_manual_build(self):\n model = testing_utils.get_small_sequential_mlp(4, 5)\n self.assertFalse(model.built)\n model(array_ops.zeros([1, 2]))\n self.assertTrue(model.built)\n model.compile(\n 'rmsprop',\n loss='mse',\n run_eagerly=testing_utils.should_run_eagerly())\n model.train_on_batch(np.zeros((1, 2)), np.zeros((1, 5)))\n\n @keras_parameterized.run_all_keras_modes\n def test_sequential_nesting(self):\n model = testing_utils.get_small_sequential_mlp(4, 3)\n inner_model = testing_utils.get_small_sequential_mlp(4, 5)\n model.add(inner_model)\n\n model.compile(\n loss='mse',\n optimizer='rmsprop',\n run_eagerly=testing_utils.should_run_eagerly())\n x = np.random.random((2, 6))\n y = np.random.random((2, 5))\n model.fit(x, y, epochs=1)\n\n @test_util.run_v1_only('Behavior changed in V2.')\n def test_variable_names_deferred(self):\n model = keras.models.Sequential([keras.layers.Dense(3)])\n model.add(keras.layers.Dense(2))\n model(array_ops.ones([2, 4]))\n # Note that for regular sequential models (wrapping graph network),\n # the layers' weights are built\n # without the model name as prefix (because the Functional API __call__\n # reset the name scope). 
This is fixable, but it would be\n # backwards incompatible.\n self.assertEqual(\n ['sequential/dense/kernel:0', 'sequential/dense/bias:0',\n 'sequential/dense_1/kernel:0', 'sequential/dense_1/bias:0'],\n [v.name for v in model.variables])\n\n @keras_parameterized.run_all_keras_modes\n def test_input_assumptions_propagation(self):\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(1))\n if context.executing_eagerly():\n with self.assertRaisesRegex(ValueError,\n 'expected min_ndim=2, found ndim=0'):\n model(1.0)\n\n @keras_parameterized.run_all_keras_modes\n def test_string_input(self):\n seq = keras.Sequential([\n keras.layers.InputLayer(input_shape=(1,), dtype=dtypes.string),\n keras.layers.Lambda(lambda x: x[0])\n ])\n seq.run_eagerly = testing_utils.should_run_eagerly()\n preds = seq.predict([['tensorflow eager']])\n self.assertEqual(preds.shape, (1,))\n\n @keras_parameterized.run_all_keras_modes\n def test_multi_output_layer_not_accepted(self):\n\n class MultiOutputLayer(keras.layers.Layer):\n\n def call(self, inputs):\n return inputs, inputs\n\n with self.assertRaisesRegex(ValueError,\n 'should have a single output tensor'):\n keras.Sequential([MultiOutputLayer(input_shape=(3,))])\n\n with self.assertRaisesRegex(ValueError,\n 'should have a single output tensor'):\n keras.Sequential([\n keras.layers.Dense(1, input_shape=(3,)),\n MultiOutputLayer()])\n\n # Should also raise error in a deferred build mode\n with self.assertRaisesRegex(ValueError,\n 'should have a single output tensor'):\n keras.Sequential([MultiOutputLayer()])(np.zeros((10, 10)))\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_layer_add_after_compile_deferred(self):\n model = keras.Sequential([keras.layers.Dense(3)])\n self.assertFalse(model.built)\n\n model.compile('adam', loss='mse')\n model.fit(np.random.random((1, 3)), np.random.random((1, 3)))\n self.assertTrue(model.built)\n\n model.add(keras.layers.Dense(3))\n\n model.compile('adam', loss='mse')\n model.fit(np.random.random((1, 3)), np.random.random((1, 3)))\n self.assertTrue(model.built)\n\n def test_sequential_layer_tracking(self):\n \"\"\"Test that Sequential only tracks layers added in init or `.add`.\"\"\"\n layer = keras.layers.Dense(1)\n model = keras.Sequential([layer])\n self.assertEqual(model._layers[-1], layer)\n\n model.a = [keras.layers.Dense(3)] # should not be added to the layers list.\n self.assertEqual(model._layers[-1], layer)\n\n layer2 = keras.layers.Dense(2)\n model.add(layer2)\n self.assertEqual(model._layers[-1], layer2)\n\n model.a = [keras.layers.Dense(3)] # should not be added to the layers list.\n self.assertEqual(model._layers[-1], layer2)\n\n model.pop()\n self.assertEqual(model._layers[-1], layer)\n\n def test_config_preserves_input_layer(self):\n model = keras.Sequential([\n keras.Input((None,), name='my_embedding_input', dtype='int32'),\n keras.layers.Embedding(32, 32),\n keras.layers.Dense(3),\n ])\n config = model.get_config()\n new_model = keras.Sequential.from_config(config)\n self.assertTrue(new_model.built)\n self.assertEqual(new_model._layers[0].dtype, 'int32')\n self.assertEqual(new_model._layers[0].name, 'my_embedding_input')\n\n def test_name_unicity(self):\n model = keras.Sequential()\n model.add(keras.layers.Dense(3, name='specific_name'))\n with self.assertRaisesRegex(ValueError, 'should have unique names'):\n model.add(keras.layers.Dense(3, name='specific_name'))\n\n\nclass TestSequentialEagerIntegration(keras_parameterized.TestCase):\n\n 
@keras_parameterized.run_all_keras_modes\n def test_defun_on_call(self):\n # Check that one can subclass Sequential and place the `call` in a `defun`.\n\n class MySequential(keras.Sequential):\n\n def __init__(self, name=None):\n super(MySequential, self).__init__(name=name)\n self.call = function.defun(self.call)\n\n model = MySequential()\n model.add(keras.layers.Dense(4, activation='relu'))\n model.add(keras.layers.Dense(5, activation='softmax'))\n\n model.compile(\n loss='mse',\n optimizer='rmsprop',\n run_eagerly=testing_utils.should_run_eagerly())\n\n x = np.random.random((2, 6))\n y = np.random.random((2, 5))\n model.fit(x, y, epochs=1)\n\n @keras_parameterized.run_all_keras_modes\n def test_build_before_fit(self):\n # Fix for b/112433577\n model = testing_utils.get_small_sequential_mlp(4, 5)\n model.compile(\n loss='mse',\n optimizer='rmsprop',\n run_eagerly=testing_utils.should_run_eagerly())\n\n model.build((None, 6))\n\n x = np.random.random((2, 6))\n y = np.random.random((2, 5))\n model.fit(x, y, epochs=1)\n\n @keras_parameterized.run_all_keras_modes\n def test_build_empty_network(self):\n x = np.random.random((2, 6))\n y = np.random.random((2, 5))\n model = keras.Sequential()\n\n # Make sure an empty sequential model can still work with build().\n model.build((None, 6))\n self.assertTrue(model.built)\n\n model.add(keras.layers.Dense(5, input_shape=(6,)))\n\n model.compile(\n loss='mse',\n optimizer='rmsprop',\n run_eagerly=testing_utils.should_run_eagerly())\n model.fit(x, y)\n\n model.pop()\n self.assertFalse(model.built)\n\n model.build((None, 6))\n self.assertTrue(model.built)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras image dataset loading utilities.\"\"\"\n# pylint: disable=g-classes-have-attributes\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\nimport os\n\nimport numpy as np\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\n\n\ndef index_directory(directory,\n labels,\n formats,\n class_names=None,\n shuffle=True,\n seed=None,\n follow_links=False):\n \"\"\"Make list of all files in the subdirs of `directory`, with their labels.\n\n Args:\n directory: The target directory (string).\n labels: Either \"inferred\"\n (labels are generated from the directory structure),\n or a list/tuple of integer labels of the same size as the number of\n valid files found in the directory. Labels should be sorted according\n to the alphanumeric order of the image file paths\n (obtained via `os.walk(directory)` in Python).\n formats: Allowlist of file extensions to index (e.g. \".jpg\", \".txt\").\n class_names: Only valid if \"labels\" is \"inferred\". This is the explict\n list of class names (must match names of subdirectories). Used\n to control the order of the classes\n (otherwise alphanumerical order is used).\n shuffle: Whether to shuffle the data. Default: True.\n If set to False, sorts the data in alphanumeric order.\n seed: Optional random seed for shuffling.\n follow_links: Whether to visits subdirectories pointed to by symlinks.\n\n Returns:\n tuple (file_paths, labels, class_names).\n file_paths: list of file paths (strings).\n labels: list of matching integer labels (same length as file_paths)\n class_names: names of the classes corresponding to these labels, in order.\n \"\"\"\n inferred_class_names = []\n for subdir in sorted(os.listdir(directory)):\n if os.path.isdir(os.path.join(directory, subdir)):\n inferred_class_names.append(subdir)\n if not class_names:\n class_names = inferred_class_names\n else:\n if set(class_names) != set(inferred_class_names):\n raise ValueError(\n 'The `class_names` passed did not match the '\n 'names of the subdirectories of the target directory. 
'\n 'Expected: %s, but received: %s' %\n (inferred_class_names, class_names))\n class_indices = dict(zip(class_names, range(len(class_names))))\n\n # Build an index of the files\n # in the different class subfolders.\n pool = multiprocessing.pool.ThreadPool()\n results = []\n filenames = []\n for dirpath in (os.path.join(directory, subdir) for subdir in class_names):\n results.append(\n pool.apply_async(index_subdirectory,\n (dirpath, class_indices, follow_links, formats)))\n labels_list = []\n for res in results:\n partial_filenames, partial_labels = res.get()\n labels_list.append(partial_labels)\n filenames += partial_filenames\n if labels != 'inferred':\n if len(labels) != len(filenames):\n raise ValueError('Expected the lengths of `labels` to match the number '\n 'of files in the target directory. len(labels) is %s '\n 'while we found %s files in %s.' % (\n len(labels), len(filenames), directory))\n else:\n i = 0\n labels = np.zeros((len(filenames),), dtype='int32')\n for partial_labels in labels_list:\n labels[i:i + len(partial_labels)] = partial_labels\n i += len(partial_labels)\n\n print('Found %d files belonging to %d classes.' %\n (len(filenames), len(class_names)))\n pool.close()\n pool.join()\n file_paths = [os.path.join(directory, fname) for fname in filenames]\n\n if shuffle:\n # Shuffle globally to erase macro-structure\n if seed is None:\n seed = np.random.randint(1e6)\n rng = np.random.RandomState(seed)\n rng.shuffle(file_paths)\n rng = np.random.RandomState(seed)\n rng.shuffle(labels)\n return file_paths, labels, class_names\n\n\ndef iter_valid_files(directory, follow_links, formats):\n walk = os.walk(directory, followlinks=follow_links)\n for root, _, files in sorted(walk, key=lambda x: x[0]):\n for fname in sorted(files):\n if fname.lower().endswith(formats):\n yield root, fname\n\n\ndef index_subdirectory(directory, class_indices, follow_links, formats):\n \"\"\"Recursively walks directory and list image paths and their class index.\n\n Arguments:\n directory: string, target directory.\n class_indices: dict mapping class names to their index.\n follow_links: boolean, whether to recursively follow subdirectories\n (if False, we only list top-level images in `directory`).\n formats: Allowlist of file extensions to index (e.g. \".jpg\", \".txt\").\n\n Returns:\n tuple `(filenames, labels)`. `filenames` is a list of relative file\n paths, and `labels` is a list of integer labels corresponding to these\n files.\n \"\"\"\n dirname = os.path.basename(directory)\n valid_files = iter_valid_files(directory, follow_links, formats)\n labels = []\n filenames = []\n for root, fname in valid_files:\n labels.append(class_indices[dirname])\n absolute_path = os.path.join(root, fname)\n relative_path = os.path.join(\n dirname, os.path.relpath(absolute_path, directory))\n filenames.append(relative_path)\n return filenames, labels\n\n\ndef get_training_or_validation_split(samples, labels, validation_split, subset):\n \"\"\"Potentially restict samples & labels to a training or validation split.\n\n Args:\n samples: List of elements.\n labels: List of corresponding labels.\n validation_split: Float, fraction of data to reserve for validation.\n subset: Subset of the data to return.\n Either \"training\", \"validation\", or None. 
If None, we return all of the\n data.\n\n Returns:\n tuple (samples, labels), potentially restricted to the specified subset.\n \"\"\"\n if not validation_split:\n return samples, labels\n\n num_val_samples = int(validation_split * len(samples))\n if subset == 'training':\n print('Using %d files for training.' % (len(samples) - num_val_samples,))\n samples = samples[:-num_val_samples]\n labels = labels[:-num_val_samples]\n elif subset == 'validation':\n print('Using %d files for validation.' % (num_val_samples,))\n samples = samples[-num_val_samples:]\n labels = labels[-num_val_samples:]\n else:\n raise ValueError('`subset` must be either \"training\" '\n 'or \"validation\", received: %s' % (subset,))\n return samples, labels\n\n\ndef labels_to_dataset(labels, label_mode, num_classes):\n label_ds = dataset_ops.Dataset.from_tensor_slices(labels)\n if label_mode == 'binary':\n label_ds = label_ds.map(\n lambda x: array_ops.expand_dims(math_ops.cast(x, 'float32'), axis=-1))\n elif label_mode == 'categorical':\n label_ds = label_ds.map(lambda x: array_ops.one_hot(x, num_classes))\n return label_ds\n\n\ndef check_validation_split_arg(validation_split, subset, shuffle, seed):\n \"\"\"Raise errors in case of invalid argument values.\"\"\"\n if validation_split and not 0 < validation_split < 1:\n raise ValueError(\n '`validation_split` must be between 0 and 1, received: %s' %\n (validation_split,))\n if (validation_split or subset) and not (validation_split and subset):\n raise ValueError(\n 'If `subset` is set, `validation_split` must be set, and inversely.')\n if subset not in ('training', 'validation', None):\n raise ValueError('`subset` must be either \"training\" '\n 'or \"validation\", received: %s' % (subset,))\n if validation_split and shuffle and seed is None:\n raise ValueError(\n 'If using `validation_split` and shuffling the data, you must provide '\n 'a `seed` argument, to make sure that there is no overlap between the '\n 'training and validation subset.')\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Preprocessing stage tests.\"\"\"\n# pylint: disable=g-classes-have-attributes\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport numpy as np\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras.engine import base_preprocessing_layer\nfrom tensorflow.python.keras.layers import convolutional\nfrom tensorflow.python.keras.layers.preprocessing import image_preprocessing\nfrom tensorflow.python.keras.layers.preprocessing import normalization\nfrom tensorflow.python.keras.layers.preprocessing import preprocessing_stage\nfrom tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\n@keras_parameterized.run_all_keras_modes(always_skip_v1=True)\nclass PreprocessingStageTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_adapt(self):\n\n class PL(base_preprocessing_layer.PreprocessingLayer):\n\n def __init__(self, **kwargs):\n self.adapt_time = None\n self.adapt_count = 0\n super(PL, self).__init__(**kwargs)\n\n def adapt(self, data, reset_state=True):\n self.adapt_time = time.time()\n self.adapt_count += 1\n\n def call(self, inputs):\n return inputs + 1.\n\n # Test with NumPy array\n stage = preprocessing_stage.PreprocessingStage([\n PL(),\n PL(),\n PL(),\n ])\n stage.adapt(np.ones((3, 4)))\n self.assertEqual(stage.layers[0].adapt_count, 1)\n self.assertEqual(stage.layers[1].adapt_count, 1)\n self.assertEqual(stage.layers[2].adapt_count, 1)\n self.assertLessEqual(stage.layers[0].adapt_time, stage.layers[1].adapt_time)\n self.assertLessEqual(stage.layers[1].adapt_time, stage.layers[2].adapt_time)\n\n # Check call\n y = stage(array_ops.ones((3, 4)))\n self.assertAllClose(y, np.ones((3, 4)) + 3.)\n\n # Test with dataset\n adapt_data = dataset_ops.Dataset.from_tensor_slices(np.ones((3, 10)))\n adapt_data = adapt_data.batch(2) # 5 batches of 2 samples\n\n stage.adapt(adapt_data)\n self.assertEqual(stage.layers[0].adapt_count, 2)\n self.assertEqual(stage.layers[1].adapt_count, 2)\n self.assertEqual(stage.layers[2].adapt_count, 2)\n self.assertLess(stage.layers[0].adapt_time, stage.layers[1].adapt_time)\n self.assertLess(stage.layers[1].adapt_time, stage.layers[2].adapt_time)\n\n # Test error with bad data\n with self.assertRaisesRegex(ValueError, 'requires a '):\n stage.adapt(None)\n\n def test_mixing_preprocessing_and_regular_layers(self):\n stage = preprocessing_stage.PreprocessingStage([\n image_preprocessing.CenterCrop(16, 16),\n normalization.Normalization(),\n convolutional.Conv2D(4, 3)\n ])\n data = np.ones((16, 20, 20, 3), dtype='float32')\n stage.adapt(data)\n _ = 
stage(data)\n stage.compile('rmsprop', 'mse')\n stage.fit(data, np.ones((16, 14, 14, 4)))\n _ = stage.evaluate(data, np.ones((16, 14, 14, 4)))\n _ = stage.predict(data)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for keras.layers.preprocessing.normalization.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import strategy_combinations\nfrom tensorflow.python.distribute import tpu_strategy\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras.layers.preprocessing import category_crossing\nfrom tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils\nfrom tensorflow.python.platform import test\n\n\ndef batch_wrapper(dataset, batch_size, distribution, repeat=None):\n if repeat:\n dataset = dataset.repeat(repeat)\n # TPUs currently require fully defined input shapes, drop_remainder ensures\n # the input will have fully defined shapes.\n if isinstance(distribution,\n (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)):\n return dataset.batch(batch_size, drop_remainder=True)\n else:\n return dataset.batch(batch_size)\n\n\[email protected](\n combinations.combine(\n # Investigate why crossing is not supported with TPU.\n distribution=strategy_combinations.all_strategies,\n mode=['eager', 'graph']))\nclass CategoryCrossingDistributionTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_distribution(self, distribution):\n input_array_1 = np.array([['a', 'b'], ['c', 'd']])\n input_array_2 = np.array([['e', 'f'], ['g', 'h']])\n inp_dataset = dataset_ops.DatasetV2.from_tensor_slices(\n {'input_1': input_array_1, 'input_2': input_array_2})\n inp_dataset = batch_wrapper(inp_dataset, 2, distribution)\n\n # pyformat: disable\n expected_output = [[b'a_X_e', b'a_X_f', b'b_X_e', b'b_X_f'],\n [b'c_X_g', b'c_X_h', b'd_X_g', b'd_X_h']]\n config.set_soft_device_placement(True)\n\n with distribution.scope():\n input_data_1 = keras.Input(shape=(2,), dtype=dtypes.string,\n name='input_1')\n input_data_2 = keras.Input(shape=(2,), dtype=dtypes.string,\n name='input_2')\n input_data = [input_data_1, input_data_2]\n layer = category_crossing.CategoryCrossing()\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(inp_dataset)\n self.assertAllEqual(expected_output, output_dataset)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for ragged_array_ops.concat.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops.ragged import ragged_concat_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.platform import googletest\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass RaggedConcatOpTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def _rt_inputs_to_tensors(self, rt_inputs, ragged_ranks=None):\n if ragged_ranks is None:\n ragged_ranks = [None] * len(rt_inputs)\n return [ # pylint: disable=g-long-ternary\n ragged_factory_ops.constant(rt_input, ragged_rank=rrank)\n if rrank != 0 else constant_op.constant(rt_input)\n for (rt_input, rrank) in zip(rt_inputs, ragged_ranks)\n ]\n\n @parameterized.parameters(\n dict(\n descr='Two rank-2 inputs with empty value axis=1',\n rt_inputs=([[]], [[]]),\n axis=1,\n expected=[[]]),\n dict(\n descr='Two rank-2 inputs (ragged_rank=1), axis=0',\n rt_inputs=(\n [['a00', 'a01'], [], ['a20', 'a21']], # shape=(3, None)\n [['b00'], ['b10']]), # shape=(2, None)\n axis=0,\n expected=[[b'a00', b'a01'], [], [b'a20', b'a21'], [b'b00'],\n [b'b10']]),\n dict(\n descr='Two rank-2 inputs (ragged_rank=1), axis=1',\n rt_inputs=(\n [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)\n [['b00'], ['b10', 'b11', 'b12'], ['b20']]), # shape=(3, None)\n axis=1,\n expected=[\n [b'a00', b'a01', b'b00'],\n [b'b10', b'b11', b'b12'],\n [b'a20', b'a21', b'a22', b'b20']]),\n dict(\n descr='Two rank-2 inputs (ragged_rank=1), axis=-2',\n rt_inputs=(\n [['a00', 'a01'], [], ['a20', 'a21']], # shape=(3, None)\n [['b00'], ['b10']]), # shape=(2, None)\n axis=-2,\n expected=[[b'a00', b'a01'], [], [b'a20', b'a21'], [b'b00'],\n [b'b10']]),\n dict(\n descr='Two rank-2 inputs (ragged_rank=1), axis=-1',\n rt_inputs=(\n [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)\n [['b00'], ['b10', 'b11', 'b12'], ['b20']]), # shape=(3, None)\n axis=-1,\n expected=[\n [b'a00', b'a01', b'b00'],\n [b'b10', b'b11', b'b12'],\n [b'a20', b'a21', b'a22', b'b20']],\n expected_shape=[3, None]),\n dict(\n descr='Three rank-2 inputs (ragged_rank=1), axis=0',\n rt_inputs=(\n [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)\n [['b00'], ['b10']], # shape=(2, None)\n [['c00'], ['c10', 'c11'], ['c21']]), # shape=(3, None)\n axis=0,\n expected=[[b'a00', b'a01'], [], [b'a20', b'a21', b'a22'], [b'b00'],\n [b'b10'], [b'c00'], [b'c10', b'c11'], 
[b'c21']]),\n dict(\n descr='Three rank-2 inputs (ragged_rank=1), axis=1',\n rt_inputs=(\n [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)\n [['b00'], ['b10', 'b11', 'b12'], ['b20']], # shape=(3, None)\n [[], ['c10', 'c11'], ['c20', 'c21']]), # shape=(3, None)\n axis=1,\n expected=[\n [b'a00', b'a01', b'b00'],\n [b'b10', b'b11', b'b12', b'c10', b'c11'],\n [b'a20', b'a21', b'a22', b'b20', b'c20', b'c21']]),\n dict(\n descr='Three rank-3 inputs (ragged_rank=2), axis=0',\n rt_inputs=(\n [[['a000', 'a001'], ['a010']],\n [['a100', 'a101', 'a102'], ['a110', 'a111']]],\n [[['b000']], [['b100', 'b101'], ['b110']]],\n [[], [['c100', 'c101', 'c102', 'c103']], [[], ['c210', 'c211']]]),\n axis=0,\n expected=[\n [[b'a000', b'a001'], [b'a010']],\n [[b'a100', b'a101', b'a102'], [b'a110', b'a111']],\n [[b'b000']],\n [[b'b100', b'b101'], [b'b110']],\n [],\n [[b'c100', b'c101', b'c102', b'c103']],\n [[], [b'c210', b'c211']]]),\n dict(\n descr='Three rank-3 inputs (ragged_rank=2), axis=1',\n rt_inputs=(\n [[['a000', 'a001'], ['a010']],\n [['a100', 'a101', 'a102'], ['a110', 'a111']]],\n [[['b000']], [['b100', 'b101'], ['b110']]],\n [[], [[], ['c110', 'c111']]]),\n axis=1,\n expected=[\n [[b'a000', b'a001'], [b'a010'], [b'b000']],\n [[b'a100', b'a101', b'a102'], [b'a110', b'a111'],\n [b'b100', b'b101'], [b'b110'], [], [b'c110', b'c111']]]),\n dict(\n descr='Three rank-3 inputs (ragged_rank=2), axis=2',\n rt_inputs=(\n [[['a000', 'a001'], ['a010']],\n [['a100', 'a101', 'a102'], ['a110', 'a111']]],\n [[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]],\n [[['c000'], ['c010']], [[], ['c110', 'c111']]]),\n axis=2,\n expected=[\n [[b'a000', b'a001', b'c000'],\n [b'a010', b'b010', b'b011', b'c010']],\n [[b'a100', b'a101', b'a102', b'b100', b'b101'],\n [b'a110', b'a111', b'b110', b'c110', b'c111']]]),\n dict(\n descr='Three rank-3 inputs (ragged_rank=2), axis=-1',\n rt_inputs=(\n [[['a000', 'a001'], ['a010']],\n [['a100', 'a101', 'a102'], ['a110', 'a111']]],\n [[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]],\n [[['c000'], ['c010']], [[], ['c110', 'c111']]]),\n axis=-1,\n expected=[\n [[b'a000', b'a001', b'c000'],\n [b'a010', b'b010', b'b011', b'c010']],\n [[b'a100', b'a101', b'a102', b'b100', b'b101'],\n [b'a110', b'a111', b'b110', b'c110', b'c111']]]),\n dict(\n descr='ragged_concat([uniform, ragged, uniform], axis=1)',\n ragged_ranks=[0, 1, 0],\n rt_inputs=(\n [['0('], ['1('], ['2(']], # shape=(3, 1)\n [['b00'], ['b10', 'b11', 'b12'], ['b20']], # shape=(3, None)\n [[')0'], [')1'], [')2']]), # shape=(3, 1)\n axis=1,\n expected=[\n [b'0(', b'b00', b')0'],\n [b'1(', b'b10', b'b11', b'b12', b')1'],\n [b'2(', b'b20', b')2']]),\n dict(\n descr='ragged_concat([uniform, uniform], axis=0)',\n ragged_ranks=[0, 0],\n rt_inputs=(\n [['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']], # shape=(3, 2)\n [['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]), # shape=(2, 3)\n axis=0,\n expected=[\n [b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21'],\n [b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']],\n expected_ragged_rank=1),\n dict(\n descr='ragged_concat([uniform, ragged], axis=0)',\n ragged_ranks=[0, 1],\n rt_inputs=(\n [['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']], # shape=(3, 2)\n [['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]), # shape=(2, 3)\n axis=0,\n expected=[\n [b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21'],\n [b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']]),\n dict(\n descr='ragged_concat([uniform, ragged], axis=0) with rank-3 inputs',\n ragged_ranks=[0, 2],\n rt_inputs=(\n [[[0, 1], 
[2, 3]], [[4, 5], [6, 7]]], # shape = (2, 2, 2)\n [[[8], [8, 8]]]), # shape = (2, None, None)\n axis=0,\n expected=[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], [8, 8]]]),\n dict(\n descr='Two rank-3 inputs with ragged_rank=1, axis=-1',\n ragged_ranks=[1, 1],\n rt_inputs=(\n [[[0, 1], [2, 3], [4, 5]], [], [[6, 7], [8, 9]]],\n [[[9, 8], [7, 6], [5, 4]], [], [[3, 2], [1, 0]]]),\n axis=-1,\n expected=[\n [[0, 1, 9, 8], [2, 3, 7, 6], [4, 5, 5, 4]], [],\n [[6, 7, 3, 2], [8, 9, 1, 0]]],\n expected_ragged_rank=1),\n dict(\n descr='ragged_concat([vector, vector], axis=0)',\n ragged_ranks=[0, 0],\n rt_inputs=([1, 2, 3], [4, 5, 6]),\n axis=0,\n expected=[1, 2, 3, 4, 5, 6]),\n dict(\n descr='One input (so ragged_concat is a noop)',\n rt_inputs=([['a00', 'a01'], [], ['a20', 'a21']],),\n axis=0,\n expected=[[b'a00', b'a01'], [], [b'a20', b'a21']]),\n ) # pyformat: disable\n def testRaggedConcat(self,\n descr,\n rt_inputs,\n axis,\n expected,\n ragged_ranks=None,\n expected_ragged_rank=None,\n expected_shape=None):\n rt_inputs = self._rt_inputs_to_tensors(rt_inputs, ragged_ranks)\n concatenated = ragged_concat_ops.concat(rt_inputs, axis)\n if expected_ragged_rank is not None:\n self.assertEqual(concatenated.ragged_rank, expected_ragged_rank)\n if expected_shape is not None:\n self.assertEqual(concatenated.shape.as_list(), expected_shape)\n self.assertAllEqual(concatenated, expected)\n\n @parameterized.parameters(\n dict(\n rt_inputs=(),\n axis=0,\n error=ValueError,\n message=r'rt_inputs may not be empty\\.'),\n dict(\n rt_inputs=([[1, 2]], [[3, 4]]),\n axis=r'foo',\n error=TypeError,\n message='axis must be an int'),\n dict(\n rt_inputs=([[1, 2]], [[3, 4]]),\n axis=-3,\n error=ValueError,\n message='axis=-3 out of bounds: expected -2<=axis<2'),\n dict(\n rt_inputs=([[1, 2]], [[3, 4]]),\n axis=2,\n error=ValueError,\n message='axis=2 out of bounds: expected -2<=axis<2'),\n dict(\n ragged_ranks=(0, 0),\n rt_inputs=([[1, 2]], [[3, 4], [5, 6]]),\n axis=1,\n error=(ValueError, errors.InvalidArgumentError)),\n )\n def testStaticError(self,\n rt_inputs,\n axis,\n error,\n message=None,\n ragged_ranks=None):\n rt_inputs = self._rt_inputs_to_tensors(rt_inputs, ragged_ranks)\n self.assertRaisesRegex(error, message, ragged_concat_ops.concat, rt_inputs,\n axis)\n\n @parameterized.parameters([\n dict(\n ragged_ranks=(1, 1),\n rt_inputs=([[1, 2]], [[3, 4], [5, 6]]),\n axis=1,\n error=errors.InvalidArgumentError,\n message='Input tensors have incompatible shapes'),\n ])\n def testRuntimeError(self, rt_inputs, axis, error, message,\n ragged_ranks=None):\n if context.executing_eagerly():\n return\n rt_inputs = [\n array_ops.placeholder_with_default(rt, shape=None) for rt in rt_inputs\n ]\n concatenated = ragged_concat_ops.concat(rt_inputs, axis)\n with self.assertRaisesRegex(error, message):\n self.evaluate(concatenated)\n\n def testNegativeAxisWithUnknownRankError(self):\n if context.executing_eagerly():\n return\n rt_inputs = [\n array_ops.placeholder(dtypes.int64),\n array_ops.placeholder(dtypes.int64)\n ]\n self.assertRaisesRegex(\n ValueError, r'axis may only be negative if ndims is statically known.',\n ragged_concat_ops.concat, rt_inputs, -1)\n\n def testSingleTensorInput(self):\n \"\"\"Tests ragged_concat with a single tensor input.\n\n Usually, we pass a list of values in for rt_inputs. However, you can\n also pass in a single value (as with tf.concat), in which case it simply\n returns that tensor. 
This test exercises that path.\n \"\"\"\n rt_inputs = ragged_factory_ops.constant([[1, 2], [3, 4]])\n concatenated = ragged_concat_ops.concat(rt_inputs, 0)\n self.assertAllEqual(concatenated, [[1, 2], [3, 4]])\n\n\nif __name__ == '__main__':\n googletest.main()\n"
] | [
[
"tensorflow.python.data.experimental.service.server_lib.DispatchServer",
"tensorflow.python.data.experimental.service.server_lib.WorkerServer",
"tensorflow.python.platform.test.main"
],
[
"tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model.common.do_test",
"tensorflow.compat.v2.TensorSpec"
],
[
"numpy.minimum",
"numpy.lib.stride_tricks.as_strided",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.signal.window_ops.hann_window",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.math_ops.linspace",
"numpy.arange",
"numpy.std",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.gradient_checker_v2.compute_gradient",
"numpy.zeros",
"tensorflow.python.ops.signal.spectral_ops.stft",
"tensorflow.python.ops.signal.spectral_ops.mdct",
"numpy.fft.irfft",
"tensorflow.python.ops.signal.spectral_ops.inverse_stft",
"numpy.random.rand",
"numpy.sum",
"tensorflow.python.ops.signal.spectral_ops.inverse_stft_window_fn",
"numpy.random.random",
"numpy.abs",
"numpy.fft.rfft",
"numpy.ones",
"numpy.random.normal",
"numpy.shape",
"tensorflow.python.ops.signal.spectral_ops.inverse_mdct",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.ops.math_ops.reduce_sum"
],
[
"tensorflow.python.ops.confusion_matrix.remove_squeezable_dimensions",
"tensorflow.python.ops.array_ops.concat",
"numpy.asarray",
"numpy.arange",
"numpy.reshape",
"numpy.ones",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.platform.test.main",
"numpy.zeros_like",
"tensorflow.python.ops.random_ops.random_normal",
"tensorflow.python.ops.confusion_matrix.confusion_matrix",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.math_ops.round",
"tensorflow.python.ops.math_ops.maximum",
"numpy.zeros"
],
[
"tensorflow.python.keras.layers.einsum_dense._analyze_einsum_string",
"tensorflow.python.keras.Input",
"tensorflow.python.keras.layers.einsum_dense.EinsumDense",
"tensorflow.python.keras.initializers.constant",
"tensorflow.python.keras.testing_utils.layer_test",
"tensorflow.python.platform.test.main",
"numpy.array"
],
[
"tensorflow.lite.tools.signature.signature_def_utils.get_signature_defs",
"tensorflow.TensorShape",
"tensorflow.lite.tools.signature.signature_def_utils.set_signature_defs",
"tensorflow.as_dtype",
"tensorflow.lite.tools.signature.signature_def_utils.clear_signature_defs",
"tensorflow.io.gfile.GFile",
"tensorflow.compat.v1.saved_model.build_signature_def",
"tensorflow.test.main",
"tensorflow.compat.v1.resource_loader.get_path_to_datafile",
"tensorflow.compat.v1.resource_loader.get_root_dir_with_all_resources"
],
[
"tensorflow.audio.encode_wav",
"tensorflow.python.platform.test.main",
"tensorflow.examples.speech_commands.wav_to_features.wav_to_features",
"tensorflow.zeros"
],
[
"tensorflow.python.keras.layers.Flatten",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.lib.io.file_io.recursive_create_dir_v2",
"tensorflow.python.distribute.multi_process_runner.barrier",
"tensorflow.python.training.checkpoint_management.latest_checkpoint",
"tensorflow.python.keras.layers.Conv2D",
"tensorflow.python.distribute.multi_worker_test_base.create_cluster_spec",
"tensorflow.python.keras.layers.Reshape",
"tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensor_slices",
"tensorflow.python.training.checkpoint_management.CheckpointManager",
"numpy.float32",
"tensorflow.python.lib.io.file_io.delete_recursively_v2",
"tensorflow.python.keras.saving.save.load_model",
"tensorflow.python.keras.optimizer_v2.gradient_descent.SGD",
"tensorflow.python.lib.io.file_io.file_exists",
"tensorflow.python.distribute.multi_process_runner.test_main",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.keras.datasets.mnist.load_data",
"tensorflow.python.framework.test_util.skip_if_error",
"tensorflow.python.data.ops.dataset_ops.Options",
"tensorflow.python.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.python.distribute.collective_all_reduce_strategy.CollectiveAllReduceStrategy",
"tensorflow.python.training.tracking.util.Checkpoint"
],
[
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.eager.context.collect_graphs",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.ones"
],
[
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.data.kernel_tests.test_base.default_test_combinations",
"tensorflow.python.data.ops.readers.TFRecordDataset",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.platform.test.main",
"tensorflow.python.lib.io.python_io.TFRecordWriter",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.data.experimental.ops.scan_ops.scan",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.tf2.enabled",
"tensorflow.python.framework.ops.convert_to_tensor"
],
[
"tensorflow.python.distribute.client.metric_utils.get_metric_summary",
"tensorflow.python.distribute.client.client.Cluster",
"tensorflow.python.eager.test.main",
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.distribute.multi_worker_test_base.pick_unused_port",
"tensorflow.python.distribute.multi_worker_test_base.create_in_process_cluster"
],
[
"tensorflow.python.grappler.tf_optimizer.OptimizeGraph",
"tensorflow.core.protobuf.graph_debug_info_pb2.GraphDebugInfo",
"tensorflow.lite.python.op_hint.find_all_hinted_output_nodes",
"tensorflow.lite.python.schema_py_generated.ModelT.InitFromObj",
"tensorflow.lite.python.op_hint.convert_op_hints_to_stubs",
"tensorflow.lite.python.schema_py_generated.QuantizationParametersT",
"tensorflow.lite.python.schema_py_generated.Model.GetRootAsModel",
"tensorflow.core.protobuf.meta_graph_pb2.SignatureDef",
"tensorflow.python.framework.graph_util.convert_variables_to_constants",
"tensorflow.python.framework.error_interpolation.create_graph_debug_info_def",
"tensorflow.python.training.saver.export_meta_graph",
"tensorflow.python.framework.convert_to_constants.disable_lower_using_switch_merge",
"tensorflow.core.protobuf.meta_graph_pb2.CollectionDef",
"tensorflow.core.protobuf.config_pb2.ConfigProto"
],
[
"numpy.matrix",
"tensorflow.python.platform.app.run",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.placeholder",
"numpy.random.randn",
"tensorflow.python.framework.sparse_tensor.SparseTensor.from_value",
"tensorflow.python.framework.ops.device",
"numpy.where",
"numpy.random.randint",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.sparse_ops.sparse_tensor_dense_matmul",
"tensorflow.python.platform.test.is_gpu_available",
"numpy.random.rand",
"numpy.array",
"numpy.abs",
"numpy.random.seed",
"tensorflow.python.framework.ops.Graph",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.training.training_util.get_or_create_global_step",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.training.tracking.util.gather_initializers",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.training.adam.AdamOptimizer",
"tensorflow.python.keras.layers.core.Dense",
"tensorflow.python.platform.test.main",
"tensorflow.python.training.tracking.util.add_variable",
"tensorflow.python.training.tracking.util.Checkpoint",
"tensorflow.python.training.saver.Saver",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.keras.models.save_model",
"tensorflow.python.keras.layers.Dense",
"tensorflow.lite.testing.model_coverage.model_coverage_lib.test_keras_model",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.saved_model.saved_model.simple_save",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.lite.testing.model_coverage.model_coverage_lib.test_frozen_graph",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.client.session.Session",
"tensorflow.lite.testing.model_coverage.model_coverage_lib.test_saved_model",
"numpy.random.seed",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.training.training_util.write_graph",
"numpy.random.uniform",
"tensorflow.lite.testing.model_coverage.model_coverage_lib.test_frozen_graph_quant",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.keras.backend.clear_session",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.distribute.values_util.apply_aggregation",
"tensorflow.python.distribute.values_util.scatter_div",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.framework.type_spec.type_spec_from_value",
"tensorflow.python.distribute.values_util.assert_replica_context",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.distribute.values_util.scatter_error_msg.format",
"tensorflow.python.distribute.values_util.assign_on_device",
"tensorflow.python.distribute.values_util.is_saving_non_distributed",
"tensorflow.python.distribute.values_util.on_write_assign",
"tensorflow.python.distribute.distribution_strategy_context.enter_or_assert_strategy",
"tensorflow.python.distribute.distribute_lib.get_update_replica_id",
"tensorflow.python.distribute.packed_distributed_variable.PackedDistributedVariable",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.distribute.values_util.scatter_min",
"tensorflow.python.framework.ops.register_tensor_conversion_function",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.distribute.values_util.on_write_assign_sub",
"tensorflow.python.distribute.values_util.on_read_assign_add_cross_replica",
"tensorflow.python.distribute.distribution_strategy_context.in_cross_replica_context",
"tensorflow.python.distribute.values_util.scatter_update",
"tensorflow.python.distribute.values_util.on_read_assign_cross_replica",
"tensorflow.python.distribute.values_util.scatter_max",
"tensorflow.python.distribute.device_util.current",
"tensorflow.python.training.saving.saveable_object.SaveSpec",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.distribute.values_util.scatter_add",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.distribute.values_util.get_current_replica_id_as_int",
"tensorflow.python.saved_model.save_context.in_save_context",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.distribute.values_util.on_write_assign_add",
"tensorflow.python.distribute.values_util.scatter_sub",
"tensorflow.python.distribute.reduce_util.ReduceOp.from_variable_aggregation",
"tensorflow.python.distribute.values_util.on_read_assign_sub_cross_replica",
"tensorflow.python.distribute.values_util.scatter_mul"
],
[
"tensorflow.python.platform.gfile.GFile",
"tensorflow.core.framework.graph_pb2.GraphDef",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.core.framework.graph_pb2.GraphDef.FromString"
],
[
"tensorflow.compat.v2.keras.layers.Flatten",
"tensorflow.compat.v2.keras.models.Sequential",
"tensorflow.compat.v2.lite.TFLiteConverter.from_keras_model",
"tensorflow.compat.v2.keras.layers.MaxPooling2D",
"tensorflow.compat.v2.keras.layers.Dense",
"numpy.random.rand",
"tensorflow.compat.v2.keras.layers.Conv2D",
"numpy.random.randint"
],
[
"tensorflow.python.ops.nn_ops.conv3d",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.nn_ops.convolution",
"tensorflow.python.ops.gradient_checker.compute_gradient",
"tensorflow.python.ops.gradients_impl.gradients",
"numpy.fabs",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.nn_ops.Convolution",
"tensorflow.python.platform.test.main",
"tensorflow.python.platform.test.is_built_with_rocm",
"numpy.prod",
"tensorflow.python.ops.nn_ops.conv3d_v2",
"tensorflow.python.framework.test_util.NHWCToNCHW",
"tensorflow.python.framework.test_util.IsMklEnabled",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.test_util.NCHWToNHWC",
"tensorflow.python.framework.test_util.GpuSupportsHalfMatMulAndConv",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.autograph.lang.special_functions.stack",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.ops.list_ops.tensor_list_stack",
"tensorflow.python.platform.test.main",
"numpy.array",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.autograph.lang.special_functions.tensor_list",
"tensorflow.python.autograph.lang.special_functions.match_staging_level"
],
[
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.platform.googletest.main"
],
[
"tensorflow.python.profiler.traceme.TraceMe",
"tensorflow.python.framework.python_memory_checker._PythonMemoryChecker",
"tensorflow.python.util.tf_inspect.stack"
],
[
"tensorflow.python.ops.image_ops_impl.combined_non_max_suppression",
"tensorflow.python.platform.test.main",
"tensorflow.python.compiler.tensorrt.test.tf_trt_integration_test_base.IsQuantizationMode",
"tensorflow.compiler.tf2tensorrt._pywrap_py_utils.get_linked_tensorrt_version",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.compat.v2.keras.layers.Flatten",
"tensorflow.compat.v2.keras.models.Sequential",
"tensorflow.compat.v2.keras.layers.Dense",
"tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model.common.do_test",
"tensorflow.compat.v2.TensorSpec"
],
[
"numpy.expand_dims",
"tensorflow.python.framework.ops.device",
"tensorflow.python.platform.test.main",
"tensorflow.python.platform.benchmark.benchmark_config",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.linalg_ops.matrix_inverse",
"tensorflow.python.platform.test.is_gpu_available",
"numpy.identity",
"numpy.array",
"numpy.random.seed",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.framework.ops.Graph",
"numpy.tile",
"numpy.ones",
"tensorflow.python.ops.random_ops.random_normal",
"numpy.prod",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.empty",
"tensorflow.python.framework.constant_op.constant"
],
[
"numpy.dtype"
],
[
"tensorflow.python.framework.ops.enable_eager_execution",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context._set_context",
"tensorflow.python.eager.context.collect_graphs",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.platform.test.main",
"tensorflow.python.eager.context.Context",
"tensorflow.python.eager.context.get_function_def",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.eager.context.context",
"numpy.array",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.client.session._DeviceAttributes",
"tensorflow.python.eager.context.LogicalDevice",
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.distribute.cluster_resolver.tfconfig_cluster_resolver.TFConfigClusterResolver",
"tensorflow.python.platform.test.main"
],
[
"tensorflow.python.ops.nn_ops.data_format_dim_map",
"tensorflow.python.ops.nn_ops.data_format_vec_permute",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.platform.test.main",
"numpy.array"
],
[
"tensorflow.python.keras.testing_utils.model_type_scope",
"tensorflow.python.keras.testing_utils.run_eagerly_scope",
"tensorflow.python.framework.test_combinations.OptionalParameter",
"tensorflow.python.tf2.enabled",
"tensorflow.python.keras.testing_utils.use_keras_tensors_scope",
"tensorflow.python.framework.combinations.combine"
],
[
"tensorflow.python.platform.app.run",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.image_ops.decode_image",
"tensorflow.python.client.session.Session",
"tensorflow.python.ops.io_ops.read_file",
"tensorflow.python.ops.image_ops.resize_images_v2"
],
[
"tensorflow.python.platform.flags.DEFINE_bool",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.python.profiler.profiler_v2.Profile",
"tensorflow.python.profiler.profiler_v2.ProfilerOptions"
],
[
"tensorflow.python.ops.array_ops.matrix_set_diag",
"tensorflow.python.framework.test_util.disable_xla",
"tensorflow.python.ops.linalg.linear_operator_test_util.add_tests",
"numpy.isfinite",
"numpy.logspace",
"tensorflow.python.ops.linalg.linear_operator_test_util.random_positive_definite_matrix",
"tensorflow.python.ops.linalg.linear_operator_test_util.random_normal",
"numpy.linalg.cond",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.linalg.linear_operator_test_util.random_tril_matrix",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.array_ops.placeholder_with_default"
],
[
"tensorflow.python.keras.backend.zeros",
"tensorflow.python.keras.layers.Lambda",
"tensorflow.python.keras.layers.Embedding",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.layers.BatchNormalization",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.Sequential",
"tensorflow.python.ops.array_ops.ones",
"numpy.zeros",
"tensorflow.python.keras.testing_utils.get_small_sequential_mlp",
"tensorflow.python.keras.keras_parameterized.run_all_keras_modes",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.eager.function.defun",
"tensorflow.python.keras.models.Sequential",
"tensorflow.python.keras.layers.Dropout",
"numpy.sum",
"tensorflow.python.keras.layers.InputLayer",
"tensorflow.python.keras.testing_utils.should_run_eagerly",
"numpy.random.random",
"tensorflow.python.keras.Input",
"tensorflow.python.keras.metrics.CategoricalAccuracy",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.keras.Sequential.from_config",
"tensorflow.python.keras.models.Sequential.from_config"
],
[
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.ops.array_ops.one_hot",
"numpy.random.randint",
"numpy.random.RandomState",
"tensorflow.python.ops.math_ops.cast"
],
[
"tensorflow.python.keras.keras_parameterized.run_all_keras_modes",
"tensorflow.python.keras.layers.preprocessing.normalization.Normalization",
"numpy.ones",
"tensorflow.python.keras.layers.preprocessing.image_preprocessing.CenterCrop",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.layers.convolutional.Conv2D",
"tensorflow.python.ops.array_ops.ones"
],
[
"tensorflow.python.keras.layers.preprocessing.category_crossing.CategoryCrossing",
"tensorflow.python.data.ops.dataset_ops.DatasetV2.from_tensor_slices",
"tensorflow.python.keras.Input",
"tensorflow.python.framework.config.set_soft_device_placement",
"tensorflow.python.keras.Model",
"tensorflow.python.distribute.combinations.combine",
"tensorflow.python.platform.test.main",
"numpy.array"
],
[
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.ops.ragged.ragged_concat_ops.concat",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"1.4",
"2.2",
"1.13",
"2.3",
"2.4",
"2.6",
"2.9",
"1.5",
"1.7",
"2.5",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.3"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.2",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.2",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
}
] |
evanloshin/CarND-Behavioral-Cloning-P3 | [
"22ec89cdea5257a10512f07b07fc4c074bc7c649"
] | [
"drive.py"
] | [
"import argparse\nimport base64\nfrom datetime import datetime\nimport os\nimport shutil\n\nimport numpy as np\nimport socketio\nimport eventlet\nimport eventlet.wsgi\nfrom PIL import Image\nfrom flask import Flask\nfrom io import BytesIO\n\nfrom keras.models import load_model\nimport h5py\nfrom keras import __version__ as keras_version\nfrom keras import Model\n\nsio = socketio.Server()\napp = Flask(__name__)\nmodel = None\nprev_image_array = None\n\n\nclass SimplePIController:\n def __init__(self, Kp, Ki):\n self.Kp = Kp\n self.Ki = Ki\n self.set_point = 0.\n self.error = 0.\n self.integral = 0.\n\n def set_desired(self, desired):\n self.set_point = desired\n\n def update(self, measurement):\n # proportional error\n self.error = self.set_point - measurement\n\n # integral error\n self.integral += self.error\n\n return self.Kp * self.error + self.Ki * self.integral\n\n\ncontroller = SimplePIController(0.1, 0.002)\nset_speed = 9\ncontroller.set_desired(set_speed)\n\n\[email protected]('telemetry')\ndef telemetry(sid, data):\n if data:\n # The current steering angle of the car\n steering_angle = data[\"steering_angle\"]\n # The current throttle of the car\n throttle = data[\"throttle\"]\n # The current speed of the car\n speed = data[\"speed\"]\n # The current image from the center camera of the car\n imgString = data[\"image\"]\n image = Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))\n\n # # Extract intermediate layer output\n # layer_name = 'first_convolution'\n # intermediate_layer_model = Model(inputs=model.input,\n # outputs=model.get_layer(layer_name).output)\n # intermediate_output = intermediate_layer_model.predict(image_array[None, :, :, :], batch_size=1)\n # intermediate_output = np.squeeze(intermediate_output)\n # intermediate_output = (255.0 / intermediate_output.max() * (intermediate_output - intermediate_output.min())).astype(np.uint8)\n # intermediate_output_img = Image.fromarray(intermediate_output[12])\n #\n # # save intermediate output layer\n # timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]\n # image_filename = os.path.join('/Users/evanloshin/Documents/Udacity/SDC/behavioral-cloning-data/Intermediate-Layer/', timestamp)\n # intermediate_output_img.save('{}.jpg'.format(image_filename))\n\n throttle = controller.update(float(speed))\n\n print(\"Predicted Steering Angle: {} Throttle: {}\".format(round(steering_angle, 5), round(throttle, 5)))\n send_control(steering_angle, throttle)\n\n # save frame\n if args.image_folder != '':\n timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]\n image_filename = os.path.join(args.image_folder, timestamp)\n image.save('{}.jpg'.format(image_filename))\n else:\n # NOTE: DON'T EDIT THIS.\n sio.emit('manual', data={}, skip_sid=True)\n\n\[email protected]('connect')\ndef connect(sid, environ):\n print(\"connect \", sid)\n send_control(0, 0)\n\n\ndef send_control(steering_angle, throttle):\n sio.emit(\n \"steer\",\n data={\n 'steering_angle': steering_angle.__str__(),\n 'throttle': throttle.__str__()\n },\n skip_sid=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Remote Driving')\n parser.add_argument(\n 'model',\n type=str,\n help='Path to model h5 file. Model should be on the same path.'\n )\n parser.add_argument(\n 'image_folder',\n type=str,\n nargs='?',\n default='',\n help='Path to image folder. 
This is where the images from the run will be saved.'\n )\n args = parser.parse_args()\n\n # check that model Keras version is same as local Keras version\n f = h5py.File(args.model, mode='r')\n model_version = f.attrs.get('keras_version')\n keras_version = str(keras_version).encode('utf8')\n\n if model_version != keras_version:\n print('You are using Keras version ', keras_version,\n ', but the model was built using ', model_version)\n\n model = load_model(args.model)\n\n if args.image_folder != '':\n print(\"Creating image folder at {}\".format(args.image_folder))\n if not os.path.exists(args.image_folder):\n os.makedirs(args.image_folder)\n else:\n shutil.rmtree(args.image_folder)\n os.makedirs(args.image_folder)\n print(\"RECORDING THIS RUN ...\")\n else:\n print(\"NOT RECORDING THIS RUN ...\")\n\n # wrap Flask application with engineio's middleware\n app = socketio.Middleware(sio, app)\n\n # deploy as an eventlet WSGI server\n eventlet.wsgi.server(eventlet.listen(('', 4567)), app)\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
frederikschubert/rltime | [
"d1722ffd4cf7b4599655b8d9c64abc243919afc9"
] | [
"rltime/eval.py"
] | [
"\"\"\" Entry point for evaluating/rendering a trained policy. \"\"\"\n\nimport argparse\nimport json\nimport os\nimport numpy as np\nimport time\nimport datetime\n\nfrom rltime.general.config import load_config\nfrom rltime.general.utils import deep_dictionary_update\nfrom rltime.general.type_registry import get_registered_type\nfrom rltime.env_wrappers.common import make_env_creator, EpisodeRecorder\nfrom rltime.env_wrappers.vec_env.sub_proc import make_sub_proc_vec_env\nfrom rltime.general.loggers import DirectoryLogger\n\n\ndef create_policy_from_config(config, action_space, observation_space):\n \"\"\"Creates a policy from the given config and spaces\n\n This does not load the weights just creates the policy\n \"\"\"\n if not isinstance(config, dict):\n config = load_config(config)\n\n train_cls = get_registered_type(\n \"trainers\", config['training'].get(\"type\", None))\n assert(hasattr(train_cls, \"create_policy\")), \\\n f\"Config training class {type(train_cls)} does not have a \" \\\n \"'create_policy' method\"\n\n model_config = config.get(\"model\")\n\n return train_cls.create_policy(\n model_config=model_config, action_space=action_space,\n observation_space=observation_space, **config.get(\"policy_args\", {}))\n\n\ndef eval_policy(path, num_envs, episode_count, record=False, record_fps=60,\n render=False, render_fps=None, eps=0.001, conf_update=None):\n \"\"\"Evaluates training result at 'path', loading the last checkpoint\n\n The result is logged to a new line in file 'eval.json' in <path>\n\n Args:\n path: The path containing the training result output to evaluate\n num_envs: Amount of vectorized (sub-process) ENVs to evaluate in\n parallel\n episode_count: The amount of episodes to evaluate total\n record: Whether to record episodes to MP4 (under 'recordings'\n sub-directory in <path>)\n record_fps: If <record>, the FPS to record at (These are raw ENV frames\n before any frame-skipping, so atari would usually be 60)\n render: Whether to render the ENVs in a window in real-time (Tiled if\n num_envs>1)\n render_fps: Frames-Per-Second to sync the rendering to (Valid only for\n render=True), the default (None) renders at max policy speed. These\n are acting steps, so after frame-skipping if active\n eps: Epsilon to use for random action selection\n\n Note: We count the first 'episode_count' episodes that started and not\n ended, as 'ended' is unfair to longer episodes in case of vectorized\n evaluation. 
For Example: Take a policy that achieves 100 reward in 100\n seconds 50% of the time and 0 reward in <1 second 50% of the time.\n So we'd expect if we evaluate 20 episodes to get around ~50 average\n reward (which we would if running 20 episodes serially on a single ENV)\n But if we run 16 ENVs in parallel we will likely get near-0 mean reward\n if we count the first 20 episodes that finished (Since half of the 16\n ENVs immediately end with reward 0 then restart, then half of those\n immediately end with 0 and so on, so we quickly get ~(8+4+2+1) 0-reward\n runs and don't count the ones which are running long and going to reach\n 100 reward), while if we take the first 20 episodes that started (and\n ignore any that started after) we will get the expected result\n \"\"\"\n print(\"Evaluating:\", path)\n assert(num_envs <= episode_count), \\\n \"num_envs can't be higher than the requested episode_count\"\n\n logger = DirectoryLogger(path, use_logging=False, tensorboard=False)\n\n # Load the config from the result path\n config = logger.get_config()\n \n if conf_update:\n config = dict(config) # Avoid changing the passed config\n deep_dictionary_update(config, conf_update)\n\n # Make the env-creaton function based on the config settings\n env_args = config.get(\"env_args\", {})\n if record:\n # If requested, add also an episode-recorder to the ENV stack\n recorder = {\n \"type\": EpisodeRecorder,\n \"args\": {\n \"path\": os.path.join(path, \"recordings\"),\n \"fps\": record_fps\n }\n }\n env_args['wrappers'] = [recorder] + env_args.get('wrappers', [])\n\n env_creator = make_env_creator(config.get(\"env\"), **env_args)\n\n # Create a vectorized ENV\n env = make_sub_proc_vec_env(env_creator, num_envs)\n\n # Create the policy based on the config\n policy = create_policy_from_config(\n config, env.action_space, env.observation_space)\n\n # Load the last checkpoint\n training_step, cp_data = logger.get_checkpoint()\n # Load the weights from the checkpoint to the policy\n policy.load_state(cp_data['policy_state'])\n print(\"Loaded checkpoint from step:\", training_step)\n\n # The initial policy input state\n state = policy.make_input_state(env.reset(), np.array([True] * num_envs))\n\n episodes_started = num_envs\n rewards = []\n lengths = []\n # This signifies the ENV started the episode in time and should be counted\n masks = [True] * num_envs\n # TODO(frederik): Mention mode and difficulty\n print(f\"Running '{config.get('env')}' for {episode_count} episodes\"\n f\" on {num_envs} ENVs\")\n while len(rewards) < episode_count:\n step_start = time.time()\n # Select the next action for each env\n preds = policy.actor_predict(state, timesteps=1)\n actions = preds['actions']\n if eps:\n # Remap to random actions with eps probability\n for i in range(num_envs):\n if np.random.rand() < eps:\n actions[i] = env.action_space.sample()\n # Send the action and get the transition data\n obs, _, dones, info = env.step(actions)\n\n # Check any env if finished\n for i, env_info in enumerate(info):\n # We use the 'real' done/reward from the EpisodeTracker wrapper\n if env_info['episode_info']['done']:\n if masks[i]:\n # Only count the first 'episode_count' that started\n reward = env_info['episode_info']['reward']\n length = env_info['episode_info']['length']\n rewards.append(reward)\n lengths.append(length)\n print(f\"Episode {len(rewards)}/{episode_count} \"\n f\"finished with reward: {reward}\")\n\n episodes_started += 1\n if episodes_started > episode_count:\n masks[i] = False\n\n # Render to screen if 
requested\n if render:\n if render_fps:\n diff = 1./render_fps - (time.time() - step_start)\n if diff > 0:\n time.sleep(diff)\n env.render()\n # Generate the next policy input state\n state = policy.make_input_state(obs, dones)\n\n env.close()\n\n # Log the result\n result = {\n \"step\": training_step,\n \"date\": datetime.datetime.now(),\n \"episodes\": episode_count,\n \"envs\": num_envs,\n **{\n key: {\n \"mean\": np.mean(vals),\n \"min\": np.min(vals),\n \"max\": np.max(vals),\n \"median\": np.median(vals),\n \"std\": np.std(vals),\n } for key, vals in [(\"reward\", rewards), (\"length\", lengths)]\n }\n }\n print(\"Result:\")\n logger.log_result(\"eval\", result, None)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n 'path', type=str,\n help=\"The path to the training directory result to evaluate\")\n parser.add_argument(\n '--num-envs', type=int, default=1,\n help=\"Amount of ENVs to run in parallel\")\n parser.add_argument(\n '--episodes', type=int, default=5,\n help=\"Amount of episodes to run\")\n parser.add_argument(\n '--record', action='store_true',\n help=\"Whether to record episode to MP4 (To a sub-directory in the \"\n \"result path). Warning: If used with --num-envs>1 the last \"\n \"videos will be truncated\")\n parser.add_argument(\n '--record-fps', type=int, default=60,\n help=\"FPS to record at if --record (Typically 60FPS for atari)\")\n parser.add_argument(\n '--render', action='store_true',\n help=\"Whether to render the episodes in real-time\")\n parser.add_argument(\n '--render-fps', type=int, default=0,\n help=\"FPS to sync to if using --render (Set to 0 for full speed), \"\n \"note this is after ENV frame-skipping so if you want 60FPS with \"\n \"frame-skip of 4 use 15 here\")\n parser.add_argument(\n '--eps', type=float, default=0.001,\n help=\"Epsilon value to use for random action selection during \"\n \"evaluation\")\n parser.add_argument(\n '--conf-update', type=str,\n help=\"Optional JSON dictionary string to deep-update the config with\")\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n conf_update = None if not args.conf_update \\\n else json.loads(args.conf_update)\n\n eval_policy(\n args.path, num_envs=args.num_envs, episode_count=args.episodes,\n record=args.record, record_fps=args.record_fps,\n render=args.render, render_fps=args.render_fps, eps=args.eps, conf_update=conf_update)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.min",
"numpy.median",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.random.rand",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
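The eval.py entry above aggregates per-episode rewards and lengths into mean/min/max/median/std using the numpy calls listed in its apis column. A minimal, self-contained sketch of that aggregation with hypothetical reward values:

import numpy as np

rewards = [100.0, 0.0, 87.5, 12.5]  # hypothetical per-episode rewards
summary = {
    "mean": np.mean(rewards),
    "min": np.min(rewards),
    "max": np.max(rewards),
    "median": np.median(rewards),
    "std": np.std(rewards),
}
print(summary)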
apexrl/EBIL-torch | [
"8d257d5efa36f7c608085e34a7cdd3e996962d3f"
] | [
"rlkit/core/base_algorithm.py"
] | [
"import abc\nimport pickle\nimport time\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport gtimer as gt\nimport numpy as np\n\nfrom rlkit.core import logger, eval_util\nfrom rlkit.data_management.env_replay_buffer import EnvReplayBuffer\nfrom rlkit.data_management.path_builder import PathBuilder\nfrom rlkit.policies.base import ExplorationPolicy\nfrom rlkit.torch.sac.policies import MakeDeterministic\nfrom rlkit.samplers import PathSampler\nfrom rlkit.envs.wrapped_absorbing_env import WrappedAbsorbingEnv\n\nfrom gym.spaces import Dict\n\n\nclass BaseAlgorithm(metaclass=abc.ABCMeta):\n \"\"\"\n base algorithm for single task setting\n can be used for RL or Learning from Demonstrations\n \"\"\"\n def __init__(\n self,\n env,\n exploration_policy: ExplorationPolicy,\n training_env=None,\n eval_policy=None,\n eval_sampler=None,\n\n num_epochs=100,\n num_steps_per_epoch=10000,\n num_steps_between_train_calls=1000,\n num_steps_per_eval=1000,\n max_path_length=1000,\n min_steps_before_training=0,\n\n replay_buffer=None,\n replay_buffer_size=10000,\n\n freq_saving=1,\n save_replay_buffer=False,\n save_environment=False,\n save_algorithm=False,\n\n save_best=False,\n save_best_starting_from_epoch=0,\n best_key='AverageReturn', # higher is better\n \n no_terminal=False,\n wrap_absorbing=False,\n\n render=False,\n render_kwargs={},\n\n freq_log_visuals=1,\n\n eval_deterministic=False\n ):\n self.env = env\n self.training_env = training_env or pickle.loads(pickle.dumps(env))\n self.exploration_policy = exploration_policy\n\n self.num_epochs = num_epochs\n self.num_env_steps_per_epoch = num_steps_per_epoch\n self.num_steps_between_train_calls = num_steps_between_train_calls\n self.num_steps_per_eval = num_steps_per_eval\n self.max_path_length = max_path_length\n self.min_steps_before_training = min_steps_before_training\n\n self.render = render\n\n self.save_replay_buffer = save_replay_buffer\n self.save_algorithm = save_algorithm\n self.save_environment = save_environment\n self.save_best = save_best\n self.save_best_starting_from_epoch = save_best_starting_from_epoch\n self.best_key = best_key\n self.best_statistic_so_far = float('-Inf')\n \n if eval_sampler is None:\n if eval_policy is None:\n eval_policy = exploration_policy\n eval_policy = MakeDeterministic(eval_policy)\n eval_sampler = PathSampler(\n env,\n eval_policy,\n num_steps_per_eval,\n max_path_length,\n no_terminal=no_terminal,\n render=render,\n render_kwargs=render_kwargs\n )\n self.eval_policy = eval_policy\n self.eval_sampler = eval_sampler\n\n self.action_space = env.action_space\n self.obs_space = env.observation_space\n self.replay_buffer_size = replay_buffer_size\n if replay_buffer is None:\n assert max_path_length < replay_buffer_size\n replay_buffer = EnvReplayBuffer(\n self.replay_buffer_size,\n self.env,\n random_seed=np.random.randint(10000)\n )\n else:\n assert max_path_length < replay_buffer._max_replay_buffer_size\n self.replay_buffer = replay_buffer\n\n self._n_env_steps_total = 0\n self._n_train_steps_total = 0\n self._n_rollouts_total = 0\n self._do_train_time = 0\n self._epoch_start_time = None\n self._algo_start_time = None\n self._old_table_keys = None\n self._current_path_builder = PathBuilder()\n self._exploration_paths = []\n\n if wrap_absorbing:\n # needs to be properly handled both here and in replay buffer\n raise NotImplementedError()\n self.wrap_absorbing = wrap_absorbing\n self.freq_saving = freq_saving\n self.no_terminal = no_terminal\n\n self.eval_statistics = None\n 
self.freq_log_visuals = freq_log_visuals\n\n\n def train(self, start_epoch=0):\n self.pretrain()\n if start_epoch == 0:\n params = self.get_epoch_snapshot(-1)\n logger.save_itr_params(-1, params)\n self.training_mode(False)\n self._n_env_steps_total = start_epoch * self.num_env_steps_per_epoch\n gt.reset()\n gt.set_def_unique(False)\n self.start_training(start_epoch=start_epoch)\n\n\n def pretrain(self):\n \"\"\"\n Do anything before the main training phase.\n \"\"\"\n pass\n\n def start_training(self, start_epoch=0):\n self._current_path_builder = PathBuilder()\n observation = self._start_new_rollout()\n\n for epoch in gt.timed_for(\n range(start_epoch, self.num_epochs),\n save_itrs=True,\n ):\n self._start_epoch(epoch)\n for steps_this_epoch in range(self.num_env_steps_per_epoch):\n action, agent_info = self._get_action_and_info(observation)\n if self.render: self.training_env.render()\n\n next_ob, raw_reward, terminal, env_info = (\n self.training_env.step(action)\n )\n if self.no_terminal: terminal = False\n self._n_env_steps_total += 1\n\n reward = np.array([raw_reward])\n terminal = np.array([terminal])\n self._handle_step(\n observation,\n action,\n reward,\n next_ob,\n np.array([False]) if self.no_terminal else terminal,\n absorbing=np.array([0., 0.]),\n agent_info=agent_info,\n env_info=env_info,\n )\n if terminal[0]:\n if self.wrap_absorbing:\n raise NotImplementedError()\n '''\n If we wrap absorbing states, two additional\n transitions must be added: (s_T, s_abs) and\n (s_abs, s_abs). In Disc Actor Critic paper\n they make s_abs be a vector of 0s with last\n dim set to 1. Here we are going to add the following:\n ([next_ob,0], random_action, [next_ob, 1]) and\n ([next_ob,1], random_action, [next_ob, 1])\n This way we can handle varying types of terminal states.\n '''\n # next_ob is the absorbing state\n # for now just taking the previous action\n self._handle_step(\n next_ob,\n action,\n # env.action_space.sample(),\n # the reward doesn't matter\n reward,\n next_ob,\n np.array([False]),\n absorbing=np.array([0.0, 1.0]),\n agent_info=agent_info,\n env_info=env_info\n )\n self._handle_step(\n next_ob,\n action,\n # env.action_space.sample(),\n # the reward doesn't matter\n reward,\n next_ob,\n np.array([False]),\n absorbing=np.array([1.0, 1.0]),\n agent_info=agent_info,\n env_info=env_info\n )\n self._handle_rollout_ending()\n observation = self._start_new_rollout()\n elif len(self._current_path_builder) >= self.max_path_length:\n self._handle_rollout_ending()\n observation = self._start_new_rollout()\n else:\n observation = next_ob\n\n if self._n_env_steps_total % self.num_steps_between_train_calls == 0:\n gt.stamp('sample')\n self._try_to_train(epoch)\n gt.stamp('train')\n\n gt.stamp('sample')\n self._try_to_eval(epoch)\n gt.stamp('eval')\n self._end_epoch()\n\n def _try_to_train(self, epoch):\n if self._can_train():\n self.training_mode(True)\n self._do_training(epoch)\n self._n_train_steps_total += 1\n self.training_mode(False)\n\n def _try_to_eval(self, epoch):\n\n if self._can_evaluate():\n # save if it's time to save\n if (epoch % self.freq_saving == 0) or (epoch + 1 >= self.num_epochs):\n # if epoch + 1 >= self.num_epochs:\n # epoch = 'final'\n logger.save_extra_data(self.get_extra_data_to_save(epoch))\n params = self.get_epoch_snapshot(epoch)\n logger.save_itr_params(epoch, params)\n\n self.evaluate(epoch)\n\n logger.record_tabular(\n \"Number of train calls total\",\n self._n_train_steps_total,\n )\n logger.record_tabular(\n \"Number of env steps total\",\n 
self._n_env_steps_total,\n )\n logger.record_tabular(\n \"Number of rollouts total\",\n self._n_rollouts_total,\n )\n\n times_itrs = gt.get_times().stamps.itrs\n train_time = times_itrs['train'][-1]\n sample_time = times_itrs['sample'][-1]\n eval_time = times_itrs['eval'][-1] if epoch > 0 else 0\n epoch_time = train_time + sample_time + eval_time\n total_time = gt.get_times().total\n\n logger.record_tabular('Train Time (s)', train_time)\n logger.record_tabular('(Previous) Eval Time (s)', eval_time)\n logger.record_tabular('Sample Time (s)', sample_time)\n logger.record_tabular('Epoch Time (s)', epoch_time)\n logger.record_tabular('Total Train Time (s)', total_time)\n\n logger.record_tabular(\"Epoch\", epoch)\n logger.dump_tabular(with_prefix=False, with_timestamp=False)\n else:\n logger.log(\"Skipping eval for now.\")\n\n def _can_evaluate(self):\n \"\"\"\n One annoying thing about the logger table is that the keys at each\n iteration need to be the exact same. So unless you can compute\n everything, skip evaluation.\n\n A common example for why you might want to skip evaluation is that at\n the beginning of training, you may not have enough data for a\n validation and training set.\n\n :return:\n \"\"\"\n return (\n len(self._exploration_paths) > 0\n and self.replay_buffer.num_steps_can_sample() >= self.min_steps_before_training\n )\n\n def _can_train(self):\n return self.replay_buffer.num_steps_can_sample() >= self.min_steps_before_training\n\n def _get_action_and_info(self, observation):\n \"\"\"\n Get an action to take in the environment.\n :param observation:\n :return:\n \"\"\"\n self.exploration_policy.set_num_steps_total(self._n_env_steps_total)\n return self.exploration_policy.get_action(\n observation,\n )\n\n def _start_epoch(self, epoch):\n self._epoch_start_time = time.time()\n self._exploration_paths = []\n self._do_train_time = 0\n logger.push_prefix('Iteration #%d | ' % epoch)\n\n def _end_epoch(self):\n self.eval_statistics = None\n logger.log(\"Epoch Duration: {0}\".format(\n time.time() - self._epoch_start_time\n ))\n logger.log(\"Started Training: {0}\".format(self._can_train()))\n logger.pop_prefix()\n\n def _start_new_rollout(self):\n self.exploration_policy.reset()\n return self.training_env.reset()\n\n def _handle_path(self, path):\n \"\"\"\n Naive implementation: just loop through each transition.\n :param path:\n :return:\n \"\"\"\n for (\n ob,\n action,\n reward,\n next_ob,\n terminal,\n agent_info,\n env_info\n ) in zip(\n path[\"observations\"],\n path[\"actions\"],\n path[\"rewards\"],\n path[\"next_observations\"],\n path[\"terminals\"],\n path[\"agent_infos\"],\n path[\"env_infos\"],\n ):\n self._handle_step(\n ob,\n action,\n reward,\n next_ob,\n terminal,\n agent_info=agent_info,\n env_info=env_info,\n )\n self._handle_rollout_ending()\n\n def _handle_step(\n self,\n observation,\n action,\n reward,\n next_observation,\n terminal,\n absorbing,\n agent_info,\n env_info,\n ):\n \"\"\"\n Implement anything that needs to happen after every step\n :return:\n \"\"\"\n self._current_path_builder.add_all(\n observations=observation,\n actions=action,\n rewards=reward,\n next_observations=next_observation,\n terminals=terminal,\n absorbing=absorbing,\n agent_infos=agent_info,\n env_infos=env_info,\n )\n self.replay_buffer.add_sample(\n observation=observation,\n action=action,\n reward=reward,\n terminal=terminal,\n next_observation=next_observation,\n absorbing=absorbing,\n agent_info=agent_info,\n env_info=env_info,\n )\n\n def _handle_rollout_ending(self):\n 
\"\"\"\n Implement anything that needs to happen after every rollout.\n \"\"\"\n self.replay_buffer.terminate_episode()\n self._n_rollouts_total += 1\n if len(self._current_path_builder) > 0:\n self._exploration_paths.append(\n self._current_path_builder\n )\n self._current_path_builder = PathBuilder()\n\n def get_epoch_snapshot(self, epoch):\n \"\"\"\n Probably will be overridden by each algorithm\n \"\"\"\n data_to_save = dict(\n epoch=epoch,\n exploration_policy=self.exploration_policy,\n )\n if self.save_environment:\n data_to_save['env'] = self.training_env\n return data_to_save\n \n # @abc.abstractmethod\n # def load_snapshot(self, snapshot):\n # \"\"\"\n # Should be implemented on a per algorithm basis\n # taking into consideration the particular\n # get_epoch_snapshot implementation for the algorithm\n # \"\"\"\n # pass\n\n def get_extra_data_to_save(self, epoch):\n \"\"\"\n Save things that shouldn't be saved every snapshot but rather\n overwritten every time.\n :param epoch:\n :return:\n \"\"\"\n if self.render:\n self.training_env.render(close=True)\n data_to_save = dict(\n epoch=epoch,\n )\n if self.save_environment:\n data_to_save['env'] = self.training_env\n if self.save_replay_buffer:\n data_to_save['replay_buffer'] = self.replay_buffer\n if self.save_algorithm:\n data_to_save['algorithm'] = self\n return data_to_save\n\n @abc.abstractmethod\n def training_mode(self, mode):\n \"\"\"\n Set training mode to `mode`.\n :param mode: If True, training will happen (e.g. set the dropout\n probabilities to not all ones).\n \"\"\"\n pass\n\n\n @abc.abstractmethod\n def _do_training(self):\n \"\"\"\n Perform some update, e.g. perform one gradient step.\n :return:\n \"\"\"\n pass\n\n\n def evaluate(self, epoch):\n \"\"\"\n Evaluate the policy, e.g. save/print progress.\n :param epoch:\n :return:\n \"\"\"\n statistics = OrderedDict()\n try:\n statistics.update(self.eval_statistics)\n self.eval_statistics = None\n except:\n print('No Stats to Eval')\n\n logger.log(\"Collecting samples for evaluation\")\n test_paths = self.eval_sampler.obtain_samples()\n\n statistics.update(eval_util.get_generic_path_information(\n test_paths, stat_prefix=\"Test\",\n ))\n statistics.update(eval_util.get_generic_path_information(\n self._exploration_paths, stat_prefix=\"Exploration\",\n ))\n\n if hasattr(self.env, \"log_diagnostics\"):\n self.env.log_diagnostics(test_paths)\n if hasattr(self.env, \"log_statistics\"):\n statistics.update(self.env.log_statistics(test_paths))\n if epoch % self.freq_log_visuals == 0:\n if hasattr(self.env, \"log_visuals\"):\n self.env.log_visuals(test_paths, epoch, logger.get_snapshot_dir())\n \n average_returns = eval_util.get_average_returns(test_paths)\n statistics['AverageReturn'] = average_returns\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n \n best_statistic = statistics[self.best_key]\n if best_statistic > self.best_statistic_so_far:\n self.best_statistic_so_far = best_statistic\n if self.save_best and epoch >= self.save_best_starting_from_epoch:\n data_to_save = {\n 'epoch': epoch,\n 'statistics': statistics\n }\n data_to_save.update(self.get_epoch_snapshot(epoch))\n logger.save_extra_data(data_to_save, 'best.pkl')\n print('\\n\\nSAVED BEST\\n\\n')\n"
] | [
[
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
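base_algorithm.py above uses numpy only lightly: it draws a random seed for the replay buffer with numpy.random.randint and wraps scalar step outcomes as 1-element arrays with numpy.array before storing them. A minimal sketch of those two calls with hypothetical step values:

import numpy as np

random_seed = np.random.randint(10000)  # replay-buffer seed, as passed to EnvReplayBuffer
raw_reward, terminal = 1.0, False       # hypothetical outputs of one environment step
reward = np.array([raw_reward])         # scalars wrapped as 1-element arrays before storage
done = np.array([terminal])
print(random_seed, reward, done)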
VolkerH/gputools | [
"b8732c3cf82b96c6960497e6d82ce6b2bac463aa"
] | [
"gputools/convolve/minmax_filter.py"
] | [
"from __future__ import print_function, unicode_literals, absolute_import, division\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nimport os\nimport numpy as np\nfrom gputools import OCLArray, OCLProgram, get_device\n\nfrom gputools.core.ocltypes import assert_bufs_type\nfrom gputools.utils.tile_iterator import tile_iterator\nfrom ._abspath import abspath\n\n\ndef _filter_max_2_gpu(data_g, size=10, res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n prog = OCLProgram(abspath(\"kernels/minmax_filter.cl\"))\n\n tmp_g = OCLArray.empty_like(data_g)\n\n if res_g is None:\n res_g = OCLArray.empty_like(data_g)\n\n prog.run_kernel(\"max_2_x\", data_g.shape[::-1], None, data_g.data, tmp_g.data, np.int32(size[-1]))\n prog.run_kernel(\"max_2_y\", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-2]))\n\n return res_g\n\n\ndef _filter_max_3_gpu(data_g, size=10, res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n prog = OCLProgram(abspath(\"kernels/minmax_filter.cl\"))\n\n tmp_g = OCLArray.empty_like(data_g)\n\n if res_g is None:\n res_g = OCLArray.empty_like(data_g)\n\n prog.run_kernel(\"max_3_x\", data_g.shape[::-1], None, data_g.data, res_g.data, np.int32(size[-1]))\n prog.run_kernel(\"max_3_y\", data_g.shape[::-1], None, res_g.data, tmp_g.data, np.int32(size[-2]))\n prog.run_kernel(\"max_3_z\", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-3]))\n\n return res_g\n\n\n\n\ndef _max_filter_gpu(data_g, size=5, res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n assert (len(data_g.shape) == len(size))\n\n if len(data_g.shape) == 2:\n return _filter_max_2_gpu(data_g, size=size, res_g=res_g)\n elif len(data_g.shape) == 3:\n return _filter_max_3_gpu(data_g, size=size, res_g=res_g)\n else:\n raise NotImplementedError(\"only 2 or 3d arrays are supported for now\")\n\n\ndef _max_filter_numpy(data, size=5):\n data_g = OCLArray.from_array(data.astype(np.float32))\n return _max_filter_gpu(data_g, size=size).get()\n\n\ndef max_filter(data, size=10, res_g=None, sub_blocks=(1, 1, 1)):\n \"\"\"\n maximum filter of given size\n\n Parameters\n ----------\n data: 2 or 3 dimensional ndarray or OCLArray of type float32\n input data\n size: scalar, tuple\n the size of the patch to consider\n res_g: OCLArray\n store result in buffer if given\n sub_blocks:\n perform over subblock tiling (only if data is ndarray)\n\n Returns\n -------\n filtered image or None (if OCLArray)\n \"\"\"\n\n if np.isscalar(size):\n size = (size,)*len(data.shape)\n\n if isinstance(data, np.ndarray):\n data = np.ascontiguousarray(data)\n if set(sub_blocks) == {1} or sub_blocks is None:\n return _max_filter_numpy(data, size)\n else:\n # cut the image into tile and operate on every of them\n N_sub = [int(np.ceil(1. 
* n / s)) for n, s in zip(data.shape, sub_blocks)]\n Npads = tuple(map(lambda x: x//2, size))\n res = np.empty(data.shape, np.float32)\n for i, (data_tile, data_s_src, data_s_dest) \\\n in enumerate(tile_iterator(data, blocksize=N_sub,\n padsize=Npads,\n mode=\"constant\")):\n res_tile = _max_filter_numpy(data_tile.copy(),\n size)\n res[data_s_src] = res_tile[data_s_dest]\n return res\n\n\n elif isinstance(data, OCLArray):\n return _max_filter_gpu(data, size=size, res_g=res_g)\n else:\n raise TypeError(\"array argument (1) has bad type: %s\" % type(data))\n\n\n\ndef _filter_min_2_gpu(data_g, size=(10,10), res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n prog = OCLProgram(abspath(\"kernels/minmax_filter.cl\"))\n\n tmp_g = OCLArray.empty_like(data_g)\n\n if res_g is None:\n res_g = OCLArray.empty_like(data_g)\n\n prog.run_kernel(\"min_2_x\", data_g.shape[::-1], None, data_g.data, tmp_g.data, np.int32(size[-1]))\n prog.run_kernel(\"min_2_y\", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-2]))\n\n return res_g\n\n\ndef _filter_min_3_gpu(data_g, size=(10,10,10), res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n prog = OCLProgram(abspath(\"kernels/minmax_filter.cl\"))\n\n tmp_g = OCLArray.empty_like(data_g)\n\n if res_g is None:\n res_g = OCLArray.empty_like(data_g)\n\n prog.run_kernel(\"min_3_x\", data_g.shape[::-1], None, data_g.data, res_g.data, np.int32(size[-1]))\n prog.run_kernel(\"min_3_y\", data_g.shape[::-1], None, res_g.data, tmp_g.data, np.int32(size[-2]))\n prog.run_kernel(\"min_3_z\", data_g.shape[::-1], None, tmp_g.data, res_g.data, np.int32(size[-3]))\n\n return res_g\n\n\n\n\ndef _min_filter_gpu(data_g, size=(10,10), res_g=None):\n assert_bufs_type(np.float32, data_g)\n\n assert (len(data_g.shape)==len(size))\n\n if len(data_g.shape) == 2:\n return _filter_min_2_gpu(data_g, size=size, res_g=res_g)\n elif len(data_g.shape) == 3:\n return _filter_min_3_gpu(data_g, size=size, res_g=res_g)\n else:\n raise NotImplementedError(\"only 2 or 3d arrays are supported for now\")\n\n\ndef _min_filter_numpy(data, size=(10,10)):\n data_g = OCLArray.from_array(data.astype(np.float32))\n return _min_filter_gpu(data_g, size=size).get()\n\n\ndef min_filter(data, size=10, res_g=None, sub_blocks=(1, 1, 1)):\n \"\"\"\n minimum filter of given size\n\n Parameters\n ----------\n data: 2 or 3 dimensional ndarray or OCLArray of type float32\n input data\n size: scalar, tuple\n the size of the patch to consider\n res_g: OCLArray\n store result in buffer if given\n sub_blocks:\n perform over subblock tiling (only if data is ndarray)\n\n Returns\n -------\n filtered image or None (if OCLArray)\n \"\"\"\n\n if np.isscalar(size):\n size = (size,)*len(data.shape)\n\n if isinstance(data, np.ndarray):\n if set(sub_blocks) == {1} or sub_blocks is None:\n return _min_filter_numpy(data, size)\n else:\n # cut the image into tile and operate on every of them\n N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]\n Npads = tuple(map(lambda x: x//2, size))\n res = np.empty(data.shape, np.float32)\n for i, (data_tile, data_s_src, data_s_dest) \\\n in enumerate(tile_iterator(data, blocksize=N_sub,\n padsize=Npads,\n mode=\"constant\")):\n res_tile = _min_filter_numpy(data_tile.copy(),\n size)\n res[data_s_src] = res_tile[data_s_dest]\n return res\n\n\n elif isinstance(data, OCLArray):\n return _min_filter_gpu(data, size=size, res_g=res_g)\n else:\n raise TypeError(\"array argument (1) has bad type: %s\" % type(data))\n"
] | [
[
"numpy.ascontiguousarray",
"numpy.int32",
"numpy.ceil",
"numpy.isscalar",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
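The min_filter/max_filter functions above normalize a scalar size to a per-axis tuple and, when sub_blocks tiling is requested, derive the per-axis block size and pad widths before allocating the output buffer. A minimal sketch of that bookkeeping with hypothetical inputs (no OpenCL required):

import numpy as np

size, shape, sub_blocks = 5, (64, 128), (2, 2)  # hypothetical filter size, image shape, tiling
if np.isscalar(size):
    size = (size,) * len(shape)                 # scalar size -> one entry per axis
n_sub = [int(np.ceil(1. * n / s)) for n, s in zip(shape, sub_blocks)]  # block size per axis
pads = tuple(x // 2 for x in size)              # half-window padding around each tile
res = np.empty(shape, np.float32)               # output buffer, filled tile by tile
print(n_sub, pads, res.shape)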
domingoesteban/robolearn | [
"0d20125425c352b80ef2eeed1c0b11ab6497b11a",
"0d20125425c352b80ef2eeed1c0b11ab6497b11a"
] | [
"robolearn/torch/policies/tanh_gaussian_promp_multi_policy.py",
"robolearn/torch/policies/tanh_gaussian_composed_multi_policy.py"
] | [
"import math\nimport torch\nfrom torch import nn as nn\nfrom torch.distributions import Normal\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import np_ify\nfrom torch.nn.modules.normalization import LayerNorm\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.models.policies import ExplorationPolicy\nfrom collections import OrderedDict\nfrom itertools import chain\n\n# LOG_SIG_MAX = 2\n# LOG_SIG_MIN = -3.0\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -20\n\n# SIG_MAX = 7.38905609893065\n# SIG_MIN = 0.049787068367863944\n\n# LOG_MIX_COEFF_MIN = -10\n# LOG_MIX_COEFF_MAX = -1e-6 #-4.5e-5\n# LOG_MIX_COEFF_MIN = -1\n# LOG_MIX_COEFF_MAX = 1 #-4.5e-5\n\n# EPS = 1e-12\nEPS = 1e-8\n\n\nclass TanhGaussianPrompMultiPolicy(PyTorchModule, ExplorationPolicy):\n \"\"\"\n Usage:\n\n ```\n policy = TanhGaussianPrompMultiPolicy(...)\n action, policy_dict = policy(obs)\n ```\n\n Here, mean and log_std are the mean and log_std of the Gaussian that is\n sampled from.\n\n If deterministic is True, action = tanh(mean).\n If return_log_prob is False (default), log_prob = None\n This is done because computing the log_prob can be a bit expensive.\n \"\"\"\n def __init__(\n self,\n obs_dim,\n action_dim,\n n_policies,\n shared_hidden_sizes=None,\n unshared_hidden_sizes=None,\n unshared_mix_hidden_sizes=None,\n stds=None,\n hidden_activation='relu',\n hidden_w_init='xavier_normal',\n hidden_b_init_val=0,\n output_w_init='xavier_normal',\n output_b_init_val=0,\n pol_output_activation='linear',\n mix_output_activation='linear',\n input_norm=False,\n shared_layer_norm=False,\n policies_layer_norm=False,\n mixture_layer_norm=False,\n softmax_weights=False,\n **kwargs\n ):\n self.save_init_params(locals())\n PyTorchModule.__init__(self)\n ExplorationPolicy.__init__(self, action_dim)\n\n self._input_size = obs_dim\n self._output_sizes = action_dim\n self._n_subpolicies = n_policies\n # Activation Fcns\n self._hidden_activation = ptu.get_activation(hidden_activation)\n self._pol_output_activation = ptu.get_activation(pol_output_activation)\n self._mix_output_activation = ptu.get_activation(mix_output_activation)\n # Normalization Layer Flags\n self._shared_layer_norm = shared_layer_norm\n self._policies_layer_norm = policies_layer_norm\n self._mixture_layer_norm = mixture_layer_norm\n # Layers Lists\n self._sfcs = [] # Shared Layers\n self._sfc_norms = [] # Norm. Shared Layers\n self._pfcs = [list() for _ in range(self._n_subpolicies)] # Policies Layers\n self._pfc_norms = [list() for _ in range(self._n_subpolicies)] # N. Pol. L.\n self._pfc_lasts = [] # Last Policies Layers\n self._mfcs = [] # Mixing Layers\n self._norm_mfcs = [] # Norm. 
Mixing Layers\n # self.mfc_last = None # Below is instantiated\n\n self._softmax_weights = softmax_weights\n\n # Initial size = Obs size\n in_size = self._input_size\n\n # Ordered Dictionaries for specific modules/parameters\n self._shared_modules = OrderedDict()\n self._shared_parameters = OrderedDict()\n self._policies_modules = [OrderedDict() for _ in range(n_policies)]\n self._policies_parameters = [OrderedDict() for _ in range(n_policies)]\n self._mixing_modules = OrderedDict()\n self._mixing_parameters = OrderedDict()\n\n # ############# #\n # Shared Layers #\n # ############# #\n if input_norm:\n ln = nn.BatchNorm1d(in_size)\n self.sfc_input = ln\n self.add_shared_module(\"sfc_input\", ln)\n else:\n self.sfc_input = None\n\n if shared_hidden_sizes is not None:\n for ii, next_size in enumerate(shared_hidden_sizes):\n sfc = nn.Linear(in_size, next_size)\n ptu.layer_init(\n layer=sfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"sfc{}\".format(ii), sfc)\n self._sfcs.append(sfc)\n self.add_shared_module(\"sfc{}\".format(ii), sfc)\n\n if self._shared_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"sfc{}_norm\".format(ii), ln)\n self._sfc_norms.append(ln)\n self.add_shared_module(\"sfc{}_norm\".format(ii), ln)\n in_size = next_size\n\n # Get the output_size of the shared layers (assume same for all)\n multipol_in_size = in_size\n mixture_in_size = in_size\n\n # ############### #\n # Unshared Layers #\n # ############### #\n # Unshared Multi-Policy Hidden Layers\n if unshared_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_hidden_sizes):\n for pol_idx in range(self._n_subpolicies):\n pfc = nn.Linear(multipol_in_size, next_size)\n ptu.layer_init(\n layer=pfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"pfc{}_{}\".format(pol_idx, ii), pfc)\n self._pfcs[pol_idx].append(pfc)\n self.add_policies_module(\"pfc{}_{}\".format(pol_idx, ii),\n pfc, idx=pol_idx)\n\n if self._policies_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"pfc{}_{}_norm\".format(pol_idx, ii),\n ln)\n self._pfc_norms[pol_idx].append(ln)\n self.add_policies_module(\"pfc{}_{}_norm\".format(pol_idx,\n ii),\n ln, idx=pol_idx)\n multipol_in_size = next_size\n\n # Multi-Policy Last Layers\n for pol_idx in range(self._n_subpolicies):\n last_pfc = nn.Linear(multipol_in_size, action_dim)\n ptu.layer_init(\n layer=last_pfc,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"pfc{}_last\".format(pol_idx), last_pfc)\n self._pfc_lasts.append(last_pfc)\n self.add_policies_module(\"pfc{}_last\".format(pol_idx), last_pfc,\n idx=pol_idx)\n\n # Multi-Policy Log-Stds Last Layers\n self.stds = stds\n self.log_std = list()\n if stds is None:\n self._pfc_log_std_lasts = list()\n for pol_idx in range(self._n_subpolicies):\n last_pfc_log_std = nn.Linear(multipol_in_size, action_dim)\n ptu.layer_init(\n layer=last_pfc_log_std,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"pfc{}_log_std_last\".format(pol_idx),\n last_pfc_log_std)\n self._pfc_log_std_lasts.append(last_pfc_log_std)\n self.add_policies_module(\"pfc{}_log_std_last\".format(pol_idx),\n last_pfc_log_std, idx=pol_idx)\n\n else:\n for std in stds:\n self.log_std.append(torch.log(stds))\n assert LOG_SIG_MIN <= self.log_std[-1] <= LOG_SIG_MAX\n\n # 
############# #\n # Mixing Layers #\n # ############# #\n # Unshared Mixing-Weights Hidden Layers\n if unshared_mix_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_mix_hidden_sizes):\n mfc = nn.Linear(mixture_in_size, next_size)\n ptu.layer_init(\n layer=mfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"mfc{}\".format(ii), mfc)\n self._mfcs.append(mfc)\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc{}\".format(ii), mfc)\n\n if self._mixture_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"mfc{}_norm\".format(ii), ln)\n self._norm_mfcs.append(ln)\n self.add_mixing_module(\"mfc{}_norm\".format(ii), ln)\n mixture_in_size = next_size\n\n # Unshared Mixing-Weights Last Layers\n mfc_last = nn.Linear(mixture_in_size, self._n_subpolicies * action_dim)\n ptu.layer_init(\n layer=mfc_last,\n option=output_w_init,\n activation=mix_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"mfc_last\", mfc_last)\n self.mfc_last = mfc_last\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc_last\", mfc_last)\n\n self.mfc_sigmoid = nn.Sigmoid()\n\n self._normal_dist = Normal(loc=ptu.zeros(action_dim),\n scale=ptu.ones(action_dim))\n\n self._pols_idxs = ptu.arange(self._n_subpolicies)\n\n def get_action(self, obs_np, **kwargs):\n \"\"\"\n \"\"\"\n actions, info_dict = self.get_actions(obs_np[None], **kwargs)\n\n for key, val in info_dict.items():\n info_dict[key] = val[0, :]\n\n # Get [0, :] vals (Because it has dimension 1xdA)\n return actions[0, :], info_dict\n\n def get_actions(self, obs_np, **kwargs):\n \"\"\"\n \"\"\"\n actions, torch_info_dict = self.eval_np(obs_np, **kwargs)\n\n info_dict = dict()\n for key, vals in torch_info_dict.items():\n if key in ['mixing_coeff']:\n info_dict[key] = np_ify(torch_info_dict[key])\n\n return actions, info_dict\n\n def forward(\n self,\n obs,\n deterministic=False,\n return_log_prob=False,\n pol_idx=None,\n optimize_policies=True,\n ):\n \"\"\"\n\n Args:\n obs (Tensor): Observation(s)\n deterministic (bool): True for using mean. 
False, sample from dist.\n return_log_prob (bool):\n pol_idx (int):\n optimize_policies (bool):\n\n Returns:\n action (Tensor):\n pol_info (dict):\n\n \"\"\"\n h = obs\n nbatch = obs.shape[0]\n\n # ############# #\n # Shared Layers #\n # ############# #\n if self.sfc_input is not None:\n # h = self.sfc_input(h)\n if nbatch > 1:\n h = self.sfc_input(h)\n else:\n h = torch.batch_norm(\n h,\n self.sfc_input.weight,\n self.sfc_input.bias,\n self.sfc_input.running_mean,\n self.sfc_input.running_var,\n True, # TODO: True or False??\n self.sfc_input.momentum,\n self.sfc_input.eps,\n torch.backends.cudnn.enabled\n )\n\n for ss, fc in enumerate(self._sfcs):\n h = fc(h)\n\n if self._shared_layer_norm:\n h = self._sfc_norms[ss](h)\n\n h = self._hidden_activation(h)\n\n # ############## #\n # Multi Policies #\n # ############## #\n hs = [h.clone() for _ in range(self._n_subpolicies)]\n\n # Hidden Layers\n if len(self._pfcs) > 0:\n for pp in range(self._n_subpolicies):\n for ii, fc in enumerate(self._pfcs[pp]):\n hs[pp] = fc(hs[pp])\n\n if self._policies_layer_norm:\n hs[pp] = self._pfc_norms[pp][ii](hs[pp])\n\n hs[pp] = self._hidden_activation(hs[pp])\n\n # Last Mean Layers\n means = torch.cat(\n [(\n self._pol_output_activation(self._pfc_lasts[pp](hs[pp]))\n ).unsqueeze(dim=1)\n for pp in range(self._n_subpolicies)\n ],\n dim=1\n ) # Batch x Npols x dA\n\n # Last Log-Std Layers\n if self.stds is None:\n log_stds = torch.cat(\n [(\n self._pol_output_activation(\n self._pfc_log_std_lasts[pp](hs[pp])\n )\n ).unsqueeze(dim=1)\n for pp in range(self._n_subpolicies)\n ],\n dim=1\n ) # Batch x Npols x dA\n\n # # log_std option 1:\n # log_stds = torch.clamp(log_stds, min=LOG_SIG_MIN, max=LOG_SIG_MAX)\n # log_std option 2:\n log_stds = torch.tanh(log_stds)\n log_stds = \\\n LOG_SIG_MIN + 0.5 * (LOG_SIG_MAX - LOG_SIG_MIN)*(log_stds + 1)\n\n stds = torch.exp(log_stds)\n variances = stds**2\n\n else:\n log_stds = self.log_std\n stds = self.stds\n variances = stds**2\n\n # ############## #\n # Mixing Weigths #\n # ############## #\n mh = h.clone()\n\n if len(self._mfcs) > 0:\n for mm, mfc in enumerate(self._mfcs):\n mh = mfc(mh)\n\n if self._mixture_layer_norm:\n mh = self._norm_mfcs[mm](mh)\n\n mh = self._hidden_activation(mh)\n\n # NO nonlinear transformation\n mixture_coeff = \\\n self.mfc_last(mh).reshape(-1, self._n_subpolicies, self.action_dim)\n\n mixture_coeff = self.mfc_sigmoid(mixture_coeff)\n\n # if torch.isnan(mixture_coeff).any():\n # raise ValueError('Some mixture coeff(s) is(are) NAN: %s' %\n # mixture_coeff)\n #\n # if torch.isnan(means).any():\n # raise ValueError('Some means are NAN: %s' %\n # means)\n #\n # if torch.isnan(stds).any():\n # raise ValueError('Some stds are NAN: %s' %\n # stds)\n\n if pol_idx is None:\n # Calculate weighted means and stds (and log_stds)\n if optimize_policies:\n sig_invs = mixture_coeff/variances\n else:\n sig_invs = mixture_coeff/variances.detach()\n\n variance = 1./torch.sum(sig_invs, dim=1, keepdim=False)\n\n if optimize_policies:\n mean = variance*torch.sum(\n means*sig_invs,\n dim=1,\n keepdim=False\n )\n else:\n mean = variance*torch.sum(\n means.detach()*sig_invs,\n dim=1,\n keepdim=False\n )\n\n # log_std option 1:\n std = torch.sqrt(variance)\n std = torch.clamp(std,\n min=math.exp(LOG_SIG_MIN),\n max=math.exp(LOG_SIG_MAX))\n log_std = torch.log(std)\n # # log_std option 2:\n # variance = torch.tanh(variance)\n # variance = (\n # math.exp(LOG_SIG_MIN)**2 +\n # 0.5*(math.exp(LOG_SIG_MAX)**2 - math.exp(LOG_SIG_MIN)**2) *\n # (variance + 1)\n # )\n # std = 
torch.sqrt(variance)\n # log_std = torch.log(std)\n\n # TODO: Remove the following?\n # log_std = torch.logsumexp(\n # log_stds + log_mixture_coeff.reshape(-1,\n # self.action_dim,\n # self._n_subpolicies),\n # dim=-1,\n # keepdim=False\n # ) - torch.logsumexp(log_mixture_coeff, dim=-1, keepdim=True)\n\n # log_std = torch.log(std)\n\n else:\n index = self._pols_idxs[pol_idx]\n mean = \\\n torch.index_select(means, dim=1, index=index).squeeze(1)\n std = \\\n torch.index_select(stds, dim=1, index=index).squeeze(1)\n log_std = \\\n torch.index_select(log_stds, dim=1, index=index).squeeze(1)\n variance = \\\n torch.index_select(variances, dim=1, index=index).squeeze(1)\n\n pre_tanh_value = None\n log_prob = None\n pre_tanh_values = None\n log_probs = None\n\n if deterministic:\n action = torch.tanh(mean)\n actions = torch.tanh(means)\n else:\n # # Using this distribution instead of TanhMultivariateNormal\n # # because it has Diagonal Covariance.\n # # Then, a collection of n independent Gaussian r.v.\n # tanh_normal = TanhNormal(mean, std)\n #\n # # # It is the Lower-triangular factor of covariance because it is\n # # # Diagonal Covariance\n # # scale_trils = torch.stack([torch.diag(m) for m in std])\n # # tanh_normal = TanhMultivariateNormal(mean, scale_tril=scale_trils)\n #\n # if return_log_prob:\n # log_prob = tanh_normal.log_prob(\n # action,\n # pre_tanh_value=pre_tanh_value\n # )\n # log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n noise = self._normal_dist.sample((nbatch,))\n\n pre_tanh_value = std*noise + mean\n pre_tanh_values = stds*noise.unsqueeze(1) + means\n\n action = torch.tanh(pre_tanh_value)\n actions = torch.tanh(pre_tanh_values)\n\n if return_log_prob:\n # Log probability: Main Policy\n log_prob = -((pre_tanh_value - mean) ** 2) / (2*variance) \\\n - log_std - math.log(math.sqrt(2*math.pi))\n log_prob -= torch.log(\n # torch.clamp(1. - action**2, 0, 1)\n clip_but_pass_gradient(1. - action**2, 0, 1)\n + 1.e-6\n )\n log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n # Log probability: Sub-Policies\n log_probs = -((pre_tanh_values - means) ** 2) / (2*variances)\\\n - log_stds - math.log(math.sqrt(2*math.pi))\n log_probs -= torch.log(\n # torch.clamp(1. - actions**2, 0, 1)\n clip_but_pass_gradient(1. 
- actions**2, 0, 1)\n + 1.e-6\n )\n log_probs = log_probs.sum(dim=-1, keepdim=True)\n\n # if torch.isnan(action).any():\n # raise ValueError('ACTION NAN')\n #\n # if torch.isnan(actions).any():\n # raise ValueError('ACTION NAN')\n\n info_dict = dict(\n mean=mean,\n std=std,\n log_std=log_std,\n log_prob=log_prob,\n pre_tanh_value=pre_tanh_value,\n # log_mixture_coeff=log_mixture_coeff,\n mixing_coeff=mixture_coeff,\n pol_actions=actions,\n pol_means=means,\n pol_stds=stds,\n pol_log_stds=log_stds,\n pol_log_probs=log_probs,\n pol_pre_tanh_values=pre_tanh_values,\n )\n\n return action, info_dict\n\n def log_action(self, actions, obs, pol_idx=None):\n raise NotImplementedError\n\n @property\n def n_heads(self):\n return self._n_subpolicies\n\n @property\n def n_subpolicies(self):\n return self._n_subpolicies\n\n # ################# #\n # Shared parameters #\n # ################# #\n\n def shared_parameters(self):\n \"\"\"Returns an iterator over the shared parameters.\n \"\"\"\n for name, param in self.named_shared_parameters():\n yield param\n\n def named_shared_parameters(self, **kwargs):\n \"\"\"Returns an iterator over shared module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._shared_modules,\n self._shared_parameters,\n **kwargs)\n\n def add_shared_module(self, name, module):\n ptu.add_module(self._shared_modules, name, module)\n\n # ####################### #\n # Sub-Policies parameters #\n # ####################### #\n\n def policies_parameters(self, idx=None):\n \"\"\"Returns an iterator over the policies parameters.\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for name, param in self.named_policies_parameters(idx_list):\n yield param\n\n def named_policies_parameters(self, idx=None, **kwargs):\n \"\"\"Returns an iterator over policies module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n return chain(*[ptu.named_parameters(self._policies_modules[idx],\n self._policies_parameters[idx],\n **kwargs)\n for idx in idx_list])\n\n def add_policies_module(self, name, module, idx=None):\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for idx in idx_list:\n ptu.add_module(self._policies_modules[idx], name, module)\n\n # ################# #\n # Mixing parameters #\n # ################# #\n\n def mixing_parameters(self):\n \"\"\"Returns an iterator over the mixing parameters.\n \"\"\"\n for name, param in self.named_mixing_parameters():\n yield param\n\n def named_mixing_parameters(self, **kwargs):\n \"\"\"Returns an iterator over mixing module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._mixing_modules,\n self._mixing_parameters,\n **kwargs)\n\n def add_mixing_module(self, name, module):\n ptu.add_module(self._mixing_modules, name, module)\n\n\ndef clip_but_pass_gradient(x, l=-1., u=1.):\n clip_up = (x > u).to(ptu.device, dtype=torch.float32)\n clip_low = (x < l).to(ptu.device, dtype=torch.float32)\n return x + ((u - x)*clip_up + (l - x)*clip_low).detach()\n",
"import math\nimport torch\nfrom torch import nn as nn\nfrom torch.distributions import Normal\nfrom robolearn.torch.core import PyTorchModule\nfrom robolearn.torch.utils.pytorch_util import np_ify\nfrom torch.nn.modules.normalization import LayerNorm\nimport robolearn.torch.utils.pytorch_util as ptu\nfrom robolearn.models.policies import ExplorationPolicy\nfrom collections import OrderedDict\nfrom itertools import chain\n\nLOG_SIG_MAX = 2\n# LOG_SIG_MIN = -20\nLOG_SIG_MIN = -3.0\n\nSIG_MAX = 7.38905609893065\nSIG_MIN = 0.049787068367863944\n\nLOG_MIX_COEFF_MIN = -10\nLOG_MIX_COEFF_MAX = -1e-6 #-4.5e-5\nLOG_MIX_COEFF_MIN = -1\nLOG_MIX_COEFF_MAX = 1 #-4.5e-5\n\nEPS = 1e-12\n\n\nclass TanhGaussianComposedMultiPolicy(PyTorchModule, ExplorationPolicy):\n \"\"\"\n Usage:\n\n ```\n policy = TanhGaussianComposedMultiPolicy(...)\n action, policy_dict = policy(obs)\n action, policy_dict = policy(obs, deterministic=True)\n action, policy_dict = policy(obs, return_log_prob=True)\n ```\n\n Here, mean and log_std are the mean and log_std of the Gaussian that is\n sampled from.\n\n If deterministic is True, action = tanh(mean).\n If return_log_prob is False (default), log_prob = None\n This is done because computing the log_prob can be a bit expensive.\n \"\"\"\n def __init__(\n self,\n obs_dim,\n action_dim,\n n_policies,\n latent_dim,\n shared_hidden_sizes=None,\n unshared_hidden_sizes=None,\n unshared_mix_hidden_sizes=None,\n unshared_policy_hidden_sizes=None,\n stds=None,\n hidden_activation='relu',\n hidden_w_init='xavier_normal',\n hidden_b_init_val=1e-2,\n output_w_init='xavier_normal',\n output_b_init_val=1e-2,\n pol_output_activation='linear',\n mix_output_activation='linear',\n final_pol_output_activation='linear',\n input_norm=False,\n shared_layer_norm=False,\n policies_layer_norm=False,\n mixture_layer_norm=False,\n final_policy_layer_norm=False,\n epsilon=1e-6,\n softmax_weights=False,\n **kwargs\n ):\n self.save_init_params(locals())\n TanhGaussianComposedMultiPolicy.__init__(self)\n ExplorationPolicy.__init__(self, action_dim)\n\n self._input_size = obs_dim\n self._output_sizes = action_dim\n self._n_subpolicies = n_policies\n self._latent_size = latent_dim\n # Activation Fcns\n self._hidden_activation = ptu.get_activation(hidden_activation)\n self._pol_output_activation = ptu.get_activation(pol_output_activation)\n self._mix_output_activation = ptu.get_activation(mix_output_activation)\n self._final_pol_output_activation = ptu.get_activation(final_pol_output_activation)\n # Normalization Layer Flags\n self._shared_layer_norm = shared_layer_norm\n self._policies_layer_norm = policies_layer_norm\n self._mixture_layer_norm = mixture_layer_norm\n self._final_policy_layer_norm = final_policy_layer_norm\n # Layers Lists\n self._sfcs = [] # Shared Layers\n self._sfc_norms = [] # Norm. Shared Layers\n self._pfcs = [list() for _ in range(self._n_subpolicies)] # Policies Layers\n self._pfc_norms = [list() for _ in range(self._n_subpolicies)] # N. Pol. L.\n self._pfc_lasts = [] # Last Policies Layers\n self._mfcs = [] # Mixing Layers\n self._norm_mfcs = [] # Norm. Mixing Layers\n # self.mfc_last = None # Below is instantiated\n self._fpfcs = [] # Final Policy Layers\n self._norm_fpfcs = [] # Norm. 
Mixing Layers\n\n self._softmax_weights = softmax_weights\n\n # Initial size = Obs size\n in_size = self._input_size\n\n # Ordered Dictionaries for specific modules/parameters\n self._shared_modules = OrderedDict()\n self._shared_parameters = OrderedDict()\n self._policies_modules = [OrderedDict() for _ in range(n_policies)]\n self._policies_parameters = [OrderedDict() for _ in range(n_policies)]\n self._mixing_modules = OrderedDict()\n self._mixing_parameters = OrderedDict()\n self._final_policy_modules = OrderedDict()\n self._final_policy_parameters = OrderedDict()\n\n # ############# #\n # Shared Layers #\n # ############# #\n if input_norm:\n ln = nn.BatchNorm1d(in_size)\n self.sfc_input = ln\n self.add_shared_module(\"sfc_input\", ln)\n self.__setattr__(\"sfc_input\", ln)\n else:\n self.sfc_input = None\n\n if shared_hidden_sizes is not None:\n for ii, next_size in enumerate(shared_hidden_sizes):\n sfc = nn.Linear(in_size, next_size)\n ptu.layer_init(\n layer=sfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"sfc{}\".format(ii), sfc)\n self._sfcs.append(sfc)\n self.add_shared_module(\"sfc{}\".format(ii), sfc)\n\n if self._shared_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"sfc{}_norm\".format(ii), ln)\n self._sfc_norms.append(ln)\n self.add_shared_module(\"sfc{}_norm\".format(ii), ln)\n in_size = next_size\n\n # Get the output_size of the shared layers (assume same for all)\n multipol_in_size = in_size\n\n # ############### #\n # Unshared Layers #\n # ############### #\n # Unshared Multi-Policy Hidden Layers\n if unshared_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_hidden_sizes):\n for pol_idx in range(self._n_subpolicies):\n pfc = nn.Linear(multipol_in_size, next_size)\n ptu.layer_init(\n layer=pfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val\n )\n self.__setattr__(\"pfc{}_{}\".format(pol_idx, ii), pfc)\n self._pfcs[pol_idx].append(pfc)\n self.add_policies_module(\"pfc{}_{}\".format(pol_idx, ii),\n pfc, idx=pol_idx)\n\n if self._policies_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"pfc{}_{}_norm\".format(pol_idx, ii),\n ln)\n self._pfc_norms[pol_idx].append(ln)\n self.add_policies_module(\"pfc{}_{}_norm\".format(pol_idx,\n ii),\n ln, idx=pol_idx)\n multipol_in_size = next_size\n\n # Multi-Policy Last Layers\n for pol_idx in range(self._n_subpolicies):\n last_pfc = nn.Linear(multipol_in_size, latent_dim)\n ptu.layer_init(\n layer=last_pfc,\n option=output_w_init,\n activation=pol_output_activation,\n b=output_b_init_val\n )\n self.__setattr__(\"pfc{}_last\".format(pol_idx), last_pfc)\n self._pfc_lasts.append(last_pfc)\n self.add_policies_module(\"pfc{}_last\".format(pol_idx), last_pfc,\n idx=pol_idx)\n\n # ############# #\n # Mixing Layers #\n # ############# #\n mixture_in_size = in_size + latent_dim*self._n_subpolicies\n # Unshared Mixing-Weights Hidden Layers\n if unshared_mix_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_mix_hidden_sizes):\n mfc = nn.Linear(mixture_in_size, next_size)\n ptu.layer_init(\n layer=mfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val,\n )\n self.__setattr__(\"mfc{}\".format(ii), mfc)\n self._mfcs.append(mfc)\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc{}\".format(ii), mfc)\n\n if self._mixture_layer_norm:\n ln = LayerNorm(next_size)\n # ln = 
nn.BatchNorm1d(next_size)\n self.__setattr__(\"mfc{}_norm\".format(ii), ln)\n self._norm_mfcs.append(ln)\n self.add_mixing_module(\"mfc{}_norm\".format(ii), ln)\n mixture_in_size = next_size\n\n # Unshared Mixing-Weights Last Layers\n mfc_last = nn.Linear(mixture_in_size, latent_dim)\n ptu.layer_init(\n layer=mfc_last,\n option=output_w_init,\n activation=mix_output_activation,\n b=output_b_init_val,\n )\n self.__setattr__(\"mfc_last\", mfc_last)\n self.mfc_last = mfc_last\n # Add it to specific dictionaries\n self.add_mixing_module(\"mfc_last\", mfc_last)\n\n if softmax_weights:\n raise ValueError(\"Check if it is correct a softmax\")\n # self.mfc_softmax = nn.Softmax(dim=1)\n else:\n self.mfc_softmax = None\n\n # ################### #\n # Final Policy Layers #\n # ################### #\n final_pol_in_size = latent_dim\n if unshared_policy_hidden_sizes is not None:\n for ii, next_size in enumerate(unshared_policy_hidden_sizes):\n fpfc = nn.Linear(final_pol_in_size, next_size)\n ptu.layer_init(\n layer=fpfc,\n option=hidden_w_init,\n activation=hidden_activation,\n b=hidden_b_init_val\n )\n self.__setattr__(\"fpfc{}\".format(ii), fpfc)\n self._fpfcs.append(fpfc)\n # Add it to specific dictionaries\n self.add_final_policy_module(\"fpfc{}\".format(ii), fpfc)\n\n if self._mixture_layer_norm:\n ln = LayerNorm(next_size)\n # ln = nn.BatchNorm1d(next_size)\n self.__setattr__(\"fpfc{}_norm\".format(ii), ln)\n self._norm_fpfcs.append(ln)\n self.add_final_policy_module(\"fpfc{}_norm\".format(ii), ln)\n final_pol_in_size = next_size\n\n # Unshared Final Policy Last Layer\n fpfc_last = nn.Linear(final_pol_in_size, action_dim)\n ptu.layer_init(\n layer=fpfc_last,\n option=output_w_init,\n activation=final_pol_output_activation,\n b=output_b_init_val\n )\n self.__setattr__(\"fpfc_last\", fpfc_last)\n self.fpfc_last = fpfc_last\n # Add it to specific dictionaries\n self.add_final_policy_module(\"fpfc_last\", fpfc_last)\n\n # ########## #\n # Std Layers #\n # ########## #\n # Multi-Policy Log-Stds Last Layers\n fpfc_last_log_std = nn.Linear(final_pol_in_size, action_dim)\n ptu.layer_init(\n layer=fpfc_last_log_std,\n option=output_w_init,\n activation=final_pol_output_activation,\n b=output_b_init_val\n )\n self.__setattr__(\"fpfc_last_log_std\", fpfc_last_log_std)\n self.fpfc_last_log_std = fpfc_last_log_std\n # Add it to specific dictionaries\n self.add_final_policy_module(\"fpfc_last_log_std\", fpfc_last_log_std)\n\n self._normal_dist = Normal(loc=ptu.zeros(action_dim),\n scale=ptu.ones(action_dim))\n self._epsilon = epsilon\n\n self._pols_idxs = ptu.arange(self._n_subpolicies)\n self._compo_pol_idx = torch.tensor([self._n_subpolicies],\n dtype=torch.int64,\n device=ptu.device)\n\n def get_action(self, obs_np, **kwargs):\n actions, info_dict = self.get_actions(obs_np[None], **kwargs)\n\n for key, val in info_dict.items():\n info_dict[key] = val[0, :]\n\n # Get [0, :] vals (Because it has dimension 1xdA)\n return actions[0, :], info_dict\n\n def get_actions(self, obs_np, **kwargs):\n actions, torch_info_dict = self.eval_np(obs_np, **kwargs)\n\n info_dict = dict()\n for key, vals in torch_info_dict.items():\n if key in ['mixing_coeff']:\n info_dict[key] = np_ify(torch_info_dict[key])\n\n return actions, info_dict\n\n def forward(\n self,\n obs,\n deterministic=False,\n return_log_prob=False,\n pol_idx=None,\n optimize_policies=True,\n ):\n \"\"\"\n\n Args:\n obs (Tensor): Observation(s)\n deterministic (bool):\n return_log_prob (bool):\n pol_idx (int):\n optimize_policies (bool):\n\n Returns:\n action 
(Tensor):\n pol_info (dict):\n\n \"\"\"\n h = obs\n nbatch = obs.shape[0]\n\n # ############# #\n # Shared Layers #\n # ############# #\n if self.sfc_input is not None:\n # h = self.sfc_input(h)\n if nbatch > 1:\n h = self.sfc_input(h)\n else:\n h = torch.batch_norm(\n h,\n self.sfc_input.weight,\n self.sfc_input.bias,\n self.sfc_input.running_mean,\n self.sfc_input.running_var,\n True, # TODO: True or False??\n self.sfc_input.momentum,\n self.sfc_input.eps,\n torch.backends.cudnn.enabled\n )\n\n for ss, fc in enumerate(self._sfcs):\n h = fc(h)\n\n if self._mixture_layer_norm:\n h = self._sfc_norms[ss](h)\n\n h = self._hidden_activation(h)\n\n # ############## #\n # Multi Policies #\n # ############## #\n hs = [h.clone() for _ in range(self._n_subpolicies)]\n\n # Hidden Layers\n if len(self._pfcs) > 0:\n for pp in range(self._n_subpolicies):\n for ii, fc in enumerate(self._pfcs[pp]):\n hs[pp] = fc(hs[pp])\n\n if self._policies_layer_norm:\n hs[pp] = self._pfc_norms[pp][ii](hs[pp])\n\n hs[pp] = self._hidden_activation(hs[pp])\n\n subpol_means = \\\n [self._pol_output_activation(self._pfc_lasts[pp](hs[pp]))\n for pp in range(self._n_subpolicies)]\n subpols = torch.cat(subpol_means, dim=-1)\n\n if torch.isnan(subpols).any():\n raise ValueError('Some subpols are NAN:',\n subpols)\n\n # ############## #\n # Mixing Weigths #\n # ############## #\n mh = torch.cat([h.clone(), subpols], dim=-1) # N x dZ\n if not optimize_policies:\n mh = mh.detach()\n\n if len(self._mfcs) > 0:\n for mm, mfc in enumerate(self._mfcs):\n mh = mfc(mh)\n\n if self._mixture_layer_norm:\n mh = self._norm_mfcs[mm](mh)\n\n mh = self._hidden_activation(mh)\n\n # NO nonlinear transformation\n mpol_mean = self.mfc_last(mh)\n\n if self.mfc_softmax is not None:\n raise NotImplementedError\n # mixture_coeff = self.mfc_softmax(mixture_coeff)\n\n\n # Final Policy\n final_pol_inputs = [ii.unsqueeze(-2)\n for ii in (subpol_means + [mpol_mean])]\n fph = torch.cat(\n final_pol_inputs,\n dim=-2,\n )\n\n for ff, fpfc in enumerate(self._fpfcs):\n fph = fpfc(fph)\n\n if self._final_policy_layer_norm:\n fph = self._norm_mfcs[ff](fph)\n\n fph = self._hidden_activation(fph)\n\n means = self._final_pol_output_activation(\n self.fpfc_last(fph)\n )\n\n log_stds = self._final_pol_output_activation(\n self.fpfc_last_log_std(fph)\n )\n log_stds = torch.clamp(log_stds, LOG_SIG_MIN, LOG_SIG_MAX)\n stds = torch.exp(log_stds)\n variances = torch.pow(stds, 2)\n\n if pol_idx is None:\n index = self._compo_pol_idx\n else:\n index = self._pols_idxs[pol_idx]\n\n mean = \\\n torch.index_select(means, dim=-2, index=index).squeeze(-2)\n std = \\\n torch.index_select(stds, dim=-2, index=index).squeeze(-2)\n log_std = \\\n torch.index_select(log_stds, dim=-2, index=index).squeeze(-2)\n variance = \\\n torch.index_select(variances, dim=-2, index=index).squeeze(-2)\n\n means = \\\n torch.index_select(means, dim=-2, index=self._pols_idxs).squeeze(-2)\n stds = \\\n torch.index_select(stds, dim=-2, index=self._pols_idxs).squeeze(-2)\n log_stds = \\\n torch.index_select(log_stds, dim=-2, index=self._pols_idxs).squeeze(-2)\n variances = \\\n torch.index_select(variances, dim=-2, index=self._pols_idxs).squeeze(-2)\n\n pre_tanh_value = None\n log_prob = None\n entropy = None\n mean_action_log_prob = None\n log_probs = None\n pre_tanh_values = None\n\n mixture_coeff = ptu.ones((nbatch, self.n_heads, self.action_dim))\n\n if deterministic:\n action = torch.tanh(mean)\n actions = torch.tanh(means)\n else:\n noise = self._normal_dist.sample((nbatch,))\n pre_tanh_value = 
std*noise + mean\n pre_tanh_values = stds*noise.unsqueeze(1) + means\n action = torch.tanh(pre_tanh_value)\n actions = torch.tanh(pre_tanh_values)\n\n if return_log_prob:\n # Log probability: Main Policy\n log_prob = -((pre_tanh_value - mean) ** 2) / (2 * variance) \\\n - torch.log(std) - math.log(math.sqrt(2 * math.pi))\n log_prob -= torch.log(1. - action**2 + self._epsilon)\n log_prob = log_prob.sum(dim=-1, keepdim=True)\n\n # Log probability: Sub-Policies\n log_probs = -((pre_tanh_values - means) ** 2) / (2 * variances) \\\n - torch.log(stds) - math.log(math.sqrt(2 * math.pi))\n log_probs -= torch.log(1. - actions**2 + self._epsilon)\n log_probs = log_probs.sum(dim=-1, keepdim=True)\n\n if torch.isnan(action).any():\n raise ValueError('ACTION NAN')\n\n if torch.isnan(actions).any():\n raise ValueError('ACTION NAN')\n\n info_dict = dict(\n mean=mean,\n log_std=log_std,\n log_prob=log_prob,\n entropy=entropy,\n std=std,\n mean_action_log_prob=mean_action_log_prob,\n pre_tanh_value=pre_tanh_value,\n # log_mixture_coeff=log_mixture_coeff,\n mixing_coeff=mixture_coeff,\n pol_actions=actions,\n pol_means=means,\n pol_stds=stds,\n pol_log_stds=log_stds,\n pol_log_probs=log_probs,\n pol_pre_tanh_values=pre_tanh_values,\n )\n\n return action, info_dict\n\n def log_action(self, actions, obs, pol_idx=None):\n raise NotImplementedError\n\n @property\n def n_heads(self):\n return self._n_subpolicies\n\n @property\n def n_subpolicies(self):\n return self._n_subpolicies\n\n # ################# #\n # Shared parameters #\n # ################# #\n\n def shared_parameters(self):\n \"\"\"Returns an iterator over the shared parameters.\n \"\"\"\n for name, param in self.named_shared_parameters():\n yield param\n\n def named_shared_parameters(self, **kwargs):\n \"\"\"Returns an iterator over shared module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._shared_modules,\n self._shared_parameters,\n **kwargs)\n\n def add_shared_module(self, name, module):\n ptu.add_module(self._shared_modules, name, module)\n\n # ####################### #\n # Sub-Policies parameters #\n # ####################### #\n\n def policies_parameters(self, idx=None):\n \"\"\"Returns an iterator over the policies parameters.\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for name, param in self.named_policies_parameters(idx_list):\n yield param\n\n def named_policies_parameters(self, idx=None, **kwargs):\n \"\"\"Returns an iterator over policies module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n return chain(*[ptu.named_parameters(self._policies_modules[idx],\n self._policies_parameters[idx],\n **kwargs)\n for idx in idx_list])\n\n def add_policies_module(self, name, module, idx=None):\n if idx is None:\n idx_list = list(range(self._n_subpolicies))\n elif isinstance(idx, list) or isinstance(idx, tuple):\n idx_list = idx\n else:\n idx_list = [idx]\n\n for idx in idx_list:\n ptu.add_module(self._policies_modules[idx], name, module)\n\n # ################# #\n # Mixing parameters #\n # ################# #\n\n def mixing_parameters(self):\n \"\"\"Returns an iterator over the mixing parameters.\n \"\"\"\n for name, 
param in self.named_mixing_parameters():\n yield param\n\n def named_mixing_parameters(self, **kwargs):\n \"\"\"Returns an iterator over mixing module parameters, yielding both the\n name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._mixing_modules,\n self._mixing_parameters,\n **kwargs)\n\n def add_mixing_module(self, name, module):\n ptu.add_module(self._mixing_modules, name, module)\n\n # ####################### #\n # Final policy parameters #\n # ####################### #\n\n def final_policy_parameters(self):\n \"\"\"Returns an iterator over the final policy parameters.\n \"\"\"\n for name, param in self.named_final_policy_parameters():\n yield param\n\n def named_final_policy_parameters(self, **kwargs):\n \"\"\"Returns an iterator over final policy module parameters, yielding\n both the name of the parameter as well as the parameter itself\n \"\"\"\n return ptu.named_parameters(self._final_policy_modules,\n self._final_policy_parameters,\n **kwargs)\n\n def add_final_policy_module(self, name, module):\n ptu.add_module(self._final_policy_modules, name, module)\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.sqrt",
"torch.sum",
"torch.nn.Sigmoid",
"torch.tanh",
"torch.nn.Linear",
"torch.exp",
"torch.log",
"torch.nn.modules.normalization.LayerNorm",
"torch.index_select",
"torch.batch_norm"
],
[
"torch.nn.BatchNorm1d",
"torch.isnan",
"torch.cat",
"torch.tensor",
"torch.exp",
"torch.nn.Linear",
"torch.tanh",
"torch.log",
"torch.nn.modules.normalization.LayerNorm",
"torch.clamp",
"torch.index_select",
"torch.pow",
"torch.batch_norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
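The `forward()` method in the policy code above applies the standard change-of-variables correction for a tanh-squashed Gaussian when `return_log_prob` is set. The sketch below isolates that calculation in a standalone helper; it is an illustration only (the name `tanh_gaussian_log_prob` is not from the repository) and assumes nothing beyond PyTorch itself.

```python
import math
import torch

def tanh_gaussian_log_prob(pre_tanh, mean, std, epsilon=1e-6):
    """Log-density of a = tanh(u) for u ~ N(mean, std^2), summed over action
    dimensions, mirroring the return_log_prob branch of forward() above."""
    var = std.pow(2)
    # Diagonal Gaussian log-density of the pre-tanh sample u.
    log_prob = -((pre_tanh - mean) ** 2) / (2 * var) \
        - torch.log(std) - math.log(math.sqrt(2 * math.pi))
    # Change-of-variables correction for the tanh squashing.
    log_prob = log_prob - torch.log(1. - torch.tanh(pre_tanh) ** 2 + epsilon)
    return log_prob.sum(dim=-1, keepdim=True)

# Tiny check on a batch of 4 two-dimensional actions.
mean = torch.zeros(4, 2)
std = torch.ones(4, 2)
u = mean + std * torch.randn(4, 2)
print(tanh_gaussian_log_prob(u, mean, std).shape)  # torch.Size([4, 1])
```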
kcexn/singular-value-decomposition | [
"63e2a23f9f0db9aa361e338b8065d59b80f7649e"
] | [
"coded_distributed_computing.py"
] | [
"''' coded_distributed_computing\nThis module contains functions related to a study of the coded distributed computing model.\n\n'''\nimport numpy as np\n\ndef encode_matrix(A: np.matrix, G: np.matrix) -> np.matrix:\n ''' encode_matrix\n Parameters:\n ---\n A: np.matrix, input matrix to code.\n G: np.matrix, generator matrix to encode A with.\n ---\n Returns:\n ---\n A*G: np.matrix, output encoded matrix.\n ---\n Description:\n ---\n Following van Lint's text \"Introduction to Coding Theory\", \n I am constructing linear block codes using a generator matrix G \n and an input matrix A. \n\n Actually typically the codes would be constructed using a \n generator matrix G and an input vector k which would create an \n output message, a vector, m.\n\n Following from my conversation with Jingge last week though. \n I'm convinced that encoding a matrix to preserve the \n matrix vector multiplication Ax is exactly the same as encoding\n multiple messages across time simultaneously. i.e. If I were to \n accumulate n messages (column vectors) of size k and concatenated them \n I would end up with a matrix of size k x n (rows and columns). Encoding \n it with the generator matrix G would give me a matrix of size m x n. Where\n each column in the matrix A*G can be considered one message to be delivered \n over time. The matrix vector multiplication Ax is simply the rows of multiple\n messages concatenated together multiplied with the vector x.\n\n This is not a super great analogue, because obviously matrices in a matrix vector \n multiplication are shared with everyone all at once not one column at a time. \n But I think it's a useful way to reason about the coding properties of \n the matrix A*G. And I believe opens up the possibilities of \n matrix encodings to ALL codes that can be represented as linear block codes \n (which I believe are simply, ALL linear codes).\n\n '''\n return np.matmul(A,G)\n\n\n\n"
] | [
[
"numpy.matmul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
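The `encode_matrix` docstring above argues that right-multiplying an input matrix by a generator matrix is the same as encoding many messages at once. A minimal sketch of that idea, treating each row of `A` as a message (consistent with the `np.matmul(A, G)` in the code) and using a hypothetical systematic single-parity-check generator `G = [I | 1]` that is not part of the repository:

```python
import numpy as np

def encode_matrix(A, G):
    # Same one-liner as the module above, shown here for a self-contained example.
    return np.matmul(A, G)

G = np.array([[1., 0., 1.],
              [0., 1., 1.]])                   # k x n systematic generator [I | 1]
A = np.arange(8, dtype=float).reshape(4, 2)    # four length-2 messages, one per row

C = encode_matrix(A, G)                        # 4 x 3 coded matrix
# The systematic part reproduces A; the last column is the parity (sum) column.
assert np.allclose(C[:, :2], A)
assert np.allclose(C[:, 2], A.sum(axis=1))
print(C)
```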
sanzgiri/MaatPy | [
"381a0d31f1afdd2c53b9ccbb410eb0df6b4b9965"
] | [
"maatpy/dataset.py"
] | [
"import warnings\nimport numpy as np\nimport pandas as pd\n\nfrom collections import Counter\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.utils import check_X_y\nfrom sklearn.utils import Bunch\nfrom sklearn.preprocessing import LabelEncoder\nfrom imblearn.under_sampling.prototype_selection import RandomUnderSampler\nfrom imblearn.over_sampling import RandomOverSampler\n\n\nclass Dataset(Bunch):\n\n def __init__(self, data=None, target=None, feature_names=None, target_names=None):\n \"\"\"\n\n :param data:\n :param target:\n :param feature_names:\n :param target_names:\n \"\"\"\n self.data = data\n self.target = target\n self.feature_names = feature_names\n self.target_names = target_names\n\n def make_imbalance(self, ratio=None, random_state=None):\n \"\"\"\n Built on the imblearn.make_imbalance function\n :param ratio: dict or list\n Ratio to use for resampling the data set.\n - When 'dict', the keys correspond to the targeted classes. The values correspond to the desired number\n of samples for each targeted class.\n - When 'list', the values correspond to the proportions of samples (float) assigned to each class. In\n this case the number of samples is maintained but the samples per class are adjusted to the given\n proportions.\n :param random_state: int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator; If RandomState instance,\n random_state is the random number generator; If None, the random number generator is the RandomState\n instance used by `np.random`.\n :return:\n \"\"\"\n x, y = check_X_y(self.data, self.target)\n original_dataset_size = len(y)\n n_classes = len(self.target_names)\n\n if isinstance(ratio, dict):\n ratio_ = ratio\n\n elif isinstance(ratio, list):\n weights = ratio\n if len(weights) != n_classes:\n raise ValueError(\"{} classes available but only {} values provided\".format(n_classes, len(weights)))\n ratio_ = {}\n for i in range(n_classes):\n ratio_[i] = int(round(weights[i] * original_dataset_size, 0))\n\n else:\n raise TypeError(\"Expected dict or list; {} provided\".format(type(ratio)))\n\n if sum(ratio_.values()) < original_dataset_size:\n rus = RandomUnderSampler(ratio=ratio_, random_state=random_state)\n self.data, self.target = rus.fit_sample(x, y)\n\n elif sum(ratio_.values()) == original_dataset_size:\n original_distribution = Counter(y)\n interim_ratio = {}\n for key in ratio_:\n if ratio_[key] >= original_distribution[key]:\n interim_ratio[key] = original_distribution[key]\n else:\n interim_ratio[key] = ratio_[key]\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n rus = RandomUnderSampler(ratio=interim_ratio, random_state=random_state)\n x_int, y_int = rus.fit_sample(x, y)\n with warnings.catch_warnings():\n # Silencing RandomOverSampler UserWarning: After over-sampling, the number of samples in class A will\n # be larger than the number of samples in the majority class\n warnings.simplefilter(\"ignore\")\n ros = RandomOverSampler(ratio=ratio_, random_state=random_state)\n self.data, self.target = ros.fit_sample(x_int, y_int)\n\n else:\n raise ValueError(\"The requested dataset cannot be larger than the original dataset\")\n\n def load_from_csv(self, filename, sep=',', output_column=None, ignore=None):\n \"\"\"\n\n :param filename: path to filename containing the data to load\n :param sep: field separator; default ','\n :param output_column: column containing the outcome\n :param ignore: column to remove from data; str or list\n 
:return:\n \"\"\"\n df = pd.read_csv(filename, sep=sep)\n if output_column:\n le = LabelEncoder()\n le.fit(list(df[output_column]))\n self.target_names = le.classes_\n self.target = le.transform(list(df[output_column]))\n df.drop(output_column, axis=1, inplace=True)\n else:\n raise ValueError('Please define an output_column; column containing the class defined for each observation '\n '(row)')\n if ignore is not None:\n df.drop(ignore, axis=1, inplace=True)\n self.feature_names = df.columns\n self.data = df.values\n\n\ndef simulate_dataset(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_classes=2, n_clusters_per_class=1,\n weights=None, flip_y=0.01, class_sep=1.0, random_state=None):\n \"\"\"\n Using sklearn.make_classification function to return a Dataset object\n :param n_samples: int, optional (default=100).\n The number of samples.\n :param n_features: int, optional (default=2)\n The total number of features. These comprise 'n_informative' informative features and 'n_redundant'\n redundant features.\n :param n_informative: int, optional (default=2)\n The number of informative features. Each class is composed of a number of gaussian clusters each located\n around the vertices of a hypercube in a subspace of dimension 'n_informative'. For each cluster,\n informative features are drawn independently from N(0, 1) and then randomly linearly combined within\n each cluster in order to add covariance. The clusters are then placed on the vertices of the hypercube.\n :param n_redundant: int, optional (default=0)\n The number of redundant features. These features are generated a random linear combinations of the\n informative features.\n :param n_classes: int, optional (default=2)\n The number of classes (or labels) of the classification problem.\n :param n_clusters_per_class: int, optional (default=1)\n The number of clusters per class.\n :param weights: list of floats or None (default=None)\n The proportions of samples assigned to each class. If None, then classes are balanced. Note that if\n 'len(weights) == n_classes - 1' then the last class weight is automatically inferred. More than\n 'n_samples' samples may be returned if the sum of `weights` exceeds 1.\n :param flip_y: float, optional (default=0.01)\n The fraction of samples whose class are randomly exchanged. Larger values introduce noise in the labels\n and make the classification task harder.\n :param class_sep: float, optional (default=1.0)\n The factor multiplying the hypercube size. Larger values spread out the clusters/classes and make the\n classification task easier.\n :param random_state: int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator; If RandomState instance,\n random_state is the random number generator; If None, the random number generator is the RandomState\n instance used by `np.random`.\n :return: Dataset object\n \"\"\"\n\n data, target = make_classification(n_samples=n_samples, n_features=n_features,\n n_informative=n_informative, n_redundant=n_redundant,\n n_classes=n_classes, n_clusters_per_class=n_clusters_per_class,\n weights=weights, flip_y=flip_y, class_sep=class_sep,\n random_state=random_state)\n feature_names = ['feature#{}'.format(i) for i in range(data.shape[1])]\n target_names = ['class#{}'.format(i) for i in np.unique(target)]\n\n return Dataset(data, target, feature_names, target_names)\n"
] | [
[
"pandas.read_csv",
"sklearn.datasets.make_classification",
"sklearn.utils.check_X_y",
"numpy.unique",
"sklearn.preprocessing.LabelEncoder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
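An illustrative way to exercise `simulate_dataset` together with `Dataset.make_imbalance` from the module above. This is a sketch, not a documented example from the repository: it assumes `maatpy` is importable and that the installed imbalanced-learn matches the older API the module imports (the `ratio=` keyword and `fit_sample` method).

```python
from collections import Counter
# Assumed import path, based on the file path maatpy/dataset.py shown above.
from maatpy.dataset import simulate_dataset

# Balanced two-class toy dataset.
ds = simulate_dataset(n_samples=1000, n_features=4, n_classes=2, random_state=0)
print(Counter(ds.target))            # roughly 500 / 500

# Rebalance to a 90/10 split while keeping the total size (list form of `ratio`),
# which triggers the combined under-/over-sampling branch of make_imbalance.
ds.make_imbalance(ratio=[0.9, 0.1], random_state=0)
print(Counter(ds.target))            # {0: 900, 1: 100}
```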
beesk135/ReID-Survey | [
"d1467c0ce5d3ca78640196360a05df9ff9f9f42a"
] | [
"evaluate/__init__.py"
] | [
"import torch \n\nfrom .eval_reid import eval_func\n\ndef euclidean_dist(x, y):\n m, n = x.size(0), y.size(0)\n xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)\n yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()\n dist = xx + yy\n dist.addmm_(1, -2, x, y.t())\n dist = dist.clamp(min=1e-12).sqrt()\n return dist\n"
] | [
[
"torch.pow"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
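The `euclidean_dist` helper above relies on the expansion ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 x_i . y_j together with an in-place `addmm_`. The positional `addmm_(1, -2, ...)` call it uses is deprecated in recent PyTorch releases; the sketch below keeps the same computation with the keyword form and sanity-checks it against `torch.cdist`.

```python
import torch

def euclidean_dist(x, y):
    # ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 <x_i, y_j>
    m, n = x.size(0), y.size(0)
    xx = x.pow(2).sum(1, keepdim=True).expand(m, n)
    yy = y.pow(2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    # Keyword form of the in-place addmm used above.
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    return dist.clamp(min=1e-12).sqrt()

x, y = torch.randn(5, 8), torch.randn(7, 8)
print(torch.allclose(euclidean_dist(x, y), torch.cdist(x, y), atol=1e-5))
```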
xujin1184104394/coco-analyze | [
"fefe16025554dbf831e71d32d6601dd8f00286a8"
] | [
"analysisAPI/scoringErrors.py"
] | [
"## imports\nimport os, time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# package imports \nfrom . import utilities\n\ndef scoringErrors( coco_analyze, oks, imgs_info, saveDir ):\n loc_dir = saveDir + '/scoring_errors'\n if not os.path.exists(loc_dir):\n os.makedirs(loc_dir)\n f = open('%s/std_out.txt'%loc_dir, 'w')\n f.write(\"Running Analysis: [Scoring Errors]\\n\\n\")\n tic = time.time()\n paths = {}\n\n # set parameters for the scoring errors analysis\n coco_analyze.params.areaRng = [[32 ** 2, 1e5 ** 2]]\n coco_analyze.params.areaRngLbl = ['all']\n coco_analyze.params.oksThrs = [oks]\n coco_analyze.cocoEval.params.useGtIgnore = 0\n coco_analyze.cocoEval.params.gtIgnoreIds = []\n coco_analyze.analyze(check_kpts=False, check_scores=True, check_bckgd=False)\n coco_analyze.summarize(makeplots=True, savedir=loc_dir, team_name='scoring')\n paths['opt_score_prc'] = \\\n '%s/error_prc_[scoring][%d][%s][%d].pdf'%(loc_dir, int(oks*100),\n coco_analyze.params.areaRngLbl[0],\n coco_analyze.params.maxDets[0])\n corrected_dts = coco_analyze.corrected_dts['all']\n\n # dictionary of all corrected detections grouped by image id\n all_dts = {}\n for d in coco_analyze.corrected_dts['all']:\n if d['image_id'] not in all_dts:\n all_dts[d['image_id']] = {}\n all_dts[d['image_id']]['dts'] = [d]\n else:\n all_dts[d['image_id']]['dts'].append(d)\n\n subopt_order_images = []\n all_gts = {}; all_dtgt_oks = {}\n for imgId in imgs_info:\n if imgId in all_dts:\n dts = all_dts[imgId]['dts']\n all_dts[imgId]['score'] = np.argsort([-d['score'] for d in dts], kind='mergesort')\n all_dts[imgId]['opt_score'] = np.argsort([-d['opt_score'] for d in dts], kind='mergesort')\n\n if list(all_dts[imgId]['score']) != list(all_dts[imgId]['opt_score']):\n subopt_order_images.append(imgId)\n else:\n dts = []\n\n gts = coco_analyze.cocoGt.loadAnns(coco_analyze.cocoGt.getAnnIds(imgIds=imgId))\n not_ignore_gts = []\n for g in gts:\n # gt ignores are discarded\n if g['ignore'] or (g['area']<coco_analyze.params.areaRng[0][0] or g['area']>coco_analyze.params.areaRng[0][1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n not_ignore_gts.append(g)\n\n # compute the oks matrix between the dts and gts of each image\n image_oks_mat = utilities.compute_oks(dts, not_ignore_gts)\n if len(image_oks_mat) == 0:\n all_gts[imgId] = not_ignore_gts\n all_dtgt_oks[imgId] = []\n\n else:\n # sort the ground truths by their max oks value with any detection\n maxoksvals = [-max(image_oks_mat[:,j]) for j in range(len(not_ignore_gts))]\n gtind = np.argsort(maxoksvals, kind='mergesort')\n all_gts[imgId] = [not_ignore_gts[j] for j in gtind]\n all_dtgt_oks[imgId] = image_oks_mat[:,gtind]\n\n ## check how many images have optimal score and original score with same order\n perc = 100*len(subopt_order_images)/float(len(all_dts))\n f.write(\"Num. 
of imgs with sub-optimal detections order: [%d]/[%d] (%.2f%%).\\n\\n\"%(len(subopt_order_images), len(all_dts), perc))\n\n ## find scoring errors before and after rescoring\n min_match_oks = .5\n scoring_errors = {'score':[],'opt_score':[]}\n for score_type in scoring_errors.keys():\n for ind, imgId in enumerate(all_dts.keys()):\n dind = all_dts[imgId][score_type]\n sorted_dts = [all_dts[imgId]['dts'][i] for i in dind]\n gtIds = [g['id'] for g in all_gts[imgId]]\n if len(sorted_dts) * len(gtIds) == 0: continue\n\n used_dts = []\n for gind, gt in enumerate(all_gts[imgId]):\n assert(gt['_ignore']==0)\n\n oks = all_dtgt_oks[imgId][dind,gind]\n dts_with_oks = np.where(oks >= min_match_oks)[0]\n # remove the matched dts\n dts_available = [(i,sorted_dts[i]['id'],oks[i],sorted_dts[i][score_type]) \\\n for i in dts_with_oks if sorted_dts[i]['id'] not in used_dts]\n if len(dts_available) == 0: break\n\n max_oks_dt = np.argmax([d[2] for d in dts_available])\n used_dts.append(dts_available[max_oks_dt][1])\n\n if len( dts_available ) > 1:\n # check for scoring error\n max_score_dt = np.argmax([d[3] for d in dts_available])\n if max_score_dt!=max_oks_dt:\n # this is a scoring error\n error = {}\n error['gt'] = gt\n error['imgId'] = imgId\n error['matched_dt'] = sorted_dts[dts_available[max_score_dt][0]]\n error['top_match_dt'] = sorted_dts[dts_available[max_oks_dt][0]]\n error['high_oks'] = dts_available[max_oks_dt][2]\n error['low_oks'] = dts_available[max_score_dt][2]\n scoring_errors[score_type].append(error)\n\n f.write(\"Num. of scoring errors:\\n\")\n f.write(\" - Original Score: %d\\n\"%len(scoring_errors['score']))\n f.write(\" - Optimal Score: %d\\n\"%len(scoring_errors['opt_score']))\n\n f.write(\"\\nMost relevant scoring errors:\\n\")\n ## print the top scoring errors of the algorithm\n ori_scoring_errors = scoring_errors['score']\n ori_scoring_errors.sort(key=lambda k: -np.sqrt((k['matched_dt']['score']-k['top_match_dt']['score'])*(k['high_oks']-k['low_oks'])))\n for ind, err in enumerate(ori_scoring_errors[0:12]):\n relevance = np.sqrt((err['matched_dt']['score']-err['top_match_dt']['score'])*(err['high_oks']-err['low_oks']))\n f.write(\"================================================\\n\")\n f.write( \"- gt id: [%d]\\n\"%err['gt']['id'] )\n f.write( \"- dt id, high score, low oks: [%d][%.3f][%.3f]\\n\"%(err['matched_dt']['id'], err['matched_dt']['score'], err['low_oks']) )\n f.write( \"- dt id, low score, high oks: [%d][%.3f][%.3f]\\n\"%(err['top_match_dt']['id'], err['top_match_dt']['score'], err['high_oks']) )\n f.write( \"- Relevance: [%.3f]\\n\\n\"%relevance )\n\n name = 'score_err_%d_high_score'%ind\n paths[name] = '%s/%s.pdf'%(loc_dir,name)\n utilities.show_dets([err['matched_dt']],\n [err['gt']],\n imgs_info[err['imgId']],save_path=paths[name])\n\n name = 'score_err_%d_high_oks'%ind\n paths[name] = '%s/%s.pdf'%(loc_dir,name)\n utilities.show_dets([err['top_match_dt']],\n [err['gt']],\n imgs_info[err['imgId']],save_path=paths[name])\n\n # for all the images with dts and gts compute the following quantities\n # - number of dts with oks > min_match_oks for each gt\n # - histogram of oks for the detection with highest oks\n # - histogram of oks for all the other detections\n # - histogram of original/optimal scores for the detection with highest oks\n # - histogram of original/optimal scores for all the other detections\n num_dts_high_oks = []\n high_oks_dt_oks_hist = []; other_dt_oks_hist = []\n high_oks_dt_ori_score_hist = []; other_dt_ori_score_hist = []\n 
high_oks_dt_opt_score_hist = []; other_dt_opt_score_hist = []\n\n for ind, imgId in enumerate(all_dts.keys()):\n dts = [(d['id'],d['score'],d['opt_score']) for d in all_dts[imgId]['dts']]\n gtIds = [g['id'] for g in all_gts[imgId]]\n if len(dts) * len(gtIds) == 0: continue\n\n for gind, gt in enumerate(all_gts[imgId]):\n assert(gt['_ignore']==0)\n\n dts_oks = all_dtgt_oks[imgId][:,gind]\n dts_high_oks_i = np.where(dts_oks > .1)[0]\n num_dts_high_oks.append(len(dts_high_oks_i))\n\n if len(dts_high_oks_i) >= 2:\n # study the case where multiple detections have high oks\n\n # add the oks of the detections to the histogram of oks\n oks_vals = sorted([(dts_oks[i],dts[i]) for i in dts_high_oks_i], key=lambda k: -k[0])\n high_oks_dt_oks_hist.append(oks_vals[0][0])\n other_dt_oks_hist.extend([k[0] for k in oks_vals[1:]])\n\n high_oks_dt_ori_score_hist.append(oks_vals[0][1][1])\n other_dt_ori_score_hist.extend([k[1][1] for k in oks_vals[1:]])\n\n high_oks_dt_opt_score_hist.append(oks_vals[0][1][2])\n other_dt_opt_score_hist.extend([k[1][2] for k in oks_vals[1:]])\n\n fig, ax = plt.subplots(figsize=(10,10))\n ax.set_facecolor('lightgray')\n plt.hist(num_dts_high_oks,bins=[i-.5 for i in range(max(num_dts_high_oks)+1)],color='green')\n plt.grid()\n plt.xticks([i for i in range(max(num_dts_high_oks))])\n plt.title('Histogram of Detection Redundancy',fontsize=20)\n plt.xlabel('Number of Detections with OKS > .1',fontsize=20)\n plt.ylabel('Number of Ground Truth Instances',fontsize=20)\n path = '%s/num_dts_high_oks.pdf'%loc_dir\n paths['num_dts_high_oks'] = path\n plt.savefig(path,bbox_inches='tight')\n plt.close()\n\n fig, ax = plt.subplots(figsize=(10,10))\n y1,binEdges=np.histogram(high_oks_dt_ori_score_hist,bins=19)\n bincenters1 = 0.5*(binEdges[1:]+binEdges[:-1])\n ax.plot(bincenters1,y1,'-',linewidth=3,c='b',label='Max OKS Detection')\n min_val1 = min(bincenters1)\n max_val1 = max(bincenters1)\n\n y2,binEdges=np.histogram(other_dt_ori_score_hist,bins=19)\n bincenters2 = 0.5*(binEdges[1:]+binEdges[:-1])\n ax.plot(bincenters2,y2,'--',linewidth=3,c='b',label='Lower OKS Detection(s)')\n min_val2 = min(bincenters2)\n max_val2 = max(bincenters2)\n\n min_val = min(min_val1,min_val2)\n max_val = max(max_val1,max_val2)\n\n overlapbins = [min(x,y) for x,y in zip(y1,y2)]\n width = (max_val-min_val)/20.\n ax.bar(np.linspace(min_val,max_val,19), overlapbins, color='red', alpha=.65, width=width,align='center')\n plt.grid()\n plt.xlim([min_val-(max_val-min_val)/20.,max_val+(max_val-min_val)/20.])\n\n plt.grid()\n plt.legend(loc='upper center',fontsize=20)\n plt.title('Histogram of Original Detection Scores',fontsize=20)\n plt.xlabel('Original Confidence Score',fontsize=20)\n plt.ylabel('Number of Detections',fontsize=20)\n path = '%s/dts_ori_score_hist.pdf'%loc_dir\n paths['dts_ori_score_hist'] = path\n plt.savefig(path,bbox_inches='tight')\n plt.close()\n\n fig, ax = plt.subplots(figsize=(10,10))\n y1,binEdges=np.histogram(high_oks_dt_opt_score_hist,bins=19)\n bincenters1 = 0.5*(binEdges[1:]+binEdges[:-1])\n ax.plot(bincenters1,y1,'-',linewidth=3,c='b',label='Max OKS Detection')\n min_val1 = min(bincenters1)\n max_val1 = max(bincenters1)\n\n y2,binEdges=np.histogram(other_dt_opt_score_hist,bins=19)\n bincenters2 = 0.5*(binEdges[1:]+binEdges[:-1])\n ax.plot(bincenters2,y2,'--',linewidth=3,c='b',label='Lower OKS Detection(s)')\n min_val2 = min(bincenters2)\n max_val2 = max(bincenters2)\n\n min_val = min(min_val1,min_val2)\n max_val = max(max_val1,max_val2)\n\n overlapbins = [min(x,y) for x,y in zip(y1,y2)]\n width 
= (max_val-min_val)/20.\n ax.bar(np.linspace(min_val,max_val,19), overlapbins, color='red', alpha=.65, width=width,align='center')\n plt.grid()\n plt.xlim([min_val-(max_val-min_val)/20.,max_val+(max_val-min_val)/20.])\n\n plt.grid()\n plt.legend(loc='upper center',fontsize=20)\n plt.title('Histogram of Optimal Detection Scores',fontsize=20)\n plt.xlabel('Optimal Confidence Score',fontsize=20)\n plt.ylabel('Number of Detections',fontsize=20)\n path = '%s/dts_opt_score_hist.pdf'%loc_dir\n paths['dts_opt_score_hist'] = path\n plt.savefig(path,bbox_inches='tight')\n plt.close()\n\n f.write(\"\\nDone, (t=%.2fs).\"%(time.time()-tic))\n f.close()\n\n return paths\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"matplotlib.pyplot.title",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"numpy.argmax",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.close",
"numpy.argsort",
"matplotlib.pyplot.xlabel",
"numpy.histogram",
"numpy.where",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
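The scoring-error check in the analysis code above reduces to: for each ground-truth instance, collect the unused detections with OKS above 0.5, and flag an error whenever the highest-scoring candidate is not the highest-OKS candidate, weighting it by sqrt(score gap * OKS gap). A compact sketch of that per-ground-truth decision (the function name and toy numbers are illustrative, not from the repository):

```python
import numpy as np

def scoring_error_for_gt(oks_row, scores, used, min_match_oks=0.5):
    """oks_row: OKS of every detection against one gt; scores: detection scores.
    Returns (index of the matched max-OKS detection, relevance of the scoring
    error or None), mimicking the loop in the analysis above."""
    cand = [i for i in np.where(oks_row >= min_match_oks)[0] if i not in used]
    if not cand:
        return None, None
    best_oks = max(cand, key=lambda i: oks_row[i])
    best_score = max(cand, key=lambda i: scores[i])
    relevance = None
    if len(cand) > 1 and best_score != best_oks:
        # Relevance as defined above: sqrt(score gap * OKS gap).
        relevance = np.sqrt((scores[best_score] - scores[best_oks]) *
                            (oks_row[best_oks] - oks_row[best_score]))
    return best_oks, relevance

oks = np.array([0.92, 0.61, 0.30])     # OKS of three detections vs one gt
scores = np.array([0.55, 0.80, 0.99])  # detection confidences
print(scoring_error_for_gt(oks, scores, used=set()))
```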
sirjamesmeddel-gitty/intuition | [
"cd517e6b3b315a743eb4d0d0dc294e264ab913ce",
"cd517e6b3b315a743eb4d0d0dc294e264ab913ce",
"cd517e6b3b315a743eb4d0d0dc294e264ab913ce",
"cd517e6b3b315a743eb4d0d0dc294e264ab913ce"
] | [
"tests/core/test_configuration.py",
"intuition/api/datafeed.py",
"intuition/core/analyzes.py",
"tests/api/test_datafeed.py"
] | [
"'''\nTests for intuition.core.configuration\n'''\n\nimport unittest\nfrom nose.tools import raises\nimport dna.test_utils as test_utils\nimport pandas as pd\nimport intuition.core.configuration as configuration\nfrom dna.errors import DynamicImportFailed\nfrom intuition.errors import InvalidConfiguration\n\n\nclass ConfigurationUtilsTestCase(unittest.TestCase):\n\n def test_logfile(self):\n logfile = configuration.logfile('fake_id')\n if 'tmp' in logfile:\n self.assertEqual('/tmp/logs/fake_id.log', logfile)\n else:\n self.assertIn('.intuition/logs/fake_id.log', logfile)\n\n\nclass ContextLoadTestCase(unittest.TestCase):\n\n def setUp(self):\n test_utils.setup_logger(self)\n self.good_driver = \\\n 'intuition.test_utils.FakeContext://localhost/path?valid=true'\n self.bad_driver = \\\n 'no.file.FileContext://localhost/path?valid=true'\n self.bad_config = \\\n 'intuition.test_utils.FakeContext://localhost/path?valid=false'\n self.bad_formatted_config = \\\n 'intuition.test_utils.FakeContext://localhost/path?format=false'\n\n def tearDown(self):\n test_utils.teardown_logger(self)\n\n def test_load_context(self):\n with configuration.Context(self.good_driver) as context:\n self.assertIsInstance(context, dict)\n self.assertIsInstance(context['strategy'], dict)\n self.assertIsInstance(context['config'], dict)\n\n @raises(InvalidConfiguration)\n def test_validate_bad_config(self):\n bad_config = {}\n ctx = configuration.Context(self.bad_driver)\n ctx._validate(bad_config)\n\n def test_validate_good_config(self):\n good_config = {\n 'universe': 'nasdaq,4',\n 'index': pd.date_range('2014/2/3', periods=30),\n 'modules': {\n 'algorithm': 'dualma'\n }\n }\n ctx = configuration.Context(self.bad_driver)\n self.assertIsNone(ctx._validate(good_config))\n\n @raises(InvalidConfiguration)\n def test_load_bad_configuration(self):\n ctx = configuration.Context(self.bad_formatted_config)\n ctx.__enter__()\n\n def test_loaded_configuration(self):\n with configuration.Context(self.good_driver) as context:\n for field in ['manager', 'algorithm', 'data']:\n self.assertIn(field, context['strategy'])\n for field in ['index', 'live']:\n self.assertIn(field, context['config'])\n\n @raises(DynamicImportFailed)\n def test_absent_driver_context_load(self):\n ctx = configuration.Context(self.bad_driver)\n ctx.__enter__()\n",
"# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n\n'''\n Intuition Data source api\n -------------------------\n\n Data generator motherboard for Intuition. It uses backtest and live data\n sources to build the datafeed of the algorithm.\n\n :copyright (c) 2014 Xavier Bruhiere\n :license: Apache 2.0, see LICENSE for more details.\n'''\n\n\nimport pandas as pd\nfrom datetime import timedelta\nimport dna.logging\nfrom zipline.sources.data_source import DataSource\nfrom zipline.gens.utils import hash_args\nimport intuition.utils as utils\nfrom intuition.errors import LoadDataFailed, InvalidDatafeed\n\n\ndef _build_safe_event(event, date, sid):\n event.update({\n 'dt': date,\n 'sid': sid,\n 'volume': event.get('volume', 1000)\n })\n return event\n\n\ndef _check_data_modules(backtest, live, start, end):\n # TODO Fails if the class as no get_data method\n # TODO Fails if no backtest for past dates and same for live\n if not backtest and not live:\n raise InvalidDatafeed(\n reason='provide at least a backtest or a live data module')\n return True\n\n\nclass HybridDataFactory(DataSource):\n '''\n Surcharge of zipline.DataSource, switching automatically between live\n stream and backtest sources\n '''\n\n backtest = None\n live = None\n _is_live = False\n\n def __init__(self, **kwargs):\n # TODO Use alternatives to `index` and `universe` objects\n self.log = dna.logging.logger(__name__)\n\n if 'index' not in kwargs or 'universe' not in kwargs:\n raise InvalidDatafeed(\n reason='you must provide a universe and an index')\n if not isinstance(kwargs.get('index'),\n pd.tseries.index.DatetimeIndex):\n raise InvalidDatafeed(reason='you must provide a valid time index')\n\n # Unpack config dictionary with default values.\n self.sids = kwargs['universe'].sids\n self.index = kwargs['index']\n self.start = self.index[0]\n self.end = self.index[-1]\n\n self.frequency = float(kwargs.get('frequency', 14))\n self.market_open = kwargs['universe'].open\n self.market_close = kwargs['universe'].close\n\n if 'backtest' in kwargs:\n self.backtest = kwargs['backtest'](self.sids, kwargs)\n if 'live' in kwargs:\n self.live = kwargs['live'](self.sids, kwargs)\n\n _check_data_modules(self.backtest, self.live, self.start, self.end)\n\n # Hash_value for downstream sorting.\n self.arg_string = hash_args(**kwargs)\n self._raw_data = None\n\n @property\n def mapping(self):\n if self._is_live:\n return self.live.mapping\n else:\n return self.backtest.mapping\n\n def _set_next_tick(self, date):\n ''' Use self.freq and the given date to deduce the next event hour '''\n return date + timedelta(hours=self.frequency)\n\n def _get_backtest_data(self):\n # The first date is usually a few seconds before now,\n # so we compare to the next one\n if self.backtest and not utils.is_live(self.index[1]):\n try:\n bt_data = self.backtest.get_data(\n self.sids, self.start, self.end)\n except Exception as error:\n raise LoadDataFailed(sids=self.sids, reason=error)\n else:\n bt_data = None\n return bt_data\n\n def _agnostic_get_data_at(self, date, data):\n dated_data = pd.DataFrame()\n n_axes = len(data.axes)\n\n if self._is_live:\n try:\n dated_data = self.live.get_data(self.sids)\n except Exception as error:\n raise LoadDataFailed(sids=self.sids, reason=error)\n\n if isinstance(dated_data, pd.Series):\n dated_data = pd.DataFrame(\n {sid: {'price': price}\n for sid, price in dated_data.iterkv()})\n\n else:\n midnight_date = date.replace(hour=0, minute=0)\n if n_axes == 2:\n if midnight_date in data.index:\n dated_data = pd.DataFrame(\n {sid: {'price': 
price}\n for sid, price in data.ix[midnight_date].iterkv()})\n\n elif n_axes == 3:\n if midnight_date in data.major_axis:\n dated_data = data.major_xs(midnight_date)\n\n else:\n raise TypeError('only dataframe and panel are supported')\n return dated_data\n\n def raw_data_gen(self):\n bt_data = self._get_backtest_data()\n\n for date in self.index:\n backtest_is_done = False\n date = date.replace(hour=self.market_open.hour,\n minute=self.market_open.minute)\n close_hour = date.replace(hour=self.market_close.hour,\n minute=self.market_close.minute)\n\n # Trade until the end of the trading day\n while date < close_hour:\n # Set to opening of the market\n self.log.debug('--> next tick {}'.format(date))\n # NOTE Make _is_live a property ?\n self._is_live = utils.next_tick(date)\n\n data = self._agnostic_get_data_at(date, bt_data)\n if not data.empty:\n for sid, series in data.iterkv():\n if backtest_is_done and not self._is_live:\n # TODO Use previous and next data to extrapolate\n # random values\n self.log.debug('extrapoling intraday data')\n\n yield _build_safe_event(series.to_dict(), date, sid)\n backtest_is_done = True\n\n # Done for this event, when is the next ?\n date = self._set_next_tick(date)\n\n @property\n def instance_hash(self):\n return self.arg_string\n\n @property\n def raw_data(self):\n if not self._raw_data:\n self._raw_data = self.raw_data_gen()\n return self._raw_data\n",
"# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n\n'''\n Intuition results analyzer\n --------------------------\n\n Wraps session results with convenient analyse methods\n\n :copyright (c) 2014 Xavier Bruhiere\n :license: Apache 2.0, see LICENSE for more details.\n'''\n\nimport pytz\nimport pandas as pd\nimport numpy as np\nimport dna.logging\nimport dna.debug\nimport dna.utils\nfrom zipline.data.benchmarks import get_benchmark_returns\nfrom intuition.finance import qstk_get_sharpe_ratio\n\nlog = dna.logging.logger(__name__)\n\n\nclass Analyze():\n ''' Handle backtest results and performances measurments '''\n def __init__(self, params, results, metrics, benchmark='^GSPC'):\n # NOTE Temporary\n # Simulation parameters\n self.sim_params = params\n # Final risk measurments as returned by the backtester\n self.results = results\n # Simulation rolling performance\n self.metrics = metrics\n # Market where we traded\n self.benchmark = benchmark\n\n def build_report(self, timestamp='one_month', show=False):\n # Get daily, cumulative and not, returns of portfolio and benchmark\n # NOTE Temporary fix before intuition would be able to get benchmark\n # data on live trading\n try:\n bm_sym = self.benchmark\n returns_df = self.get_returns(benchmark=bm_sym)\n skip = False\n except:\n log.warn('unable to get benchmark data on live trading for now')\n skip = True\n\n orders = 0\n for order in self.results.orders:\n orders += len(order)\n\n final_value = self.results.portfolio_value[-1]\n report = {\n 'portfolio': final_value,\n 'gain': final_value - self.sim_params.capital_base,\n 'orders': orders,\n 'pnl_mean': self.results.pnl.mean(),\n 'pnl_deviation': self.results.pnl.std(),\n }\n if not skip:\n report['portfolio_perfs'] = returns_df['algo_c_return'][-1] * 100.0\n report['benchmark_perfs'] = \\\n returns_df['benchmark_c_return'][-1] * 100.0\n\n perfs = self.overall_metrics(timestamp)\n for k, v in perfs.iteritems():\n report[k] = v\n\n # Float values for humans\n for key, value in report.iteritems():\n report[key] = dna.utils.truncate(value, 3)\n\n log.info('generated report', report=report)\n if show:\n print\n print(dna.debug.emphasis(report, align=True))\n print\n\n return report\n\n def _to_perf_array(self, timestamp, key, length):\n return np.array([self.metrics[timestamp][i][key] for i in length])\n\n def rolling_performances(self, timestamp='one_month'):\n ''' Filters self.perfs '''\n # TODO Study the impact of month choice\n # TODO Check timestamp in an enumeration\n # TODO Implement other benchmarks for perf computation\n # (zipline issue, maybe expected)\n\n if self.metrics:\n perfs = {}\n length = range(len(self.metrics[timestamp]))\n index = self._get_index(self.metrics[timestamp])\n perf_keys = self.metrics[timestamp][0].keys()\n perf_keys.pop(perf_keys.index('period_label'))\n\n perfs['period'] = np.array(\n [pd.datetime.date(date) for date in index])\n for key in perf_keys:\n perfs[key] = self._to_perf_array(timestamp, key, length)\n else:\n # TODO Get it from DB if it exists\n raise NotImplementedError()\n\n return pd.DataFrame(perfs, index=index)\n\n def overall_metrics(self, timestamp='one_month', metrics=None):\n '''\n Use zipline results to compute some performance indicators\n '''\n perfs = dict()\n\n # If no rolling perfs provided, computes it\n if metrics is None:\n metrics = self.rolling_performances(timestamp=timestamp)\n riskfree = np.mean(metrics['treasury_period_return'])\n\n perfs['sharpe'] = qstk_get_sharpe_ratio(\n metrics['algorithm_period_return'].values, 
risk_free=riskfree)\n perfs['algorithm_period_return'] = (\n ((metrics['algorithm_period_return'] + 1).cumprod()) - 1)[-1]\n perfs['max_drawdown'] = max(metrics['max_drawdown'])\n perfs['algo_volatility'] = np.mean(metrics['algo_volatility'])\n perfs['beta'] = np.mean(metrics['beta'])\n perfs['alpha'] = np.mean(metrics['alpha'])\n perfs['benchmark_period_return'] = (\n ((metrics['benchmark_period_return'] + 1).cumprod()) - 1)[-1]\n\n return perfs\n\n def get_returns(self, benchmark=''):\n returns = {}\n\n if benchmark:\n try:\n benchmark_data = (\n get_benchmark_returns(benchmark,\n self.results.index[0],\n self.results.index[-1]))\n except Exception as e:\n raise KeyError(e)\n else:\n #TODO Automatic detection given exchange market (on command line) ?\n raise NotImplementedError()\n\n # NOTE Could be more efficient. But len(benchmark_data.date) !=\n # len(self.results.returns.index). Maybe because of different markets\n dates = pd.DatetimeIndex([d.date for d in benchmark_data])\n\n returns['benchmark_return'] = pd.Series(\n [d.returns for d in benchmark_data], index=dates)\n returns['benchmark_c_return'] = (\n (returns['benchmark_return'] + 1).cumprod()) - 1\n returns['algo_return'] = pd.Series(\n self.results.returns.values, index=dates)\n returns['algo_c_return'] = pd.Series(\n ((self.results.returns.values + 1).cumprod()) - 1, index=dates)\n\n df = pd.DataFrame(returns, index=dates)\n\n if benchmark is None:\n df = df.drop(['benchmark_return', 'benchmark_c_return'], axis=1)\n return df\n\n def _get_index(self, perfs):\n # NOTE No frequency infos or just period number ?\n start = pytz.utc.localize(pd.datetime.strptime(\n perfs[0]['period_label'] + '-01', '%Y-%m-%d'))\n end = pytz.utc.localize(pd.datetime.strptime(\n perfs[-1]['period_label'] + '-01', '%Y-%m-%d'))\n return pd.date_range(start - pd.datetools.BDay(10),\n end,\n freq=pd.datetools.MonthBegin())\n",
"'''\nTests for intuition.api.datafeed\n'''\n\nimport unittest\nfrom nose.tools import raises, ok_, eq_, nottest\nimport random\nimport pytz\nimport datetime as dt\nimport pandas as pd\nimport intuition.api.datafeed as datafeed\nfrom intuition.data.universe import Market\nfrom intuition.errors import InvalidDatafeed\nimport dna.test_utils\n\n\nclass FakeBacktestDatasource(object):\n\n def __init__(self, sids, properties):\n pass\n\n @property\n def mapping(self):\n return {\n 'backtest': (lambda x: True, 'sid'),\n 'dt': (lambda x: x, 'dt'),\n 'sid': (lambda x: x, 'sid'),\n 'price': (float, 'price'),\n 'volume': (int, 'volume'),\n }\n\n def get_data(self, sids, start, end):\n index = pd.date_range(start, end, tz=pytz.utc)\n return pd.DataFrame({sid: [random.random()] * len(index)\n for sid in sids}, index=index)\n\n\nclass FakePanelBacktestDatasource(object):\n\n def __init__(self, sids, properties):\n pass\n\n @property\n def mapping(self):\n return {\n 'backtest': (lambda x: True, 'sid'),\n 'dt': (lambda x: x, 'dt'),\n 'sid': (lambda x: x, 'sid'),\n 'price': (float, 'price'),\n 'low': (float, 'low'),\n 'high': (float, 'high'),\n 'volume': (int, 'volume'),\n }\n\n def get_data(self, sids, start, end):\n index = pd.date_range(start, end, tz=pytz.utc)\n fake_data = {}\n for sid in sids:\n fake_data[sid] = pd.DataFrame(\n {field: [random.random()] * len(index)\n for field in ['price', 'low', 'high', 'volume']}, index=index)\n return pd.Panel(fake_data)\n\n\nclass FakePanelWithoutVolumeBacktestDatasource(object):\n\n def __init__(self, sids, properties):\n pass\n\n def get_data(self, sids, start, end):\n index = pd.date_range(start, end, tz=pytz.utc)\n fake_data = {}\n for sid in sids:\n fake_data[sid] = pd.DataFrame(\n {field: [random.random()] * len(index)\n for field in ['price', 'low', 'high']}, index=index)\n return pd.Panel(fake_data)\n\n\nclass FakeLiveDatasource(object):\n\n def __init__(self, sids, properties):\n pass\n\n @property\n def mapping(self):\n return {\n 'live': True\n }\n\n def get_data(self, sids, start, end):\n return pd.DataFrame()\n\n\nclass DatafeedUtilsTestCase(unittest.TestCase):\n\n def setUp(self):\n dna.test_utils.setup_logger(self)\n self.fake_sid = 'fake_sid'\n self.fake_one_sid_series = pd.Series(\n {key: random.random() for key in ['low', 'close']})\n self.fake_multiple_sids_series = pd.Series(\n {key: random.random() for key in ['goog', 'fake_sid']})\n self.fake_multiple_sids_df = pd.DataFrame(\n {key: {'price': random.random(), 'close': 0.3}\n for key in ['goog', 'fake_sid']})\n self.fake_date = dt.datetime(2013, 1, 1)\n\n def tearDown(self):\n dna.test_utils.teardown_logger(self)\n\n @nottest\n def _check_event(self, event):\n self.assertIsInstance(event, dict)\n self.assertIn('volume', event)\n self.assertIn('dt', event)\n eq_(event['dt'], self.fake_date)\n eq_(event['sid'], self.fake_sid)\n\n def test_build_safe_event_without_volume(self):\n partial_event = self.fake_one_sid_series.to_dict()\n event = datafeed._build_safe_event(\n partial_event, self.fake_date, self.fake_sid)\n self._check_event(event)\n for field in self.fake_one_sid_series.index:\n self.assertIn(field, event.keys())\n\n def test_build_safe_event_with_volume(self):\n partial_event = self.fake_one_sid_series.to_dict()\n partial_event.update({'volume': 12034})\n event = datafeed._build_safe_event(\n partial_event, self.fake_date, self.fake_sid)\n self._check_event(event)\n for field in self.fake_one_sid_series.index:\n self.assertIn(field, event.keys())\n\n @raises(AttributeError)\n def 
test_wrong_data_type(self):\n wrong_type = bool\n datafeed._build_safe_event(wrong_type, self.fake_date, self.fake_sid)\n\n def test_check_data_modules(self):\n end = self.fake_date + pd.datetools.MonthBegin(6)\n ok_(datafeed._check_data_modules(\n 'backtest.module', None, self.fake_date, end))\n\n @raises(InvalidDatafeed)\n def test_check_data_modules_all_nones(self):\n end = self.fake_date + pd.datetools.MonthBegin(6)\n datafeed._check_data_modules(None, None, self.fake_date, end)\n\n\nclass HybridDataFactoryTestCase(unittest.TestCase):\n\n def setUp(self):\n dna.test_utils.setup_logger(self)\n self.test_index = pd.date_range(\n '2012/01/01', '2012/01/7', tz=pytz.utc)\n self.test_universe = 'forex,5'\n self.market = Market()\n self.market.parse_universe_description(self.test_universe)\n self.test_sids = self.market.sids\n\n def tearDown(self):\n dna.test_utils.teardown_logger(self)\n\n @nottest\n def _check_datasource(self, source):\n ok_((source.index == self.test_index).all())\n eq_(source.start, self.test_index[0])\n eq_(source.end, self.test_index[-1])\n eq_(source.sids, self.test_sids)\n self.assertIsNone(source._raw_data)\n eq_(source.arg_string, source.instance_hash)\n eq_(source.event_type, 4)\n ok_(hasattr(source, 'log'))\n self.assertFalse(source._is_live)\n\n @raises(InvalidDatafeed)\n def test_data_source_without_modules(self):\n config = {\n 'sids': self.test_sids,\n 'index': self.test_index\n }\n datafeed.HybridDataFactory(**config)\n\n @raises(InvalidDatafeed)\n def test_data_source_invalid_index(self):\n config = {\n 'sids': self.test_sids,\n 'index': bool\n }\n datafeed.HybridDataFactory(**config)\n\n def test_minimal_data_source(self):\n source = datafeed.HybridDataFactory(\n universe=self.market,\n index=self.test_index,\n backtest=FakeBacktestDatasource)\n self._check_datasource(source)\n\n def test_hybrid_mapping(self):\n source = datafeed.HybridDataFactory(\n universe=self.market,\n index=self.test_index,\n backtest=FakeBacktestDatasource,\n live=FakeLiveDatasource)\n\n self.assertIn('backtest', source.mapping)\n source._is_live = True\n self.assertIn('live', source.mapping)\n\n\n# TODO Test Live data sources\nclass SpecificMarketDataFactoryTestCase(unittest.TestCase):\n\n def setUp(self):\n dna.test_utils.setup_logger(self)\n self.test_index = pd.date_range(\n '2012/01/01', '2012/01/7', tz=pytz.utc)\n\n def tearDown(self):\n dna.test_utils.teardown_logger(self)\n\n def test_dataframe_forex_backtest_data_generation(self):\n test_universe = 'forex,5'\n market = Market()\n market.parse_universe_description(test_universe)\n source = datafeed.HybridDataFactory(\n universe=market,\n index=self.test_index,\n backtest=FakeBacktestDatasource)\n total_rows = 0\n for row in source.raw_data:\n if not total_rows:\n self.assertListEqual(\n sorted(row.keys()),\n sorted(['dt', 'price', 'sid', 'volume']))\n total_rows += 1\n eq_(total_rows, 2 * len(self.test_index) * len(market.sids))\n\n def test_dataframe_cac40_backtest_data_generation(self):\n test_universe = 'stocks:paris:cac40'\n market = Market()\n market.parse_universe_description(test_universe)\n source = datafeed.HybridDataFactory(\n universe=market,\n index=self.test_index,\n backtest=FakeBacktestDatasource)\n total_rows = 0\n for row in source.raw_data:\n if not total_rows:\n self.assertListEqual(\n sorted(row.keys()),\n sorted(['dt', 'price', 'sid', 'volume']))\n total_rows += 1\n eq_(total_rows, len(self.test_index) * len(market.sids))\n\n def test_panel_cac40_backtest_data_generation(self):\n test_universe = 
'stocks:paris:cac40'\n market = Market()\n market.parse_universe_description(test_universe)\n source = datafeed.HybridDataFactory(\n universe=market,\n index=self.test_index,\n backtest=FakePanelBacktestDatasource)\n total_rows = 0\n for row in source.raw_data:\n if not total_rows:\n self.assertListEqual(\n sorted(row.keys()),\n sorted(['dt', 'price', 'low', 'high', 'sid', 'volume']))\n total_rows += 1\n eq_(total_rows, len(self.test_index) * len(market.sids))\n\n def test_panel_without_volume_cac40_backtest_data_generation(self):\n test_universe = 'stocks:paris:cac40,5'\n market = Market()\n market.parse_universe_description(test_universe)\n source = datafeed.HybridDataFactory(\n universe=market,\n index=self.test_index,\n backtest=FakePanelWithoutVolumeBacktestDatasource)\n total_rows = 0\n for row in source.raw_data:\n if not total_rows:\n self.assertListEqual(\n sorted(row.keys()),\n sorted(['dt', 'price', 'low', 'high', 'sid', 'volume']))\n total_rows += 1\n eq_(total_rows, len(self.test_index) * len(market.sids))\n"
] | [
[
"pandas.date_range"
],
[
"pandas.DataFrame"
],
[
"pandas.Series",
"pandas.datetime.date",
"pandas.DatetimeIndex",
"pandas.DataFrame",
"numpy.mean",
"pandas.datetime.strptime",
"pandas.datetools.BDay",
"pandas.datetools.MonthBegin",
"numpy.array"
],
[
"pandas.datetools.MonthBegin",
"pandas.Panel",
"pandas.DataFrame",
"pandas.date_range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
}
] |
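`Analyze.get_returns` and `overall_metrics` above compound per-period returns with `(r + 1).cumprod() - 1`. A tiny pandas illustration of that rule, using made-up numbers:

```python
import pandas as pd

# Cumulative return = prod(1 + r_t) - 1, as used in the analyzer above.
daily_returns = pd.Series([0.01, -0.02, 0.03, 0.00],
                          index=pd.date_range('2014-02-03', periods=4))
cumulative = (daily_returns + 1).cumprod() - 1
print(cumulative.iloc[-1])   # 1.01 * 0.98 * 1.03 * 1.00 - 1 ~= 0.0195
```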
rkripa/PS-FCN | [
"eb8ddbd60964830c06432a734a2cf6dce34f70f0"
] | [
"models/PS_FCN_run.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.nn.init import kaiming_normal_\nfrom models import model_utils\n\nclass FeatExtractor(nn.Module):\n def __init__(self, batchNorm=False, c_in=3, other={}):\n super(FeatExtractor, self).__init__()\n self.other = other\n self.conv1 = model_utils.conv(batchNorm, c_in, 64, k=3, stride=1, pad=1)\n self.conv2 = model_utils.conv(batchNorm, 64, 128, k=3, stride=2, pad=1)\n self.conv3 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)\n self.conv4 = model_utils.conv(batchNorm, 128, 256, k=3, stride=2, pad=1)\n self.conv5 = model_utils.conv(batchNorm, 256, 256, k=3, stride=1, pad=1)\n self.conv6 = model_utils.deconv(256, 128)\n self.conv7 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n out = self.conv3(out)\n out = self.conv4(out)\n out = self.conv5(out)\n out = self.conv6(out)\n out_feat = self.conv7(out)\n n, c, h, w = out_feat.data.shape\n out_feat = out_feat.view(-1)\n return out_feat, [n, c, h, w]\n\nclass Regressor(nn.Module):\n def __init__(self, batchNorm=False, other={}): \n super(Regressor, self).__init__()\n self.other = other\n self.deconv1 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)\n self.deconv2 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)\n self.deconv3 = model_utils.deconv(128, 64)\n self.est_normal= self._make_output(64, 3, k=3, stride=1, pad=1)\n self.other = other\n\n def _make_output(self, cin, cout, k=3, stride=1, pad=1):\n return nn.Sequential(\n nn.Conv2d(cin, cout, kernel_size=k, stride=stride, padding=pad, bias=False))\n\n def forward(self, x, shape):\n x = x.view(shape[0], shape[1], shape[2], shape[3])\n out = self.deconv1(x)\n out = self.deconv2(out)\n out = self.deconv3(out)\n normal = self.est_normal(out)\n normal = torch.nn.functional.normalize(normal, 2, 1)\n return normal\n\nclass PS_FCN(nn.Module):\n def __init__(self, fuse_type='max', batchNorm=False, c_in=3, other={}):\n super(PS_FCN, self).__init__()\n self.extractor = FeatExtractor(batchNorm, c_in, other)\n self.regressor = Regressor(batchNorm, other)\n self.c_in = c_in\n self.fuse_type = fuse_type\n self.other = other\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n kaiming_normal_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n img = x[0]\n img_split = torch.split(img, 3, 1)\n if len(x) > 1: # Have lighting\n light = x[1]\n light_split = torch.split(light, 3, 1)\n\n feats = torch.Tensor()\n for i in range(len(img_split)):\n net_in = img_split[i] if len(x) == 1 else torch.cat([img_split[i], light_split[i]], 1)\n feat, shape = self.extractor(net_in)\n if i == 0:\n feats = feat\n else:\n if self.fuse_type == 'mean':\n feats = torch.stack([feats, feat], 1).sum(1)\n elif self.fuse_type == 'max':\n feats, _ = torch.stack([feats, feat], 1).max(1)\n if self.fuse_type == 'mean':\n feats = feats / len(img_split)\n feat_fused = feats\n normal = self.regressor(feat_fused, shape)\n return normal\n"
] | [
[
"torch.nn.functional.normalize",
"torch.Tensor",
"torch.cat",
"torch.nn.Conv2d",
"torch.split",
"torch.stack",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
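`PS_FCN.forward` above fuses per-image features with a running pairwise max (or running sum for mean fusion), which makes the network agnostic to the number and order of input images. The sketch below expresses the same fusion by stacking all features at once, which gives identical results for max and mean but is less memory-frugal than the running form; the helper name is illustrative.

```python
import torch

def fuse_features(feats, fuse_type='max'):
    """Order-agnostic fusion of a variable-length list of per-image feature
    maps, as in PS_FCN.forward: element-wise max or mean across inputs."""
    stacked = torch.stack(feats, dim=1)          # (N, n_imgs, C, H, W)
    if fuse_type == 'max':
        return stacked.max(dim=1).values
    elif fuse_type == 'mean':
        return stacked.mean(dim=1)
    raise ValueError(fuse_type)

feats = [torch.randn(2, 128, 32, 32) for _ in range(4)]   # four input images
print(fuse_features(feats).shape)                          # torch.Size([2, 128, 32, 32])
```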
ComputationalMechanics/SurfaceTopography | [
"6751be427c89d526ef4857300409596c79119029",
"6751be427c89d526ef4857300409596c79119029",
"7dc7346cb9545326a3323fda0d402f254eae8c0e",
"6751be427c89d526ef4857300409596c79119029",
"7dc7346cb9545326a3323fda0d402f254eae8c0e"
] | [
"SurfaceTopography/Uniform/Filtering.py",
"test/test_reliability_cutoff.py",
"examples/bicubic_interpolation.py",
"test/IO/test_io.py",
"SurfaceTopography/Nonuniform/common.py"
] | [
"#\n# Copyright 2020-2021 Lars Pastewka\n# 2020-2021 Antoine Sanner\n#\n# ### MIT license\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport numpy as np\nfrom scipy.signal import get_window\n\nfrom ..FFTTricks import get_window_2D\nfrom ..HeightContainer import UniformTopographyInterface\nfrom ..UniformLineScanAndTopography import DecoratedUniformTopography\n\n\nclass WindowedUniformTopography(DecoratedUniformTopography):\n \"\"\"\n Construct a topography with a window function applied to it.\n \"\"\"\n\n name = 'windowed_topography'\n\n def __init__(self, topography, window=None, direction=None, info={}):\n \"\"\"\n window : str, optional\n Window for eliminating edge effect. See scipy.signal.get_window.\n (Default: no window for periodic Topographies, \"hann\" window for\n nonperiodic Topographies)\n direction : str, optional\n Direction in which the window is applied. Possible options are\n 'x', 'y' and 'radial'. If set to None, it chooses 'x' for line\n scans and 'radial' for topographies. 
(Default: None)\n \"\"\"\n super().__init__(topography, info=info)\n\n self._window_name = window\n self._direction = direction\n\n self._window_data = None\n\n def _make_window(self):\n self._window_data = None\n\n n = self.parent_topography.nb_grid_pts\n\n try:\n nx, ny = n\n except ValueError:\n nx, = n\n\n window_name = self._window_name\n if not self.parent_topography.is_periodic and window_name is None:\n window_name = \"hann\"\n\n direction = self._direction\n if direction is None:\n direction = 'x' if self.parent_topography.dim == 1 else 'radial'\n\n # Construct window\n if window_name is not None and window_name != 'None':\n if direction == 'x':\n # Get window from scipy.signal\n win = get_window(window_name, nx)\n # Normalize window\n win *= np.sqrt(nx / (win ** 2).sum())\n elif direction == 'y':\n if self.parent_topography.dim == 1:\n raise ValueError(\"Direction 'y' does not make sense for line scans.\")\n # Get window from scipy.signal\n win = get_window(window_name, ny)\n # Normalize window\n win *= np.sqrt(ny / (win ** 2).sum())\n elif direction == 'radial':\n if self.parent_topography.dim == 1:\n raise ValueError(\"Direction 'radial' does not make sense for line scans.\")\n win = get_window_2D(window_name, nx, ny,\n self.parent_topography.physical_sizes)\n # Normalize window\n win *= np.sqrt(nx * ny / (win ** 2).sum())\n else:\n raise ValueError(f\"Unknown direction '{self._direction}'.\")\n\n self._window_data = win\n\n def __getstate__(self):\n \"\"\" is called and the returned object is pickled as the contents for\n the instance\n \"\"\"\n state = super().__getstate__(), \\\n self._window_name, self._direction\n return state\n\n def __setstate__(self, state):\n \"\"\" Upon unpickling, it is called with the unpickled state\n Keyword Arguments:\n state -- result of __getstate__\n \"\"\"\n superstate, self._window_name, self._direction = state\n super().__setstate__(superstate)\n\n @property\n def window_data(self):\n if self._window_data is None:\n self._make_window()\n return self._window_data\n\n def heights(self):\n \"\"\" Computes the windowed topography.\n \"\"\"\n if self.window_data is None:\n return self.parent_topography.heights()\n else:\n direction = self._direction\n if direction is None:\n direction = 'x' if self.parent_topography.dim == 1 else 'radial'\n if direction == 'x':\n return (self.window_data * self.parent_topography.heights().T).T\n elif direction == 'y' or direction == 'radial':\n return self.window_data * self.parent_topography.heights()\n else:\n raise ValueError(f\"Unknown direction '{self._direction}'.\")\n\n\nclass FilteredUniformTopography(DecoratedUniformTopography):\n name = 'filtered_topography'\n\n def __init__(self, topography,\n filter_function=lambda qx, qy: (np.abs(qx) <= 1) * np.abs(qy) <= 1,\n isotropic=True,\n info={}):\n\n if not topography.is_periodic:\n raise ValueError(\"only implemented for periodic topographies\")\n super().__init__(topography, info=info)\n\n self._filter_function = filter_function\n self._is_filter_isotropic = isotropic\n # TODO: should be deductible from the filter function signature\n\n def __getstate__(self):\n \"\"\" is called and the returned object is pickled as the contents for\n the instance\n \"\"\"\n state = super().__getstate__(), \\\n self._filter_function, self._is_filter_isotropic\n return state\n\n def __setstate__(self, state):\n \"\"\" Upon unpickling, it is called with the unpickled state\n Keyword Arguments:\n state -- result of __getstate__\n \"\"\"\n superstate, self._filter_function, 
self._is_filter_isotropic = state\n super().__setstate__(superstate)\n\n @property\n def is_filter_isotropic(self):\n return self._is_filter_isotropic\n\n def filter_function(self, *args):\n \"\"\"\n\n Parameters\n ----------\n if dim = 2 and filter is not isotropic\n qx, qy\n if dim = 1\n q\n \"\"\"\n\n if self.dim == 2 and not self.is_filter_isotropic \\\n and len(args) != 2:\n raise (\"ValueError: qx, qy expected\")\n elif self.dim == 1 and len(args) != 1:\n raise (\"ValueError: q expected\")\n\n return self._filter_function(*args)\n\n def heights(self):\n if self.dim == 2:\n nx, ny = self.parent_topography.nb_grid_pts\n sx, sy = self.parent_topography.physical_sizes\n\n qx = np.arange(0, nx, dtype=np.float64).reshape(-1, 1)\n qx = np.where(qx <= nx // 2, qx / sx, (qx - nx) / sx)\n qx *= 2 * np.pi\n\n qy = np.arange(0, ny // 2 + 1, dtype=np.float64).reshape(1, -1)\n qy *= 2 * np.pi / sy\n\n if self.is_filter_isotropic:\n h_qs = np.fft.irfftn(np.fft.rfftn(self.parent_topography.heights()) *\n self.filter_function(np.sqrt(qx ** 2 + qy ** 2)))\n else:\n h_qs = np.fft.irfftn(np.fft.rfftn(self.parent_topography.heights()) *\n self.filter_function(qx, qy))\n\n return h_qs\n elif self.dim == 1:\n s, = self.parent_topography.physical_sizes\n n, = self.parent_topography.nb_grid_pts\n q = abs(2 * np.pi * np.fft.rfftfreq(n, s / n))\n\n h = self.parent_topography.heights()\n h_q = np.fft.rfft(h)\n h_q_filtered = np.fft.irfft(h_q * self.filter_function(q))\n\n # Max_imaginary = np.max(np.imag(shifted_pot))\n # assert Max_imaginary < 1e-14 *np.max(np.real(shifted_pot)) ,\n # f\"{Max_imaginary}\"\n\n return np.real(h_q_filtered)\n\n\nclass ShortCutTopography(FilteredUniformTopography):\n name = 'shortcut_filtered_topography'\n\n def __init__(self, topography,\n cutoff_wavevector=None, cutoff_wavelength=None,\n kind=\"circular step\",\n info={}):\n r\"\"\"Applies a short wavelength cut filter to the topography using fft.\n\n for `kind==\"circular step\"` (default), parts of the spectrum with\n `|q| > cutoff_wavevector` are set to zero\n\n for `kind==\"square step\"`, parts of the spectrum with\n `q_x > cutoff_wavevector or q_y > cutoff_wavevector ` are set to zero\n\n either `cutoff_wavelength` or\n `cutoff_wavevector` :math:`= 2 pi /` `cutoff_wavelength`\n have to be provided.\n\n Parameters\n ----------\n topography: Topography\n cutoff_wavevector: float\n highest wavevector\n cutoff_wavelength: float\n shortest wavelength\n kind: {\"circular step\", \"square step\"}\n\n Returns\n -------\n Topography with filtered heights\n\n Examples\n --------\n >>> topography.shortcut(cutoff_wavevector=2 * np.pi / l)\n >>> topography.shortcut(cutoff_wavelength=l) # equivalent\n\n \"\"\"\n if not topography.is_periodic:\n raise ValueError(\"only implemented for periodic topographies\")\n\n if cutoff_wavelength is None:\n if cutoff_wavevector is not None:\n cutoff_wavelength = 2 * np.pi / cutoff_wavevector\n else:\n raise ValueError(\"cutoff_wavevector \"\n \"or cutoff_wavelength should be provided\")\n elif cutoff_wavevector is not None:\n raise ValueError(\"cutoff_wavevector \"\n \"or cutoff_wavelength should be provided\")\n\n self._cutoff_wavelength = cutoff_wavelength\n self._kind = kind\n\n def circular_step(q):\n return q <= self.cutoff_wavevector\n\n def square_step(qx, qy):\n return (np.abs(qx) <= self.cutoff_wavevector) * (\n np.abs(qy) <= self.cutoff_wavevector)\n\n if self._kind == \"circular step\":\n super().__init__(topography, info=info,\n filter_function=circular_step)\n elif self._kind == \"square 
step\":\n super().__init__(topography, info=info,\n filter_function=square_step, isotropic=False)\n else:\n raise ValueError(\"Invalid kind\")\n\n @property\n def cutoff_wavevector(self):\n return 2 * np.pi / self._cutoff_wavelength\n\n @property\n def cutoff_wavelength(self):\n return self._cutoff_wavelength\n\n def __getstate__(self):\n \"\"\" is called and the returned object is pickled as the contents for\n the instance\n \"\"\"\n state = super().__getstate__(), self._filter_function, \\\n self._kind, self._cutoff_wavelength\n return state\n\n def __setstate__(self, state):\n \"\"\" Upon unpickling, it is called with the unpickled state\n Keyword Arguments:\n state -- result of __getstate__\n \"\"\"\n superstate, self._filter_function, self._kind, \\\n self._cutoff_wavelength = state\n super().__setstate__(superstate)\n\n\nclass LongCutTopography(FilteredUniformTopography):\n name = 'longcut_filtered_topography'\n\n def __init__(self, topography,\n cutoff_wavevector=None, cutoff_wavelength=None,\n kind=\"circular step\",\n info={}):\n r\"\"\"Applies a long wavelength cut filter to the topography using fft.\n\n for `kind==\"circular step\"` (default), parts of the spectrum with\n `|q| < cutoff_wavevector` are set to zero\n\n for `kind==\"square step\"`, parts of the spectrum with\n `q_x < cutoff_wavevector or q_y < cutoff_wavevector ` are set to zero\n\n either `cutoff_wavelength` or\n `cutoff_wavevector` :math:`= 2 pi /` `cutoff_wavelength`\n have to be provided.\n\n Parameters\n ----------\n topography: Topography\n cutoff_wavevector: float\n highest wavevector\n cutoff_wavelength: float\n shortest wavelength\n kind: {\"circular step\", \"square step\"}\n\n Returns\n -------\n Topography with filtered heights\n\n Examples\n --------\n >>> topography.longcut(cutoff_wavevector=2 * np.pi / l)\n >>> topography.longcut(cutoff_wavelength=l) # equivalent\n\n \"\"\"\n if not topography.is_periodic:\n raise ValueError(\"only implemented for periodic topographies\")\n\n if cutoff_wavelength is None:\n if cutoff_wavevector is not None:\n cutoff_wavelength = 2 * np.pi / cutoff_wavevector\n else:\n raise ValueError(\"cutoff_wavevector \"\n \"or cutoff_wavelength should be provided\")\n elif cutoff_wavevector is not None:\n raise ValueError(\"cutoff_wavevector \"\n \"or cutoff_wavelength should be provided\")\n\n self._cutoff_wavelength = cutoff_wavelength\n self._kind = kind\n\n def circular_step(q):\n return q >= self.cutoff_wavevector\n\n def square_step(qx, qy):\n return (np.abs(qx) >= self.cutoff_wavevector) * (\n np.abs(qy) >= self.cutoff_wavevector)\n\n if self._kind == \"circular step\":\n super().__init__(topography, info=info,\n filter_function=circular_step)\n elif self._kind == \"square step\":\n super().__init__(topography, info=info,\n filter_function=square_step, isotropic=False)\n else:\n raise ValueError(\"Invalid kind\")\n\n @property\n def cutoff_wavevector(self):\n return 2 * np.pi / self._cutoff_wavelength\n\n @property\n def cutoff_wavelength(self):\n return self._cutoff_wavelength\n\n def __getstate__(self):\n \"\"\" is called and the returned object is pickled as the contents for\n the instance\n \"\"\"\n state = super().__getstate__(), self._filter_function, \\\n self._kind, self._cutoff_wavelength\n return state\n\n def __setstate__(self, state):\n \"\"\" Upon unpickling, it is called with the unpickled state\n Keyword Arguments:\n state -- result of __getstate__\n \"\"\"\n superstate, self._filter_function, self._kind, \\\n self._cutoff_wavelength = state\n 
super().__setstate__(superstate)\n\n\nUniformTopographyInterface.register_function(\"window\", WindowedUniformTopography)\nUniformTopographyInterface.register_function(\"filter\", FilteredUniformTopography)\nUniformTopographyInterface.register_function(\"shortcut\", ShortCutTopography)\nUniformTopographyInterface.register_function(\"longcut\", LongCutTopography)\n",
"#\n# Copyright 2021 Lars Pastewka\n#\n# ### MIT license\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n\"\"\"\nTests reliability cutoff and its use to restrict the range of data in the\nanalysis pipeline functions.\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom SurfaceTopography import (read_container, read_topography, SurfaceContainer, NonuniformLineScan, UniformLineScan,\n Topography)\nfrom SurfaceTopography.Exceptions import NoReliableDataError\n\n\ndef test_scanning_probe_reliability_cutoff(file_format_examples):\n surf = read_topography(os.path.join(file_format_examples, 'di1.di'))\n np.testing.assert_allclose(surf.scanning_probe_reliability_cutoff(40), 90.700854)\n\n # Should be None because there is no tip radius information\n assert surf.short_reliability_cutoff() is None\n\n cut = surf.short_reliability_cutoff(0.2)\n # Should be the maximum of the actual value and the value that was passed\n np.testing.assert_almost_equal(cut, 0.2)\n\n\ndef test_tip_radius_reliability_cutoff_from_instrument_metadata(file_format_examples):\n surf = read_topography(os.path.join(file_format_examples, 'di1.di'), info={\n 'instrument': {\n 'parameters': {\n 'tip_radius': {\n 'value': 40,\n 'unit': 'nm',\n }\n }\n }\n })\n cut = surf.short_reliability_cutoff()\n np.testing.assert_allclose(cut, 90.700854)\n\n # Make sure PSD returns only reliable portion\n q, _ = surf.power_spectrum_from_profile()\n assert q[-1] < 2 * np.pi / cut\n\n q, _ = surf.power_spectrum_from_area()\n assert q[-1] < 2 * np.pi / cut\n\n # Make sure ACF returns only reliable portion\n r, A = surf.autocorrelation_from_profile()\n assert r[0] >= cut / 2\n\n r, A = surf.autocorrelation_from_area()\n assert r[0] >= cut / 2\n\n # Make sure SDRP returns only reliable portion\n r, s = surf.scale_dependent_statistical_property(lambda x, y=None: np.mean(x * x))\n assert r[0] >= cut / 2\n\n\ndef test_resolution_reliability_cutoff_from_instrument_metadata(file_format_examples):\n resolution = 70\n surf = read_topography(os.path.join(file_format_examples, 'di1.di'), info={\n 'instrument': {\n 'parameters': {\n 'resolution': {\n 'value': resolution,\n 'unit': 'nm',\n }\n }\n }\n })\n cut = surf.short_reliability_cutoff()\n np.testing.assert_almost_equal(cut, resolution)\n\n # Make sure PSD returns only reliable portion\n q, _ = surf.power_spectrum_from_profile()\n assert q[-1] < 2 * np.pi / cut\n\n q, _ = surf.power_spectrum_from_area()\n assert q[-1] < 2 * np.pi / cut\n\n # Make sure ACF returns only 
reliable portion\n r, A = surf.autocorrelation_from_profile()\n assert r[0] >= cut / 2\n\n r, A = surf.autocorrelation_from_area()\n assert r[0] >= cut / 2\n\n # Make sure SDRP returns only reliable portion\n r, s = surf.scale_dependent_statistical_property(lambda x, y=None: np.mean(x * x))\n assert r[0] >= cut / 2\n\n\ndef test_reliability_cutoff_line_scan(file_format_examples):\n surf = read_topography(os.path.join(file_format_examples, 'example7.txt'), unit='um', info={\n 'instrument': {\n 'parameters': {\n 'tip_radius': {\n 'value': 40,\n 'unit': 'nm',\n }\n }\n }\n })\n cut = surf.short_reliability_cutoff()\n np.testing.assert_allclose(cut, 0.126504, atol=1e-6)\n\n cut = surf.to_nonuniform().short_reliability_cutoff()\n # This differs from the above because the derivatives are computed at slightly different locations\n np.testing.assert_allclose(cut, 0.126505, atol=1e-6)\n\n cut = surf.to_nonuniform().short_reliability_cutoff(0.2)\n # Should be the maximum of the actual value and the value that was passed\n np.testing.assert_allclose(cut, 0.2)\n\n cut = surf.to_nonuniform().short_reliability_cutoff(0.1)\n # Should be the maximum of the actual value and the value that was passed\n np.testing.assert_allclose(cut, 0.126505, atol=1e-6)\n\n\ndef test_problem1(file_format_examples):\n surf = read_topography(os.path.join(file_format_examples, 'di6.di'), info={\n 'instrument': {\n 'parameters': {\n 'tip_radius': {\n 'value': 26,\n 'unit': 'nm',\n }\n }\n }\n })\n assert surf.short_reliability_cutoff() is None\n\n\ndef test_no_reliable_data_uniform():\n t = UniformLineScan([-0.16666667, -0.16666667, -0.16666667, 0.83333333, -0.16666667, -0.16666667, -0.16666667], 6,\n unit='nm',\n info=dict(instrument={'name': 'Bla',\n 'type': 'microscope-based',\n 'parameters': {'resolution': {'unit': 'µm', 'value': 10.0}}}))\n\n with pytest.raises(NoReliableDataError):\n t.power_spectrum_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.power_spectrum_from_profile(resampling_method=None)\n\n with pytest.raises(NoReliableDataError):\n t.autocorrelation_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.autocorrelation_from_profile(resampling_method=None)\n\n with pytest.raises(NoReliableDataError):\n t.variable_bandwidth_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.scale_dependent_statistical_property(lambda x: np.mean(x * x), n=1)\n\n c = SurfaceContainer([t])\n with pytest.raises(NoReliableDataError):\n c.power_spectrum(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.autocorrelation(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.variable_bandwidth(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.scale_dependent_statistical_property(lambda x: np.mean(x * x), n=1, unit='um')\n\n\ndef test_no_reliable_data_topography():\n t = Topography(\n np.array([[-0.16666667, -0.16666667, -0.16666667, 0.83333333, -0.16666667, -0.16666667, -0.16666667]] * 6),\n (6, 6),\n unit='nm',\n info=dict(instrument={'name': 'Bla',\n 'type': 'microscope-based',\n 'parameters': {'resolution': {'unit': 'µm', 'value': 10.0}}}))\n\n with pytest.raises(NoReliableDataError):\n t.power_spectrum_from_area()\n\n with pytest.raises(NoReliableDataError):\n t.autocorrelation_from_area()\n\n with pytest.raises(NoReliableDataError):\n t.variable_bandwidth_from_area()\n\n with pytest.raises(NoReliableDataError):\n t.scale_dependent_statistical_property(lambda x, y: np.mean(x * x + y * y), n=1)\n\n\ndef test_no_reliable_data_nonuniform():\n t = NonuniformLineScan([0., 
1., 2., 3.5, 4., 5., 6.],\n [-0.16666667, -0.16666667, -0.16666667, 0.83333333, -0.16666667, -0.16666667, -0.16666667],\n unit='nm',\n info=dict(instrument={'name': 'Bla',\n 'type': 'microscope-based',\n 'parameters': {'resolution': {'unit': 'µm', 'value': 10.0}}}))\n\n with pytest.raises(NoReliableDataError):\n t.power_spectrum_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.power_spectrum_from_profile(resampling_method=None)\n\n with pytest.raises(NoReliableDataError):\n t.autocorrelation_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.autocorrelation_from_profile(resampling_method=None)\n\n with pytest.raises(NoReliableDataError):\n t.variable_bandwidth_from_profile()\n\n with pytest.raises(NoReliableDataError):\n t.scale_dependent_statistical_property(lambda x: np.mean(x * x), n=1)\n\n c = SurfaceContainer([t])\n with pytest.raises(NoReliableDataError):\n c.power_spectrum(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.autocorrelation(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.variable_bandwidth(unit='um')\n\n with pytest.raises(NoReliableDataError):\n c.scale_dependent_statistical_property(lambda x: np.mean(x * x), n=1, unit='um')\n\n\ndef test_linear_2d_small_tip():\n t = Topography(np.array([[9, 9, 9, 9, 9],\n [7, 7, 7, 7, 7],\n [5, 5, 5, 5, 5],\n [3, 3, 3, 3, 3],\n [1, 1, 1, 1, 1],\n [-1, -1, -1, -1, -1],\n [-3, -3, -3, -3, -3],\n [-5, -5, -5, -5, -5],\n [-7, -7, -7, -7, -7],\n [-9, -9, -9, -9, -9]]).T,\n (1, 2), unit='um', info={\n 'instrument': {\n 'parameters': {\n 'tip_radius': {\n 'value': 26,\n 'unit': 'nm',\n }\n }\n }}).detrend('center')\n\n # This has zero curvature, so everything should be reliable\n assert t.short_reliability_cutoff() is None\n\n q, C = t.power_spectrum_from_profile()\n assert np.isfinite(C).sum() > 0\n\n q, C = t.transpose().power_spectrum_from_profile()\n assert np.isfinite(C).sum() > 0\n\n q, C = t.power_spectrum_from_area()\n assert np.isfinite(C).sum() > 0\n\n\ndef test_linear_2d_large_tip():\n t = Topography(np.array([[9, 9, 9, 9, 9],\n [7, 7, 7, 7, 7],\n [5, 5, 5, 5, 5],\n [3, 3, 3, 3, 3],\n [1, 1, 1, 1, 1],\n [-1, -1, -1, -1, -1],\n [-3, -3, -3, -3, -3],\n [-5, -5, -5, -5, -5],\n [-7, -7, -7, -7, -7],\n [-9, -9, -9, -9, -9]]).T,\n (1, 2), unit='um', info={\n 'instrument': {\n 'parameters': {\n 'tip_radius': {\n 'value': 10,\n 'unit': 'mm',\n }\n }\n }}).detrend('center')\n\n # This has zero curvature, so everything should be reliable\n assert t.short_reliability_cutoff() is None\n\n q, C = t.power_spectrum_from_profile()\n assert np.isfinite(C).sum() > 0\n\n q, C = t.transpose().power_spectrum_from_profile()\n assert np.isfinite(C).sum() > 0\n\n q, C = t.power_spectrum_from_area()\n assert np.isfinite(C).sum() > 0\n\n\ndef test_partially_reliable_data_container(file_format_examples):\n c, = read_container(f'{file_format_examples}/container1.zip')\n\n # Patch info dictionary\n c[0]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'um'}}}\n c[1]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'um'}}}\n c[2]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'um'}}}\n\n # Check that we raise NoReliableDataError for one of the topographies\n c[0].power_spectrum_from_profile()\n c[1].power_spectrum_from_profile()\n with pytest.raises(NoReliableDataError):\n c[2].power_spectrum_from_profile()\n\n # This should raise no error\n c.power_spectrum(unit='um')\n\n # Patch info dictionary such that all data is unreliable\n 
c[0]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'mm'}}}\n c[1]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'mm'}}}\n c[2]._info['instrument'] = {'parameters': {'tip_radius': {'value': 10, 'unit': 'mm'}}}\n\n # Check that we raise NoReliableDataError for one of the topographies\n with pytest.raises(NoReliableDataError):\n c[0].power_spectrum_from_profile()\n with pytest.raises(NoReliableDataError):\n c[1].power_spectrum_from_profile()\n with pytest.raises(NoReliableDataError):\n c[2].power_spectrum_from_profile()\n\n # This should now raise a NoReliableDataError\n with pytest.raises(NoReliableDataError):\n c.power_spectrum(unit='um')\n",
"#\n# Copyright 2020 Lars Pastewka\n# 2020 Antoine Sanner\n#\n# ### MIT license\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom SurfaceTopography.Support.Interpolation import Bicubic\nfrom SurfaceTopography.Generation import fourier_synthesis\n\n# from muFFT import FourierInterpolation # future\n\n\nnx, ny = [512] * 2\nsx, sy = [1.] * 2\n\n# %% Generate random topography\nhc = 0.1 * sx\n\ntopography = fourier_synthesis((nx, ny), (sx, sy), 0.8, rms_height=1.,\n short_cutoff=hc, long_cutoff=hc + 1e-9, )\ntopography = topography.scale(1 / topography.rms_height_from_area())\ndx, dy = topography.fourier_derivative()\n\n# %%\nfig, ax = plt.subplots()\nax.imshow(topography.heights())\nfig.show()\n# %%\nfig, ax = plt.subplots()\nax.imshow(dx)\nfig.show()\nfig, ax = plt.subplots()\nax.imshow(topography.derivative(1)[0])\nfig.show()\n\n# %% check bicubic interpolation against fourier interpolation\n\n# %%\nfig, ax = plt.subplots()\nx, y = topography.positions()\nax.plot(x[:, 0], topography.heights()[:, 0], \".k\", label=\"original\")\n\nskips = [4, 8, 16, 32, 64]\nrms_err = []\nmax_err = []\nfor skip in skips:\n grid_slice = (slice(None, None, skip), slice(None, None, skip))\n\n interp = Bicubic(topography.heights()[grid_slice],\n dx[grid_slice] * topography.pixel_size[0] * skip,\n dy[grid_slice] * topography.pixel_size[1] * skip\n )\n\n interp_field, interp_derx, interp_dery = interp(\n x / (topography.pixel_size[0] * skip),\n y / (topography.pixel_size[1] * skip), derivative=1)\n l, = ax.plot(x[grid_slice][:, 0], topography.heights()[grid_slice][:, 0],\n \"+\")\n ax.plot(x[:, 0], interp_field[:, 0], color=l.get_color(),\n label=r\"bicubic, $l_{{cor}} / \\Delta_x={}$\"\n .format(hc / (skip * topography.pixel_size[0])))\n\n rms_err.append(\n np.sqrt(np.mean((interp_field - topography.heights()) ** 2)))\n max_err.append(np.max(abs(interp_field - topography.heights())))\n ax.legend()\n fig.show()\n\nskips = np.array(skips)\nrms_err = np.array(rms_err)\nmax_err = np.array(max_err)\n\n# %%\n\nfig, ax = plt.subplots()\nsampling = (skips * topography.pixel_size[0]) / hc\n\nax.plot(sampling, rms_err, \"-o\", label=\"rms error\")\nax.plot(sampling, max_err, \"-o\", label=\"max error\")\n\nax.set_xlabel(r\"$\\Delta_x / l_{cor}$\")\nax.legend()\nax.set_yscale(\"log\")\nax.set_xscale(\"log\")\nfig.show()\n",
"#\n# Copyright 2019-2020 Lars Pastewka\n# 2020 Michael Röttger\n# 2019-2020 Antoine Sanner\n# 2020 Kai Haase\n#\n# ### MIT license\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\nimport io\nimport os\nimport pickle\nimport tempfile\nimport unittest\nimport warnings\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nimport NuMPI\nfrom NuMPI import MPI\n\nimport SurfaceTopography.IO\nfrom SurfaceTopography import open_topography, read_topography\nfrom SurfaceTopography.Exceptions import CannotDetectFileFormat, MetadataAlreadyFixedByFile\nfrom SurfaceTopography.IO import readers, detect_format\nfrom SurfaceTopography.IO.common import is_binary_stream\nfrom SurfaceTopography.IO.Text import read_matrix, read_xyz\nfrom SurfaceTopography.UniformLineScanAndTopography import Topography\n\npytestmark = pytest.mark.skipif(\n MPI.COMM_WORLD.Get_size() > 1,\n reason=\"tests only serial functionalities, please execute with pytest\")\n\nDATADIR = os.path.join(\n os.path.dirname(\n os.path.dirname(os.path.realpath(__file__))),\n 'file_format_examples')\n\n\ndef _convert_filelist(filelist):\n \"\"\"\n Parameters\n ----------\n filelist\n list of strings with filenames withput path\n\n Returns\n -------\n List of filenames prepended with DATADIR\n \"\"\"\n return [os.path.join(DATADIR, fn) for fn in filelist]\n\n\nbinary_example_file_list = _convert_filelist([\n 'di1.di',\n 'di2.di',\n 'di3.di',\n 'di4.di',\n 'di5.di',\n 'example.ibw',\n 'spot_1-1000nm.ibw',\n # 'surface.2048x2048.h5',\n '10x10-one_channel_without_name.ibw',\n 'example1.mat',\n 'example.opd',\n 'example.x3p',\n 'example2.x3p',\n 'opdx1.OPDx',\n 'opdx2.OPDx',\n 'mi1.mi',\n 'N46E013.hgt',\n 'example.zon',\n 'example.nc',\n] + [] if NuMPI._has_mpi4py else ['example-2d.npy']) # MPI I/O does not support Python streams\n\ntext_example_file_list = _convert_filelist([\n 'example1.txt',\n 'example2.txt',\n 'example3.txt',\n 'example4.txt',\n 'example5.txt',\n 'example8.txt',\n # example8: from the reader's docstring, with extra newline at end\n 'opdx1.txt',\n 'opdx2.txt',\n 'example-2d.xyz',\n # Not yet working\n # 'example6.txt',\n])\n\ntext_example_without_size_file_list = _convert_filelist([\n 'example.xyz',\n 'line_scan_1_minimal_spaces.asc',\n])\n\nexplicit_physical_sizes = _convert_filelist([\n 'example5.txt',\n 'example1.mat',\n 'example-2d.npy'\n])\n\ntext_example_memory_list = [\n \"\"\"\n 0 0\n 1 2\n 2 4\n 3 6\n \"\"\"\n]\n\n\[email protected](\"reader\", readers)\ndef 
test_no_resource_warning_on_failure(reader):\n \"\"\"\n Tests for each reader class that it doesn't raise a ResourceWarning\n \"\"\"\n fn = os.path.join(DATADIR, \"wrongnpyfile.npy\")\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\") # deactivate hiding of ResourceWarnings\n\n # noinspection PyBroadException\n try:\n reader(fn)\n except Exception:\n pass\n # assert no warning is a ResourceWarning\n for wi in w:\n assert not issubclass(wi.category, ResourceWarning)\n\n\ndef test_uniform_stylus():\n t = read_topography(os.path.join(DATADIR, 'example7.txt'))\n assert t.is_uniform\n\n\ndef test_cannot_detect_file_format_on_txt():\n with pytest.raises(CannotDetectFileFormat):\n read_topography(os.path.join(DATADIR, 'nonsense_txt_file.txt'))\n\n\[email protected]('fn', text_example_file_list + text_example_without_size_file_list)\ndef test_keep_text_file_open(fn):\n # Text file can be opened as binary or text\n with open(fn, 'rb') as f:\n open_topography(f)\n assert not f.closed, f\"Text file {fn} was opened as binary file and is closed, but should not\"\n with open(fn, 'r') as f:\n open_topography(f)\n assert not f.closed, f\"Text file {fn} was opened as text file and is closed, but should not\"\n\n\[email protected]('fn', binary_example_file_list)\ndef test_keep_binary_file_open(fn):\n with open(fn, 'rb') as f:\n open_topography(f)\n assert not f.closed, f\"Binary file {fn} was opened as binary file and is closed, but should not\"\n\n\[email protected]('datastr', text_example_memory_list)\ndef test_keep_stream_from_memory_open(datastr):\n with io.StringIO(datastr) as f:\n open_topography(f)\n assert not f.closed, \"text memory stream for '{}' was closed\".format(datastr)\n\n # Doing the same when but only giving a binary stream\n with io.BytesIO(datastr.encode(encoding='utf-8')) as f:\n open_topography(f)\n assert not f.closed, \"binary memory stream for '{}' was closed\".format(datastr)\n\n\ndef test_is_binary_stream():\n # just grep a random existing file here\n fn = text_example_file_list[0]\n\n assert is_binary_stream(open(fn, mode='rb'))\n assert not is_binary_stream(open(fn, mode='r')) # opened as text file\n\n # should also work with streams in memory\n assert is_binary_stream(io.BytesIO(b\"11111\")) # some bytes in memory\n assert not is_binary_stream(io.StringIO(\"11111\")) # some bytes in memory\n\n\[email protected]('fn', text_example_file_list + text_example_without_size_file_list + binary_example_file_list)\ndef test_can_be_pickled(fn):\n reader = open_topography(fn)\n physical_sizes = None\n if reader.default_channel.physical_sizes is None:\n physical_sizes = (1.,) * reader.default_channel.dim\n\n topography = reader.topography(physical_sizes=physical_sizes)\n topographies = [topography]\n if hasattr(topography, 'to_uniform'):\n topographies += [topography.to_uniform(100, 0)]\n for t in topographies:\n s = pickle.dumps(t)\n pickled_t = pickle.loads(s)\n\n #\n # Compare some attributes after unpickling\n #\n # sometimes the result is a list of topographies\n multiple = isinstance(t, list)\n if not multiple:\n t = [t]\n pickled_t = [pickled_t]\n\n for x, y in zip(t, pickled_t):\n for attr in ['dim', 'physical_sizes', 'is_periodic']:\n assert getattr(x, attr) == getattr(y, attr)\n if x.physical_sizes is not None:\n assert_array_equal(x.positions(), y.positions())\n assert_array_equal(x.heights(), y.heights())\n\n\[email protected]('fn', text_example_file_list + text_example_without_size_file_list + 
binary_example_file_list)\ndef test_reader_arguments(fn):\n \"\"\"Check whether all readers have channel, physical_sizes and\n height_scale_factor arguments. Also check whether we can execute\n `topography` multiple times for all readers\"\"\"\n physical_sizes0 = (1.2, 1.3)\n\n # Test open -> topography\n r = open_topography(fn)\n physical_sizes = None if r.channels[0].physical_sizes is not None else physical_sizes0\n\n t = r.topography(channel_index=0, physical_sizes=physical_sizes,\n height_scale_factor=None)\n if physical_sizes is not None:\n assert t.physical_sizes == physical_sizes\n # Second call to topography\n t2 = r.topography(channel_index=0, physical_sizes=physical_sizes,\n height_scale_factor=None)\n if physical_sizes is not None:\n assert t2.physical_sizes == physical_sizes\n assert_array_equal(t.heights(), t2.heights())\n # Test read_topography\n t = read_topography(fn, channel_index=0,\n physical_sizes=physical_sizes,\n height_scale_factor=None)\n if physical_sizes is not None:\n assert t.physical_sizes == physical_sizes\n\n\[email protected]('fn', text_example_file_list + text_example_without_size_file_list + binary_example_file_list)\ndef test_readers_with_binary_file_object(fn):\n \"\"\"Check whether all readers have channel, physical_sizes and\n height_scale_factor arguments. Also check whether we can execute\n `topography` multiple times for all readers\"\"\"\n physical_sizes0 = (1.2, 1.3)\n\n # Test open -> topography\n r = open_topography(open(fn, mode='rb'))\n physical_sizes = None if r.channels[0].physical_sizes is not None else physical_sizes0\n t = r.topography(channel_index=0, physical_sizes=physical_sizes,\n height_scale_factor=None)\n if physical_sizes is not None:\n assert t.physical_sizes == physical_sizes\n # Second call to topography\n t2 = r.topography(channel_index=0, physical_sizes=physical_sizes,\n height_scale_factor=None)\n if physical_sizes is not None:\n assert t2.physical_sizes == physical_sizes\n assert_array_equal(t.heights(), t2.heights(), err_msg=fn)\n\n\[email protected]('fn', text_example_file_list + text_example_without_size_file_list + binary_example_file_list)\ndef test_nb_grid_pts_and_physical_sizes_are_tuples_or_none(fn):\n r = open_topography(fn)\n assert isinstance(r.default_channel.nb_grid_pts, tuple), f'{fn} - {r.__class__}: {r.default_channel.nb_grid_pts}'\n if r.default_channel.physical_sizes is not None:\n assert isinstance(r.default_channel.physical_sizes, tuple), \\\n f'{fn} - {r.__class__}: {r.default_channel.physical_sizes}'\n # If it is a tuple, it cannot contains None's\n assert np.all([p is not None for p in r.default_channel.physical_sizes]), \\\n f'{fn} - {r.__class__}: {r.default_channel.physical_sizes}'\n\n\[email protected]('fn', text_example_file_list + text_example_without_size_file_list + binary_example_file_list)\ndef test_reader_topography_same(fn):\n \"\"\"\n Tests that properties like physical sizes, units and nb_grid_pts are\n the same in the ChannelInfo and the loaded topography.\n \"\"\"\n\n reader = open_topography(fn)\n\n for channel in reader.channels:\n foo_str = reader.format() + \"-%d\" % (channel.index,) # unique for each channel\n topography = channel.topography(\n physical_sizes=(1, 1) if channel.physical_sizes is None\n else None,\n info=dict(foo=foo_str))\n assert channel.nb_grid_pts == topography.nb_grid_pts\n\n # some checks on info dict in channel and topography\n assert topography.info['foo'] == foo_str\n if channel.unit is not None or topography.unit is not None:\n assert channel.unit == 
topography.unit\n assert channel.info['unit'] == topography.unit\n assert channel.unit == topography.info['unit']\n\n if channel.physical_sizes is not None:\n assert channel.physical_sizes == topography.physical_sizes\n\n if channel.height_scale_factor is not None and hasattr(topography, 'scale_factor'):\n assert channel.height_scale_factor == topography.scale_factor\n\n\[email protected]('fn', text_example_file_list + text_example_without_size_file_list + binary_example_file_list)\ndef test_reader_args_doesnt_overwrite_data_from_file(fn):\n \"\"\"\n Tests that if some properties like `physical_sizes and `height_scale_factor`\n are given in the file, they cannot be overridden by given arguments to\n the .topography() method.\n \"\"\"\n reader = open_topography(fn)\n ch = reader.default_channel\n physical_sizes_arg_if_missing_in_file = (1.,) * ch.dim\n physical_sizes_arg = physical_sizes_arg_if_missing_in_file if ch.physical_sizes is None else None\n\n if ch.physical_sizes is not None:\n with pytest.raises(MetadataAlreadyFixedByFile):\n reader.topography(physical_sizes=physical_sizes_arg_if_missing_in_file)\n\n if ch.height_scale_factor is not None:\n with pytest.raises(MetadataAlreadyFixedByFile):\n if ch.physical_sizes is None:\n reader.topography(physical_sizes=physical_sizes_arg, height_scale_factor=10)\n else:\n # if an exception happens, we want it because of height scale factor\n reader.topography(height_scale_factor=10)\n\n # A small problem with this test is maybe that there are a few input\n # files which pass this test without any assert, so it looks like\n # a passed test, but it has no meaning. Since this are only three files\n # by now, I think this is okay, sorting this out would be difficult.\n\n\[email protected]('fn', text_example_file_list + binary_example_file_list)\ndef test_periodic_flag(fn):\n reader = open_topography(fn)\n ch = reader.default_channel\n physical_sizes_arg_if_missing_in_file = (1.,) * ch.dim\n physical_sizes_arg = physical_sizes_arg_if_missing_in_file if ch.physical_sizes is None else None\n\n t = reader.topography(physical_sizes=physical_sizes_arg, periodic=True)\n assert t.is_periodic, fn\n\n t = reader.topography(physical_sizes=physical_sizes_arg, periodic=False)\n assert not t.is_periodic, fn\n\n\[email protected]('fn', text_example_file_list + text_example_without_size_file_list + binary_example_file_list)\ndef test_reader_height_scale_factor_arg_for_topography(fn):\n \"\"\"Test whether height_scale_factor can be given to .topography() and is effective.\n\n Also checking whether the reader channels have .height_scale_factor attribute and\n whether it is equal to the scaling factor known from topography.\n\n Also tests that info dict of channel and topography have no height_scale_factor,\n because this should be a channel property now.\n \"\"\"\n reader = open_topography(fn)\n ch = reader.default_channel\n\n assert hasattr(ch, 'height_scale_factor')\n assert 'height_scale_factor' not in ch.info\n\n height_scale_factor_if_missing_in_file = 2 # just some number\n\n # calculate argument for .topography()\n height_scale_factor_arg = height_scale_factor_if_missing_in_file if ch.height_scale_factor is None else None\n\n # which factor we expect at the end\n exp_height_scale_factor = height_scale_factor_if_missing_in_file if ch.height_scale_factor is None \\\n else ch.height_scale_factor\n\n # in order to call .topography(), we also need valid physical_sizes\n physical_sizes_arg_if_missing_in_file = (1.,) * ch.dim\n physical_sizes_arg = 
physical_sizes_arg_if_missing_in_file if ch.physical_sizes is None else None\n\n # The check whether an exception is raised if meta data like `physical_sizes`\n # and `height_scale_factor` has already been defined in the file and\n # one tries to override it, is done in another test.\n #\n # (only do this if not an NC file with units, since this is special: height_scale_factor\n # should only be choosable then.)\n if reader.format != 'nc' and 'unit' not in ch.info:\n topography = reader.topography(physical_sizes=physical_sizes_arg, height_scale_factor=height_scale_factor_arg)\n if hasattr(topography, 'scale_factor'):\n # sometimes we use height_scale_factor = 1 in the channel info in order\n # to denote that the height scale factor cannot be changed later.\n # This does not mean that the topography also really has been scaled, so\n # we compare the scale factor here only of it is available\n assert pytest.approx(exp_height_scale_factor) == topography.scale_factor, \\\n \"Difference in height scale factor between channel/argument and resulting topography\"\n\n\[email protected]('fn', text_example_file_list + text_example_without_size_file_list + binary_example_file_list)\ndef test_to_netcdf(fn):\n \"\"\"Test that files can be stored as NetCDF and that reading then gives\n an identical topography object\"\"\"\n if fn in explicit_physical_sizes:\n t = read_topography(fn, physical_sizes=(1, 1))\n else:\n t = read_topography(fn)\n with tempfile.TemporaryDirectory() as d:\n tmpfn = f'{d}/netcdf_representation.nc'\n t.to_netcdf(tmpfn)\n t2 = read_topography(tmpfn)\n assert t == t2\n\n\nclass UnknownFileFormatGivenTest(unittest.TestCase):\n\n def test_read(self):\n with self.assertRaises(SurfaceTopography.IO.UnknownFileFormatGiven):\n SurfaceTopography.IO.open_topography(\n os.path.join(DATADIR, \"surface.2048x2048.h5\"),\n format='Nonexistentfileformat')\n\n def test_detect_format(self):\n with self.assertRaises(SurfaceTopography.Exceptions.UnknownFileFormatGiven):\n SurfaceTopography.IO.open_topography(\n os.path.join(DATADIR, \"surface.2048x2048.h5\"),\n format='Nonexistentfileformat')\n\n\ndef test_file_format_mismatch():\n with pytest.raises(SurfaceTopography.Exceptions.FileFormatMismatch):\n SurfaceTopography.IO.open_topography(\n os.path.join(DATADIR, 'surface.2048x2048.h5'), format=\"npy\")\n\n\nclass LineScanInFileWithMinimalSpacesTest(unittest.TestCase):\n def test_detect_format_then_read(self):\n self.assertEqual(detect_format(\n os.path.join(DATADIR, 'line_scan_1_minimal_spaces.asc')), 'xyz')\n\n def test_read(self):\n surface = read_xyz(\n os.path.join(DATADIR, 'line_scan_1_minimal_spaces.asc'))\n\n self.assertFalse(surface.is_uniform)\n self.assertEqual(surface.dim, 1)\n\n x, y = surface.positions_and_heights()\n self.assertGreater(len(x), 0)\n self.assertEqual(len(x), len(y))\n\n\[email protected](\"reader\", readers)\ndef test_readers_have_name(reader):\n reader.name()\n\n\n# yes, the German version still has \"Value units\"\[email protected](\"lang_filename_infix\", [\"english\", \"german\"])\ndef test_gwyddion_txt_import(lang_filename_infix):\n fname = os.path.join(\n DATADIR,\n 'gwyddion-export-{}.txt'.format(lang_filename_infix))\n\n #\n # test channel infos\n #\n reader = open_topography(fname)\n\n assert len(reader.channels) == 1\n channel = reader.default_channel\n\n assert channel.name == \"My Channel Name\"\n assert channel.unit == 'm'\n assert pytest.approx(\n channel.physical_sizes[0]) == 12.34 * 1e-6 # was given as µm\n assert pytest.approx(\n channel.physical_sizes[1]) 
== 5678.9 * 1e-9 # was given as nm\n\n #\n # test metadata of topography\n #\n topo = reader.topography()\n assert topo.unit == 'm'\n assert pytest.approx(\n topo.physical_sizes[0]) == 12.34 * 1e-6 # was given as µm\n assert pytest.approx(\n topo.physical_sizes[1]) == 5678.9 * 1e-9 # was given as nm\n\n #\n # test scaling and order of data\n #\n # The order of the lines in the text files mimic the lines as they\n # are shown in the gwyddion plot.\n #\n # In gwyddion's text export:\n # - first index corresponds to y dimension (rows), second index (columns)\n # to x dimension\n # - y coordinates grow from top row to bottom row\n # - x coordinates grow from left column to column of array\n #\n # PyCo's heights() has a different order:\n # - first index corresponds to x dimension, second index to y dimension\n # - plot from the heights correspond to same image in gwyddion if plotted\n # with \"pcolormesh(t.heights.T)\", but with origin in lower left, i.e. the\n # image looks flipped vertically when compared to gwyddion\n #\n # => heights() must be same array as in file, but transposed\n #\n heights_in_file = [[1, 1.5, 3],\n [-2, -3, -6],\n [0, 0, 0],\n [9, 9, 9]]\n\n expected_heights = np.array(heights_in_file).T\n\n np.testing.assert_allclose(topo.heights(), expected_heights)\n\n\ndef test_detect_format():\n assert detect_format(os.path.join(DATADIR, 'di1.di')) == 'di'\n assert detect_format(os.path.join(DATADIR, 'di2.di')) == 'di'\n assert detect_format(os.path.join(DATADIR, 'example.ibw')) == 'ibw'\n assert detect_format(os.path.join(DATADIR, 'example.opd')) == 'opd'\n assert detect_format(os.path.join(DATADIR, 'example.x3p')) == 'x3p'\n assert detect_format(os.path.join(DATADIR, 'example1.mat')) == 'mat'\n assert detect_format(os.path.join(DATADIR, 'example.xyz')) == 'xyz'\n assert detect_format(os.path.join(DATADIR, 'example-2d.xyz')) == 'xyz'\n assert detect_format(os.path.join(DATADIR, 'line_scan_1_minimal_spaces.asc')) == 'xyz'\n assert detect_format(os.path.join(DATADIR, 'example-2d.npy')) == 'npy'\n assert detect_format(os.path.join(DATADIR, 'surface.2048x2048.h5')) == 'h5'\n assert detect_format(os.path.join(DATADIR, 'example.zon')) == 'zon'\n\n\ndef test_to_matrix():\n y = np.arange(10).reshape((1, -1))\n x = np.arange(5).reshape((-1, 1))\n arr = -2 * y + 0 * x\n t = Topography(arr, (5, 10), unit='nm')\n # Check that we can export downstream the pipeline\n with tempfile.TemporaryDirectory() as d:\n t.to_matrix(f\"{d}/topo.txt\")\n t.detrend('center').to_matrix(f'{d}/topo.txt')\n t2 = read_matrix(f'{d}/topo.txt')\n np.testing.assert_allclose(t.detrend('center').heights(), t2.heights())\n",
"#\n# Copyright 2018-2020 Lars Pastewka\n# 2019 Antoine Sanner\n# 2019 Michael Röttger\n# 2015-2016 Till Junge\n#\n# ### MIT license\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n\"\"\"\nBin for small common helper function and classes for nonuniform\ntopographies.\n\"\"\"\n\nimport numpy as np\n\nfrom ..HeightContainer import NonuniformLineScanInterface\n\n\ndef bandwidth(self):\n \"\"\"\n Computes lower and upper bound of bandwidth, i.e. of the wavelengths or\n length scales occurring on a topography. The lower end of the bandwidth is\n given by the mean of the spacing of the individual points on the line\n scan. The upper bound is given by the overall length of the line scan.\n\n Returns\n -------\n lower_bound : float\n Lower bound of the bandwidth.\n upper_bound : float\n Upper bound of the bandwidth.\n \"\"\"\n x = self.positions()\n lower_bound = np.mean(np.diff(x))\n upper_bound, = self.physical_sizes\n\n return lower_bound, upper_bound\n\n\n# Register analysis functions from this module\nNonuniformLineScanInterface.register_function('bandwidth', bandwidth)\n"
] | [
[
"numpy.abs",
"numpy.fft.rfft",
"scipy.signal.get_window",
"numpy.fft.rfftfreq",
"numpy.arange",
"numpy.sqrt",
"numpy.real",
"numpy.where"
],
[
"numpy.isfinite",
"numpy.testing.assert_almost_equal",
"numpy.mean",
"numpy.testing.assert_allclose",
"numpy.array"
],
[
"numpy.array",
"matplotlib.pyplot.subplots"
],
[
"numpy.all",
"numpy.arange",
"numpy.array"
],
[
"numpy.diff"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
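A hedged usage sketch for the pipeline filters recorded in the SurfaceTopography row above (the `window`, `shortcut` and `longcut` functions registered at the end of the filter module). The heights array, physical sizes and cutoff are illustrative placeholders, and a periodic topography is assumed because the cut filters require one; the `periodic=True` constructor argument is an assumption, not taken from the recorded files.

```python
# Sketch only: exercising the registered pipeline functions from the filter
# module above. Array shape, sizes, cutoff and periodic=True are assumptions.
import numpy as np
from SurfaceTopography import Topography

heights = np.random.uniform(size=(64, 64))
t = Topography(heights, (1.0, 1.0), periodic=True)  # assumed constructor kwarg

cutoff = 0.1  # illustrative cutoff wavelength
smoothed = t.shortcut(cutoff_wavelength=cutoff)            # drops |q| > 2*pi/cutoff
rough = t.longcut(cutoff_wavevector=2 * np.pi / cutoff)    # drops |q| < 2*pi/cutoff
windowed = t.window(window="hann", direction="radial")     # tapered heights

print(smoothed.heights().shape, rough.heights().shape, windowed.heights().shape)
```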
georgetown-analytics/DC-Bikeshare | [
"42676654d103cdaddfb76db76d1eece533251261",
"42676654d103cdaddfb76db76d1eece533251261"
] | [
"final_plots/read_aws.py",
"report_queries/dockless_trips_by_operator.py"
] | [
"import psycopg2\nimport psycopg2.extras\nimport pandas as pd\nimport os\nimport time\nfrom pathlib import Path\nfrom dotenv import load_dotenv\n\n\ndef read_only_connect_aws():\n env_path = 'env_readonly.env'\n load_dotenv(dotenv_path=env_path)\n host = \"bikeshare-restored.cs9te7lm3pt2.us-east-1.rds.amazonaws.com\"\n port = 5432\n database = \"bikeshare\"\n\n user = os.environ.get(\"AWS_READONLY_USER\")\n password = os.environ.get(\"AWS_READONLY_PASS\")\n\n # Connect to aws postgres D\n conn = psycopg2.connect(\n host=host, user=user, port=port, password=password,\n database=database)\n return conn\n\n# Function to load cabi data from AWS. Leaving room to add different load\n# types. Right now only allowing a load of all the database\n\n\nclass QueryTool:\n\n def __init__(self, connection, table=None):\n self.connection = connection\n self.table = table\n\n def basic(self):\n query = (\n 'SELECT * from ') + self.table\n dataframe = pd.read_sql(query, con=self.connection)\n return dataframe\n\n def missing_check(self):\n query = (\"\"\"\n SELECT\n COUNT(*) as total_count,\n dt.operator as operator\n FROM dockless_trips as dt\n GROUP BY\n operator;\"\"\")\n dataframe = pd.read_sql(query, con=self.connection)\n return dataframe\n\n def geo_metric(self, cut):\n self.cut = cut\n query = (\"\"\"\n SELECT\n stations.end_region_code,\n stations.start_region_code,\n extract({0} from subq_trip.start_date) as {0},\n COUNT(*) as total_trips\n FROM\n (SELECT * FROM {1} LIMIT 25) as subq_trip\n LEFT JOIN cabi_stations_geo_temp AS stations\n ON subq_trip.start_station = stations.start_short_name\n AND subq_trip.end_station = stations.end_short_name\n GROUP BY\n stations.end_region_code,\n stations.start_region_code,\n extract({0} from subq_trip.start_date);\"\"\").format(cut, table)\n dataframe = pd.read_sql(query, con=self.connection)\n return dataframe\n\n def annual(self, year):\n self.year = year\n start_string = (\n 'SELECT * from cabi_trips '\n 'WHERE EXTRACT(YEAR FROM start_date)=')\n query = start_string + str(self.year)\n dataframe = pd.read_sql(query, con=self.connection)\n return dataframe\n\n def describe_data(self):\n cur = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cur.execute(\"\"\"select *\n from information_schema.columns\n where table_schema NOT IN (\n 'information_schema', 'pg_catalog')\n order by table_schema, table_name\"\"\")\n for row in cur:\n print(\"schema: {schema}, table: {table}, column: {col}, \\\n type: {type}\".format(\n schema=row['table_schema'], table=row['table_name'],\n col=row['column_name'], type=row['data_type']))\n\n\nif __name__ == '__main__':\n print('Running')\n conn = read_only_connect_aws()\n CABI_TRIPS = QueryTool(conn, 'cabi_trips')\n CABI_TRIPS.describe_data()\n",
"import pandas as pd\nimport util_functions as uf\n\nif __name__ == \"__main__\":\n # Connect to AWS\n uf.set_env_path()\n conn, cur = uf.aws_connect()\n\n # Trips by Date and Operator\n df = pd.read_sql(\"\"\"select distinct\n OperatorClean,\n count(*) as trips\n from dockless_trips\n group by OperatorClean\n order by OperatorClean\n \"\"\", con=conn)\n print(df)\n"
] | [
[
"pandas.read_sql"
],
[
"pandas.read_sql"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
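A hedged sketch of how the `QueryTool` class recorded in `final_plots/read_aws.py` above might be used, based on that file's own `__main__` block. The import path, table name and year are placeholders; the read-only credentials are expected in `env_readonly.env` as in the module.

```python
# Sketch only: querying the bikeshare database with QueryTool from the row above.
# The module name `read_aws` and the table/year values are assumptions.
from read_aws import read_only_connect_aws, QueryTool

conn = read_only_connect_aws()
trips = QueryTool(conn, 'cabi_trips')
all_trips = trips.basic()        # SELECT * from cabi_trips
trips_2018 = trips.annual(2018)  # filtered on EXTRACT(YEAR FROM start_date)
print(all_trips.shape, trips_2018.shape)
```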
daroari/pygmt | [
"e022851d62814a9255ed2bb63ae092b666b832b9"
] | [
"pygmt/tests/test_datasets_earth_relief.py"
] | [
"\"\"\"\nTest basic functionality for loading Earth relief datasets.\n\"\"\"\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\nfrom pygmt.datasets import load_earth_relief\nfrom pygmt.exceptions import GMTInvalidInput\n\n\ndef test_earth_relief_fails():\n \"\"\"\n Make sure earth relief fails for invalid resolutions.\n \"\"\"\n resolutions = \"1m 1d bla 60d 001m 03\".split()\n resolutions.append(60)\n for resolution in resolutions:\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(resolution=resolution)\n\n\n# Only test 01d and 30m to avoid downloading large datasets in CI\ndef test_earth_relief_01d():\n \"\"\"\n Test some properties of the earth relief 01d data.\n \"\"\"\n data = load_earth_relief(resolution=\"01d\", registration=\"gridline\")\n assert data.shape == (181, 361)\n npt.assert_allclose(data.lat, np.arange(-90, 91, 1))\n npt.assert_allclose(data.lon, np.arange(-180, 181, 1))\n npt.assert_allclose(data.min(), -8592.5)\n npt.assert_allclose(data.max(), 5559.0)\n\n\ndef test_earth_relief_01d_with_region():\n \"\"\"\n Test loading low-resolution earth relief with 'region'.\n \"\"\"\n data = load_earth_relief(\n resolution=\"01d\", region=[-10, 10, -5, 5], registration=\"gridline\"\n )\n assert data.shape == (11, 21)\n npt.assert_allclose(data.lat, np.arange(-5, 6, 1))\n npt.assert_allclose(data.lon, np.arange(-10, 11, 1))\n npt.assert_allclose(data.min(), -5145)\n npt.assert_allclose(data.max(), 805.5)\n\n\ndef test_earth_relief_30m():\n \"\"\"\n Test some properties of the earth relief 30m data.\n \"\"\"\n data = load_earth_relief(resolution=\"30m\", registration=\"gridline\")\n assert data.shape == (361, 721)\n npt.assert_allclose(data.lat, np.arange(-90, 90.5, 0.5))\n npt.assert_allclose(data.lon, np.arange(-180, 180.5, 0.5))\n npt.assert_allclose(data.min(), -9460.5)\n npt.assert_allclose(data.max(), 5887.5)\n\n\ndef test_earth_relief_05m_with_region():\n \"\"\"\n Test loading a subregion of high-resolution earth relief grid.\n \"\"\"\n data = load_earth_relief(\n resolution=\"05m\", region=[120, 160, 30, 60], registration=\"gridline\"\n )\n assert data.coords[\"lat\"].data.min() == 30.0\n assert data.coords[\"lat\"].data.max() == 60.0\n assert data.coords[\"lon\"].data.min() == 120.0\n assert data.coords[\"lon\"].data.max() == 160.0\n assert data.data.min() == -9633.0\n assert data.data.max() == 2532.0\n assert data.sizes[\"lat\"] == 361\n assert data.sizes[\"lon\"] == 481\n\n\ndef test_earth_relief_05m_without_region():\n \"\"\"\n Test loading high-resolution earth relief without passing 'region'.\n \"\"\"\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(\"05m\")\n\n\ndef test_earth_relief_03s_landonly_srtm():\n \"\"\"\n Test loading original 3 arc-second land-only SRTM tiles.\n \"\"\"\n data = load_earth_relief(\n \"03s\", region=[135, 136, 35, 36], registration=\"gridline\", use_srtm=True\n )\n\n assert data.coords[\"lat\"].data.min() == 35.0\n assert data.coords[\"lat\"].data.max() == 36.0\n assert data.coords[\"lon\"].data.min() == 135.0\n assert data.coords[\"lon\"].data.max() == 136.0\n # data.data.min() == -305.51846 if use_srtm is False.\n assert data.data.min() == -6.0\n assert data.data.max() == 1191.0\n assert data.sizes[\"lat\"] == 1201\n assert data.sizes[\"lon\"] == 1201\n\n\ndef test_earth_relief_incorrect_registration():\n \"\"\"\n Test loading earth relief with incorrect registration type.\n \"\"\"\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(registration=\"improper_type\")\n\n\ndef 
test_earth_relief_invalid_resolution_registration_combination():\n \"\"\"\n Test loading earth relief with invalid combination of resolution and\n registration.\n \"\"\"\n for resolution, registration in [\n (\"15s\", \"gridline\"),\n (\"03s\", \"pixel\"),\n (\"01s\", \"pixel\"),\n ]:\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(resolution=resolution, registration=registration)\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
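A short sketch mirroring the pygmt tests in the row above; it calls `load_earth_relief` exactly as `test_earth_relief_01d` does. It requires pygmt with GMT's remote-data access and downloads a coarse global grid on first use.

```python
# Sketch only: the coarse-resolution call exercised by test_earth_relief_01d.
from pygmt.datasets import load_earth_relief

grid = load_earth_relief(resolution="01d", registration="gridline")
print(grid.shape)                          # (181, 361) per the test expectations
print(float(grid.min()), float(grid.max()))
```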
minhmanho/rrdncnn | [
"f09ef7d92e31bfd43a548bb476970cfe38d32508"
] | [
"pytorch_ssim.py"
] | [
"import torch\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nfrom math import exp\r\n\r\ndef gaussian(window_size, sigma):\r\n gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])\r\n return gauss/gauss.sum()\r\n\r\ndef create_window(window_size, channel):\r\n _1D_window = gaussian(window_size, 1.5).unsqueeze(1)\r\n _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)\r\n window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())\r\n return window\r\n\r\ndef _ssim(img1, img2, window, window_size, channel, size_average = True):\r\n mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)\r\n mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)\r\n\r\n mu1_sq = mu1.pow(2)\r\n mu2_sq = mu2.pow(2)\r\n mu1_mu2 = mu1*mu2\r\n\r\n sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq\r\n sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq\r\n sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2\r\n\r\n C1 = 0.01**2\r\n C2 = 0.03**2\r\n\r\n ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))\r\n\r\n if size_average:\r\n return ssim_map.mean()\r\n else:\r\n return ssim_map.mean(1).mean(1).mean(1)\r\n\r\nclass SSIM(torch.nn.Module):\r\n def __init__(self, window_size = 11, size_average = True):\r\n super(SSIM, self).__init__()\r\n self.window_size = window_size\r\n self.size_average = size_average\r\n self.channel = 1\r\n self.window = create_window(window_size, self.channel)\r\n\r\n def forward(self, img1, img2):\r\n (_, channel, _, _) = img1.size()\r\n\r\n if channel == self.channel and self.window.data.type() == img1.data.type():\r\n window = self.window\r\n else:\r\n window = create_window(self.window_size, channel)\r\n \r\n if img1.is_cuda:\r\n window = window.cuda(img1.get_device())\r\n window = window.type_as(img1)\r\n \r\n self.window = window\r\n self.channel = channel\r\n\r\n\r\n return _ssim(img1, img2, window, self.window_size, channel, self.size_average)\r\n\r\ndef ssim(img1, img2, window_size = 11, size_average = True):\r\n (_, channel, _, _) = img1.size()\r\n window = create_window(window_size, channel)\r\n \r\n if img1.is_cuda:\r\n window = window.cuda(img1.get_device())\r\n window = window.type_as(img1)\r\n \r\n return _ssim(img1, img2, window, window_size, channel, size_average)"
] | [
[
"torch.nn.functional.conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
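A hedged usage sketch for the `pytorch_ssim.py` module recorded above, using its `ssim()` helper and `SSIM` module. Tensors are `(N, C, H, W)` and the shapes are illustrative; the import assumes the file is on the Python path.

```python
# Sketch only: computing SSIM with the module recorded in the row above.
import torch
import pytorch_ssim  # assumes pytorch_ssim.py is importable

img1 = torch.rand(1, 3, 64, 64)
img2 = img1.clone()
print(pytorch_ssim.ssim(img1, img2))  # close to 1.0 for identical images

criterion = pytorch_ssim.SSIM(window_size=11, size_average=True)
print(criterion(img1, torch.rand(1, 3, 64, 64)))  # lower for unrelated images
```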
kay-wong/DiscoBERT | [
"814c741e2a049de3afc489835e0df3ccf9fb4fe9"
] | [
"model/archival_gnns.py"
] | [
"# Graph Conv and Relational Graph Conv\nimport itertools\nimport torch\nfrom typing import List, Union\n\nimport dgl\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom allennlp.common import FromParams\nfrom allennlp.common import Registrable\nfrom allennlp.modules.encoder_base import _EncoderBase\nfrom allennlp.modules.feedforward import FeedForward\nfrom allennlp.modules.layer_norm import LayerNorm\nfrom allennlp.modules.masked_layer_norm import MaskedLayerNorm\nfrom overrides import overrides\n\n\nclass GraphEncoder(_EncoderBase, Registrable):\n def get_input_dim(self) -> int:\n raise NotImplementedError\n\n def get_output_dim(self) -> int:\n raise NotImplementedError\n\n def is_bidirectional(self):\n raise NotImplementedError\n #\n def convert_sent_tensors_to_graphs(self, sent, sent_mask, meta_field, key):\n batch_size, max_sent_num, hdim = sent.shape\n effective_length = torch.sum(sent_mask, dim=1).long().tolist()\n graph_bag = []\n for b in range(batch_size):\n this_sent = sent[b] # max_sent, hdim\n this_len = effective_length[b]\n graph_seed = meta_field[b][key] # List of tuples\n G = dgl.DGLGraph()\n G.add_nodes(max_sent_num)\n # fc_src = [i for i in range(this_len)] * this_len\n # fc_tgt = [[i] * this_len for i in range(this_len)]\n # fc_tgt = list(itertools.chain.from_iterable(fc_tgt))\n fc_src = [x[0] for x in graph_seed]\n fc_tgt = [x[1] for x in graph_seed]\n G.add_edges(fc_src, fc_tgt)\n G.ndata['h'] = this_sent # every node has the parameter\n graph_bag.append(G)\n return graph_bag\n\n\[email protected](\"easy_graph_encoder\")\nclass EasyGraph(GraphEncoder, torch.nn.Module, FromParams):\n def __init__(self,\n input_dim: int,\n num_layers: int,\n hidden_dims: Union[int, List[int]],\n dropout=0.1):\n super().__init__()\n\n if not isinstance(hidden_dims, list):\n hidden_dims = [hidden_dims] * num_layers\n if not isinstance(dropout, list):\n dropout = [dropout] * num_layers # type: ignore\n\n self._activations = [torch.nn.functional.relu] * num_layers\n input_dims = [input_dim] + hidden_dims[:-1]\n linear_layers = []\n for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):\n linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim))\n self._linear_layers = torch.nn.ModuleList(linear_layers)\n dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]\n self._dropout = torch.nn.ModuleList(dropout_layers)\n self._output_dim = hidden_dims[-1]\n\n self.lin = torch.nn.Linear(self._output_dim, self._output_dim)\n self.ln = MaskedLayerNorm(size=hidden_dims[0])\n\n def transform_sent_rep(self, sent_rep, sent_mask, graphs):\n # LayerNorm(x + Sublayer(x))\n output = sent_rep\n\n for layer, activation, dropout in zip(self._linear_layers, self._activations, self._dropout):\n mid = layer(output) # output: batch, seq, feat\n mid = mid.permute(0, 2, 1) # mid: batch, feat, seq\n\n nex = torch.bmm(mid, graphs)\n output = dropout(activation(nex))\n output = output.permute(0, 2, 1) # mid: batch, seq, feat\n middle = sent_rep + self.lin(output)\n output = self.ln.forward(middle, sent_mask)\n return output\n\n\[email protected](\"old_gcn\")\nclass GCN_layers(GraphEncoder, torch.nn.Module, FromParams):\n\n def __init__(self, input_dims: List[int],\n num_layers: int,\n hidden_dims: Union[int, List[int]],\n activations='relu'):\n super(GCN_layers, self).__init__()\n if not isinstance(hidden_dims, list):\n hidden_dims = [hidden_dims] * num_layers\n # TODO remove hard code relu\n activations = [torch.nn.functional.tanh] * num_layers\n assert 
len(input_dims) == len(hidden_dims) == len(activations) == num_layers\n gcn_layers = []\n for layer_input_dim, layer_output_dim, activate in zip(input_dims, hidden_dims, activations):\n gcn_layers.append(GCN(layer_input_dim, layer_output_dim, activate))\n self.layers = nn.ModuleList(gcn_layers)\n self._output_dim = hidden_dims[-1]\n self.input_dim = input_dims[0]\n self.ln = LayerNorm(hidden_dims[0])\n self._mlp = FeedForward(hidden_dims[0], 1, hidden_dims[0], torch.nn.functional.sigmoid)\n\n def transform_sent_rep(self, sent_rep, sent_mask, sent_graph):\n init_graphs = self.convert_sent_tensors_to_graphs(sent_rep, sent_mask)\n unpadated_graphs = []\n for g in init_graphs:\n updated_graph = self.forward(g)\n unpadated_graphs.append(updated_graph)\n recovered_sent = torch.stack(unpadated_graphs, dim=0)\n assert recovered_sent.shape == sent_rep.shape\n return recovered_sent\n\n def convert_sent_tensors_to_graphs(self, sent, sent_mask):\n batch_size, max_sent_num, hdim = sent.shape\n effective_length = torch.sum(sent_mask, dim=1).long().tolist()\n graph_bag = []\n for b in range(batch_size):\n this_sent = sent[b] # max_sent, hdim\n # this_mask = sent_mask[b]\n this_len = effective_length[b]\n\n G = dgl.DGLGraph()\n G.add_nodes(max_sent_num)\n fc_src = [i for i in range(this_len)] * this_len\n fc_tgt = [[i] * this_len for i in range(this_len)]\n fc_tgt = list(itertools.chain.from_iterable(fc_tgt))\n\n G.add_edges(fc_src, fc_tgt)\n G.ndata['h'] = this_sent # every node has the parameter\n graph_bag.append(G)\n return graph_bag\n\n @overrides\n def forward(self, g):\n # h = g.in_degrees().view(-1, 1).float()\n h = g.ndata['h']\n output = h\n for conv in self.layers:\n output = conv(g, output)\n print(output)\n norm_output = self.ln(h + output)\n # print(norm_output)\n # m = self._mlp(norm_output)\n # h = self.ln(norm_output + m)\n h = norm_output\n g.ndata['h'] = h\n hg = dgl.mean_nodes(g, 'h')\n # return g, g.ndata['h'], hg # g is the raw graph, h is the node rep, and hg is the mean of all h\n return g.ndata['h']\n\n def get_input_dim(self) -> int:\n return self.input_dim\n\n def get_output_dim(self) -> int:\n return self._output_dim\n\n @overrides\n def is_bidirectional(self):\n return False\n\n\ndef discourse_oracle(disco_txt, ):\n # oracle labels\n docs = [disc.get_readable_words_as_list() for disc in disco_bag]\n\n # rewrite the docs to accomodate the dependency\n modified_docs_w_deps = []\n oracle_inclusion = []\n for idx, disco in enumerate(disco_bag):\n # tmp_txt, tmp_oracle_inclusion = copy.deepcopy(docs[idx]),[idx]\n tmp_txt, tmp_oracle_inclusion = [], []\n if disco.dep != []:\n for _d in disco.dep:\n if _d < len(docs):\n tmp_txt += docs[_d]\n tmp_oracle_inclusion.append(_d)\n tmp_txt += copy.deepcopy(docs[idx])\n tmp_oracle_inclusion.append(idx)\n modified_docs_w_deps.append(\" \".join(tmp_txt))\n oracle_inclusion.append(tmp_oracle_inclusion)\n else:\n modified_docs_w_deps.append(\n \" \".join(docs[idx])\n )\n oracle_inclusion.append([idx])\n\n yangliu_label = original_greedy_selection([x.split(\" \") for x in modified_docs_w_deps], summary, 5)\n # oracle_ids = greedy_selection(modified_docs_w_deps, summary, oracle_size)\n return yangliu_labelf\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.ModuleList",
"torch.sum",
"torch.nn.Linear",
"torch.bmm",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
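The entry above registers two AllenNLP graph encoders that pass sentence representations through a dense adjacency. Below is a small PyTorch-only sketch of the message-passing step inside `EasyGraph.transform_sent_rep` (linear projection, batched multiplication with an adjacency matrix, activation); the batch size, sequence length, and the uniform fully-connected adjacency are hypothetical stand-ins, not values from the source.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

batch, seq, feat, hidden = 2, 5, 16, 16        # hypothetical sizes
sent_rep = torch.randn(batch, seq, feat)       # stand-in sentence representations
adjacency = torch.ones(batch, seq, seq) / seq  # fully-connected, row-normalised graph

layer = nn.Linear(feat, hidden)
mid = layer(sent_rep)               # batch, seq, hidden
mid = mid.permute(0, 2, 1)          # batch, hidden, seq
nex = torch.bmm(mid, adjacency)     # mix features across connected nodes
out = F.relu(nex).permute(0, 2, 1)  # back to batch, seq, hidden
print(out.shape)                    # torch.Size([2, 5, 16])
```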
paigeco/VirtualGoniometer | [
"536e7e77fbb036ad8d777b42e751a0f3e80b8242"
] | [
"src/AngleMeasurement/RP1DClustering.py"
] | [
"import numpy as np\nfrom .PCASmallestEig import pca_smallest_eig, pca_smallest_eig_powermethod\nfrom .Withness import withness\nfrom .CalculateAngle import get_angle\n\n#RP1D clustering from\n#Han, Sangchun, and Mireille Boutin. \"The hidden structure of image datasets.\" 2015 IEEE International Conference on Image Processing (ICIP). IEEE, 2015.\n############################################\ndef ClusteringMeanRP1D(P,N,T,A=0,UsePCA=True,UsePower=False):\n n = N.shape[0]\n d = N.shape[1]\n v = np.random.rand(T,d)\n \n #u = np.mean(N,axis=0)\n \n if UsePower:\n N1 = pca_smallest_eig_powermethod(N, center=False)\n N1 = np.reshape(N1,(3,))\n else:\n N1 = pca_smallest_eig(N, center=False)\n \n N2 = np.sum(N,axis=0)\n v = np.cross(N1,N2)\n v = v/np.linalg.norm(v)\n \n m = np.mean(P,axis=0)\n dist = np.sqrt(np.sum((P - m)**2,axis=1))\n i = np.argmin(dist)\n radius = np.max(dist)\n D = (P - P[i,:])/radius\n\n #The A=2 is just hand tuned. Larger A encourages the clustering to split the patch in half\n #A=0 is the previous version of the virtual goniometer\n x = np.sum(v*N,axis=1) + A*np.sum(v*D,axis=1)\n\n #Clustering\n _, m = withness(x)\n\n C = np.zeros(n,)\n C[x>m] = 1\n C[x<=m] = 2\n \n P1 = P[C==1,:]\n P2 = P[C==2,:]\n N1 = N[C==1,:]\n N2 = N[C==2,:]\n \n theta, n1, n2 = get_angle(P1,P2,N1,N2,UsePCA = UsePCA, UsePower = UsePower)\n \n \n return C,n1,n2,theta\n\ndef ClusteringRandomRP1D(X,T):\n n = X.shape[0]\n d = X.shape[1]\n v = np.random.rand(T,d)\n u = np.mean(X,axis=0)\n wmin = float(\"inf\")\n imin = 0\n \n #w_list = []\n #m_list = []\n \n for i in range(T):\n x = np.sum((v[i,:]-(np.dot(v[i,:],u)/np.dot(v[i,:],v[i,:]))*u)*X,axis=1)\n w,m = withness(x) \n if w < wmin:\n wmin = w\n imin = i\n \n x = np.sum((v[imin,:]-(np.dot(v[imin,:],u)/np.dot(v[imin,:],v[imin,:]))*u)*X,axis=1)\n \n _,m = withness(x)\n\n C = np.zeros(n,)\n C[x>m] = 1\n C[x<=m] = 2\n \n return C"
] | [
[
"numpy.dot",
"numpy.reshape",
"numpy.linalg.norm",
"numpy.max",
"numpy.mean",
"numpy.argmin",
"numpy.random.rand",
"numpy.cross",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
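The clustering entry above splits a patch of surface normals by projecting onto a single direction and thresholding with a "withness" criterion (Han and Boutin, 2015). The sketch below is a simplified, self-contained re-implementation of that 1-D split for illustration; `withness_split`, the synthetic normals, and the single random projection direction are assumptions, not the package's `withness()` or `get_angle()`.

```python
import numpy as np

def withness_split(x):
    """Return (withness, threshold) minimising within-cluster variance of a 1-D split."""
    xs = np.sort(x)
    best_w, best_m = np.inf, xs[0]
    for m in xs[:-1]:  # candidate thresholds between sorted points
        left, right = x[x <= m], x[x > m]
        if len(left) == 0 or len(right) == 0:
            continue
        w = (left.var() * len(left) + right.var() * len(right)) / (x.var() * len(x))
        if w < best_w:
            best_w, best_m = w, m
    return best_w, best_m

rng = np.random.default_rng(0)
# two synthetic clusters of surface normals (hypothetical data)
N = np.vstack([rng.normal([0.0, 0.0, 1.0], 0.05, (100, 3)),
               rng.normal([0.0, 1.0, 0.0], 0.05, (100, 3))])
v = rng.normal(size=3)
v /= np.linalg.norm(v)         # random 1-D projection direction
x = N @ v
_, m = withness_split(x)
C = np.where(x > m, 1, 2)      # cluster labels, as in the source
print(np.bincount(C)[1:])      # sizes of the two recovered clusters
```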
nipreps/mriqc | [
"e021008da0a2ef1c48e882baf932139a673349f9"
] | [
"mriqc/interfaces/anatomical.py"
] | [
"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n#\n# Copyright 2021 The NiPreps Developers <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# We support and encourage derived works from this project, please read\n# about our expectations at\n#\n# https://www.nipreps.org/community/licensing/\n#\n\"\"\"Nipype interfaces to support anatomical workflow.\"\"\"\nimport os.path as op\nfrom builtins import zip\n\nimport nibabel as nb\nimport numpy as np\nimport scipy.ndimage as nd\nfrom mriqc.qc.anatomical import (\n art_qi1,\n art_qi2,\n cjv,\n cnr,\n efc,\n fber,\n rpve,\n snr,\n snr_dietrich,\n summary_stats,\n volume_fraction,\n wm2max,\n)\nfrom mriqc.utils.misc import _flatten_dict\nfrom nipype.interfaces.base import (\n BaseInterfaceInputSpec,\n File,\n InputMultiPath,\n SimpleInterface,\n TraitedSpec,\n isdefined,\n traits,\n)\nfrom nipype.utils.filemanip import fname_presuffix\n\n\nclass StructuralQCInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=\"file to be plotted\")\n in_noinu = File(exists=True, mandatory=True, desc=\"image after INU correction\")\n in_segm = File(exists=True, mandatory=True, desc=\"segmentation file from FSL FAST\")\n in_bias = File(exists=True, mandatory=True, desc=\"bias file\")\n head_msk = File(exists=True, mandatory=True, desc=\"head mask\")\n air_msk = File(exists=True, mandatory=True, desc=\"air mask\")\n rot_msk = File(exists=True, mandatory=True, desc=\"rotation mask\")\n artifact_msk = File(exists=True, mandatory=True, desc=\"air mask\")\n in_pvms = InputMultiPath(\n File(exists=True),\n mandatory=True,\n desc=\"partial volume maps from FSL FAST\",\n )\n in_tpms = InputMultiPath(File(), desc=\"tissue probability maps from FSL FAST\")\n mni_tpms = InputMultiPath(File(), desc=\"tissue probability maps from FSL FAST\")\n in_fwhm = traits.List(\n traits.Float, mandatory=True, desc=\"smoothness estimated with AFNI\"\n )\n human = traits.Bool(True, usedefault=True, desc=\"human workflow\")\n\n\nclass StructuralQCOutputSpec(TraitedSpec):\n summary = traits.Dict(desc=\"summary statistics per tissue\")\n icvs = traits.Dict(desc=\"intracranial volume (ICV) fractions\")\n rpve = traits.Dict(desc=\"partial volume fractions\")\n size = traits.Dict(desc=\"image sizes\")\n spacing = traits.Dict(desc=\"image sizes\")\n fwhm = traits.Dict(desc=\"full width half-maximum measure\")\n inu = traits.Dict(desc=\"summary statistics of the bias field\")\n snr = traits.Dict\n snrd = traits.Dict\n cnr = traits.Float\n fber = traits.Float\n efc = traits.Float\n qi_1 = traits.Float\n wm2max = traits.Float\n cjv = traits.Float\n out_qc = traits.Dict(desc=\"output flattened dictionary with all measures\")\n out_noisefit = File(exists=True, desc=\"plot of background noise and chi fitting\")\n tpm_overlap = traits.Dict\n\n\nclass StructuralQC(SimpleInterface):\n \"\"\"\n Computes anatomical :abbr:`QC (Quality Control)` measures on the\n structural image given as 
input\n\n \"\"\"\n\n input_spec = StructuralQCInputSpec\n output_spec = StructuralQCOutputSpec\n\n def _run_interface(self, runtime): # pylint: disable=R0914,E1101\n imnii = nb.load(self.inputs.in_noinu)\n erode = (\n np.all(np.array(imnii.header.get_zooms()[:3], dtype=np.float32) < 1.9)\n if self.inputs.human\n else False\n )\n\n # Load image corrected for INU\n inudata = np.nan_to_num(imnii.get_fdata())\n inudata[inudata < 0] = 0\n\n if np.all(inudata < 1e-5):\n raise RuntimeError(\n \"Input inhomogeneity-corrected data seem empty. \"\n \"MRIQC failed to process this dataset.\"\n )\n\n # Load binary segmentation from FSL FAST\n segnii = nb.load(self.inputs.in_segm)\n segdata = np.asanyarray(segnii.dataobj).astype(np.uint8)\n\n if np.sum(segdata > 0) < 1e3:\n raise RuntimeError(\n \"Input segmentation data is likely corrupt. \"\n \"MRIQC failed to process this dataset.\"\n )\n\n # Load air, artifacts and head masks\n airdata = np.asanyarray(nb.load(self.inputs.air_msk).dataobj).astype(np.uint8)\n artdata = np.asanyarray(nb.load(self.inputs.artifact_msk).dataobj).astype(\n np.uint8\n )\n\n headdata = np.asanyarray(nb.load(self.inputs.head_msk).dataobj).astype(np.uint8)\n if np.sum(headdata > 0) < 100:\n raise RuntimeError(\n \"Detected less than 100 voxels belonging to the head mask. \"\n \"MRIQC failed to process this dataset.\"\n )\n\n rotdata = np.asanyarray(nb.load(self.inputs.rot_msk).dataobj).astype(np.uint8)\n\n # Load Partial Volume Maps (pvms) from FSL FAST\n pvmdata = []\n for fname in self.inputs.in_pvms:\n pvmdata.append(nb.load(fname).get_fdata(dtype=\"float32\"))\n if np.sum(pvmdata[-1] > 1e-4) < 10:\n raise RuntimeError(\n \"Detected less than 10 voxels belonging to one tissue prob. map. \"\n \"MRIQC failed to process this dataset.\"\n )\n\n # Summary stats\n stats = summary_stats(inudata, pvmdata, airdata, erode=erode)\n self._results[\"summary\"] = stats\n\n # SNR\n snrvals = []\n self._results[\"snr\"] = {}\n for tlabel in [\"csf\", \"wm\", \"gm\"]:\n snrvals.append(\n snr(\n stats[tlabel][\"median\"],\n stats[tlabel][\"stdv\"],\n stats[tlabel][\"n\"],\n )\n )\n self._results[\"snr\"][tlabel] = snrvals[-1]\n self._results[\"snr\"][\"total\"] = float(np.mean(snrvals))\n\n snrvals = []\n self._results[\"snrd\"] = {\n tlabel: snr_dietrich(\n stats[tlabel][\"median\"],\n mad_air=stats[\"bg\"][\"mad\"],\n sigma_air=stats[\"bg\"][\"stdv\"],\n )\n for tlabel in [\"csf\", \"wm\", \"gm\"]\n }\n self._results[\"snrd\"][\"total\"] = float(\n np.mean([val for _, val in list(self._results[\"snrd\"].items())])\n )\n\n # CNR\n self._results[\"cnr\"] = cnr(\n stats[\"wm\"][\"median\"],\n stats[\"gm\"][\"median\"],\n stats[\"bg\"][\"stdv\"],\n stats[\"wm\"][\"stdv\"],\n stats[\"gm\"][\"stdv\"],\n )\n\n # FBER\n self._results[\"fber\"] = fber(inudata, headdata, rotdata)\n\n # EFC\n self._results[\"efc\"] = efc(inudata, rotdata)\n\n # M2WM\n self._results[\"wm2max\"] = wm2max(inudata, stats[\"wm\"][\"median\"])\n\n # Artifacts\n self._results[\"qi_1\"] = art_qi1(airdata, artdata)\n\n # CJV\n self._results[\"cjv\"] = cjv(\n # mu_wm, mu_gm, sigma_wm, sigma_gm\n stats[\"wm\"][\"median\"],\n stats[\"gm\"][\"median\"],\n stats[\"wm\"][\"mad\"],\n stats[\"gm\"][\"mad\"],\n )\n\n # FWHM\n fwhm = np.array(self.inputs.in_fwhm[:3]) / np.array(\n imnii.header.get_zooms()[:3]\n )\n self._results[\"fwhm\"] = {\n \"x\": float(fwhm[0]),\n \"y\": float(fwhm[1]),\n \"z\": float(fwhm[2]),\n \"avg\": float(np.average(fwhm)),\n }\n\n # ICVs\n self._results[\"icvs\"] = volume_fraction(pvmdata)\n\n # RPVE\n 
self._results[\"rpve\"] = rpve(pvmdata, segdata)\n\n # Image specs\n self._results[\"size\"] = {\n \"x\": int(inudata.shape[0]),\n \"y\": int(inudata.shape[1]),\n \"z\": int(inudata.shape[2]),\n }\n self._results[\"spacing\"] = {\n i: float(v) for i, v in zip([\"x\", \"y\", \"z\"], imnii.header.get_zooms()[:3])\n }\n\n try:\n self._results[\"size\"][\"t\"] = int(inudata.shape[3])\n except IndexError:\n pass\n\n try:\n self._results[\"spacing\"][\"tr\"] = float(imnii.header.get_zooms()[3])\n except IndexError:\n pass\n\n # Bias\n bias = nb.load(self.inputs.in_bias).get_fdata()[segdata > 0]\n self._results[\"inu\"] = {\n \"range\": float(\n np.abs(np.percentile(bias, 95.0) - np.percentile(bias, 5.0))\n ),\n \"med\": float(np.median(bias)),\n } # pylint: disable=E1101\n\n mni_tpms = [nb.load(tpm).get_fdata() for tpm in self.inputs.mni_tpms]\n in_tpms = [nb.load(tpm).get_fdata() for tpm in self.inputs.in_pvms]\n overlap = fuzzy_jaccard(in_tpms, mni_tpms)\n self._results[\"tpm_overlap\"] = {\n \"csf\": overlap[0],\n \"gm\": overlap[1],\n \"wm\": overlap[2],\n }\n\n # Flatten the dictionary\n self._results[\"out_qc\"] = _flatten_dict(self._results)\n return runtime\n\n\nclass ArtifactMaskInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=\"File to be plotted\")\n head_mask = File(exists=True, mandatory=True, desc=\"head mask\")\n rot_mask = File(exists=True, desc=\"a rotation mask\")\n nasion_post_mask = File(\n exists=True,\n mandatory=True,\n desc=\"nasion to posterior of cerebellum mask\",\n )\n\n\nclass ArtifactMaskOutputSpec(TraitedSpec):\n out_hat_msk = File(exists=True, desc='output \"hat\" mask')\n out_art_msk = File(exists=True, desc=\"output artifacts mask\")\n out_air_msk = File(exists=True, desc='output \"hat\" mask, without artifacts')\n\n\nclass ArtifactMask(SimpleInterface):\n \"\"\"\n Computes the artifact mask using the method described in [Mortamet2009]_.\n \"\"\"\n\n input_spec = ArtifactMaskInputSpec\n output_spec = ArtifactMaskOutputSpec\n\n def _run_interface(self, runtime):\n imnii = nb.load(self.inputs.in_file)\n imdata = np.nan_to_num(imnii.get_fdata().astype(np.float32))\n\n # Remove negative values\n imdata[imdata < 0] = 0\n\n hmdata = np.asanyarray(nb.load(self.inputs.head_mask).dataobj)\n npdata = np.asanyarray(nb.load(self.inputs.nasion_post_mask).dataobj)\n\n # Invert head mask\n airdata = np.ones_like(hmdata, dtype=np.uint8)\n airdata[hmdata == 1] = 0\n\n # Calculate distance to border\n dist = nd.morphology.distance_transform_edt(airdata)\n\n # Apply nasion-to-posterior mask\n airdata[npdata == 1] = 0\n dist[npdata == 1] = 0\n dist /= dist.max()\n\n # Apply rotation mask (if supplied)\n if isdefined(self.inputs.rot_mask):\n rotmskdata = np.asanyarray(nb.load(self.inputs.rot_mask).dataobj)\n airdata[rotmskdata == 1] = 0\n\n # Run the artifact detection\n qi1_img = artifact_mask(imdata, airdata, dist)\n\n fname, ext = op.splitext(op.basename(self.inputs.in_file))\n if ext == \".gz\":\n fname, ext2 = op.splitext(fname)\n ext = ext2 + ext\n\n self._results[\"out_hat_msk\"] = op.abspath(\"{}_hat{}\".format(fname, ext))\n self._results[\"out_art_msk\"] = op.abspath(\"{}_art{}\".format(fname, ext))\n self._results[\"out_air_msk\"] = op.abspath(\"{}_air{}\".format(fname, ext))\n\n hdr = imnii.header.copy()\n hdr.set_data_dtype(np.uint8)\n nb.Nifti1Image(qi1_img, imnii.affine, hdr).to_filename(\n self._results[\"out_art_msk\"]\n )\n\n nb.Nifti1Image(airdata, imnii.affine, hdr).to_filename(\n self._results[\"out_hat_msk\"]\n )\n\n 
airdata[qi1_img > 0] = 0\n nb.Nifti1Image(airdata, imnii.affine, hdr).to_filename(\n self._results[\"out_air_msk\"]\n )\n return runtime\n\n\nclass ComputeQI2InputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=\"File to be plotted\")\n air_msk = File(exists=True, mandatory=True, desc=\"air (without artifacts) mask\")\n\n\nclass ComputeQI2OutputSpec(TraitedSpec):\n qi2 = traits.Float(desc=\"computed QI2 value\")\n out_file = File(desc=\"output plot: noise fit\")\n\n\nclass ComputeQI2(SimpleInterface):\n \"\"\"\n Computes the artifact mask using the method described in [Mortamet2009]_.\n \"\"\"\n\n input_spec = ComputeQI2InputSpec\n output_spec = ComputeQI2OutputSpec\n\n def _run_interface(self, runtime):\n imdata = nb.load(self.inputs.in_file).get_fdata()\n airdata = nb.load(self.inputs.air_msk).get_fdata()\n qi2, out_file = art_qi2(imdata, airdata)\n self._results[\"qi2\"] = qi2\n self._results[\"out_file\"] = out_file\n return runtime\n\n\nclass HarmonizeInputSpec(BaseInterfaceInputSpec):\n in_file = File(\n exists=True, mandatory=True, desc=\"input data (after bias correction)\"\n )\n wm_mask = File(exists=True, mandatory=True, desc=\"white-matter mask\")\n erodemsk = traits.Bool(True, usedefault=True, desc=\"erode mask\")\n thresh = traits.Float(0.9, usedefault=True, desc=\"WM probability threshold\")\n\n\nclass HarmonizeOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc=\"input data (after intensity harmonization)\")\n\n\nclass Harmonize(SimpleInterface):\n \"\"\"\n Computes the artifact mask using the method described in [Mortamet2009]_.\n \"\"\"\n\n input_spec = HarmonizeInputSpec\n output_spec = HarmonizeOutputSpec\n\n def _run_interface(self, runtime):\n\n in_file = nb.load(self.inputs.in_file)\n wm_mask = nb.load(self.inputs.wm_mask).get_fdata()\n wm_mask[wm_mask < 0.9] = 0\n wm_mask[wm_mask > 0] = 1\n wm_mask = wm_mask.astype(np.uint8)\n\n if self.inputs.erodemsk:\n # Create a structural element to be used in an opening operation.\n struc = nd.generate_binary_structure(3, 2)\n # Perform an opening operation on the background data.\n wm_mask = nd.binary_erosion(wm_mask, structure=struc).astype(np.uint8)\n\n data = in_file.get_fdata()\n data *= 1000.0 / np.median(data[wm_mask > 0])\n\n out_file = fname_presuffix(\n self.inputs.in_file, suffix=\"_harmonized\", newpath=\".\"\n )\n in_file.__class__(data, in_file.affine, in_file.header).to_filename(out_file)\n\n self._results[\"out_file\"] = out_file\n\n return runtime\n\n\nclass RotationMaskInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=\"input data\")\n\n\nclass RotationMaskOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc=\"rotation mask (if any)\")\n\n\nclass RotationMask(SimpleInterface):\n \"\"\"\n Computes the artifact mask using the method described in [Mortamet2009]_.\n \"\"\"\n\n input_spec = RotationMaskInputSpec\n output_spec = RotationMaskOutputSpec\n\n def _run_interface(self, runtime):\n in_file = nb.load(self.inputs.in_file)\n data = in_file.get_fdata()\n mask = data <= 0\n\n # Pad one pixel to control behavior on borders of binary_opening\n mask = np.pad(mask, pad_width=(1,), mode=\"constant\", constant_values=1)\n\n # Remove noise\n struc = nd.generate_binary_structure(3, 2)\n mask = nd.binary_opening(mask, structure=struc).astype(np.uint8)\n\n # Remove small objects\n label_im, nb_labels = nd.label(mask)\n if nb_labels > 2:\n sizes = nd.sum(mask, label_im, list(range(nb_labels + 1)))\n ordered = 
list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))\n for _, label in ordered[2:]:\n mask[label_im == label] = 0\n\n # Un-pad\n mask = mask[1:-1, 1:-1, 1:-1]\n\n # If mask is small, clean-up\n if mask.sum() < 500:\n mask = np.zeros_like(mask, dtype=np.uint8)\n\n out_img = in_file.__class__(mask, in_file.affine, in_file.header)\n out_img.header.set_data_dtype(np.uint8)\n\n out_file = fname_presuffix(self.inputs.in_file, suffix=\"_rotmask\", newpath=\".\")\n out_img.to_filename(out_file)\n self._results[\"out_file\"] = out_file\n return runtime\n\n\ndef artifact_mask(imdata, airdata, distance, zscore=10.0):\n \"\"\"Computes a mask of artifacts found in the air region\"\"\"\n from statsmodels.robust.scale import mad\n\n if not np.issubdtype(airdata.dtype, np.integer):\n airdata[airdata < 0.95] = 0\n airdata[airdata > 0.0] = 1\n\n bg_img = imdata * airdata\n if np.sum((bg_img > 0).astype(np.uint8)) < 100:\n return np.zeros_like(airdata)\n\n # Find the background threshold (the most frequently occurring value\n # excluding 0)\n bg_location = np.median(bg_img[bg_img > 0])\n bg_spread = mad(bg_img[bg_img > 0])\n bg_img[bg_img > 0] -= bg_location\n bg_img[bg_img > 0] /= bg_spread\n\n # Apply this threshold to the background voxels to identify voxels\n # contributing artifacts.\n qi1_img = np.zeros_like(bg_img)\n qi1_img[bg_img > zscore] = 1\n qi1_img[distance < 0.10] = 0\n\n # Create a structural element to be used in an opening operation.\n struc = nd.generate_binary_structure(3, 1)\n qi1_img = nd.binary_opening(qi1_img, struc).astype(np.uint8)\n qi1_img[airdata <= 0] = 0\n\n return qi1_img\n\n\ndef fuzzy_jaccard(in_tpms, in_mni_tpms):\n overlaps = []\n for tpm, mni_tpm in zip(in_tpms, in_mni_tpms):\n tpm = tpm.reshape(-1)\n mni_tpm = mni_tpm.reshape(-1)\n\n num = np.min([tpm, mni_tpm], axis=0).sum()\n den = np.max([tpm, mni_tpm], axis=0).sum()\n overlaps.append(float(num / den))\n return overlaps\n"
] | [
[
"scipy.ndimage.binary_erosion",
"numpy.issubdtype",
"numpy.all",
"numpy.max",
"numpy.zeros_like",
"numpy.mean",
"scipy.ndimage.binary_opening",
"numpy.ones_like",
"numpy.pad",
"scipy.ndimage.generate_binary_structure",
"numpy.asanyarray",
"numpy.min",
"numpy.median",
"scipy.ndimage.label",
"numpy.array",
"numpy.sum",
"scipy.ndimage.morphology.distance_transform_edt",
"numpy.percentile",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.10",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
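The mriqc interface above ends with `fuzzy_jaccard`, which scores the overlap between subject and template tissue probability maps as the sum of voxel-wise minima over the sum of voxel-wise maxima. A minimal standalone run of that same computation on synthetic arrays (stand-ins for the FAST and MNI maps) might look like this:

```python
import numpy as np

def fuzzy_jaccard(in_tpms, in_mni_tpms):
    # same computation as the helper at the end of the interface file above
    overlaps = []
    for tpm, mni_tpm in zip(in_tpms, in_mni_tpms):
        tpm, mni_tpm = tpm.reshape(-1), mni_tpm.reshape(-1)
        num = np.min([tpm, mni_tpm], axis=0).sum()
        den = np.max([tpm, mni_tpm], axis=0).sum()
        overlaps.append(float(num / den))
    return overlaps

rng = np.random.default_rng(1)
subject_tpms = [rng.random((8, 8, 8)) for _ in range(3)]           # fake csf/gm/wm maps
template_tpms = [np.clip(t + rng.normal(0.0, 0.1, t.shape), 0, 1)  # perturbed copies
                 for t in subject_tpms]
print(fuzzy_jaccard(subject_tpms, template_tpms))                  # three overlap scores
```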
GrumpyMeow/ownphotos-backend | [
"98d8e9136e9188009afe08657f943dba3df80ccb"
] | [
"api/util.py"
] | [
"import base64\nimport pickle\nimport itertools\n\nfrom scipy import linalg\nfrom sklearn.decomposition import PCA\nimport numpy as np\nfrom sklearn import cluster\nfrom sklearn import mixture\nfrom scipy.spatial import distance\nfrom sklearn.preprocessing import StandardScaler\n\n\nimport requests\n\nfrom config import mapzen_api_key, mapbox_api_key\n\nimport logging\nimport logging.handlers\n\nimport spacy\n\nnlp = spacy.load('en_core_web_sm')\n\nlogger = logging.getLogger('ownphotos')\nfomatter = logging.Formatter(\n '%(asctime)s : %(filename)s : %(funcName)s : %(lineno)s : %(levelname)s : %(message)s')\nfileMaxByte = 256 * 1024 * 200 # 100MB\nfileHandler = logging.handlers.RotatingFileHandler(\n './logs/ownphotos.log', maxBytes=fileMaxByte, backupCount=10)\nfileHandler.setFormatter(fomatter)\nlogger.addHandler(fileHandler)\nlogger.setLevel(logging.INFO)\n\n\n\ndef convert_to_degrees(values):\n \"\"\"\n Helper function to convert the GPS coordinates stored in the EXIF to degress in float format\n :param value:\n :type value: exifread.utils.Ratio\n :rtype: float\n \"\"\"\n d = float(values[0].num) / float(values[0].den)\n m = float(values[1].num) / float(values[1].den)\n s = float(values[2].num) / float(values[2].den)\n\n return d + (m / 60.0) + (s / 3600.0)\n\nweekdays = {1:'Monday',2:'Tuesday',3:'Wednesday',4:'Thursday',5:'Friday',6:'Saturday',7:'Sunday'}\n\n\n\ndef compute_bic(kmeans,X):\n \"\"\"\n Computes the BIC metric for a given clusters\n\n Parameters:\n -----------------------------------------\n kmeans: List of clustering object from scikit learn\n\n X : multidimension np array of data points\n\n Returns:\n -----------------------------------------\n BIC value\n \"\"\"\n # assign centers and labels\n centers = [kmeans.cluster_centers_]\n labels = kmeans.labels_\n #number of clusters\n m = kmeans.n_clusters\n # size of the clusters\n n = np.bincount(labels)\n #size of data set\n N, d = X.shape\n\n #compute variance for all clusters beforehand\n cl_var = (1.0 / (N - m) / d) * sum([sum(distance.cdist(X[np.where(labels == i)], [centers[0][i]], \n 'euclidean')**2) for i in range(m)])\n\n const_term = 0.5 * m * np.log(N) * (d+1)\n\n BIC = np.sum([n[i] * np.log(n[i]) -\n n[i] * np.log(N) -\n ((n[i] * d) / 2) * np.log(2*np.pi*cl_var) -\n ((n[i] - 1) * d/ 2) for i in range(m)]) - const_term\n\n return(BIC)\n\n\ndef mapzen_reverse_geocode(lat,lon):\n url = \"https://search.mapzen.com/v1/reverse?point.lat=%f&point.lon=%f&size=1&lang=en&api_key=%s\"%(lat,lon,mapzen_api_key)\n resp = requests.get(url)\n if resp.status_code == 200:\n resp_json = resp.json()\n search_text = []\n if len(resp_json['features']) > 0:\n if 'country' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['country'])\n if 'county' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['county'])\n if 'macrocounty' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['macrocounty'])\n if 'locality' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['locality'])\n if 'region' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['region'])\n if 'neighbourhood' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['neighbourhood'])\n if 'name' in 
resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['name'])\n if 'label' in resp_json['features'][0]['properties'].keys():\n search_text.append(resp_json['features'][0]['properties']['label'])\n search_text = ' '.join(search_text)\n search_text = search_text.replace(',',' ')\n search_text_tokens = list(set(search_text.split()))\n search_text = ' '.join(search_text_tokens)\n resp_json['search_text'] = search_text\n return resp_json\n else:\n return {}\n\n\ndef mapbox_reverse_geocode(lat,lon):\n url = \"https://api.mapbox.com/geocoding/v5/mapbox.places/%f,%f.json?access_token=%s\"%(lon,lat,mapbox_api_key)\n resp = requests.get(url)\n print(resp)\n if resp.status_code == 200:\n resp_json = resp.json()\n search_terms = []\n\n if 'features' in resp_json.keys():\n for feature in resp_json['features']:\n search_terms.append(feature['text'])\n\n logger.info('location search terms: %s'%(' '.join(search_terms)))\n resp_json['search_text'] = ' '.join(search_terms)\n return resp_json\n else:\n logger.info('mapbox returned non 200 response.')\n return {}\n"
] | [
[
"numpy.log",
"numpy.where",
"numpy.bincount"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
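The utility module above defines `compute_bic` for scoring scikit-learn KMeans fits. A hedged usage sketch follows, reproducing the formula from the entry and applying it to synthetic 2-D clusters; the data, seed, and candidate cluster counts are illustrative assumptions.

```python
import numpy as np
from scipy.spatial import distance
from sklearn.cluster import KMeans

def compute_bic(kmeans, X):
    # same formula as the helper in the source file above
    centers = [kmeans.cluster_centers_]
    labels = kmeans.labels_
    m = kmeans.n_clusters
    n = np.bincount(labels)
    N, d = X.shape
    cl_var = (1.0 / (N - m) / d) * sum(
        [sum(distance.cdist(X[np.where(labels == i)], [centers[0][i]], 'euclidean') ** 2)
         for i in range(m)])
    const_term = 0.5 * m * np.log(N) * (d + 1)
    return (np.sum([n[i] * np.log(n[i]) - n[i] * np.log(N)
                    - ((n[i] * d) / 2) * np.log(2 * np.pi * cl_var)
                    - ((n[i] - 1) * d / 2) for i in range(m)]) - const_term)

np.random.seed(0)
X = np.vstack([np.random.randn(50, 2) + c for c in ([0, 0], [5, 5], [0, 5])])
for k in (2, 3, 4):
    km = KMeans(n_clusters=k, n_init=10, random_state=0).fit(X)
    print(k, float(compute_bic(km, X)))  # BIC for each candidate k
```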
matham/Ceed | [
"b81a14a6b8211e5f4582418ddea34c951ab2667e"
] | [
"ceed/tests/test_app/test_stage.py"
] | [
"import os\nimport sys\nimport math\nfrom contextlib import contextmanager\nfrom math import isclose\nimport numpy as np\nimport pytest\n\nimport ceed\nfrom .examples.stages import create_test_stages, make_stage, StageWrapper, \\\n stage_classes, assert_stages_same\nfrom typing import Type, List, Union\nfrom ceed.tests.ceed_app import CeedTestApp\nfrom ceed.tests.test_app import replace_text, touch_widget, escape, \\\n run_plugin_experiment\nfrom ceed.stage import CeedStage, CeedStageRef, last_experiment_stage_name\nfrom ceed.function import CeedFuncRef, FuncBase, FuncGroup\nfrom ceed.shape import CeedShape, CeedShapeGroup\nfrom .examples.shapes import assert_add_three_groups, CircleShapeP1\nfrom .examples.funcs import create_funcs, GroupFunction\nfrom .examples.stages import fake_plugin_stage, SerialAllStage\nfrom .examples.experiment import wait_stage_experiment_started, \\\n wait_experiment_done, measure_fps, wait_experiment_stopped\nfrom .test_func import assert_func_params_in_gui, \\\n replace_last_ref_with_original_func, assert_funcs_same\n\npytestmark = pytest.mark.ceed_app\n\n\nasync def assert_set_params_in_gui(\n stage_app: CeedTestApp, stage: StageWrapper, settings=None,\n check_name=False):\n opened_settings = settings is None\n if opened_settings:\n settings = await open_stage_settings(stage_app, stage.stage)\n\n if check_name:\n name = stage_app.resolve_widget(settings).down(\n test_name='stage name')()\n assert name.text != stage.name\n assert name.text == stage.stage.name\n await replace_text(stage_app, name, stage.name)\n assert name.text == stage.name\n assert name.text == stage.stage.name\n\n # verify colors\n for color in ('r', 'g', 'b'):\n widget = stage_app.resolve_widget(settings).down(\n test_name='stage color {}'.format(color))()\n prop = 'color_{}'.format(color)\n # the stage values should always match the GUI values\n assert getattr(stage.stage, prop) == (widget.state == 'down')\n # if the wrapper need to change the value, do it\n if getattr(stage, prop) != getattr(stage.stage, prop):\n await touch_widget(stage_app, widget)\n\n # make sure it was changed\n assert getattr(stage.stage, prop) == (widget.state == 'down')\n assert getattr(stage, prop) == getattr(stage.stage, prop)\n\n # parallel vs serial\n serial = stage_app.resolve_widget(settings).down(\n test_name='stage serial')()\n parallel = stage_app.resolve_widget(settings).down(\n test_name='stage parallel')()\n assert (stage.stage.order == 'serial') == (serial.state == 'down') and \\\n (stage.stage.order == 'parallel') == (parallel.state == 'down')\n\n # set the GUI to the correct value\n if stage.order == 'parallel' and parallel.state != 'down':\n await touch_widget(stage_app, parallel)\n elif stage.order == 'serial' and serial.state != 'down':\n await touch_widget(stage_app, serial)\n assert (stage.stage.order == 'serial') == (serial.state == 'down') and \\\n (stage.stage.order == 'parallel') == (parallel.state == 'down')\n assert (stage.order == 'serial') == (serial.state == 'down') and \\\n (stage.order == 'parallel') == (parallel.state == 'down')\n\n # complete_on all vs any\n all_w = stage_app.resolve_widget(settings).down(\n test_name='stage finish all')()\n any_w = stage_app.resolve_widget(settings).down(\n test_name='stage finish any')()\n assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \\\n (stage.stage.complete_on == 'any') == (any_w.state == 'down')\n\n # set the GUI to the correct value\n if stage.complete_on == 'all' and all_w.state != 'down':\n await 
touch_widget(stage_app, all_w)\n elif stage.complete_on == 'any' and any_w.state != 'down':\n await touch_widget(stage_app, any_w)\n\n assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \\\n (stage.stage.complete_on == 'any') == (any_w.state == 'down')\n assert (stage.complete_on == 'all') == (all_w.state == 'down') and \\\n (stage.complete_on == 'any') == (any_w.state == 'down')\n\n if opened_settings:\n await escape(stage_app)\n return settings\n\n\nasync def assert_stage_params_in_gui(\n stage_app: CeedTestApp, stage: StageWrapper, settings=None,\n check_name=False):\n opened_settings = settings is None\n if opened_settings:\n settings = await open_stage_settings(stage_app, stage.stage)\n\n if check_name:\n name = stage_app.resolve_widget(settings).down(\n test_name='stage name')()\n name_label = stage_app.resolve_widget(stage.stage.display).down(\n test_name='stage label')()\n assert name.text == stage.name\n assert name_label.text == stage.name\n assert name.text == stage.stage.name\n\n # verify colors\n for color in ('r', 'g', 'b'):\n widget = stage_app.resolve_widget(settings).down(\n test_name='stage color {}'.format(color))()\n prop = 'color_{}'.format(color)\n assert getattr(stage.stage, prop) == (widget.state == 'down')\n assert getattr(stage, prop) == getattr(stage.stage, prop)\n\n # parallel vs serial\n serial = stage_app.resolve_widget(settings).down(\n test_name='stage serial')()\n parallel = stage_app.resolve_widget(settings).down(\n test_name='stage parallel')()\n assert (stage.stage.order == 'serial') == (serial.state == 'down') and \\\n (stage.stage.order == 'parallel') == (parallel.state == 'down')\n\n # complete_on all vs any\n all_w = stage_app.resolve_widget(settings).down(\n test_name='stage finish all')()\n any_w = stage_app.resolve_widget(settings).down(\n test_name='stage finish any')()\n assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \\\n (stage.stage.complete_on == 'any') == (any_w.state == 'down')\n\n if opened_settings:\n await escape(stage_app)\n return settings\n\n\nasync def replace_last_ref_with_original_stage(\n stage_app: CeedTestApp,\n stages: List[Union[CeedStageRef, CeedStage]], name: str):\n start_stages = stages[:]\n ref_stage = stages[-1]\n # it should be a ref to start with\n assert isinstance(ref_stage, CeedStageRef)\n # make sure the class name matches - we added the right class\n assert ref_stage.stage.name == name\n\n # the label of the new sub-stage\n sub_stage_widget = ref_stage.display\n name_w = stage_app.resolve_widget(sub_stage_widget).down(\n test_name='stage label')()\n assert name_w.text == name\n # replace the ref with a copy of the stage\n ref_btn = stage_app.resolve_widget(sub_stage_widget).down(\n test_name='stage settings open')()\n await touch_widget(stage_app, ref_btn)\n\n # should now have replaced the ref with a copy of the original\n assert ref_stage not in stages\n assert len(stages) == len(start_stages)\n\n new_stage = stages[-1]\n assert ref_stage is not new_stage\n assert stages[:-1] == start_stages[:-1]\n # it should not be a ref anymore\n assert not isinstance(new_stage, CeedStageRef)\n\n assert_stages_same(ref_stage.stage, new_stage)\n\n return new_stage\n\n\nasync def open_stage_settings(app: CeedTestApp, stage: CeedStage):\n settings_btn = app.resolve_widget(stage.display).down(\n test_name='stage settings open')()\n await touch_widget(app, settings_btn)\n\n return app.resolve_widget().down(test_name='stage settings')()\n\n\nasync def 
test_stage_find_shape_in_all_stages(stage_app: CeedTestApp):\n (s1, s2, s3), (group, shape1, shape2, shape3) = create_test_stages(\n stage_app=stage_app, show_in_gui=True)\n await stage_app.wait_clock_frames(2)\n\n for shape in (shape1, shape2, shape3):\n for stage in (s1, s2, s3):\n assert shape.shape in [s.shape for s in stage.stage.shapes]\n assert shape.shape in group.shapes\n\n stage_app.app.shape_factory.remove_shape(shape2.shape)\n await stage_app.wait_clock_frames(2)\n\n for shape in (shape1, shape3):\n for stage in (s1, s2, s3):\n assert shape.shape in [s.shape for s in stage.stage.shapes]\n assert shape.shape in group.shapes\n for shape in (shape2, ):\n for stage in (s1, s2, s3):\n assert shape.shape not in [s.shape for s in stage.stage.shapes]\n assert shape.shape not in group.shapes\n\n stage_app.app.shape_factory.remove_shape(shape1.shape)\n await stage_app.wait_clock_frames(2)\n\n for shape in (shape3, ):\n for stage in (s1, s2, s3):\n assert shape.shape in [s.shape for s in stage.stage.shapes]\n assert shape.shape in group.shapes\n for shape in (shape2, shape1):\n for stage in (s1, s2, s3):\n assert shape.shape not in [s.shape for s in stage.stage.shapes]\n assert shape.shape not in group.shapes\n\n stage_app.app.shape_factory.remove_shape(shape3.shape)\n await stage_app.wait_clock_frames(2)\n\n for shape in (shape2, shape1, shape3):\n for stage in (s1, s2, s3):\n assert shape.shape not in [s.shape for s in stage.stage.shapes]\n assert shape.shape not in group.shapes\n\n\nasync def test_add_empty_stage(stage_app: CeedTestApp):\n stage_factory = stage_app.app.stage_factory\n assert not stage_factory.stages\n n = len(stage_factory.stage_names)\n\n # add first empty stage\n add_stage = stage_app.resolve_widget().down(test_name='stage add')()\n await touch_widget(stage_app, add_stage)\n\n assert stage_factory.stages\n stage = stage_factory.stages[0]\n assert stage in list(stage_factory.stage_names.values())\n assert len(stage_factory.stage_names) == n + 1\n assert stage.display.show_more\n\n # select the stage and add stage to it\n name_label = stage_app.resolve_widget(stage.display).down(\n test_name='stage label')()\n assert not stage.display.selected\n\n await touch_widget(stage_app, name_label)\n assert stage.display.selected\n await touch_widget(stage_app, add_stage)\n assert stage_factory.stages == [stage]\n\n # deselect the stage and add stage globally\n assert stage.display.selected\n await touch_widget(stage_app, name_label)\n await touch_widget(stage_app, add_stage)\n\n assert len(stage_factory.stages) == 2\n assert stage_factory.stages[0] is stage\n\n\nasync def test_gui_add_stages(stage_app: CeedTestApp):\n stages = []\n add_stage = stage_app.resolve_widget().down(test_name='stage add')()\n for i, stage_cls in enumerate(stage_classes):\n stage = stage_cls(app=stage_app, show_in_gui=False)\n stages.append(stage)\n\n # don't keep more than two stages so the list is not too long\n if i >= 2:\n oldest_stage = stages.pop(0)\n assert oldest_stage.stage in stage_app.app.stage_factory.stages\n remove_btn = stage_app.resolve_widget(\n oldest_stage.stage.display).down(test_name='del btn stage')()\n await touch_widget(stage_app, remove_btn)\n assert oldest_stage.stage not in stage_app.app.stage_factory.stages\n\n # add the stage\n await touch_widget(stage_app, add_stage)\n assert len(stage_app.app.stage_factory.stages) == min(2, i + 1)\n stage.stage = stage_app.app.stage_factory.stages[-1]\n\n # show the settings for the stage\n widget = stage.stage.display\n settings = await 
open_stage_settings(stage_app, stage.stage)\n\n # check default name\n name = stage_app.resolve_widget(settings).down(\n test_name='stage name')()\n assert not name.disabled, \"root stages can be renamed\"\n name_label = stage_app.resolve_widget(widget).down(\n test_name='stage label')()\n original_name = name.text\n assert stage.name != original_name\n assert original_name == name_label.text\n assert original_name in stage_app.app.stage_factory.stage_names\n assert stage.name not in stage_app.app.stage_factory.stage_names\n\n # change the stage name\n await replace_text(stage_app, name, stage.name)\n assert name.text == stage.name\n assert name_label.text == stage.name\n assert original_name not in stage_app.app.stage_factory.stage_names\n assert stage.name in stage_app.app.stage_factory.stage_names\n\n await assert_set_params_in_gui(stage_app, stage, settings)\n\n # close the settings widget\n await escape(stage_app)\n\n\nasync def test_gui_add_sub_stages(stage_app: CeedTestApp):\n add_stage = stage_app.resolve_widget().down(test_name='stage add')()\n await touch_widget(stage_app, add_stage)\n\n base_stage: CeedStage = stage_app.app.stage_factory.stages[0]\n name_label = stage_app.resolve_widget(base_stage.display).down(\n test_name='stage label')()\n await touch_widget(stage_app, name_label)\n assert base_stage.display.selected\n assert not base_stage.stages\n\n stages = []\n for i, stage_cls in enumerate(stage_classes[:4]):\n stage = stage_cls(app=stage_app, show_in_gui=False)\n stages.append(stage)\n\n # don't keep more than two stages so the list is not too long\n if i >= 2:\n oldest_stage = stages.pop(0)\n assert oldest_stage.stage in base_stage.stages\n remove_btn = stage_app.resolve_widget(\n oldest_stage.stage.display).down(test_name='del btn stage')()\n await touch_widget(stage_app, remove_btn)\n assert oldest_stage.stage not in base_stage.stages\n\n if not base_stage.display.selected:\n await touch_widget(stage_app, name_label)\n assert base_stage.display.selected\n\n # add the stage\n await touch_widget(stage_app, add_stage)\n assert len(base_stage.stages) == min(2, i + 1)\n stage.stage = base_stage.stages[-1]\n\n # replace the ref stage\n settings_btn = stage_app.resolve_widget(stage.stage.display).down(\n test_name='stage settings open')()\n await touch_widget(stage_app, settings_btn)\n stage.stage = base_stage.stages[-1]\n\n await assert_set_params_in_gui(stage_app, stage, check_name=False)\n\n\nasync def test_gui_drag_shape_to_stage(stage_app: CeedTestApp):\n (group, group2, group3), (shape1, shape2, shape3) = \\\n assert_add_three_groups(\n shape_factory=stage_app.app.shape_factory, app=stage_app,\n show_in_gui=True)\n await stage_app.wait_clock_frames(2)\n\n (s1, s2, s3), _ = create_test_stages(\n stage_app=stage_app, add_func=False, add_shapes=False)\n await stage_app.wait_clock_frames(2)\n\n # multiple stages\n for stage in (s2, s3):\n container = stage.stage.display.shape_widget\n shapes = stage.stage.shapes\n assert not shapes\n\n # drag each shape to the stage\n added_shapes = []\n for i, shape in enumerate((shape1, group2, shape3, shape2)):\n if isinstance(shape, CeedShapeGroup):\n src = stage_app.resolve_widget(shape.widget).down(\n test_name='group drag button')()\n else:\n shape = shape.shape\n src = stage_app.resolve_widget(shape.widget).down(\n test_name='shape drag')()\n\n offset = (0, 5) if container.height else (0, 0)\n async for _ in stage_app.do_touch_drag_follow(\n widget=src, target_widget=container,\n target_widget_loc=('center_x', 'y'),\n 
target_widget_offset=offset, drag_n=15):\n pass\n\n # check that shape was added\n assert len(shapes) == min(3, i + 1)\n assert shape is shapes[-1].shape\n\n # make sure label matches\n name_label = stage_app.resolve_widget(shapes[-1].display).down(\n test_name='stage shape name')()\n assert name_label.text == shape.name\n\n added_shapes.append(shapes[-1])\n\n # don't keep more than two stages so the list is not too long\n if i >= 2:\n oldest_shape = added_shapes.pop(0)\n assert oldest_shape in shapes\n remove_btn = stage_app.resolve_widget(\n oldest_shape.display).down(\n test_name='stage shape del')()\n await touch_widget(stage_app, remove_btn)\n assert oldest_shape not in shapes\n await stage_app.wait_clock_frames(2)\n\n\nasync def test_gui_drag_func_to_stage(stage_app: CeedTestApp):\n global_funcs = create_funcs(func_app=stage_app, show_in_gui=True)\n group_func: GroupFunction = global_funcs[-1]\n ff1 = group_func.wrapper_funcs[0]\n ff2 = group_func.wrapper_funcs[1]\n global_funcs = [\n (ff1, True)] + [(f, False) for f in global_funcs] + [(ff2, True)]\n await stage_app.wait_clock_frames(2)\n\n (s1, s2, s3), _ = create_test_stages(\n stage_app=stage_app, add_func=False, add_shapes=False)\n await stage_app.wait_clock_frames(2)\n\n # multiple funcs\n for stage in (s2, s3):\n container = stage.stage.display.func_widget\n functions = stage.stage.functions\n assert not functions\n\n # drag each func to the stage\n added_funcs = []\n for i, (func, is_sub_func) in enumerate(global_funcs):\n src = stage_app.resolve_widget(func.func.display).down(\n test_name='func drag btn')()\n\n async for _ in stage_app.do_touch_drag_follow(\n widget=src, target_widget=container,\n target_widget_loc=('center_x', 'y'),\n target_widget_offset=(0, 5)):\n pass\n\n # check that shape was added\n assert len(functions) == min(3, i + 1)\n assert functions[-1] is not func.func\n if is_sub_func:\n assert isinstance(functions[-1], (FuncBase, FuncGroup))\n assert_funcs_same(functions[-1], func.func)\n else:\n assert isinstance(functions[-1], CeedFuncRef)\n assert func.func is functions[-1].func\n await replace_last_ref_with_original_func(\n stage_app, functions, func.func.name)\n\n added_funcs.append(functions[-1])\n\n # don't keep more than two funcs so the list is not too long\n if i >= 2:\n oldest_func = added_funcs.pop(0)\n assert oldest_func in functions\n remove_btn = stage_app.resolve_widget(\n oldest_func.display).down(\n test_name='del_btn_func')()\n await touch_widget(stage_app, remove_btn)\n assert oldest_func not in functions\n\n await stage_app.wait_clock_frames(2)\n\n\nasync def test_gui_drag_stage_to_stage(stage_app: CeedTestApp):\n (s1, s2, s21), _ = create_test_stages(\n stage_app=stage_app, show_in_gui=True, add_func=False,\n add_shapes=False)\n (s3, s4, s41), _ = create_test_stages(\n stage_app=stage_app, show_in_gui=True, add_func=False,\n add_shapes=False)\n await stage_app.wait_clock_frames(2)\n\n # collapse stages to not take up space\n for stage in (s1, s21, s3):\n stage.stage.display.show_more = False\n await stage_app.wait_clock_frames(2)\n\n # multiple funcs\n for stage in (s4, s41):\n container = stage.stage.display.stage_widget\n stages = stage.stage.stages\n n_start = 0 if stage is s41 else 1\n assert len(stages) == n_start\n\n # drag each func to the stage\n added_stages = []\n for i, src_stage in enumerate((s1, s2, s21, s3)):\n src = stage_app.resolve_widget(src_stage.stage.display).down(\n test_name='stage drag btn')()\n\n async for _ in stage_app.do_touch_drag_follow(\n widget=src, 
target_widget=container,\n target_widget_loc=('center_x', 'y'),\n target_widget_offset=(0, 5)):\n pass\n\n # check that shape was added\n assert len(stages) == min(3, i + 1) + n_start\n\n assert stages[-1] is not src_stage.stage\n if src_stage is s21:\n assert isinstance(stages[-1], CeedStage)\n assert_stages_same(stages[-1], src_stage.stage)\n else:\n assert isinstance(stages[-1], CeedStageRef)\n assert src_stage.stage is stages[-1].stage\n await replace_last_ref_with_original_stage(\n stage_app, stages, src_stage.stage.name)\n\n added_stages.append(stages[-1])\n\n # don't keep more than two stages so the list is not too long\n if i >= 2:\n oldest_stage = added_stages.pop(0)\n assert oldest_stage in stages\n remove_btn = stage_app.resolve_widget(\n oldest_stage.display).down(\n test_name='del btn stage')()\n await touch_widget(stage_app, remove_btn)\n assert oldest_stage not in stages\n\n await stage_app.wait_clock_frames(2)\n\n\ndef verify_color(\n stage_app, shape_color, shape2_color, frame, centers, flip, video_mode):\n (cx1, cy1), (cx2, cy2) = centers\n if flip:\n cx1 = 1920 - cx1\n cx2 = 1920 - cx2\n\n centers = [[(cx1, cy1), (cx2, cy2)]]\n if 'QUAD' in video_mode:\n cx1, cx2, cy1, cy2 = cx1 // 2, cx2 // 2, cy1 // 2, cy2 // 2\n corners = ((0, 540), (960, 540), (0, 0), (960, 0))\n centers = [\n [(cx + x, cy + y) for cx, cy in [(cx1, cy1), (cx2, cy2)]]\n for x, y in corners]\n\n if video_mode == 'QUAD12X':\n # first get all 4 centers values, one for each quadrant\n rgb_values = []\n for i in range(4):\n rgb = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, centers[i])\n rgb = [[c / 255 for c in p] for p in rgb]\n rgb_values.append(rgb)\n\n # r, g, b\n for plane in [0, 1, 2]:\n # 4 quads\n for color1, color2 in rgb_values:\n assert isclose(\n color1[plane], shape_color[frame][3], abs_tol=2 / 255)\n assert isclose(\n color2[plane], shape2_color[frame][3], abs_tol=2 / 255)\n frame += 1\n else:\n n_sub_frames = 1\n if video_mode == 'QUAD4X':\n n_sub_frames = 4\n\n for i in range(n_sub_frames):\n points = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, centers[i])\n points = [[c / 255 for c in p] for p in points]\n (r1, g1, b1, _), (r2, g2, b2, _) = points\n\n val = shape_color[frame]\n assert isclose(r1, val[3], abs_tol=2 / 255) if val[0] else r1 == 0\n assert isclose(g1, val[3], abs_tol=2 / 255) if val[1] else g1 == 0\n assert isclose(b1, val[3], abs_tol=2 / 255) if val[2] else b1 == 0\n val = shape2_color[frame]\n assert isclose(r2, val[3], abs_tol=2 / 255) if val[0] else r2 == 0\n assert isclose(g2, val[3], abs_tol=2 / 255) if val[1] else g2 == 0\n assert isclose(b2, val[3], abs_tol=2 / 255) if val[2] else b2 == 0\n frame += 1\n\n return frame\n\n\[email protected]('video_mode', ['RGB', 'QUAD4X', 'QUAD12X'])\[email protected](\n 'flip,skip', [(True, False), (False, True), (False, False)])\nasync def test_recursive_play_stage_intensity(\n stage_app: CeedTestApp, tmp_path, flip, skip, video_mode):\n \"\"\"Checks that proper frame rendering happens in all these modes.\n In skip mode, some frames are skipped if GPU/CPU is too slow.\n \"\"\"\n from ..test_stages import create_recursive_stages\n from .examples.shapes import CircleShapeP1, CircleShapeP2\n from kivy.clock import Clock\n from ceed.analysis import CeedDataReader\n\n root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(\n stage_app.app.stage_factory, app=stage_app)\n\n from ceed.function.plugin import LinearFunc\n for i, stage in enumerate((s1, s2, s3, s4, s5, s6)):\n 
stage.stage.add_func(LinearFunc(\n function_factory=stage_app.app.function_factory, b=0, m=.5,\n duration=(i % 2 + 1) * 1))\n\n shape = CircleShapeP1(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n\n shape2 = CircleShapeP2(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n s1.stage.add_shape(shape.shape)\n s4.stage.add_shape(shape.shape)\n s5.stage.add_shape(shape.shape)\n s2.stage.add_shape(shape2.shape)\n s3.stage.add_shape(shape2.shape)\n s6.stage.add_shape(shape2.shape)\n\n root.show_in_gui()\n await stage_app.wait_clock_frames(2)\n\n frame = 0\n event = None\n # make GPU too slow to force skipping frames, when enabled\n fps = await measure_fps(stage_app) + 10\n rate = stage_app.app.view_controller.frame_rate = fps\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.flip_projector = flip\n stage_app.app.view_controller.skip_estimated_missed_frames = skip\n stage_app.app.view_controller.video_mode = video_mode\n stage_app.app.view_controller.pad_to_stage_handshake = False\n\n n_sub_frames = 1\n if video_mode == 'QUAD4X':\n n_sub_frames = 4\n elif video_mode == 'QUAD12X':\n n_sub_frames = 12\n\n centers = shape.center, shape2.center\n num_frames = rate * n_sub_frames * (2 + 1 + 2 + 1)\n shape_color = [(False, False, False, 0.), ] * num_frames\n shape2_color = [(False, False, False, 0.), ] * num_frames\n skipped_frame_indices = set()\n n_missed_frames = 0\n\n for s, start, e in [(s1, 0, 1), (s4, 3, 5), (s5, 5, 6)]:\n for i in range(start * rate * n_sub_frames, e * rate * n_sub_frames):\n val = (i - start * rate * n_sub_frames) / (rate * n_sub_frames) * .5\n shape_color[i] = s.color_r, s.color_g, s.color_b, val\n\n for s, start, e in [(s2, 0, 2), (s3, 2, 3), (s6, 5, 6)]:\n for i in range(start * rate * n_sub_frames, e * rate * n_sub_frames):\n val = (i - start * rate * n_sub_frames) / (rate * n_sub_frames) * .5\n shape2_color[i] = s.color_r, s.color_g, s.color_b, val\n\n def verify_intensity(*largs):\n nonlocal frame, n_missed_frames\n # total frames is a multiple of n_sub_frames\n if not stage_app.app.view_controller.stage_active:\n assert stage_app.app.view_controller.count - 1 == num_frames\n if skip:\n # last frame could be passed actual frames\n assert frame - n_missed_frames * n_sub_frames <= num_frames\n else:\n assert frame == num_frames\n event.cancel()\n return\n # not yet started\n if not stage_app.app.view_controller.count:\n return\n\n # some frame may have been skipped, but num_frames is max frames\n # This callback happens after frame callback and after the frame flip.\n # This also means we record even the last skipped frames (if skipped)\n assert frame < num_frames\n\n frame = verify_color(\n stage_app, shape_color, shape2_color, frame, centers, flip,\n video_mode)\n assert stage_app.app.view_controller.count == frame\n\n if skip:\n # some frames may have been dropped for next frame\n n_missed_frames = stage_app.app.view_controller._n_missed_frames\n for k in range(n_missed_frames * n_sub_frames):\n # frame is next frame index, next frame is skipped\n skipped_frame_indices.add(frame)\n frame += 1\n else:\n assert not stage_app.app.view_controller._n_missed_frames\n\n event = Clock.create_trigger(verify_intensity, timeout=0, interval=True)\n event()\n stage_app.app.view_controller.request_stage_start(root.name)\n\n await wait_experiment_done(stage_app, timeout=num_frames / rate * 50)\n await wait_experiment_stopped(stage_app)\n\n filename = str(tmp_path / 
'recursive_play_stage_intensity.h5')\n stage_app.app.ceed_data.save(filename=filename)\n\n f = CeedDataReader(filename)\n f.open_h5()\n assert f.experiments_in_file == ['0']\n assert not f.num_images_in_file\n f.load_experiment(0)\n\n shape_data = f.shapes_intensity[shape.name]\n shape_data_rendered = f.shapes_intensity_rendered[shape.name]\n shape2_data = f.shapes_intensity[shape2.name]\n shape2_data_rendered = f.shapes_intensity_rendered[shape2.name]\n recorded_rendered_frames = f.rendered_frames\n\n # even when skipping, skipped frames are still logged but they are removed\n # in xxx_rendered arrays\n if skip:\n # because frame rate is high, we'll definitely drop frames\n assert skipped_frame_indices\n else:\n assert not skipped_frame_indices\n\n assert shape_data.shape[0] == num_frames\n assert shape2_data.shape[0] == num_frames\n\n n_skipped = len(skipped_frame_indices)\n if skip:\n # last frame may be recorded as skipped, but if stage is done frame is\n # not real. n_missed_frames is the n_missed_frames from last frame\n assert num_frames - n_skipped <= shape_data_rendered.shape[0] \\\n <= num_frames - n_skipped + n_sub_frames * n_missed_frames\n assert num_frames - n_skipped <= shape2_data_rendered.shape[0] \\\n <= num_frames - n_skipped + n_sub_frames * n_missed_frames\n else:\n assert shape_data_rendered.shape[0] == num_frames\n assert shape2_data_rendered.shape[0] == num_frames\n\n # in QUAD12X mode, all 3 channels have same value in the data (because we\n # show gray). But the projector outputs different values for each channel,\n # for each sub-frame\n gray = video_mode == 'QUAD12X'\n i = 0\n k = 0\n for (r, g, b, val), (r1, g1, b1, _) in zip(shape_color, shape_data):\n assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0\n assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0\n assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0\n\n if skip:\n assert recorded_rendered_frames[k] \\\n == (k not in skipped_frame_indices)\n else:\n assert recorded_rendered_frames[k]\n\n if k not in skipped_frame_indices:\n r1, g1, b1, _ = shape_data_rendered[i, :]\n assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0\n assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0\n assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0\n i += 1\n k += 1\n\n i = 0\n k = 0\n for (r, g, b, val), (r1, g1, b1, _) in zip(shape2_color, shape2_data):\n assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0\n assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0\n assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0\n\n if skip:\n assert recorded_rendered_frames[k] \\\n == (k not in skipped_frame_indices)\n else:\n assert recorded_rendered_frames[k]\n\n if k not in skipped_frame_indices:\n r1, g1, b1, _ = shape2_data_rendered[i, :]\n assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0\n assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0\n assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0\n i += 1\n k += 1\n\n f.close_h5()\n\n\nasync def test_moat_stage_shapes(stage_app: CeedTestApp, tmp_path):\n from ..test_stages import create_recursive_stages\n from .examples.shapes import CircleShapeP1, CircleShapeP1Internal\n from ceed.function.plugin import ConstFunc\n from ceed.analysis import CeedDataReader\n\n root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(\n stage_app.app.stage_factory, app=stage_app)\n # internal shape\n s1.stage.color_r = 
False\n s1.stage.color_g = False\n s1.stage.color_b = True\n # surrounding shape\n s2.stage.color_r = True\n s2.stage.color_g = False\n s2.stage.color_b = True\n\n shape = CircleShapeP1(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n internal_shape = CircleShapeP1Internal(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n\n s1.stage.add_func(ConstFunc(\n function_factory=stage_app.app.function_factory, a=1, duration=5))\n s1.stage.add_shape(internal_shape.shape)\n\n s2.stage.add_func(ConstFunc(\n function_factory=stage_app.app.function_factory, a=1, duration=5))\n s2.stage.add_shape(shape.shape)\n\n root.show_in_gui()\n await stage_app.wait_clock_frames(2)\n\n stage_app.app.view_controller.frame_rate = 10\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.flip_projector = False\n\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_stage_experiment_started(stage_app)\n assert stage_app.app.view_controller.stage_active\n\n points = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, [internal_shape.center, shape.center])\n (r1, g1, b1, _), (r2, g2, b2, _) = points\n assert r1 == 0\n assert g1 == 0\n assert b1 == 255\n\n assert r2 == 255\n assert g2 == 0\n assert b2 == 255\n\n stage_app.app.view_controller.request_stage_end()\n await stage_app.wait_clock_frames(2)\n assert not stage_app.app.view_controller.stage_active\n\n # now hide internal shape behind larger circle\n stage_app.app.shape_factory.move_shape_upwards(shape.shape)\n await stage_app.wait_clock_frames(2)\n\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_stage_experiment_started(stage_app)\n assert stage_app.app.view_controller.stage_active\n\n points = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, [internal_shape.center, shape.center])\n (r1, g1, b1, _), (r2, g2, b2, _) = points\n assert r1 == 255\n assert g1 == 0\n assert b1 == 255\n\n assert r2 == 255\n assert g2 == 0\n assert b2 == 255\n\n stage_app.app.view_controller.request_stage_end()\n await stage_app.wait_clock_frames(2)\n await wait_experiment_stopped(stage_app)\n\n filename = str(tmp_path / 'moat_stage_shapes.h5')\n stage_app.app.ceed_data.save(filename=filename)\n\n f = CeedDataReader(filename)\n f.open_h5()\n assert f.experiments_in_file == ['0', '1']\n assert not f.num_images_in_file\n\n f.load_experiment(0)\n assert tuple(np.array(f.shapes_intensity[shape.name])[0, :3]) == (1, 0, 1)\n assert tuple(\n np.array(f.shapes_intensity[internal_shape.name])[0, :3]) == (0, 0, 1)\n\n f.load_experiment(1)\n assert tuple(np.array(f.shapes_intensity[shape.name])[0, :3]) == (1, 0, 1)\n assert tuple(\n np.array(f.shapes_intensity[internal_shape.name])[0, :3]) == (0, 0, 1)\n\n f.close_h5()\n\n\nasync def test_moat_single_stage_shapes(stage_app: CeedTestApp, tmp_path):\n from ..test_stages import create_recursive_stages\n from .examples.shapes import CircleShapeP1, CircleShapeP1Internal\n from ceed.function.plugin import ConstFunc\n from ceed.analysis import CeedDataReader\n\n root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(\n stage_app.app.stage_factory, app=stage_app)\n s1.stage.color_r = False\n s1.stage.color_g = False\n s1.stage.color_b = True\n\n shape = CircleShapeP1(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n internal_shape = CircleShapeP1Internal(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n\n s1.stage.add_func(ConstFunc(\n 
function_factory=stage_app.app.function_factory, a=1, duration=5))\n stage_shape = s1.stage.add_shape(internal_shape.shape)\n s1.stage.add_shape(shape.shape)\n stage_shape.keep_dark = True\n\n root.show_in_gui()\n await stage_app.wait_clock_frames(2)\n\n stage_app.app.view_controller.frame_rate = 10\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.flip_projector = False\n\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_stage_experiment_started(stage_app)\n assert stage_app.app.view_controller.stage_active\n\n points = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, [internal_shape.center, shape.center])\n (r1, g1, b1, _), (r2, g2, b2, _) = points\n assert r1 == 0\n assert g1 == 0\n assert b1 == 0\n\n assert r2 == 0\n assert g2 == 0\n assert b2 == 255\n\n stage_app.app.view_controller.request_stage_end()\n await stage_app.wait_clock_frames(2)\n assert not stage_app.app.view_controller.stage_active\n await wait_experiment_stopped(stage_app)\n\n filename = str(tmp_path / 'moat_single_stage_shapes.h5')\n stage_app.app.ceed_data.save(filename=filename)\n\n f = CeedDataReader(filename)\n f.open_h5()\n assert f.experiments_in_file == ['0', ]\n assert not f.num_images_in_file\n\n f.load_experiment(0)\n assert tuple(np.array(f.shapes_intensity[shape.name])[0]) == (0, 0, 1, 1)\n assert tuple(\n np.array(f.shapes_intensity[internal_shape.name])[0]) == (0, 0, 0, 1)\n f.close_h5()\n\n\[email protected]('func', [True, False])\nasync def test_event_data_empty(stage_app: CeedTestApp, tmp_path, func):\n from ..test_stages import create_2_shape_stage\n from ceed.function.plugin import ConstFunc\n from ceed.analysis import CeedDataReader\n\n root, s1, s2, shape1, shape2 = create_2_shape_stage(\n stage_app.app.stage_factory, show_in_gui=True, app=stage_app)\n s1.stage.name = 'test stage'\n\n if func:\n s1.stage.add_func(ConstFunc(\n function_factory=stage_app.app.function_factory, duration=0))\n\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.skip_estimated_missed_frames = False\n stage_app.app.view_controller.frame_rate = 10\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_experiment_done(stage_app, timeout=180)\n await wait_experiment_stopped(stage_app)\n\n filename = str(tmp_path / 'event_data_empty.h5')\n stage_app.app.ceed_data.save(filename=filename)\n\n # order in which the stage/func id start/finish\n if func:\n order = (0, 1, 3, 2), (2, 1, 3, 0)\n else:\n order = (0, 1, 2), (1, 2, 0)\n loops = [\n [0, i, 'start' + s, [0, ] * 2] for i in order[0] for s in ('', '_loop')\n ]\n loops += [\n [0, i, 'end' + s, [0, ] * 2] for i in order[1] for s in ('_loop', '')\n ]\n\n with CeedDataReader(filename) as f:\n f.load_experiment(0)\n events = [d[:-1] + [d[-1][:-1], ] for d in f.event_data]\n assert loops == events\n\n s = f.experiment_stage.stages[0]\n\n for kw in [{'ceed_id': s.ceed_id}, {'ceed_name': s1.stage.name},\n {'ceed_obj': s}]:\n items = f.format_event_data(event='start_loop', **kw)\n assert len(items) == 1\n assert items[0][:5] == [0, s, 'start_loop', 0, 0]\n\n items = f.format_event_data(**kw)\n assert len(items) == 4\n for item, val in zip(\n items, ['start', 'start_loop', 'end_loop', 'end']):\n assert item[:5] == [0, s, val, 0, 0]\n\n\[email protected](\n 'quad,sub_frames', [('RGB', 1), ('QUAD4X', 4), ('QUAD12X', 12)])\[email protected]('skip', [False, True])\nasync def test_pad_stage_ticks(\n stage_app: CeedTestApp, tmp_path, quad, 
sub_frames, skip):\n from ceed.analysis import CeedDataReader\n\n root = SerialAllStage(\n stage_factory=stage_app.app.stage_factory, show_in_gui=False,\n app=stage_app, create_add_to_parent=True)\n\n shape = CircleShapeP1(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n root.stage.add_shape(shape.shape)\n root.show_in_gui()\n await stage_app.wait_clock_frames(2)\n\n # use a larger frame rate so we have to drop frames\n stage_app.app.view_controller.frame_rate = await measure_fps(stage_app) + 10\n stage_app.app.view_controller.skip_estimated_missed_frames = skip\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.video_mode = quad\n\n stage_app.app.view_controller.pad_to_stage_handshake = False\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_experiment_done(stage_app)\n\n stage_app.app.view_controller.pad_to_stage_handshake = True\n stage_app.app.view_controller.request_stage_start(root.name)\n await wait_experiment_done(stage_app, 300)\n await wait_experiment_stopped(stage_app)\n\n filename = str(tmp_path / 'pad_stage_ticks.h5')\n stage_app.app.ceed_data.save(filename=filename)\n\n f = CeedDataReader(filename)\n f.open_h5()\n assert f.experiments_in_file == ['0', '1']\n assert not f.num_images_in_file\n\n f.load_experiment('0')\n assert f.shapes_intensity[shape.name].shape == (0, 4)\n\n f.load_experiment('1')\n # sub_frames scales up the handshake since IO is same for each sub-frame\n # Even skipped frames are logged so size matches\n assert f.shapes_intensity[shape.name].shape == (\n stage_app.app.data_serializer.num_ticks_handshake(16, sub_frames), 4)\n assert f.shapes_intensity[shape.name].shape == (\n stage_app.app.data_serializer.num_ticks_handshake(16, 1) * sub_frames,\n 4)\n\n frame_time_counter = np.asarray(f._block.data_arrays['frame_time_counter'])\n frame_time = np.asarray(f._block.data_arrays['frame_time'])\n rendered_frames_bool = f.rendered_frames\n assert len(frame_time_counter) == len(frame_time)\n assert np.sum(rendered_frames_bool) == len(frame_time_counter) * sub_frames\n\n frame_counter = np.asarray(f._block.data_arrays['frame_counter'])\n n = f.shapes_intensity[shape.name].shape[0]\n # some frames will have been skipped because of higher frame rate than GPU\n if skip:\n assert sub_frames * len(frame_time_counter) < n\n else:\n assert sub_frames * len(frame_time_counter) == n\n\n # we didn't stop early so all frames are rendered\n rendered_indices = np.arange(0, n, sub_frames)\n if skip:\n assert len(frame_time_counter) < len(frame_counter) // sub_frames\n assert len(rendered_indices) > len(frame_time_counter)\n else:\n assert len(frame_time_counter) == len(frame_counter) // sub_frames\n assert len(rendered_indices) == len(frame_time_counter)\n\n assert np.all(np.arange(1, n + 1) == frame_counter)\n # count recorded is last sub-frame\n if skip:\n assert np.all(\n np.isin(frame_time_counter, rendered_indices + sub_frames))\n assert np.all(frame_time_counter[1:] - frame_time_counter[:-1] > 0)\n\n assert np.all(np.isin(\n frame_time_counter,\n frame_counter[rendered_indices + sub_frames - 1]))\n else:\n assert np.all(frame_time_counter == rendered_indices + sub_frames)\n assert np.all(\n frame_counter[rendered_indices + sub_frames - 1]\n == frame_time_counter)\n\n f.close_h5()\n\n\n@contextmanager\ndef add_to_path(tmp_path, *args):\n sys.path.append(str(tmp_path))\n mod = tmp_path / 'my_gui_stage_plugin' / '__init__.py'\n try:\n mod.parent.mkdir()\n 
mod.write_text(fake_plugin_stage)\n yield None\n finally:\n sys.path.remove(str(tmp_path))\n if 'my_gui_stage_plugin' in sys.modules:\n del sys.modules['my_gui_stage_plugin']\n\n\[email protected](\n \"ceed_app\",\n [{'yaml_config': {\n 'external_stage_plugin_package': 'my_gui_stage_plugin',\n 'view': {'teensy_frame_estimation': {'use_teensy': False}}},\n 'app_context': add_to_path}, ],\n indirect=True\n)\[email protected]('external', [False, True])\nasync def test_external_plugin_named_package(\n stage_app: CeedTestApp, tmp_path, external):\n stage_factory = stage_app.app.stage_factory\n\n assert 'FakeStage' in stage_factory.stages_cls\n\n stage = SerialAllStage(\n stage_factory=stage_factory, show_in_gui=True, app=stage_app,\n create_add_to_parent=False, stage_cls=stage_factory.get('FakeStage'))\n stage.stage.val = 13\n await run_plugin_experiment(stage_app, tmp_path, external, stage=stage)\n\n assert stage_factory.stage_names[last_experiment_stage_name].val == 13\n\n\[email protected](\n 'quad,sub_frames', [('RGB', 1), ('QUAD4X', 4), ('QUAD12X', 12)])\[email protected]('main_frames', [1, 1.5, 2])\nasync def test_short_stage(\n stage_app: CeedTestApp, tmp_path, quad, sub_frames, main_frames):\n from ceed.analysis import CeedDataReader\n from ceed.function.plugin import LinearFunc\n from kivy.clock import Clock\n\n num_frames = int(math.ceil(main_frames * sub_frames))\n rate = main_frames\n\n root = SerialAllStage(\n stage_factory=stage_app.app.stage_factory, show_in_gui=False,\n app=stage_app, create_add_to_parent=True)\n shape = CircleShapeP1(\n app=None, painter=stage_app.app.shape_factory, show_in_gui=True)\n root.stage.add_shape(shape.shape)\n root.stage.add_func(LinearFunc(\n function_factory=stage_app.app.function_factory, b=0, m=1,\n duration=1))\n root.show_in_gui()\n await stage_app.wait_clock_frames(2)\n\n # use a larger frame rate so we have to drop frames\n stage_app.app.view_controller.frame_rate = rate\n stage_app.app.view_controller.use_software_frame_rate = False\n stage_app.app.view_controller.video_mode = quad\n stage_app.app.view_controller.pad_to_stage_handshake = False\n stage_app.app.view_controller.flip_projector = False\n\n frame = 0\n event = None\n cx, cy = shape.shape.centroid\n if sub_frames == 1:\n centers = [(cx, cy)]\n else:\n cx1, cy1 = cx // 2, cy // 2\n corners = ((0, 540), (960, 540), (0, 0), (960, 0))\n centers = [(cx1 + x, cy1 + y) for x, y in corners]\n intensity = []\n total_rounded_frames = math.ceil(main_frames) * sub_frames\n\n def verify_intensity(*largs):\n nonlocal frame\n if not stage_app.app.view_controller.stage_active:\n event.cancel()\n return\n # not yet started\n if not stage_app.app.view_controller.count:\n return\n\n assert frame < num_frames\n\n rgb = stage_app.get_widget_pos_pixel(\n stage_app.app.shape_factory, centers)\n rgb = [[c / 255 for c in p] for p in rgb]\n if sub_frames == 12:\n for plane in range(3):\n for point in rgb:\n value = point[plane]\n intensity.append((value, value, value, 1))\n else:\n intensity.extend(rgb)\n frame += sub_frames\n\n assert frame in (\n stage_app.app.view_controller.count, total_rounded_frames)\n assert not stage_app.app.view_controller._n_missed_frames\n\n event = Clock.create_trigger(verify_intensity, timeout=0, interval=True)\n event()\n stage_app.app.view_controller.request_stage_start(root.name)\n\n await wait_experiment_done(stage_app, timeout=50)\n await wait_experiment_stopped(stage_app)\n\n assert stage_app.app.view_controller.count == num_frames + 1\n # only counts whole frames\n 
assert frame == total_rounded_frames\n # have data for blank frames at end\n assert len(intensity) == total_rounded_frames\n assert total_rounded_frames >= num_frames\n\n filename = str(tmp_path / 'short_stage.h5')\n stage_app.app.ceed_data.save(filename=filename)\n with CeedDataReader(filename) as f:\n f.load_experiment(0)\n\n shape_data = f.shapes_intensity[shape.name]\n shape_data_rendered = f.shapes_intensity_rendered[shape.name]\n recorded_rendered_frames = f.rendered_frames\n\n assert shape_data.shape[0] == num_frames\n assert shape_data_rendered.shape[0] == num_frames\n assert len(recorded_rendered_frames) == num_frames\n\n # for each sub-frame\n gray = quad == 'QUAD12X'\n r, g, b = root.color_r, root.color_g, root.color_b\n for i, ((v1, v2, v3, _), (r1, g1, b1, _)) in enumerate(\n zip(intensity[:num_frames], shape_data)):\n # we saw the intensity we expect\n val = i / (main_frames * sub_frames)\n assert isclose(val, v1, abs_tol=2 / 255) if r or gray else v1 == 0\n assert isclose(val, v2, abs_tol=2 / 255) if g or gray else v2 == 0\n assert isclose(val, v3, abs_tol=2 / 255) if b or gray else v3 == 0\n\n # what we saw is what is recorded\n assert isclose(v1, r1, abs_tol=2 / 255)\n assert isclose(v2, g1, abs_tol=2 / 255)\n assert isclose(v3, b1, abs_tol=2 / 255)\n\n assert recorded_rendered_frames[i]\n assert shape_data_rendered[i, 0] == r1\n assert shape_data_rendered[i, 1] == g1\n assert shape_data_rendered[i, 2] == b1\n\n # remaining frames are blank in quad mode\n for r, g, b, _ in intensity[num_frames:]:\n assert not r\n assert not g\n assert not b\n"
] | [
[
"numpy.asarray",
"numpy.arange",
"numpy.all",
"numpy.array",
"numpy.sum",
"numpy.isin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
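The ceed test code in the record above repeatedly checks each recorded color channel against the expected intensity with isclose(..., abs_tol=2 / 255) when that channel is enabled (or when QUAD12X gray mode drives all channels), and requires an exact zero otherwise. A minimal standalone sketch of that comparison pattern, assuming nothing beyond the standard library; the check_channels helper is a hypothetical name introduced here only for illustration and is not part of the repository:

    from math import isclose

    def check_channels(expected, recorded, r_on, g_on, b_on, gray=False):
        # expected: intensity the stage function should have produced (0..1)
        # recorded: (r, g, b) values read back from the saved experiment data
        # An enabled channel (or any channel in QUAD12X gray mode) must match
        # `expected` within roughly one step of an 8-bit framebuffer; a
        # disabled channel must be exactly zero.
        tol = 2 / 255
        for value, enabled in zip(recorded, (r_on, g_on, b_on)):
            if enabled or gray:
                assert isclose(expected, value, abs_tol=tol)
            else:
                assert value == 0

    # Example: a blue-only stage at half intensity, outside QUAD12X mode.
    check_channels(0.5, (0.0, 0.0, 0.5), r_on=False, g_on=False, b_on=True)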
igorlucci/koalas | [
"8803344d620261981003175bd1edc3c4120b84e2"
] | [
"databricks/koalas/base.py"
] | [
"#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nBase and utility classes for Koalas objects.\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\nimport datetime\nfrom functools import wraps, partial\nfrom typing import Any, Callable, Tuple, Union, cast, TYPE_CHECKING\nimport warnings\n\nimport numpy as np\nimport pandas as pd # noqa: F401\nfrom pandas.api.types import is_list_like\nfrom pyspark import sql as spark\nfrom pyspark.sql import functions as F, Window, Column\nfrom pyspark.sql.types import (\n BooleanType,\n DateType,\n DoubleType,\n FloatType,\n IntegralType,\n LongType,\n StringType,\n TimestampType,\n)\n\nfrom databricks import koalas as ks # For running doctests and reference resolution in PyCharm.\nfrom databricks.koalas import numpy_compat\nfrom databricks.koalas.config import get_option, option_context\nfrom databricks.koalas.internal import (\n InternalFrame,\n DEFAULT_SERIES_NAME,\n NATURAL_ORDER_COLUMN_NAME,\n SPARK_DEFAULT_INDEX_NAME,\n)\nfrom databricks.koalas.spark import functions as SF\nfrom databricks.koalas.spark.accessors import SparkIndexOpsMethods\nfrom databricks.koalas.typedef import as_spark_type, spark_type_to_pandas_dtype\nfrom databricks.koalas.utils import (\n combine_frames,\n same_anchor,\n scol_for,\n validate_axis,\n ERROR_MESSAGE_CANNOT_COMBINE,\n)\nfrom databricks.koalas.frame import DataFrame\n\nif TYPE_CHECKING:\n from databricks.koalas.indexes import Index\n from databricks.koalas.series import Series\n\n\ndef should_alignment_for_column_op(self: \"IndexOpsMixin\", other: \"IndexOpsMixin\") -> bool:\n from databricks.koalas.series import Series\n\n if isinstance(self, Series) and isinstance(other, Series):\n return not same_anchor(self, other)\n else:\n return self._internal.spark_frame is not other._internal.spark_frame\n\n\ndef align_diff_index_ops(func, this_index_ops: \"IndexOpsMixin\", *args) -> \"IndexOpsMixin\":\n \"\"\"\n Align the `IndexOpsMixin` objects and apply the function.\n\n Parameters\n ----------\n func : The function to apply\n this_index_ops : IndexOpsMixin\n A base `IndexOpsMixin` object\n args : list of other arguments including other `IndexOpsMixin` objects\n\n Returns\n -------\n `Index` if all `this_index_ops` and arguments are `Index`; otherwise `Series`\n \"\"\"\n from databricks.koalas.indexes import Index\n from databricks.koalas.series import Series, first_series\n\n cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]\n\n if isinstance(this_index_ops, Series) and all(isinstance(col, Series) for col in cols):\n combined = combine_frames(this_index_ops.to_frame(), *cols, how=\"full\")\n\n return column_op(func)(\n combined[\"this\"]._kser_for(combined[\"this\"]._internal.column_labels[0]),\n *[\n combined[\"that\"]._kser_for(label)\n for label in combined[\"that\"]._internal.column_labels\n ]\n )\n else:\n # This could cause as many counts, reset_index calls, joins for combining\n # as the number of `Index`s in `args`. 
So far it's fine since we can assume the ops\n # only work between at most two `Index`s. We might need to fix it in the future.\n\n self_len = len(this_index_ops)\n if any(len(col) != self_len for col in args if isinstance(col, IndexOpsMixin)):\n raise ValueError(\"operands could not be broadcast together with shapes\")\n\n with option_context(\"compute.default_index_type\", \"distributed-sequence\"):\n if isinstance(this_index_ops, Index) and all(isinstance(col, Index) for col in cols):\n return (\n cast(\n Series,\n column_op(func)(\n this_index_ops.to_series().reset_index(drop=True),\n *[\n arg.to_series().reset_index(drop=True)\n if isinstance(arg, Index)\n else arg\n for arg in args\n ]\n ),\n )\n .sort_index()\n .to_frame(DEFAULT_SERIES_NAME)\n .set_index(DEFAULT_SERIES_NAME)\n .index.rename(this_index_ops.name)\n )\n elif isinstance(this_index_ops, Series):\n this = this_index_ops.reset_index()\n that = [\n cast(Series, col.to_series() if isinstance(col, Index) else col).reset_index(\n drop=True\n )\n for col in cols\n ]\n\n combined = combine_frames(this, *that, how=\"full\").sort_index()\n combined = combined.set_index(\n combined._internal.column_labels[: this_index_ops._internal.index_level]\n )\n combined.index.names = this_index_ops._internal.index_names\n\n return column_op(func)(\n first_series(combined[\"this\"]),\n *[\n combined[\"that\"]._kser_for(label)\n for label in combined[\"that\"]._internal.column_labels\n ]\n )\n else:\n this = cast(Index, this_index_ops).to_frame().reset_index(drop=True)\n\n that_series = next(col for col in cols if isinstance(col, Series))\n that_frame = that_series._kdf[\n [col.to_series() if isinstance(col, Index) else col for col in cols]\n ]\n\n combined = combine_frames(this, that_frame.reset_index()).sort_index()\n\n self_index = (\n combined[\"this\"].set_index(combined[\"this\"]._internal.column_labels).index\n )\n\n other = combined[\"that\"].set_index(\n combined[\"that\"]._internal.column_labels[: that_series._internal.index_level]\n )\n other.index.names = that_series._internal.index_names\n\n return column_op(func)(\n self_index, *[other._kser_for(label) for label in other._internal.column_labels]\n )\n\n\ndef booleanize_null(left_scol, scol, f) -> Column:\n \"\"\"\n Booleanize Null in Spark Column\n \"\"\"\n comp_ops = [\n getattr(Column, \"__{}__\".format(comp_op))\n for comp_op in [\"eq\", \"ne\", \"lt\", \"le\", \"ge\", \"gt\"]\n ]\n\n if f in comp_ops:\n # if `f` is \"!=\", fill null with True otherwise False\n filler = f == Column.__ne__\n scol = F.when(scol.isNull(), filler).otherwise(scol)\n\n elif f == Column.__or__:\n scol = F.when(left_scol.isNull() | scol.isNull(), False).otherwise(scol)\n\n elif f == Column.__and__:\n scol = F.when(scol.isNull(), False).otherwise(scol)\n\n return scol\n\n\ndef column_op(f):\n \"\"\"\n A decorator that wraps APIs taking/returning Spark Column so that Koalas Series can be\n supported too. If this decorator is used for the `f` function that takes Spark Column and\n returns Spark Column, decorated `f` takes Koalas Series as well and returns Koalas\n Series.\n\n :param f: a function that takes Spark Column and returns Spark Column.\n :param self: Koalas Series\n :param args: arguments that the function `f` takes.\n \"\"\"\n\n @wraps(f)\n def wrapper(self, *args):\n from databricks.koalas.series import Series\n\n # It is possible for the function `f` takes other arguments than Spark Column.\n # To cover this case, explicitly check if the argument is Koalas Series and\n # extract Spark Column. 
For other arguments, they are used as are.\n cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]\n\n if all(not should_alignment_for_column_op(self, col) for col in cols):\n # Same DataFrame anchors\n args = [arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args]\n scol = f(self.spark.column, *args)\n scol = booleanize_null(self.spark.column, scol, f)\n\n if isinstance(self, Series) or not any(isinstance(col, Series) for col in cols):\n index_ops = self._with_new_scol(scol)\n else:\n kser = next(col for col in cols if isinstance(col, Series))\n index_ops = kser._with_new_scol(scol)\n elif get_option(\"compute.ops_on_diff_frames\"):\n index_ops = align_diff_index_ops(f, self, *args)\n else:\n raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)\n\n if not all(self.name == col.name for col in cols):\n index_ops = index_ops.rename(None)\n\n return index_ops\n\n return wrapper\n\n\ndef numpy_column_op(f):\n @wraps(f)\n def wrapper(self, *args):\n # PySpark does not support NumPy type out of the box. For now, we convert NumPy types\n # into some primitive types understandable in PySpark.\n new_args = []\n for arg in args:\n # TODO: This is a quick hack to support NumPy type. We should revisit this.\n if isinstance(self.spark.data_type, LongType) and isinstance(arg, np.timedelta64):\n new_args.append(float(arg / np.timedelta64(1, \"s\")))\n else:\n new_args.append(arg)\n return column_op(f)(self, *new_args)\n\n return wrapper\n\n\nclass IndexOpsMixin(object, metaclass=ABCMeta):\n \"\"\"common ops mixin to support a unified interface / docs for Series / Index\n\n Assuming there are following attributes or properties and function.\n \"\"\"\n\n @property\n @abstractmethod\n def _internal(self) -> InternalFrame:\n pass\n\n @property\n @abstractmethod\n def _kdf(self) -> DataFrame:\n pass\n\n @abstractmethod\n def _with_new_scol(self, scol: spark.Column):\n pass\n\n @property\n @abstractmethod\n def _column_label(self) -> Tuple:\n pass\n\n @property\n @abstractmethod\n def spark(self) -> SparkIndexOpsMethods:\n pass\n\n @property\n def spark_column(self) -> Column:\n warnings.warn(\n \"Series.spark_column is deprecated as of Series.spark.column. 
\"\n \"Please use the API instead.\",\n FutureWarning,\n )\n return self.spark.column\n\n spark_column.__doc__ = SparkIndexOpsMethods.column.__doc__\n\n # arithmetic operators\n __neg__ = column_op(Column.__neg__)\n\n def __add__(self, other) -> Union[\"Series\", \"Index\"]:\n if not isinstance(self.spark.data_type, StringType) and (\n (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n if isinstance(self.spark.data_type, StringType):\n # Concatenate string columns\n if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType):\n return column_op(F.concat)(self, other)\n # Handle df['col'] + 'literal'\n elif isinstance(other, str):\n return column_op(F.concat)(self, F.lit(other))\n else:\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n else:\n return column_op(Column.__add__)(self, other)\n\n def __sub__(self, other) -> Union[\"Series\", \"Index\"]:\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"substraction can not be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, TimestampType):\n # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.\n msg = (\n \"Note that there is a behavior difference of timestamp subtraction. \"\n \"The timestamp subtraction returns an integer in seconds, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, IndexOpsMixin) and isinstance(\n other.spark.data_type, TimestampType\n ):\n warnings.warn(msg, UserWarning)\n return self.astype(\"long\") - other.astype(\"long\")\n elif isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return self.astype(\"long\") - F.lit(other).cast(as_spark_type(\"long\"))\n else:\n raise TypeError(\"datetime subtraction can only be applied to datetime series.\")\n elif isinstance(self.spark.data_type, DateType):\n # Note that date subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.\n msg = (\n \"Note that there is a behavior difference of date subtraction. 
\"\n \"The date subtraction returns an integer in days, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, DateType):\n warnings.warn(msg, UserWarning)\n return column_op(F.datediff)(self, other).astype(\"long\")\n elif isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return column_op(F.datediff)(self, F.lit(other)).astype(\"long\")\n else:\n raise TypeError(\"date subtraction can only be applied to date series.\")\n return column_op(Column.__sub__)(self, other)\n\n def __mul__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(other, str):\n raise TypeError(\"multiplication can not be applied to a string literal.\")\n\n if (\n isinstance(self.spark.data_type, IntegralType)\n and isinstance(other, IndexOpsMixin)\n and isinstance(other.spark.data_type, StringType)\n ):\n return column_op(SF.repeat)(other, self)\n\n if isinstance(self.spark.data_type, StringType):\n if (\n isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, IntegralType)\n ) or isinstance(other, int):\n return column_op(SF.repeat)(self, other)\n else:\n raise TypeError(\n \"a string series can only be multiplied to an int series or literal\"\n )\n\n return column_op(Column.__mul__)(self, other)\n\n def __truediv__(self, other) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n __truediv__ has different behaviour between pandas and PySpark for several cases.\n 1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf\n 2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf\n 3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf\n 4. 
When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf\n\n +-------------------------------------------+\n | dividend (divisor: 0) | PySpark | pandas |\n |-----------------------|---------|---------|\n | np.inf | null | np.inf |\n | -np.inf | null | -np.inf |\n | 10 | null | np.inf |\n | -10 | null | -np.inf |\n +-----------------------|---------|---------+\n \"\"\"\n\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def truediv(left, right):\n return F.when(F.lit(right != 0) | F.lit(right).isNull(), left.__div__(right)).otherwise(\n F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(\n F.lit(np.inf).__div__(left)\n )\n )\n\n return numpy_column_op(truediv)(self, other)\n\n def __mod__(self, other) -> Union[\"Series\", \"Index\"]:\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"modulo can not be applied on string series or literals.\")\n\n def mod(left, right):\n return ((left % right) + right) % right\n\n return column_op(mod)(self, other)\n\n def __radd__(self, other) -> Union[\"Series\", \"Index\"]:\n # Handle 'literal' + df['col']\n if not isinstance(self.spark.data_type, StringType) and isinstance(other, str):\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, StringType):\n if isinstance(other, str):\n return self._with_new_scol(F.concat(F.lit(other), self.spark.column))\n else:\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n else:\n return column_op(Column.__radd__)(self, other)\n\n def __rsub__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"substraction can not be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, TimestampType):\n # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.\n msg = (\n \"Note that there is a behavior difference of timestamp subtraction. \"\n \"The timestamp subtraction returns an integer in seconds, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return -(self.astype(\"long\") - F.lit(other).cast(as_spark_type(\"long\")))\n else:\n raise TypeError(\"datetime subtraction can only be applied to datetime series.\")\n elif isinstance(self.spark.data_type, DateType):\n # Note that date subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.\n msg = (\n \"Note that there is a behavior difference of date subtraction. 
\"\n \"The date subtraction returns an integer in days, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return -column_op(F.datediff)(self, F.lit(other)).astype(\"long\")\n else:\n raise TypeError(\"date subtraction can only be applied to date series.\")\n return column_op(Column.__rsub__)(self, other)\n\n def __rmul__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(other, str):\n raise TypeError(\"multiplication can not be applied to a string literal.\")\n\n if isinstance(self.spark.data_type, StringType):\n if isinstance(other, int):\n return column_op(SF.repeat)(self, other)\n else:\n raise TypeError(\n \"a string series can only be multiplied to an int series or literal\"\n )\n\n return column_op(Column.__rmul__)(self, other)\n\n def __rtruediv__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def rtruediv(left, right):\n return F.when(left == 0, F.lit(np.inf).__div__(right)).otherwise(\n F.lit(right).__truediv__(left)\n )\n\n return numpy_column_op(rtruediv)(self, other)\n\n def __floordiv__(self, other) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n __floordiv__ has different behaviour between pandas and PySpark for several cases.\n 1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf\n 2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf\n 3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf\n 4. When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf\n\n +-------------------------------------------+\n | dividend (divisor: 0) | PySpark | pandas |\n |-----------------------|---------|---------|\n | np.inf | null | np.inf |\n | -np.inf | null | -np.inf |\n | 10 | null | np.inf |\n | -10 | null | -np.inf |\n +-----------------------|---------|---------+\n \"\"\"\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def floordiv(left, right):\n return F.when(F.lit(right is np.nan), np.nan).otherwise(\n F.when(\n F.lit(right != 0) | F.lit(right).isNull(), F.floor(left.__div__(right))\n ).otherwise(\n F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(\n F.lit(np.inf).__div__(left)\n )\n )\n )\n\n return numpy_column_op(floordiv)(self, other)\n\n def __rfloordiv__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def rfloordiv(left, right):\n return F.when(F.lit(left == 0), F.lit(np.inf).__div__(right)).otherwise(\n F.when(F.lit(left) == np.nan, np.nan).otherwise(F.floor(F.lit(right).__div__(left)))\n )\n\n return numpy_column_op(rfloordiv)(self, other)\n\n def __rmod__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"modulo can not be applied on string series or literals.\")\n\n def rmod(left, right):\n return ((right % left) + left) % left\n\n return column_op(rmod)(self, other)\n\n __pow__ = 
column_op(Column.__pow__)\n __rpow__ = column_op(Column.__rpow__)\n __abs__ = column_op(F.abs)\n\n # comparison operators\n __eq__ = column_op(Column.__eq__)\n __ne__ = column_op(Column.__ne__)\n __lt__ = column_op(Column.__lt__)\n __le__ = column_op(Column.__le__)\n __ge__ = column_op(Column.__ge__)\n __gt__ = column_op(Column.__gt__)\n\n # `and`, `or`, `not` cannot be overloaded in Python,\n # so use bitwise operators as boolean operators\n __and__ = column_op(Column.__and__)\n __or__ = column_op(Column.__or__)\n __invert__ = column_op(Column.__invert__)\n __rand__ = column_op(Column.__rand__)\n __ror__ = column_op(Column.__ror__)\n\n def __len__(self):\n return len(self._kdf)\n\n # NDArray Compat\n def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any):\n # Try dunder methods first.\n result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op(\n self, ufunc, method, *inputs, **kwargs\n )\n\n # After that, we try with PySpark APIs.\n if result is NotImplemented:\n result = numpy_compat.maybe_dispatch_ufunc_to_spark_func(\n self, ufunc, method, *inputs, **kwargs\n )\n\n if result is not NotImplemented:\n return result\n else:\n # TODO: support more APIs?\n raise NotImplementedError(\"Koalas objects currently do not support %s.\" % ufunc)\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"Return the dtype object of the underlying data.\n\n Examples\n --------\n >>> s = ks.Series([1, 2, 3])\n >>> s.dtype\n dtype('int64')\n\n >>> s = ks.Series(list('abc'))\n >>> s.dtype\n dtype('O')\n\n >>> s = ks.Series(pd.date_range('20130101', periods=3))\n >>> s.dtype\n dtype('<M8[ns]')\n\n >>> s.rename(\"a\").to_frame().set_index(\"a\").index.dtype\n dtype('<M8[ns]')\n \"\"\"\n return spark_type_to_pandas_dtype(self.spark.data_type)\n\n @property\n def empty(self) -> bool:\n \"\"\"\n Returns true if the current object is empty. Otherwise, returns false.\n\n >>> ks.range(10).id.empty\n False\n\n >>> ks.range(0).id.empty\n True\n\n >>> ks.DataFrame({}, index=list('abc')).index.empty\n False\n \"\"\"\n return self._internal.resolved_copy.spark_frame.rdd.isEmpty()\n\n @property\n def hasnans(self) -> bool:\n \"\"\"\n Return True if it has any missing values. Otherwise, it returns False.\n\n >>> ks.DataFrame({}, index=list('abc')).index.hasnans\n False\n\n >>> ks.Series(['a', None]).hasnans\n True\n\n >>> ks.Series([1.0, 2.0, np.nan]).hasnans\n True\n\n >>> ks.Series([1, 2, 3]).hasnans\n False\n\n >>> (ks.Series([1.0, 2.0, np.nan]) + 1).hasnans\n True\n\n >>> ks.Series([1, 2, 3]).rename(\"a\").to_frame().set_index(\"a\").index.hasnans\n False\n \"\"\"\n sdf = self._internal.spark_frame\n scol = self.spark.column\n\n if isinstance(self.spark.data_type, (DoubleType, FloatType)):\n return sdf.select(F.max(scol.isNull() | F.isnan(scol))).collect()[0][0]\n else:\n return sdf.select(F.max(scol.isNull())).collect()[0][0]\n\n @property\n def is_monotonic(self) -> bool:\n \"\"\"\n Return boolean if values in the object are monotonically increasing.\n\n .. note:: the current implementation of is_monotonic requires to shuffle\n and aggregate multiple times to check the order locally and globally,\n which is potentially expensive. 
In case of multi-index, all data are\n transferred to single node which can easily cause out-of-memory error currently.\n\n Returns\n -------\n is_monotonic : bool\n\n Examples\n --------\n >>> ser = ks.Series(['1/1/2018', '3/1/2018', '4/1/2018'])\n >>> ser.is_monotonic\n True\n\n >>> df = ks.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']})\n >>> df.dates.is_monotonic\n False\n\n >>> df.index.is_monotonic\n True\n\n >>> ser = ks.Series([1])\n >>> ser.is_monotonic\n True\n\n >>> ser = ks.Series([])\n >>> ser.is_monotonic\n True\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.is_monotonic\n True\n\n >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])\n >>> ser.is_monotonic\n False\n\n >>> ser.index.is_monotonic\n True\n\n Support for MultiIndex\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('z', 'e')],\n )\n >>> midx.is_monotonic\n True\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('z', 'a'),\n ('z', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('x', 'e')],\n )\n >>> midx.is_monotonic\n False\n \"\"\"\n return self._is_monotonic(\"increasing\")\n\n is_monotonic_increasing = is_monotonic\n\n @property\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return boolean if values in the object are monotonically decreasing.\n\n .. note:: the current implementation of is_monotonic_decreasing requires to shuffle\n and aggregate multiple times to check the order locally and globally,\n which is potentially expensive. In case of multi-index, all data are transferred\n to single node which can easily cause out-of-memory error currently.\n\n Returns\n -------\n is_monotonic : bool\n\n Examples\n --------\n >>> ser = ks.Series(['4/1/2018', '3/1/2018', '1/1/2018'])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> df = ks.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']})\n >>> df.dates.is_monotonic_decreasing\n False\n\n >>> df.index.is_monotonic_decreasing\n False\n\n >>> ser = ks.Series([1])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser = ks.Series([])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.is_monotonic_decreasing\n True\n\n >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser.index.is_monotonic_decreasing\n False\n\n Support for MultiIndex\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('z', 'e')],\n )\n >>> midx.is_monotonic_decreasing\n False\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... 
[('z', 'e'), ('z', 'd'), ('y', 'c'), ('y', 'b'), ('x', 'a')])\n >>> midx # doctest: +SKIP\n MultiIndex([('z', 'a'),\n ('z', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('x', 'e')],\n )\n >>> midx.is_monotonic_decreasing\n True\n \"\"\"\n return self._is_monotonic(\"decreasing\")\n\n def _is_locally_monotonic_spark_column(self, order):\n window = (\n Window.partitionBy(F.col(\"__partition_id\"))\n .orderBy(NATURAL_ORDER_COLUMN_NAME)\n .rowsBetween(-1, -1)\n )\n\n if order == \"increasing\":\n return (F.col(\"__origin\") >= F.lag(F.col(\"__origin\"), 1).over(window)) & F.col(\n \"__origin\"\n ).isNotNull()\n else:\n return (F.col(\"__origin\") <= F.lag(F.col(\"__origin\"), 1).over(window)) & F.col(\n \"__origin\"\n ).isNotNull()\n\n def _is_monotonic(self, order):\n assert order in (\"increasing\", \"decreasing\")\n\n sdf = self._internal.spark_frame\n\n sdf = (\n sdf.select(\n F.spark_partition_id().alias(\n \"__partition_id\"\n ), # Make sure we use the same partition id in the whole job.\n F.col(NATURAL_ORDER_COLUMN_NAME),\n self.spark.column.alias(\"__origin\"),\n )\n .select(\n F.col(\"__partition_id\"),\n F.col(\"__origin\"),\n self._is_locally_monotonic_spark_column(order).alias(\n \"__comparison_within_partition\"\n ),\n )\n .groupby(F.col(\"__partition_id\"))\n .agg(\n F.min(F.col(\"__origin\")).alias(\"__partition_min\"),\n F.max(F.col(\"__origin\")).alias(\"__partition_max\"),\n F.min(F.coalesce(F.col(\"__comparison_within_partition\"), F.lit(True))).alias(\n \"__comparison_within_partition\"\n ),\n )\n )\n\n # Now we're windowing the aggregation results without partition specification.\n # The number of rows here will be as the same of partitions, which is expected\n # to be small.\n window = Window.orderBy(F.col(\"__partition_id\")).rowsBetween(-1, -1)\n if order == \"increasing\":\n comparison_col = F.col(\"__partition_min\") >= F.lag(F.col(\"__partition_max\"), 1).over(\n window\n )\n else:\n comparison_col = F.col(\"__partition_min\") <= F.lag(F.col(\"__partition_max\"), 1).over(\n window\n )\n\n sdf = sdf.select(\n comparison_col.alias(\"__comparison_between_partitions\"),\n F.col(\"__comparison_within_partition\"),\n )\n\n ret = sdf.select(\n F.min(F.coalesce(F.col(\"__comparison_between_partitions\"), F.lit(True)))\n & F.min(F.coalesce(F.col(\"__comparison_within_partition\"), F.lit(True)))\n ).collect()[0][0]\n if ret is None:\n return True\n else:\n return ret\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of array dimensions.\n\n Return 1 for Series / Index / MultiIndex.\n\n Examples\n --------\n\n For Series\n\n >>> s = ks.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8])\n >>> s.ndim\n 1\n\n For Index\n\n >>> s.index.ndim\n 1\n\n For MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... 
[1, 1, 1, 1, 1, 2, 1, 2, 2]])\n >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s.index.ndim\n 1\n \"\"\"\n return 1\n\n def astype(self, dtype) -> Union[\"Index\", \"Series\"]:\n \"\"\"\n Cast a Koalas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n\n Examples\n --------\n >>> ser = ks.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.astype('int64')\n Int64Index([1, 2], dtype='int64', name='a')\n \"\"\"\n spark_type = as_spark_type(dtype)\n if not spark_type:\n raise ValueError(\"Type {} not understood\".format(dtype))\n if isinstance(spark_type, BooleanType):\n if isinstance(self.spark.data_type, StringType):\n scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(\n F.length(self.spark.column) > 0\n )\n elif isinstance(self.spark.data_type, (FloatType, DoubleType)):\n scol = F.when(\n self.spark.column.isNull() | F.isnan(self.spark.column), F.lit(True)\n ).otherwise(self.spark.column.cast(spark_type))\n else:\n scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(\n self.spark.column.cast(spark_type)\n )\n elif isinstance(spark_type, StringType):\n scol = F.when(self.spark.column.isNull(), str(None)).otherwise(\n self.spark.column.cast(spark_type)\n )\n else:\n scol = self.spark.column.cast(spark_type)\n return self._with_new_scol(scol)\n\n def isin(self, values) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Check whether `values` are contained in Series or Index.\n\n Return a boolean Series or Index showing whether each element in the Series\n matches an element in the passed sequence of `values` exactly.\n\n Parameters\n ----------\n values : list or set\n The sequence of values to test.\n\n Returns\n -------\n isin : Series (bool dtype) or Index (bool dtype)\n\n Examples\n --------\n >>> s = ks.Series(['lama', 'cow', 'lama', 'beetle', 'lama',\n ... 'hippo'], name='animal')\n >>> s.isin(['cow', 'lama'])\n 0 True\n 1 True\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Passing a single string as ``s.isin('lama')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['lama'])\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n >>> s.rename(\"a\").to_frame().set_index(\"a\").index.isin(['lama'])\n Index([True, False, True, False, True, False], dtype='object', name='a')\n \"\"\"\n if not is_list_like(values):\n raise TypeError(\n \"only list-like objects are allowed to be passed\"\n \" to isin(), you passed a [{values_type}]\".format(values_type=type(values).__name__)\n )\n\n return self._with_new_scol(self.spark.column.isin(list(values)))\n\n def isnull(self) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values. 
Characters such as empty strings '' or\n numpy.inf are not considered NA values\n (unless you set pandas.options.mode.use_inf_as_na = True).\n\n Returns\n -------\n Series or Index : Mask of bool values for each element in Series\n that indicates whether an element is not an NA value.\n\n Examples\n --------\n >>> ser = ks.Series([5, 6, np.NaN])\n >>> ser.isna() # doctest: +NORMALIZE_WHITESPACE\n 0 False\n 1 False\n 2 True\n dtype: bool\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.isna()\n Index([False, False, True], dtype='object', name='a')\n \"\"\"\n from databricks.koalas.indexes import MultiIndex\n\n if isinstance(self, MultiIndex):\n raise NotImplementedError(\"isna is not defined for MultiIndex\")\n if isinstance(self.spark.data_type, (FloatType, DoubleType)):\n return self._with_new_scol(self.spark.column.isNull() | F.isnan(self.spark.column))\n else:\n return self._with_new_scol(self.spark.column.isNull())\n\n isna = isnull\n\n def notnull(self) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Detect existing (non-missing) values.\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True.\n Characters such as empty strings '' or numpy.inf are not considered NA values\n (unless you set pandas.options.mode.use_inf_as_na = True).\n NA values, such as None or numpy.NaN, get mapped to False values.\n\n Returns\n -------\n Series or Index : Mask of bool values for each element in Series\n that indicates whether an element is not an NA value.\n\n Examples\n --------\n Show which entries in a Series are not NA.\n\n >>> ser = ks.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.notna()\n Index([True, True, False], dtype='object', name='a')\n \"\"\"\n from databricks.koalas.indexes import MultiIndex\n\n if isinstance(self, MultiIndex):\n raise NotImplementedError(\"notna is not defined for MultiIndex\")\n return (~self.isnull()).rename(\n self.name # type: ignore\n )\n\n notna = notnull\n\n # TODO: axis, skipna, and many arguments should be implemented.\n def all(self, axis: Union[int, str] = 0) -> bool:\n \"\"\"\n Return whether all elements are True.\n\n Returns True unless there at least one element within a series that is\n False or equivalent (e.g. 
zero or empty)\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n Examples\n --------\n >>> ks.Series([True, True]).all()\n True\n\n >>> ks.Series([True, False]).all()\n False\n\n >>> ks.Series([0, 1]).all()\n False\n\n >>> ks.Series([1, 2, 3]).all()\n True\n\n >>> ks.Series([True, True, None]).all()\n True\n\n >>> ks.Series([True, False, None]).all()\n False\n\n >>> ks.Series([]).all()\n True\n\n >>> ks.Series([np.nan]).all()\n True\n\n >>> df = ks.Series([True, False, None]).rename(\"a\").to_frame()\n >>> df.set_index(\"a\").index.all()\n False\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n sdf = self._internal.spark_frame.select(self.spark.column)\n col = scol_for(sdf, sdf.columns[0])\n\n # Note that we're ignoring `None`s here for now.\n # any and every was added as of Spark 3.0\n # ret = sdf.select(F.expr(\"every(CAST(`%s` AS BOOLEAN))\" % sdf.columns[0])).collect()[0][0]\n # Here we use min as its alternative:\n ret = sdf.select(F.min(F.coalesce(col.cast(\"boolean\"), F.lit(True)))).collect()[0][0]\n if ret is None:\n return True\n else:\n return ret\n\n # TODO: axis, skipna, and many arguments should be implemented.\n def any(self, axis: Union[int, str] = 0) -> bool:\n \"\"\"\n Return whether any element is True.\n\n Returns False unless there at least one element within a series that is\n True or equivalent (e.g. non-zero or non-empty).\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n Examples\n --------\n >>> ks.Series([False, False]).any()\n False\n\n >>> ks.Series([True, False]).any()\n True\n\n >>> ks.Series([0, 0]).any()\n False\n\n >>> ks.Series([0, 1, 2]).any()\n True\n\n >>> ks.Series([False, False, None]).any()\n False\n\n >>> ks.Series([True, False, None]).any()\n True\n\n >>> ks.Series([]).any()\n False\n\n >>> ks.Series([np.nan]).any()\n False\n\n >>> df = ks.Series([True, False, None]).rename(\"a\").to_frame()\n >>> df.set_index(\"a\").index.any()\n True\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n sdf = self._internal.spark_frame.select(self.spark.column)\n col = scol_for(sdf, sdf.columns[0])\n\n # Note that we're ignoring `None`s here for now.\n # any and every was added as of Spark 3.0\n # ret = sdf.select(F.expr(\"any(CAST(`%s` AS BOOLEAN))\" % sdf.columns[0])).collect()[0][0]\n # Here we use max as its alternative:\n ret = sdf.select(F.max(F.coalesce(col.cast(\"boolean\"), F.lit(False)))).collect()[0][0]\n if ret is None:\n return False\n else:\n return ret\n\n # TODO: add frep and axis parameter\n def shift(self, periods=1, fill_value=None) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Shift Series/Index by desired number of periods.\n\n .. note:: the current implementation of shift uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. 
Can be positive or negative.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n The default depends on the dtype of self. For numeric data, np.nan is used.\n\n Returns\n -------\n Copy of input Series/Index, shifted.\n\n Examples\n --------\n >>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]},\n ... columns=['Col1', 'Col2', 'Col3'])\n\n >>> df.Col1.shift(periods=3)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 10.0\n 4 20.0\n Name: Col1, dtype: float64\n\n >>> df.Col2.shift(periods=3, fill_value=0)\n 0 0\n 1 0\n 2 0\n 3 13\n 4 23\n Name: Col2, dtype: int64\n\n >>> df.index.shift(periods=3, fill_value=0)\n Int64Index([0, 0, 0, 0, 1], dtype='int64')\n \"\"\"\n return self._shift(periods, fill_value)\n\n def _shift(self, periods, fill_value, part_cols=()):\n if not isinstance(periods, int):\n raise ValueError(\"periods should be an int; however, got [%s]\" % type(periods).__name__)\n\n col = self.spark.column\n window = (\n Window.partitionBy(*part_cols)\n .orderBy(NATURAL_ORDER_COLUMN_NAME)\n .rowsBetween(-periods, -periods)\n )\n lag_col = F.lag(col, periods).over(window)\n col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)\n return self._with_new_scol(col)\n\n # TODO: Update Documentation for Bins Parameter when its supported\n def value_counts(\n self, normalize=False, sort=True, ascending=False, bins=None, dropna=True\n ) -> \"Series\":\n \"\"\"\n Return a Series containing counts of unique values.\n The resulting object will be in descending order so that the\n first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : boolean, default False\n If True then the object returned will contain the relative\n frequencies of the unique values.\n sort : boolean, default True\n Sort by values.\n ascending : boolean, default False\n Sort in ascending order.\n bins : Not Yet Supported\n dropna : boolean, default True\n Don't include counts of NaN.\n\n Returns\n -------\n counts : Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n\n Examples\n --------\n For Series\n\n >>> df = ks.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})\n >>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE\n 1.0 3\n 0.0 2\n Name: x, dtype: int64\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE\n 1.0 0.6\n 0.0 0.4\n Name: x, dtype: float64\n\n **dropna**\n With `dropna` set to `False` we can also see NaN index values.\n\n >>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE\n 1.0 3\n 0.0 2\n NaN 1\n Name: x, dtype: int64\n\n For Index\n\n >>> idx = ks.Index([3, 1, 2, 3, 4, np.nan])\n >>> idx\n Float64Index([3.0, 1.0, 2.0, 3.0, 4.0, nan], dtype='float64')\n\n >>> idx.value_counts().sort_index()\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n dtype: int64\n\n **sort**\n\n With `sort` set to `False`, the result wouldn't be sorted by number of count.\n\n >>> idx.value_counts(sort=True).sort_index()\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n dtype: int64\n\n **normalize**\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> idx.value_counts(normalize=True).sort_index()\n 1.0 0.2\n 2.0 0.2\n 3.0 0.4\n 4.0 0.2\n dtype: float64\n\n **dropna**\n\n With `dropna` set to `False` we can also see NaN index 
values.\n\n >>> idx.value_counts(dropna=False).sort_index() # doctest: +SKIP\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n NaN 1\n dtype: int64\n\n For MultiIndex.\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [1, 1, 1, 1, 1, 2, 1, 2, 2]])\n >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s.index # doctest: +SKIP\n MultiIndex([( 'lama', 'weight'),\n ( 'lama', 'weight'),\n ( 'lama', 'weight'),\n ( 'cow', 'weight'),\n ( 'cow', 'weight'),\n ( 'cow', 'length'),\n ('falcon', 'weight'),\n ('falcon', 'length'),\n ('falcon', 'length')],\n )\n\n >>> s.index.value_counts().sort_index()\n (cow, length) 1\n (cow, weight) 2\n (falcon, length) 2\n (falcon, weight) 1\n (lama, weight) 3\n dtype: int64\n\n >>> s.index.value_counts(normalize=True).sort_index()\n (cow, length) 0.111111\n (cow, weight) 0.222222\n (falcon, length) 0.222222\n (falcon, weight) 0.111111\n (lama, weight) 0.333333\n dtype: float64\n\n If Index has name, keep the name up.\n\n >>> idx = ks.Index([0, 0, 0, 1, 1, 2, 3], name='koalas')\n >>> idx.value_counts().sort_index()\n 0 3\n 1 2\n 2 1\n 3 1\n Name: koalas, dtype: int64\n \"\"\"\n from databricks.koalas.series import first_series\n\n if bins is not None:\n raise NotImplementedError(\"value_counts currently does not support bins\")\n\n if dropna:\n sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna()\n else:\n sdf_dropna = self._internal.spark_frame.select(self.spark.column)\n index_name = SPARK_DEFAULT_INDEX_NAME\n column_name = self._internal.data_spark_column_names[0]\n sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count()\n if sort:\n if ascending:\n sdf = sdf.orderBy(F.col(\"count\"))\n else:\n sdf = sdf.orderBy(F.col(\"count\").desc())\n\n if normalize:\n sum = sdf_dropna.count()\n sdf = sdf.withColumn(\"count\", F.col(\"count\") / F.lit(sum))\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, index_name)],\n column_labels=self._internal.column_labels,\n data_spark_columns=[scol_for(sdf, \"count\")],\n column_label_names=self._internal.column_label_names,\n )\n\n return first_series(DataFrame(internal))\n\n def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:\n \"\"\"\n Return number of unique elements in the object.\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don’t include NaN in the count.\n approx: bool, default False\n If False, will use the exact algorithm and return the exact number of unique.\n If True, it uses the HyperLogLog approximate algorithm, which is significantly faster\n for large amount of data.\n Note: This parameter is specific to Koalas and is not found in pandas.\n rsd: float, default 0.05\n Maximum estimation error allowed in the HyperLogLog algorithm.\n Note: Just like ``approx`` this parameter is specific to Koalas.\n\n Returns\n -------\n int\n\n See Also\n --------\n DataFrame.nunique: Method nunique for DataFrame.\n Series.count: Count non-NA/null observations in the Series.\n\n Examples\n --------\n >>> ks.Series([1, 2, 3, np.nan]).nunique()\n 3\n\n >>> ks.Series([1, 2, 3, np.nan]).nunique(dropna=False)\n 4\n\n On big data, we recommend using the approximate algorithm to speed up this function.\n The result will be very close to the exact unique count.\n\n >>> ks.Series([1, 2, 3, np.nan]).nunique(approx=True)\n 3\n\n >>> idx = ks.Index([1, 1, 2, None])\n >>> idx\n 
Float64Index([1.0, 1.0, 2.0, nan], dtype='float64')\n\n >>> idx.nunique()\n 2\n\n >>> idx.nunique(dropna=False)\n 3\n \"\"\"\n res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)])\n return res.collect()[0][0]\n\n def _nunique(self, dropna=True, approx=False, rsd=0.05):\n colname = self._internal.data_spark_column_names[0]\n count_fn = partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct\n if dropna:\n return count_fn(self.spark.column).alias(colname)\n else:\n return (\n count_fn(self.spark.column)\n + F.when(\n F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1, 1\n ).otherwise(0)\n ).alias(colname)\n\n def take(self, indices) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n\n Series\n\n >>> kser = ks.Series([100, 200, 300, 400, 500])\n >>> kser\n 0 100\n 1 200\n 2 300\n 3 400\n 4 500\n dtype: int64\n\n >>> kser.take([0, 2, 4]).sort_index()\n 0 100\n 2 300\n 4 500\n dtype: int64\n\n Index\n\n >>> kidx = ks.Index([100, 200, 300, 400, 500])\n >>> kidx\n Int64Index([100, 200, 300, 400, 500], dtype='int64')\n\n >>> kidx.take([0, 2, 4]).sort_values()\n Int64Index([100, 300, 500], dtype='int64')\n\n MultiIndex\n\n >>> kmidx = ks.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\")])\n >>> kmidx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('x', 'c')],\n )\n\n >>> kmidx.take([0, 2]) # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'c')],\n )\n \"\"\"\n if not is_list_like(indices) or isinstance(indices, (dict, set)):\n raise ValueError(\"`indices` must be a list-like except dict or set\")\n if isinstance(self, ks.Series):\n return cast(ks.Series, self.iloc[indices])\n else:\n return self._kdf.iloc[indices].index\n"
] | [
[
"numpy.timedelta64",
"pandas.api.types.is_list_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.24"
],
"scipy": [],
"tensorflow": []
}
] |
abhi526691/Covid-Guard | [
"9c050ef44201c01f512169ffb146ad0da5278ec1",
"9c050ef44201c01f512169ffb146ad0da5278ec1"
] | [
"main.py",
"video_recorder.py"
] | [
"# import the necessary packages\r\nfrom tensorflow.keras.preprocessing.image import img_to_array\r\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\r\nfrom tensorflow.keras.models import load_model\r\nfrom imutils.video import VideoStream,FileVideoStream\r\nimport imutils\r\nimport numpy as np\r\nimport time\r\nimport os\r\nimport cv2\r\nimport math\r\n\r\n\r\ndef mainc():\r\n\r\n\tscale_percent = 20 # percentage of original size\r\n\twidth = 0\r\n\theight = 0\r\n\r\n\tlabelsPath = \"Model/coco.names\" #path for model\r\n\tLABELS = open(labelsPath).read().strip().split(\"\\n\")\r\n\r\n\tnp.random.seed(42)\r\n\tCOLORS = np.random.randint(0, 255, size=(len(LABELS), 3),\r\n\t\tdtype=\"uint8\")\r\n\r\n\tweightsPath = \"Model/yolov3.weights\" #path for yolov3 weights\r\n\tconfigPath = \"Model/yolov3.cfg\" #path for yolov3 configuration file\r\n\r\n\tnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\r\n\r\n\tcap = cv2.VideoCapture(0)\r\n\tif not cap.isOpened():\r\n\t\tprint(\"Could not open webcam\")\r\n\t\texit()\r\n\telse: #get dimension info\r\n\t\twidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n\t\theight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n\t\tdim = (width, height)\r\n\t\tprint('Original Dimensions : ',dim)\r\n\t\twidth = int(width * scale_percent / 100)\r\n\t\theight = int(height * scale_percent / 100)\r\n\t\tdim = (width, height)\r\n\t\tprint('Resized Dimensions : ', dim)\r\n\r\n\r\n\tdef detect_and_predict_mask(frame, faceNet, maskNet):\r\n\t\t# grab the dimensions of the frame and then construct a blob from it\r\n\t\t(h, w) = frame.shape[:2]\r\n\t\tblob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),\r\n\t\t\t(104.0, 177.0, 123.0))\r\n\t\t# pass the blob through the network and obtain the face detections\r\n\t\tfaceNet.setInput(blob)\r\n\t\tdetections = faceNet.forward()\r\n\t\t# initialize our list of faces, their corresponding locations,\r\n\t\t# and the list of predictions from our face mask network\r\n\t\tfaces = []\r\n\t\tlocs = []\r\n\t\tpreds = []\r\n\r\n\r\n\t\t# loop over the detections\r\n\t\tfor i in range(0, detections.shape[2]):\r\n\t\t\t# extract the confidence (i.e., probability) associated with\r\n\t\t\t# the detection\r\n\t\t\tconfidence = detections[0, 0, i, 2]\r\n\t\t\t# filter out weak detections by ensuring the confidence is\r\n\t\t\t# greater than the minimum confidence\r\n\t\t\tif confidence > 0.5:\r\n\t\t\t\t# compute the (x, y)-coordinates of the bounding box for\r\n\t\t\t\t# the object\r\n\t\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\r\n\t\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\r\n\t\t\t\t# ensure the bounding boxes fall within the dimensions of\r\n\t\t\t\t# the frame\r\n\t\t\t\t(startX, startY) = (max(0, startX), max(0, startY))\r\n\t\t\t\t(endX, endY) = (min(w - 1, endX), min(h - 1, endY))\r\n\r\n\r\n\t\t\t\t# extract the face ROI, convert it from BGR to RGB channel\r\n\t\t\t\t# ordering, resize it to 224x224, and preprocess it\r\n\t\t\t\tface = frame[startY:endY, startX:endX]\r\n\t\t\t\tface = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\r\n\t\t\t\tface = cv2.resize(face, (224, 224))\r\n\t\t\t\tface = img_to_array(face)\r\n\t\t\t\tface = preprocess_input(face)\r\n\t\t\t\t# add the face and bounding boxes to their respective\r\n\t\t\t\t# lists\r\n\t\t\t\tfaces.append(face)\r\n\t\t\t\tlocs.append((startX, startY, endX, endY))\r\n\r\n\r\n\t\t# only make a predictions if at least one face was detected\r\n\t\tif len(faces) > 0:\r\n\t\t\t# for faster inference we'll make batch predictions on 
*all*\r\n\t\t\t# faces at the same time rather than one-by-one predictions\r\n\t\t\t# in the above `for` loop\r\n\t\t\tfaces = np.array(faces, dtype=\"float32\")\r\n\t\t\tpreds = maskNet.predict(faces, batch_size=32)\r\n\t\t# return a 2-tuple of the face locations and their corresponding\r\n\t\t# locations\r\n\t\treturn (locs, preds)\r\n\r\n\r\n\r\n\tbase_dir=os.getcwd()\r\n\tbase_dir=base_dir.replace('\\\\','/')\r\n\r\n\tprint(base_dir)\r\n\tdataset_path=base_dir+'/dataset'\r\n\taccuracy_plot_dir=base_dir+'/Model'\r\n\tmodel_store_dir=base_dir+'/Model/mask_detector.model'\r\n\texample=base_dir+'/Image/1.jpg'\r\n\r\n\tconfidence=0.4\r\n\r\n\r\n\tface_detector_caffe=base_dir+'/Face Detector/res10_300x300_ssd_iter_140000.caffemodel'\r\n\r\n\r\n\r\n\t# load our serialized face detector model from disk\r\n\tprint(\"[INFO] loading face detector model...\")\r\n\tprototxtPath = base_dir+'/Face Detector/deploy.prototxt'\r\n\tweightsPath = face_detector_caffe\r\n\tfaceNet = cv2.dnn.readNet(prototxtPath, weightsPath)\r\n\t# load the face mask detector model from disk\r\n\tprint(\"[INFO] loading face mask detector model...\")\r\n\tmaskNet = load_model(model_store_dir)\r\n\t# initialize the video stream and allow the camera sensor to warm up\r\n\tprint(\"[INFO] starting video stream...\")\r\n\tvs = VideoStream(src=0).start()\r\n\t#time.sleep(2.0)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t# loop over the frames from the video stream\r\n\titer=0\r\n\twhile True:\r\n\r\n\r\n\r\n\t\t# grab the frame from the threaded video stream and resize it\r\n\t\t# to have a maximum width of 400 pixels\r\n\t\tframe = vs.read()\r\n\t\tframe = imutils.resize(frame, width=1200)\r\n\r\n\t\tresized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\r\n\r\n\t\t(H, W) = frame.shape[:2]\r\n\t\tln = net.getLayerNames()\r\n\t\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\r\n\t\tblob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (224, 224), swapRB=True, crop=False)\r\n\t\tnet.setInput(blob)\r\n\t\tstart = time.time()\r\n\t\tlayerOutputs = net.forward(ln)\r\n\t\tend = time.time()\r\n\t\t# print(\"Frame Prediction Time : {:.6f} seconds\".format(end - start))\r\n\t\tboxes = []\r\n\t\tconfidences = []\r\n\t\tclassIDs = []\r\n\r\n\t\tfor output in layerOutputs:\r\n\t\t\tfor detection in output:\r\n\t\t\t\tscores = detection[5:]\r\n\t\t\t\tclassID = np.argmax(scores)\r\n\t\t\t\tconfidence = scores[classID]\r\n\t\t\t\tif confidence > 0.1 and classID == 0:\r\n\t\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\r\n\t\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\r\n\t\t\t\t\tx = int(centerX - (width / 2))\r\n\t\t\t\t\ty = int(centerY - (height / 2))\r\n\t\t\t\t\tboxes.append([x, y, int(width), int(height)])\r\n\t\t\t\t\tconfidences.append(float(confidence))\r\n\t\t\t\t\tclassIDs.append(classID)\r\n\r\n\t\tif iter % 3 == 0:\r\n\r\n\t\t\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)\r\n\t\t\tind = []\r\n\t\t\tfor i in range(0, len(classIDs)):\r\n\t\t\t\tif (classIDs[i] == 0):\r\n\t\t\t\t\tind.append(i)\r\n\t\t\ta = []\r\n\t\t\tb = []\r\n\r\n\t\t\tif len(idxs) > 0:\r\n\t\t\t\tfor i in idxs.flatten():\r\n\t\t\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\r\n\t\t\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\r\n\t\t\t\t\ta.append(x)\r\n\t\t\t\t\tb.append(y)\r\n\r\n\t\t\tdistance = []\r\n\t\t\tnsd = []\r\n\t\t\tfor i in range(0, len(a) - 1):\r\n\t\t\t\tfor k in range(1, len(a)):\r\n\t\t\t\t\tif (k == i):\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tx_dist = (a[k] - a[i])\r\n\t\t\t\t\t\ty_dist = (b[k] - 
b[i])\r\n\t\t\t\t\t\td = math.sqrt(x_dist * x_dist + y_dist * y_dist)\r\n\t\t\t\t\t\tdistance.append(d)\r\n\t\t\t\t\t\tif (d <= 6912):\r\n\t\t\t\t\t\t\tnsd.append(i)\r\n\t\t\t\t\t\t\tnsd.append(k)\r\n\t\t\t\t\t\tnsd = list(dict.fromkeys(nsd))\r\n\t\t\t\t\t# print(nsd)\r\n\r\n\t\t\tcolor = (0, 0, 255)\r\n\t\t\tfor i in nsd:\r\n\t\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\r\n\t\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\r\n\t\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\r\n\t\t\t\ttext = \"Alert\"\r\n\t\t\t\tcv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\r\n\t\t\tcolor = (0, 255, 0)\r\n\t\t\tif len(idxs) > 0:\r\n\t\t\t\tfor i in idxs.flatten():\r\n\t\t\t\t\tif (i in nsd):\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\r\n\t\t\t\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\r\n\t\t\t\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\r\n\t\t\t\t\t\ttext = 'OK'\r\n\t\t\t\t\t\tcv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\r\n\r\n\t\ttext = \"Social Distancing Violators: {}\".format(len(nsd))\r\n\t\tcv2.putText(frame, text, (660, frame.shape[0] - 45),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)\r\n\r\n\t\tcv2.putText(frame, \"Covid Guard: Team TrojanWave\", (140, 45),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\r\n\t\tcv2.rectangle(frame, (20, 60), (1170, 100), (170, 170, 170), 2)\r\n\t\tcv2.putText(frame, \"COLOR CODE: RISK ANALYSIS\", (30, 85),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)\r\n\t\tcv2.putText(frame, \"--- GREEN : SAFE\", (500, 85),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\r\n\t\tcv2.putText(frame, \"--- RED: UNSAFE\", (1000, 85),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)\r\n\r\n\r\n\t\ttot_str = \"TOTAL: \" + str(len(idxs))\r\n\t\thigh_str = \"HIGH RISK: \" + str(len(nsd))\r\n\t\tlow_str = \"LOW RISK: \" + str(0)\r\n\t\tsafe_str = \"SAFE: \" + str(len(idxs)-len(nsd))\r\n\r\n\t\tsub_img = frame[H - 270: H , 0:240]\r\n\t\tblack_rect = np.ones(sub_img.shape, dtype=np.uint8) * 0\r\n\r\n\t\tres = cv2.addWeighted(sub_img, 0.8, black_rect, 0.2, 1.0)\r\n\r\n\t\tframe[H - 270:H, 0:240] = res\r\n\r\n\t\tcv2.putText(frame, tot_str, (10, H - 235),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)\r\n\t\tcv2.putText(frame, safe_str, (10, H - 200),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\r\n\t\tcv2.putText(frame, low_str, (10, H - 165),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 120, 255), 2)\r\n\t\tcv2.putText(frame, high_str, (10, H - 130),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 150), 2)\r\n\r\n\t\t#cv2.imshow(\"Social Distancing Detector\", frame)\r\n\r\n\t\tcv2.rectangle(frame, (10, H-100 ), (600, H-10), (170, 170, 170), 2)\r\n\t\tcv2.putText(frame, \"COLOR CODE: MASK DETECTION\", (40, H-40),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2)\r\n\t\tcv2.putText(frame, \"--- RED : NO MASK\", (420, H-70),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)\r\n\t\tcv2.putText(frame, \"--- GREEN : MASK\", (420, H-35),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\r\n\r\n\t\t# cv2.putText(frame, \"-- GREEN: SAFE\", (565, 150),\r\n\t\t# \t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\r\n\r\n\t\t# detect faces in the frame and determine if they are wearing a\r\n\t\t# face mask or not\r\n\t\t(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)\r\n\r\n\t\t# loop over the detected face 
locations and their corresponding\r\n\t\t# locations\r\n\t\tfor (box, pred) in zip(locs, preds):\r\n\t\t\t# unpack the bounding box and predictions\r\n\t\t\t(startX, startY, endX, endY) = box\r\n\t\t\t(mask, withoutMask) = pred\r\n\t\t\t# determine the class label and color we'll use to draw\r\n\t\t\t# the bounding box and text\r\n\t\t\tlabel = \"Mask\" if mask > withoutMask else \"No Mask\"\r\n\t\t\tcolor = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\r\n\t\t\t# include the probability in the label\r\n\t\t\tlabel = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\r\n\t\t\t# display the label and bounding box rectangle on the output\r\n\t\t\t# frame\r\n\t\t\tcv2.putText(frame, label, (startX, startY - 10),\r\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\r\n\t\t\tcv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\r\n\r\n\r\n\t\t# show the output frame\r\n\t\tcv2.namedWindow('frame', cv2.WINDOW_NORMAL)\r\n\t\tcv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\r\n\t\tcv2.imshow('frame', frame)\r\n\t\tkey = cv2.waitKey(1) & 0xFF\r\n\t\t# if the `q` key was pressed, break from the loop\r\n\r\n\t\r\n\t\tif key == ord(\"q\"):\r\n\t\t\tbreak\r\n\r\n\r\n\r\n\t# do a bit of cleanup\r\n\tcv2.destroyAllWindows()\r\n\tvs.stop()\r\n\r\n",
"# import the necessary packages\r\nimport tensorflow\r\nimport numpy as np\r\nimport pyautogui\r\nimport cv2\r\nimport os\r\n\r\n\r\n\r\ndef start():\r\n\r\n output = \"Output/video.avi\"\r\n img = pyautogui.screenshot()\r\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\r\n # get info from img\r\n height, width, channels = img.shape\r\n # Define the codec and create VideoWriter object\r\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\r\n out = cv2.VideoWriter(output, fourcc, 20.0, (width, height))\r\n\r\n\r\n\r\n while True:\r\n # make a screenshot\r\n img = pyautogui.screenshot()\r\n # convert these pixels to a proper numpy array to work with OpenCV\r\n frame = np.array(img)\r\n # convert colors from BGR to RGB\r\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n # write the frame\r\n out.write(frame)\r\n # show the frame\r\n #cv2.imshow(\"screenshot\", frame)\r\n # if the user clicks q, it exits\r\n k = cv2.waitKey(1) & 0xFF\r\n if k == ord(\"q\"):\r\n break\r\n\r\n # make sure everything is closed when exited\r\n cv2.destroyAllWindows()\r\n out.release()\r\n\r\nif __name__==\"__main__\":\r\n start()\r\n"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.random.seed",
"tensorflow.keras.applications.mobilenet_v2.preprocess_input",
"numpy.ones",
"numpy.argmax",
"numpy.array",
"tensorflow.keras.preprocessing.image.img_to_array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fmamitrotta/pyNastran | [
"90f957887a4f68f8e58b07c15e1ac69c66b9c6f4"
] | [
"pyNastran/op2/tables/geom/ept.py"
] | [
"\"\"\"\ndefines readers for BDF objects in the OP2 EPT/EPTS table\n\"\"\"\n#pylint: disable=C0103,R0914\nfrom __future__ import annotations\nfrom struct import unpack, Struct\nfrom functools import partial\nfrom typing import Tuple, List, TYPE_CHECKING\n\nimport numpy as np\n\n#from pyNastran import is_release\nfrom pyNastran.bdf.cards.properties.mass import PMASS, NSM, NSML\nfrom pyNastran.bdf.cards.properties.bars import PBAR, PBARL, PBEND, PBEAM3\nfrom pyNastran.bdf.cards.properties.beam import PBEAM, PBEAML, PBCOMP\nfrom pyNastran.bdf.cards.properties.bush import PBUSH, PBUSHT\nfrom pyNastran.bdf.cards.properties.damper import PDAMP, PVISC\nfrom pyNastran.bdf.cards.properties.properties import PFAST, PGAP\nfrom pyNastran.bdf.cards.properties.rods import PROD, PTUBE\nfrom pyNastran.bdf.cards.properties.shell import PSHEAR, PSHELL, PCOMP\nfrom pyNastran.bdf.cards.properties.solid import PSOLID\nfrom pyNastran.bdf.cards.properties.springs import PELAS, PELAST\n\nfrom pyNastran.bdf.cards.thermal.thermal import PCONV, PHBDY, PCONVM\n# PCOMPG, PBUSH1D, PBEAML, PBEAM3\nfrom pyNastran.op2.op2_interface.op2_reader import (\n mapfmt, reshape_bytes_block_size) # reshape_bytes_block,\nfrom .utils import get_minus1_start_end\nfrom .geom2 import DoubleCardError\nif TYPE_CHECKING: # pragma: no cover\n from pyNastran.op2.op2_geom import OP2Geom\n\n\nclass EPT:\n \"\"\"defines methods for reading op2 properties\"\"\"\n\n @property\n def size(self) -> int:\n return self.op2.size\n @property\n def factor(self) -> int:\n return self.op2.factor\n\n def _read_fake(self, data: bytes, n: int) -> int:\n return self.op2._read_fake(data, n)\n\n def read_ept_4(self, data: bytes, ndata: int):\n return self.op2._read_geom_4(self.ept_map, data, ndata)\n\n def __init__(self, op2: OP2Geom):\n self.op2 = op2\n self.ept_map = {\n (3201, 32, 55): ['NSM', self._read_nsm], # record 2\n (52, 20, 181): ['PBAR', self._read_pbar], # record 11 - buggy\n (9102, 91, 52): ['PBARL', self._read_pbarl], # record 12 - almost there...\n (2706, 27, 287): ['PCOMP', self._read_pcomp], # record 22 - buggy\n (302, 3, 46): ['PELAS', self._read_pelas], # record 39\n (2102, 21, 121): ['PGAP', self._read_pgap], # record 42\n (902, 9, 29): ['PROD', self._read_prod], # record 49\n (1002, 10, 42): ['PSHEAR', self._read_pshear], # record 50\n (2402, 24, 281): ['PSOLID', self._read_psolid], # record 51\n (2302, 23, 283): ['PSHELL', self._read_pshell], # record 52\n (1602, 16, 30): ['PTUBE', self._read_ptube], # record 56\n\n (5402, 54, 262): ['PBEAM', self._read_pbeam], # record 14 - not done\n (9202, 92, 53): ['PBEAML', self._read_pbeaml], # record 15\n (2502, 25, 248): ['PBEND', self._read_pbend], # record 16 - not done\n (1402, 14, 37): ['PBUSH', self._read_pbush], # record 19 - not done\n (3101, 31, 219): ['PBUSH1D', self._read_pbush1d], # record 20 - not done\n (152, 19, 147): ['PCONEAX', self._read_pconeax], # record 24 - not done\n (11001, 110, 411): ['PCONV', self._read_pconv], # record 25 - not done\n # record 26\n (202, 2, 45): ['PDAMP', self._read_pdamp], # record 27 - not done\n (2802, 28, 236): ['PHBDY', self._read_phbdy], # record 43 - not done\n (402, 4, 44): ['PMASS', self._read_pmass], # record 48\n (1802, 18, 31): ['PVISC', self._read_pvisc], # record 59\n (10201, 102, 400): ['PVAL', self._read_pval], # record 58 - not done\n (2606, 26, 289): ['VIEW', self._read_view], # record 62 - not done\n (3201, 32, 991) : ['NSM', self._read_nsm_2], # record\n (3301, 33, 992) : ['NSM1', self._read_nsm1], # record\n (3701, 37, 995) : ['NSML1', 
self._read_nsml1_nx], # record\n (3601, 36, 62): ['NSML1', self._read_nsml1_msc], # record 7\n (15006, 150, 604): ['PCOMPG', self._read_pcompg], # record\n\n (702, 7, 38): ['PBUSHT', self._read_pbusht], # record 1\n (3301, 33, 56): ['NSM1', self._read_fake], # record 3\n (3401, 34, 57) : ['NSMADD', self._read_fake], # record 5\n (3501, 35, 58): ['NSML', self._read_fake], # record 6\n (3501, 35, 994) : ['NSML', self._read_nsml],\n (1502, 15, 36): ['PAABSF', self._read_fake], # record 8\n (8300, 83, 382): ['PACABS', self._read_fake], # record 9\n (8500, 85, 384): ['PACBAR', self._read_fake], # record 10\n (5403, 55, 349): ['PBCOMP', self._read_pbcomp], # record 13\n (13301, 133, 509): ['PBMSECT', self._read_fake], # record 17\n (2902, 29, 420): ['PCONVM', self._read_pconvm], # record 26\n (1202, 12, 33): ['PDAMPT', self._read_pdampt], # record 28\n (8702, 87, 412): ['PDAMP5', self._read_pdamp5], # record 29\n (6802, 68, 164): ['PDUM8', self._read_fake], # record 37\n (6902, 69, 165): ['PDUM9', self._read_fake], # record 38\n (1302, 13, 34): ['PELAST', self._read_pelast], # record 41\n (12001, 120, 480): ['PINTC', self._read_fake], # record 44\n (12101, 121, 484): ['PINTS', self._read_fake], # record 45\n (4606, 46, 375): ['PLPLANE', self._read_plplane], # record 46\n (4706, 47, 376): ['PLSOLID', self._read_plsolid], # record 47\n (10301, 103, 399): ['PSET', self._read_pset], # record 57\n (3002, 30, 415): ['VIEW3D', self._read_fake], # record 63\n\n (13501, 135, 510) : ['PFAST', self._read_pfast_msc], # MSC-specific\n (3601, 36, 55) : ['PFAST', self._read_pfast_nx], # NX-specific\n (3801, 38, 979) : ['PPLANE', self._read_pplane],\n (11801, 118, 560) : ['PWELD', self._read_fake],\n (3401, 34, 993) : ['NSMADD', self._read_nsmadd],\n (9300, 93, 684) : ['ELAR', self._read_fake],\n (9400, 94, 685) : ['ELAR2', self._read_fake],\n (16006, 160, 903) : ['PCOMPS', self._read_fake],\n\n # MSC-specific\n (14602, 146, 692): ['PSLDN1', self._read_fake],\n (16502, 165, 916): ['PAXSYMH', self._read_fake],\n (13201, 132, 513): ['PBRSECT', self._read_fake],\n\n (13701, 137, 638): ['PWSEAM', self._read_fake],\n (7001, 70, 632): ['???', self._read_fake],\n (15106, 151, 953): ['PCOMPG1', self._read_fake],\n (3901, 39, 969): ['PSHL3D', self._read_fake],\n (17006, 170, 901): ['MATCID', self._read_fake],\n\n (9601, 96, 691): ['PJOINT', self._read_fake],\n (16502, 165, 916): ['???', self._read_fake],\n\n (9701, 97, 692): ['PJOINT2', self._read_fake],\n (13401, 134, 611): ['PBEAM3', self._read_pbeam3],\n (8901, 89, 905): ['PSOLCZ', self._read_fake],\n (9801, 98, 698): ['DESC', self._read_desc],\n #(9701, 97, 692): ['???', self._read_fake],\n #(9701, 97, 692): ['???', self._read_fake],\n #(9701, 97, 692): ['???', self._read_fake],\n\n }\n\n def _add_op2_property(self, prop):\n \"\"\"helper method for op2\"\"\"\n op2 = self.op2\n #if prop.pid > 100000000:\n #raise RuntimeError('bad parsing; pid > 100000000...%s' % str(prop))\n #print(str(prop)[:-1])\n ntables = op2.table_names.count(b'EPT') + op2.table_names.count(b'EPTS')\n pid = prop.pid\n allow_overwrites = (\n ntables > 1 and\n pid in op2.properties and\n op2.properties[pid].type == prop.type)\n op2._add_methods._add_property_object(prop, allow_overwrites=allow_overwrites)\n\n def _add_op2_property_mass(self, prop):\n \"\"\"helper method for op2\"\"\"\n op2 = self.op2\n #if prop.pid > 100000000:\n #raise RuntimeError('bad parsing; pid > 100000000...%s' % str(prop))\n #print(str(prop)[:-1])\n ntables = op2.table_names.count(b'EPT') + op2.table_names.count(b'EPTS')\n 
pid = prop.pid\n allow_overwrites = (\n ntables > 1 and\n pid in op2.properties_mass and\n op2.properties_mass[pid].type == prop.type)\n op2._add_methods._add_property_mass_object(prop, allow_overwrites=allow_overwrites)\n\n def _add_pconv(self, prop: PCONV) -> None:\n if prop.pconid > 100000000:\n raise RuntimeError('bad parsing pconid > 100000000...%s' % str(prop))\n self.op2._add_methods._add_convection_property_object(prop)\n\n# HGSUPPR\n\n def _read_desc(self, data: bytes, n: int) -> int:\n \"\"\"\n RECORD – DESC(9801,98,698)\n\n Word Name Type Description\n 1 DID I Description identification number\n 2 NWORDS I Number of words for the description string\n 3 DESC CHAR4 Description\n Words 3 repeats NWORDS times\n\n data = (1, 14, 'FACE CONTACT(1) ')\n \"\"\"\n op2 = self.op2\n assert self.size == 4, 'DESC size={self.size} is not supported'\n #op2.show_data(data[n:], types='ifs')\n struct_2i = Struct(op2._endian + b'2i')\n while n < len(data):\n\n datai = data[n:n+8]\n desc_id, nwords = struct_2i.unpack(datai)\n #print(desc_id, nwords)\n ndatai = 8 + nwords * 4\n word_bytes = data[n+8:n+ndatai]\n word = word_bytes.decode('ascii').rstrip()\n assert len(word_bytes) == nwords * 4\n #print('word_bytes =', word_bytes)\n op2.log.warning(f'geom skipping DESC={desc_id}: {word!r}')\n n += ndatai\n assert n == len(data), n\n return n\n\n def _read_nsml(self, data: bytes, n: int) -> int:\n \"\"\"\n NX 2019.2\n RECORD – NSML(3501, 35, 994)\n\n Defines a set of lumped nonstructural mass by ID.\n Word Name Type Description\n 1 SID I Set identification number\n 2 PROP(2) CHAR4 Set of properties or elements\n 4 ID I Property of element identification number\n 5 VALUE RS Lumped nonstructural mass value\n Words 4 and 5 repeat until -1 occurs\n\n ints = (3, ELEMENT, 0, 200, 0.7, -1, 4, PSHELL, 0, 6401, 4.2, -1)\n floats = (3, ELEMENT, 0.0, 200, 0.7, -1, 4, PSHELL, 0.0, 6401, 4.2, -1)\n\n \"\"\"\n op2 = self.op2\n n0 = n\n #op2.show_data(data[n:])\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n floats = np.frombuffer(data[n:], op2.fdtype8).copy()\n istart, iend = get_minus1_start_end(ints)\n\n ncards = 0\n size = self.size\n for (i0, i1) in zip(istart, iend):\n #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)\n assert ints[i1] == -1, ints[i1]\n sid = ints[i0]\n prop_bytes = data[n0+(i0+1)*size:n0+(i0+3)*size]\n #print(sid, prop_bytes)\n ids = ints[i0+4:i1:2].tolist()\n values = floats[i0+5:i1:2].tolist()\n #print(ids, values)\n assert len(ids) == len(values)\n nsm_type = prop_bytes.decode('latin1').rstrip()\n nsml = op2.add_nsml(sid, nsm_type, ids, values)\n #print(nsml)\n str(nsml)\n n += (i1 - i0 + 1) * size\n ncards += 1\n op2.card_count['NSML'] = ncards\n return n\n\n def _read_nsmadd(self, data: bytes, n: int) -> int:\n \"\"\"\n NX 2019.2\n (3401, 34, 993)\n\n RECORD – NSMADD(3401,34,993)\n Combines the nonstructural mass inputs.\n\n Word Name Type Description\n 1 SID I Set identification number\n 2 ID I Set of properties or elements\n Word 2 repeats until End of Record\n\n (1, 2, 3, 4, -1)\n \"\"\"\n op2 = self.op2\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n istart, iend = get_minus1_start_end(ints)\n\n ncards = 0\n istart = [0] + list(iend + 1)\n size = self.size\n for (i0, i1) in zip(istart, iend):\n assert ints[i1] == -1, ints[i1]\n sid, *nsms = ints[i0:i1]\n nsmadd = op2.add_nsmadd(sid, nsms)\n #print(nsmadd)\n str(nsmadd)\n n += (i1 - i0 + 1) * size\n ncards += 1\n op2.card_count['NSMADD'] = ncards\n return n\n\n def _read_nsml1_nx(self, data: bytes, n: int) -> int:\n \"\"\"\n 
NSML1(3701, 37, 995)\n Alternate form of NSML entry. Defines lumped nonstructural mass entries by VALUE, ID list.\n\n Word Name Type Description\n 1 SID I Set identification number\n 2 PROP CHAR4 Set of properties\n 3 TYPE CHAR4 Set of elements\n 4 VALUE RS Lumped nonstructural mass value\n 5 SPECOPT I Specification option\n SPECOPT=1 By IDs\n 6 ID I Property of element identification number\n Word 6 repeats until -1 occurs\n SPECOPT=2 All\n 6 ALL(2) CHAR4 Keyword ALL\n Words 6 and 7 repeat until -1 occurs\n SPECOPT=3 Thru range\n 6 ID1 I Starting identification number\n 7 THRU(2) CHAR4 Keyword THRU\n 9 ID2 I Ending identification number\n Words 6 through 9 repeat until -1 occurs\n SPECOPT=4 Thru range with by\n 6 ID1 I Starting identification number\n 7 THRU(2) CHAR4 Keyword THRU\n 9 ID2 I Ending identification number\n 10 BY(2) CHAR4 Keyword BY\n 12 N I Increment\n Words 6 through 12 repeat until -1 occurs\n\n data = (\n 3701, 37, 995,\n 1, ELEMENT, 466.2,\n 3, 249311, THRU, 250189, -1,\n 3, 250656, THRU, 251905, -1,\n 3, 270705, THRU, 275998, -1,\n 3, 332687, THRU, 334734, -1,\n -2,\n\n 2, ELEMENT, 77.7,\n 3, 225740, THRU 227065, -1,\n 3, 227602, THRU, 228898, -1,\n 3, 229435, THRU, 230743, -1,\n 3, 231280, THRU, 233789, -1,\n 3, 233922, THRU, 235132, -1,\n 3, 235265, THRU, 236463, -1,\n 3, 338071, THRU, 341134, -1, -2)\n \"\"\"\n #ints = (1, ELEMENT, 466.2,\n # 3, 249311, THRU, 250189, -1,\n # 3, 250656, THRU, 251905, -1,\n # 3, 270705, THRU, 275998, -1,\n # 3, 332687, THRU, 334734, -1,\n # -2,\n #\n # 2, ELEMENT, 77.7,\n # 3, 225740, THRU 227065, -1,\n # 3, 227602, THRU, 228898, -1,\n # 3, 229435, THRU, 230743, -1,\n # 3, 231280, THRU, 233789, -1,\n # 3, 233922, THRU, 235132, -1,\n # 3, 235265, THRU, 236463, -1,\n # 3, 338071, THRU, 341134, -1, -2)\n op2 = self.op2\n n0 = n\n #op2.show_data(data[n:])\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n floats = np.frombuffer(data[n:], op2.fdtype8).copy()\n iminus2 = np.where(ints == -2)[0]\n istart = [0] + list(iminus2[:-1] + 1)\n iend = iminus2\n #print(istart, iend)\n assert len(data[n:]) > 12, data[n:]\n #op2.show_data(data[n:], types='ifs')\n\n ncards = 0\n istart = [0] + list(iend + 1)\n size = self.size\n for (i0, i1) in zip(istart, iend):\n #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)\n assert ints[i1] == -2, ints[i1]\n sid = ints[i0]\n nsm_type = data[n0+(i0+1)*size:n0+(i0+2)*size].decode('latin1').rstrip()\n value = float(floats[i0+3])\n #print(f'sid={sid} nsm_type={nsm_type} value={value}')\n\n iminus1 = i0 + np.where(ints[i0:i1] == -1)[0]\n #print('-1', iminus1)\n #print('-2', iminus2)\n istart2 = [i0 + 4] + list(iminus1[:-1] + 1)\n iend2 = iminus1\n #print(istart2, iend2)\n\n for istarti, iendi in zip(istart2, iend2):\n #print(istarti, iendi)\n spec_opt = ints[istarti] # 4\n #print(f'ints[{istarti}] = spec_opt = {spec_opt}')\n if spec_opt == 1:\n # 6 ID I Property of element identification number\n\n ivalues = list(range(istarti, iendi))\n #print('ivalues =', ivalues)\n pid_eids = ints[ivalues].tolist()\n #print('pid_eids =', pid_eids)\n elif spec_opt == 3:\n # datai = (3, 249311, 'THRU ', 250189)\n #print(f'i0={i0}')\n #datai = data[n0+(i0+6)*size:n0+i1*size]\n #op2.show_data(datai)\n ids = ints[istarti:iendi]\n istart = ids[1]\n iend = ids[-1]\n pid_eids = list(range(istart, iend+1))\n else:\n raise NotImplementedError(spec_opt)\n\n if nsm_type == 'ELEM':\n nsm_type = 'ELEMENT'\n #for pid_eid in pid_eids:\n #nsml = op2.add_nsml1(sid, nsm_type, pid_eids, [value])\n assert len(pid_eids) > 0, pid_eids\n nsml1 = 
op2.add_nsml1(sid, nsm_type, value, pid_eids)\n #print(nsml1)\n str(nsml1)\n n += (i1 - i0 + 1) * size\n ncards += 1\n op2.card_count['NSML'] = ncards\n return n\n\n def _read_nsml1_msc(self, data: bytes, n: int) -> int:\n r\"\"\"\n NSML1(3601, 36, 62)\n\n Word Name Type Description\n 1 SID I Set identification number\n 2 PROP CHAR4 Set of property or elements\n 3 VALUE RS Lumped nonstructural mass value\n 4 SPECOPT I Specification option\n SPECOPT=1 By IDs\n 5 IDs , =FLG1LIST in ixidlst.prm\n 6 ID I Property or element ID\n Word 6 repeats until End of Record\n SPECOPT=2 means ALL, =FLG1ALL in ixidlst.prm\n 5 ALL(2) CHAR4 Keyword ALL\n Words 5 through 6 repeat until End of Record\n SPECOPT=3 means THRU range, =FLG1THRU in ixidlst.prm\n 5 ID1 I Starting ID\n 6 THRU(2) CHAR4 Keyword THRU\n 8 ID2 I Ending ID\n Words 5 through 8 repeat until End of Record\n SPECOPT=4 means THRU range with BY, =FLG1THBY in ixidlst.prm\n 5 ID1 I Starting ID\n 6 THRU(2) CHAR4 Keyword THRU\n 8 ID2 I Ending ID\n 9 BY(2) CHAR4 Keyword BY\n 11 N I Increment\n Words 5 through 11 repeat until End of Record\n End SPECOPT\n Words 4 through max repeat until End of Record\n\n C:\\MSC.Software\\simcenter_nastran_2019.2\\tpl_post2\\elsum15.op2\n\n data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)\n\n \"\"\"\n op2 = self.op2\n op2.log.info(f'geom skipping NSML1 in {op2.table_name}; ndata={len(data)-12}')\n #op2.show_data(data[n:], types='ifs')\n #bbb\n return len(data)\n\n def _read_nsm1(self, data: bytes, n: int) -> int:\n \"\"\"\n NSM1(3301, 33, 992)\n\n Defines the properties of a nonstructural mass.\n Word Name Type Description\n 1 SID I Set identification number\n 2 PROP CHAR4 Set of properties\n 3 TYPE CHAR4 Set of elements\n 4 ORIGIN I Entry origin\n 5 VALUE RS Nonstructural mass value\n 6 SPECOPT I Specification option\n SPECOPT=1 By IDs\n 7 ID I\n Word 7 repeats until -1 occurs\n SPECOPT=2 All\n 7 ALL(2) CHAR4\n Words 7 and 8 repeat until -1 occurs\n SPECOPT=3 Thru range\n 7 ID I\n 8 THRU(2) CHAR4\n 10 ID I\n Words 7 through 10 repeat until -1 occurs\n SPECOPT=4 Thru range with by\n 7 ID I\n 8 THRU(2) CHAR4\n 10 ID I\n 11 BY(2) CHAR4\n 13 N I\n Words 7 through 13 repeat until -1 occurs\n\n data = (3, PCOMP, 0, 0.37, 2, ALL, -1,\n 4, ELEMENT, 2, 2.1, 1, 3301, -1)\n\n \"\"\"\n op2 = self.op2\n #op2.show_data(data[n:], types='ifs')\n n0 = n\n #op2.show_data(data[n:])\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n floats = np.frombuffer(data[n:], op2.fdtype8).copy()\n istart, iend = get_minus1_start_end(ints)\n\n ncards = 0\n size = self.size\n for (i0, i1) in zip(istart, iend):\n assert ints[i1] == -1, ints[i1]\n # 1 SID I Set identification number\n sid = ints[i0]\n\n # 2 PROP CHAR4 Set of properties\n # 3 TYPE CHAR4 Set of elements\n # 4 ORIGIN I Entry origin\n # 5 VALUE RS Nonstructural mass value\n # 6 SPECOPT I Specification option\n nsm_type = data[n0+(i0+1)*size:n0+(i0+3)*size].decode('latin1').rstrip()\n zero_two = ints[i0+3]\n value = float(floats[i0+4])\n spec_opt = ints[i0+5]\n assert zero_two in [0, 2], zero_two\n #nii = 6\n #print(ints[i0+nii:i1])\n #print(floats[i0+nii:i1])\n #print(sid, nsm_type, value, spec_opt)\n\n iminus1 = i0 + np.where(ints[i0:i1] == -1)[0]\n #print('-1', iminus1)\n #print('-2', iminus2)\n istart2 = [i0 + 5] + list(iminus1[:-1] + 1)\n iend2 = iminus1\n #print(istart2, iend2)\n\n if spec_opt == 1:\n # 7 ID I\n ids = ints[i0+6:i1]\n elif spec_opt == 2:\n word = data[n0+(i0+6)*size:n0+i1*size]\n ids = word\n elif spec_opt == 3: # thru\n # datai = (249311, 'THRU ', 250189)\n #datai = 
data[n0+(i0+6)*size:n0+i1*size]\n ids = ints[i0+6:i1]\n istart = ids[0]\n iend = ids[-1]\n ids = list(range(istart, iend+1))\n else:\n raise NotImplementedError(spec_opt)\n #print(sid, nsm_type, zero_two, value, ids)\n #if nsm_type == 'ELEM':\n #nsm_type = 'ELEMENT'\n #for pid_eid in pid_eids:\n #nsml = self.add_nsml1(sid, nsm_type, pid_eids, [value])\n nsm1 = op2.add_nsm1(sid, nsm_type, value, ids)\n #print(nsm1)\n str(nsm1)\n n += (i1 - i0 + 1) * size\n ncards += 1\n op2.card_count['NSM1'] = ncards\n return n\n\n def _read_nsm(self, data: bytes, n: int) -> int:\n \"\"\"NSM\"\"\"\n op2 = self.op2\n n = op2.reader_geom2._read_dual_card(\n data, n,\n self._read_nsm_nx, self._read_nsm_msc,\n 'NSM', op2._add_methods._add_nsm_object)\n return n\n\n def _read_nsm_2(self, data: bytes, n: int) -> int:\n \"\"\"\n NX 2019.2\n NSM(3201, 32, 991)\n\n RECORD – NSM(3201,32,991)\n Defines the properties of a nonstructural mass.\n\n Word Name Type Description\n 1 SID I Set identification number\n 2 PROP CHAR4 Set of properties\n 3 TYPE CHAR4 Set of elements <---- not right...it's an integer and not used...\n 4 ID I Property or element identification number\n 5 VALUE RS Nonstructural mass value\n Words 5 through 6 repeat until End of Record\n\n NSM,2,conrod,1007,0.3\n\n data = (2, CONROD, 0, 1007, 0.3, -1,\n 2, ELEMENT, 0, 200, 0.20, -1,\n 3, PSHELL, 0, 3301, 0.20, -1,\n 3, ELEMENT, 2, 200, 1.0, -1,\n 4, PSHELL, 2, 6401, 4.2, -1)\n \"\"\"\n op2 = self.op2\n n0 = n\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n floats = np.frombuffer(data[n:], op2.fdtype8).copy()\n istart, iend = get_minus1_start_end(ints)\n\n ncards = 0\n size = self.size\n for (i0, i1) in zip(istart, iend):\n #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)\n assert ints[i1] == -1, ints[i1]\n sid = ints[i0]\n prop_type = data[n0+(i0+1)*size:n0+(i0+3)*size]\n elem_type = data[n0+(i0+3)*size:n0+(i0+4)*size]\n nsm_type = prop_type.decode('latin1').rstrip()\n dunno_int = ints[i0+3]\n #print(ints[i0+4:i1])\n #print(floats[i0+4:i1])\n ids = ints[i0+4:i1:2].tolist()\n values = floats[i0+5:i1:2].tolist()\n assert len(ids) == len(values)\n assert dunno_int in [0, 2], (sid, prop_type, (ints[i0+3], floats[i0+4]), ids, values)\n #print(sid, prop_type, (ints[i0+3], floats[i0+4]), ids, values)\n nsm = op2.add_nsm(sid, nsm_type, ids, values)\n #print(nsm[0])\n str(nsm)\n n += (i1 - i0 + 1) * size\n ncards += 1\n op2.card_count['NSM'] = ncards\n return n\n\n def _read_nsm_msc(self, data: bytes, n: int) -> int:\n \"\"\"\n NSM(3201,32,55) - the marker for Record 2\n\n MSC\n 1 SID I Set identification number\n 2 PROP CHAR4 Set of property or elements\n 3 ID I Property or element identification number\n 4 VALUE RS Nonstructural mass value\n ORIGIN=0 NSM Bulk Data entry\n 5 ID I Property or element ID\n 6 VALUE RS Nonstructural mass value\n Words 5 through 6 repeat until End of Record\n ORIGIN=2 NSML Bulk Data entry\n 5 ID I Property or element ID\n 6 VALUE RS Nonstructural mass value\n Words 5 through 6 repeat until End of Record\n Words 3 through 4 repeat until End of Record\n \"\"\"\n op2 = self.op2\n properties = []\n struct1 = Struct(op2._endian + b'i 4s if')\n ndelta = 16\n\n i = 0\n ints = np.frombuffer(data[n:], op2.idtype).copy()\n floats = np.frombuffer(data[n:], op2.fdtype).copy()\n\n while n < len(data):\n edata = data[n:n+ndelta]\n out = struct1.unpack(edata)\n (sid, prop_set, pid, value) = out\n # 538976312\n assert pid < 100000000\n i += 4\n n += ndelta\n\n prop_set = prop_set.decode('utf8').rstrip(' ') # \\x00\n values = [value]\n 
#print('ints[i:]=', ints[i:])\n while ints[i] != -1:\n value2 = floats[i]\n values.append(value2)\n n += 4\n i += 1\n op2.log.info(\"MSC: NSM-sid=%s prop_set=%s pid=%s values=%s\" % (\n sid, prop_set, pid, values))\n prop = NSM.add_op2_data([sid, prop_set, pid, value])\n #op2._add_methods._add_nsm_object(prop)\n properties.append(prop)\n\n # handle the trailing -1\n i += 1\n n += 4\n return n, properties\n\n def _read_nsm_nx(self, data: bytes, n: int) -> int:\n \"\"\"\n NSM(3201,32,55) - the marker for Record 2\n\n 1 SID I Set identification number\n 2 PROP(2) CHAR4 Set of properties or elements\n 4 ORIGIN I Entry origin\n 5 ID I Property or element identification number\n 6 VALUE RS Nonstructural mass value\n Words 5 through 6 repeat until End of Record\n \"\"\"\n op2 = self.op2\n properties = []\n\n #NX: C:\\Users\\sdoyle\\Dropbox\\move_tpl\\nsmlcr2s.op2\n struct1 = Struct(op2._endian + b'i 8s ii f')\n ndelta = 24\n #op2.show_data(data[12:], 'ifs')\n\n i = 0\n ints = np.frombuffer(data[n:], op2.idtype).copy()\n floats = np.frombuffer(data[n:], op2.fdtype).copy()\n\n unused_packs = break_by_minus1(ints)\n #for pack in packs:\n #print(pack)\n\n #ipack = 0\n while n < len(data):\n #print('ints[i:]=', ints[i:].tolist())\n #i1, i2 = packs[ipack]\n #print('idata=%s' % idata[i1:i2])\n #print('fdata=%s' % fdata[i1:i2])\n #print(idata[i1:i2])\n edata = data[n:n+ndelta]\n out = struct1.unpack(edata)\n (sid, prop_set, origin, pid, value) = out\n # 538976312\n assert pid < 100000000\n i += 6\n n += ndelta\n\n prop_set = prop_set.decode('utf8').rstrip(' ') # \\x00\n pids = [pid]\n values = [value]\n #print('ints[i:]=', ints[i:].tolist())\n while ints[i] != -1:\n pid = ints[i]\n value2 = floats[i+1]\n assert pid != -1\n pids.append(pid)\n values.append(value2)\n n += 8\n i += 2\n\n for pid, value in zip(pids, values):\n if origin == 0:\n #op2.log.info(\"NX: NSM-sid=%s prop_set=%s pid=%s values=%s\" % (\n #sid, prop_set, pid, values))\n prop = NSM.add_op2_data([sid, prop_set, pid, value])\n elif origin == 2:\n #op2.log.info(\"NX: NSML-sid=%s prop_set=%s pid=%s values=%s\" % (\n #sid, prop_set, pid, values))\n prop = NSML.add_op2_data([sid, prop_set, pid, value])\n\n #print(prop.rstrip(), pid, value)\n #op2._add_methods._add_nsm_object(prop)\n properties.append(prop)\n #print('----')\n\n # handle the trailing -1\n i += 1\n n += 4\n #ipack += 1\n return n, properties\n\n# NSM1\n# NSML1\n# NSMADD\n# NSML\n# NSML1\n# PAABSF\n# PACABS\n# PACBAR\n\n def _read_pbar(self, data: bytes, n: int) -> int:\n \"\"\"\n PBAR(52,20,181) - the marker for Record 11\n .. 
warning:: this makes a funny property...\n\n MSC 2016/NX10\n\n Word Name Type Description\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 A RS Area\n 4 I1 RS Area moment of inertia in plane 1\n 5 I2 RS Area moment of inertia in plane 2\n 6 J RS Torsional constant\n 7 NSM RS Nonstructural mass per unit length\n 8 FE RS\n 9 C1 RS Stress recovery location at point C in element y-axis\n 10 C2 RS Stress recovery location at point C in element z-axis\n 11 D1 RS Stress recovery location at point D in element y-axis\n 12 D2 RS Stress recovery location at point D in element z-axis\n 13 E1 RS Stress recovery location at point E in element y-axis\n 14 E2 RS Stress recovery location at point E in element z-axis\n 15 F1 RS Stress recovery location at point F in element y-axis\n 16 F2 RS Stress recovery location at point F in element z-axis\n 17 K1 RS Area factor for shear in plane 1\n 18 K2 RS Area factor for shear in plane 2\n 19 I12 RS Area product of inertia for plane 1 and 2\n \"\"\"\n op2 = self.op2\n ntotal = 76 * self.factor # 19*4\n struct1 = Struct(mapfmt(op2._endian + b'2i17f', self.size))\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n #(pid, mid, a, I1, I2, J, nsm, fe, c1, c2, d1, d2,\n #e1, e2, f1, f2, k1, k2, I12) = out\n prop = PBAR.add_op2_data(out)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PBAR'] = nentries\n return n\n\n def _read_pbarl(self, data: bytes, n: int) -> int:\n \"\"\"\n PBARL(9102,91,52) - the marker for Record 12\n TODO: buggy\n It's possible to have a PBARL and a PBAR at the same time.\n NSM is at the end of the element.\n \"\"\"\n op2 = self.op2\n valid_types = {\n 'ROD': 1,\n 'TUBE': 2,\n 'TUBE2': 2,\n 'I': 6,\n 'CHAN': 4,\n 'T': 4,\n 'BOX': 4,\n 'BAR': 2,\n 'CROSS': 4,\n 'H': 4,\n 'T1': 4,\n 'I1': 4,\n 'CHAN1': 4,\n 'Z': 4,\n 'CHAN2': 4,\n \"T2\": 4,\n 'BOX1': 6,\n 'HEXA': 3,\n 'HAT': 4,\n 'HAT1': 5,\n 'DBOX': 10, # was 12\n #'MLO TUBE' : 2,\n } # for GROUP=\"MSCBML0\"\n\n size = self.size\n ntotal = 28 * self.factor # 7*4 - ROD - shortest entry...could be buggy... 
# TODO fix this\n if size == 4:\n struct1 = Struct(op2._endian + b'2i 8s 8s f')\n else:\n struct1 = Struct(op2._endian + b'2q 16s 16s d')\n\n #nentries = (len(data) - n) // ntotal\n #print(self.show_ndata(80))\n ndata = len(data)\n\n while ndata - n > ntotal:\n edata = data[n:n+ntotal]\n n += ntotal\n\n out = struct1.unpack(edata)\n (pid, mid, group, beam_type, value) = out\n if pid > 100000000 or pid < 1:\n op2.log.debug(\" pid=%s mid=%s group=%r beam_type=%r value=%s\" % (\n pid, mid, group, beam_type, value))\n raise RuntimeError('bad parsing...')\n\n beam_type = reshape_bytes_block_size(beam_type, size=size)\n group = reshape_bytes_block_size(group, size=size)\n data_in = [pid, mid, group, beam_type, value]\n\n expected_length = valid_types[beam_type]\n iformat = op2._endian + b'%if' % expected_length\n\n ndelta = expected_length * 4\n dims_nsm = list(unpack(iformat, data[n:n+ndelta]))\n data_in += dims_nsm\n #print(\" pid=%s mid=%s group=%r beam_type=%r value=%s dims_nsm=%s\" % (\n #pid, mid, group, beam_type, value, dims_nsm))\n\n # TODO why do i need the +4???\n # is that for the nsm?\n #min_len = expected_length * 4 + 4\n #if len(data)\n #data = data[n + expected_length * 4 + 4:]\n n += ndelta\n\n #prin( \"len(out) = \",len(out)))\n #print(\"PBARL = %s\" % data_in)\n prop = PBARL.add_op2_data(data_in) # last value is nsm\n pid = prop.pid\n if pid in op2.properties:\n #op2.log.debug(\"removing:\\n%s\" % op2.properties[pid])\n op2._type_to_id_map['PBAR'].remove(pid)\n del op2.properties[pid]\n self._add_op2_property(prop)\n #op2.properties[pid] = prop\n #print(prop.get_stats())\n #print(op2.show_data(data[n-8:-100]))\n\n # the PBARL ends with a -1 flag\n #value, = unpack(op2._endian + b'i', data[n:n+4])\n n += 4 * self.factor\n if len(op2._type_to_id_map['PBAR']) == 0 and 'PBAR' in op2.card_count:\n del op2._type_to_id_map['PBAR']\n del op2.card_count['PBAR']\n op2.increase_card_count('PBARL')\n #assert len(data) == n\n if self.size == 8:\n n += 16\n #n += 8 # same for 32/64 bit - not 100% that it's always active\n return n\n\n def _read_pbcomp(self, data: bytes, n: int) -> int:\n \"\"\"\n PBCOMP(5403, 55, 349)\n\n pid mid A I1 I2 I12 J NSM\n PBCOMP 3 2 2.00E-4 6.67E-9 1.67E-9 0.0 4.58E-9 0.0 +\n pid mid\n floats = (3, 2, 0.0002, 6.67e-09, 1.67e-09, 0.0, 4.58e-09, 0.0, 1.0, 1.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n ints = (3, 2, 0.0002, 6.67E-9, 1.67E-9, 0, 4.58E-9, 0, 1.0, 1.0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n\n \"\"\"\n op2 = self.op2\n struct1 = Struct(mapfmt(op2._endian + b'2i 12f i', self.size))\n struct2 = Struct(mapfmt(op2._endian + b'3f 2i', self.size))\n nproperties = 0\n ntotal1 = 60 * self.factor # 4*15\n ntotal2 = 20 * self.factor\n\n ndata = len(data)\n #print(ntotal1, ntotal2)\n if self.factor == 2:\n op2.show_data(data[12*self.factor:], types='qd')\n #print(len(data[12*self.factor:]))\n while n < ndata:\n #op2.log.debug(f\"n={n} ndata={ndata}\")\n edata = data[n:n+ntotal1]\n #if len(edata) == ntotal1:\n data1 = struct1.unpack(edata)\n #else:\n #op2.show_data(edata, types='qdi')\n #n += ntotal2\n #continue\n nsections = data1[-1]\n if op2.is_debug_file:\n (pid, mid, a, i1, i2, i12, j, nsm, k1, k2,\n m1, m2, n1, n2, unused_nsections) = data1\n op2.log.info(f'PBCOMP pid={pid} mid={mid} nsections={nsections} '\n f'k1={k1} k2={k2} m=({m1},{m2}) n=({n1},{n2})\\n')\n #if pid > 0 and nsections == 0:\n #print('n1')\n #n += ntotal1\n 
#continue\n #if pid == 0 and nsections == 0:\n #print('n2')\n #n += ntotal2\n #continue\n\n data2 = []\n n += ntotal1\n if nsections in [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]:\n # 16 Y RS Lumped area location along element's y-axis\n # 17 Z RS Lumped area location along element's z-axis\n # 18 C RS Fraction of the total area for the lumped area\n # 19 MID I Material identification number\n # 20 UNDEF None\n # Words 16 through 20 repeat NSECT times\n for unused_i in range(nsections):\n datai = data[n:n+ntotal2]\n xi, yi, ci, mid, unused_null = struct2.unpack(datai)\n data2.append((xi, yi, ci, mid))\n n += ntotal2\n else:\n op2.log.error(f'PBCOMP={data1[0]} has no sections; check your bdf')\n return n\n #raise NotImplementedError('PBCOMP nsections=%r' % nsections)\n\n if op2.is_debug_file:\n op2.binary_debug.write(' PBCOMP: %s\\n' % str([data1, data2]))\n msg = (\n ' i=%-2s so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '\n 'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (\n nsections, None, -9999., a, i1, i2, i12, j, nsm,\n None, None, None, None, None, None, None, None,)\n )\n op2.log.debug(msg)\n #op2.log.debug(data1)\n #op2.log.debug(data2)\n\n data_in = [data1, data2]\n prop = PBCOMP.add_op2_data(data_in)\n pid = data1[0]\n if pid in op2.properties:\n op2._type_to_id_map['PBEAM'].remove(pid)\n del op2.properties[pid]\n\n self._add_op2_property(prop)\n nproperties += 1\n #print(f\"n={n} ndata={ndata}\")\n assert nproperties > 0, 'PBCOMP nproperties=%s' % (nproperties)\n if len(op2._type_to_id_map['PBEAM']) == 0 and 'PBEAM' in op2.card_count:\n del op2._type_to_id_map['PBEAM']\n del op2.card_count['PBEAM']\n op2.card_count['PBCOMP'] = nproperties\n return n\n\n def _read_pbeam(self, data: bytes, n: int) -> int:\n \"\"\"\n PBEAM(5402,54,262) - the marker for Record 14\n .. 
todo:: add object\n \"\"\"\n op2 = self.op2\n cross_section_type_map = {\n 0 : 'variable',\n 1 : 'constant',\n 2 : '???',\n }\n\n struct1 = Struct(mapfmt(op2._endian + b'4if', self.size))\n struct2 = Struct(mapfmt(op2._endian + b'16f', self.size))\n struct3 = Struct(mapfmt(op2._endian + b'16f', self.size))\n unused_ntotal = 768 # 4*(5+16*12)\n #nproperties = (len(data) - n) // ntotal\n #assert nproperties > 0, 'ndata-n=%s n=%s datai\\n%s' % (len(data)-n, n, op2.show_data(data[n:100+n]))\n ndata = len(data)\n #op2.show_data(data[12:], 'if')\n #assert ndata % ntotal == 0, 'ndata-n=%s n=%s ndata%%ntotal=%s' % (len(data)-n, n, ndata % ntotal)\n nproperties = 0\n\n ntotal1 = 20 * self.factor\n ntotal2 = 64 * self.factor\n while n < ndata:\n #while 1: #for i in range(nproperties):\n edata = data[n:n+ntotal1]\n n += ntotal1\n data_in = list(struct1.unpack(edata))\n #if op2.is_debug_file:\n #op2.log.info('PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s\\n' % tuple(data_in))\n (pid, unused_mid, unused_nsegments, ccf, unused_x) = data_in\n #op2.log.info('PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s' % tuple(data_in))\n\n # Constant cross-section flag: 1=yes and 0=no\n # what is 2?\n if ccf not in [0, 1, 2]:\n msg = (' PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s; '\n 'ccf must be in [0, 1, 2]\\n' % tuple(data_in))\n raise ValueError(msg)\n\n cross_section_type = cross_section_type_map[ccf]\n #print('cross_section_type = %s' % cross_section_type)\n\n is_pbcomp = False\n is_bad_so = False\n\n so = []\n xxb = []\n for i in range(11):\n edata = data[n:n+ntotal2]\n if len(edata) != ntotal2:\n endpack = []\n raise RuntimeError(f'PBEAM unexpected length i={i:d}...')\n n += ntotal2\n pack = struct2.unpack(edata)\n (soi, xxbi, a, i1, i2, i12, j, nsm, c1, c2,\n d1, d2, e1, e2, f1, f2) = pack\n xxb.append(xxbi)\n so.append(soi)\n\n if soi == 0.0:\n so_str = 'NO'\n elif soi == 1.0:\n so_str = 'YES'\n else:\n so_str = str(soi)\n is_bad_so = True\n #msg = 'PBEAM pid=%s i=%s x/xb=%s soi=%s; soi not in 0.0 or 1.0' % (\n #pid, i, xxb, soi)\n #raise NotImplementedError(msg)\n\n #if xxb != 0.0:\n #msg = 'PBEAM pid=%s i=%s x/xb=%s soi=%s; xxb not in 0.0 or 1.0' % (\n #pid, i, xxb, soi)\n #raise NotImplementedError(msg)\n\n pack2 = (so_str, xxbi, a, i1, i2, i12, j, nsm, c1, c2,\n d1, d2, e1, e2, f1, f2)\n data_in.append(pack2)\n if op2.is_debug_file:\n op2.binary_debug.write(f' {pack}\\n')\n msg = (\n ' i=%-2s' % i + ' so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '\n 'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (tuple(pack2))\n )\n op2.binary_debug.write(msg)\n #msg = (\n #' i=%-2s' % i + ' so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '\n #'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (tuple(pack2))\n #)\n #print(msg)\n\n edata = data[n:n+ntotal2]\n if len(edata) != ntotal2:\n endpack = []\n raise RuntimeError('PBEAM unexpected length 2...')\n endpack = struct3.unpack(edata)\n n += ntotal2\n\n assert len(endpack) == 16, endpack\n #(k1, k2, s1, s2, nsia, nsib, cwa, cwb, # 8\n #m1a, m2a, m1b, m2b, n1a, n2a, n1b, n2b) = endpack # 8 -> 16\n if op2.is_debug_file:\n op2.binary_debug.write(' k=[%s,%s] s=[%s,%s] nsi=[%s,%s] cw=[%s,%s] '\n 'ma=[%s,%s] mb=[%s,%s] na=[%s,%s] nb=[%s,%s]' % (\n tuple(endpack)))\n data_in.append(endpack)\n\n if is_bad_so:\n #if soi < 0.:\n xxb_str = ', '.join(['%g' % xxbi for xxbi in xxb])\n so_str = ', '.join(['%g' % soi for soi in so])\n msg = (f'PBEAM pid={pid} i={i} soi=[{so_str}]; '\n 'soi not 0.0 or 1.0; assuming PBCOMP & dropping')\n op2.log.error(msg)\n is_pbcomp = True\n\n if min(xxb) 
< 0.0 or max(xxb) > 1.0:\n xxb_str = ', '.join(['%g' % xxbi for xxbi in xxb])\n msg = (f'PBEAM pid={pid} i={i} x/xb=[{xxb_str}]; '\n 'x/xb must be between 0.0 and 1.0; assuming PBCOMP & dropping')\n op2.log.error(msg)\n is_pbcomp = True\n\n if is_pbcomp:\n continue\n if pid in op2.properties:\n if op2.properties[pid].type == 'PBCOMP':\n continue\n\n prop = PBEAM.add_op2_data(data_in)\n nproperties += 1\n self._add_op2_property(prop)\n if nproperties:\n op2.card_count['PBEAM'] = nproperties\n return n\n\n def _read_pbeaml(self, data: bytes, n: int) -> int:\n \"\"\"\n PBEAML(9202,92,53)\n\n Word Name Type Description\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 GROUP(2) CHAR4 Cross-section group name\n 5 TYPE(2) CHAR4 Cross section type\n 7 VALUE RS Cross section values for XXB, SO, NSM, and dimensions\n Word 7 repeats until (-1) occurs\n \"\"\"\n op2 = self.op2\n #strs = numpy.core.defchararray.reshapesplit(data, sep=\",\")\n #ints = np.frombuffer(data[n:], self._uendian + 'i').copy()\n #floats = np.frombuffer(data[n:], self._uendian + 'f').copy()\n ints = np.frombuffer(data[n:], op2.idtype8).copy()\n floats = np.frombuffer(data[n:], op2.fdtype8).copy()\n istart, iend = get_minus1_start_end(ints)\n\n size = self.size\n nproperties = len(istart)\n if size == 4:\n struct1 = Struct(op2._endian + b'2i 8s 8s')\n else:\n struct1 = Struct(op2._endian + b'2q 16s 16s')\n\n for unused_i, (istarti, iendi) in enumerate(zip(istart, iend)):\n idata = data[n+istarti*size : n+(istarti+6)*size]\n pid, mid, group, beam_type = struct1.unpack(idata)\n group = group.decode('latin1').strip()\n beam_type = beam_type.decode('latin1').strip()\n fvalues = floats[istarti+6: iendi]\n if op2.is_debug_file:\n op2.binary_debug.write(' %s\\n' % str(fvalues))\n op2.log.debug(f'pid={pid:d} mid={mid:d} group={group} beam_type={beam_type}')\n op2.log.debug(fvalues)\n #op2.log.debug(f'pid={pid:d} mid={mid:d} group={group} beam_type={beam_type}')\n data_in = [pid, mid, group, beam_type, fvalues]\n prop = PBEAML.add_op2_data(data_in)\n if pid in op2.properties:\n # this is a fake PSHELL\n propi = op2.properties[pid]\n assert propi.type in ['PBEAM'], propi.get_stats()\n nproperties -= 1\n continue\n self._add_op2_property(prop)\n if nproperties:\n op2.card_count['PBEAML'] = nproperties\n return len(data)\n\n def _read_pbend(self, data: bytes, n: int) -> int:\n \"\"\"PBEND\"\"\"\n op2 = self.op2\n n = op2.reader_geom2._read_dual_card(\n data, n,\n self._read_pbend_nx, self._read_pbend_msc,\n 'PBEND', op2._add_methods._add_property_object)\n return n\n\n def _read_pbend_msc(self, data: bytes, n: int) -> int:\n \"\"\"\n PBEND\n\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 A RS Area\n 4 I1 RS Area moment of inertia in plane 1\n 5 I2 RS Area moment of inertia in plane 2\n 6 J RS Torsional constant\n 7 FSI I flexibility and stress intensification factors\n 8 RM RS Mean cross-sectional radius of the curved pipe\n 9 T RS Wall thickness of the curved pipe\n 10 P RS Internal pressure\n 11 RB RS Bend radius of the line of centroids\n 12 THETAB RS Arc angle of element\n 13 C1 RS Stress recovery location at point C in element y-axis\n 14 C2 RS Stress recovery location at point C in element z-axis\n 15 D1 RS Stress recovery location at point D in element y-axis\n 16 D2 RS Stress recovery location at point D in element z-axis\n 17 E1 RS Stress recovery location at point E in element y-axis\n 18 E2 RS Stress recovery location at point E in element z-axis\n 19 F1 RS 
Stress recovery location at point F in element y-axis\n 20 F2 RS Stress recovery location at point F in element z-axis\n 21 K1 RS Area factor for shear in plane 1\n 22 K2 RS Area factor for shear in plane 2\n 23 NSM RS Nonstructural mass per unit length\n 24 RC RS Radial offset of the geometric centroid\n 25 ZC RS Offset of the geometric centroid\n 26 DELTAN I Radial offset of the neutral axis from the geometric\n centroid\n \"\"\"\n op2 = self.op2\n ntotal = 104 # 26*4\n struct1 = Struct(op2._endian + b'2i 4f i 18f f') # delta_n is a float, not an integer\n nproperties = (len(data) - n) // ntotal\n assert (len(data) - n) % ntotal == 0\n assert nproperties > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)\n properties = []\n for unused_i in range(nproperties):\n edata = data[n:n+104]\n out = struct1.unpack(edata)\n (pid, mid, area, i1, i2, j, fsi, rm, t, p, rb, theta_b,\n c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, nsm, rc, zc,\n delta_n) = out\n beam_type = fsi\n\n if (area, rm, t, p) == (0., 0., 0., 0.):\n area = None\n rm = None\n t = None\n p = None\n delta_n = None\n beam_type = 2\n if delta_n == 0:\n #: Radial offset of the neutral axis from the geometric\n #: centroid, positive is toward the center of curvature\n delta_n = None\n pbend = PBEND(pid, mid, beam_type, area, i1, i2, j,\n c1, c2, d1, d2, e1, e2, f1, f2, k1, k2,\n nsm, rc, zc, delta_n, fsi, rm, t, p, rb, theta_b)\n #print(pbend)\n pbend.validate()\n\n properties.append(pbend)\n n += ntotal\n return n, properties\n\n def _read_pbend_nx(self, data: bytes, n: int) -> int:\n \"\"\"\n PBEND\n\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 A RS Area\n 4 I1 RS Area moment of inertia in plane 1\n 5 I2 RS Area moment of inertia in plane 2\n 6 J RS Torsional constant\n 7 FSI I Flexibility and stress intensification factors\n 8 RM RS Mean cross-sectional radius of the curved pipe\n 9 T RS Wall thickness of the curved pipe\n 10 P RS Internal pressure\n 11 RB RS Bend radius of the line of centroids\n 12 THETAB RS Arc angle of element\n 13 C1 RS Stress recovery location at point C in element y-axis\n 14 C2 RS Stress recovery location at point C in element z-axis\n 15 D1 RS Stress recovery location at point D in element y-axis\n 16 D2 RS Stress recovery location at point D in element z-axis\n 17 E1 RS Stress recovery location at point E in element y-axis\n 18 E2 RS Stress recovery location at point E in element z-axis\n 19 F1 RS Stress recovery location at point F in element y-axis\n 20 F2 RS Stress recovery location at point F in element z-axis\n 21 K1 RS Area factor for shear in plane 1\n 22 K2 RS Area factor for shear in plane 2\n 23 NSM RS Nonstructural mass per unit length\n 24 RC RS Radial offset of the geometric centroid\n 25 ZC RS Offset of the geometric centroid\n 26 DELTAN RS Radial offset of the neutral axis from the geometric\n centroid\n 27 SACL RS Miter spacing at center line.\n 28 ALPHA RS One-half angle between the adjacent miter axis\n (Degrees).\n 29 FLANGE I For FSI=5, defines the number of flanges attached.\n 30 KX RS For FSI=6, the user defined flexibility factor for the\n torsional moment.\n 31 KY RS For FSI=6, the user defined flexibility factor for the\n out-of-plane bending moment.\n 32 KZ RS For FSI=6, the user defined flexbility factor for the\n in-plane bending moment.\n 33 Not used\n \"\"\"\n op2 = self.op2\n #op2.log.info('geom skipping PBEND in EPT')\n #return len(data)\n ntotal = 132 # 33*4\n struct1 = Struct(op2._endian + b'2i 4f i 21f i 4f')\n nproperties = 
(len(data) - n) // ntotal\n assert (len(data) - n) % ntotal == 0\n assert nproperties > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)\n properties = []\n for unused_i in range(nproperties):\n edata = data[n:n+132]\n out = struct1.unpack(edata)\n (pid, mid, area, i1, i2, j, fsi, rm, t, p, rb, theta_b,\n c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, nsm, rc, zc,\n delta_n, unused_sacl, unused_alpha, unused_flange,\n unused_kx, unused_ky, unused_kz, unused_junk,) = out\n beam_type = fsi\n\n pbend = PBEND(pid, mid, beam_type, area, i1, i2, j,\n c1, c2, d1, d2, e1, e2, f1, f2, k1, k2,\n nsm, rc, zc, delta_n, fsi, rm, t, p, rb, theta_b)\n pbend.validate()\n properties.append(pbend)\n n += ntotal\n return n, properties\n\n# PBMSECT\n# PBRSECT\n\n def _read_pbush(self, data: bytes, n: int) -> int:\n \"\"\"\n The PBUSH card is different between MSC and NX Nastran.\n\n DMAP NX 11\n ----------\n NX has 23 fields in NX 11-NX 2019.2 (same as MSC 2005)\n NX has 18 fields in the pre-2001 format\n\n DMAP MSC 2005\n -------------\n MSC has 23 fields in 2005\n MSC has 18 fields in the pre-2001 format\n\n DMAP MSC 2016\n -------------\n MSC has 24 fields in 2016.1\n MSC has 18 fields in the pre-2001 format\n\n DMAP MSC 2021\n -------------\n MSC has 27 fields in 2021\n\n \"\"\"\n op2 = self.op2\n card_name = 'PBUSH'\n card_obj = PBUSH\n methods = {\n 72 : self._read_pbush_nx_72, # 72=4*18\n 92 : self._read_pbush_msc_92, # 92=4*23\n 96 : self._read_pbush_msc_96, # 96=4*24\n 108 : self._read_pbush_msc_108, # 108=4*27\n }\n try:\n n = op2.reader_geom2._read_double_card(\n card_name, card_obj, self._add_op2_property,\n methods, data, n)\n except DoubleCardError:\n nx_method = partial(self._read_pbush_nx_72, card_obj)\n msc_method = partial(self._read_pbush_msc_92, card_obj)\n n = op2.reader_geom2._read_dual_card(\n data, n,\n nx_method, msc_method,\n card_name, self._add_op2_property)\n\n # we're listing nx twice because NX/MSC used to be consistent\n # the new form for MSC is not supported\n #n = self._read_dual_card(data, n, self._read_pbush_nx, self._read_pbush_msc,\n #'PBUSH', self._add_op2_property)\n return n\n\n def _read_pbush_nx_72(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:\n \"\"\"\n PBUSH(1402,14,37) - 18 fields\n legacy MSC/NX format\n \"\"\"\n op2 = self.op2\n ntotal = 72 * self.factor\n struct1 = Struct(mapfmt(op2._endian + b'i17f', self.size))\n ndata = len(data) - n\n nentries = ndata // ntotal\n assert nentries > 0, 'table={op2.table_name} len={ndata}'\n assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'\n props = []\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n (pid,\n k1, k2, k3, k4, k5, k6,\n b1, b2, b3, b4, b5, b6,\n g1, sa, st, ea, et) = out\n #op2.log.debug(out)\n assert pid > 0, pid\n g2 = g3 = g4 = g5 = g6 = g1\n data_in = (pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,\n g1, g2, g3, g4, g5, g6, sa, st, ea, et)\n prop = PBUSH.add_op2_data(data_in)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbush_msc_92(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:\n \"\"\"PBUSH(1402,14,37) - 23 fields\n\n MSC 2005r2 to <MSC 2016\n \"\"\"\n op2 = self.op2\n ntotal = 92 * self.factor # 23*4\n struct1 = Struct(mapfmt(op2._endian + b'i22f', self.size))\n\n ndata = len(data) - n\n nentries = ndata // ntotal\n assert nentries > 0, 'table={op2.table_name} len={ndata}'\n assert ndata % ntotal == 0, f'table={op2.table_name} 
leftover = {ndata} % {ntotal} = {ndata % ntotal}'\n\n props = []\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n #(pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,\n #g1, g2, g3, g4, g5, g6, sa, st, ea, et) = out\n pid = out[0]\n assert pid > 0, pid\n prop = PBUSH.add_op2_data(out)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbush_msc_96(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:\n \"\"\"PBUSH(1402,14,37) - 24 fields\n\n MSC 2016.1? to 2020\n \"\"\"\n op2 = self.op2\n ntotal = 96 * self.factor # 24*4\n struct1 = Struct(mapfmt(op2._endian + b'i22f f', self.size))\n\n ndata = len(data) - n\n nentries = ndata // ntotal\n assert nentries > 0, 'table={op2.table_name} len={ndata}'\n assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'\n\n props = []\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n #(pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,\n #g1, g2, g3, g4, g5, g6, sa, st, ea, et, mass) = out\n pid = out[0]\n assert pid > 0, pid\n prop = PBUSH.add_op2_data(out)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbush_msc_108(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:\n \"\"\"\n PBUSH(1402,14,37) - 27 fields\n MSC 2021 to current\n\n ints = (1402, 14, 37, 2, 100000.0, 200000.0, 300000.0, 0.15, 0.25, 0.35, 1000.0, 2000.0, 3000.0, 0.0015, 0.0025, 0.0035, 0,\n -1577048263, -1577048263, -1577048263, -1577048263, -1577048263, 1065353216, 1065353216, 1065353216, 1065353216, 0, 0, 0, 0)\n floats = (1402, 14, 37,\n 2, 100000.0, 200000.0, 300000.0, 0.15, 0.25, 0.35, 1000.0, 2000.0, 3000.0, 0.0015, 0.0025, 0.0035, 0.0,\n -1.7367999061094683e-18, -1.7367999061094683e-18, -1.7367999061094683e-18, -1.7367999061094683e-18, -1.7367999061094683e-18, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0)\n \"\"\"\n op2 = self.op2\n ntotal = 108 * self.factor # 27*4\n struct1 = Struct(mapfmt(op2._endian + b'i22f 4f', self.size))\n #op2.show_data(data, types='ifs')\n\n ndata = len(data) - n\n nentries = ndata // ntotal\n assert nentries > 0, 'table={op2.table_name} len={ndata}'\n assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'\n\n props = []\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n #(pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,\n #g1, g2, g3, g4, g5, g6, sa, st, ea, et) = out\n pid = out[0]\n assert pid > 0, pid\n prop = PBUSH.add_op2_data(out)\n str(prop)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbush1d(self, data: bytes, n: int) -> int:\n \"\"\"\n Record 18 -- PBUSH1D(3101,31,219)\n\n 1 PID I Property identification number\n 2 K RS Stiffness\n 3 C RS Viscous Damping\n 4 M RS Mass\n 5 ALPHA RS Temperature coefficient\n 6 SA RS Stress recovery coefficient\n 7 EA/SE RS Strain recovery coefficient\n\n 8 TYPEA I Shock data type:0=Null, 1=Table, 2=Equation\n 9 CVT RS Coefficient of translation velocity tension\n 10 CVC RS Coefficient of translation velocity compression\n 11 EXPVT RS Exponent of velocity tension\n 12 EXPVC RS Exponent of velocity compression\n 13 IDTSU I TABLEDi or DEQATN entry identification number for scale factor vs displacement\n 14 IDTCU I DEQATN entry identification number for scale factor vs displacement\n 15 IDTSUD I DEQATN entry identification number for derivative tension\n 16 IDCSUD I DEQATN entry identification 
number for derivative compression\n\n 17 TYPES I Spring data type: 0=Null, 1=Table, 2=Equation\n 18 IDTS I TABLEDi or DEQATN entry identification number for tension compression\n 19 IDCS I DEQATN entry identification number for compression\n 20 IDTDU I DEQATN entry identification number for scale factor vs displacement\n 21 IDCDU I DEQATN entry identification number for force vs displacement\n\n 22 TYPED I Damper data type: 0=Null, 1=Table, 2=Equation\n 23 IDTD I TABLEDi or DEQATN entry identification number for tension compression\n 24 IDCD I DEQATN entry identification number for compression\n 25 IDTDV I DEQATN entry identification number for scale factor versus velocity\n 26 IDCDV I DEQATN entry identification number for force versus velocity\n\n 27 TYPEG I General data type: 0=Null, 1=Table, 2=Equation\n 28 IDTG I TABLEDi or DEQATN entry identification number for tension compression\n 29 IDCG I DEQATN entry identification number for compression\n 30 IDTDU I DEQATN entry identification number for scale factor versus displacement\n 31 IDCDU I DEQATN entry identification number for force versus displacement\n 32 IDTDV I DEQATN entry identification number for scale factor versus velocity\n 33 IDCDV I DEQATN entry identification number for force vs velocity\n\n 34 TYPEF I Fuse data type: 0=Null, 1=Table\n 35 IDTF I TABLEDi entry identification number for tension\n 36 IDCF I TABLEDi entry identification number for compression\n\n 37 UT RS Ultimate tension\n 38 UC RS Ultimate compression\n \"\"\"\n op2 = self.op2\n type_map = {\n 0 : None, # NULL\n 1 : 'TABLE',\n 2 : 'EQUAT',\n }\n ntotal = 152 * self.factor # 38*4\n struct1 = Struct(mapfmt(op2._endian + b'i 6f i 4f 24i 2f', self.size))\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n (pid, k, c, m, unused_alpha, sa, se,\n typea, cvt, cvc, expvt, expvc, idtsu, idtcu, idtsud, idcsud,\n types, idts, idcs, idtdus, idcdus,\n typed, idtd, idcd, idtdvd, idcdvd,\n typeg, idtg, idcg, idtdug, idcdug, idtdvg, idcdvg,\n typef, idtf, idcf,\n unused_ut, unused_uc) = out\n # test_op2_other_05\n #pbush1d, 204, 1.e+5, 1000., , , , , , +pb1\n #+pb1, spring, table, 205, , , , , , +pb2\n #+pb2, damper, table, 206\n #pid=204 k=100000.0 c=1000.0 m=0.0 sa=nan se=nan\n\n\n msg = f'PBUSH1D pid={pid} k={k} c={c} m={m} sa={sa} se={se}'\n optional_vars = {}\n typea_str = type_map[typea]\n types_str = type_map[types]\n typed_str = type_map[typed]\n unused_typeg_str = type_map[typeg]\n unused_typef_str = type_map[typef]\n\n if min([typea, types, typed, typeg, typef]) < 0:\n raise RuntimeError(f'typea={typea} types={types} typed={typed} typeg={typeg} typef={typef}')\n if typea in [1, 2]: # SHOCKA?\n #pbush1d, 204, 1.e+5, 1000., , , , , , +pb4\n #+pb4, shocka, table, 1000., , 1., , 214, , +pb41\n #+pb41, spring, table, 205\n\n idts = idtsu # if typea_str == 'TABLE' else 0\n idets = idtsu # if typea_str == 'EQUAT' else 0\n optional_vars['SHOCKA'] = [typea_str, cvt, cvc, expvt, expvc,\n idts, idets, idtcu, idtsud, idcsud]\n #(shock_type, shock_cvt, shock_cvc, shock_exp_vt, shock_exp_vc,\n #shock_idts, shock_idets, shock_idecs, shock_idetsd, shock_idecsd\n #)\n #print('shock_idts, shock_idets', typea_str, idtsu, idtsu)\n msg += (\n f' SHOCKA type={typea} cvt={cvt} cvc={cvc} expvt={expvt} expvc={expvc}\\n'\n f' idtsu={idtsu} (idts={idts} idets={idets}) idtcu={idtcu} idtsud={idtsud} idcsud={idcsud}')\n if types in [1, 2]: # SPRING: Spring data type: 0=Null, 1=Table, 2=Equation\n #(spring_type, 
spring_idt, spring_idc, spring_idtdu, spring_idcdu) = values\n # SPRING, TYPE IDT IDC IDTDU IDCDU\n optional_vars['SPRING'] = [types_str, idts, idcs, idtdus, idcdus]\n msg += f' SPRING type={types} idt={idts} idc={idcs} idtdu={idtdus} idcdu={idcdus}'\n if typed in [1, 2]: # Damper data type: 0=Null, 1=Table, 2=Equation\n optional_vars['DAMPER'] = [typed_str, idtd, idcd, idtdvd, idcdvd]\n msg += f' DAMPER type={typed} idt={idtd} idc={idtd} idtdv={idtdvd} idcdv={idcdvd}'\n if typeg in [1, 2]: # general, GENER?: 0=Null, 1=Table 2=Equation\n # C:\\NASA\\m4\\formats\\git\\examples\\move_tpl\\ar29scbt.bdf\n #pbush1d, 206, 1.e+3, 10., , , , , , +pb6\n #+pb6, gener, equat, 315, , 3015, , 3016\n msg += f' GENER type={typeg} idt={idtg} idc={idcg} idtdu={idtdug} idcdu={idcdug} idtdv={idtdvg} idcdv={idcdvg}'\n optional_vars['GENER'] = [idtg, idcg, idtdug, idcdug, idtdvg, idcdvg]\n if typef in [1, 2]: # Fuse data type: 0=Null, 1=Table\n raise NotImplementedError(f'typef={typef} idtf={idtf} idcf={idcf}')\n\n if op2.is_debug_file:\n op2.binary_debug.write(msg)\n\n pbush1d = op2.add_pbush1d(pid, k=k, c=c, m=m, sa=sa, se=se,\n optional_vars=optional_vars,)\n str(pbush1d)\n n += ntotal\n op2.card_count['PBUSH1D'] = nentries\n return n\n\n #def _read_pbusht(self, data: bytes, n: int) -> int:\n #\"\"\"reads the PBUSHT(702, 7, 38)\"\"\"\n #n, props = self._read_pbusht_nx(data, n)\n #for prop in props:\n ##print(prop)\n #op2._add_pbusht_object(prop)\n #return n\n\n def _read_pbusht(self, data: bytes, n: int) -> int:\n \"\"\"\n NX 12 / MSC 2005\n Word Name Type Description\n 1 PID I Property identification number\n 2 TKID(6) I TABLEDi entry identification numbers for stiffness\n 8 TBID(6) I TABLEDi entry identification numbers for viscous damping\n 14 TGEID(6) I TABLEDi entry identification number for structural damping\n 20 TKNID(6) I TABLEDi entry identification numbers for force versus deflection\n\n old style\n Word Name Type Description\n 1 PID I Property identification number\n 2 TKID(6) I TABLEDi entry identification numbers for stiffness\n 8 TBID(6) I TABLEDi entry identification numbers for viscous damping\n 14 TGEID I TABLEDi entry identification number for structural damping\n 15 TKNID(6) I TABLEDi entry IDs for force versus deflection\n \"\"\"\n op2 = self.op2\n card_name = 'PBUSHT'\n card_obj = PBUSHT\n methods = {\n 80 : self._read_pbusht_80,\n 100 : self._read_pbusht_100,\n 136 : self._read_pbusht_136,\n }\n try:\n n = op2.reader_geom2._read_double_card(\n card_name, card_obj, op2._add_methods._add_pbusht_object,\n methods, data, n)\n except DoubleCardError:\n raise\n op2.log.warning(f'try-except {card_name}')\n #n = self._read_split_card(data, n,\n #self._read_cquad8_current, self._read_cquad8_v2001,\n #card_name, self.add_op2_element)\n #nelements = op2.card_count['CQUAD8']\n #op2.log.debug(f'nCQUAD8 = {nelements}')\n\n #n = self._read_dual_card(data, n, self._read_ctriax_8, self._read_ctriax_9,\n #'CTRIAX', self.add_op2_element)\n return n\n\n def _read_pbusht_nx_old(self, data: bytes, n: int) -> int:\n op2 = self.op2\n #op2.show_data(data[12:])\n ndata = (len(data) - n) // self.factor\n\n if ndata % 100 == 0 and ndata % 80 == 0:\n op2.log.warning(f\"skipping PBUSHT in EPT because nfields={ndata//4}, which is \"\n 'nproperties*25 or nproperties*20')\n return len(data), []\n if ndata % 100 == 0:\n n, props = self._read_pbusht_100(data, n)\n elif ndata % 80 == 0:\n n, props = self._read_pbusht_80(data, n)\n else:\n # C:\\MSC.Software\\msc_nastran_runs\\mbsh14.op2\n # ints = (1,\n # 51, 51, 0, 0, 
0, 0,\n # 61, 61, 0, 0, 0, 0,\n # 0, 0, 0, 0, 0, 0,\n # 0, '', '', 0, 0, '', '', 0, 0, 925353388, 0, 0, 0, 0, 0,\n # 7,\n # 51, 51, 0, 0, 0, 0,\n # 61, 61, 0, 0, 0, 0,\n # 0, 0, 0, 0, 0, 0,\n # 0, '', '', 0, 0, '', '', 0, 0, 925353388, 0, 0, 0, 0, 0)\n # strings = (b\"1 51 51 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00=\\x00\\x00\\x00=\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xac\\xc5'7\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x07\\x00\\x00\\x003\\x00\\x00\\x003\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00=\\x00\\x00\\x00=\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00 \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xac\\xc5'7\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\",)\n # ints = (1, 51, 51, 0, 0, 0, 0, 61, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ' ', ' ', 0, 0, ' ', ' ', 0, 0, 1e-5, 0, 0, 0, 0 , 0,\n #\n # 7, 51, 51, 0, 0, 0, 0, 61, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ' ', ' ', 0, 0, ' ', ' ', 0, 0, 1e-5, 0, 0, 0, 0, 0)\n #op2.show_data(data[n:], types='is')\n raise NotImplementedError('You have blank lines in your PBUSHT')\n return n, props\n\n def _read_pbusht_80(self, card_obj, data: bytes, n: int) -> int:\n \"\"\"\n Word Name Type Description\n 1 PID I Property identification number\n 2 TKID(6) I TABLEDi entry identification numbers for stiffness\n 8 TBID(6) I TABLEDi entry identification numbers for viscous damping\n 14 TGEID I TABLEDi entry identification number for structural damping\n 15 TKNID(6) I TABLEDi entry identification numbers for force versus deflection\n 16,17,18,19,20\n ???\n \"\"\"\n op2 = self.op2\n ntotal = 80 * self.factor\n struct1 = Struct(op2._endian + b'20i')\n nentries = (len(data) - n) // ntotal\n assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)\n\n props = []\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n #(pid,\n #k1, k2, k3, k4, k5, k6,\n #b1, b2, b3, b4, b5, b6,\n #g1, sa, st, ea, et) = out\n (pid,\n k1, k2, k3, k4, k5, k6,\n b1, b2, b3, b4, b5, b6,\n g1,\n n1, n2, n3, n4, n5, n6) = out\n g2 = g3 = g4 = g5 = g6 = g1\n k_tables = [k1, k2, k3, k4, k5, k6]\n b_tables = [b1, b2, b3, b4, b5, b6]\n ge_tables = [g1, g2, g3, g4, g5, g6]\n kn_tables = [n1, n2, n3, n4, n5, n6]\n prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbusht_100(self, card_obj, data: bytes, n: int) -> int:\n op2 = self.op2\n props = []\n ntotal = 100 * self.factor\n struct1 = Struct(mapfmt(op2._endian + b'25i', self.size))\n nentries = (len(data) - n) // ntotal\n assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n (pid,\n k1, k2, k3, k4, k5, k6,\n b1, b2, b3, b4, b5, b6,\n g1, g2, g3, g4, g5, g6,\n n1, n2, n3, n4, n5, n6) = out\n k_tables 
= [k1, k2, k3, k4, k5, k6]\n b_tables = [b1, b2, b3, b4, b5, b6]\n ge_tables = [g1, g2, g3, g4, g5, g6]\n kn_tables = [n1, n2, n3, n4, n5, n6]\n prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pbusht_136(self, card_obj, data: bytes, n: int) -> int:\n r\"\"\"not 100%\n\n 1 PID I Property identification number\n 2 TKID(6) I TABLEDi entry identification numbers for stiffness\n 8 TBID(6) I TABLEDi entry identification numbers for viscous damping\n 14 TGEID(6) I TABLEDi entry identification number for structural damping\n 20 TKNID(6) I TABLEDi entry IDs for force vs. deflection\n 26 FDC(2) CHAR4 Force deflection curve rule\n 28 FUSE I Failure level\n 29 DIR I Fuse direction\n 30 OPTION(2) CHAR4 Failure mode\n 32 LOWER RS Lower failure bound\n 33 UPPER RS Upper failure bound\n 34 FRATE RS FACTOR of scales the stiffness\n 35 LRGR I Controls large rotation\n 36 UNDEF(4) none\n\n # C:\\MSC.Software\\msc_nastran_runs\\mbsh14.op2\n PBUSHT\t1\t K\t51\t51\n B\t61\t61\n PBUSHT\t7\t K\t51\t51\n B\t61\t61\n\n 538976288 = ' '\n ints = (\n 702, 7, 38,\n 1, (51, 51, 0, 0, 0, 0), (61, 61, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0, 538976288, 538976288, 0, 0, 538976288, 538976288, 0, 0, 925353388, 0, 0, 0, 0, 0,\n 7, (51, 51, 0, 0, 0, 0), (61, 61, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0, 538976288, 538976288, 0, 0, 538976288, 538976288, 0, 0, 925353388, 0, 0, 0, 0, 0)\n floats = (\n 702, 7, 38,\n 1, 51, 51, 0.0, 0.0, 0.0, 0.0, 61, 61, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 1.e-7, 0.0, 0.0, 0.0, 0.0, 0.0,\n 7, 51, 51, 0.0, 0.0, 0.0, 0.0, 61, 61, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 1.e-7, 0.0, 0.0, 0.0, 0.0, 0.0)\n \"\"\"\n op2 = self.op2\n props = []\n ntotal = 136 * self.factor # k b g n fdc\n struct1 = Struct(mapfmt(op2._endian + b'i 6i 6i 6i 6i 4s 2i i 5i', self.size))\n nentries = (len(data) - n) // ntotal\n assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)\n for unused_i in range(nentries):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n (pid,\n k1, k2, k3, k4, k5, k6,\n b1, b2, b3, b4, b5, b6,\n g1, g2, g3, g4, g5, g6,\n n1, n2, n3, n4, n5, n6,\n word1, a, word2, c, *other) = out\n\n\n k_tables = [ki if ki != 538976288 else 0\n for ki in [k1, k2, k3, k4, k5, k6]]\n\n b_tables = [bi if bi != 538976288 else 0\n for bi in [b1, b2, b3, b4, b5, b6]]\n ge_tables = [gei if gei != 538976288 else 0\n for gei in [g1, g2, g3, g4, g5, g6]]\n kn_tables = [kni if kni != 538976288 else 0\n for kni in [n1, n2, n3, n4, n5, n6]]\n op2.log.warning(\n f'PBUSHT: pid={pid} '\n f'k={k_tables} '\n f'b={b_tables} '\n f'ge={ge_tables} '\n f'n={kn_tables} ' +\n 'words=' + str([word1, a, word2, c]) +\n f' other={other}')\n assert sum(other) == 0, other\n prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pcomp(self, data: bytes, n: int) -> int:\n r\"\"\"\n PCOMP(2706,27,287) - the marker for Record 22\n\n standard:\n EPTS; 64-bit: C:\\MSC.Software\\simcenter_nastran_2019.2\\tpl_post1\\cqrdbxdra3lg.op2\n\n optistruct:\n ints = (2706, 27, 287,\n 5,\n 3, -2.75, 0, 0, 1, 0, 0,\n 2, 0.25, 0, 2, # why is sout=2?\n 3, 5.0, 0, 3, # why is sout=3?\n 2, 0.25, 0, 2, # why is sout=2?\n\n 6, 5, -3.0, 0, 0, 1, 0, 0,\n 2, 0.25, 0, 2,\n 2, 0.25, 0, 2,\n 3, 5.0, 0, 3,\n 2, 0.25, 0, 2,\n 2, 0.25, 0, 2, 7, 7, 
-1068498944, 0, 0, 1, 0, 0, 2, 0.25, 0, 2, 2, 0.25, 0, 2, 2, 0.25, 0, 2, 3, 5.0, 0, 3, 2, 0.25, 0, 2, 2, 0.25, 0, 2, 2, 0.25, 0, 2)\n floats = (2706, 27, 287,\n 5, 3, -2.75, 0.0, 0.0, 1, 0.0, 0.0, 2, 0.25, 0.0, 2, 3, 5.0, 0.0, 3, 2, 0.25, 0.0, 2, 6, 5, -3.0, 0.0, 0.0, 1, 0.0, 0.0, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 3, 5.0, 0.0, 3, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 9.80908925027372e-45, 9.80908925027372e-45, -3.25, 0.0, 0.0, 1, 0.0, 0.0, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 3, 5.0, 0.0, 3, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2)\n \"\"\"\n op2 = self.op2\n if self.size == 4:\n n2, props = self._read_pcomp_32_bit(data, n)\n nproperties = len(props)\n for prop in props:\n self._add_op2_property(prop)\n op2.card_count['PCOMP'] = nproperties\n else:\n n2 = op2.reader_geom2._read_dual_card(\n data, n, self._read_pcomp_32_bit,\n self._read_pcomp_64_bit,\n 'PCOMP', self._add_op2_property)\n return n2\n\n def _read_pcomp_64_bit(self, data: bytes, n: int) -> Tuple[int, List[PCOMP]]:\n \"\"\"\n PCOMP(2706,27,287) - the marker for Record 22\n\n 1 PID I Property identification number\n 2 N(C) I Number of plies\n 3 Z0 RS Distance from the reference plane to the bottom surface\n 4 NSM RS Nonstructural mass per unit area\n 5 SB RS Allowable shear stress of the bonding material\n 6 FT I Failure theory\n 7 TREF RS Reference temperature\n 8 GE RS Damping coefficient\n\n 9 MID I Material identification number\n 10 T RS Thicknesses of the ply\n 11 THETA RS Orientation angle of the longitudinal direction of the ply\n 12 SOUT I Stress or strain output request of the ply\n Words 9 through 12 repeat N times\n\n TODO:\n 64-bit bug: why is the number of plies 0???\n\n doubles (float64) = (\n 1, 0.0, 1.7368e-18, 0.0, 1.0, 1.5e-323, 0.0, 0.0,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n -1, -1, -1, -1,\n 21, 0.0, 1.7368e-18, 0.0, 1.0, 1.5e-323, 0.0, 0.0,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n -1, -1, -1, -1)\n long long (int64) = (\n 1, 0, 1.7368e-18, 0, 1.0, 3, 0, 0, 1, 4592590756007337001, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n -1, -1, -1, -1,\n 21, 0, 4341475431749739292, 0, 4607182418800017408, 3, 0, 0,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n 1, 0.11, 0, 1,\n -1, -1, -1, -1)\n\n doubles (float64) = (5e-324, 0.0, -0.005, 0.0, 0.0, 0.0, 0.0, 0.0,\n 4e-323, 0.005, 0.0, 5e-324,\n 4e-323, 0.005, 0.0, 5e-324,\n nan, nan, nan, nan)\n long long (int64) = (1, 0, -4650957407178058629, 0, 0, 0, 0, 0,\n 8, 4572414629676717179, 0, 1,\n 8, 4572414629676717179, 0, 1,\n -1, -1, -1, -1)\n\n C:\\MSC.Software\\simcenter_nastran_2019.2\\tpl_post2\\dbxdr12lg.op2\n data = (3321, 2, -0.5, 0.0, 1.0, 4, 0.0, 0.0,\n 3, 0.5, 0, 1,\n 3, 0.5, 0, 1)\n \"\"\"\n op2 = self.op2\n op2.to_nx(' because PCOMP-64 was found')\n nproperties = 0\n s1 = Struct(mapfmt(op2._endian + b'2i3fi2f', self.size))\n ntotal1 = 32 * self.factor\n s2 = Struct(mapfmt(op2._endian + b'i2fi', self.size))\n\n four_minus1 = Struct(mapfmt(op2._endian + b'4i', self.size))\n ndata = len(data)\n ntotal2 = 16 * self.factor\n props = []\n while n < (ndata - ntotal1):\n out = s1.unpack(data[n:n+ntotal1])\n (pid, nlayers, z0, nsm, sb, ft, tref, ge) = out\n assert pid > 0\n if op2.binary_debug:\n op2.binary_debug.write(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '\n f'sb={sb} ft={ft} Tref={tref} ge={ge}')\n assert isinstance(nlayers, int), out\n #print(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '\n #f'sb={sb} ft={ft} Tref={tref} ge={ge}')\n n += ntotal1\n\n # None, 
'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'\n is_symmetrical = 'NO'\n #if nlayers < 0:\n #is_symmetrical = 'SYM'\n #nlayers = abs(nlayers)\n\n mids = []\n T = []\n thetas = []\n souts = []\n edata2 = data[n:n+ntotal2]\n idata = four_minus1.unpack(edata2)\n while idata != (-1, -1, -1, -1):\n (mid, t, theta, sout) = s2.unpack(edata2)\n mids.append(mid)\n T.append(t)\n thetas.append(theta)\n souts.append(sout)\n if op2.is_debug_file:\n op2.binary_debug.write(f' mid={mid} t={t} theta={theta} sout={sout}\\n')\n n += ntotal2\n #print(f' mid={mid} t={t} theta={theta} sout={sout}')\n edata2 = data[n:n+ntotal2]\n if n == ndata:\n op2.log.warning(' no (-1, -1, -1, -1) flag was found to close the PCOMPs')\n break\n idata = four_minus1.unpack(edata2)\n\n if self.size == 4:\n assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s Tref=%s ge=%s' % (\n pid, nlayers, z0, nsm, sb, ft, tref, ge)\n else:\n assert nlayers == 0, nlayers\n nlayers = len(mids)\n\n data_in = [\n pid, z0, nsm, sb, ft, tref, ge,\n is_symmetrical, mids, T, thetas, souts]\n prop = PCOMP.add_op2_data(data_in)\n nproperties += 1\n n += ntotal2\n props.append(prop)\n return n, props\n\n def _read_pcomp_32_bit(self, data: bytes, n: int) -> Tuple[int, List[PCOMP]]: # pragma: no cover\n \"\"\"PCOMP(2706,27,287) - the marker for Record 22\"\"\"\n op2 = self.op2\n nproperties = 0\n s1 = Struct(mapfmt(op2._endian + b'2i3fi2f', self.size))\n ntotal1 = 32 * self.factor\n s2 = Struct(mapfmt(op2._endian + b'i2fi', self.size))\n\n ndata = len(data)\n ntotal2 = 16 * self.factor\n props = []\n while n < (ndata - ntotal1):\n out = s1.unpack(data[n:n+ntotal1])\n (pid, nlayers, z0, nsm, sb, ft, tref, ge) = out\n assert pid > 0\n\n if op2.binary_debug:\n op2.binary_debug.write(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '\n f'sb={sb} ft={ft} Tref={tref} ge={ge}')\n assert isinstance(nlayers, int), out\n #print(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '\n #f'sb={sb} ft={ft} Tref={tref} ge={ge}')\n n += ntotal1\n\n mids = []\n T = []\n thetas = []\n souts = []\n\n # None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'\n is_symmetrical = 'NO'\n if nlayers < 0:\n is_symmetrical = 'SYM'\n nlayers = abs(nlayers)\n assert nlayers > 0, out\n\n assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s' % (\n pid, nlayers, z0, nsm, sb, ft, tref, ge)\n\n if op2.is_debug_file:\n op2.binary_debug.write(' pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s\\n' % (\n pid, nlayers, z0, nsm, sb, ft, tref, ge))\n #if op2._nastran_format == 'optistruct':\n #print(' pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s' % (\n #pid, nlayers, z0, nsm, sb, ft, tref, ge))\n for unused_ilayer in range(nlayers):\n (mid, t, theta, sout) = s2.unpack(data[n:n+ntotal2])\n if op2._nastran_format == 'optistruct':\n #print(f' mid={mid} t={t} theta={theta} sout={sout}')\n if sout in [2, 3]: # TODO: Why is this 2/3?\n sout = 1 # YES\n\n mids.append(mid)\n assert mid > 0\n\n T.append(t)\n thetas.append(theta)\n souts.append(sout)\n if op2.is_debug_file:\n op2.binary_debug.write(f' mid={mid} t={t} theta={theta} sout={sout}\\n')\n n += ntotal2\n\n data_in = [\n pid, z0, nsm, sb, ft, tref, ge,\n is_symmetrical, mids, T, thetas, souts]\n prop = PCOMP.add_op2_data(data_in)\n #print(prop)\n props.append(prop)\n nproperties += 1\n return n, props\n\n def _read_pcompg(self, data: bytes, n: int) -> int:\n \"\"\"\n PCOMP(2706,27,287)\n\n 1 PID I Property identification number\n 2 LAMOPT I Laminate option\n 3 Z0 RS Distance 
from the reference plane to the bottom surface\n 4 NSM RS Nonstructural mass per unit area\n 5 SB RS Allowable shear stress of the bonding material\n 6 FT I Failure theory\n 7 TREF RS Reference temperature\n 8 GE RS Damping coefficient\n\n 9 GPLYIDi I Global ply IDs.\n 10 MID I Material identification number\n 11 T RS Thicknesses of the ply\n 12 THETA RS Orientation angle of the longitudinal direction of the ply\n 13 SOUT I Stress or strain output request of the ply\n Words 9 through 13 repeat N times (until -1, -1, -1, -1, -1 as Nplies doesn't exist...)\n\n float = (15006, 150, 604,\n 5, 0.0, 1.7368e-18, 0.0, 0.0, 0.0, 20.0, 0.0,\n 5e-324, 5e-324, 2.0, 0.0, 0.0,\n 1e-323, 1e-323, 3.0, 0.0, 0.0,\n 1.5e-323, 1e-323, 3.0, 0.0, 0.0,\n 2e-323, 5e-324, 2.0, 0.0, 0.0,\n nan, nan, nan, nan, nan)\n int = (15006, 150, 604,\n 5, 0, 1.7368e-18, 0, 0, 0, 20.0, 0,\n 1, 1, 4611686018427387904, 0, 0,\n 2, 2, 4613937818241073152, 0, 0,\n 3, 2, 4613937818241073152, 0, 0,\n 4, 1, 4611686018427387904, 0, 0,\n -1, -1, -1, -1, -1)\n\n \"\"\"\n op2 = self.op2\n nproperties = 0\n s1 = Struct(mapfmt(op2._endian + b'2i 3f i 2f', self.size))\n s2 = Struct(mapfmt(op2._endian + b'2i 2f i', self.size))\n struct_i5 = Struct(mapfmt(op2._endian + b'5i', self.size))\n\n # lam - SYM, MEM, BEND, SMEAR, SMCORE, None\n lam_map = {\n 0 : None,\n # MEM\n # BEND\n # SMEAR\n # SMCORE\n }\n\n # ft - HILL, HOFF, TSAI, STRN, None\n ft_map = {\n 0 : None,\n # HILL\n # HOFF\n 3 : 'TSAI',\n # STRN\n }\n # sout - YES, NO\n sout_map = {\n 0 : 'NO',\n 1 : 'YES',\n }\n ndata = len(data)\n #op2.show_data(data, types='qd')\n ntotal1 = 32 * self.factor\n ntotal2 = 20 * self.factor\n while n < (ndata - ntotal1):\n out = s1.unpack(data[n:n+ntotal1])\n (pid, lam_int, z0, nsm, sb, ft_int, tref, ge) = out\n if op2.binary_debug:\n op2.binary_debug.write(f'PCOMPG pid={pid} lam_int={lam_int} z0={z0} nsm={nsm} '\n f'sb={sb} ft_int={ft_int} tref={tref} ge={ge}')\n #print(f'PCOMPG pid={pid} lam_int={lam_int} z0={z0} nsm={nsm} sb={sb} '\n #f'ft_int={ft_int} tref={tref} ge={ge}')\n assert isinstance(lam_int, int), out\n assert pid > -1, out\n n += ntotal1\n\n mids = []\n thicknesses = []\n thetas = []\n souts = []\n global_ply_ids = []\n\n # None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'\n #is_symmetrical = 'NO'\n #if nlayers < 0:\n #is_symmetrical = 'SYM'\n #nlayers = abs(nlayers)\n #assert nlayers > 0, out\n\n #assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s tref=%s ge=%s' % (\n #pid, nlayers, z0, nsm, sb, ft, tref, ge)\n\n #if op2.is_debug_file:\n #op2.binary_debug.write(' pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s tref=%s ge=%s\\n' % (\n #pid, nlayers, z0, nsm, sb, ft, tref, ge))\n ilayer = 0\n while ilayer < 1000:\n ints5 = struct_i5.unpack(data[n:n+ntotal2])\n if ints5 == (-1, -1, -1, -1, -1):\n if op2.is_debug_file:\n op2.binary_debug.write(' global_ply=%-1 mid=%-1 t=%-1 theta=%-1 sout=-1\\n')\n break\n (global_ply, mid, t, theta, sout_int) = s2.unpack(data[n:n+ntotal2])\n #print(' ', (global_ply, mid, t, theta, sout_int))\n try:\n sout = sout_map[sout_int]\n except KeyError:\n op2.log.error('cant parse global_ply=%s sout=%s; assuming 0=NO' % (\n global_ply, sout_int))\n sout = 'NO'\n\n global_ply_ids.append(global_ply)\n mids.append(mid)\n thicknesses.append(t)\n thetas.append(theta)\n souts.append(sout)\n if op2.is_debug_file:\n op2.binary_debug.write(' global_ply=%s mid=%s t=%s theta=%s sout_int=%s sout=%r\\n' % (\n global_ply, mid, t, theta, sout_int, sout))\n n += ntotal2\n ilayer += 1\n n += ntotal2\n\n try:\n ft = 
ft_map[ft_int]\n except KeyError:\n op2.log.error('pid=%s cant parse ft=%s; should be HILL, HOFF, TSAI, STRN'\n '...skipping' % (pid, ft_int))\n continue\n\n try:\n lam = lam_map[lam_int]\n except KeyError:\n op2.log.error('pid=%s cant parse lam=%s; should be HILL, HOFF, TSAI, STRN'\n '...skipping' % (pid, lam_int))\n continue\n\n # apparently Nastran makes duplicate property ids...\n if pid in op2.properties and op2.properties[pid].type == 'PCOMP':\n del op2.properties[pid]\n\n op2.add_pcompg(pid, global_ply_ids, mids, thicknesses, thetas=thetas, souts=souts,\n nsm=nsm, sb=sb, ft=ft, tref=tref, ge=ge, lam=lam, z0=z0, comment='')\n nproperties += 1\n op2.card_count['PCOMPG'] = nproperties\n return n\n\n# PCOMPA\n\n def _read_pconeax(self, data: bytes, n: int) -> int:\n \"\"\"\n (152,19,147) - Record 24\n \"\"\"\n self.op2.log.info('geom skipping PCONEAX in EPT')\n return len(data)\n\n def _read_pconv(self, data: bytes, n: int) -> int:\n \"\"\"common method for reading PCONVs\"\"\"\n op2 = self.op2\n #n = self._read_dual_card(data, n, self._read_pconv_nx, self._read_pconv_msc,\n #'PCONV', self._add_pconv)\n\n card_name = 'PCONV'\n card_obj = PCONV\n methods = {\n 16 : self._read_pconv_nx_16, # 16=4*4\n 56 : self._read_pconv_msc_56, # 56=4*14\n }\n try:\n n, elements = op2.reader_geom2._read_double_card_load(\n card_name, card_obj,\n methods, data, n)\n except DoubleCardError:\n nx_method = partial(self._read_pconv_nx_16, card_obj)\n msc_method = partial(self._read_pconv_msc_56, card_obj)\n n, elements = op2._read_dual_card_load(\n data, n,\n nx_method, msc_method,\n card_name, self._add_op2_property)\n\n nelements = len(elements)\n for prop in elements:\n key = prop.pconid\n if key in op2.convection_properties:\n prop_old = op2.convection_properties[key]\n if prop != prop_old:\n op2.log.warning(prop.raw_fields())\n op2.log.warning(prop_old.raw_fields())\n op2.log.warning(f'PCONV pconid={key}; old, new\\n{prop_old}{prop}')\n # this will fail due to a duplicate id\n self._add_pconv(prop)\n #else:\n # already exists\n else:\n self._add_pconv(prop)\n op2.card_count['PCONV'] = nelements\n\n return n\n\n def _read_pconv_nx_16(self, card_obj: PCONV, data: bytes, n: int) -> int:\n \"\"\"\n (11001,110,411)- NX version\n \"\"\"\n op2 = self.op2\n ntotal = 16 # 4*4\n struct_3if = Struct(op2._endian + b'3if')\n nentries = (len(data) - n) // ntotal\n assert (len(data) - n) % ntotal == 0\n props = []\n for unused_i in range(nentries):\n out = struct_3if.unpack(data[n:n+ntotal])\n (pconid, mid, form, expf) = out\n ftype = tid = chlen = gidin = ce = e1 = e2 = e3 = None\n data_in = (pconid, mid, form, expf, ftype, tid, chlen,\n gidin, ce, e1, e2, e3)\n\n prop = PCONV.add_op2_data(data_in)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pconv_msc_56(self, card_obj: PCONV, data: bytes, n: int) -> int:\n \"\"\"\n (11001,110,411)- MSC version - Record 25\n \"\"\"\n op2 = self.op2\n ntotal = 56 # 14*4\n s = Struct(op2._endian + b'3if 4i fii 3f')\n nentries = (len(data) - n) // ntotal\n assert (len(data) - n) % ntotal == 0\n props = []\n for unused_i in range(nentries):\n out = s.unpack(data[n:n+ntotal])\n (pconid, mid, form, expf, ftype, tid, unused_undef1, unused_undef2, chlen,\n gidin, ce, e1, e2, e3) = out\n data_in = (pconid, mid, form, expf, ftype, tid, chlen,\n gidin, ce, e1, e2, e3)\n\n prop = PCONV.add_op2_data(data_in)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pconvm(self, data: bytes, n: int) -> int:\n \"\"\"Record 24 -- PCONVM(2902,29,420)\n\n 1 PID I 
Property identification number\n 2 MID I Material identification number\n 3 FORM I Type of formula used for free convection\n 4 FLAG I Flag for mass flow convection\n 5 COEF RS Constant coefficient used for forced convection\n 6 EXPR RS Reynolds number convection exponent\n 7 EXPPI RS Prandtl number convection exponent into the working fluid\n 8 EXPPO RS Prandtl number convection exponent out of the working fluid\n \"\"\"\n op2 = self.op2\n ntotal = 32 # 8*4\n structi = Struct(op2._endian + b'4i 4f')\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n out = structi.unpack(data[n:n+ntotal])\n if out != (0, 0, 0, 0, 0., 0., 0., 0.):\n (pconid, mid, form, flag, coeff, expr, expri, exppo) = out\n #print(out)\n prop = PCONVM(pconid, mid, coeff, form=form, flag=flag,\n expr=expr, exppi=expri, exppo=exppo, comment='')\n op2._add_methods._add_convection_property_object(prop)\n n += ntotal\n op2.card_count['PCONVM'] = nentries\n return n\n\n def _read_pdamp(self, data: bytes, n: int) -> int:\n \"\"\"\n PDAMP(202,2,45) - the marker for Record ???\n \"\"\"\n op2 = self.op2\n ntotal = 8 * self.factor # 2*4\n struct_if = Struct(mapfmt(op2._endian + b'if', self.size))\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n out = struct_if.unpack(data[n:n+ntotal])\n #(pid, b) = out\n prop = PDAMP.add_op2_data(out)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PDAMP'] = nentries\n return n\n\n def _read_pdampt(self, data: bytes, n: int) -> int: # 26\n self.op2.log.info('geom skipping PDAMPT in EPT')\n return len(data)\n\n def _read_pdamp5(self, data: bytes, n: int) -> int: # 26\n self.op2.log.info('geom skipping PDAMP5 in EPT')\n return len(data)\n\n# PDUM1\n# PDUM2\n# PDUM3\n# PDUM4\n# PDUM5\n# PDUM6\n# PDUM7\n# PDUM8\n# PDUM9\n\n def _read_pelas(self, data: bytes, n: int) -> int:\n \"\"\"PELAS(302,3,46) - the marker for Record 39\"\"\"\n op2 = self.op2\n struct_i3f = Struct(mapfmt(op2._endian + b'i3f', self.size))\n ntotal = 16 * self.factor # 4*4\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_i3f.unpack(edata)\n #(pid, k, ge, s) = out\n if op2.is_debug_file:\n op2.binary_debug.write(' PELAS=%s\\n' % str(out))\n prop = PELAS.add_op2_data(out)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PELAS'] = nproperties\n return n\n\n def _read_pfast_msc(self, data: bytes, n: int) -> int:\n r\"\"\"\n Word Name Type Description\n 1 PID I Property identification number\n 2 MID I Material property identification number\n 3 D RS Diameter of the fastener\n 4 CONNBEH I Connection behavior (0=FF/F, 1=FR, 10=RF/R, 11=RR)\n 5 CONNTYPE I Connection type (0=clamp, 1=hinge, 2=bolt)\n 6 EXTCON I External constraint flag (0=off, 1=on)\n 7 CONDTYPE I Condition type (0=rigid, 1=equivalent)\n 8 WELDTYPE I Weld type (0=spot weld, 1=but seam, 2=T-seam)\n\n 9 MINLEN RS Minimum length of spot weld\n 10 MAXLEN RS Maximum length of spot weld\n 11 GMCHK I Perform geometry check\n 12 SPCGS I SPC the master grid GS\n 13 CMASS RS Concentrated mass\n 14 GE RS Structureal Damping\n\n 15 UNDEF(3) none Not used\n 18 MCID I Element stiffness coordinate system\n 19 MFLAG I Defined the coordinate system type\n 20 KT(3) RS Stiffness values in direction 1\n 23 KR(3) RS Rotation stiffness values in direction 1\n\n C:\\MSC.Software\\msc_nastran_runs\\cfmass.op2\n pid mid D con con ext cond weld min max chk spc cmass ge und und und mcid mfag kt1 kt2 kt3 kr1 kr2 kr3\n ints = (99, 0, 0.1, 0, 0, 0, 0, -1, 
0.2, 5.0, 0, 0, 7.9, 0, 0, 0, 0, -1, 0, 471200.0, 181200.0, 181200.0, 226.6, 45610.0, 45610.0)\n floats = (99, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, -1, 0.2, 5.0, 0.0, 0.0, 7.9, 0.0, 0.0, 0.0, 0.0, -1, 0.0, 471200.0, 181200.0, 181200.0, 226.6, 45610.0, 45610.0)\n \"\"\"\n op2 = self.op2\n #op2.show_data(data[n:], types='ifs')\n #ntotal = 92 * self.factor # 26*4\n #struct1 = Struct(op2._endian + b'ifii 3f')\n\n ntotal = 100 * self.factor # 25*4\n struct1 = Struct(op2._endian + b'2if 5i 2f2i2f 3i 2i 6f')\n ndatai = len(data) - n\n nproperties = ndatai // ntotal\n delta = ndatai % ntotal\n assert delta == 0, 'len(data)-n=%s n=%s' % (ndatai, ndatai / 100.)\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PFAST=%s\\n' % str(out))\n (pid, d, mcid, unused_connbeh, unused_conntype, unused_extcon,\n unused_condtype, unused_weldtype, unused_minlen, unused_maxlen,\n unused_gmcheck, unused_spcgs, mass, ge,\n unused_aa, unused_bb, unused_cc, mcid, mflag,\n kt1, kt2, kt3, kr1, kr2, kr3) = out\n\n data_in = (pid, d, mcid, mflag, kt1, kt2, kt3,\n kr1, kr2, kr3, mass, ge)\n prop = PFAST.add_op2_data(data_in)\n str(prop)\n #print(prop)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PFAST'] = nproperties\n return n\n\n def _read_pfast_nx(self, data: bytes, n: int) -> int:\n \"\"\"\n PFAST(3601,36,55)\n NX only\n \"\"\"\n op2 = self.op2\n ntotal = 48\n struct1 = Struct(op2._endian + b'ifii 8f')\n nproperties = (len(data) - n) // ntotal\n delta = (len(data) - n) % ntotal\n assert delta == 0, 'len(data)-n=%s n=%s' % (len(data) - n, (len(data) - n) / 48.)\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct1.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PFAST=%s\\n' % str(out))\n (pid, d, mcid, mflag, kt1, kt2, kt3, kr1, kr2, kr3, mass, ge) = out\n\n data_in = (pid, d, mcid, mflag, kt1, kt2, kt3,\n kr1, kr2, kr3, mass, ge)\n prop = PFAST.add_op2_data(data_in)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PFAST'] = nproperties\n op2.to_nx(' because PFAST-NX was found')\n return n\n\n def _read_pelast(self, data: bytes, n: int) -> int:\n \"\"\"\n Record 41 -- PELAST(1302,13,34)\n\n 1 PID I Property identification number\n 2 TKID I TABLEDi entry identification number for stiffness\n 3 TGEID I TABLEDi entry identification number for structural\n damping\n 4 TKNID I TABLEDi entry\n \"\"\"\n op2 = self.op2\n ntotal = 16 * self.factor\n struct_4i = Struct(mapfmt(op2._endian + b'4i', self.size))\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_4i.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PELAST=%s\\n' % str(out))\n #(pid, tkid, tgeid, tknid) = out\n prop = PELAST.add_op2_data(out)\n op2._add_methods._add_pelast_object(prop)\n n += ntotal\n op2.card_count['PELAST'] = nproperties\n return n\n\n def _read_pgap(self, data: bytes, n: int) -> int:\n \"\"\"\n PGAP(2102,21,121) - the marker for Record 42\n \"\"\"\n op2 = self.op2\n ntotal = 44 * self.factor\n struct_i10f = Struct(mapfmt(op2._endian + b'i10f', self.size))\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_i10f.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PGAP=%s\\n' % str(out))\n #(pid,u0,f0,ka,kb,kt,mu1,mu2,tmax,mar,trmin) = out\n prop = PGAP.add_op2_data(out)\n self._add_op2_property(prop)\n n += ntotal\n 
op2.card_count['PGAP'] = nproperties\n return n\n\n def _read_phbdy(self, data: bytes, n: int) -> int:\n \"\"\"\n PHBDY(2802,28,236) - the marker for Record 43\n \"\"\"\n op2 = self.op2\n struct_i3f = Struct(op2._endian + b'ifff')\n nproperties = (len(data) - n) // 16\n for unused_i in range(nproperties):\n edata = data[n:n+16]\n out = struct_i3f.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PHBDY=%s\\n' % str(out))\n #(pid, af, d1, d2) = out\n prop = PHBDY.add_op2_data(out)\n op2._add_methods._add_phbdy_object(prop)\n n += 16\n op2.card_count['PHBDY'] = nproperties\n return n\n\n def _read_pintc(self, data: bytes, n: int) -> int:\n self.op2.log.info('geom skipping PINTC in EPT')\n return len(data)\n\n def _read_pints(self, data: bytes, n: int) -> int:\n self.op2.log.info('geom skipping PINTS in EPT')\n return len(data)\n\n def _read_pbeam3(self, data: bytes, n: int) -> int:\n op2 = self.op2\n card_name = 'PBUSHT'\n card_obj = PBUSHT\n methods = {\n 264 : self._read_pbeam3_264,\n 456 : self._read_pbeam3_456,\n }\n try:\n n = op2.reader_geom2._read_double_card(\n card_name, card_obj, self._add_op2_property,\n methods, data, n)\n except DoubleCardError:\n raise\n op2.log.warning(f'try-except {card_name}')\n return n\n\n def _read_pbeam3_456(self, card_obj, data: bytes, n: int) -> int:\n r\"\"\"\n\n # per C:\\MSC.Software\\msc_nastran_runs\\b3plod3.op2\n ints = (2201, 1, 1.0, 0.1833, 0.0833, 0, -1.0, 0, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5, -0.5,\n 2, 1.0, 0.1833, 0.0833, 0, -1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 2, 1.0, 0.1833, 0.0833, 0, -1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 2901, 2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0.5, 0, 0, 0.5, -0.5, 0, 0, -0.5,\n 2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n floats = (2201, 1, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5, -0.5,\n 2, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 2, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 2901, 2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.5, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, -0.5,\n 2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n \"\"\"\n op2 = self.op2\n #op2.show_data(data[n:])\n ntotal = 456 * self.factor # 114*4\n #\n struct1 = Struct(mapfmt(op2._endian +\n b'2i' # pid, mid\n 
b'3f' # A, Iy, Iz\n b'5f' # # a, b, c, d, e\n b'5f fi 14f i' #fj ki 14f i\n b'2i3f' #aa-ee - good\n b'5f' #ff-jj\n b'5f' #kk-oo\n b'5f' #pp-tt\n b'6f' #uu-zz\n b'5f' #aaa-eee\n b'4i' #fff-iii\n # jjj-ooo\n b'2f iii f'\n # ppp-ttt\n b'5f'\n # uuu-zzz\n b'6f'\n b'30f', self.size))\n\n ndatai = len(data) - n\n nentries = ndatai // ntotal\n assert ndatai % ntotal == 0\n\n props = []\n for unused_i in range(nentries):\n #print(n, ntotal)\n datai = data[n:n+ntotal]\n #op2.show_data(datai, types='ifqd')\n n += ntotal\n\n (pid, mid, A, iz, iy,\n a, b, c, d, e,\n f, g, h, i, j,\n k, inta, l, m, ni, o, p, q, r, s, t, u, v, w, x, y, z,\n aa, bb, cc, dd, ee,\n ff, gg, hh, ii, jj,\n kk, ll, mm, nn, oo,\n pp, qq, rr, ss, tt,\n uu, vv, ww, xx, yy, zz,\n aaa, bbb, ccc, ddd, eee,\n fff, ggg, hhh, iii,\n jjj, kkk, lll, mmm, nnn, ooo,\n ppp, qqq, rrr, sss, ttt,\n uuu, vvv, www, xxx, yyy, zzz,\n *other) = struct1.unpack(datai)\n #print(pid, mid, A, iz, iy)\n #print('a-e', (a, b, c, d, e))\n #print('f-j', (f, g, h, i, j))\n #print(k, inta, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z)\n #print('aa-ee', (aa, bb, cc, dd, ee))\n #print('ff-jj', (ff, gg, hh, ii, jj))\n #print('kk-oo', (kk, ll, mm, nn, oo))\n #print('pp-tt', (pp, qq, rr, ss, tt))\n #print('uu-zz', (uu, vv, ww, xx, yy, zz))\n #print('aaa-eee', (aaa, bbb, ccc, ddd, eee))\n #print('fff-jjj', (fff, ggg, hhh, iii))\n #print('jjj-ooo', (jjj, kkk, lll, mmm, nnn, ooo))\n #print('ppp-ttt', (ppp, qqq, rrr, sss, ttt))\n #print('uuu-zzz', (uuu, vvv, www, xxx, yyy, zzz))\n\n if mid == 0:\n continue\n #assert sum(other) < 100, other\n prop = PBEAM3(\n pid, mid, A, iz, iy, iyz=None, j=None, nsm=0.,\n so=None,\n cy=None, cz=None,\n dy=None, dz=None,\n ey=None, ez=None,\n fy=None, fz=None,\n ky=1., kz=1.,\n ny=None, nz=None, my=None, mz=None,\n nsiy=None, nsiz=None, nsiyz=None,\n cw=None, stress='GRID',\n w=None, wy=None, wz=None, comment='')\n assert pid > 0, prop.get_stats()\n assert mid > 0, prop.get_stats()\n str(prop)\n props.append(prop)\n #self._add_op2_property(prop)\n #op2.card_count['PBEAM3'] = nentries\n return n, props\n\n def _read_pbeam3_264(self, card_obj, data: bytes, n: int) -> int:\n \"\"\"\n TODO: partial\n # per test_cbeam_cbeam3???\n ints = (2901, 2, 0.1, 0.1, 0.1, 0, 0.02, 0, 0.5, 0, 0, 0.5, -0.5, 0, 0, -0.5, 2, 0.1, 0.1, 0.1, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0.1, 0.1, 0.1, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0)\n floats = (2901, 2, 0.1, 0.1, 0.1, 0.0, 0.02, 0.0, 0.5, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, -0.5, 2, 0.1, 0.1, 0.1, 0.0, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2, 0.1, 0.1, 0.1, 0.0, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0)\n \"\"\"\n op2 = self.op2\n ntotal = 264 * self.factor # 66*4\n # p/m ayz ae fj ki 14f i\n struct1 = Struct(mapfmt(op2._endian + b'2i 3f 5f 5f fi 14f i 30f 4i', self.size))\n\n ndatai = len(data) - n\n nentries = ndatai // ntotal\n assert ndatai % ntotal == 0\n\n props = []\n for unused_i in range(nentries):\n pid, mid, A, iz, iy, a, b, c, d, e, f, g, h, i, j, k, inta, *other = struct1.unpack(data[n:n+ntotal])\n #print(pid, mid, A, iz, iy)\n #print((a, b, c, d, e))\n #print((f, g, h, i, j))\n #print(k, inta)\n assert sum(other) < 100, other\n prop = PBEAM3(\n pid, mid, A, iz, iy, iyz=None, j=None, nsm=0.,\n so=None,\n cy=None, cz=None,\n dy=None, dz=None,\n ey=None, ez=None,\n fy=None, fz=None,\n ky=1., kz=1.,\n ny=None, nz=None, 
my=None, mz=None,\n nsiy=None, nsiz=None, nsiyz=None,\n cw=None, stress='GRID',\n w=None, wy=None, wz=None, comment='')\n assert pid > 0, prop.get_stats()\n assert mid > 0, prop.get_stats()\n str(prop)\n props.append(prop)\n n += ntotal\n return n, props\n\n def _read_pplane(self, data: bytes, n: int) -> int:\n \"\"\"\n RECORD – PPLANE(3801,38,979)\n Word Name Type Description\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 T RS Default membrane thickness for Ti on the connection entry\n 4 NSM RS Nonstructural mass per unit area\n 5 FOROPT I Formulation option number\n 6 CSOPT I Reserved for coordinate system definition of plane\n 7 UNDEF(2) None\n\n ints = (1, 1, 1.0, 0, 0, 0, 0, 0, 2, 2, 1.0, 0, 0, 0, 0, 0)\n floats = (1, 1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2, 2, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n \"\"\"\n op2 = self.op2\n ntotal = 32 * self.factor # 8*4\n struct1 = Struct(mapfmt(op2._endian + b'2i 2f 4i', self.size))\n\n ndatai = len(data) - n\n nentries = ndatai // ntotal\n assert ndatai % ntotal == 0\n for unused_i in range(nentries):\n out = struct1.unpack(data[n:n+ntotal])\n pid, mid, t, nsm, foropt, csopt = out[:6]\n #print(out)\n assert csopt == 0, csopt\n pplane = op2.add_pplane(pid, mid, t=t, nsm=nsm,\n formulation_option=foropt)\n pplane.validate()\n #print(pplane)\n str(pplane)\n n += ntotal\n op2.card_count['PLPLANE'] = nentries\n return n\n\n def _read_plplane(self, data: bytes, n: int) -> int:\n \"\"\"\n PLPLANE(4606,46,375)\n\n NX 10\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 CID I Coordinate system identification number\n 4 STR CHAR4 Location of stress and strain output\n 5 T RS Default membrane thickness for Ti on the connection entry\n 6 CSOPT I Reserved for coordinate system definition of plane\n 7 UNDEF(5) None\n\n MSC 2016\n PID I Property identification number\n 2 MID I Material identification number\n 3 CID I Coordinate system identification number\n 4 STR CHAR4 Location of stress and strain output\n 5 UNDEF(7 ) none Not used\n\n .. warning:: CSOPT ad T are not supported\n \"\"\"\n op2 = self.op2\n ntotal = 44 * self.factor # 4*11\n if self.size == 4:\n s = Struct(op2._endian + b'3i 4s f 6i')\n else:\n s = Struct(op2._endian + b'3q 8s d 6q')\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n out = s.unpack(data[n:n+ntotal])\n pid, mid, cid, location, unused_t, unused_csopt = out[:6]\n location = location.decode('latin1')\n #op2.show_data(data[n:n+ntotal], 'ifs')\n op2.add_plplane(pid, mid, cid=cid, stress_strain_output_location=location)\n n += ntotal\n op2.card_count['PLPLANE'] = nentries\n return n\n\n def _read_plsolid(self, data: bytes, n: int) -> int:\n \"\"\"\n MSC 2016\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 STR CHAR4 Location of stress and strain output\n 4 UNDEF(4 ) none Not used\n\n NX 10\n 1 PID I Property identification number\n 2 MID I Material identification number\n 3 STR CHAR4 Location of stress and strain output\n 4 CSOPT I Reserved for coordinate system definition of plane\n 5 UNDEF(3) None\n\n .. 
warning:: CSOPT is not supported\n \"\"\"\n op2 = self.op2\n ntotal = 28 * self.factor # 4*7\n if self.size == 4:\n struct1 = Struct(op2._endian + b'2i 4s 4i')\n else:\n struct1 = Struct(op2._endian + b'2q 8s 4q')\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n out = struct1.unpack(data[n:n+ntotal])\n pid, mid, location, unused_csopt, unused_null_a, unused_null_b, unused_null_c = out\n location = location.decode('latin1')\n #op2.show_data(data[n:n+ntotal], 'ifs')\n op2.add_plsolid(pid, mid, stress_strain=location, ge=0.)\n n += ntotal\n op2.card_count['PLSOLID'] = nentries\n return n\n\n def _read_pmass(self, data: bytes, n: int) -> int:\n \"\"\"\n PMASS(402,4,44) - the marker for Record 48\n \"\"\"\n op2 = self.op2\n ntotal = 8 * self.factor # 2*4\n struct_if = Struct(mapfmt(op2._endian + b'if', self.size))\n nentries = (len(data) - n) // ntotal\n for unused_i in range(nentries):\n edata = data[n:n + ntotal]\n out = struct_if.unpack(edata)\n #out = (pid, mass)\n if op2.is_debug_file:\n op2.binary_debug.write(' PMASS=%s\\n' % str(out))\n prop = PMASS.add_op2_data(out)\n self._add_op2_property_mass(prop)\n n += ntotal\n return n\n\n def _read_prod(self, data: bytes, n: int) -> int:\n \"\"\"\n PROD(902,9,29) - the marker for Record 49\n \"\"\"\n op2 = self.op2\n ntotal = 24 * self.factor # 6*4\n struct_2i4f = Struct(mapfmt(op2._endian + b'2i4f', self.size))\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_2i4f.unpack(edata)\n #(pid, mid, a, j, c, nsm) = out\n prop = PROD.add_op2_data(out)\n if op2.is_debug_file:\n op2.binary_debug.write(' PROD=%s\\n' % str(out))\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PROD'] = nproperties\n return n\n\n def _read_pshear(self, data: bytes, n: int) -> int:\n \"\"\"\n PSHEAR(1002,10,42) - the marker for Record 50\n \"\"\"\n op2 = self.op2\n ntotal = 24 * self.factor\n struct_2i4f = Struct(mapfmt(op2._endian + b'2i4f', self.size))\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_2i4f.unpack(edata)\n #(pid, mid, t, nsm, f1, f2) = out\n if op2.is_debug_file:\n op2.binary_debug.write(' PSHEAR=%s\\n' % str(out))\n prop = PSHEAR.add_op2_data(out)\n self._add_op2_property(prop)\n n += ntotal\n op2.card_count['PSHEAR'] = nproperties\n return n\n\n def _read_pshell(self, data: bytes, n: int) -> int:\n \"\"\"\n PSHELL(2302,23,283) - the marker for Record 51\n \"\"\"\n op2 = self.op2\n ntotal = 44 * self.factor # 11*4\n s = Struct(mapfmt(op2._endian + b'iififi4fi', self.size))\n nproperties = (len(data) - n) // ntotal\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = s.unpack(edata)\n (pid, mid1, unused_t, mid2, unused_bk, mid3, unused_ts,\n unused_nsm, unused_z1, unused_z2, mid4) = out\n if op2.is_debug_file:\n op2.binary_debug.write(' PSHELL=%s\\n' % str(out))\n prop = PSHELL.add_op2_data(out)\n n += ntotal\n\n if pid in op2.properties:\n # this is a fake PSHELL\n propi = op2.properties[pid]\n if prop == propi:\n op2.log.warning(f'Fake PSHELL {pid:d} (skipping):\\n{propi}')\n nproperties -= 1\n continue\n #assert propi.type in ['PCOMP', 'PCOMPG'], propi.get_stats()\n op2.log.error(f'PSHELL {pid:d} is also {propi.type} (skipping PSHELL):\\n{propi}{prop}')\n nproperties -= 1\n continue\n #continue\n #if max(pid, mid1, mid2, mid3, mid4) > 1e8:\n #self.big_properties[pid] = prop\n #else:\n self._add_op2_property(prop)\n if nproperties:\n 
op2.card_count['PSHELL'] = nproperties\n return n\n\n def _read_psolid(self, data: bytes, n: int) -> int:\n \"\"\"\n PSOLID(2402,24,281) - the marker for Record 52\n \"\"\"\n op2 = self.op2\n #print(\"reading PSOLID\")\n if self.size == 4:\n ntotal = 28 # 7*4\n struct_6i4s = Struct(op2._endian + b'6i4s')\n else:\n ntotal = 28 * 2\n struct_6i4s = Struct(op2._endian + b'6q8s')\n\n nproperties = (len(data) - n) // ntotal\n nproperties_found = 0\n for unused_i in range(nproperties):\n edata = data[n:n+ntotal]\n out = struct_6i4s.unpack(edata)\n #(pid, mid, cid, inp, stress, isop, fctn) = out\n #data_in = [pid, mid, cid, inp, stress, isop, fctn]\n if op2.is_debug_file:\n op2.binary_debug.write(' PSOLID=%s\\n' % str(out))\n\n n += ntotal\n fctn = out[-1]\n if fctn == b'FAKE':\n op2.log.warning(' PSOLID=%s; is this a PCOMPLS?' % str(out))\n continue\n prop = PSOLID.add_op2_data(out)\n self._add_op2_property(prop)\n nproperties_found += 1\n op2.card_count['PSOLID'] = nproperties_found\n return n\n\n# PSOLIDL\n# PTRIA6\n# PTSHELL\n\n def _read_ptube(self, data: bytes, n: int) -> int:\n \"\"\"\n PTUBE(1602,16,30) - the marker for Record 56\n\n .. todo:: OD2 only exists for heat transfer...\n how do i know if there's heat transfer at this point?\n I could store all the tubes and add them later,\n but what about themal/non-thermal subcases?\n\n .. warning:: assuming OD2 is not written (only done for thermal)\n \"\"\"\n op2 = self.op2\n struct_2i3f = Struct(op2._endian + b'2i3f')\n nproperties = (len(data) - n) // 20\n for unused_i in range(nproperties):\n edata = data[n:n+20] # or 24???\n out = struct_2i3f.unpack(edata)\n (pid, mid, OD, t, nsm) = out\n data_in = [pid, mid, OD, t, nsm]\n if op2.is_debug_file:\n op2.binary_debug.write(' PTUBE=%s\\n' % str(out))\n prop = PTUBE.add_op2_data(data_in)\n self._add_op2_property(prop)\n n += 20\n op2.card_count['PTUBE'] = nproperties\n return n\n\n def _read_pset(self, data: bytes, n: int) -> int:\n op2 = self.op2\n struct_5i4si = Struct(op2._endian + b'5i4si')\n nentries = 0\n while n < len(data):\n edata = data[n:n+28]\n out = struct_5i4si.unpack(edata)\n #print(out)\n idi, poly1, poly2, poly3, cid, typei, typeid = out\n typei = typei.rstrip().decode('latin1')\n assert typei in ['SET', 'ELID'], (idi, poly1, poly2, poly3, cid, typei, typeid)\n if op2.is_debug_file:\n op2.binary_debug.write(' PVAL=%s\\n' % str(out))\n #print(idi, poly1, poly2, poly3, cid, typei, typeid)\n typeids = []\n n += 28\n while typeid != -1:\n typeids.append(typeid)\n typeid, = op2.struct_i.unpack(data[n:n+4])\n n += 4\n #print(val)\n #print(typeids)\n # PSET ID POLY1 POLY2 POLY3 CID SETTYP ID\n if len(typeids) == 1:\n typeids = typeids[0]\n op2.add_pset(idi, poly1, poly2, poly3, cid, typei, typeids)\n op2.card_count['PSET'] = nentries\n return n\n\n def _read_pval(self, data: bytes, n: int) -> int:\n \"\"\"\n PVAL(10201,102,400)\n\n Word Name Type Description\n 1 ID I p-value set identification number\n 2 POLY1 I Polynomial order in 1 direction of the CID system\n 3 POLY2 I Polynomial order in 2 direction of the CID system\n 4 POLY3 I Polynomial order in 2 direction of the CID system\n 5 CID I Coordinate system identification number\n 6 TYPE CHAR4 Type of set provided: \"SET\" or \"ELID\"\n 7 TYPEID I SET identification number or element identification\n number with this p-value specification.\n Words 1 through 7 repeat until End of Record\n \"\"\"\n op2 = self.op2\n #op2.show_data(data[n:])\n if self.size == 4:\n struct_5i4si = Struct(op2._endian + b'5i 4s i')\n struct_i = 
op2.struct_i\n else:\n struct_5i4si = Struct(op2._endian + b'5q 8s q')\n struct_i = op2.struct_q\n\n nentries = 0\n ntotal = 28 * self.factor\n size = self.size\n while n < len(data):\n edata = data[n:n+ntotal]\n out = struct_5i4si.unpack(edata)\n #print(out)\n idi, poly1, poly2, poly3, cid, typei, typeid = out\n typei = typei.rstrip().decode('latin1')\n assert typei in ['SET', 'ELID'], f'idi={idi} poly1={poly1} poly2={poly2} poly3={poly3} cid={cid} typei={typei} typeid={typeid}'\n if op2.is_debug_file:\n op2.binary_debug.write(' PVAL=%s\\n' % str(out))\n #print(idi, poly1, poly2, poly3, cid, typei, typeid)\n typeids = []\n n += ntotal\n while typeid != -1:\n typeids.append(typeid)\n typeid, = struct_i.unpack(data[n:n+size])\n n += size\n #print(val)\n #print(typeids)\n # PVAL ID POLY1 POLY2 POLY3 CID SETTYP ID\n op2.add_pval(idi, poly1, poly2, poly3, cid, typei, typeids)\n op2.card_count['PVAL'] = nentries\n return n\n\n def _read_pvisc(self, data: bytes, n: int) -> int:\n \"\"\"PVISC(1802,18,31) - the marker for Record 39\"\"\"\n op2 = self.op2\n struct_i2f = Struct(op2._endian + b'i2f')\n nproperties = (len(data) - n) // 12\n for unused_i in range(nproperties):\n edata = data[n:n+12]\n out = struct_i2f.unpack(edata)\n if op2.is_debug_file:\n op2.binary_debug.write(' PVISC=%s\\n' % str(out))\n #(pid, ce, cr) = out\n prop = PVISC.add_op2_data(out)\n self._add_op2_property(prop)\n n += 12\n op2.card_count['PVISC'] = nproperties\n return n\n\n# PWELD\n# PWSEAM\n def _read_view(self, data: bytes, n: int) -> int:\n self.op2.log.info('geom skipping VIEW in EPT')\n return len(data)\n\n def _read_view3d(self, data: bytes, n: int) -> int:\n self.op2.log.info('geom skipping VIEW3D in EPT')\n return len(data)\n\ndef break_by_minus1(idata):\n \"\"\"helper for ``read_nsm_nx``\"\"\"\n i1 = 0\n i = 0\n i2 = None\n packs = []\n for idatai in idata:\n #print('data[i:] = ', data[i:])\n if idatai == -1:\n i2 = i\n packs.append((i1, i2))\n i1 = i2 + 1\n i += 1\n continue\n i += 1\n #print(packs)\n return packs\n"
] | [
[
"numpy.frombuffer",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
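The EPT property reader in the record above follows one pattern throughout: each card type is a fixed-width binary record, so the entry count is (len(data) - n) // ntotal and each slice is decoded with a precompiled struct.Struct. Below is a minimal, self-contained sketch of that unpacking loop, assuming a little-endian PROD-style layout (2 ints + 4 floats = 24 bytes per entry); parse_prod_records and the sample payload are hypothetical illustrations, not pyNastran's API.

# Sketch of the fixed-width record loop used by the reader above.
# Layout assumed: pid, mid (ints) + A, J, C, nsm (floats) = 24 bytes.
from struct import Struct

def parse_prod_records(data: bytes, n: int = 0, endian: str = '<'):
    """Yield (pid, mid, a, j, c, nsm) tuples from a packed byte blob."""
    record = Struct(endian + '2i4f')      # 2 ints + 4 floats
    ntotal = record.size                  # 24 bytes per property
    nentries = (len(data) - n) // ntotal  # same count logic as _read_prod
    for _ in range(nentries):
        yield record.unpack(data[n:n + ntotal])
        n += ntotal

# Hypothetical usage: two entries packed back-to-back.
blob = Struct('<2i4f').pack(1, 100, 2.0, 0.5, 0.0, 0.1) + \
       Struct('<2i4f').pack(2, 100, 1.5, 0.4, 0.0, 0.2)
for pid, mid, area, j, c, nsm in parse_prod_records(blob):
    print(pid, mid, area, j, c, nsm)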
FelipeH92/Task-Space-Control-Vision | [
"77d9f709d7cb0afb50ef9baf6ba39304aca445e5",
"77d9f709d7cb0afb50ef9baf6ba39304aca445e5"
] | [
"Experiments/src/Task Control - Python/UR5Class.py",
"Experiments/src/Task Control - Python/trajectoryCheck.py"
] | [
"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n## @package UR5\r\n# Documentação para o pacote de classes UR5.\r\n#\r\n# Documentação do código produzido para controle do manipulador UR5 e geração e controle de suas posições.\r\n# Cada código aqui documentado possui uma breve descrição de sua função, suas entradas e saídas.\r\nimport numpy as np\r\nfrom numpy.linalg import inv\r\nfrom numpy.linalg import norm\r\nfrom numpy.linalg import pinv\r\nfrom scipy.signal import butter,lfilter\r\nfrom scipy.signal import freqz\r\nimport struct\r\nimport time\r\nimport csv\r\nimport Transformations as tf\r\nimport os\r\n\r\n## Documentação da Classe UR5Class para controle remoto do manipulador Universal Robots 5 (UR5).\r\n#\r\n# Essa classe é responsável por interpretar os dados recebidos pela caixa de controle do UR5 e controlar seu funcionamento ao longo do projeto.\r\n# A ela cabe as funções dos cálculos de cinemática direta e inversa para as diversas posições do robô, interpretar os dados do robô, verificar\r\n# seu estado de segurança e funcionamento, assim como realizar qualquer cálculo de calibração ou posição necessário.\r\nclass UR5Class:\r\n _standard_DH = np.mat([[0,-.425,-.39225,0,0,0], [1.570796327, 0, 0, 1.570796327, -1.570796327, 0], [.089159,0,0,.10915,.09465,.0823], [0, 0, 0, 0, 0, 0]])\r\n # _standard_DH é a tabela DH tradicional do Robô. As linhas correspondem respectivamente a (a, alpha, d,q)\r\n \r\n _robot_data = []\r\n # Lista vazia para receber os dados do robô\r\n\r\n _data_pack_max = 133\r\n # Tamanho maximo e esperado de valores recebidos em lista no pacote de dados\r\n processTimeList = []\r\n\r\n errorDB = []\r\n error_D_DB = []\r\n wDB = []\r\n u = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)\r\n errorSaturation = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)\r\n errorPrevious = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)\r\n errorSum = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)\r\n\r\n normErro = np.zeros(6,dtype=np.float64)\r\n\r\n ## Construtor da classe.\r\n # @param self O ponteiro do objeto.\r\n # @param delta_DH Os dados de calibração da matriz Denavit-Hartenberg do robô a ser controlado. 
\r\n def __init__(self, delta_DH = np.zeros((5,6))):\r\n self.delta_standard_DH = delta_DH\r\n\r\n self._effective_a = self._standard_DH[0,:] + self.delta_standard_DH[0,:]\r\n self._effective_alpha = self._standard_DH[1,:] + self.delta_standard_DH[1,:]\r\n self._effective_d = self._standard_DH[2,:] + self.delta_standard_DH[2,:]\r\n self._effective_q = np.array(self._standard_DH[3,:] + self.delta_standard_DH[3,:])\r\n \r\n # Os dados efetivos equivalem aos dados esperados do UR5 mais os dados de calibração do robô específico.\r\n\r\n Rot_x_1 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,0]), -np.sin(self._effective_alpha[0,0]), 0], [0, np.sin(self._effective_alpha[0,0]), np.cos(self._effective_alpha[0,0]), 0], [ 0, 0, 0, 1]])\r\n Rot_x_2 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,1]), -np.sin(self._effective_alpha[0,1]), 0], [0, np.sin(self._effective_alpha[0,1]), np.cos(self._effective_alpha[0,1]), 0], [ 0, 0, 0, 1]])\r\n Rot_x_3 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,2]), -np.sin(self._effective_alpha[0,2]), 0], [0, np.sin(self._effective_alpha[0,2]), np.cos(self._effective_alpha[0,2]), 0], [ 0, 0, 0, 1]])\r\n Rot_x_4 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,3]), -np.sin(self._effective_alpha[0,3]), 0], [0, np.sin(self._effective_alpha[0,3]), np.cos(self._effective_alpha[0,3]), 0], [ 0, 0, 0, 1]])\r\n Rot_x_5 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,4]), -np.sin(self._effective_alpha[0,4]), 0], [0, np.sin(self._effective_alpha[0,4]), np.cos(self._effective_alpha[0,4]), 0], [ 0, 0, 0, 1]])\r\n Rot_x_6 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,5]), -np.sin(self._effective_alpha[0,5]), 0], [0, np.sin(self._effective_alpha[0,5]), np.cos(self._effective_alpha[0,5]), 0], [ 0, 0, 0, 1]])\r\n\r\n Trans_d_1 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,0]], [0, 0, 0, 1]])\r\n Trans_d_2 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,1]], [0, 0, 0, 1]])\r\n Trans_d_3 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,2]], [0, 0, 0, 1]])\r\n Trans_d_4 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,3]], [0, 0, 0, 1]])\r\n Trans_d_5 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,4]], [0, 0, 0, 1]])\r\n Trans_d_6 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,5]], [0, 0, 0, 1]])\r\n\r\n Trans_a_1 = np.mat([[1, 0, 0, self._effective_a[0,0]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n Trans_a_2 = np.mat([[1, 0, 0, self._effective_a[0,1]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n Trans_a_3 = np.mat([[1, 0, 0, self._effective_a[0,2]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n Trans_a_4 = np.mat([[1, 0, 0, self._effective_a[0,3]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n Trans_a_5 = np.mat([[1, 0, 0, self._effective_a[0,4]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n Trans_a_6 = np.mat([[1, 0, 0, self._effective_a[0,5]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n\r\n self._A_0_1 = Trans_d_1 * Trans_a_1 * Rot_x_1\r\n self._A_0_2 = Trans_d_2 * Trans_a_2 * Rot_x_2\r\n self._A_0_3 = Trans_d_3 * Trans_a_3 * Rot_x_3\r\n self._A_0_4 = Trans_d_4 * Trans_a_4 * Rot_x_4\r\n self._A_0_5 = Trans_d_5 * Trans_a_5 * Rot_x_5\r\n self._A_0_6 = Trans_d_6 * Trans_a_6 * Rot_x_6\r\n # Transformações comuns, indiferentes a movimentação, utilizadas em cálculos futuros.\r\n\r\n return\r\n ## Método que recebe e configura o pacote de dados do robô.\r\n # @param self O ponteiro do 
objeto.\r\n # @param data O pacote de dados recebido pela conexão Ethernet com o robô.\r\n def setRobotData(self, data):\r\n size = len(data)\r\n self._robot_data = []\r\n # O primeiro dado recebido, de tempo, é um inteiro de 4 bytes.\r\n self._robot_data.append(struct.unpack('!i', data[0:4]))\r\n i = 4\r\n # O resto dos dados recebidos vem em formato de double de 8 bytes.\r\n while i < size:\r\n self._robot_data.append(struct.unpack('!d', data[i:i+8])[0])\r\n i += 8\r\n # Já atualiza os dados de juntas do robô.\r\n if (size < (4+(34*8))):\r\n print(\"[WARNING] Data size smaller than expected. Bytes: \" + str(size))\r\n return\r\n\r\n self._effective_q = np.array(self._robot_data[32:38]) + self.delta_standard_DH[3,:]\r\n return \r\n # setRobotData recebe o pacote de 1060 bytes e os separa nos 160 valores da lista de dados.\r\n\r\n def setRobotDataRTDE(self, data):\r\n\r\n #print(data.actual_TCP_pose)\r\n self._robot_data[1] = np.asarray(data.timestamp, dtype = np.float64)\r\n self._robot_data[2:8] = np.asarray(data.target_q, dtype = np.float64)\r\n self._robot_data[8:14] = np.asarray(data.target_qd, dtype = np.float64)\r\n\r\n self._robot_data[32:38] = np.asarray(data.actual_q, dtype = np.float64)\r\n self._robot_data[38:44] = np.asarray(data.actual_qd, dtype = np.float64)\r\n\r\n self._robot_data[56:62] = np.asarray(data.actual_TCP_pose, dtype = np.float64)\r\n\r\n self._robot_data[62:68] = np.asarray(data.actual_TCP_speed, dtype = np.float64)\r\n self._robot_data[68:74] = np.asarray(data.actual_TCP_force, dtype = np.float64)\r\n\r\n self._robot_data[74:80] = np.asarray(data.target_TCP_pose, dtype = np.float64)\r\n self._robot_data[80:86] = np.asarray(data.target_TCP_speed, dtype = np.float64)\r\n\r\n self._robot_data[102] = np.asarray(data.safety_mode, dtype = np.int32)\r\n\r\n self._robot_data[132] = np.asarray(data.runtime_state, dtype = np.uint32)\r\n\r\n\r\n\r\n q = np.asarray(data.actual_q)\r\n\r\n self._effective_q = q + self.delta_standard_DH[3,:]\r\n # <field name=\"timestamp\" type=\"DOUBLE\"/>\r\n # <field name=\"target_q\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_qd\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_qdd\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_current\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_moment\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_q\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_qd\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_current\" type=\"VECTOR6D\"/>\r\n # <field name=\"joint_control_output\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_TCP_pose\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_TCP_speed\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_TCP_force\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_TCP_pose\" type=\"VECTOR6D\"/>\r\n # <field name=\"target_TCP_speed\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_digital_input_bits\" type=\"UINT64\"/>\r\n # <field name=\"joint_temperatures\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_execution_time\" type=\"DOUBLE\"/>\r\n # <field name=\"robot_mode\" type=\"INT32\"/>\r\n # <field name=\"joint_mode\" type=\"VECTOR6INT32\"/>\r\n # <field name=\"safety_mode\" type=\"INT32\"/>\r\n # <field name=\"actual_tool_accelerometer\" type=\"VECTOR3D\"/>\r\n # <field name=\"speed_scaling\" type=\"DOUBLE\"/>\r\n # <field name=\"target_speed_fraction\" type=\"DOUBLE\"/>\r\n # <field name=\"actual_momentum\" type=\"DOUBLE\"/>\r\n # <field name=\"actual_main_voltage\" type=\"DOUBLE\"/>\r\n # <field name=\"actual_robot_voltage\" type=\"DOUBLE\"/>\r\n # <field 
name=\"actual_robot_current\" type=\"DOUBLE\"/>\r\n # <field name=\"actual_joint_voltage\" type=\"VECTOR6D\"/>\r\n # <field name=\"actual_digital_output_bits\" type=\"UINT64\"/>\r\n # <field name=\"runtime_state\" type=\"UINT32\"/>\r\n return\r\n\r\n ## Retorna verdadeiro ou falso para o estado de segurança do robô.\r\n # @param self O ponteiro do objeto.\r\n def checkSafety(self):\r\n try:\r\n if self._robot_data[102] == 1:\r\n safety = True\r\n else:\r\n safety = False\r\n return safety\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # checkSafety verifica se a variável de segurança do robô está apta a operar\r\n\r\n ## Retorna verdadeiro ou falso para o estado de operação do robô.\r\n # @param self O ponteiro do objeto.\r\n def programStateCheck(self):\r\n try:\r\n if self._robot_data[132] == 1:\r\n state = True\r\n else:\r\n state = False\r\n return state\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # programStateCheck verifica se a variável de estado do robô está apta a operar\r\n\r\n ## Imprime em prompt de comando as 133 informações recebidas pelo pacote de dados do robô.\r\n # @param self O ponteiro do objeto.\r\n def printRobotData(self):\r\n size = len(self._robot_data)\r\n\r\n if size == self._datapackmax:\r\n print(\"[INFO] Message Size in Bytes: \" + str(self._robot_data[0]))\r\n print(\"[INFO] Time: \" + str(self._robot_data[1]))\r\n print(\"[INFO] q target\" + str(self._robot_data[2:8]))\r\n print(\"[INFO] qd target\" + str(self._robot_data[8:14]))\r\n print(\"[INFO] qdd target\" + str(self._robot_data[14:20]))\r\n print(\"[INFO] I target\" + str(self._robot_data[20:26]))\r\n print(\"[INFO] M target\" + str(self._robot_data[26:32]))\r\n print(\"[INFO] q actual\" + str(self._robot_data[32:38]))\r\n print(\"[INFO] qd actual\" + str(self._robot_data[38:44]))\r\n print(\"[INFO] I actual\" + str(self._robot_data[44:50]))\r\n print(\"[INFO] I control\" + str(self._robot_data[50:56]))\r\n print(\"[INFO] Tool Vector Actual\" + str(self._robot_data[56:62]))\r\n print(\"[INFO] TCP Speed Actual\" + str(self._robot_data[62:68]))\r\n print(\"[INFO] TCP Force\" + str(self._robot_data[68:74]))\r\n print(\"[INFO] Tool Vector Target\" + str(self._robot_data[74:80]))\r\n print(\"[INFO] TCP Speed Target\" + str(self._robot_data[80:86]))\r\n print(\"[INFO] digital input bits\" + str(self._robot_data[86]))\r\n print(\"[INFO] Motor Temperatures\" + str(self._robot_data[87:93]))\r\n print(\"[INFO] Controller Timer\" + str(self._robot_data[93]))\r\n print(\"[INFO] Test Value\" + str(self._robot_data[94]))\r\n print(\"[INFO] Robot Mode\" + str(self._robot_data[95]))\r\n print(\"[INFO] Joint Modes\" + str(self._robot_data[96:102]))\r\n print(\"[INFO] Safety Mode\" + str(self._robot_data[102]))\r\n print(\"[INFO] Tool Acceleration Values\" + str(self._robot_data[109:112]))\r\n print(\"[INFO] Speed Scaling\" + str(self._robot_data[118]))\r\n print(\"[INFO] Linear Momentum Norm\" + str(self._robot_data[119]))\r\n print(\"[INFO] V Main\" + str(self._robot_data[122]))\r\n print(\"[INFO] V Robot\" + str(self._robot_data[123]))\r\n print(\"[INFO] I Robot\" + str(self._robot_data[124]))\r\n print(\"[INFO] V actual\" + str(self._robot_data[125:131]))\r\n print(\"[INFO] Digital Outputs\" + str(self._robot_data[131]))\r\n print(\"[INFO] Program State\" + str(self._robot_data[132]))\r\n # Exceção caso o pacote venha menor que 1060 Bytes\r\n else:\r\n print(\"[WARNING] Size of data smaller than expected: \", size)\r\n return\r\n # 
printRobotData imprime em tela todos os valores do pacote de dados traduzido, para depuração\r\n\r\n ## Retorna o vetor de posição do efetuador do robô, em formato [x, y, z, rx, ry, rz].\r\n # @param self O ponteiro do objeto.\r\n def getPositionTarget(self):\r\n try:\r\n array = np.array(self._robot_data[74:80])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getPosition retorna a posição atual do vetor da ferramenta.\r\n\r\n def getPosition(self):\r\n try:\r\n array = np.array(self._robot_data[56:62])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getPosition retorna a posição atual do vetor da ferramenta.\r\n\r\n ## Retorna o vetor de velocidade do efetuador do robô, em formato [dx, dy, dz, drx, dry, drz].\r\n # @param self O ponteiro do objeto.\r\n def getTCPSpeed(self):\r\n try:\r\n array = np.array(self._robot_data[62:68])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getTCPSpeed retorna a velocidade da ferramenta.\r\n\r\n ## Retorna o vetor de velocidade do efetuador do robô, em formato [dx, dy, dz, drx, dry, drz].\r\n # @param self O ponteiro do objeto.\r\n def getTCPSpeedTarget(self):\r\n try:\r\n array = np.array(self._robot_data[80:86])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getTCPSpeed retorna a velocidade da ferramenta.\r\n\r\n ## Retorna o vetor de velocidade modular do efetuador do robô, em formato [v].\r\n # @param self O ponteiro do objeto.\r\n def getTCPSpeedMod(self):\r\n try:\r\n v = np.sqrt(self._robot_data[62]*self._robot_data[62] + self._robot_data[63]*self._robot_data[63] + self._robot_data[64]*self._robot_data[64])\r\n return v\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getTCPSpeed retorna a velocidade da ferramenta.\r\n\r\n ## Retorna o vetor de posição das seis juntas do robô.\r\n # @param self O ponteiro do objeto.\r\n def getJointPosition(self):\r\n try:\r\n array = np.array(self._robot_data[32:38])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n\r\n ## Retorna o vetor de posição das seis juntas do robô.\r\n # @param self O ponteiro do objeto.\r\n def getJointPositionTarget(self):\r\n try:\r\n array = np.array(self._robot_data[2:8])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # Retorna o valor das articulações da ferramenta\r\n\r\n ## Retorna o vetor de velocidade das seis juntas do robô.\r\n # @param self O ponteiro do objeto.\r\n def getJointSpeed(self):\r\n try:\r\n array = np.array(self._robot_data[38:44])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getJointSpeed retorna a velocidade da ferramenta.\r\n\r\n ## Retorna o vetor de velocidade das seis juntas do robô.\r\n # @param self O ponteiro do objeto.\r\n def getJointSpeedTarget(self):\r\n try:\r\n array = np.array(self._robot_data[8:14])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getJointSpeed retorna a velocidade da ferramenta.\r\n\r\n def getTCPForce(self):\r\n try:\r\n array = np.array(self._robot_data[68:74])\r\n return array\r\n except:\r\n print(\"[ERROR] Could not find Robot Data!\")\r\n return None\r\n # getJointSpeed retorna a velocidade da ferramenta.\r\n\r\n ## Retorna o tempo atual do robô desde que foi ligado.\r\n # 
@param self O ponteiro do objeto.\r\n def getTime(self):\r\n return self._robot_data[1]\r\n\r\n # Retorna o valor do tempo de uso atual\r\n\r\n ## Realiza a cinemática direta do UR5 para a posição de juntas atual. O método retorna a matriz homogênea 4x4 da posição atual, ou um vetor em RV ou RPY.\r\n # @param self O ponteiro do objeto.\r\n # @param q O vetor de coordenadas de junta.\r\n # @param vector parâmetro que define se o tipo de retorno como vetor de posições em RV.\r\n # @param rpy parâmetro que, juntamente de vector, define o retorno como vetor de posições em RPY.\r\n def ur5_direct_kinematics(self, q, vector = False, rpy = False, apply_offset = False):\r\n\r\n if (apply_offset == True):\r\n # q = q + self.delta_standard_DH[3,:]\r\n q = np.squeeze(np.asarray(q + self.delta_standard_DH[3,:]))\r\n\r\n _rot_z_1 = np.mat([[np.cos(q[0]), -np.sin(q[0]), 0, 0],[np.sin(q[0]), np.cos(q[0]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n _rot_z_2 = np.mat([[np.cos(q[1]), -np.sin(q[1]), 0, 0],[np.sin(q[1]), np.cos(q[1]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n _rot_z_3 = np.mat([[np.cos(q[2]), -np.sin(q[2]), 0, 0],[np.sin(q[2]), np.cos(q[2]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n _rot_z_4 = np.mat([[np.cos(q[3]), -np.sin(q[3]), 0, 0],[np.sin(q[3]), np.cos(q[3]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n _rot_z_5 = np.mat([[np.cos(q[4]), -np.sin(q[4]), 0, 0],[np.sin(q[4]), np.cos(q[4]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n _rot_z_6 = np.mat([[np.cos(q[5]), -np.sin(q[5]), 0, 0],[np.sin(q[5]), np.cos(q[5]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\r\n\r\n # Utiliza as matrizes definidas no construtor e as de rotação das juntas atuais para retornar a matriz final.\r\n self._A_1 = _rot_z_1 * self._A_0_1\r\n self._A_2 = _rot_z_2 * self._A_0_2\r\n self._A_3 = _rot_z_3 * self._A_0_3\r\n self._A_4 = _rot_z_4 * self._A_0_4\r\n self._A_5 = _rot_z_5 * self._A_0_5\r\n self._A_6 = _rot_z_6 * self._A_0_6\r\n\r\n self._H = self._A_1 * self._A_2 * self._A_3 * self._A_4 * self._A_5 * self._A_6\r\n #print self._H\r\n\r\n if (vector == False):\r\n return self._H\r\n else:\r\n vetor = tf.matrix2RotationVector(self._H[0:3,0:3])\r\n array = np.array([self._H[0,3], self._H[1,3], self._H[2,3]], float)\r\n vetor = np.hstack((array,vetor))\r\n #print vetor\r\n if (rpy == False):\r\n return vetor\r\n else:\r\n vetor[3:6] = tf.rotationVector2RollPitchYaw(vetor[3:6])\r\n return vetor\r\n # ur5_direct_kinematics executa a cinemática direta do UR5 e retorna a matriz 4x4 de posição e orientação do UR5\r\n\r\n\r\n def verifyDelta(self, epsilon = 10e-6):\r\n\r\n direct = self.ur5_direct_kinematics(self.getJointPosition(), vector = True, apply_offset = True)\r\n real = self.getPosition()\r\n\r\n diff = tf.computeDifference(real,direct)\r\n\r\n print(\"[INFO] Direct Kinematics calculated with Delta: \" + str(direct))\r\n print(\"[INFO] Direct Kinematics real: \" + str(real))\r\n\r\n error = norm(diff[0:3])\r\n\r\n print(\"[INFO] Error: \", error)\r\n\r\n\r\n if (error < epsilon):\r\n print(\"[INFO] Correct Delta Matrix!\")\r\n return True\r\n else:\r\n print(\"[WARNING] Incorrect Delta Matrix!\")\r\n return False\r\n\r\n\r\n def _DH(self, a, alpha, d, theta):\r\n\r\n Td = np.asmatrix(np.eye(4))\r\n Td[2,3] = d\r\n Ta = np.asmatrix(np.eye(4))\r\n Ta[0,3] = a\r\n Rtheta = tf.Rot_z(theta)\r\n Rtheta = np.mat([[Rtheta[0,0], Rtheta[0,1], Rtheta[0,2], 0], [Rtheta[1,0], Rtheta[1,1], Rtheta[1,2], 0], [Rtheta[2,0], Rtheta[2,1], Rtheta[2,2], 0], [0,0,0,1]])\r\n Ralpha = tf.Rot_x(alpha)\r\n Ralpha = np.mat([[Ralpha[0,0], Ralpha[0,1], Ralpha[0,2], 
0], [Ralpha[1,0], Ralpha[1,1], Ralpha[1,2], 0], [Ralpha[2,0], Ralpha[2,1], Ralpha[2,2], 0], [0,0,0,1]])\r\n\r\n G = Td * Rtheta * Ta * Ralpha\r\n\r\n return G\r\n # _DH retorna uma matrix 4x4 de junta especifica, utilizado na cinemática inversa analítica\r\n\r\n\r\n def _analytic_ur5_inverse_kinematics(self, p):\r\n\r\n\r\n rvMatrix = tf.rotationVector2Matrix(p[3:6])\r\n\r\n gd = np.mat(([[rvMatrix[0,0], rvMatrix[0,1], rvMatrix[0,2], p[0]], [rvMatrix[1,0], rvMatrix[1,1], rvMatrix[1,2], p[1]], [rvMatrix[2,0], rvMatrix[2,1], rvMatrix[2,2], p[2]], [0, 0, 0, 1]]))\r\n\r\n theta = np.zeros((6, 8))\r\n\r\n d1 = self._standard_DH[2,0]\r\n d2 = self._standard_DH[2,1]\r\n d3 = self._standard_DH[2,2]\r\n d4 = self._standard_DH[2,3]\r\n d5 = self._standard_DH[2,4]\r\n d6 = self._standard_DH[2,5]\r\n\r\n a1 = self._standard_DH[0,0]\r\n a2 = self._standard_DH[0,1]\r\n a3 = self._standard_DH[0,2]\r\n a4 = self._standard_DH[0,3]\r\n a5 = self._standard_DH[0,4]\r\n a6 = self._standard_DH[0,5]\r\n\r\n alpha1 = self._standard_DH[1,0]\r\n alpha2 = self._standard_DH[1,1]\r\n alpha3 = self._standard_DH[1,2]\r\n alpha4 = self._standard_DH[1,3]\r\n alpha5 = self._standard_DH[1,4]\r\n alpha6 = self._standard_DH[1,5]\r\n\r\n # Calculating theta1\r\n p05 = gd * np.mat([[0], [0], [-d6], [1]])\r\n p05 = p05 - np.mat([[0], [0], [0], [1]])\r\n psi = np.arctan2(p05[1], p05[0])\r\n p05xy = np.sqrt(p05[1]*p05[1] + p05[0]*p05[0])\r\n if (d4 > p05xy):\r\n print (\"[WARNING] No solution for Theta1: d4 > P05xy\")\r\n print (\"[WARNING] Creating aproximation highly inaccurate\")\r\n d4 = p05xy - 1e-10\r\n try:\r\n phi = np.arccos(d4 / p05xy)\r\n except:\r\n print(\"[ERROR] Division by zero: \" + str(p05xy))\r\n return None\r\n theta[0, 0:4] = np.radians(90) + psi + phi\r\n theta[0, 4:8] = np.radians(90) + psi - phi\r\n theta = np.real(theta)\r\n\r\n # Calculating theta5\r\n cols = np.array([0, 4])\r\n for i in range(0, cols.size):\r\n c = cols[i];\r\n try:\r\n T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))\r\n except:\r\n print(\"[ERROR] Could not find inverse: \" + str(self._DH(a1, alpha1, d1, theta[0,c])))\r\n return None\r\n T16 = T10 * gd\r\n p16z = T16[2,3]\r\n try:\r\n if (((p16z-d4)/d6) > 1):\r\n print (\"[WARNING] No solution for Theta5: (p16z-d4)/d6) > 1\")\r\n print (\"[WARNING] Creating aproximation highly inaccurate\")\r\n d6 = (p16z-d4) + 1e-10\r\n t5 = np.arccos((p16z-d4)/d6)\r\n except:\r\n print(\"[ERROR] Division by zero: \" + str(d6))\r\n return None\r\n theta[4, c:c+2] = t5\r\n theta[4, c+2:c+4] = -t5\r\n theta = np.real(theta)\r\n\r\n # Calculating theta6\r\n cols = np.array([0, 2, 4, 6])\r\n for i in range(0, cols.size):\r\n c = cols[i]\r\n T01 = self._DH(a1, alpha1, d1, theta[0,c])\r\n try:\r\n T61 = inv(gd) * T01\r\n except:\r\n print(\"[ERROR] Could not find inverse: \" + str(gd))\r\n return None\r\n T61zy = T61[1, 2]\r\n T61zx = T61[0, 2]\r\n t5 = theta[4, c]\r\n if (np.sin(t5) == 0):\r\n theta[5, c:c+2] = 0\r\n else: \r\n theta[5, c:c+2] = np.arctan2(-T61zy/np.sin(t5), T61zx/np.sin(t5))\r\n theta = np.real(theta)\r\n\r\n # Calculating theta3\r\n cols = np.array([0, 2, 4, 6])\r\n for i in range (0, cols.size):\r\n c = cols[i]\r\n try:\r\n T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))\r\n T65 = inv(self._DH(a6, alpha6, d6, theta[5,c]))\r\n T54 = inv(self._DH(a5, alpha5, d5, theta[4,c]))\r\n except T10:\r\n print(\"[ERROR] Could not find inverse: Theta3, inverse 1, \" + str(T10))\r\n return None\r\n except T65:\r\n print(\"[ERROR] Could not find inverse: Theta3, inverse 2, \" + str(T65))\r\n return 
None\r\n except T54:\r\n print(\"[ERROR] Could not find inverse: Theta3, inverse 3, \" + str(T54))\r\n return None\r\n T14 = T10 * gd * T65 * T54\r\n p13 = T14 * np.mat([[0], [-d4], [0], [1]])\r\n p13 = p13 - np.mat([[0], [0], [0], [1]])\r\n p13norm2 = norm(p13) * norm(p13)\r\n arg = (p13norm2-a2*a2-a3*a3)/(2*a2*a3)\r\n if (arg > 1 or arg < -1):\r\n print (\"[WARNING] No solution for Theta3: arg < -1 or arg > 1\")\r\n print (\"[WARNING] Creating aproximation highly inaccurate\")\r\n if (arg >1):\r\n arg = 1 - 1e-10\r\n else:\r\n arg = -1 + 1e-10\r\n t3p = np.arccos(arg)\r\n theta[2, c] = t3p\r\n theta[2, c+1] = -t3p\r\n theta = np.real(theta)\r\n\r\n # Calculating theta2 and theta4\r\n cols = np.array([0, 1, 2, 3, 4, 5, 6, 7])\r\n for i in range (0, cols.size):\r\n c = cols[i]\r\n try:\r\n T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))\r\n T65 = inv(self._DH(a6, alpha6, d6, theta[5,c]))\r\n T54 = inv(self._DH(a5, alpha5, d5, theta[4,c]))\r\n except T10:\r\n print(\"[ERROR] Could not find inverse: Theta2 inverse 1, \" + str(T10))\r\n return None\r\n except T65:\r\n print(\"[ERROR] Could not find inverse: Theta2, inverse 2, \" + str(T65))\r\n return None\r\n except T54:\r\n print(\"[ERROR] Could not find inverse: Theta2, inverse 3, \" + str(T54))\r\n return None\r\n T14 = T10 * gd * T65 * T54\r\n p13 = T14 * np.mat([[0], [-d4], [0], [1]]) - np.mat([[0], [0], [0], [1]])\r\n p13norm = norm(p13)\r\n theta[1, c] = -np.arctan2(p13[1], -p13[0])+np.arcsin(a3*np.sin(theta[2,c])/p13norm)\r\n try:\r\n T32 = inv(self._DH(a3, alpha3, d3, theta[2,c]))\r\n T21 = inv(self._DH(a2, alpha2, d2, theta[1,c]))\r\n except T10:\r\n print(\"[ERROR] Could not find inverse: Theta4 inverse 1, \" + str(T32))\r\n return None\r\n except T65:\r\n print(\"[ERROR] Could not find inverse: Theta4, inverse 2, \" + str(T21))\r\n return None\r\n T34 = T32 * T21 * T14;\r\n theta[3, c] = np.arctan2(T34[1,0], T34[0,0])\r\n theta = np.real(theta)\r\n\r\n for i in range (0, 5):\r\n for j in range(0,7):\r\n if theta[i,j] > np.pi:\r\n theta[i,j] -= 2*np.pi\r\n elif theta[i,j] < -np.pi:\r\n theta[i,j] += 2*np.pi\r\n\r\n return theta\r\n # _analytic_ur5_inverse_kinematics retorna a matriz 6x8 com as 8 possiveis posições de 6 angulos dos motores que inferem na posição atual do UR5\r\n\r\n ## Cálcula a matriz Jacobiana da relação entre juntas e vetor de pose.\r\n # @param self O ponteiro do objeto.\r\n # @param q_Past Um vetor de juntas inicial a ser aplicado a derivada.\r\n # @param deltaTheta Um vetor de diferença de juntas em um tempo infinitesimal para o cálculo de derivada.\r\n def jacobian(self, q_Past, deltaTheta, rpy = False):\r\n\r\n jacobian_matrix = np.zeros((6,6))\r\n FK_init = self.ur5_direct_kinematics(np.squeeze(np.asarray(q_Past.transpose() + self.delta_standard_DH[3,:])), vector = True, rpy = rpy)\r\n step = deltaTheta\r\n NaN_check = False\r\n\r\n for i in range(0,6):\r\n q_aux = np.array([[0],[0],[0],[0],[0],[0]], float)\r\n q_aux[i] += step[i]\r\n q_aux = q_Past + q_aux\r\n q_aux = np.squeeze(np.asarray(q_aux.transpose() + self.delta_standard_DH[3,:]))\r\n FK_next = self.ur5_direct_kinematics(q_aux, vector = True, rpy = rpy)\r\n jacobian_matrix[i,:] = (tf.computeDifference(FK_next, FK_init)/(step[i]))\r\n if(np.any(np.isnan(jacobian_matrix[i,:]))):\r\n jacobian_matrix[i,:] = np.zeros(6)\r\n NaN_check = True\r\n \r\n if(NaN_check):\r\n print(\"[WARNING] NaN found on Jacobian.\")\r\n\r\n return jacobian_matrix.transpose()\r\n\r\n def jacobian2(self, q):\r\n\r\n jacobian_matrix = np.zeros((6,6))\r\n\r\n # Atualiza as 
matrizes\r\n\r\n self.ur5_direct_kinematics(np.squeeze(np.asarray(q.transpose() + self.delta_standard_DH[3,:])))\r\n\r\n # R^0_{i-1}dot(0,0,1)cross(d^0_n - d^0_{i-1})\r\n\r\n auxRow = np.array([[0],[0],[1]])\r\n # Row 1\r\n\r\n jacobian_matrix[0:3,0] = np.cross(np.dot(np.eye(3),auxRow),self._H[0:3,3],axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,0] = np.dot(np.eye(3),auxRow).transpose()\r\n\r\n # Row 2\r\n \r\n jacobian_matrix[0:3,1] = np.cross(np.dot(self._A_1[0:3,0:3],auxRow),(self._H[0:3,3] - self._A_1[0:3,3]),axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,1] = np.dot(self._A_1[0:3,0:3],auxRow).transpose()\r\n\r\n # Row 3\r\n\r\n aux = self._A_1 * self._A_2\r\n\r\n jacobian_matrix[0:3,2] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,2] = np.dot(aux[0:3,0:3],auxRow).transpose()\r\n\r\n # Row 4\r\n\r\n aux = aux * self._A_3\r\n\r\n jacobian_matrix[0:3,3] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,3] = np.dot(aux[0:3,0:3],auxRow).transpose()\r\n\r\n # Row 5\r\n\r\n aux = aux * self._A_4\r\n\r\n jacobian_matrix[0:3,4] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,4] = np.dot(aux[0:3,0:3],auxRow).transpose()\r\n\r\n # Row 6\r\n\r\n aux = aux * self._A_5\r\n\r\n jacobian_matrix[0:3,5] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)\r\n jacobian_matrix[3:6,5] = np.dot(aux[0:3,0:3],auxRow).transpose()\r\n\r\n return jacobian_matrix\r\n\r\n def jacobianEndEffectorReference(self,jacobian):\r\n\r\n fowardKinematics = self._H\r\n\r\n jacobianTransform = np.eye(6)\r\n #jacobianTransform[0:3,0:3] = fowardKinematics[0:3,0:3].transpose()\r\n jacobianTransform[3:6,3:6] = fowardKinematics[0:3,0:3].transpose()\r\n\r\n newJacobian = np.dot(jacobianTransform,jacobian)\r\n\r\n return newJacobian\r\n\r\n\r\n def jacobianAnalytic(self, q):\r\n\r\n pose = self.ur5_direct_kinematics(np.squeeze(np.asarray(q.transpose() + self.delta_standard_DH[3,:])),vector = True, rpy = True)\r\n\r\n jacobian = self.jacobian2(q)\r\n jacobian = self.jacobianEndEffectorReference(jacobian)\r\n\r\n # r = pose[3]\r\n # p = pose[4]\r\n # #y = pose[5]\r\n\r\n # B = np.array([[1,0,np.sin(p)],[0,np.cos(r),-np.cos(p)*np.sin(r)],[0,np.sin(r),np.cos(p)*np.cos(r)]])\r\n # invB = inv(B)\r\n # auxMat = np.eye(6)\r\n # auxMat[3:6,3:6] = invB\r\n\r\n # jacobianAnalytic = np.dot(auxMat,jacobian)\r\n\r\n #jacobianAnalytic = self.jacobianEndEffectorReference(jacobianAnalytic)\r\n\r\n return jacobian\r\n\r\n ## Esse método realiza a cinemática inversa de uma posição espacial para uma das oito configurações possíveis no espaço utilizando aproximação numérica por Newton-Raphson. \r\n # Ele retorna um vetor com as seis juntas que representam a configuração escolhida.\r\n # @param self O ponteiro do objeto.\r\n # @param cartesian_position Vetor [1x6] da posição a ser transformada.\r\n # @param chosen_theta Configuração escolhida. 
Default = 2.\r\n # @param theta Um parametro que pode ser usado como posição proxima inicial para aproximação numérica\r\n # @param rpy Um parâmetro que especifica se a posição cartesiana dada foi em RV ou RPY.\r\n def ur5_inverse_kinematics_newthon_raphson(self, cartesian_position, chosen_theta = 2, theta = np.zeros(6), rpy = False):\r\n\r\n #t = time.clock()\r\n\r\n if (rpy == True):\r\n cartesian_position[3:6] = tf.rollPitchYaw2RotationVector(cartesian_position[3:6])\r\n # A cinemática inversa analitica é inicialmente calculada\r\n if (np.all(theta == 0)):\r\n theta = self._analytic_ur5_inverse_kinematics(cartesian_position)\r\n joint_analytic_IK = theta[:,chosen_theta]\r\n else:\r\n joint_analytic_IK = theta\r\n\r\n NaN_check = np.isnan(joint_analytic_IK) \r\n\r\n if (np.any(NaN_check)):\r\n joint_analytic_IK = self.getJointPosition()\r\n print (\"[WARNING] Nan position found in analytic IK solution, using Actual Joint Position as start position.\")\r\n\r\n # O vetor de juntas inicial a ser corrigido numéricamente é escolhido\r\n \r\n #print joint_analytic_IK\r\n\r\n q_i = np.array([0,0,0,0,0,0], float)\r\n q_i += joint_analytic_IK\r\n \r\n joint_analytic_IK = joint_analytic_IK + self.delta_standard_DH[3,:]\r\n joint_analytic_IK = np.squeeze(np.asarray(joint_analytic_IK))\r\n FK = self.ur5_direct_kinematics(joint_analytic_IK, True)\r\n\r\n\r\n # Transformação de RV para RPY é realizada para se iniciar o cálculo.\r\n cartesian_position_rpy = cartesian_position\r\n erro = tf.computeDifference(cartesian_position_rpy, FK)\r\n \r\n norm_erro = norm(erro)\r\n\r\n episilon = 0.0001*0.0001\r\n max_iteractions = 500\r\n iteraction = 1\r\n q_i = np.array([[q_i[0]], [q_i[1]],[q_i[2]], [q_i[3]],[q_i[4]], [q_i[5]]])\r\n erro = np.array([[erro[0]], [erro[1]],[erro[2]], [erro[3]],[erro[4]], [erro[5]]])\r\n\r\n delta_theta = np.ones(6)*0.000006\r\n delta_theta = np.array([[delta_theta[0]], [delta_theta[1]],[delta_theta[2]], [delta_theta[3]],[delta_theta[4]], [delta_theta[5]]])\r\n while (norm_erro > episilon):\r\n # Calcula\r\n j = self.jacobian(q_i, delta_theta)\r\n try:\r\n jt = pinv(j)\r\n except:\r\n print(\"[WARNING] Pseudo Inverse with SVD diverged\")\r\n jt = np.dot(j.transpose(),inv(np.dot(j,j.transpose())))\r\n\r\n q_in = np.array([[0],[0],[0],[0],[0],[0]], float)\r\n q_in = q_i + np.dot(jt,erro)\r\n\r\n delta_theta = q_in - q_i\r\n q_i = np.array([[0],[0],[0],[0],[0],[0]], float)\r\n q_i += q_in\r\n q_i = np.squeeze(np.asarray(q_i.transpose()))\r\n FK = self.ur5_direct_kinematics(np.squeeze(np.asarray(q_i + self.delta_standard_DH[3,:])), True)\r\n erro = tf.computeDifference(cartesian_position_rpy, FK)\r\n norm_erro = norm(erro)\r\n\r\n erro = np.array([[erro[0]], [erro[1]],[erro[2]], [erro[3]],[erro[4]], [erro[5]]])\r\n \r\n q_i = np.array([[q_i[0]], [q_i[1]],[q_i[2]], [q_i[3]],[q_i[4]], [q_i[5]]])\r\n \r\n iteraction += 1\r\n if (iteraction > max_iteractions):\r\n print (\"[ERROR] Maximum interactions reached.\")\r\n break\r\n\r\n #t2 = time.clock()\r\n\r\n #print (\"Tempo de convergencia NRa: \", t2 - t)\r\n\r\n q_i = q_i.transpose()\r\n q_aux = np.array([q_i[0,0],q_i[0,1],q_i[0,2],q_i[0,3],q_i[0,4],q_i[0,5]], float)\r\n\r\n return q_aux\r\n\r\n ## Esse método realiza a cinemática inversa de uma posição espacial para uma das oito configurações possíveis no espaço utilizando aproximação numérica por Cyclic Coordinate Descent. \r\n # Ele retorna um vetor com as seis juntas que representam a configuração escolhida. 
Obs.: Lento.\r\n # @param self O ponteiro do objeto.\r\n # @param cartesian_position Vetor [1x6] da posição a ser transformada.\r\n # @param chosen_theta Configuração escolhida. Default = 2.\r\n \r\n def ur5_inverse_kinematics_ccd(self, cartesian_position, chosen_theta = 2):\r\n\r\n # A cinemática inversa analitica é inicialmente calculada\r\n\r\n t = time.clock()\r\n\r\n theta = self._analytic_ur5_inverse_kinematics(cartesian_position)\r\n\r\n # O vetor de juntas inicial a ser corrigido numéricamente é escolhido\r\n joint_analytic_IK = theta[:,chosen_theta]\r\n\r\n self._effective_q = joint_analytic_IK + self.delta_standard_DH[3,:]\r\n Initial_DK = self.ur5_direct_kinematics(np.squeeze(np.asarray(self._effective_q.transpose())), True)\r\n Initial_DK[3:6] = tf.rotationVector2RollPitchYaw(Initial_DK[3:6])\r\n # Cyclic Coordinate Descent\r\n cartesian_position_rpy = np.hstack((cartesian_position[0:3], tf.rotationVector2RollPitchYaw(cartesian_position[3:6])))\r\n\r\n # Constantes a serem utilizadas\r\n epsilon = 0.0001\r\n quad_epsilon = epsilon*epsilon\r\n joint_count = 5\r\n max_interection = 5000\r\n interection_count = 1\r\n interection_count_joint = 1\r\n direction = 1\r\n min_step = 0.000017\r\n max_step = 0.1\r\n alpha_step = max_step\r\n\r\n Radius = np.sqrt(cartesian_position[0:3].transpose()*cartesian_position[0:3])\r\n\r\n joint_interact = np.zeros(6)\r\n joint_interact += joint_analytic_IK\r\n\r\n # Erros Iniciais\r\n\r\n Error_Position = cartesian_position[0:3] - Initial_DK[0:3]\r\n Mean_Position = np.mean(np.dot(Error_Position.transpose(),Error_Position))\r\n\r\n Error_Rotation = tf.computeDifference(cartesian_position_rpy[3:6],Initial_DK[3:6], True)\r\n Linear_Rotation_Error = Radius*Error_Rotation\r\n Mean_Rotation = np.mean(np.dot(Linear_Rotation_Error,Linear_Rotation_Error.transpose()))\r\n\r\n erro_quad = (Mean_Position + Mean_Rotation)/2\r\n\r\n erro_quad_aux = erro_quad\r\n\r\n # Correção numérica.\r\n while erro_quad > quad_epsilon:\r\n \r\n joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step\r\n\r\n self._effective_q = joint_interact + self.delta_standard_DH[3,:]\r\n\r\n DK = self.ur5_direct_kinematics(np.squeeze(np.asarray(self._effective_q.transpose())), True)\r\n DK[3:6] = rotationVector2RollPitchYaw(DK[3:6])\r\n\r\n Error_Position = cartesian_position[0:3] - DK[0:3] \r\n Mean_Position = np.mean(np.dot(Error_Position.transpose(),Error_Position))\r\n\r\n Error_Rotation = computeDifference(cartesian_position_rpy[3:6],DK[3:6], True)\r\n Linear_Rotation_Error = Radius*Error_Rotation\r\n Mean_Rotation = np.mean(np.dot(Linear_Rotation_Error,Linear_Rotation_Error.transpose()))\r\n\r\n erro_quad = (Mean_Position + Mean_Rotation)/2\r\n\r\n if erro_quad > erro_quad_aux:\r\n if interection_count_joint == 1:\r\n direction = -1*direction\r\n joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step\r\n interection_count_joint = 0\r\n error_direction = erro_quad\r\n else:\r\n if alpha_step > min_step:\r\n joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n alpha_step = alpha_step/2\r\n interection_count_joint = 1\r\n else:\r\n joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n alpha_step = max_step\r\n interection_count_joint = 1\r\n joint_count -=1\r\n if joint_count < 0:\r\n joint_count = 5\r\n interection_count +=1\r\n else:\r\n alpha_step = alpha_step/2\r\n interection_count_joint = 1\r\n erro_quad_aux = erro_quad\r\n\r\n #if interection_count_joint == 1:\r\n 
#if erro_quad < erro_quad_aux:\r\n #erro_quad_aux = erro_quad\r\n #interection_count_joint += 1\r\n #joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n #alpha_step = alpha_step/2\r\n #else:\r\n #direction = -1*direction\r\n #joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step\r\n #interection_count_joint += 1\r\n #else:\r\n #if erro_quad < erro_quad_aux:\r\n #erro_quad_aux = erro_quad\r\n #interection_count_joint += 1\r\n #joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n #alpha_step = alpha_step/2\r\n #else:\r\n #if (alpha_step < 0.000017)\r\n #joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n #alpha_step = alpha_step*2\r\n #joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step\r\n #alpha_step = np.pi\r\n #interection_count_joint = 1\r\n #joint_count -=1\r\n #if joint_count < 0:\r\n #joint_count = 5\r\n #interection_count +=1\r\n #else: \r\n #joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step\r\n #interection_count_joint = 1\r\n #joint_count -=1\r\n #if joint_count < 0:\r\n #joint_count = 5\r\n #interection_count +=1\r\n if interection_count > max_interection:\r\n print (\"[ERROR] Maximum interations reached.\")\r\n break\r\n\r\n t2 = time.clock()\r\n\r\n print (\"[INFO] CCD Total time: \"+ str(t2 - t))\r\n\r\n return joint_interact\r\n\r\n\r\n def getMeanValueVector(self, vectorArray):\r\n\r\n print(\"[INFO] Mean Value: Array, Mean, \" + str(vectorArray) + \", \" + str(np.mean(vectorArray, axis = 0, dtype=np.float64)))\r\n\r\n\r\n def controlLoopTranspose(self, desiredPose, poseActual = None):\r\n\r\n if (poseActual == None):\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n poseActualFK = tf.pose2Matrix(poseActual)\r\n desiredPoseFK = tf.pose2Matrix(desiredPose)\r\n\r\n poseError = desiredPose[0:3] - poseActual[0:3]\r\n\r\n rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)\r\n\r\n if np.any(np.isnan(rotationError)):\r\n np.nan_to_num(rotationError, False)\r\n\r\n error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]\r\n self.normErro = norm(poseError)\r\n\r\n self.errorDB.append(error)\r\n\r\n jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-3)[np.newaxis].transpose())\r\n\r\n # Control\r\n\r\n K = 0.5*np.eye(6,6)\r\n\r\n jointControl = np.dot(np.dot(jacob.transpose(),K),error.transpose())\r\n\r\n return np.squeeze(np.asarray(jointControl))\r\n\r\n def controlLoopPseudoInverse(self, desiredPose, poseActual = None):\r\n\r\n if (poseActual == None):\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n poseActualFK = tf.pose2Matrix(poseActual)\r\n desiredPoseFK = tf.pose2Matrix(desiredPose)\r\n\r\n poseError = desiredPose[0:3] - poseActual[0:3]\r\n\r\n rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)\r\n\r\n if np.any(np.isnan(rotationError)):\r\n np.nan_to_num(rotationError, False)\r\n\r\n error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]\r\n self.normErro = norm(poseError)\r\n\r\n self.errorDB.append(error)\r\n\r\n jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-3)[np.newaxis].transpose())\r\n\r\n # Control\r\n\r\n K = 0.5*np.eye(6,6)\r\n\r\n jointControl = 
np.dot(np.dot(pinv(jacob),K),error.transpose())\r\n\r\n return np.squeeze(np.asarray(jointControl))\r\n\r\n\r\n def controlLoopInverse(self, desiredPose, poseActual = None):\r\n\r\n if (poseActual == None):\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n poseActualFK = tf.pose2Matrix(poseActual)\r\n desiredPoseFK = tf.pose2Matrix(desiredPose)\r\n\r\n poseError = desiredPose[0:3] - poseActual[0:3]\r\n\r\n rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)\r\n\r\n if np.any(np.isnan(rotationError)):\r\n np.nan_to_num(rotationError, False)\r\n\r\n error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]\r\n\r\n self.normErro = norm(poseError)\r\n self.errorDB.append(error)\r\n\r\n jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-6)[np.newaxis].transpose())\r\n\r\n # Control\r\n\r\n K = 0.5*np.eye(6,6)\r\n\r\n jointControl = np.dot(np.dot(inv(jacob),K),error.transpose())\r\n\r\n return np.squeeze(np.asarray(jointControl))\r\n\r\n def controlLoopDLS(self, desiredPose, poseActual = None, step = 0.008, jointSpeedReference = np.array([0, 0, 0, 0, 0, 0]), cartesianSpeedReference = np.array([0, 0, 0, 0, 0, 0])):\r\n\r\n if (poseActual == None):\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n #print(self.getPosition())\r\n #print(self.getJointPosition())\r\n\r\n poseActual = self.getPosition()\r\n poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])\r\n\r\n poseActualFK = tf.pose2Matrix(poseActual)\r\n desiredPoseFK = tf.pose2Matrix(desiredPose)\r\n\r\n poseError = desiredPose[0:3] - poseActual[0:3]\r\n\r\n rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)\r\n\r\n if np.any(np.isnan(rotationError)):\r\n print('[INFO][ControlLoopDLS] NaN found on control')\r\n np.nan_to_num(rotationError, False)\r\n\r\n # Error Calculation\r\n\r\n #Kp\r\n error = np.hstack((poseError, rotationError))\r\n\r\n #Kd\r\n error_D = (error - self.errorPrevious)/step\r\n self.error_D_DB.append(error_D)\r\n self.errorPrevious = error\r\n errorFiltered = butter_lowpass_filter(np.asarray(self.error_D_DB, dtype=np.float32), 3, 125, order=2)\r\n error_D = errorFiltered[errorFiltered.shape[0]-1]\r\n \r\n #Ki\r\n self.errorSum = self.errorSum + error\r\n # for i in range(0,6):\r\n # if (self.errorSum[i] > 0.1):\r\n # self.errorSum[i] = 0.1\r\n # elif(self.errorSum[i] < -0.1):\r\n # self.errorSum[i] = -0.1\r\n\r\n # print('Error Sum ' + str(self.errorSum))\r\n # if (len(self.errorDB) > 1000):\r\n # self.errorSum = self.errorSum - np.asarray(self.errorDB[len(self.errorDB) - 1000], dtype=np.float32)\r\n\r\n #DB\r\n self.normErro = norm(poseError)\r\n self.errorDB.append(error)\r\n\r\n #jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-6)[np.newaxis].transpose(), rpy = True)\r\n #jacob = self.jacobian2(self.getJointPosition())\r\n jacob = self.jacobianAnalytic(self.getJointPosition())\r\n\r\n # Control\r\n\r\n Kp = 5*np.eye(6,6) #10 #5\r\n # Kp[0,0] = 1.5\r\n # Kp[1,1] = 1.5\r\n # Kp[2,2] = 1.5\r\n # Kp[0,3] = 0.2#0.5\r\n # Kp[0,4] = 0.1#0.5\r\n # Kp[0,5] = 0.1#0.5\r\n # Kp[1,3] = 0#0.5\r\n # Kp[1,4] = 0#0.5\r\n # Kp[1,5] = 0#0.5\r\n # Kp[2,3] = 0#0.5\r\n # Kp[2,4] = 0#0.5\r\n # Kp[2,5] = 0#0.5\r\n 
#Kp[3,3] = 16#0.5\r\n # Kp[3,4] = 0#0.5\r\n # Kp[3,5] = 0#0.5\r\n # Kp[4,3] = 0#0.5\r\n #Kp[4,4] = 16#0.5\r\n # Kp[4,5] = 0#0.5\r\n # Kp[5,3] = 0#0.5\r\n # Kp[5,4] = 0#0.5\r\n #Kp[5,5] = 16#0.5\r\n\r\n Kd = 2*np.eye(6,6)\r\n\r\n # Kd[3,3] = 0.1\r\n # Kd[4,4] = 0.1\r\n # Kd[5,5] = 0.1\r\n\r\n Ki = 0.25*np.eye(6,6)\r\n # Ki[3,3] = 0.00055 #0.55\r\n # Ki[4,4] = 0.00055\r\n # Ki[5,5] = 0.00055\r\n # WindupUpperLimit = np.array([0.15, 0.15, 0.15, 0.15, 0.15, 0.15])\r\n # WindupLowerLimit = -np.array([0.15, 0.15, 0.15, 0.15, 0.15, 0.15])\r\n\r\n k0 = 0.01\r\n\r\n w0 = 0.01\r\n\r\n \r\n\r\n KpControl = np.dot(Kp,error.transpose())\r\n KdControl = np.dot(Kd,error_D.transpose())\r\n KiControl = np.dot(Ki,self.errorSum.transpose())\r\n # print(KiControl)\r\n # print('\\n')\r\n # for i in range(0,6):\r\n # if (KiControl[i] > 0.02):\r\n # KiControl[i] = 0.02\r\n # elif(KiControl[i] < -0.02):\r\n # KiControl[i] = -0.02\r\n ControlSum = KpControl + cartesianSpeedReference #+ KiControl\r\n\r\n t1 = time.perf_counter()\r\n \r\n w = np.sqrt(np.linalg.det(np.dot(jacob,jacob.transpose())))\r\n\r\n if (w < w0):\r\n lamb = k0*(np.power((1 - (w/w0)),2))\r\n print('[WARNING] Near Singularity: ' + str(w))\r\n else:\r\n lamb = 0\r\n\r\n lamb2 = lamb*np.eye(6,6)\r\n invJacob = np.dot(jacob.transpose(),inv(np.dot(jacob,jacob.transpose()) + lamb2))\r\n t2 = time.perf_counter()\r\n \r\n #t1 = time.perf_counter()\r\n #invJacob = inv(jacob)\r\n #t2 = time.perf_counter()\r\n\r\n\r\n JacobianProcessTime = t2 - t1\r\n self.processTimeList.append(JacobianProcessTime)\r\n \r\n\r\n\r\n self.wDB.append(w)\r\n #invJacob = jacob.transpose()\r\n jointControl = np.dot(invJacob,ControlSum) #np.dot(np.dot(np.dot(jacob.transpose(),inv(np.dot(jacob,jacob.transpose()) + lamb2)),Kp),error.transpose())\r\n\r\n #jointControl = jointControl + jointSpeedReference\r\n\r\n # for i in range(0,6):\r\n # if (jointControl[i] > WindupUpperLimit[i]):\r\n # self.u[i] = WindupUpperLimit[i]\r\n # elif(jointControl[i] < WindupLowerLimit[i]):\r\n # self.u[i] = WindupLowerLimit[i]\r\n # else:\r\n # self.u[i] = jointControl[i]\r\n\r\n # self.errorSaturation = jointControl - self.u\r\n # print(self.errorSaturation)\r\n\r\n # print('Error Sum windup' + str((np.dot(jacob,jointControl) - KpControl)/Ki[0,0]))\r\n\r\n # for i in range(0,6):\r\n # if (jointControl[i] > 0.4):\r\n # jointControl[i] = 0.4\r\n # elif (jointControl[i] < -0.4):\r\n # jointControl[i] = -0.4\r\n\r\n return np.squeeze(np.asarray(jointControl))\r\n\r\n def speedTransform(self, desiredSpeed, q = None, step = 0.008):\r\n\r\n if (q == None):\r\n q = self.getJointPosition()\r\n\r\n #jacobian = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-6)[np.newaxis].transpose(), rpy = True)\r\n #jacobian = self.jacobian2(q)\r\n jacobian = self.jacobianAnalytic(q)\r\n\r\n jointSpeed = np.dot(inv(jacobian),desiredSpeed.transpose())\r\n\r\n return jointSpeed\r\n\r\ndef butter_lowpass(cutoff, fs, order=5):\r\n nyq = 0.5 * fs\r\n normal_cutoff = cutoff / nyq\r\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\r\n return b, a\r\n\r\ndef butter_lowpass_filter(data, cutoff, fs, order=5):\r\n b, a = butter_lowpass(cutoff, fs, order=order)\r\n y = lfilter(b, a, data)\r\n return y",
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Get Pose\nimport numpy as np\nfrom numpy.linalg import norm\nfrom scipy.io import savemat\nimport matplotlib.pyplot as plot\nfrom mpl_toolkits.mplot3d import Axes3D\nimport struct\nimport UR5Class\nimport socket\nimport time\nimport sys\nimport csv\n#import json\nimport Transformations as tf\nimport os\nimport threading\n\nimport trajectoryGenerator\n\nHOST = \"192.168.0.98\" # The remote host \nPORT = 30003 # The same port as used by the server\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\ns.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)\n\ndef getData(host, port):\n s.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)\n data = s.recv(1060)\n return data\n# getData abre uma conexao com o robo e recebe o pacote de dados de 1060 Bytes\n\ndef sendString(host, port, string, move_time = 8, pose = False):\n string = \"[\" + str(string[0]) + \",\" + str(string[1]) + \",\" + str(string[2]) + \",\" + str(string[3]) + \",\" + str(string[4]) + \",\" + str(string[5]) + \"]\"\n if (pose == True):\n \tp = \"p\"\n else:\n \tp = \"\"\n str_data = \"movej(\" + p + string + \", t = \" + str(move_time) + \")\" + \"\\n\"\n s.send(str_data.encode('ascii'))\n return\n\ndef speedJ(host, port, string, a = 2*np.pi):\n string = \"[\" + str(string[0]) + \",\" + str(string[1]) + \",\" + str(string[2]) + \",\" + str(string[3]) + \",\" + str(string[4]) + \",\" + str(string[5]) + \"]\"\n str_data = \"speedj(\" + string + \", a = \" + str(a) + \",t=0.02)\" + \"\\n\"\n s.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)\n s.send(str_data.encode('ascii'))\n return\n\nprint (\"Starting Program\")\n\ndelta_standard_DH = np.mat([[7.80880090239748336e-05, 0.361257734372265993, 0.00128388035686166635, 1.67232993846963135e-05, 2.02354943719599362e-05, 0], \\\n [-0.000718642187888640649, 0.00106284384336133905, -0.022893992683020014, -0.00115732902891929612, 0.000201414435319735574, 0], \\\n [7.02198637382578372e-05, -395.302340315824551, 396.777096992026259, -1.47374645443299634,0.000169498815833071803, 0.000364725429982712401], \\\n [2.91984009971350678e-05, -1.42023254669109278, 1.33410045447338699, 0.0861037286066216462, -3.46593927803766182e-05, -2.71063161709674666e-05]])\n\ndelta_standard_DH_2 = np.mat([[ -5.39038176483263552e-06, 0.200686268169445209, 0.00228952454238523506, 2.04485825460639469e-05, -1.56897709565794351e-05, 0],\\\n [ 0.00039024637623907843, 0.000904178045744563359, 0.0145652098260125283, -0.000690055586142879207, 0.000644539557413503772, 0],\\\n [ 0.000178790506571227525, 399.392832822527851, -396.49020940525736, -2.90172143203552535, 0.000311791168683808739, 0.000378711630321493242], \\\n [ 7.05887359599974621e-05, 1.01499272342048541, -0.906943504886603802, -6.39125177018525026, 2.3011110588447593e-05, 5.9590107063629152e-05]])\n# Dados de calibracao do robo\n\nur5 = UR5Class.UR5Class(delta_standard_DH_2)\n\n# process = threading.Thread(target=speedJ,args=[HOST,PORT])\n# process.start()\n\ntime.sleep(0.3)\n\nur5.setRobotData(getData(HOST, PORT))\n\narmTrajectory = np.load(\"/home/nascimento/Projects/MESTRADO - Task Space Control/generatedPaths/UR5/Arm1.npy\")\n\n\ntimeArray = np.linspace(0,31.04,num=3892)\n\n\n\n\nplot.figure(1)\nplot.plot(timeArray,armTrajectory[:,0])\nplot.xlabel(\"Tempo(s)\")\nplot.ylabel(\"X(m)\")\nplot.suptitle(\"Trajetoria - X\")\n\nplot.figure(2)\nplot.plot(timeArray,armTrajectory[:,1])\nplot.xlabel(\"Tempo(s)\")\nplot.ylabel(\"Y(m)\")\nplot.suptitle(\"Trajetoria - 
Y\")\n\nplot.figure(3)\nplot.plot(timeArray,armTrajectory[:,2])\nplot.xlabel(\"Tempo(s)\")\nplot.ylabel(\"Z(m)\")\nplot.suptitle(\"Trajetoria - Z\")\n\nplot.figure(4)\nplot.plot(timeArray,armTrajectory[:,3])\nplot.xlabel(\"Tempo(s)\")\nplot.ylabel(\"RX(rad)\")\nplot.suptitle(\"Trajetoria - RX\")\n\nplot.figure(5)\nplot.plot(timeArray,armTrajectory[:,4])\nplot.xlabel(\"Tempo(s)\")\nplot.ylabel(\"RY(rad)\")\nplot.suptitle(\"Trajetoria - RY\")\n\nplot.figure(6)\nplot.plot(timeArray,armTrajectory[:,5])\nplot.xlabel(\"Tempo(s)\")\nplot.ylabel(\"RZ(rad)\")\nplot.suptitle(\"Trajetoria - RZ\")\n\nfig = plot.figure(7)\nax = fig.gca(projection='3d')\nax.plot(armTrajectory[:,0],armTrajectory[:,1],armTrajectory[:,2])\n\nplot.show()"
] | [
[
"numpy.dot",
"numpy.radians",
"numpy.sqrt",
"numpy.asarray",
"numpy.nan_to_num",
"numpy.arctan2",
"numpy.all",
"numpy.concatenate",
"numpy.mean",
"numpy.any",
"numpy.hstack",
"numpy.eye",
"numpy.sin",
"scipy.signal.butter",
"numpy.real",
"scipy.signal.lfilter",
"numpy.mat",
"numpy.zeros",
"numpy.power",
"numpy.isnan",
"numpy.linalg.inv",
"numpy.arccos",
"numpy.array",
"numpy.linalg.norm",
"numpy.cos",
"numpy.ones",
"numpy.linalg.pinv"
],
[
"numpy.linspace",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.load",
"matplotlib.pyplot.suptitle",
"numpy.mat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
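The controlLoopDLS method in the record above builds its joint command from a damped least-squares (singularity-robust) inverse, dq = J^T (J J^T + lambda^2 I)^-1 K e, enabling the damping only when the manipulability measure w = sqrt(det(J J^T)) falls below a threshold w0. The following is a minimal numpy sketch of that single step under the assumption of a square 6x6 task Jacobian; the function name and gain values are illustrative, not the repository's API.

# Sketch of the damped least-squares step from controlLoopDLS.
import numpy as np

def dls_joint_velocity(jacobian, pose_error, kp=5.0, k0=0.01, w0=0.01):
    J = np.asarray(jacobian, dtype=float)    # 6x6 task Jacobian
    e = np.asarray(pose_error, dtype=float)  # 6-vector [dx, dy, dz, drx, dry, drz]
    w = np.sqrt(np.linalg.det(J @ J.T))      # manipulability measure
    lam2 = k0 * (1.0 - w / w0) ** 2 if w < w0 else 0.0   # damping near singularities
    inv_term = np.linalg.inv(J @ J.T + lam2 * np.eye(6))
    return J.T @ inv_term @ (kp * e)         # joint velocity command

# Example with an identity Jacobian and a small Cartesian error:
dq = dls_joint_velocity(np.eye(6), [0.01, 0.0, 0.0, 0.0, 0.0, 0.02])
print(dq)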
piojanu/tf_utils | [
"169bd3334dd11954cf8f411f2c918f76cd609fab"
] | [
"samples/mnist_vae.py"
] | [
"import argparse\nimport io\nimport os.path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tqdm import tqdm\n\nfrom tf_utils import AttrDict, attrdict_from_yaml, lazy_property_with_scope, share_variables\n\ntfd = tfp.distributions\ntfl = tf.layers\n\n\nclass Model(object):\n def __init__(self, data, config):\n # Initialize attributes\n self.data = data\n self.data_shape = list(self.data.shape[1:])\n self.config = config\n\n # Build model\n self.prior\n self.posterior\n self.code\n self.likelihood\n self.sample\n self.samples\n self.log_prob\n self.divergence\n self.elbo\n self.loss\n self.optimiser\n self.gradients\n self.optimise\n\n # Define summaries\n self.summary\n self.images\n\n @lazy_property_with_scope\n def prior(self):\n \"\"\"Standard normal distribution prior.\"\"\"\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(self.config.code_size),\n scale_diag=tf.ones(self.config.code_size))\n\n @lazy_property_with_scope(scope_name=\"encoder\")\n def posterior(self):\n \"\"\"a.k.a the encoder\"\"\"\n x = tfl.Flatten()(self.data)\n x = tfl.Dense(self.config.hidden_size, activation='relu')(x)\n x = tfl.Dense(self.config.hidden_size, activation='relu')(x)\n loc = tfl.Dense(self.config.code_size)(x)\n scale = tfl.Dense(self.config.code_size, activation='softplus')(x)\n return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)\n\n @lazy_property_with_scope\n def code(self):\n \"\"\"Code sample from the posterior.\"\"\"\n return self.posterior.sample()\n\n @lazy_property_with_scope(scope_name=\"decoder\")\n def likelihood(self):\n \"\"\"a.k.a the decoder.\"\"\"\n return self._make_decoder(self.code)\n\n @lazy_property_with_scope\n def sample(self):\n \"\"\"Sample example.\"\"\"\n return self._make_decoder(self.prior.sample(1))\n\n @lazy_property_with_scope\n def samples(self):\n \"\"\"Generated examples.\"\"\"\n return self._make_decoder(self.prior.sample(self.config.n_samples)).mean()\n\n @lazy_property_with_scope\n def log_prob(self):\n \"\"\"Log. likelihood of data under code sampled from posterior.\"\"\"\n return self.likelihood.log_prob(self.data)\n\n @lazy_property_with_scope\n def divergence(self):\n \"\"\"KL divergence between posterior and prior.\"\"\"\n return tfd.kl_divergence(self.posterior, self.prior)\n\n @lazy_property_with_scope\n def elbo(self):\n \"\"\"Evidence lower bound with a Lagrangian multiplier beta.\"\"\"\n return self.log_prob - self.config.beta * self.divergence\n\n @lazy_property_with_scope\n def loss(self):\n \"\"\"Negative ELBO reduced over the whole batch and every pixel.\"\"\"\n return -tf.reduce_mean(self.elbo)\n\n @lazy_property_with_scope\n def optimiser(self):\n \"\"\"ADAM optimiser.\"\"\"\n return tf.train.AdamOptimizer(self.config.learning_rate)\n\n @lazy_property_with_scope\n def gradients(self):\n \"\"\"Variables values and gradients of the loss (negative ELBO).\"\"\"\n return self.optimiser.compute_gradients(self.loss)\n\n @lazy_property_with_scope\n def optimise(self):\n \"\"\"Optimise the loss op. (apply gradients).\"\"\"\n return self.optimiser.apply_gradients(self.gradients)\n\n @lazy_property_with_scope\n def summary(self):\n \"\"\"Merged the model's summaries.\"\"\"\n return tf.summary.merge(self._define_summaries())\n\n @lazy_property_with_scope\n def images(self):\n \"\"\"Image summary of generated examples.\"\"\"\n images = tf.reshape(self.samples, (-1, self.samples.shape[2])) # Create col. 
of images\n images = tf.expand_dims(images, axis=0) # Add batch dim.\n images = tf.expand_dims(images, axis=-1) # Add channel dim.\n return tf.summary.image(\"samples\", images, max_outputs=1)\n\n @share_variables\n def _make_decoder(self, code):\n \"\"\"Build decoder network.\"\"\"\n x = tfl.Dense(self.config.hidden_size, activation='relu')(code)\n x = tfl.Dense(self.config.hidden_size, activation='relu')(x)\n logits = tfl.Dense(np.product(self.data_shape))(x)\n logits = tf.reshape(logits, [-1] + self.data_shape)\n return tfd.Independent(tfd.Bernoulli(logits), 2)\n\n def _define_summaries(self):\n \"\"\"Define the model's summaries.\"\"\"\n summaries = []\n\n # Learning rate\n summaries.append(tf.summary.scalar(\"learning_rate\",\n self.optimiser._lr))\n\n # ELBO and loss\n summaries.append(tf.summary.histogram(\"evidence/lower_bound_log_prob/image\",\n self.elbo))\n summaries.append(tf.summary.scalar(\"mean/evidence/lower_bound_log_prob/image\",\n tf.reduce_mean(self.elbo)))\n summaries.append(tf.summary.scalar(\"loss\",\n self.loss))\n\n # KL divergence\n summaries.append(tf.summary.histogram(\"divergence\",\n self.divergence))\n summaries.append(tf.summary.scalar(\"mean/divergence\",\n tf.reduce_mean(self.divergence)))\n\n # Gradients and variables norm\n gradients, variables = list(zip(*self.gradients))\n for gradient, variable in zip(gradients, variables):\n summaries.append(tf.summary.histogram(\"gradients/batch_norm/\" + variable.name,\n tf.norm(gradient, axis=0)))\n summaries.append(tf.summary.histogram(\"variables/batch_norm/\" + variable.name,\n tf.norm(variable, axis=0)))\n summaries.append(tf.summary.scalar(\"gradients/global_norm\",\n tf.global_norm(gradients)))\n summaries.append(tf.summary.scalar(\"variables/global_norm\",\n tf.global_norm(variables)))\n\n # Prior and posterior entropy\n summaries.append(tf.summary.histogram(\"prior/entropy\",\n self.prior.entropy()))\n summaries.append(tf.summary.scalar(\"mean/prior/entropy\",\n tf.reduce_mean(self.prior.entropy())))\n summaries.append(tf.summary.histogram(\"posterior/entropy\",\n self.posterior.entropy()))\n summaries.append(tf.summary.scalar(\"mean/posterior/entropy\",\n tf.reduce_mean(self.posterior.entropy())))\n\n # Prior and posterior log_prob\n summaries.append(tf.summary.histogram(\"prior/log_prob/image\",\n self.sample.log_prob(self.data)))\n summaries.append(tf.summary.scalar(\"mean/prior/log_prob/image\",\n tf.reduce_mean(self.sample.log_prob(self.data))))\n summaries.append(tf.summary.histogram(\"posterior/log_prob/image\",\n self.log_prob))\n summaries.append(tf.summary.scalar(\"mean/posterior/log_prob/image\",\n tf.reduce_mean(self.log_prob)))\n\n return summaries\n\n\ndef plot_codes(codes, labels):\n # Scatter plot\n fig, ax = plt.subplots()\n ax.scatter(codes[:, 0], codes[:, 1], s=2, c=labels, alpha=0.1)\n ax.set_aspect('equal')\n ax.set_xlim(codes.min() - .1, codes.max() + .1)\n ax.set_ylim(codes.min() - .1, codes.max() + .1)\n ax.tick_params(\n axis='both', which='both', left=False, bottom=False,\n labelleft=False, labelbottom=False)\n\n # Save to io buffer\n buf = io.BytesIO()\n fig.savefig(buf, format='png')\n buf.seek(0)\n\n # Create image summary\n image = tf.Summary.Image(encoded_image_string=buf.getvalue())\n summary = tf.Summary(value=[tf.Summary.Value(tag=\"images/codes/image\", image=image)])\n return summary\n\n\ndef create_datasets(train_set, test_set, config):\n train_dataset = tf.data.Dataset.from_tensor_slices(\n tf.convert_to_tensor(train_set, dtype=tf.float32)) \\\n .map(lambda x: x / 255) 
\\\n .shuffle(train_set.shape[0]) \\\n .batch(config.batch_size)\n\n test_dataset = tf.data.Dataset.from_tensor_slices(\n tf.convert_to_tensor(test_set, dtype=tf.float32)) \\\n .map(lambda x: x / 255) \\\n .batch(test_set.shape[0])\n\n iterator = tf.data.Iterator.from_structure(train_dataset.output_types,\n train_dataset.output_shapes)\n\n next_batch = iterator.get_next()\n train_init_op = iterator.make_initializer(train_dataset)\n test_init_op = iterator.make_initializer(test_dataset)\n\n return next_batch, train_init_op, test_init_op\n\n\ndef train(model, train_init_op, test_init_op, test_labels, config):\n with tf.train.MonitoredSession() as sess:\n summary_writer_train = tf.summary.FileWriter(\n os.path.join(config.logs_dir, \"train\"), sess.graph)\n summary_writer_test = tf.summary.FileWriter(\n os.path.join(config.logs_dir, \"test\"))\n\n step = 0\n for epoch in tqdm(range(config.epochs)):\n # Test\n sess.run(test_init_op)\n test_summary, test_images, test_codes = sess.run(\n [model.summary, model.images, model.code])\n summary_writer_test.add_summary(test_summary, step)\n summary_writer_test.add_summary(test_images, step)\n\n # Plot codes\n # TODO: Use TensorBoard projector.\n codes = plot_codes(test_codes, test_labels)\n summary_writer_test.add_summary(codes, step)\n\n # Train\n # TODO: Add tfu.loop that will run whole epoch, have callbacks and reduce returns.\n sess.run(train_init_op)\n while True:\n try:\n fetches = AttrDict({\"optimise\": model.optimise})\n if step % config.log_every == 0:\n fetches.summary = model.summary\n\n returns = sess.run(fetches)\n if \"summary\" in returns:\n summary_writer_train.add_summary(returns.summary, step)\n\n step += 1\n except tf.errors.OutOfRangeError:\n break\n\n summary_writer_train.close()\n summary_writer_test.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Train VAE for MNIST dataset.\")\n parser.add_argument('--config', type=str, default=\"\", help=\"YAML formatted configuration\")\n user_config_json = parser.parse_args().config\n\n default_config = AttrDict({\n \"batch_size\": 100,\n \"epochs\": 20,\n \"n_samples\": 10,\n \"hidden_size\": 200,\n \"code_size\": 2,\n \"beta\": 1.,\n \"learning_rate\": 0.001,\n \"logs_dir\": \"./logs\",\n \"log_every\": 100\n })\n config = default_config.nested_update(attrdict_from_yaml(user_config_json))\n\n (train_set, _), (test_set, test_labels) = tf.keras.datasets.mnist.load_data()\n # TODO: Use whole test set, but batch it like train set and average summaries.\n # https://stackoverflow.com/questions/40788785/how-to-average-summaries-over-multiple-batches\n train_set, test_set, test_labels = train_set[:], test_set[:5000], test_labels[:5000]\n\n next_batch, train_init_op, test_init_op = create_datasets(train_set, test_set, config)\n\n model = Model(next_batch, config)\n train(model, train_init_op, test_init_op, test_labels, config)\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.norm",
"numpy.product",
"tensorflow.reduce_mean",
"tensorflow.zeros",
"tensorflow.summary.image",
"tensorflow.train.MonitoredSession",
"tensorflow.reshape",
"matplotlib.pyplot.subplots",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.Summary.Value",
"tensorflow.data.Iterator.from_structure",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.global_norm",
"tensorflow.summary.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
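The `samples/mnist_vae.py` entry above computes `elbo = log_prob - beta * divergence` with a diagonal-Gaussian posterior against a standard-normal prior. As a reference for that line, here is a minimal NumPy sketch of the closed-form KL term and the beta-weighted ELBO for a single example; the values and names (`mu`, `sigma`, `log_px`) are illustrative and not taken from the file.

```python
import numpy as np

# Closed-form KL( N(mu, diag(sigma^2)) || N(0, I) ) for one latent code,
# plus the beta-weighted ELBO used in the entry above. Numbers are made up.
mu = np.array([0.3, -1.2])      # posterior mean (code_size = 2)
sigma = np.array([0.8, 1.1])    # posterior scale (softplus output in the entry)
log_px = -92.7                  # assumed decoder log-likelihood of the input

kl = 0.5 * np.sum(sigma**2 + mu**2 - 1.0 - np.log(sigma**2))
beta = 1.0
elbo = log_px - beta * kl
print(kl, elbo)
```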
pkyIntelligence/FasterRCNN | [
"230953938efdba8f8c127fcc0bb746fcce8d9463",
"230953938efdba8f8c127fcc0bb746fcce8d9463"
] | [
"FasterRCNN/layers/roi_align.py",
"FasterRCNN/data/samplers/grouped_batch_sampler.py"
] | [
"import torch\nimport math\n\nfrom torch import nn\nfrom ..utils.utils import point_interpolate\n\n\nclass ROIAlign(nn.Module):\n def __init__(self, output_size, spatial_scale, sampling_ratio):\n \"\"\"\n Args:\n output_size (tuple): h, w\n spatial_scale (float): scale the input boxes by this number\n sampling_ratio (int): number of inputs samples to take for each output\n sample. 0 to take samples densely.\n\n Note:\n point interpolate already accounts for alignment, just make sure the continuous coordinates are correct\n \"\"\"\n super(ROIAlign, self).__init__()\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n self.sampling_ratio = sampling_ratio\n\n def forward(self, input, rois):\n \"\"\"\n Args:\n input: NCHW images\n rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy.\n\n returns: ROIAligned output, shape = (B, Channels, self.output_size[0], self.output_size[1])\n \"\"\"\n assert rois.dim() == 2 and rois.size(1) == 5\n\n batch_indices, rois_only = torch.split(rois, split_size_or_sections=[1, 4], dim=1)\n batch_indices = batch_indices.squeeze().long()\n rois_only = rois_only * self.spatial_scale\n\n n_rois = len(batch_indices)\n\n pooled_height = self.output_size[0]\n pooled_width = self.output_size[1]\n\n channels = input.shape[1]\n\n output = input.new_zeros(size=(rois.shape[0], channels, pooled_height, pooled_width))\n\n for i in range(n_rois):\n batch_index = batch_indices[i]\n roi = rois_only[i]\n\n roi_start_w = roi[0]\n roi_start_h = roi[1]\n roi_end_w = roi[2]\n roi_end_h = roi[3]\n\n roi_width = roi_end_w - roi_start_w\n roi_height = roi_end_h - roi_start_h\n\n roi_width = max(roi_width, 1.)\n roi_height = max(roi_height, 1.)\n\n bin_size_h = roi_height / pooled_height\n bin_size_w = roi_width / pooled_width\n\n roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else math.ceil(roi_height / pooled_height)\n roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else math.ceil(roi_width / pooled_width)\n\n count = max(roi_bin_grid_h * roi_bin_grid_w, 1)\n\n # Construct Pooled ROI for all channels\n for ph in range(pooled_height):\n for pw in range(pooled_width):\n pooled_sum = input.new_zeros(size=(channels, ))\n\n for sample_h in range(roi_bin_grid_h):\n y = roi_start_h + ph * bin_size_h + ((sample_h + 0.5) / roi_bin_grid_h) * bin_size_h\n\n for sample_w in range(roi_bin_grid_w):\n x = roi_start_w + pw * bin_size_w + ((sample_w + 0.5) / roi_bin_grid_w) * bin_size_w\n\n sampled_point = point_interpolate(input[batch_index], torch.Tensor([x, y]))\n pooled_sum = pooled_sum + sampled_point\n\n output[i, :, ph, pw] = pooled_sum / count\n\n return output\n\n def __repr__(self):\n tmpstr = self.__class__.__name__ + \"(\"\n tmpstr += \"output_size=\" + str(self.output_size)\n tmpstr += \", spatial_scale=\" + str(self.spatial_scale)\n tmpstr += \", sampling_ratio=\" + str(self.sampling_ratio)\n tmpstr += \", aligned=\" + str(self.aligned)\n tmpstr += \")\"\n return tmpstr\n",
"import numpy as np\nfrom torch.utils.data.sampler import BatchSampler, Sampler\n\n\nclass GroupedBatchSampler(BatchSampler):\n \"\"\"\n Wraps another sampler to yield a mini-batch of indices.\n It enforces that the batch only contain elements from the same group.\n It also tries to provide mini-batches which follows an ordering which is\n as close as possible to the ordering from the original sampler.\n \"\"\"\n\n def __init__(self, sampler, group_ids, batch_size):\n \"\"\"\n Args:\n sampler (Sampler): Base sampler.\n group_ids (list[int]): If the sampler produces indices in range [0, N),\n `group_ids` must be a list of `N` ints which contains the group id of each sample.\n The group ids must be a set of integers in the range [0, num_groups).\n batch_size (int): Size of mini-batch.\n \"\"\"\n if not isinstance(sampler, Sampler):\n raise ValueError(\n \"sampler should be an instance of \"\n \"torch.utils.data.Sampler, but got sampler={}\".format(sampler)\n )\n self.sampler = sampler\n self.group_ids = np.asarray(group_ids)\n assert self.group_ids.ndim == 1\n self.batch_size = batch_size\n groups = np.unique(self.group_ids).tolist()\n\n # buffer the indices of each group until batch size is reached\n self.buffer_per_group = {k: [] for k in groups}\n\n def __iter__(self):\n for idx in self.sampler:\n group_id = self.group_ids[idx]\n group_buffer = self.buffer_per_group[group_id]\n group_buffer.append(idx)\n if len(group_buffer) == self.batch_size:\n yield group_buffer[:] # yield a copy of the list\n del group_buffer[:]\n\n def __len__(self):\n raise NotImplementedError(\"len() of GroupedBatchSampler is not well-defined.\")\n"
] | [
[
"torch.Tensor",
"torch.split"
],
[
"numpy.asarray",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
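The `ROIAlign.forward` in the entry above averages values sampled at continuous `(x, y)` locations via `point_interpolate`, whose body is not shown in this row. The sketch below is therefore only a plausible stand-in for that helper: plain bilinear interpolation on a single-channel NumPy feature map.

```python
import numpy as np

def bilinear_sample(feat, x, y):
    """Bilinearly interpolate a (H, W) feature map at continuous coords (x, y)."""
    h, w = feat.shape
    x0f, y0f = np.floor(x), np.floor(y)
    lx, ly = x - x0f, y - y0f                        # fractional offsets in [0, 1)
    x0 = int(np.clip(x0f, 0, w - 1))
    x1 = int(np.clip(x0f + 1, 0, w - 1))
    y0 = int(np.clip(y0f, 0, h - 1))
    y1 = int(np.clip(y0f + 1, 0, h - 1))
    top = (1 - lx) * feat[y0, x0] + lx * feat[y0, x1]
    bottom = (1 - lx) * feat[y1, x0] + lx * feat[y1, x1]
    return (1 - ly) * top + ly * bottom

feat = np.arange(16, dtype=float).reshape(4, 4)
print(bilinear_sample(feat, 1.5, 2.25))              # -> 10.5
```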
dhingratul/RNN | [
"9e1ac582dbf8251769817b34fc9d791fa8c20376"
] | [
"Memory_RNN.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 24 11:28:50 2017\n\n@author: dhingratul\n\"\"\"\nfrom __future__ import print_function, division\nimport numpy as np\nimport tensorflow as tf\nimport helpers\n# hyperparams\nnum_epochs = 10000\ntotal_series_length = 100\ntruncated_backprop_length = 5\nstate_size = 4 # Number of neurons in the hidden layer\nnum_classes = 2 # Data is binary, 0 / 1 = Two Classes\nbatch_size = 8\nnum_batches = total_series_length//batch_size//truncated_backprop_length\n\n# Step 1 - Data Generation\n# Generate integers and corresponding binary numbers randomly selected in a\n# range of 10,000. The data points are zero padded so as to make a constant\n# lenght of 100\n\nshift_batch = 0\n\n\ndef generateData(shift_batch):\n vector_size = 100\n batches = helpers.random_sequences(length_from=3, length_to=8,\n vocab_lower=0, vocab_upper=2,\n batch_size=vector_size)\n batch = next(batches)\n x, _ = helpers.batch(batch)\n if shift_batch == 0: # Learning the same sequence\n y = x\n else:\n y_inter2 = helpers.shifter(batch, shift_batch)\n y, _ = helpers.batch(y_inter2)\n return x, y\n\n# Step 2 - Build the Model\nbatchX_placeholder = tf.placeholder(\n tf.float32, [batch_size, truncated_backprop_length])\nbatchY_placeholder = tf.placeholder(\n tf.int32, [batch_size, truncated_backprop_length])\ninit_state = tf.placeholder(tf.float32, [batch_size, state_size])\n\n# Randomly initialize weights\nW = tf.Variable(np.random.rand(state_size+1, state_size), dtype=tf.float32)\nb = tf.Variable(np.zeros((1, state_size)), dtype=tf.float32)\n\nW2 = tf.Variable(np.random.rand(state_size, num_classes), dtype=tf.float32)\nb2 = tf.Variable(np.zeros((1, num_classes)), dtype=tf.float32)\n# Unpack columns\ninputs_series = tf.unstack(batchX_placeholder, axis=1)\nlabels_series = tf.unstack(batchY_placeholder, axis=1)\n# Forward pass\n# State placeholder\ncurrent_state = init_state\n# series of states through time\nstates_series = []\n\n# For each set of inputs, forward pass through the network to get new state\n# values and store all states in memory\nfor current_input in inputs_series:\n current_input = tf.reshape(current_input, [batch_size, 1])\n # Concatenate state and input data\n input_and_state_concatenated = tf.concat(\n axis=1, values=[current_input, current_state])\n next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b)\n # Store the state in memory\n states_series.append(next_state)\n # Set current state to next one\n current_state = next_state\n# Calculate loss\nlogits_series = [tf.matmul(state, W2) + b2 for state in states_series]\n# Softmax Non-linearity\npredictions_series = [tf.nn.softmax(logits) for logits in logits_series]\n\n# Measure loss, calculate softmax again on logits, then compute cross entropy\nlosses = [tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels) for logits,\n labels in zip(logits_series, labels_series)]\n# Average Loss\ntotal_loss = tf.reduce_mean(losses)\n# Use adagrad for minimization\ntrain_step = tf.train.AdagradOptimizer(0.2).minimize(total_loss)\n# Step 3 Training the network\nwith tf.Session() as sess:\n y = np.zeros([batch_size])\n sess.run(tf.global_variables_initializer())\n loss_list = []\n for epoch_idx in range(num_epochs):\n # Generate new data at every epoch\n x, y = generateData(shift_batch)\n while (len(y) > 8 or len(y) < 8):\n x, y = generateData(shift_batch)\n # Empty hidden state\n _current_state = np.zeros((batch_size, state_size))\n\n print(\"epoch\", epoch_idx)\n for 
batch_idx in range(num_batches):\n # layers unrolled to a limited number of time-steps:\n # truncated length\n start_idx = batch_idx * truncated_backprop_length\n end_idx = start_idx + truncated_backprop_length\n\n batchX = x[:, start_idx:end_idx]\n batchY = y[:, start_idx:end_idx]\n # Run the computation graph, give it the values\n _total_loss, _train_step, _current_state, _predictions_series = \\\n sess.run(\n [total_loss, train_step, current_state,\n predictions_series],\n feed_dict={\n batchX_placeholder: batchX,\n batchY_placeholder: batchY,\n init_state: _current_state\n })\n # print(batchX, batchY)\n loss_list.append(_total_loss)\n\n if batch_idx % 100 == 0:\n print(\"Loss\", _total_loss)\n"
] | [
[
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.concat",
"tensorflow.train.AdagradOptimizer",
"tensorflow.unstack",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.random.rand",
"tensorflow.Session",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
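`Memory_RNN.py` above unrolls a vanilla RNN by hand: each step concatenates the scalar input with the previous state and applies `tanh([x_t, h_prev] @ W + b)`. A NumPy sketch of that single step, using the entry's shapes (`batch_size=8`, `state_size=4`); the data is random placeholder input.

```python
import numpy as np

batch_size, state_size = 8, 4
W = np.random.rand(state_size + 1, state_size)      # rows: 1 input dim + state_size
b = np.zeros((1, state_size))

def rnn_step(x_t, h_prev):
    """One unrolled step: h_t = tanh([x_t, h_prev] @ W + b)."""
    concat = np.concatenate([x_t, h_prev], axis=1)   # shape (batch, 1 + state_size)
    return np.tanh(concat @ W + b)

h = np.zeros((batch_size, state_size))               # empty initial state
x_t = np.random.rand(batch_size, 1)                  # one column of the input batch
h = rnn_step(x_t, h)
print(h.shape)                                       # (8, 4)
```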
ashok-arjun/few-shot-ssl-public | [
"3cf522031aa40b4ffb61e4693d0b48fdd5669276"
] | [
"fewshot/data/compress_tiered_imagenet.py"
] | [
"# Copyright (c) 2018 Mengye Ren, Eleni Triantafillou, Sachin Ravi, Jake Snell,\n# Kevin Swersky, Joshua B. Tenenbaum, Hugo Larochelle, Richars S. Zemel.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# =============================================================================\nimport cv2\nimport numpy as np\nimport six\nimport sys\nimport pickle as pkl\n\nfrom tqdm import tqdm\n\n\ndef compress(path, output):\n with np.load(path, mmap_mode=\"r\") as data:\n images = data[\"images\"]\n array = []\n for ii in tqdm(six.moves.xrange(images.shape[0]), desc='compress'):\n im = images[ii]\n im_str = cv2.imencode('.png', im)[1]\n array.append(im_str)\n with open(output, 'wb') as f:\n pkl.dump(array, f, protocol=pkl.HIGHEST_PROTOCOL)\n\n\ndef decompress(path, output):\n with open(output, 'rb') as f:\n array = pkl.load(f)\n images = np.zeros([len(array), 84, 84, 3], dtype=np.uint8)\n for ii, item in tqdm(enumerate(array), desc='decompress'):\n im = cv2.imdecode(item, 1)\n images[ii] = im\n np.savez(path, images=images)\n\n\ndef main():\n if sys.argv[1] == 'compress':\n compress(sys.argv[2], sys.argv[3])\n elif sys.argv[1] == 'decompress':\n decompress(sys.argv[2], sys.argv[3])\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.load",
"numpy.savez"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
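`compress_tiered_imagenet.py` stores each image as PNG-encoded bytes via `cv2.imencode` and restores it with `cv2.imdecode`. A minimal round trip of that idea on a single synthetic 84x84 image, without the pickle/npz wrapping used in the entry:

```python
import cv2
import numpy as np

img = (np.random.rand(84, 84, 3) * 255).astype(np.uint8)

ok, buf = cv2.imencode('.png', img)      # buf is a 1-D uint8 array of PNG bytes
assert ok
restored = cv2.imdecode(buf, 1)          # flag 1: decode as a 3-channel image

assert np.array_equal(img, restored)     # PNG is lossless, so the round trip is exact
```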
selimfirat/pysad | [
"dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede",
"dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede"
] | [
"tests/transform/preprocessing/test_instance_unit_norm_scaler.py",
"pysad/models/loda.py"
] | [
"\n\ndef test_instance_unit_norm_scaler():\n import numpy as np\n from pysad.transform.preprocessing import InstanceUnitNormScaler\n\n X = np.random.rand(100, 25)\n scaler = InstanceUnitNormScaler()\n\n scaled_X = scaler.fit_transform(X)\n assert np.all(np.isclose(np.linalg.norm(scaled_X, axis=1), 1.0))\n\n scaler = scaler.fit(X)\n scaled_X = scaler.transform(X)\n assert np.all(np.isclose(np.linalg.norm(scaled_X, axis=1), 1.0))\n",
"from pysad.core.base_model import BaseModel\nimport numpy as np\n\n\nclass LODA(BaseModel):\n \"\"\"The LODA model :cite:`pevny2016loda` The implemnetation is adapted to the steraming framework from the `PyOD framework <https://pyod.readthedocs.io/en/latest/_modules/pyod/models/loda.html#LODA>`_.\n\n Args:\n num_bins (int): The number of bins of the histogram.\n num_random_cuts (int): The number of random cuts.\n \"\"\"\n\n def __init__(self, num_bins=10, num_random_cuts=100):\n self.to_init = True\n self.n_bins = num_bins\n self.n_random_cuts = num_random_cuts\n\n def fit_partial(self, X, y=None):\n \"\"\"Fits the model to next instance.\n\n Args:\n X (np.float array of shape (num_features,)): The instance to fit.\n y (int): Ignored since the model is unsupervised (Default=None).\n\n Returns:\n object: Returns the self.\n \"\"\"\n if self.to_init:\n self.num_features = X.shape[0]\n self.weights = np.ones(\n self.n_random_cuts,\n dtype=np.float) / self.n_random_cuts\n self.projections_ = np.random.randn(\n self.n_random_cuts, self.num_features)\n self.histograms_ = np.zeros((self.n_random_cuts, self.n_bins))\n self.limits_ = np.zeros((self.n_random_cuts, self.n_bins + 1))\n\n n_nonzero_components = np.sqrt(self.num_features)\n self.n_zero_components = self.num_features - \\\n np.int(n_nonzero_components)\n\n self.to_init = False\n\n X = X.reshape(1, -1)\n\n for i in range(self.n_random_cuts):\n rands = np.random.permutation(self.num_features)[\n :self.n_zero_components]\n self.projections_[i, rands] = 0.\n projected_data = self.projections_[i, :].dot(X.T)\n self.histograms_[i, :], self.limits_[i, :] = np.histogram(\n projected_data, bins=self.n_bins, density=False)\n self.histograms_[i, :] += 1e-12\n self.histograms_[i, :] /= np.sum(self.histograms_[i, :])\n\n return self\n\n def score_partial(self, X):\n \"\"\"Scores the anomalousness of the next instance.\n\n Args:\n X (np.float array of shape (num_features,)): The instance to score. Higher scores represent more anomalous instances whereas lower scores correspond to more normal instances.\n\n Returns:\n float: The anomalousness score of the input instance.\n \"\"\"\n X = X.reshape(1, -1)\n\n pred_scores = np.zeros([X.shape[0], 1])\n for i in range(self.n_random_cuts):\n projected_data = self.projections_[i, :].dot(X.T)\n inds = np.searchsorted(self.limits_[i, :self.n_bins - 1],\n projected_data, side='left')\n pred_scores[:, 0] += -self.weights[i] * np.log(\n self.histograms_[i, inds])\n pred_scores /= self.n_random_cuts\n\n return pred_scores.ravel()\n"
] | [
[
"numpy.linalg.norm",
"numpy.random.rand"
],
[
"numpy.log",
"numpy.sqrt",
"numpy.ones",
"numpy.int",
"numpy.random.permutation",
"numpy.random.randn",
"numpy.searchsorted",
"numpy.histogram",
"numpy.sum",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
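`loda.py` scores an instance by projecting it onto random vectors, locating the projection's histogram bin with `np.searchsorted`, and averaging negative log bin probabilities. The sketch below shows that scoring idea for a single projection; it simplifies the entry's bin-edge slicing and uses made-up data.

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(1000, 5))              # "normal" training instances
w = rng.normal(size=5)                         # one random projection vector

hist, edges = np.histogram(data @ w, bins=10)
probs = (hist + 1e-12) / hist.sum()            # smoothed bin probabilities

x = rng.normal(size=5) * 5                     # a far-out instance
bin_idx = np.searchsorted(edges[1:-1], x @ w)  # interior edges map a value to bins 0..9
score = -np.log(probs[bin_idx])                # larger score -> more anomalous
print(score)
```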
corganhejijun/frontal-trans | [
"1509babf2447a53a772703b09cb6a2daec6968a7"
] | [
"test_sample.py"
] | [
"# -*- coding: utf-8 -*- \nimport os\nimport cv2\nfrom scipy import misc\nfrom PIL import Image\n\nsample_path = 'datasets/celeb_train/lfw_trans'\ndest_path = sample_path + \"/../dest\"\nmiddleSize = 64\nimgSize = 256\nkernel_size = (5, 5)\nsigma = 5\n\nif not os.path.exists(dest_path):\n os.mkdir(dest_path)\n\nfileList = os.listdir(sample_path)\nfor index, file in enumerate(fileList):\n imgPath = os.path.join(sample_path, file)\n if os.path.isdir(imgPath):\n continue\n print(\"procesing \" + file + \" \" + str(index+1) + '/' + str(len(fileList)))\n img = cv2.cvtColor(cv2.imread(imgPath), cv2.COLOR_BGR2RGB)\n img = misc.imresize(img, (middleSize, middleSize), interp='bilinear')\n img = misc.imresize(img, (imgSize, imgSize), interp='bilinear')\n img = cv2.GaussianBlur(img, kernel_size, sigma)\n combineImg = Image.new('RGB', (img.shape[0]*2, img.shape[0]))\n combineImg.paste(Image.fromarray(img), (0,0))\n combineImg.paste(Image.fromarray(img), (img.shape[0]+1,0))\n misc.imsave(os.path.join(dest_path, file), combineImg)\n"
] | [
[
"scipy.misc.imresize"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"1.0",
"0.19",
"0.18",
"1.2",
"0.12",
"0.10",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
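`test_sample.py` relies on `scipy.misc.imresize` and `scipy.misc.imsave`, which were deprecated in SciPy 1.0 and removed in later releases, which is presumably why the version list above stops around 1.2. A sketch of the usual PIL-based replacement; the file name and random input are illustrative.

```python
import numpy as np
from PIL import Image

img = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)

# Replacement for scipy.misc.imresize(img, (64, 64), interp='bilinear').
# Note PIL's resize takes (width, height); it does not matter for a square target.
small = np.array(Image.fromarray(img).resize((64, 64), resample=Image.BILINEAR))

# Replacement for scipy.misc.imsave(path, img).
Image.fromarray(small).save("resized.png")
```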
YusrilHasanuddin/bangkit-capstone-CAP0166 | [
"51742f7af47fa285154793a6ea74de1d78d945b3"
] | [
"ml-project/extract_face_yusril.py"
] | [
"import sys\nimport os\nimport traceback\nfrom PIL import Image\nfrom facenet_pytorch import MTCNN\nimport matplotlib.image as mpimg\nimport numpy as np\n\n\ndef detect_faces(image_path):\n mtcnn = MTCNN(margin=20, keep_all=True,\n post_process=False, device='cuda:0')\n image = image_path\n image = mpimg.imread(image)\n image = Image.fromarray(image)\n faces = mtcnn(image)\n count = 0\n for face in faces:\n face = face.permute(1, 2, 0).int().numpy()\n # cv2.imwrite(os.path.join(\n # path_folder, \"face\" + str(count) + \".jpg\"),face)\n face = Image.fromarray((face).astype(np.uint8))\n face.save(os.path.join(path_folder, \"face\" + str(count) + \".jpg\"))\n count = count + 1\n\n\nif __name__ == \"__main__\":\n fcount = 0\n while os.path.exists(\"ExtractedFaceFolder\" + str(fcount)) == True:\n fcount = fcount + 1\n if os.path.exists(\"ExtractedFaceFolder\" + str(fcount)) == False:\n break\n else:\n continue\n os.mkdir(\"ExtractedFaceFolder\" + str(fcount))\n path_folder = \"ExtractedFaceFolder\" + str(fcount)\n\n if len(sys.argv) < 2:\n print(\"Usage: python detect_extract_save.py 'image path'\")\n sys.exit()\n\n if os.path.isdir(sys.argv[1]):\n for image in os.listdir(sys.argv[1]):\n try:\n print(\"Processing.....\",os.path.abspath(\n os.path.join(sys.argv[1],image)))\n detect_faces(os.path.abspath(\n os.path.join(sys.argv[1],image)),False)\n except Exception:\n print(\"Could not process \",os.path.abspath(\n os.path.join(sys.argv[1],image)))\n else:\n detect_faces(sys.argv[1])\n"
] | [
[
"matplotlib.image.imread"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
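In `extract_face_yusril.py` the MTCNN call returns per-face CHW float tensors, which are permuted to HWC and cast to uint8 before saving. That conversion step in isolation, with a random tensor standing in for a real detection and an illustrative output file name:

```python
import numpy as np
import torch
from PIL import Image

face = torch.rand(3, 160, 160) * 255                 # stand-in for one detected face tensor
hwc = face.permute(1, 2, 0).int().numpy()            # CHW -> HWC, as done in the entry
Image.fromarray(hwc.astype(np.uint8)).save("face0.jpg")
```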
sailab-code/SAILenv | [
"e202be04de468a58e58ae858693245f5556c3597"
] | [
"example_unity_socket.py"
] | [
"#\n# Copyright (C) 2020 Enrico Meloni, Luca Pasqualini, Matteo Tiezzi\n# University of Siena - Artificial Intelligence Laboratory - SAILab\n#\n#\n# SAILenv is licensed under a MIT license.\n#\n# You should have received a copy of the license along with this\n# work. If not, see <https://en.wikipedia.org/wiki/MIT_License>.\n\n\n# Import packages\n\nimport time\nimport numpy as np\nimport cv2\nimport tkinter as tk\nfrom PIL import Image, ImageTk\n\n# Import src\n\nfrom sailenv.agent import Agent\n\nframes: int = 1000\n\n\ndef decode_image(array: np.ndarray):\n \"\"\"\n Decode the given numpy array with OpenCV.\n\n :param array: the numpy array to decode\n :return: the decoded image that can be displayed\n \"\"\"\n image = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)\n return image\n\n\ndef draw_flow_lines(current_frame, optical_flow, line_step=16, line_color=(0, 255, 0)):\n frame_with_lines = current_frame.copy()\n line_color = (line_color[2], line_color[1], line_color[0])\n\n for y in range(0, optical_flow.shape[0], line_step):\n for x in range(0, optical_flow.shape[1], line_step):\n fx, fy = optical_flow[y, x]\n cv2.line(frame_with_lines, (x, y), (int(x + fx), int(y + fy)), line_color)\n cv2.circle(frame_with_lines, (x, y), 1, line_color, -1)\n\n return frame_with_lines\n\n\ndef draw_flow_map(optical_flow):\n hsv = np.zeros((optical_flow.shape[0], optical_flow.shape[1], 3), dtype=np.uint8)\n hsv[..., 1] = 255\n\n mag, ang = cv2.cartToPolar(optical_flow[..., 0], optical_flow[..., 1])\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)\n frame_flow_map = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n return frame_flow_map\n\n\ndef create_windows(agent: Agent):\n windows = {}\n for view, is_active in agent.active_frames.items():\n if is_active:\n window = tk.Tk()\n window.geometry(f\"{agent.width}x{agent.height}\")\n windows[view] = window\n\n\n\n\n\n# host = \"bronte.diism.unisi.it\"\nhost = \"127.0.0.1\"\n# host = \"eliza.diism.unisi.it\"\nif __name__ == '__main__':\n print(\"Generating agent...\")\n agent = Agent(depth_frame_active=True,\n flow_frame_active=True,\n object_frame_active=True,\n main_frame_active=True,\n category_frame_active=True, width=256, height=192, host=host, port=8085, use_gzip=False)\n print(\"Registering agent on server...\")\n agent.register()\n print(f\"Agent registered with ID: {agent.id}\")\n last_unity_time: float = 0.0\n\n print(f\"Available scenes: {agent.scenes}\")\n\n scene = agent.scenes[0]\n print(f\"Changing scene to {scene}\")\n agent.change_scene(scene)\n\n print(f\"Available categories: {agent.categories}\")\n\n # print(agent.get_resolution())\n try:\n print(\"Press ESC to close\")\n while True:\n start_real_time = time.time()\n start_unity_time = last_unity_time\n\n start_get = time.time()\n frame = agent.get_frame()\n step_get = time.time() - start_get\n\n print(f\"get frame in seconds: {step_get}, fps: {1/step_get}\")\n\n if frame[\"main\"] is not None:\n main_img = cv2.cvtColor(frame[\"main\"], cv2.COLOR_RGB2BGR)\n cv2.imshow(\"PBR\", main_img)\n\n if frame[\"category\"] is not None:\n start_get_cat = time.time()\n # cat_img = np.zeros((agent.height * agent.width, 3), dtype=np.uint8)\n # Extract values and keys\n k = np.array(list(agent.cat_colors.keys()))\n v = np.array(list(agent.cat_colors.values()))\n\n mapping_ar = np.zeros((np.maximum(np.max(k)+1, 256), 3), dtype=v.dtype)\n mapping_ar[k] = v\n out = mapping_ar[frame[\"category\"]]\n\n # for idx, sup in enumerate(frame[\"category\"]):\n # try:\n # color 
= agent.cat_colors[sup]\n # cat_img[idx] = color\n # except KeyError:\n # #print(f\"key error on color get: {sup}\")\n # cat_img[idx] = [0,0,0]\n\n cat_img = np.reshape(out, (agent.height, agent.width, 3))\n cat_img = cat_img.astype(np.uint8)\n\n # unity stores the image as left to right, bottom to top\n # while CV2 reads it left to right, top to bottom\n # a flip up-down solves the problem\n # cat_img = np.flipud(cat_img)\n\n step_get_cat = time.time() - start_get_cat\n print(f\"Plot category in : {step_get_cat}\")\n cv2.imshow(\"Category\", cat_img)\n\n if frame[\"object\"] is not None:\n obj_img = decode_image(frame[\"object\"])\n cv2.imshow(\"Object ID\", obj_img)\n\n if frame[\"flow\"] is not None:\n flow = frame[\"flow\"]\n flow_img = draw_flow_map(flow)\n cv2.imshow(\"Optical Flow\", flow_img)\n\n if frame[\"depth\"] is not None:\n depth = frame[\"depth\"]\n cv2.imshow(\"Depth\", depth)\n\n key = cv2.waitKey(1)\n # print(f\"FPS: {1/(time.time() - start_real_time)}\")\n if key == 27: # ESC Pressed\n break\n finally:\n print(f\"Closing agent {agent.id}\")\n agent.delete()\n"
] | [
[
"numpy.reshape",
"numpy.max",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
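`example_unity_socket.py` colours the category frame by building a lookup table indexed by category id and applying it to the whole label image in one fancy-indexing step. That trick in isolation, with an assumed toy colour map in place of `agent.cat_colors`:

```python
import numpy as np

cat_colors = {0: (0, 0, 0), 1: (255, 0, 0), 7: (0, 255, 0)}     # assumed toy mapping

k = np.array(list(cat_colors.keys()))
v = np.array(list(cat_colors.values()))

lut = np.zeros((max(int(k.max()) + 1, 256), 3), dtype=v.dtype)   # id -> colour table
lut[k] = v

labels = np.random.choice(k, size=(192, 256))                    # fake category frame
rgb = lut[labels]                                                # vectorised colouring
print(rgb.shape)                                                 # (192, 256, 3)
```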
georgeAccnt-GH/Azure2019 | [
"5c9774b644d3ea15590d72d3de9363df72abf7ab"
] | [
"src/AzureFunctions/ComputeGradient/AzureUtilities.py"
] | [
"import numpy as np\nimport segyio\nimport subprocess\nimport os, h5py\nfrom scipy import interpolate\nfrom devito import Eq, Operator\nfrom azure.storage.blob import BlockBlobService, PublicAccess\n\nblob_service = BlockBlobService(account_name='', account_key='')\n\n####################################################################################################\n# array put and get\n\ndef convert_to_string(t):\n if len(t) == 1:\n return str(t[0])\n elif len(t) == 2:\n return str(t[0]) + 'S' + str(t[1])\n else:\n return str(t[0]) + 'S' + str(t[1]) + 'S' + str(t[2])\n\ndef convert_int_from_string(s):\n s_split = s.split('S')\n ndim = len(s_split)\n if ndim==1:\n n = int(s_split[0])\n elif ndim==2:\n n1 = int(s_split[0])\n n2 = int(s_split[1])\n n = (n1, n2)\n else:\n n1 = int(s_split[0])\n n2 = int(s_split[1])\n n3 = int(s_split[2])\n n = (n1, n2, n3)\n return n\n\ndef convert_float_from_string(s):\n s_split = s.split('S')\n ndim = len(s_split)\n d1 = float(s_split[0])\n d2 = float(s_split[1])\n if ndim==2:\n d = (d1, d2)\n else:\n d3 = float(s_split[2])\n d = (d1, d2, d3)\n return d\n\n# write array\ndef array_put(blob, container, blob_name, index=0, count=None, validate_content=False):\n shape_str = convert_to_string(blob.shape)\n meta = {'dtype':str(blob.dtype), 'shape': shape_str}\n blob_service.create_blob_from_bytes(\n container,\n blob_name,\n blob.tostring(), # blob\n index = index, # start index in array of bytes\n count = count, # number of bytes to upload\n metadata = meta, # Name-value pairs\n validate_content = validate_content\n )\n\n# put array\ndef array_get(container, blob_name, start_range=None, end_range=None, validate_content=False):\n binary_blob = blob_service.get_blob_to_bytes(\n container,\n blob_name,\n start_range=start_range,\n end_range=end_range,\n validate_content=validate_content\n )\n try:\n meta = binary_blob.metadata\n shape = convert_int_from_string(meta['shape'])\n x = np.fromstring(binary_blob.content, dtype=meta['dtype'])\n return x.reshape(shape)\n except:\n x = np.fromstring(binary_blob.content, dtype='float32')\n return x\n####################################################################################################\n# model put and get\n\n# write model\ndef model_put(model_blob, origin, spacing, container, blob_name, index=0, count=None, validate_content=False):\n shape_str = convert_to_string(model_blob.shape)\n origin_str = convert_to_string(origin)\n spacing_str = convert_to_string(spacing)\n meta = {'dtype':str(model_blob.dtype), 'shape': shape_str, 'origin': origin_str, 'spacing': spacing_str}\n blob_service.create_blob_from_bytes(\n container,\n blob_name,\n model_blob.tostring(), # blob\n index = index, # start index in array of bytes\n count = count, # number of bytes to upload\n metadata = meta, # Name-value pairs\n validate_content = validate_content\n )\n\n# read model\ndef model_get(container, blob_name, start_range=None, end_range=None, validate_content=False):\n binary_blob = blob_service.get_blob_to_bytes(\n container,\n blob_name,\n start_range=start_range,\n end_range=end_range,\n validate_content=validate_content\n )\n meta = binary_blob.metadata\n shape = convert_int_from_string(meta['shape'])\n origin = convert_float_from_string(meta['origin'])\n spacing = convert_float_from_string(meta['spacing'])\n x = np.fromstring(binary_blob.content, dtype=meta['dtype'])\n return x.reshape(shape), origin, spacing\n\ndef model_read(filename):\n h5f = h5py.File(filename, 'r')\n m = h5f['m'][:]\n o = h5f['origin'][:]\n d = 
h5f['spacing'][:]\n h5f.close()\n return m, o, d\n\ndef model_write(m, origin, spacing, filename):\n h5f = h5py.File(filename, 'w')\n h5f.create_dataset('m', data=m)\n h5f.create_dataset('origin', data=origin)\n h5f.create_dataset('spacing', data=spacing)\n h5f.close()\n\n####################################################################################################\n# segy read\n\ndef segy_get(container, path, filename, ndims=2, keepFile=False):\n\n # copy from s3 to local volume\n subprocess.run(['az', 'storage', 'blob', 'download', '--container-name', container, '--name', path + filename,\n '--file', os.getcwd() + '/' + filename, '--output', 'table'])\n argout = segy_read(filename, ndims=ndims)\n\n if keepFile is False:\n subprocess.run(['rm', '-f', filename])\n\n return argout\n\ndef segy_read(filename, ndims=2):\n\n with segyio.open(filename, \"r\", ignore_geometry=True) as segyfile:\n segyfile.mmap()\n\n # Assume input data is for single common shot gather\n sourceX = segyfile.attributes(segyio.TraceField.SourceX)[0]\n sourceY = segyfile.attributes(segyio.TraceField.SourceY)[0]\n sourceZ = segyfile.attributes(segyio.TraceField.SourceSurfaceElevation)[0]\n groupX = segyfile.attributes(segyio.TraceField.GroupX)[:]\n groupY = segyfile.attributes(segyio.TraceField.GroupY)[:]\n groupZ = segyfile.attributes(segyio.TraceField.ReceiverGroupElevation)[:]\n dt = segyio.dt(segyfile)/1e3\n\n # Apply scaling\n elevScalar = segyfile.attributes(segyio.TraceField.ElevationScalar)[0]\n coordScalar = segyfile.attributes(segyio.TraceField.SourceGroupScalar)[0]\n\n if coordScalar < 0.:\n sourceX = sourceX / np.abs(coordScalar)\n sourceY = sourceY / np.abs(coordScalar)\n sourceZ = sourceZ / np.abs(elevScalar)\n groupX = groupX / np.abs(coordScalar)\n groupY = groupY / np.abs(coordScalar)\n elif coordScalar > 0.:\n sourceX = sourceX * np.abs(coordScalar)\n sourceY = sourceY * np.abs(coordScalar)\n sourceZ = sourceZ * np.abs(elevScalar)\n groupX = groupX * np.abs(coordScalar)\n groupY = groupY * np.abs(coordScalar)\n\n if elevScalar < 0.:\n groupZ = groupZ / np.abs(elevScalar)\n elif elevScalar > 0.:\n groupZ = groupZ * np.abs(elevScalar)\n\n nrec = len(groupX)\n nt = len(segyfile.trace[0])\n\n # Extract data\n data = np.zeros(shape=(nt, nrec), dtype='float32')\n for i in range(nrec):\n data[:,i] = segyfile.trace[i]\n tmax = (nt-1)*dt\n\n if ndims == 2:\n return data, sourceX, sourceZ, groupX, groupZ, tmax, dt, nt\n else:\n return data, sourceX, sourceY, sourceZ, groupX, groupY, groupZ, tmax, dt, nt\n\n\ndef segy_model_read(filename):\n\n with segyio.open(filename, \"r\", ignore_geometry=True) as segyfile:\n segyfile.mmap()\n\n # Assume input data is for single common shot gather\n sourceX = segyfile.attributes(segyio.TraceField.SourceX)\n dx = segyio.dt(segyfile)/1e3\n\n # Apply scaling\n coordScalar = segyfile.attributes(segyio.TraceField.SourceGroupScalar)[0]\n\n if coordScalar < 0.:\n sourceX = sourceX / np.abs(coordScalar)\n elif coordScalar > 0.:\n sourceX = sourceX * np.abs(coordScalar)\n\n nx = len(sourceX)\n nz = len(segyfile.trace[0])\n\n # Extract data\n data = np.zeros(shape=(nx, nz), dtype='float32')\n for i in range(nx):\n data[i,:] = segyfile.trace[i]\n\n return data, sourceX, dx\n\n\ndef segy_put(data, sourceX, sourceZ, groupX, groupZ, dt, container, path, filename, sourceY=None, groupY=None, elevScalar=-1000, coordScalar=-1000, keepFile=False):\n\n # Write segy file\n segy_write(data, sourceX, sourceZ, groupX, groupZ, dt, filename, sourceY=None, groupY=None, elevScalar=-1000, 
coordScalar=-1000)\n\n # copy from local volume to s3\n status = subprocess.run(['az', 'storage', 'blob', 'upload', '--container-name', container, '--file', filename, '--name', path+filename])\n\n if keepFile is False:\n subprocess.run(['rm', '-f', filename])\n\n return status\n\n\ndef segy_write(data, sourceX, sourceZ, groupX, groupZ, dt, filename, sourceY=None, groupY=None, elevScalar=-1000, coordScalar=-1000):\n\n nt = data.shape[0]\n nsrc = 1\n nxrec = len(groupX)\n if sourceY is None and groupY is None:\n sourceY = np.zeros(1, dtype='int')\n groupY = np.zeros(nxrec, dtype='int')\n nyrec = len(groupY)\n\n # Create spec object\n spec = segyio.spec()\n spec.ilines = np.arange(nxrec) # dummy trace count\n spec.xlines = np.zeros(1, dtype='int') # assume coordinates are already vectorized for 3D\n spec.samples = range(nt)\n spec.format=1\n spec.sorting=1\n\n with segyio.create(filename, spec) as segyfile:\n for i in range(nxrec):\n segyfile.header[i] = {\n segyio.su.tracl : i+1,\n segyio.su.tracr : i+1,\n segyio.su.fldr : 1,\n segyio.su.tracf : i+1,\n segyio.su.sx : int(np.round(sourceX[0] * np.abs(coordScalar))),\n segyio.su.sy : int(np.round(sourceY[0] * np.abs(coordScalar))),\n segyio.su.selev: int(np.round(sourceZ[0] * np.abs(elevScalar))),\n segyio.su.gx : int(np.round(groupX[i] * np.abs(coordScalar))),\n segyio.su.gy : int(np.round(groupY[i] * np.abs(coordScalar))),\n segyio.su.gelev : int(np.round(groupZ[i] * np.abs(elevScalar))),\n segyio.su.dt : int(dt*1e3),\n segyio.su.scalel : int(elevScalar),\n segyio.su.scalco : int(coordScalar)\n }\n segyfile.trace[i] = data[:, i]\n segyfile.dt=int(dt*1e3)\n\n\n\n####################################################################################################\n# Auxiliary modeling functions\n\n# Add/subtract devito data w/ MPI\ndef add_rec(d1, d2):\n eq = Eq(d1, d1 + d2)\n op = Operator([eq])\n op()\n return d1\n\ndef sub_rec(d1, d2):\n eq = Eq(d1, d1 - d2)\n op = Operator([eq],subs={d2.indices[-1]: d1.indices[-1]})\n op()\n return d1\n\n# Create 3D receiver grid from 1D x and y receiver vectors\ndef create_3D_grid(xrec, yrec, zrec):\n\n nxrec = len(xrec)\n nyrec = len(yrec)\n nrec_total = nxrec * nyrec\n\n rec = np.zeros(shape=(nrec_total, 3), dtype='float32')\n count = 0\n for j in range(nxrec):\n for k in range(nyrec):\n rec[count, 0] = xrec[j]\n rec[count, 1] = yrec[k]\n rec[count, 2] = zrec\n count += 1\n return rec\n\n\ndef restrict_model_to_receiver_grid(sx, gx, m, spacing, origin, sy=None, gy=None, buffer_size=500, numpy_coords=True):\n\n # Model parameters\n shape = m.shape\n ndim = len(shape)\n if ndim == 2:\n domain_size = ((shape[0] - 1) * spacing[0], (shape[1] - 1) * spacing[1])\n else:\n domain_size = ((shape[0] - 1) * spacing[0], (shape[1] - 1) * spacing[1], \\\n (shape[2] - 1) * spacing[2])\n\n # Scan for minimum/maximum source/receiver coordinates\n min_x = np.min([np.min(sx), np.min(gx)])\n max_x = np.max([np.max(sx), np.max(gx)])\n if sy is not None and gy is not None:\n min_y = np.min([np.min(sy), np.min(gy)])\n max_y = np.max([np.max(sy), np.max(gy)])\n\n # Add buffer zone if possible\n min_x = np.max([origin[0], min_x - buffer_size])\n max_x = np.min([origin[0] + domain_size[0], max_x + buffer_size])\n #print(\"min_x: \", min_x)\n #print(\"max_x: \", max_x)\n if ndim == 3:\n min_y = np.max([origin[1], min_y - buffer_size])\n max_y = np.min([origin[1] + domain_size[1], max_y + buffer_size])\n #print(\"min_y: \", min_y)\n #print(\"max_y: \", max_y)\n\n # Extract model part\n nx_min = int(min_x / spacing[0])\n nx_max = 
int(max_x / spacing[0])\n #print(\"nx_min: \", nx_min)\n #print(\"nx_max: \", nx_max)\n ox = nx_min * spacing[0]\n oz = origin[-1]\n if ndim == 3:\n ny_min = int(min_y / spacing[1])\n ny_max = int(max_y / spacing[1])\n #print(\"ny_min: \", ny_min)\n #print(\"ny_max: \", ny_max)\n oy = ny_min * spacing[1]\n\n # Extract relevant part of model\n n_orig = shape\n #print(\"Original shape: \", n_orig)\n if ndim == 2:\n m = m[nx_min:nx_max+1, :]\n origin = (ox, oz)\n else:\n m = m[nx_min:nx_max+1, ny_min:ny_max+1, :]\n origin = (ox, oy, oz)\n shape = m.shape\n #print(\"New shape: \", shape)\n\n return m, shape, origin\n\n\ndef extent_gradient(shape_full, origin_full, shape_sub, origin_sub, spacing, g):\n\n nz = shape_full[-1]\n ndim = len(shape_full)\n\n nx_left = int((origin_sub[0] - origin_full[0]) / spacing[0])\n nx_right = shape_full[0] - shape_sub[0] - nx_left\n\n if ndim == 3:\n ny_left = int((origin_sub[1] - origin_full[1]) / spacing[1])\n ny_right = shape_full[1] - shape_sub[1] - ny_left\n\n if ndim == 2:\n block1 = np.zeros(shape=(nx_left, nz), dtype='float32')\n block2 = np.zeros(shape=(nx_right, nz), dtype='float32')\n g = np.concatenate((block1, g, block2), axis=0)\n else:\n block1 = np.zeros(shape=(nx_left, shape_sub[1], nz), dtype='float32')\n block2 = np.zeros(shape=(nx_right, shape_sub[1], nz), dtype='float32')\n g = np.concatenate((block1, g, block2), axis=0)\n del block1, block2\n block3 = np.zeros(shape=(shape_full[0], ny_left, nz), dtype='float32')\n block4 = np.zeros(shape=(shape_full[0], ny_right, nz), dtype='float32')\n g = np.concatenate((block3, g, block4), axis=1)\n\n return g\n\n\n####################################################################################################\n# Auxiliary AWS functions\n\n\ndef resample(data, t0, tn, nt_prev, nt_new):\n\n time_prev = np.linspace(start=t0, stop=tn, num=nt_prev)\n time_new = np.linspace(start=t0, stop=tn, num=nt_new)\n\n d_resamp = np.zeros(shape=(len(time_new), data.shape[1]), dtype='float32')\n for i in range(data.shape[1]):\n tck = interpolate.splrep(time_prev, data[:, i], k=3)\n d_resamp[:, i] = interpolate.splev(time_new, tck)\n return d_resamp\n\n\n# Get chunk size of gradient\ndef get_chunk_size(g_size, num_chunks):\n\n average_size = int(g_size/num_chunks)\n num_residuals = g_size % num_chunks\n chunk_size = np.ones(num_chunks, dtype='int')*average_size\n if num_residuals > 0:\n for j in range(num_residuals):\n chunk_size[j] += 1\n return chunk_size\n"
] | [
[
"scipy.interpolate.splrep",
"numpy.abs",
"numpy.linspace",
"numpy.min",
"numpy.arange",
"scipy.interpolate.splev",
"numpy.concatenate",
"numpy.max",
"numpy.ones",
"numpy.fromstring",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
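`AzureUtilities.py` ships numpy arrays to blob storage as raw bytes plus dtype/shape metadata, with the shape flattened into an 'S'-separated string. The same round trip done locally, without any Azure calls; note that current numpy prefers `tobytes`/`frombuffer` over the entry's `tostring`/`fromstring`.

```python
import numpy as np

a = np.random.rand(3, 4).astype('float32')

# Serialise: raw bytes plus string metadata, as the blob put functions do.
meta = {'dtype': str(a.dtype), 'shape': 'S'.join(str(n) for n in a.shape)}
payload = a.tobytes()

# Deserialise: rebuild dtype/shape from the metadata and reshape the flat buffer.
shape = tuple(int(s) for s in meta['shape'].split('S'))
restored = np.frombuffer(payload, dtype=meta['dtype']).reshape(shape)

assert np.array_equal(a, restored)
```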
TwinIsland/img2java | [
"6b6788daa0a97acb1e455ead9d7bd09d7d881ab2"
] | [
"treat.py"
] | [
"from matplotlib import pyplot as plt\nimport numpy as np\nimport cv2\nfrom scipy import stats\nimport translate\nfrom skimage import transform\n\n#####################################\nimgData = cv2.imread('van.jpg',0)\ncompressRate = 0.4\n#####################################\n\nimgData = np.array(imgData)\nshape = imgData.shape\npas = p = 'unknown'\n\ndef twoWayTreat():\n global imgData\n imgData = stats.zscore(imgData)\n\n for raw in range(shape[0]):\n for col in range(shape[1]):\n if imgData[raw][col] < 0:\n imgData[raw][col] = 0\n else:\n imgData[raw][col] = 255\n\n\ndef debugImg():\n global imgData\n plt.imshow(imgData)\n plt.show()\n\n\ndef getCode():\n code = ''\n for this_line_index in range(len(imgData)-1):\n lineLib = []\n this_line = imgData[this_line_index]\n newTurn = False\n for this_line_data_index in range(len(this_line)-1):\n if this_line[this_line_data_index] == 255:\n begin_draw = this_line_data_index\n newTurn = True\n\n if this_line[this_line_data_index] == 0 and newTurn:\n end_draw = this_line_data_index\n lineLib.append([begin_draw,end_draw])\n newTurn = False\n\n for i in lineLib:\n code = code + translate.getCode([i[0],this_line_index,i[1],this_line_index]) + '\\n'\n\n return code\n\ndef compressImg():\n global imgData,compressRate\n imgData = transform.rescale(imgData, [compressRate,compressRate])\n\n\ndef passivate():\n count = 0\n global imgData\n shape = imgData.shape\n lineLenght = shape[1]\n for lineIndex in range(shape[0]-1):\n for numberIndex in range(0,lineLenght-6):\n thisFive = list(imgData[lineIndex,numberIndex:numberIndex+5])\n if thisFive == [0,255,255,255,255]:\n count += 1\n thisFive[0] =255\n imgData[lineIndex,numberIndex:numberIndex+5] = thisFive\n return 'passivate rate: ' + str(count/(shape[0]*shape[1])) + '%'\n\n\ntwoWayTreat()\ncompressImg()\npas = passivate()\ndebugImg()\np = getCode()\ntranslate.setSize(imgData.shape)\n\nwith open('draw.java','w') as f:\n f.write(translate.upper_code)\n f.write(p)\n f.write(translate.lower_code)\n\ntry:\n print('==================')\n print('compressRate: ' + str(compressRate))\n print('passivateRate: ' + str(pas))\n print('size: ' + str(imgData.shape))\n print('==================')\nexcept Exception:\n print('cannot print out the post-info!')\n\nf.close()\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.array",
"scipy.stats.zscore",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
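`treat.py` binarises the image by z-scoring it and thresholding at zero, looping over every pixel. The same operation expressed as a vectorised threshold on a toy input:

```python
import numpy as np
from scipy import stats

img = np.random.randint(0, 256, size=(8, 8)).astype(float)

z = stats.zscore(img)               # default axis=0: standardise each column
binary = np.where(z < 0, 0, 255)    # negative z-score -> black, otherwise white
print(binary)
```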
MinhTuDo/MD-MOENAS | [
"edd6ec8c3f89cfbe9674873425c5056e72899edb"
] | [
"procedure/problem/efficiency_performance/mo_nats.py"
] | [
"from procedure.problem.base import nats as base\n\nimport numpy as np\n\nclass EfficiencyAccuracyNATS(base.NATS):\n def __init__(self, efficiency, **kwargs):\n super().__init__(n_obj=2, **kwargs)\n self.msg += efficiency + '={:.3f}, ' + 'valid-error' + '={:.3f}'\n self.efficiency = efficiency\n\n def _calc_F(self, genotype, **kwargs):\n accuracy, latency, _, runtime = self.api.simulate_train_eval(\n genotype, self.dataset, iepoch=self.epoch, hp=self.api.full_train_epochs\n )\n\n idx = self.api.query_index_by_arch(genotype)\n cost_info = self.api.get_cost_info(idx, self.dataset, hp=self.api.full_train_epochs)\n params, flops = cost_info['params'], cost_info['flops']\n \n efficiency = eval(self.efficiency)\n error = 100 - accuracy\n\n F = [efficiency, error]\n return F, runtime\n\n\n def _convert_to_pf_space(self, X):\n F = []\n dataset = self.pf_dict['dataset']\n for x in X:\n genotype = self._decode(x)\n idx = self.api.query_index_by_arch(genotype)\n efficiency = self.api.get_cost_info(\n idx, dataset, hp=self.api.full_train_epochs\n )[self.efficiency]\n acc = self.api.get_more_info(\n idx, dataset, hp=self.api.full_train_epochs, is_random=False\n )['test-accuracy']\n err = 100 - acc\n f = [efficiency, err]\n F += [np.column_stack(f)]\n F = np.row_stack(F)\n return F\n\nclass MDEfficiencyAccuracyNATS(base.NATS):\n def __init__(self, efficiency, **kwargs):\n super().__init__(n_obj=2, **kwargs)\n self.msg += 'avg-' + efficiency + '={:.3f}, avg-val-err={:.3f}'\n self.efficiency = efficiency\n \n\n def _calc_F(self, genotype, **kwargs):\n idx = self.api.query_index_by_arch(genotype)\n\n efficiency = []; runtime = []; accuracy = []\n for dts in self.dataset:\n _accuracy, latency, _, _runtime = self.api.simulate_train_eval(\n genotype, dataset=dts, iepoch=self.epoch, hp=self.api.full_train_epochs\n )\n\n idx = self.api.query_index_by_arch(genotype)\n cost_info = self.api.get_cost_info(idx, dts, hp=self.api.full_train_epochs)\n params, flops = cost_info['params'], cost_info['flops']\n \n _efficiency = eval(self.efficiency)\n efficiency += [_efficiency]\n runtime += [_runtime]\n accuracy += [_accuracy]\n\n efficiency = np.mean(efficiency)\n runtime = sum(runtime)\n accuracy = np.mean(accuracy)\n\n err = 100 - accuracy\n\n F = [efficiency, err]\n\n return F, runtime\n\n def _convert_to_pf_space(self, X):\n F = []\n dataset = self.pf_dict['dataset']\n \n for x in X:\n genotype = self._decode(x)\n idx = self.api.query_index_by_arch(genotype)\n efficiency = self.api.get_cost_info(idx, dataset, hp=self.api.full_train_epochs)[self.efficiency]\n acc = \\\n self.api.get_more_info(idx, dataset, hp=self.api.full_train_epochs, is_random=False)['test-accuracy']\n err = 100 - acc\n f = [efficiency, err]\n F += [np.column_stack(f)]\n F = np.row_stack(F)\n return F\n \n \n"
] | [
[
"numpy.row_stack",
"numpy.mean",
"numpy.column_stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
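`mo_nats.py` collects one `[efficiency, error]` pair per architecture and assembles them with `np.column_stack` / `np.row_stack` into an (N, 2) objective matrix for the Pareto front. The stacking step on made-up numbers:

```python
import numpy as np

pairs = [(0.8, 8.4), (1.2, 7.1), (0.5, 9.9)]       # made-up (efficiency, error) values

F = []
for eff, err in pairs:
    F += [np.column_stack([eff, err])]             # each pair becomes a (1, 2) row
F = np.row_stack(F)                                # stack rows into an (N, 2) matrix
print(F.shape)                                     # (3, 2)
```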
Gaskell-1206/MSI_vs_MSS_Classification | [
"be6fd8a6961624367b2bb0e1299219e940f6f418"
] | [
"Step2_Training_MIL/train_MIL_classification_trained_cnn_models.py"
] | [
"# Run MIL classification use pretrained CNN models\n# Reference: 1.Campanella, G. et al. Clinical-grade computational pathology using weakly supervised\n# deep learning on whole slide images. Nat Med 25, 1301–1309 (2019).\n# doi:10.1038/s41591-019-0508-1. Available from http://www.nature.com/articles/s41591-019-0508-1\n# The source codes of the referenced paper available at https://github.com/MSKCC-Computational-Pathology/MIL-nature-medicine-2019\n# This code was modified by Shengjia Chen for our work.\nimport argparse\nimport os\nimport random\nimport sys\nfrom pathlib import Path\nfrom types import SimpleNamespace\nfrom typing import Callable, Optional, Union\nfrom urllib.error import HTTPError\nimport glob\nimport numpy as np\nimport pandas as pd\nimport pytorch_lightning as pl\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom PIL import Image\nfrom pytorch_lightning.callbacks import (EarlyStopping, LearningRateMonitor,\n ModelCheckpoint)\nfrom pytorch_lightning.lite import LightningLite\nfrom pytorch_lightning.loops import Loop\nfrom skimage import io\nfrom sklearn.preprocessing import LabelEncoder\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import transforms\nfrom tqdm import tqdm\nsys.path.append('/gpfs/scratch/sc9295/digPath/MSI_vs_MSS_Classification/Step1_Training_MSI_MSS')\nfrom train_tile_level_classification import MSI_MSS_Module\nfrom sklearn.metrics import (auc, confusion_matrix, f1_score, roc_auc_score,\n roc_curve)\n\nbest_acc = 0\n\ndef inference(loader, model):\n model.eval()\n probs = torch.FloatTensor(len(loader.dataset))\n with torch.no_grad():\n for i, input in enumerate(loader):\n # print(\n # 'Inference\\tEpoch: [{}/{}]\\tBatch: [{}/{}]'.format(run+1, args.nepochs, i+1, len(loader)))\n output = F.softmax(model(input), dim=1)\n probs[i*args.batch_size:i*args.batch_size +\n input.size(0)] = output.detach()[:, 1].clone()\n return probs.cpu().numpy()\n\n\ndef train(run, loader, model, criterion, optimizer):\n model.train()\n running_loss = 0.\n for i, (input, target) in enumerate(loader):\n input = input.cuda()\n target = target.cuda()\n output = model(input)\n loss = criterion(output, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n running_loss += loss.item()*input.size(0)\n return running_loss/len(loader.dataset)\n\n\ndef calc_err(pred, real):\n pred = np.array(pred)\n real = np.array(real)\n pos = np.equal(pred, real)\n neq = np.not_equal(pred, real)\n acc = float(pos.sum())/pred.shape[0]\n err = float(neq.sum())/pred.shape[0]\n fpr = float(np.logical_and(pred == 1, neq).sum())/(real == 0).sum()\n fnr = float(np.logical_and(pred == 0, neq).sum())/(real == 1).sum()\n return acc, err, fpr, fnr\n\n\ndef group_argtopk(groups, data, k=1):\n # groups in slide, data is prob of each tile\n k = min(k,len(data))\n order = np.lexsort((data, groups))\n groups = groups[order]\n data = data[order]\n index = np.empty(len(groups), 'bool')\n index[-k:] = True\n index[:-k] = groups[k:] != groups[:-k]\n return list(order[index]) # output top prob tile index in each slide\n\n\ndef group_max(groups, data, nmax):\n out = np.empty(nmax)\n out[:] = np.nan\n order = np.lexsort((data, groups))\n groups = groups[order]\n data = data[order]\n index = np.empty(len(groups), 'bool')\n index[-1] = True\n index[:-1] = groups[1:] != groups[:-1]\n out[groups[index]] = data[index]\n return out\n\n\nclass 
MILdataset(Dataset):\n def __init__(self, libraryfile_dir='', root_dir='', dataset_mode='Train', transform=None, subset_rate=None):\n libraryfile_path = os.path.join(\n libraryfile_dir, f'CRC_DX_{dataset_mode}_ALL.csv')\n lib = pd.read_csv(libraryfile_path)\n lib = lib if subset_rate is None else lib.sample(\n frac=subset_rate, random_state=2022)\n lib = lib.sort_values(['subject_id'], ignore_index=True)\n lib.to_csv(os.path.join(libraryfile_dir,\n f'{dataset_mode}_temporary.csv'))\n slides = []\n for i, name in enumerate(lib['subject_id'].unique()):\n # sys.stdout.write(\n # 'Slides: [{}/{}]\\r'.format(i+1, len(lib['subject_id'].unique())))\n # sys.stdout.flush()\n slides.append(name)\n\n # Flatten grid\n grid = []\n slideIDX = []\n for i, g in enumerate(lib['subject_id'].unique()):\n tiles = lib[lib['subject_id'] == g]['slice_id']\n grid.extend(tiles)\n slideIDX.extend([i]*len(tiles))\n\n # print('Number of tiles: {}'.format(len(grid)))\n self.dataframe = self.load_data_and_get_class(lib)\n self.slidenames = list(lib['subject_id'].values)\n self.slides = slides\n self.targets = self.dataframe['Class']\n self.grid = grid\n self.slideIDX = slideIDX\n self.transform = transform\n self.root_dir = root_dir\n self.dset = f\"CRC_DX_{dataset_mode}\"\n\n def setmode(self, mode):\n self.mode = mode\n\n def maketraindata(self, idxs):\n self.t_data = [(self.slideIDX[x], self.grid[x],\n self.targets[x]) for x in idxs]\n\n def shuffletraindata(self):\n self.t_data = random.sample(self.t_data, len(self.t_data))\n\n def load_data_and_get_class(self, df):\n df.loc[df['label'] == 'MSI', 'Class'] = 1\n df.loc[df['label'] == 'MSS', 'Class'] = 0\n return df\n\n def __getitem__(self, index):\n if self.mode == 1:\n slideIDX = self.slideIDX[index]\n tile_id = self.grid[index]\n slide_id = self.slides[slideIDX]\n img_name = \"blk-{}-{}.png\".format(tile_id, slide_id)\n target = self.targets[index]\n label = 'CRC_DX_MSIMUT' if target == 1 else 'CRC_DX_MSS'\n img_path = os.path.join(self.root_dir, self.dset, label, img_name)\n img = io.imread(img_path)\n if self.transform is not None:\n img = self.transform(img)\n return img\n elif self.mode == 2:\n slideIDX, tile_id, target = self.t_data[index]\n slide_id = self.slides[slideIDX]\n label = 'CRC_DX_MSIMUT' if target == 1 else 'CRC_DX_MSS'\n img_name = \"blk-{}-{}.png\".format(tile_id, slide_id)\n img_path = os.path.join(self.root_dir, self.dset, label, img_name)\n img = io.imread(img_path)\n\n if self.transform is not None:\n img = self.transform(img)\n return img, target\n\n def __len__(self):\n if self.mode == 1:\n return len(self.grid)\n elif self.mode == 2:\n return len(self.t_data)\n\n\nclass Lite(LightningLite):\n\n def run(self, args):\n global best_acc\n print(args)\n\n self.seed_everything(2022)\n model_name = args.model_name\n sample_rate = args.sample_rate\n ckpt_path = os.path.join(args.model_path, f'{args.model_name}_bs{args.batch_size}_lr{args.learning_rate}')\n ckpt_file_path = glob.glob(os.path.join(ckpt_path,'*.ckpt'))[0]\n model = MSI_MSS_Module.load_from_checkpoint(ckpt_file_path)\n\n optimizer = torch.optim.AdamW(\n model.parameters(), lr=args.learning_rate, weight_decay=1e-4)\n if args.weights == 0.5:\n criterion = nn.CrossEntropyLoss()\n else:\n w = torch.Tensor([1-args.weights, args.weights])\n criterion = nn.CrossEntropyLoss(w)\n # Scale model and optimizers\n model, optimizer = self.setup(model, optimizer, move_to_device=True)\n\n DATA_MEANS = [0.485, 0.456, 0.406]\n DATA_STD = [0.229, 0.224, 0.225]\n\n train_transform = 
transforms.Compose([\n transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.RandomHorizontalFlip(),\n transforms.Normalize(DATA_MEANS, DATA_STD)])\n test_transform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize(DATA_MEANS, DATA_STD)])\n\n train_dataset = MILdataset(\n args.lib_dir, args.root_dir, 'Train', transform=train_transform, subset_rate=sample_rate)\n val_dataset = MILdataset(\n args.lib_dir, args.root_dir, 'Val', transform=test_transform, subset_rate=sample_rate)\n test_dataset = MILdataset(\n args.lib_dir, args.root_dir, 'Test', transform=test_transform, subset_rate=sample_rate)\n\n train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.num_workers, pin_memory=True)\n val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.num_workers, pin_memory=True)\n test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.num_workers, pin_memory=True)\n train_dataloader, val_dataloader, test_dataloader = self.setup_dataloaders(\n train_dataloader, val_dataloader, test_dataloader, move_to_device=True)\n\n # open output file\n version_name = f'MIL_{model_name}_bs{args.batch_size}_lr{args.learning_rate}_w{args.weights}_k{args.k}_output'\n # logger\n output_path = os.path.join(args.output_path,version_name)\n writer = SummaryWriter(output_path)\n\n for epoch in tqdm(range(args.nepochs)):\n train_dataset.setmode(1)\n # print(\"train_set_len:\", len(train_dataloader.dataset))\n probs = inference(train_dataloader, model)\n # return the indices of topk tile(s) in each slides\n topk = group_argtopk(\n np.array(train_dataset.slideIDX), probs, args.k)\n train_dataset.maketraindata(topk)\n train_dataset.shuffletraindata()\n train_dataset.setmode(2)\n\n model.train()\n running_loss = 0.\n for i, (input, target) in enumerate(train_dataloader):\n output = model(input)\n loss = criterion(output, target.long())\n optimizer.zero_grad()\n self.backward(loss)\n optimizer.step()\n running_loss += loss.item()*input.size(0)\n\n train_loss = running_loss/len(train_dataloader.dataset)\n print(\n 'Training\\tEpoch: [{}/{}]\\tLoss: {}'.format(epoch+1, args.nepochs, train_loss))\n writer.add_scalar('train_loss', train_loss, epoch+1)\n\n\n # Validation\n if (epoch+1) % args.test_every == 0:\n val_dataset.setmode(1)\n probs = inference(val_dataloader, model)\n maxs = group_max(np.array(val_dataset.slideIDX),\n probs, len(val_dataset.targets))\n pred = [1 if x >= 0.5 else 0 for x in probs]\n val_acc, err, fpr, fnr = calc_err(pred, val_dataset.targets)\n\n print('Validation\\tEpoch: [{}/{}]\\t ACC: {}\\tError: {}\\tFPR: {}\\tFNR: {}'.format(\n epoch+1, args.nepochs, val_acc, err, fpr, fnr))\n\n writer.add_scalar('val_acc', val_acc, epoch+1)\n writer.add_scalar('fpr', fpr, epoch+1)\n writer.add_scalar('fnr', fnr, epoch+1)\n\n # Save best model\n err = (fpr+fnr)/2.\n if 1-err >= best_acc:\n best_acc = 1-err\n obj = {\n 'epoch': epoch+1,\n 'state_dict': model.state_dict(),\n 'best_acc': best_acc,\n 'optimizer': optimizer.state_dict()\n }\n torch.save(obj, os.path.join(output_path, 'checkpoint_best.pth'))\n\n # test\n ch = torch.load(os.path.join(output_path,'checkpoint_best.pth'))\n # load params\n model.load_state_dict(ch['state_dict'])\n model = model.cuda()\n cudnn.benchmark = True\n train_dataset.setmode(1)\n val_dataset.setmode(1)\n test_dataset.setmode(1)\n\n # Train\n probs = inference(train_dataloader, model)\n 
maxs = group_max(np.array(train_dataset.slideIDX), probs, len(train_dataset.targets))\n fp = open(os.path.join(output_path, f'Train_{version_name}.csv'), 'w')\n fp.write('slides,tiles,target,prediction,probability\\n')\n for slides, tiles, target, prob in zip(train_dataset.slidenames, train_dataset.grid, train_dataset.targets, probs):\n fp.write('{},{},{},{},{}\\n'.format(slides, tiles, target, int(prob>=0.5), prob))\n fp.close()\n\n # Val\n probs = inference(val_dataloader, model)\n maxs = group_max(np.array(val_dataset.slideIDX), probs, len(val_dataset.targets))\n fp = open(os.path.join(output_path, f'Val_{version_name}.csv'), 'w')\n fp.write('slides,tiles,target,prediction,probability\\n')\n for slides, tiles, target, prob in zip(val_dataset.slidenames, val_dataset.grid, val_dataset.targets, probs):\n fp.write('{},{},{},{},{}\\n'.format(slides, tiles, target, int(prob>=0.5), prob))\n fp.close()\n\n # Test\n probs = inference(test_dataloader, model)\n maxs = group_max(np.array(test_dataset.slideIDX), probs, len(test_dataset.targets))\n fp = open(os.path.join(output_path, f'Test_{version_name}.csv'), 'w')\n fp.write('slides,tiles,target,prediction,probability\\n')\n for slides, tiles, target, prob in zip(test_dataset.slidenames, test_dataset.grid, test_dataset.targets, probs):\n fp.write('{},{},{},{},{}\\n'.format(slides, tiles, target, int(prob>=0.5), prob))\n fp.close() \n\n pred = [1 if x >= 0.5 else 0 for x in probs]\n test_acc, err, fnr, fpr = calc_err(pred, test_dataset.targets)\n test_f1_score = f1_score(test_dataset.targets, pred, average='binary')\n\n try:\n test_auroc_score = roc_auc_score(test_dataset.targets, probs)\n writer.add_scalar(\"test_auroc_score\", test_auroc_score)\n except ValueError:\n writer.add_scalar('test_auroc_score', .0)\n\n writer.add_scalar('test_f1_score', test_f1_score) \n writer.add_scalar('test_acc', test_acc)\n\n\n\ndef main(args):\n Lite(devices=\"auto\", accelerator=\"auto\").run(args)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n \"--root_dir\",\n type=Path,\n required=True,\n help=\"root directory of dataset\",\n )\n parser.add_argument(\n \"--lib_dir\",\n type=Path,\n required=True,\n help=\"root directory of libraryfile\",\n )\n parser.add_argument(\n \"--model_path\",\n type=Path,\n required=True,\n help=\"root directory of pretrained models\",\n )\n parser.add_argument(\n \"--output_path\",\n type=Path,\n required=True,\n help=\"output directory\",\n )\n parser.add_argument(\n \"--model_name\",\n default='alexnet',\n choices=('resnet18', 'resnet34', 'alexnet', 'vgg',\n 'squeezenet', 'densenet', 'inception'),\n type=str,\n help=\"model use for train\",\n )\n parser.add_argument(\n \"--sample_rate\",\n default=1,\n type=float,\n help=\"undersample rate\",\n )\n parser.add_argument(\n \"--batch_size\",\n default=128,\n type=int,\n help=\"batch size\",\n )\n parser.add_argument(\n \"--learning_rate\",\n default=1e-3,\n type=float,\n help=\"learning rate\",\n )\n parser.add_argument(\n \"--num_workers\",\n default=0,\n type=int,\n required=True,\n help=\"number of workers\",\n )\n parser.add_argument(\n \"--nepochs\",\n default=50,\n type=int,\n help=\"training epoch\",\n )\n parser.add_argument(\n '--test_every',\n default=1,\n type=int,\n help='test on val every (default: 10)')\n\n parser.add_argument(\n \"--weights\",\n default=0.5,\n type=float,\n help=\"unbalanced positive class weight (default: 0.5, balanced classes)\",\n )\n\n 
parser.add_argument(\n \"--k\",\n default=1,\n type=int,\n help=\"top k tiles are assumed to be of the same class as the slide (default: 1, standard MIL)\",\n )\n \n args = parser.parse_args()\n main(args)\n"
] | [
[
"sklearn.metrics.roc_auc_score",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.Tensor",
"numpy.logical_and",
"torch.utils.data.DataLoader",
"numpy.lexsort",
"torch.no_grad",
"numpy.equal",
"torch.utils.tensorboard.SummaryWriter",
"numpy.not_equal",
"sklearn.metrics.f1_score",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
siddhantwahal/scipy | [
"411fbbda0f942fcce3e4b314efb11c4553baaa7c"
] | [
"scipy/stats/_distn_infrastructure.py"
] | [
"#\n# Author: Travis Oliphant 2002-2011 with contributions from\n# SciPy Developers 2004-2011\n#\nfrom scipy._lib._util import getfullargspec_no_self as _getfullargspec\n\nimport sys\nimport keyword\nimport re\nimport types\nimport warnings\nimport inspect\nfrom itertools import zip_longest\n\nfrom scipy._lib import doccer\nfrom ._distr_params import distcont, distdiscrete\nfrom scipy._lib._util import check_random_state\nfrom scipy._lib._util import _valarray as valarray\n\nfrom scipy.special import (comb, chndtr, entr, rel_entr, xlogy, ive)\n\n# for root finding for continuous distribution ppf, and max likelihood estimation\nfrom scipy import optimize\n\n# for functions of continuous distributions (e.g. moments, entropy, cdf)\nfrom scipy import integrate\n\n# to approximate the pdf of a continuous distribution given its cdf\nfrom scipy.misc import derivative\n\nfrom numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,\n logical_and, log, sqrt, place, argmax, vectorize, asarray,\n nan, inf, isinf, NINF, empty)\n\nimport numpy as np\n\nfrom ._constants import _XMAX\n\n\n# These are the docstring parts used for substitution in specific\n# distribution docstrings\n\ndocheaders = {'methods': \"\"\"\\nMethods\\n-------\\n\"\"\",\n 'notes': \"\"\"\\nNotes\\n-----\\n\"\"\",\n 'examples': \"\"\"\\nExamples\\n--------\\n\"\"\"}\n\n_doc_rvs = \"\"\"\\\nrvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)\n Random variates.\n\"\"\"\n_doc_pdf = \"\"\"\\\npdf(x, %(shapes)s, loc=0, scale=1)\n Probability density function.\n\"\"\"\n_doc_logpdf = \"\"\"\\\nlogpdf(x, %(shapes)s, loc=0, scale=1)\n Log of the probability density function.\n\"\"\"\n_doc_pmf = \"\"\"\\\npmf(k, %(shapes)s, loc=0, scale=1)\n Probability mass function.\n\"\"\"\n_doc_logpmf = \"\"\"\\\nlogpmf(k, %(shapes)s, loc=0, scale=1)\n Log of the probability mass function.\n\"\"\"\n_doc_cdf = \"\"\"\\\ncdf(x, %(shapes)s, loc=0, scale=1)\n Cumulative distribution function.\n\"\"\"\n_doc_logcdf = \"\"\"\\\nlogcdf(x, %(shapes)s, loc=0, scale=1)\n Log of the cumulative distribution function.\n\"\"\"\n_doc_sf = \"\"\"\\\nsf(x, %(shapes)s, loc=0, scale=1)\n Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).\n\"\"\"\n_doc_logsf = \"\"\"\\\nlogsf(x, %(shapes)s, loc=0, scale=1)\n Log of the survival function.\n\"\"\"\n_doc_ppf = \"\"\"\\\nppf(q, %(shapes)s, loc=0, scale=1)\n Percent point function (inverse of ``cdf`` --- percentiles).\n\"\"\"\n_doc_isf = \"\"\"\\\nisf(q, %(shapes)s, loc=0, scale=1)\n Inverse survival function (inverse of ``sf``).\n\"\"\"\n_doc_moment = \"\"\"\\\nmoment(n, %(shapes)s, loc=0, scale=1)\n Non-central moment of order n\n\"\"\"\n_doc_stats = \"\"\"\\\nstats(%(shapes)s, loc=0, scale=1, moments='mv')\n Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').\n\"\"\"\n_doc_entropy = \"\"\"\\\nentropy(%(shapes)s, loc=0, scale=1)\n (Differential) entropy of the RV.\n\"\"\"\n_doc_fit = \"\"\"\\\nfit(data)\n Parameter estimates for generic data.\n See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the\n keyword arguments.\n\"\"\"\n_doc_expect = \"\"\"\\\nexpect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)\n Expected value of a function (of one argument) with respect to the distribution.\n\"\"\"\n_doc_expect_discrete = \"\"\"\\\nexpect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, 
conditional=False)\n Expected value of a function (of one argument) with respect to the distribution.\n\"\"\"\n_doc_median = \"\"\"\\\nmedian(%(shapes)s, loc=0, scale=1)\n Median of the distribution.\n\"\"\"\n_doc_mean = \"\"\"\\\nmean(%(shapes)s, loc=0, scale=1)\n Mean of the distribution.\n\"\"\"\n_doc_var = \"\"\"\\\nvar(%(shapes)s, loc=0, scale=1)\n Variance of the distribution.\n\"\"\"\n_doc_std = \"\"\"\\\nstd(%(shapes)s, loc=0, scale=1)\n Standard deviation of the distribution.\n\"\"\"\n_doc_interval = \"\"\"\\\ninterval(alpha, %(shapes)s, loc=0, scale=1)\n Endpoints of the range that contains alpha percent of the distribution\n\"\"\"\n_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,\n _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,\n _doc_logsf, _doc_ppf, _doc_isf, _doc_moment,\n _doc_stats, _doc_entropy, _doc_fit,\n _doc_expect, _doc_median,\n _doc_mean, _doc_var, _doc_std, _doc_interval])\n\n_doc_default_longsummary = \"\"\"\\\nAs an instance of the `rv_continuous` class, `%(name)s` object inherits from it\na collection of generic methods (see below for the full list),\nand completes them with details specific for this particular distribution.\n\"\"\"\n\n_doc_default_frozen_note = \"\"\"\nAlternatively, the object may be called (as a function) to fix the shape,\nlocation, and scale parameters returning a \"frozen\" continuous RV object:\n\nrv = %(name)s(%(shapes)s, loc=0, scale=1)\n - Frozen RV object with the same methods but holding the given shape,\n location, and scale fixed.\n\"\"\"\n_doc_default_example = \"\"\"\\\nExamples\n--------\n>>> from scipy.stats import %(name)s\n>>> import matplotlib.pyplot as plt\n>>> fig, ax = plt.subplots(1, 1)\n\nCalculate a few first moments:\n\n%(set_vals_stmt)s\n>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')\n\nDisplay the probability density function (``pdf``):\n\n>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),\n... %(name)s.ppf(0.99, %(shapes)s), 100)\n>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),\n... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')\n\nAlternatively, the distribution object can be called (as a function)\nto fix the shape, location and scale parameters. This returns a \"frozen\"\nRV object holding the given parameters fixed.\n\nFreeze the distribution and display the frozen ``pdf``:\n\n>>> rv = %(name)s(%(shapes)s)\n>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')\n\nCheck accuracy of ``cdf`` and ``ppf``:\n\n>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)\n>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))\nTrue\n\nGenerate random numbers:\n\n>>> r = %(name)s.rvs(%(shapes)s, size=1000)\n\nAnd compare the histogram:\n\n>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)\n>>> ax.legend(loc='best', frameon=False)\n>>> plt.show()\n\n\"\"\"\n\n_doc_default_locscale = \"\"\"\\\nThe probability density above is defined in the \"standardized\" form. 
To shift\nand/or scale the distribution use the ``loc`` and ``scale`` parameters.\nSpecifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically\nequivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with\n``y = (x - loc) / scale``.\n\"\"\"\n\n_doc_default = ''.join([_doc_default_longsummary,\n _doc_allmethods,\n '\\n',\n _doc_default_example])\n\n_doc_default_before_notes = ''.join([_doc_default_longsummary,\n _doc_allmethods])\n\ndocdict = {\n 'rvs': _doc_rvs,\n 'pdf': _doc_pdf,\n 'logpdf': _doc_logpdf,\n 'cdf': _doc_cdf,\n 'logcdf': _doc_logcdf,\n 'sf': _doc_sf,\n 'logsf': _doc_logsf,\n 'ppf': _doc_ppf,\n 'isf': _doc_isf,\n 'stats': _doc_stats,\n 'entropy': _doc_entropy,\n 'fit': _doc_fit,\n 'moment': _doc_moment,\n 'expect': _doc_expect,\n 'interval': _doc_interval,\n 'mean': _doc_mean,\n 'std': _doc_std,\n 'var': _doc_var,\n 'median': _doc_median,\n 'allmethods': _doc_allmethods,\n 'longsummary': _doc_default_longsummary,\n 'frozennote': _doc_default_frozen_note,\n 'example': _doc_default_example,\n 'default': _doc_default,\n 'before_notes': _doc_default_before_notes,\n 'after_notes': _doc_default_locscale\n}\n\n# Reuse common content between continuous and discrete docs, change some\n# minor bits.\ndocdict_discrete = docdict.copy()\n\ndocdict_discrete['pmf'] = _doc_pmf\ndocdict_discrete['logpmf'] = _doc_logpmf\ndocdict_discrete['expect'] = _doc_expect_discrete\n_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',\n 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',\n 'mean', 'var', 'std', 'interval']\nfor obj in _doc_disc_methods:\n docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')\n\n_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']\nfor obj in _doc_disc_methods_err_varname:\n docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')\n\ndocdict_discrete.pop('pdf')\ndocdict_discrete.pop('logpdf')\n\n_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])\ndocdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods\n\ndocdict_discrete['longsummary'] = _doc_default_longsummary.replace(\n 'rv_continuous', 'rv_discrete')\n\n_doc_default_frozen_note = \"\"\"\nAlternatively, the object may be called (as a function) to fix the shape and\nlocation parameters returning a \"frozen\" discrete RV object:\n\nrv = %(name)s(%(shapes)s, loc=0)\n - Frozen RV object with the same methods but holding the given shape and\n location fixed.\n\"\"\"\ndocdict_discrete['frozennote'] = _doc_default_frozen_note\n\n_doc_default_discrete_example = \"\"\"\\\nExamples\n--------\n>>> from scipy.stats import %(name)s\n>>> import matplotlib.pyplot as plt\n>>> fig, ax = plt.subplots(1, 1)\n\nCalculate a few first moments:\n\n%(set_vals_stmt)s\n>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')\n\nDisplay the probability mass function (``pmf``):\n\n>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),\n... %(name)s.ppf(0.99, %(shapes)s))\n>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')\n>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)\n\nAlternatively, the distribution object can be called (as a function)\nto fix the shape and location. This returns a \"frozen\" RV object holding\nthe given parameters fixed.\n\nFreeze the distribution and display the frozen ``pmf``:\n\n>>> rv = %(name)s(%(shapes)s)\n>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,\n... 
label='frozen pmf')\n>>> ax.legend(loc='best', frameon=False)\n>>> plt.show()\n\nCheck accuracy of ``cdf`` and ``ppf``:\n\n>>> prob = %(name)s.cdf(x, %(shapes)s)\n>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))\nTrue\n\nGenerate random numbers:\n\n>>> r = %(name)s.rvs(%(shapes)s, size=1000)\n\"\"\"\n\n\n_doc_default_discrete_locscale = \"\"\"\\\nThe probability mass function above is defined in the \"standardized\" form.\nTo shift distribution use the ``loc`` parameter.\nSpecifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically\nequivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.\n\"\"\"\n\ndocdict_discrete['example'] = _doc_default_discrete_example\ndocdict_discrete['after_notes'] = _doc_default_discrete_locscale\n\n_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],\n docdict_discrete['allmethods']])\ndocdict_discrete['before_notes'] = _doc_default_before_notes\n\n_doc_default_disc = ''.join([docdict_discrete['longsummary'],\n docdict_discrete['allmethods'],\n docdict_discrete['frozennote'],\n docdict_discrete['example']])\ndocdict_discrete['default'] = _doc_default_disc\n\n# clean up all the separate docstring elements, we do not need them anymore\nfor obj in [s for s in dir() if s.startswith('_doc_')]:\n exec('del ' + obj)\ndel obj\n\n\ndef _moment(data, n, mu=None):\n if mu is None:\n mu = data.mean()\n return ((data - mu)**n).mean()\n\n\ndef _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):\n if (n == 0):\n return 1.0\n elif (n == 1):\n if mu is None:\n val = moment_func(1, *args)\n else:\n val = mu\n elif (n == 2):\n if mu2 is None or mu is None:\n val = moment_func(2, *args)\n else:\n val = mu2 + mu*mu\n elif (n == 3):\n if g1 is None or mu2 is None or mu is None:\n val = moment_func(3, *args)\n else:\n mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment\n val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment\n elif (n == 4):\n if g1 is None or g2 is None or mu2 is None or mu is None:\n val = moment_func(4, *args)\n else:\n mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment\n mu3 = g1*np.power(mu2, 1.5) # 3rd central moment\n val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu\n else:\n val = moment_func(n, *args)\n\n return val\n\n\ndef _skew(data):\n \"\"\"\n skew is third central moment / variance**(1.5)\n \"\"\"\n data = np.ravel(data)\n mu = data.mean()\n m2 = ((data - mu)**2).mean()\n m3 = ((data - mu)**3).mean()\n return m3 / np.power(m2, 1.5)\n\n\ndef _kurtosis(data):\n \"\"\"\n kurtosis is fourth central moment / variance**2 - 3\n \"\"\"\n data = np.ravel(data)\n mu = data.mean()\n m2 = ((data - mu)**2).mean()\n m4 = ((data - mu)**4).mean()\n return m4 / m2**2 - 3\n\n\n# Frozen RV class\nclass rv_frozen(object):\n\n def __init__(self, dist, *args, **kwds):\n self.args = args\n self.kwds = kwds\n\n # create a new instance\n self.dist = dist.__class__(**dist._updated_ctor_param())\n\n shapes, _, _ = self.dist._parse_args(*args, **kwds)\n self.a, self.b = self.dist._get_support(*shapes)\n\n @property\n def random_state(self):\n return self.dist._random_state\n\n @random_state.setter\n def random_state(self, seed):\n self.dist._random_state = check_random_state(seed)\n\n def pdf(self, x): # raises AttributeError in frozen discrete distribution\n return self.dist.pdf(x, *self.args, **self.kwds)\n\n def logpdf(self, x):\n return self.dist.logpdf(x, *self.args, **self.kwds)\n\n def cdf(self, x):\n return self.dist.cdf(x, *self.args, **self.kwds)\n\n def logcdf(self, x):\n return self.dist.logcdf(x, *self.args, **self.kwds)\n\n def ppf(self, q):\n return 
self.dist.ppf(q, *self.args, **self.kwds)\n\n def isf(self, q):\n return self.dist.isf(q, *self.args, **self.kwds)\n\n def rvs(self, size=None, random_state=None):\n kwds = self.kwds.copy()\n kwds.update({'size': size, 'random_state': random_state})\n return self.dist.rvs(*self.args, **kwds)\n\n def sf(self, x):\n return self.dist.sf(x, *self.args, **self.kwds)\n\n def logsf(self, x):\n return self.dist.logsf(x, *self.args, **self.kwds)\n\n def stats(self, moments='mv'):\n kwds = self.kwds.copy()\n kwds.update({'moments': moments})\n return self.dist.stats(*self.args, **kwds)\n\n def median(self):\n return self.dist.median(*self.args, **self.kwds)\n\n def mean(self):\n return self.dist.mean(*self.args, **self.kwds)\n\n def var(self):\n return self.dist.var(*self.args, **self.kwds)\n\n def std(self):\n return self.dist.std(*self.args, **self.kwds)\n\n def moment(self, n):\n return self.dist.moment(n, *self.args, **self.kwds)\n\n def entropy(self):\n return self.dist.entropy(*self.args, **self.kwds)\n\n def pmf(self, k):\n return self.dist.pmf(k, *self.args, **self.kwds)\n\n def logpmf(self, k):\n return self.dist.logpmf(k, *self.args, **self.kwds)\n\n def interval(self, alpha):\n return self.dist.interval(alpha, *self.args, **self.kwds)\n\n def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):\n # expect method only accepts shape parameters as positional args\n # hence convert self.args, self.kwds, also loc/scale\n # See the .expect method docstrings for the meaning of\n # other parameters.\n a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)\n if isinstance(self.dist, rv_discrete):\n return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)\n else:\n return self.dist.expect(func, a, loc, scale, lb, ub,\n conditional, **kwds)\n\n def support(self):\n return self.dist.support(*self.args, **self.kwds)\n\n\n# This should be rewritten\ndef argsreduce(cond, *args):\n \"\"\"Return the sequence of ravel(args[i]) where ravel(condition) is\n True in 1D.\n\n Examples\n --------\n >>> import numpy as np\n >>> rand = np.random.random_sample\n >>> A = rand((4, 5))\n >>> B = 2\n >>> C = rand((1, 5))\n >>> cond = np.ones(A.shape)\n >>> [A1, B1, C1] = argsreduce(cond, A, B, C)\n >>> B1.shape\n (20,)\n >>> cond[2,:] = 0\n >>> [A2, B2, C2] = argsreduce(cond, A, B, C)\n >>> B2.shape\n (15,)\n\n \"\"\"\n newargs = np.atleast_1d(*args)\n if not isinstance(newargs, list):\n newargs = [newargs, ]\n expand_arr = (cond == cond)\n return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]\n\n\nparse_arg_template = \"\"\"\ndef _parse_args(self, %(shape_arg_str)s %(locscale_in)s):\n return (%(shape_arg_str)s), %(locscale_out)s\n\ndef _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):\n return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)\n\ndef _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):\n return (%(shape_arg_str)s), %(locscale_out)s, moments\n\"\"\"\n\n\n# Both the continuous and discrete distributions depend on ncx2.\n# The function name ncx2 is an abbreviation for noncentral chi squared.\n\ndef _ncx2_log_pdf(x, df, nc):\n # We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the\n # factor of exp(-xs*ns) into the ive function to improve numerical\n # stability at large values of xs. 
See also `rice.pdf`.\n df2 = df/2.0 - 1.0\n xs, ns = np.sqrt(x), np.sqrt(nc)\n res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2\n res += np.log(ive(df2, xs*ns) / 2.0)\n return res\n\n\ndef _ncx2_pdf(x, df, nc):\n return np.exp(_ncx2_log_pdf(x, df, nc))\n\n\ndef _ncx2_cdf(x, df, nc):\n return chndtr(x, df, nc)\n\n\nclass rv_generic(object):\n \"\"\"Class which encapsulates common functionality between rv_discrete\n and rv_continuous.\n\n \"\"\"\n def __init__(self, seed=None):\n super(rv_generic, self).__init__()\n\n # figure out if _stats signature has 'moments' keyword\n sig = _getfullargspec(self._stats)\n self._stats_has_moments = ((sig.varkw is not None) or\n ('moments' in sig.args) or\n ('moments' in sig.kwonlyargs))\n self._random_state = check_random_state(seed)\n\n # For historical reasons, `size` was made an attribute that was read\n # inside _rvs(). The code is being changed so that 'size' is an argument\n # to self._rvs(). However some external (non-SciPy) distributions have not\n # been updated. Maintain backwards compatibility by checking if\n # the self._rvs() signature has the 'size' keyword, or a **kwarg,\n # and if not set self._size inside self.rvs() before calling self._rvs().\n argspec = inspect.getfullargspec(self._rvs)\n self._rvs_uses_size_attribute = (argspec.varkw is None and\n 'size' not in argspec.args and\n 'size' not in argspec.kwonlyargs)\n # Warn on first use only\n self._rvs_size_warned = False\n\n @property\n def random_state(self):\n \"\"\" Get or set the RandomState object for generating random variates.\n\n This can be either None, int, a RandomState instance, or a\n np.random.Generator instance.\n\n If None (or np.random), use the RandomState singleton used by np.random.\n If already a RandomState or Generator instance, use it.\n If an int, use a new RandomState instance seeded with seed.\n\n \"\"\"\n return self._random_state\n\n @random_state.setter\n def random_state(self, seed):\n self._random_state = check_random_state(seed)\n\n def __getstate__(self):\n return self._updated_ctor_param(), self._random_state\n\n def __setstate__(self, state):\n ctor_param, r = state\n self.__init__(**ctor_param)\n self._random_state = r\n return self\n\n def _construct_argparser(\n self, meths_to_inspect, locscale_in, locscale_out):\n \"\"\"Construct the parser for the shape arguments.\n\n Generates the argument-parsing functions dynamically and attaches\n them to the instance.\n Is supposed to be called in __init__ of a class for each distribution.\n\n If self.shapes is a non-empty string, interprets it as a\n comma-separated list of shape parameters.\n\n Otherwise inspects the call signatures of `meths_to_inspect`\n and constructs the argument-parsing functions from these.\n In this case also sets `shapes` and `numargs`.\n \"\"\"\n\n if self.shapes:\n # sanitize the user-supplied shapes\n if not isinstance(self.shapes, str):\n raise TypeError('shapes must be a string.')\n\n shapes = self.shapes.replace(',', ' ').split()\n\n for field in shapes:\n if keyword.iskeyword(field):\n raise SyntaxError('keywords cannot be used as shapes.')\n if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):\n raise SyntaxError(\n 'shapes must be valid python identifiers')\n else:\n # find out the call signatures (_pdf, _cdf etc), deduce shape\n # arguments. 
Generic methods only have 'self, x', any further args\n # are shapes.\n shapes_list = []\n for meth in meths_to_inspect:\n shapes_args = _getfullargspec(meth) # NB: does not contain self\n args = shapes_args.args[1:] # peel off 'x', too\n\n if args:\n shapes_list.append(args)\n\n # *args or **kwargs are not allowed w/automatic shapes\n if shapes_args.varargs is not None:\n raise TypeError(\n '*args are not allowed w/out explicit shapes')\n if shapes_args.varkw is not None:\n raise TypeError(\n '**kwds are not allowed w/out explicit shapes')\n if shapes_args.kwonlyargs:\n raise TypeError(\n 'kwonly args are not allowed w/out explicit shapes')\n if shapes_args.defaults is not None:\n raise TypeError('defaults are not allowed for shapes')\n\n if shapes_list:\n shapes = shapes_list[0]\n\n # make sure the signatures are consistent\n for item in shapes_list:\n if item != shapes:\n raise TypeError('Shape arguments are inconsistent.')\n else:\n shapes = []\n\n # have the arguments, construct the method from template\n shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None\n dct = dict(shape_arg_str=shapes_str,\n locscale_in=locscale_in,\n locscale_out=locscale_out,\n )\n ns = {}\n exec(parse_arg_template % dct, ns)\n # NB: attach to the instance, not class\n for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:\n setattr(self, name, types.MethodType(ns[name], self))\n\n self.shapes = ', '.join(shapes) if shapes else None\n if not hasattr(self, 'numargs'):\n # allows more general subclassing with *args\n self.numargs = len(shapes)\n\n def _construct_doc(self, docdict, shapes_vals=None):\n \"\"\"Construct the instance docstring with string substitutions.\"\"\"\n tempdict = docdict.copy()\n tempdict['name'] = self.name or 'distname'\n tempdict['shapes'] = self.shapes or ''\n\n if shapes_vals is None:\n shapes_vals = ()\n vals = ', '.join('%.3g' % val for val in shapes_vals)\n tempdict['vals'] = vals\n\n tempdict['shapes_'] = self.shapes or ''\n if self.shapes and self.numargs == 1:\n tempdict['shapes_'] += ','\n\n if self.shapes:\n tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)\n else:\n tempdict['set_vals_stmt'] = ''\n\n if self.shapes is None:\n # remove shapes from call parameters if there are none\n for item in ['default', 'before_notes']:\n tempdict[item] = tempdict[item].replace(\n \"\\n%(shapes)s : array_like\\n shape parameters\", \"\")\n for i in range(2):\n if self.shapes is None:\n # necessary because we use %(shapes)s in two forms (w w/o \", \")\n self.__doc__ = self.__doc__.replace(\"%(shapes)s, \", \"\")\n try:\n self.__doc__ = doccer.docformat(self.__doc__, tempdict)\n except TypeError as e:\n raise Exception(\"Unable to construct docstring for distribution \\\"%s\\\": %s\" % (self.name, repr(e)))\n\n # correct for empty shapes\n self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')\n\n def _construct_default_doc(self, longname=None, extradoc=None,\n docdict=None, discrete='continuous'):\n \"\"\"Construct instance docstring from the default template.\"\"\"\n if longname is None:\n longname = 'A'\n if extradoc is None:\n extradoc = ''\n if extradoc.startswith('\\n\\n'):\n extradoc = extradoc[2:]\n self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),\n '\\n\\n%(before_notes)s\\n', docheaders['notes'],\n extradoc, '\\n%(example)s'])\n self._construct_doc(docdict)\n\n def freeze(self, *args, **kwds):\n \"\"\"Freeze the distribution for the given arguments.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution. Should include all\n the non-optional arguments, may include ``loc`` and ``scale``.\n\n Returns\n -------\n rv_frozen : rv_frozen instance\n The frozen distribution.\n\n \"\"\"\n return rv_frozen(self, *args, **kwds)\n\n def __call__(self, *args, **kwds):\n return self.freeze(*args, **kwds)\n __call__.__doc__ = freeze.__doc__\n\n # The actual calculation functions (no basic checking need be done)\n # If these are defined, the others won't be looked at.\n # Otherwise, the other set can be defined.\n def _stats(self, *args, **kwds):\n return None, None, None, None\n\n # Noncentral moments (also known as the moment about the origin).\n # Expressed in LaTeX, munp would be $\\mu'_{n}$, i.e. \"mu-sub-n-prime\".\n # The primed mu is a widely used notation for the noncentral moment.\n def _munp(self, n, *args):\n # Silence floating point warnings from integration.\n with np.errstate(all='ignore'):\n vals = self.generic_moment(n, *args)\n return vals\n\n def _argcheck_rvs(self, *args, **kwargs):\n # Handle broadcasting and size validation of the rvs method.\n # Subclasses should not have to override this method.\n # The rule is that if `size` is not None, then `size` gives the\n # shape of the result (integer values of `size` are treated as\n # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)\n #\n # `args` is expected to contain the shape parameters (if any), the\n # location and the scale in a flat tuple (e.g. if there are two\n # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).\n # The only keyword argument expected is 'size'.\n size = kwargs.get('size', None)\n all_bcast = np.broadcast_arrays(*args)\n\n def squeeze_left(a):\n while a.ndim > 0 and a.shape[0] == 1:\n a = a[0]\n return a\n\n # Eliminate trivial leading dimensions. In the convention\n # used by numpy's random variate generators, trivial leading\n # dimensions are effectively ignored. In other words, when `size`\n # is given, trivial leading dimensions of the broadcast parameters\n # in excess of the number of dimensions in size are ignored, e.g.\n # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)\n # array([ 1.00104267, 3.00422496, 4.99799278])\n # If `size` is not given, the exact broadcast shape is preserved:\n # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])\n # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])\n #\n all_bcast = [squeeze_left(a) for a in all_bcast]\n bcast_shape = all_bcast[0].shape\n bcast_ndim = all_bcast[0].ndim\n\n if size is None:\n size_ = bcast_shape\n else:\n size_ = tuple(np.atleast_1d(size))\n\n # Check compatibility of size_ with the broadcast shape of all\n # the parameters. This check is intended to be consistent with\n # how the numpy random variate generators (e.g. np.random.normal,\n # np.random.beta) handle their arguments. The rule is that, if size\n # is given, it determines the shape of the output. Broadcasting\n # can't change the output size.\n\n # This is the standard broadcasting convention of extending the\n # shape with fewer dimensions with enough dimensions of length 1\n # so that the two shapes have the same number of dimensions.\n ndiff = bcast_ndim - len(size_)\n if ndiff < 0:\n bcast_shape = (1,)*(-ndiff) + bcast_shape\n elif ndiff > 0:\n size_ = (1,)*ndiff + size_\n\n # This compatibility test is not standard. In \"regular\" broadcasting,\n # two shapes are compatible if for each dimension, the lengths are the\n # same or one of the lengths is 1. 
Here, the length of a dimension in\n # size_ must not be less than the corresponding length in bcast_shape.\n ok = all([bcdim == 1 or bcdim == szdim\n for (bcdim, szdim) in zip(bcast_shape, size_)])\n if not ok:\n raise ValueError(\"size does not match the broadcast shape of \"\n \"the parameters. %s, %s, %s\" % (size, size_, bcast_shape))\n\n param_bcast = all_bcast[:-2]\n loc_bcast = all_bcast[-2]\n scale_bcast = all_bcast[-1]\n\n return param_bcast, loc_bcast, scale_bcast, size_\n\n ## These are the methods you must define (standard form functions)\n ## NB: generic _pdf, _logpdf, _cdf are different for\n ## rv_continuous and rv_discrete hence are defined in there\n def _argcheck(self, *args):\n \"\"\"Default check for correct values on args and keywords.\n\n Returns condition array of 1's where arguments are correct and\n 0's where they are not.\n\n \"\"\"\n cond = 1\n for arg in args:\n cond = logical_and(cond, (asarray(arg) > 0))\n return cond\n\n def _get_support(self, *args, **kwargs):\n \"\"\"Return the support of the (unscaled, unshifted) distribution.\n\n *Must* be overridden by distributions which have support dependent\n upon the shape parameters of the distribution. Any such override\n *must not* set or change any of the class members, as these members\n are shared amongst all instances of the distribution.\n\n Parameters\n ----------\n arg1, arg2, ... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n Returns\n -------\n a, b : numeric (float, or int or +/-np.inf)\n end-points of the distribution's support for the specified\n shape parameters.\n \"\"\"\n return self.a, self.b\n\n def _support_mask(self, x, *args):\n a, b = self._get_support(*args)\n with np.errstate(invalid='ignore'):\n return (a <= x) & (x <= b)\n\n def _open_support_mask(self, x, *args):\n a, b = self._get_support(*args)\n with np.errstate(invalid='ignore'):\n return (a < x) & (x < b)\n\n def _rvs(self, *args, size=None, random_state=None):\n # This method must handle size being a tuple, and it must\n # properly broadcast *args and size. size might be\n # an empty tuple, which means a scalar random variate is to be\n # generated.\n\n ## Use basic inverse cdf algorithm for RV generation as default.\n U = random_state.uniform(size=size)\n Y = self._ppf(U, *args)\n return Y\n\n def _logcdf(self, x, *args):\n with np.errstate(divide='ignore'):\n return log(self._cdf(x, *args))\n\n def _sf(self, x, *args):\n return 1.0-self._cdf(x, *args)\n\n def _logsf(self, x, *args):\n with np.errstate(divide='ignore'):\n return log(self._sf(x, *args))\n\n def _ppf(self, q, *args):\n return self._ppfvec(q, *args)\n\n def _isf(self, q, *args):\n return self._ppf(1.0-q, *args) # use correct _ppf for subclasses\n\n # These are actually called, and should not be overwritten if you\n # want to keep error checking.\n def rvs(self, *args, **kwds):\n \"\"\"\n Random variates of given type.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n scale : array_like, optional\n Scale parameter (default=1).\n size : int or tuple of ints, optional\n Defining number of random variates (default is 1).\n random_state : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional\n If `seed` is `None` the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is None.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of given `size`.\n\n \"\"\"\n discrete = kwds.pop('discrete', None)\n rndm = kwds.pop('random_state', None)\n args, loc, scale, size = self._parse_args_rvs(*args, **kwds)\n cond = logical_and(self._argcheck(*args), (scale >= 0))\n if not np.all(cond):\n raise ValueError(\"Domain error in arguments.\")\n\n if np.all(scale == 0):\n return loc*ones(size, 'd')\n\n # extra gymnastics needed for a custom random_state\n if rndm is not None:\n random_state_saved = self._random_state\n random_state = check_random_state(rndm)\n else:\n random_state = self._random_state\n\n # Maintain backwards compatibility by setting self._size\n # for distributions that still need it.\n if self._rvs_uses_size_attribute:\n if not self._rvs_size_warned:\n warnings.warn(\n f'The signature of {self._rvs} does not contain '\n f'a \"size\" keyword. Such signatures are deprecated.',\n np.VisibleDeprecationWarning)\n self._rvs_size_warned = True\n self._size = size\n self._random_state = random_state\n vals = self._rvs(*args)\n else:\n vals = self._rvs(*args, size=size, random_state=random_state)\n\n vals = vals * scale + loc\n\n # do not forget to restore the _random_state\n if rndm is not None:\n self._random_state = random_state_saved\n\n # Cast to int if discrete\n if discrete:\n if size == ():\n vals = int(vals)\n else:\n vals = vals.astype(int)\n\n return vals\n\n def stats(self, *args, **kwds):\n \"\"\"\n Some statistics of the given RV.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional (continuous RVs only)\n scale parameter (default=1)\n moments : str, optional\n composed of letters ['mvsk'] defining which moments to compute:\n 'm' = mean,\n 'v' = variance,\n 's' = (Fisher's) skew,\n 'k' = (Fisher's) kurtosis.\n (default is 'mv')\n\n Returns\n -------\n stats : sequence\n of requested moments.\n\n \"\"\"\n args, loc, scale, moments = self._parse_args_stats(*args, **kwds)\n # scale = 1 by construction for discrete RVs\n loc, scale = map(asarray, (loc, scale))\n args = tuple(map(asarray, args))\n cond = self._argcheck(*args) & (scale > 0) & (loc == loc)\n output = []\n default = valarray(shape(cond), self.badvalue)\n\n # Use only entries that are valid in calculation\n if np.any(cond):\n goodargs = argsreduce(cond, *(args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n\n if self._stats_has_moments:\n mu, mu2, g1, g2 = self._stats(*goodargs,\n **{'moments': moments})\n else:\n mu, mu2, g1, g2 = self._stats(*goodargs)\n if g1 is None:\n mu3 = None\n else:\n if mu2 is None:\n mu2 = self._munp(2, *goodargs)\n if g2 is None:\n # (mu2**1.5) breaks down for nan and inf\n mu3 = g1 * np.power(mu2, 1.5)\n\n if 'm' in moments:\n if mu is None:\n mu = self._munp(1, *goodargs)\n out0 = default.copy()\n place(out0, cond, mu * scale + loc)\n output.append(out0)\n\n if 'v' in moments:\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n # if mean is inf then var is also inf\n with np.errstate(invalid='ignore'):\n mu2 = np.where(np.isfinite(mu), mu2p - mu**2, np.inf)\n out0 = default.copy()\n place(out0, cond, mu2 * scale * scale)\n output.append(out0)\n\n if 's' in moments:\n if g1 is None:\n mu3p = self._munp(3, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n mu2 = mu2p - mu * mu\n with np.errstate(invalid='ignore'):\n mu3 = (-mu*mu - 3*mu2)*mu + mu3p\n g1 = mu3 / np.power(mu2, 1.5)\n out0 = default.copy()\n place(out0, cond, g1)\n output.append(out0)\n\n if 'k' in moments:\n if g2 is None:\n mu4p = self._munp(4, *goodargs)\n if mu is None:\n mu = self._munp(1, *goodargs)\n if mu2 is None:\n mu2p = self._munp(2, *goodargs)\n mu2 = mu2p - mu * mu\n if mu3 is None:\n mu3p = self._munp(3, *goodargs)\n with np.errstate(invalid='ignore'):\n mu3 = (-mu * mu - 3 * mu2) * mu + mu3p\n with np.errstate(invalid='ignore'):\n mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p\n g2 = mu4 / mu2**2.0 - 3.0\n out0 = default.copy()\n place(out0, cond, g2)\n output.append(out0)\n else: # no valid args\n output = [default.copy() for _ in moments]\n\n if len(output) == 1:\n return output[0]\n else:\n return tuple(output)\n\n def entropy(self, *args, **kwds):\n \"\"\"\n Differential entropy of the RV.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n scale : array_like, optional (continuous distributions only).\n Scale parameter (default=1).\n\n Notes\n -----\n Entropy is defined base `e`:\n\n >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))\n >>> np.allclose(drv.entropy(), np.log(2.0))\n True\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n # NB: for discrete distributions scale=1 by construction in _parse_args\n loc, scale = map(asarray, (loc, scale))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n output = zeros(shape(cond0), 'd')\n place(output, (1-cond0), self.badvalue)\n goodargs = argsreduce(cond0, scale, *args)\n goodscale = goodargs[0]\n goodargs = goodargs[1:]\n place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))\n return output\n\n def moment(self, n, *args, **kwds):\n \"\"\"\n n-th order non-central moment of distribution.\n\n Parameters\n ----------\n n : int, n >= 1\n Order of moment.\n arg1, arg2, arg3,... : float\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n if not (self._argcheck(*args) and (scale > 0)):\n return nan\n if (floor(n) != n):\n raise ValueError(\"Moment must be an integer.\")\n if (n < 0):\n raise ValueError(\"Moment must be positive.\")\n mu, mu2, g1, g2 = None, None, None, None\n if (n > 0) and (n < 5):\n if self._stats_has_moments:\n mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}\n else:\n mdict = {}\n mu, mu2, g1, g2 = self._stats(*args, **mdict)\n val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)\n\n # Convert to transformed X = L + S*Y\n # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)\n if loc == 0:\n return scale**n * val\n else:\n result = 0\n fac = float(scale) / float(loc)\n for k in range(n):\n valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)\n result += comb(n, k, exact=True)*(fac**k) * valk\n result += fac**n * val\n return result * loc**n\n\n def median(self, *args, **kwds):\n \"\"\"\n Median of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n Location parameter, Default is 0.\n scale : array_like, optional\n Scale parameter, Default is 1.\n\n Returns\n -------\n median : float\n The median of the distribution.\n\n See Also\n --------\n rv_discrete.ppf\n Inverse of the CDF\n\n \"\"\"\n return self.ppf(0.5, *args, **kwds)\n\n def mean(self, *args, **kwds):\n \"\"\"\n Mean of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n mean : float\n the mean of the distribution\n\n \"\"\"\n kwds['moments'] = 'm'\n res = self.stats(*args, **kwds)\n if isinstance(res, ndarray) and res.ndim == 0:\n return res[()]\n return res\n\n def var(self, *args, **kwds):\n \"\"\"\n Variance of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n var : float\n the variance of the distribution\n\n \"\"\"\n kwds['moments'] = 'v'\n res = self.stats(*args, **kwds)\n if isinstance(res, ndarray) and res.ndim == 0:\n return res[()]\n return res\n\n def std(self, *args, **kwds):\n \"\"\"\n Standard deviation of the distribution.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n std : float\n standard deviation of the distribution\n\n \"\"\"\n kwds['moments'] = 'v'\n res = sqrt(self.stats(*args, **kwds))\n return res\n\n def interval(self, alpha, *args, **kwds):\n \"\"\"\n Confidence interval with equal areas around the median.\n\n Parameters\n ----------\n alpha : array_like of float\n Probability that an rv will be drawn from the returned range.\n Each value should be in the range [0, 1].\n arg1, arg2, ... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter, Default is 0.\n scale : array_like, optional\n scale parameter, Default is 1.\n\n Returns\n -------\n a, b : ndarray of float\n end-points of range that contain ``100 * alpha %`` of the rv's\n possible values.\n\n \"\"\"\n alpha = asarray(alpha)\n if np.any((alpha > 1) | (alpha < 0)):\n raise ValueError(\"alpha must be between 0 and 1 inclusive\")\n q1 = (1.0-alpha)/2\n q2 = (1.0+alpha)/2\n a = self.ppf(q1, *args, **kwds)\n b = self.ppf(q2, *args, **kwds)\n return a, b\n\n def support(self, *args, **kwargs):\n \"\"\"\n Return the support of the distribution.\n\n Parameters\n ----------\n arg1, arg2, ... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n location parameter, Default is 0.\n scale : array_like, optional\n scale parameter, Default is 1.\n Returns\n -------\n a, b : float\n end-points of the distribution's support.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwargs)\n _a, _b = self._get_support(*args)\n return _a * scale + loc, _b * scale + loc\n\n\ndef _get_fixed_fit_value(kwds, names):\n \"\"\"\n Given names such as `['f0', 'fa', 'fix_a']`, check that there is\n at most one non-None value in `kwds` associaed with those names.\n Return that value, or None if none of the names occur in `kwds`.\n As a side effect, all occurrences of those names in `kwds` are\n removed.\n \"\"\"\n vals = [(name, kwds.pop(name)) for name in names if name in kwds]\n if len(vals) > 1:\n repeated = [name for name, val in vals]\n raise ValueError(\"fit method got multiple keyword arguments to \"\n \"specify the same fixed parameter: \" +\n ', '.join(repeated))\n return vals[0][1] if vals else None\n\n\n## continuous random variables: implement maybe later\n##\n## hf --- Hazard Function (PDF / SF)\n## chf --- Cumulative hazard function (-log(SF))\n## psf --- Probability sparsity function (reciprocal of the pdf) in\n## units of percent-point-function (as a function of q).\n## Also, the derivative of the percent-point function.\n\nclass rv_continuous(rv_generic):\n \"\"\"\n A generic continuous random variable class meant for subclassing.\n\n `rv_continuous` is a base class to construct specific distribution classes\n and instances for continuous random variables. It cannot be used\n directly as a distribution.\n\n Parameters\n ----------\n momtype : int, optional\n The type of generic moment calculation to use: 0 for pdf, 1 (default)\n for ppf.\n a : float, optional\n Lower bound of the support of the distribution, default is minus\n infinity.\n b : float, optional\n Upper bound of the support of the distribution, default is plus\n infinity.\n xtol : float, optional\n The tolerance for fixed point calculation for generic ppf.\n badvalue : float, optional\n The value in a result arrays that indicates a value that for which\n some argument restriction is violated, default is np.nan.\n name : str, optional\n The name of the instance. This string is used to construct the default\n example for distributions.\n longname : str, optional\n This string is used as part of the first line of the docstring returned\n when a subclass has no docstring of its own. Note: `longname` exists\n for backwards compatibility, do not use for new subclasses.\n shapes : str, optional\n The shape of the distribution. For example ``\"m, n\"`` for a\n distribution that takes two integers as the two shape arguments for all\n its methods. If not provided, shape parameters will be inferred from\n the signature of the private methods, ``_pdf`` and ``_cdf`` of the\n instance.\n extradoc : str, optional, deprecated\n This string is used as the last part of the docstring returned when a\n subclass has no docstring of its own. 
Note: `extradoc` exists for\n backwards compatibility, do not use for new subclasses.\n seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional\n This parameter defines the object to use for drawing random variates.\n If `seed` is `None` the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is None.\n\n Methods\n -------\n rvs\n pdf\n logpdf\n cdf\n logcdf\n sf\n logsf\n ppf\n isf\n moment\n stats\n entropy\n expect\n median\n mean\n std\n var\n interval\n __call__\n fit\n fit_loc_scale\n nnlf\n support\n\n Notes\n -----\n Public methods of an instance of a distribution class (e.g., ``pdf``,\n ``cdf``) check their arguments and pass valid arguments to private,\n computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid\n if it is within the support of the distribution.\n Whether a shape parameter is valid is decided by an ``_argcheck`` method\n (which defaults to checking that its arguments are strictly positive.)\n\n **Subclassing**\n\n New random variables can be defined by subclassing the `rv_continuous` class\n and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized\n to location 0 and scale 1).\n\n If positive argument checking is not correct for your RV\n then you will also need to re-define the ``_argcheck`` method.\n\n For most of the scipy.stats distributions, the support interval doesn't\n depend on the shape parameters. ``x`` being in the support interval is\n equivalent to ``self.a <= x <= self.b``. If either of the endpoints of\n the support do depend on the shape parameters, then\n i) the distribution must implement the ``_get_support`` method; and\n ii) those dependent endpoints must be omitted from the distribution's\n call to the ``rv_continuous`` initializer.\n\n Correct, but potentially slow defaults exist for the remaining\n methods but for speed and/or accuracy you can over-ride::\n\n _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf\n\n The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,\n applied to a uniform random variate. In order to generate random variates\n efficiently, either the default ``_ppf`` needs to be overwritten (e.g.\n if the inverse cdf can expressed in an explicit form) or a sampling\n method needs to be implemented in a custom ``_rvs`` method.\n\n If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.\n The main reason would be to improve numerical accuracy: for example,\n the survival function ``_sf`` is computed as ``1 - _cdf`` which can\n result in loss of precision if ``_cdf(x)`` is close to one.\n\n **Methods that can be overwritten by subclasses**\n ::\n\n _rvs\n _pdf\n _cdf\n _sf\n _ppf\n _isf\n _stats\n _munp\n _entropy\n _argcheck\n _get_support\n\n There are additional (internal and private) generic methods that can\n be useful for cross-checking and for debugging, but might work in all\n cases when directly called.\n\n A note on ``shapes``: subclasses need not specify them explicitly. 
In this\n case, `shapes` will be automatically deduced from the signatures of the\n overridden methods (`pdf`, `cdf` etc).\n If, for some reason, you prefer to avoid relying on introspection, you can\n specify ``shapes`` explicitly as an argument to the instance constructor.\n\n\n **Frozen Distributions**\n\n Normally, you must provide shape parameters (and, optionally, location and\n scale parameters to each call of a method of a distribution.\n\n Alternatively, the object may be called (as a function) to fix the shape,\n location, and scale parameters returning a \"frozen\" continuous RV object:\n\n rv = generic(<shape(s)>, loc=0, scale=1)\n `rv_frozen` object with the same methods but holding the given shape,\n location, and scale fixed\n\n **Statistics**\n\n Statistics are computed using numerical integration by default.\n For speed you can redefine this using ``_stats``:\n\n - take shape parameters and return mu, mu2, g1, g2\n - If you can't compute one of these, return it as None\n - Can also be defined with a keyword argument ``moments``, which is a\n string composed of \"m\", \"v\", \"s\", and/or \"k\".\n Only the components appearing in string should be computed and\n returned in the order \"m\", \"v\", \"s\", or \"k\" with missing values\n returned as None.\n\n Alternatively, you can override ``_munp``, which takes ``n`` and shape\n parameters and returns the n-th non-central moment of the distribution.\n\n Examples\n --------\n To create a new Gaussian distribution, we would do the following:\n\n >>> from scipy.stats import rv_continuous\n >>> class gaussian_gen(rv_continuous):\n ... \"Gaussian distribution\"\n ... def _pdf(self, x):\n ... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)\n >>> gaussian = gaussian_gen(name='gaussian')\n\n ``scipy.stats`` distributions are *instances*, so here we subclass\n `rv_continuous` and create an instance. With this, we now have\n a fully functional distribution with all relevant methods automagically\n generated by the framework.\n\n Note that above we defined a standard normal distribution, with zero mean\n and unit variance. 
Shifting and scaling of the distribution can be done\n by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``\n essentially computes ``y = (x - loc) / scale`` and\n ``gaussian._pdf(y) / scale``.\n\n \"\"\"\n def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,\n badvalue=None, name=None, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n super(rv_continuous, self).__init__(seed)\n\n # save the ctor parameters, cf generic freeze\n self._ctor_param = dict(\n momtype=momtype, a=a, b=b, xtol=xtol,\n badvalue=badvalue, name=name, longname=longname,\n shapes=shapes, extradoc=extradoc, seed=seed)\n\n if badvalue is None:\n badvalue = nan\n if name is None:\n name = 'Distribution'\n self.badvalue = badvalue\n self.name = name\n self.a = a\n self.b = b\n if a is None:\n self.a = -inf\n if b is None:\n self.b = inf\n self.xtol = xtol\n self.moment_type = momtype\n self.shapes = shapes\n self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],\n locscale_in='loc=0, scale=1',\n locscale_out='loc, scale')\n\n # nin correction\n self._ppfvec = vectorize(self._ppf_single, otypes='d')\n self._ppfvec.nin = self.numargs + 1\n self.vecentropy = vectorize(self._entropy, otypes='d')\n self._cdfvec = vectorize(self._cdf_single, otypes='d')\n self._cdfvec.nin = self.numargs + 1\n\n self.extradoc = extradoc\n if momtype == 0:\n self.generic_moment = vectorize(self._mom0_sc, otypes='d')\n else:\n self.generic_moment = vectorize(self._mom1_sc, otypes='d')\n # Because of the *args argument of _mom0_sc, vectorize cannot count the\n # number of arguments correctly.\n self.generic_moment.nin = self.numargs + 1\n\n if longname is None:\n if name[0] in ['aeiouAEIOU']:\n hstr = \"An \"\n else:\n hstr = \"A \"\n longname = hstr + name\n\n if sys.flags.optimize < 2:\n # Skip adding docstrings if interpreter is run with -OO\n if self.__doc__ is None:\n self._construct_default_doc(longname=longname,\n extradoc=extradoc,\n docdict=docdict,\n discrete='continuous')\n else:\n dct = dict(distcont)\n self._construct_doc(docdict, dct.get(self.name))\n\n def _updated_ctor_param(self):\n \"\"\" Return the current version of _ctor_param, possibly updated by user.\n\n Used by freezing and pickling.\n Keep this in sync with the signature of __init__.\n \"\"\"\n dct = self._ctor_param.copy()\n dct['a'] = self.a\n dct['b'] = self.b\n dct['xtol'] = self.xtol\n dct['badvalue'] = self.badvalue\n dct['name'] = self.name\n dct['shapes'] = self.shapes\n dct['extradoc'] = self.extradoc\n return dct\n\n def _ppf_to_solve(self, x, q, *args):\n return self.cdf(*(x, )+args)-q\n\n def _ppf_single(self, q, *args):\n factor = 10.\n left, right = self._get_support(*args)\n\n if np.isinf(left):\n left = min(-factor, right)\n while self._ppf_to_solve(left, q, *args) > 0.:\n left, right = left * factor, left\n # left is now such that cdf(left) <= q\n # if right has changed, then cdf(right) > q\n\n if np.isinf(right):\n right = max(factor, left)\n while self._ppf_to_solve(right, q, *args) < 0.:\n left, right = right, right * factor\n # right is now such that cdf(right) >= q\n\n return optimize.brentq(self._ppf_to_solve,\n left, right, args=(q,)+args, xtol=self.xtol)\n\n # moment from definition\n def _mom_integ0(self, x, m, *args):\n return x**m * self.pdf(x, *args)\n\n def _mom0_sc(self, m, *args):\n _a, _b = self._get_support(*args)\n return integrate.quad(self._mom_integ0, _a, _b,\n args=(m,)+args)[0]\n\n # moment calculated using ppf\n def _mom_integ1(self, q, m, *args):\n return (self.ppf(q, *args))**m\n\n 
def _mom1_sc(self, m, *args):\n return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]\n\n def _pdf(self, x, *args):\n return derivative(self._cdf, x, dx=1e-5, args=args, order=5)\n\n ## Could also define any of these\n def _logpdf(self, x, *args):\n return log(self._pdf(x, *args))\n\n def _cdf_single(self, x, *args):\n _a, _b = self._get_support(*args)\n return integrate.quad(self._pdf, _a, x, args=args)[0]\n\n def _cdf(self, x, *args):\n return self._cdfvec(x, *args)\n\n ## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined\n ## in rv_generic\n\n def pdf(self, x, *args, **kwds):\n \"\"\"\n Probability density function at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n pdf : ndarray\n Probability density function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._support_mask(x, *args) & (scale > 0)\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n putmask(output, (1-cond0)+np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args+(scale,)))\n scale, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._pdf(*goodargs) / scale)\n if output.ndim == 0:\n return output[()]\n return output\n\n def logpdf(self, x, *args, **kwds):\n \"\"\"\n Log of the probability density function at x of the given RV.\n\n This uses a more numerically accurate calculation if available.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logpdf : array_like\n Log of the probability density function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._support_mask(x, *args) & (scale > 0)\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n putmask(output, (1-cond0)+np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args+(scale,)))\n scale, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._logpdf(*goodargs) - log(scale))\n if output.ndim == 0:\n return output[()]\n return output\n\n def cdf(self, x, *args, **kwds):\n \"\"\"\n Cumulative distribution function of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n cdf : ndarray\n Cumulative distribution function evaluated at `x`\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._open_support_mask(x, *args) & (scale > 0)\n cond2 = (x >= np.asarray(_b)) & cond0\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._cdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logcdf(self, x, *args, **kwds):\n \"\"\"\n Log of the cumulative distribution function at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logcdf : array_like\n Log of the cumulative distribution function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._open_support_mask(x, *args) & (scale > 0)\n cond2 = (x >= _b) & cond0\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._logcdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def sf(self, x, *args, **kwds):\n \"\"\"\n Survival function (1 - `cdf`) at x of the given RV.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n sf : array_like\n Survival function evaluated at x\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._open_support_mask(x, *args) & (scale > 0)\n cond2 = cond0 & (x <= _a)\n cond = cond0 & cond1\n output = zeros(shape(cond), dtyp)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._sf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logsf(self, x, *args, **kwds):\n \"\"\"\n Log of the survival function of the given RV.\n\n Returns the log of the \"survival function,\" defined as (1 - `cdf`),\n evaluated at `x`.\n\n Parameters\n ----------\n x : array_like\n quantiles\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n logsf : ndarray\n Log of the survival function evaluated at `x`.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.find_common_type([x.dtype, np.float64], [])\n x = np.asarray((x - loc)/scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._open_support_mask(x, *args) & (scale > 0)\n cond2 = cond0 & (x <= _a)\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(NINF)\n place(output, (1-cond0)+np.isnan(x), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((x,)+args))\n place(output, cond, self._logsf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def ppf(self, q, *args, **kwds):\n \"\"\"\n Percent point function (inverse of `cdf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n lower tail probability\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n x : array_like\n quantile corresponding to the lower tail probability q.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n q, loc, scale = map(asarray, (q, loc, scale))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n cond1 = (0 < q) & (q < 1)\n cond2 = cond0 & (q == 0)\n cond3 = cond0 & (q == 1)\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue)\n\n lower_bound = _a * scale + loc\n upper_bound = _b * scale + loc\n place(output, cond2, argsreduce(cond2, lower_bound)[0])\n place(output, cond3, argsreduce(cond3, upper_bound)[0])\n\n if np.any(cond): # call only if at least 1 entry\n goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n place(output, cond, self._ppf(*goodargs) * scale + loc)\n if output.ndim == 0:\n return output[()]\n return output\n\n def isf(self, q, *args, **kwds):\n \"\"\"\n Inverse survival function (inverse of `sf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n upper tail probability\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n x : ndarray or scalar\n Quantile corresponding to the upper tail probability q.\n\n \"\"\"\n args, loc, scale = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n q, loc, scale = map(asarray, (q, loc, scale))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n cond1 = (0 < q) & (q < 1)\n cond2 = cond0 & (q == 1)\n cond3 = cond0 & (q == 0)\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue)\n\n lower_bound = _a * scale + loc\n upper_bound = _b * scale + loc\n place(output, cond2, argsreduce(cond2, lower_bound)[0])\n place(output, cond3, argsreduce(cond3, upper_bound)[0])\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))\n scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]\n place(output, cond, self._isf(*goodargs) * scale + loc)\n if output.ndim == 0:\n return output[()]\n return output\n\n def _nnlf(self, x, *args):\n return -np.sum(self._logpdf(x, *args), axis=0)\n\n def _unpack_loc_scale(self, theta):\n try:\n loc = theta[-2]\n scale = theta[-1]\n args = tuple(theta[:-2])\n except IndexError:\n raise ValueError(\"Not enough input arguments.\")\n return loc, scale, args\n\n def nnlf(self, theta, x):\n '''Return negative loglikelihood function.\n\n Notes\n -----\n This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the\n parameters (including loc and scale).\n '''\n loc, scale, args = self._unpack_loc_scale(theta)\n if not self._argcheck(*args) or scale <= 0:\n return inf\n x = asarray((x-loc) / scale)\n n_log_scale = len(x) * log(scale)\n if np.any(~self._support_mask(x, *args)):\n return inf\n return self._nnlf(x, *args) + n_log_scale\n\n def _nnlf_and_penalty(self, x, args):\n cond0 = ~self._support_mask(x, *args)\n n_bad = np.count_nonzero(cond0, axis=0)\n if n_bad > 0:\n x = 
argsreduce(~cond0, x)[0]\n logpdf = self._logpdf(x, *args)\n finite_logpdf = np.isfinite(logpdf)\n n_bad += np.sum(~finite_logpdf, axis=0)\n if n_bad > 0:\n penalty = n_bad * log(_XMAX) * 100\n return -np.sum(logpdf[finite_logpdf], axis=0) + penalty\n return -np.sum(logpdf, axis=0)\n\n def _penalized_nnlf(self, theta, x):\n ''' Return penalized negative loglikelihood function,\n i.e., - sum (log pdf(x, theta), axis=0) + penalty\n where theta are the parameters (including loc and scale)\n '''\n loc, scale, args = self._unpack_loc_scale(theta)\n if not self._argcheck(*args) or scale <= 0:\n return inf\n x = asarray((x-loc) / scale)\n n_log_scale = len(x) * log(scale)\n return self._nnlf_and_penalty(x, args) + n_log_scale\n\n # return starting point for fit (shape arguments + loc + scale)\n def _fitstart(self, data, args=None):\n if args is None:\n args = (1.0,)*self.numargs\n loc, scale = self._fit_loc_scale_support(data, *args)\n return args + (loc, scale)\n\n def _reduce_func(self, args, kwds):\n \"\"\"\n Return the (possibly reduced) function to optimize in order to find MLE\n estimates for the .fit method.\n \"\"\"\n # Convert fixed shape parameters to the standard numeric form: e.g. for\n # stats.beta, shapes='a, b'. To fix `a`, the caller can give a value\n # for `f0`, `fa` or 'fix_a'. The following converts the latter two\n # into the first (numeric) form.\n if self.shapes:\n shapes = self.shapes.replace(',', ' ').split()\n for j, s in enumerate(shapes):\n key = 'f' + str(j)\n names = [key, 'f' + s, 'fix_' + s]\n val = _get_fixed_fit_value(kwds, names)\n if val is not None:\n kwds[key] = val\n\n args = list(args)\n Nargs = len(args)\n fixedn = []\n names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']\n x0 = []\n for n, key in enumerate(names):\n if key in kwds:\n fixedn.append(n)\n args[n] = kwds.pop(key)\n else:\n x0.append(args[n])\n\n if len(fixedn) == 0:\n func = self._penalized_nnlf\n restore = None\n else:\n if len(fixedn) == Nargs:\n raise ValueError(\n \"All parameters fixed. There is nothing to optimize.\")\n\n def restore(args, theta):\n # Replace with theta for all numbers not in fixedn\n # This allows the non-fixed values to vary, but\n # we still call self.nnlf with all parameters.\n i = 0\n for n in range(Nargs):\n if n not in fixedn:\n args[n] = theta[i]\n i += 1\n return args\n\n def func(theta, x):\n newtheta = restore(args[:], theta)\n return self._penalized_nnlf(newtheta, x)\n\n return x0, func, restore, args\n\n def fit(self, data, *args, **kwds):\n \"\"\"\n Return MLEs for shape (if applicable), location, and scale\n parameters from data.\n\n MLE stands for Maximum Likelihood Estimate. Starting estimates for\n the fit are given by input arguments; for any arguments not provided\n with starting estimates, ``self._fitstart(data)`` is called to generate\n such.\n\n One can hold some parameters fixed to specific values by passing in\n keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)\n and ``floc`` and ``fscale`` (for location and scale parameters,\n respectively).\n\n Parameters\n ----------\n data : array_like\n Data to use in calculating the MLEs.\n arg1, arg2, arg3,... 
: floats, optional\n Starting value(s) for any shape-characterizing arguments (those not\n provided will be determined by a call to ``_fitstart(data)``).\n No default value.\n kwds : floats, optional\n - `loc`: initial guess of the distribution's location parameter.\n - `scale`: initial guess of the distribution's scale parameter.\n\n Special keyword arguments are recognized as holding certain\n parameters fixed:\n\n - f0...fn : hold respective shape parameters fixed.\n Alternatively, shape parameters to fix can be specified by name.\n For example, if ``self.shapes == \"a, b\"``, ``fa`` and ``fix_a``\n are equivalent to ``f0``, and ``fb`` and ``fix_b`` are\n equivalent to ``f1``.\n\n - floc : hold location parameter fixed to specified value.\n\n - fscale : hold scale parameter fixed to specified value.\n\n - optimizer : The optimizer to use. The optimizer must take ``func``,\n and starting position as the first two arguments,\n plus ``args`` (for extra arguments to pass to the\n function to be optimized) and ``disp=0`` to suppress\n output as keyword arguments.\n\n Returns\n -------\n mle_tuple : tuple of floats\n MLEs for any shape parameters (if applicable), followed by those\n for location and scale. For most random variables, shape statistics\n will be returned, but there are exceptions (e.g. ``norm``).\n\n Notes\n -----\n This fit is computed by maximizing a log-likelihood function, with\n penalty applied for samples outside of range of the distribution. The\n returned answer is not guaranteed to be the globally optimal MLE, it\n may only be locally optimal, or the optimization may fail altogether.\n If the data contain any of np.nan, np.inf, or -np.inf, the fit routine\n will throw a RuntimeError.\n\n Examples\n --------\n\n Generate some data to fit: draw random variates from the `beta`\n distribution\n\n >>> from scipy.stats import beta\n >>> a, b = 1., 2.\n >>> x = beta.rvs(a, b, size=1000)\n\n Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):\n\n >>> a1, b1, loc1, scale1 = beta.fit(x)\n\n We can also use some prior knowledge about the dataset: let's keep\n ``loc`` and ``scale`` fixed:\n\n >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)\n >>> loc1, scale1\n (0, 1)\n\n We can also keep shape parameters fixed by using ``f``-keywords. 
To\n keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,\n equivalently, ``fa=1``:\n\n >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)\n >>> a1\n 1\n\n Not all distributions return estimates for the shape parameters.\n ``norm`` for example just returns estimates for location and scale:\n\n >>> from scipy.stats import norm\n >>> x = norm.rvs(a, b, size=1000, random_state=123)\n >>> loc1, scale1 = norm.fit(x)\n >>> loc1, scale1\n (0.92087172783841631, 2.0015750750324668)\n \"\"\"\n Narg = len(args)\n if Narg > self.numargs:\n raise TypeError(\"Too many input arguments.\")\n\n if not np.isfinite(data).all():\n raise RuntimeError(\"The data contains non-finite values.\")\n\n start = [None]*2\n if (Narg < self.numargs) or not ('loc' in kwds and\n 'scale' in kwds):\n # get distribution specific starting locations\n start = self._fitstart(data)\n args += start[Narg:-2]\n loc = kwds.pop('loc', start[-2])\n scale = kwds.pop('scale', start[-1])\n args += (loc, scale)\n x0, func, restore, args = self._reduce_func(args, kwds)\n\n optimizer = kwds.pop('optimizer', optimize.fmin)\n # convert string to function in scipy.optimize\n if not callable(optimizer) and isinstance(optimizer, str):\n if not optimizer.startswith('fmin_'):\n optimizer = \"fmin_\"+optimizer\n if optimizer == 'fmin_':\n optimizer = 'fmin'\n try:\n optimizer = getattr(optimize, optimizer)\n except AttributeError:\n raise ValueError(\"%s is not a valid optimizer\" % optimizer)\n\n # by now kwds must be empty, since everybody took what they needed\n if kwds:\n raise TypeError(\"Unknown arguments: %s.\" % kwds)\n\n vals = optimizer(func, x0, args=(ravel(data),), disp=0)\n if restore is not None:\n vals = restore(args, vals)\n vals = tuple(vals)\n return vals\n\n def _fit_loc_scale_support(self, data, *args):\n \"\"\"\n Estimate loc and scale parameters from data accounting for support.\n\n Parameters\n ----------\n data : array_like\n Data to fit.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n\n Returns\n -------\n Lhat : float\n Estimated location parameter for the data.\n Shat : float\n Estimated scale parameter for the data.\n\n \"\"\"\n data = np.asarray(data)\n\n # Estimate location and scale according to the method of moments.\n loc_hat, scale_hat = self.fit_loc_scale(data, *args)\n\n # Compute the support according to the shape parameters.\n self._argcheck(*args)\n _a, _b = self._get_support(*args)\n a, b = _a, _b\n support_width = b - a\n\n # If the support is empty then return the moment-based estimates.\n if support_width <= 0:\n return loc_hat, scale_hat\n\n # Compute the proposed support according to the loc and scale\n # estimates.\n a_hat = loc_hat + a * scale_hat\n b_hat = loc_hat + b * scale_hat\n\n # Use the moment-based estimates if they are compatible with the data.\n data_a = np.min(data)\n data_b = np.max(data)\n if a_hat < data_a and data_b < b_hat:\n return loc_hat, scale_hat\n\n # Otherwise find other estimates that are compatible with the data.\n data_width = data_b - data_a\n rel_margin = 0.1\n margin = data_width * rel_margin\n\n # For a finite interval, both the location and scale\n # should have interesting values.\n if support_width < np.inf:\n loc_hat = (data_a - a) - margin\n scale_hat = (data_width + 2 * margin) / support_width\n return loc_hat, scale_hat\n\n # For a one-sided interval, use only an interesting location parameter.\n if a > -np.inf:\n return (data_a - a) - margin, 1\n elif b < np.inf:\n return (data_b - b) + margin, 1\n else:\n raise RuntimeError\n\n def fit_loc_scale(self, data, *args):\n \"\"\"\n Estimate loc and scale parameters from data using 1st and 2nd moments.\n\n Parameters\n ----------\n data : array_like\n Data to fit.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n\n Returns\n -------\n Lhat : float\n Estimated location parameter for the data.\n Shat : float\n Estimated scale parameter for the data.\n\n \"\"\"\n mu, mu2 = self.stats(*args, **{'moments': 'mv'})\n tmp = asarray(data)\n muhat = tmp.mean()\n mu2hat = tmp.var()\n Shat = sqrt(mu2hat / mu2)\n Lhat = muhat - Shat*mu\n if not np.isfinite(Lhat):\n Lhat = 0\n if not (np.isfinite(Shat) and (0 < Shat)):\n Shat = 1\n return Lhat, Shat\n\n def _entropy(self, *args):\n def integ(x):\n val = self._pdf(x, *args)\n return entr(val)\n\n # upper limit is often inf, so suppress warnings when integrating\n _a, _b = self._get_support(*args)\n with np.errstate(over='ignore'):\n h = integrate.quad(integ, _a, _b)[0]\n\n if not np.isnan(h):\n return h\n else:\n # try with different limits if integration problems\n low, upp = self.ppf([1e-10, 1. - 1e-10], *args)\n if np.isinf(_b):\n upper = upp\n else:\n upper = _b\n if np.isinf(_a):\n lower = low\n else:\n lower = _a\n return integrate.quad(integ, lower, upper)[0]\n\n def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,\n conditional=False, **kwds):\n \"\"\"Calculate expected value of a function with respect to the\n distribution by numerical integration.\n\n The expected value of a function ``f(x)`` with respect to a\n distribution ``dist`` is defined as::\n\n ub\n E[f(x)] = Integral(f(x) * dist.pdf(x)),\n lb\n\n where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``\n distribution. If the bounds ``lb`` and ``ub`` correspond to the\n support of the distribution, e.g. 
``[-inf, inf]`` in the default\n case, then the integral is the unrestricted expectation of ``f(x)``.\n Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``\n outside a finite interval in which case the expectation is\n calculated within the finite range ``[lb, ub]``.\n\n Parameters\n ----------\n func : callable, optional\n Function for which integral is calculated. Takes only one argument.\n The default is the identity mapping f(x) = x.\n args : tuple, optional\n Shape parameters of the distribution.\n loc : float, optional\n Location parameter (default=0).\n scale : float, optional\n Scale parameter (default=1).\n lb, ub : scalar, optional\n Lower and upper bound for integration. Default is set to the\n support of the distribution.\n conditional : bool, optional\n If True, the integral is corrected by the conditional probability\n of the integration interval. The return value is the expectation\n of the function, conditional on being in the given interval.\n Default is False.\n\n Additional keyword arguments are passed to the integration routine.\n\n Returns\n -------\n expect : float\n The calculated expected value.\n\n Notes\n -----\n The integration behavior of this function is inherited from\n `scipy.integrate.quad`. Neither this function nor\n `scipy.integrate.quad` can verify whether the integral exists or is\n finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and\n ``cauchy(0).expect()`` returns ``0.0``.\n\n The function is not vectorized.\n\n Examples\n --------\n\n To understand the effect of the bounds of integration consider\n \n >>> from scipy.stats import expon\n >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)\n 0.6321205588285578\n\n This is close to\n\n >>> expon(1).cdf(2.0) - expon(1).cdf(0.0)\n 0.6321205588285577\n\n If ``conditional=True``\n\n >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)\n 1.0000000000000002\n\n The slight deviation from 1 is due to numerical integration.\n \"\"\"\n lockwds = {'loc': loc,\n 'scale': scale}\n self._argcheck(*args)\n _a, _b = self._get_support(*args)\n if func is None:\n def fun(x, *args):\n return x * self.pdf(x, *args, **lockwds)\n else:\n def fun(x, *args):\n return func(x) * self.pdf(x, *args, **lockwds)\n if lb is None:\n lb = loc + _a * scale\n if ub is None:\n ub = loc + _b * scale\n if conditional:\n invfac = (self.sf(lb, *args, **lockwds)\n - self.sf(ub, *args, **lockwds))\n else:\n invfac = 1.0\n kwds['args'] = args\n # Silence floating point warnings from integration.\n with np.errstate(all='ignore'):\n vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac\n return vals\n\n\n# Helpers for the discrete distributions\ndef _drv2_moment(self, n, *args):\n \"\"\"Non-central moment of discrete distribution.\"\"\"\n def fun(x):\n return np.power(x, n) * self._pmf(x, *args)\n\n _a, _b = self._get_support(*args)\n return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)\n\n\ndef _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm\n _a, _b = self._get_support(*args)\n b = _b\n a = _a\n if isinf(b): # Be sure ending point is > q\n b = int(max(100*q, 10))\n while 1:\n if b >= _b:\n qb = 1.0\n break\n qb = self._cdf(b, *args)\n if (qb < q):\n b += 10\n else:\n break\n else:\n qb = 1.0\n if isinf(a): # be sure starting point < q\n a = int(min(-100*q, -10))\n while 1:\n if a <= _a:\n qb = 0.0\n break\n qa = self._cdf(a, *args)\n if (qa > q):\n a -= 10\n else:\n break\n else:\n qa = self._cdf(a, *args)\n\n while 1:\n if (qa == q):\n return a\n if (qb == q):\n return b\n if b 
<= a+1:\n if qa > q:\n return a\n else:\n return b\n c = int((a+b)/2.0)\n qc = self._cdf(c, *args)\n if (qc < q):\n if a != c:\n a = c\n else:\n raise RuntimeError('updating stopped, endless loop')\n qa = qc\n elif (qc > q):\n if b != c:\n b = c\n else:\n raise RuntimeError('updating stopped, endless loop')\n qb = qc\n else:\n return c\n\n\ndef entropy(pk, qk=None, base=None, axis=0):\n \"\"\"Calculate the entropy of a distribution for given probability values.\n\n If only probabilities `pk` are given, the entropy is calculated as\n ``S = -sum(pk * log(pk), axis=axis)``.\n\n If `qk` is not None, then compute the Kullback-Leibler divergence\n ``S = sum(pk * log(pk / qk), axis=axis)``.\n\n This routine will normalize `pk` and `qk` if they don't sum to 1.\n\n Parameters\n ----------\n pk : sequence\n Defines the (discrete) distribution. ``pk[i]`` is the (possibly\n unnormalized) probability of event ``i``.\n qk : sequence, optional\n Sequence against which the relative entropy is computed. Should be in\n the same format as `pk`.\n base : float, optional\n The logarithmic base to use, defaults to ``e`` (natural logarithm).\n axis: int, optional\n The axis along which the entropy is calculated. Default is 0.\n\n Returns\n -------\n S : float\n The calculated entropy.\n\n Examples\n --------\n\n >>> from scipy.stats import entropy\n\n Bernoulli trial with different p.\n The outcome of a fair coin is the most uncertain:\n\n >>> entropy([1/2, 1/2], base=2)\n 1.0\n\n The outcome of a biased coin is less uncertain:\n\n >>> entropy([9/10, 1/10], base=2)\n 0.46899559358928117\n\n Relative entropy:\n\n >>> entropy([1/2, 1/2], qk=[9/10, 1/10])\n 0.5108256237659907\n\n \"\"\"\n pk = asarray(pk)\n pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True)\n if qk is None:\n vec = entr(pk)\n else:\n qk = asarray(qk)\n if qk.shape != pk.shape:\n raise ValueError(\"qk and pk must have same shape.\")\n qk = 1.0*qk / np.sum(qk, axis=axis, keepdims=True)\n vec = rel_entr(pk, qk)\n S = np.sum(vec, axis=axis)\n if base is not None:\n S /= log(base)\n return S\n\n\n# Must over-ride one of _pmf or _cdf or pass in\n# x_k, p(x_k) lists in initialization\n\nclass rv_discrete(rv_generic):\n \"\"\"\n A generic discrete random variable class meant for subclassing.\n\n `rv_discrete` is a base class to construct specific distribution classes\n and instances for discrete random variables. It can also be used\n to construct an arbitrary distribution defined by a list of support\n points and corresponding probabilities.\n\n Parameters\n ----------\n a : float, optional\n Lower bound of the support of the distribution, default: 0\n b : float, optional\n Upper bound of the support of the distribution, default: plus infinity\n moment_tol : float, optional\n The tolerance for the generic calculation of moments.\n values : tuple of two array_like, optional\n ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero\n probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``\n and ``pk`` must have the same shape.\n inc : integer, optional\n Increment for the support of the distribution.\n Default is 1. (other values have not been tested)\n badvalue : float, optional\n The value in a result arrays that indicates a value that for which\n some argument restriction is violated, default is np.nan.\n name : str, optional\n The name of the instance. 
This string is used to construct the default\n example for distributions.\n longname : str, optional\n This string is used as part of the first line of the docstring returned\n when a subclass has no docstring of its own. Note: `longname` exists\n for backwards compatibility, do not use for new subclasses.\n shapes : str, optional\n The shape of the distribution. For example \"m, n\" for a distribution\n that takes two integers as the two shape arguments for all its methods\n If not provided, shape parameters will be inferred from\n the signatures of the private methods, ``_pmf`` and ``_cdf`` of\n the instance.\n extradoc : str, optional\n This string is used as the last part of the docstring returned when a\n subclass has no docstring of its own. Note: `extradoc` exists for\n backwards compatibility, do not use for new subclasses.\n seed : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional\n This parameter defines the object to use for drawing random variates.\n If `seed` is `None` the `~np.random.RandomState` singleton is used.\n If `seed` is an int, a new ``RandomState`` instance is used, seeded\n with seed.\n If `seed` is already a ``RandomState`` or ``Generator`` instance,\n then that object is used.\n Default is None.\n\n Methods\n -------\n rvs\n pmf\n logpmf\n cdf\n logcdf\n sf\n logsf\n ppf\n isf\n moment\n stats\n entropy\n expect\n median\n mean\n std\n var\n interval\n __call__\n support\n\n\n Notes\n -----\n\n This class is similar to `rv_continuous`. Whether a shape parameter is\n valid is decided by an ``_argcheck`` method (which defaults to checking\n that its arguments are strictly positive.)\n The main differences are:\n\n - the support of the distribution is a set of integers\n - instead of the probability density function, ``pdf`` (and the\n corresponding private ``_pdf``), this class defines the\n *probability mass function*, `pmf` (and the corresponding\n private ``_pmf``.)\n - scale parameter is not defined.\n\n To create a new discrete distribution, we would do the following:\n\n >>> from scipy.stats import rv_discrete\n >>> class poisson_gen(rv_discrete):\n ... \"Poisson distribution\"\n ... def _pmf(self, k, mu):\n ... return exp(-mu) * mu**k / factorial(k)\n\n and create an instance::\n\n >>> poisson = poisson_gen(name=\"poisson\")\n\n Note that above we defined the Poisson distribution in the standard form.\n Shifting the distribution can be done by providing the ``loc`` parameter\n to the methods of the instance. 
For example, ``poisson.pmf(x, mu, loc)``\n delegates the work to ``poisson._pmf(x-loc, mu)``.\n\n **Discrete distributions from a list of probabilities**\n\n Alternatively, you can construct an arbitrary discrete rv defined\n on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the\n ``values`` keyword argument to the `rv_discrete` constructor.\n\n Examples\n --------\n\n Custom made discrete distribution:\n\n >>> from scipy import stats\n >>> xk = np.arange(7)\n >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)\n >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))\n >>>\n >>> import matplotlib.pyplot as plt\n >>> fig, ax = plt.subplots(1, 1)\n >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')\n >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)\n >>> plt.show()\n\n Random number generation:\n\n >>> R = custm.rvs(size=100)\n\n \"\"\"\n def __new__(cls, a=0, b=inf, name=None, badvalue=None,\n moment_tol=1e-8, values=None, inc=1, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n if values is not None:\n # dispatch to a subclass\n return super(rv_discrete, cls).__new__(rv_sample)\n else:\n # business as usual\n return super(rv_discrete, cls).__new__(cls)\n\n def __init__(self, a=0, b=inf, name=None, badvalue=None,\n moment_tol=1e-8, values=None, inc=1, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n super(rv_discrete, self).__init__(seed)\n\n # cf generic freeze\n self._ctor_param = dict(\n a=a, b=b, name=name, badvalue=badvalue,\n moment_tol=moment_tol, values=values, inc=inc,\n longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)\n\n if badvalue is None:\n badvalue = nan\n self.badvalue = badvalue\n self.a = a\n self.b = b\n self.moment_tol = moment_tol\n self.inc = inc\n self._cdfvec = vectorize(self._cdf_single, otypes='d')\n self.vecentropy = vectorize(self._entropy)\n self.shapes = shapes\n\n if values is not None:\n raise ValueError(\"rv_discrete.__init__(..., values != None, ...)\")\n\n self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],\n locscale_in='loc=0',\n # scale=1 for discrete RVs\n locscale_out='loc, 1')\n\n # nin correction needs to be after we know numargs\n # correct nin for generic moment vectorization\n _vec_generic_moment = vectorize(_drv2_moment, otypes='d')\n _vec_generic_moment.nin = self.numargs + 2\n self.generic_moment = types.MethodType(_vec_generic_moment, self)\n\n # correct nin for ppf vectorization\n _vppf = vectorize(_drv2_ppfsingle, otypes='d')\n _vppf.nin = self.numargs + 2\n self._ppfvec = types.MethodType(_vppf, self)\n\n # now that self.numargs is defined, we can adjust nin\n self._cdfvec.nin = self.numargs + 1\n\n self._construct_docstrings(name, longname, extradoc)\n\n def _construct_docstrings(self, name, longname, extradoc):\n if name is None:\n name = 'Distribution'\n self.name = name\n self.extradoc = extradoc\n\n # generate docstring for subclass instances\n if longname is None:\n if name[0] in ['aeiouAEIOU']:\n hstr = \"An \"\n else:\n hstr = \"A \"\n longname = hstr + name\n\n if sys.flags.optimize < 2:\n # Skip adding docstrings if interpreter is run with -OO\n if self.__doc__ is None:\n self._construct_default_doc(longname=longname,\n extradoc=extradoc,\n docdict=docdict_discrete,\n discrete='discrete')\n else:\n dct = dict(distdiscrete)\n self._construct_doc(docdict_discrete, dct.get(self.name))\n\n # discrete RV do not have the scale parameter, remove it\n self.__doc__ = self.__doc__.replace(\n '\\n scale : array_like, '\n 'optional\\n scale parameter (default=1)', 
'')\n\n def _updated_ctor_param(self):\n \"\"\" Return the current version of _ctor_param, possibly updated by user.\n\n Used by freezing and pickling.\n Keep this in sync with the signature of __init__.\n \"\"\"\n dct = self._ctor_param.copy()\n dct['a'] = self.a\n dct['b'] = self.b\n dct['badvalue'] = self.badvalue\n dct['moment_tol'] = self.moment_tol\n dct['inc'] = self.inc\n dct['name'] = self.name\n dct['shapes'] = self.shapes\n dct['extradoc'] = self.extradoc\n return dct\n\n def _nonzero(self, k, *args):\n return floor(k) == k\n\n def _pmf(self, k, *args):\n return self._cdf(k, *args) - self._cdf(k-1, *args)\n\n def _logpmf(self, k, *args):\n return log(self._pmf(k, *args))\n\n def _cdf_single(self, k, *args):\n _a, _b = self._get_support(*args)\n m = arange(int(_a), k+1)\n return np.sum(self._pmf(m, *args), axis=0)\n\n def _cdf(self, x, *args):\n k = floor(x)\n return self._cdfvec(k, *args)\n\n # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic\n\n def rvs(self, *args, **kwargs):\n \"\"\"\n Random variates of given type.\n\n Parameters\n ----------\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n size : int or tuple of ints, optional\n Defining number of random variates (Default is 1). Note that `size`\n has to be given as keyword, not as positional argument.\n random_state : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional\n This parameter defines the object to use for drawing random\n variates.\n If `random_state` is `None` the `~np.random.RandomState` singleton\n is used.\n If `random_state` is an int, a new ``RandomState`` instance is used,\n seeded with random_state.\n If `random_state` is already a ``RandomState`` or ``Generator``\n instance, then that object is used.\n Default is None.\n\n Returns\n -------\n rvs : ndarray or scalar\n Random variates of given `size`.\n\n \"\"\"\n kwargs['discrete'] = True\n return super(rv_discrete, self).rvs(*args, **kwargs)\n\n def pmf(self, k, *args, **kwds):\n \"\"\"\n Probability mass function at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n pmf : array_like\n Probability mass function evaluated at k\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logpmf(self, k, *args, **kwds):\n \"\"\"\n Log of the probability mass function at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter. 
Default is 0.\n\n Returns\n -------\n logpmf : array_like\n Log of the probability mass function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logpmf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def cdf(self, k, *args, **kwds):\n \"\"\"\n Cumulative distribution function of the given RV.\n\n Parameters\n ----------\n k : array_like, int\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n cdf : ndarray\n Cumulative distribution function evaluated at `k`.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k < _b)\n cond2 = (k >= _b)\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2*(cond0 == cond0), 1.0)\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logcdf(self, k, *args, **kwds):\n \"\"\"\n Log of the cumulative distribution function at k of the given RV.\n\n Parameters\n ----------\n k : array_like, int\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n logcdf : array_like\n Log of the cumulative distribution function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray((k-loc))\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k < _b)\n cond2 = (k >= _b)\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2*(cond0 == cond0), 0.0)\n\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logcdf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def sf(self, k, *args, **kwds):\n \"\"\"\n Survival function (1 - `cdf`) at k of the given RV.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n sf : array_like\n Survival function evaluated at k.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray(k-loc)\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k < _b)\n cond2 = (k < _a) & cond0\n cond = cond0 & cond1\n output = zeros(shape(cond), 'd')\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2, 1.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, np.clip(self._sf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output\n\n def logsf(self, k, *args, **kwds):\n \"\"\"\n Log of the survival function of the given RV.\n\n Returns the log of the \"survival function,\" defined as 1 - `cdf`,\n evaluated at `k`.\n\n Parameters\n ----------\n k : array_like\n Quantiles.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n logsf : ndarray\n Log of the survival function evaluated at `k`.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n k = asarray(k-loc)\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k < _b)\n cond2 = (k < _a) & cond0\n cond = cond0 & cond1\n output = empty(shape(cond), 'd')\n output.fill(NINF)\n place(output, (1-cond0) + np.isnan(k), self.badvalue)\n place(output, cond2, 0.0)\n if np.any(cond):\n goodargs = argsreduce(cond, *((k,)+args))\n place(output, cond, self._logsf(*goodargs))\n if output.ndim == 0:\n return output[()]\n return output\n\n def ppf(self, q, *args, **kwds):\n \"\"\"\n Percent point function (inverse of `cdf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n Lower tail probability.\n arg1, arg2, arg3,... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n k : array_like\n Quantile corresponding to the lower tail probability, q.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n q, loc = map(asarray, (q, loc))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (loc == loc)\n cond1 = (q > 0) & (q < 1)\n cond2 = (q == 1) & cond0\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue, typecode='d')\n # output type 'd' to handle nin and inf\n place(output, (q == 0)*(cond == cond), _a-1 + loc)\n place(output, cond2, _b + loc)\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(loc,)))\n loc, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._ppf(*goodargs) + loc)\n\n if output.ndim == 0:\n return output[()]\n return output\n\n def isf(self, q, *args, **kwds):\n \"\"\"\n Inverse survival function (inverse of `sf`) at q of the given RV.\n\n Parameters\n ----------\n q : array_like\n Upper tail probability.\n arg1, arg2, arg3,... 
: array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n loc : array_like, optional\n Location parameter (default=0).\n\n Returns\n -------\n k : ndarray or scalar\n Quantile corresponding to the upper tail probability, q.\n\n \"\"\"\n args, loc, _ = self._parse_args(*args, **kwds)\n _a, _b = self._get_support(*args)\n q, loc = map(asarray, (q, loc))\n args = tuple(map(asarray, args))\n cond0 = self._argcheck(*args) & (loc == loc)\n cond1 = (q > 0) & (q < 1)\n cond2 = (q == 1) & cond0\n cond = cond0 & cond1\n\n # same problem as with ppf; copied from ppf and changed\n output = valarray(shape(cond), value=self.badvalue, typecode='d')\n # output type 'd' to handle nin and inf\n place(output, (q == 0)*(cond == cond), _b)\n place(output, cond2, _a-1)\n\n # call place only if at least 1 valid argument\n if np.any(cond):\n goodargs = argsreduce(cond, *((q,)+args+(loc,)))\n loc, goodargs = goodargs[-1], goodargs[:-1]\n # PB same as ticket 766\n place(output, cond, self._isf(*goodargs) + loc)\n\n if output.ndim == 0:\n return output[()]\n return output\n\n def _entropy(self, *args):\n if hasattr(self, 'pk'):\n return entropy(self.pk)\n else:\n _a, _b = self._get_support(*args)\n return _expect(lambda x: entr(self.pmf(x, *args)),\n _a, _b, self.ppf(0.5, *args), self.inc)\n\n def expect(self, func=None, args=(), loc=0, lb=None, ub=None,\n conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):\n \"\"\"\n Calculate expected value of a function with respect to the distribution\n for discrete distribution by numerical summation.\n\n Parameters\n ----------\n func : callable, optional\n Function for which the expectation value is calculated.\n Takes only one argument.\n The default is the identity mapping f(k) = k.\n args : tuple, optional\n Shape parameters of the distribution.\n loc : float, optional\n Location parameter.\n Default is 0.\n lb, ub : int, optional\n Lower and upper bound for the summation, default is set to the\n support of the distribution, inclusive (``ul <= k <= ub``).\n conditional : bool, optional\n If true then the expectation is corrected by the conditional\n probability of the summation interval. The return value is the\n expectation of the function, `func`, conditional on being in\n the given interval (k such that ``ul <= k <= ub``).\n Default is False.\n maxcount : int, optional\n Maximal number of terms to evaluate (to avoid an endless loop for\n an infinite sum). Default is 1000.\n tolerance : float, optional\n Absolute tolerance for the summation. Default is 1e-10.\n chunksize : int, optional\n Iterate over the support of a distributions in chunks of this size.\n Default is 32.\n\n Returns\n -------\n expect : float\n Expected value.\n\n Notes\n -----\n For heavy-tailed distributions, the expected value may or may not exist,\n depending on the function, `func`. If it does exist, but the sum converges\n slowly, the accuracy of the result may be rather low. For instance, for\n ``zipf(4)``, accuracy for mean, variance in example is only 1e-5.\n increasing `maxcount` and/or `chunksize` may improve the result, but may\n also make zipf very slow.\n\n The function is not vectorized.\n\n \"\"\"\n if func is None:\n def fun(x):\n # loc and args from outer scope\n return (x+loc)*self._pmf(x, *args)\n else:\n def fun(x):\n # loc and args from outer scope\n return func(x+loc)*self._pmf(x, *args)\n # used pmf because _pmf does not check support in randint and there\n # might be problems(?) 
with correct self.a, self.b at this stage maybe\n # not anymore, seems to work now with _pmf\n\n self._argcheck(*args) # (re)generate scalar self.a and self.b\n _a, _b = self._get_support(*args)\n if lb is None:\n lb = _a\n else:\n lb = lb - loc # convert bound for standardized distribution\n if ub is None:\n ub = _b\n else:\n ub = ub - loc # convert bound for standardized distribution\n if conditional:\n invfac = self.sf(lb-1, *args) - self.sf(ub, *args)\n else:\n invfac = 1.0\n\n # iterate over the support, starting from the median\n x0 = self.ppf(0.5, *args)\n res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)\n return res / invfac\n\n\ndef _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,\n chunksize=32):\n \"\"\"Helper for computing the expectation value of `fun`.\"\"\"\n\n # short-circuit if the support size is small enough\n if (ub - lb) <= chunksize:\n supp = np.arange(lb, ub+1, inc)\n vals = fun(supp)\n return np.sum(vals)\n\n # otherwise, iterate starting from x0\n if x0 < lb:\n x0 = lb\n if x0 > ub:\n x0 = ub\n\n count, tot = 0, 0.\n # iterate over [x0, ub] inclusive\n for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):\n count += x.size\n delta = np.sum(fun(x))\n tot += delta\n if abs(delta) < tolerance * x.size:\n break\n if count > maxcount:\n warnings.warn('expect(): sum did not converge', RuntimeWarning)\n return tot\n\n # iterate over [lb, x0)\n for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):\n count += x.size\n delta = np.sum(fun(x))\n tot += delta\n if abs(delta) < tolerance * x.size:\n break\n if count > maxcount:\n warnings.warn('expect(): sum did not converge', RuntimeWarning)\n break\n\n return tot\n\n\ndef _iter_chunked(x0, x1, chunksize=4, inc=1):\n \"\"\"Iterate from x0 to x1 in chunks of chunksize and steps inc.\n\n x0 must be finite, x1 need not be. In the latter case, the iterator is\n infinite.\n Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards\n (make sure to set inc < 0.)\n\n >>> [x for x in _iter_chunked(2, 5, inc=2)]\n [array([2, 4])]\n >>> [x for x in _iter_chunked(2, 11, inc=2)]\n [array([2, 4, 6, 8]), array([10])]\n >>> [x for x in _iter_chunked(2, -5, inc=-2)]\n [array([ 2, 0, -2, -4])]\n >>> [x for x in _iter_chunked(2, -9, inc=-2)]\n [array([ 2, 0, -2, -4]), array([-6, -8])]\n\n \"\"\"\n if inc == 0:\n raise ValueError('Cannot increment by zero.')\n if chunksize <= 0:\n raise ValueError('Chunk size must be positive; got %s.' 
% chunksize)\n\n s = 1 if inc > 0 else -1\n stepsize = abs(chunksize * inc)\n\n x = x0\n while (x - x1) * inc < 0:\n delta = min(stepsize, abs(x - x1))\n step = delta * s\n supp = np.arange(x, x + step, inc)\n x += step\n yield supp\n\n\nclass rv_sample(rv_discrete):\n \"\"\"A 'sample' discrete distribution defined by the support and values.\n\n The ctor ignores most of the arguments, only needs the `values` argument.\n \"\"\"\n def __init__(self, a=0, b=inf, name=None, badvalue=None,\n moment_tol=1e-8, values=None, inc=1, longname=None,\n shapes=None, extradoc=None, seed=None):\n\n super(rv_discrete, self).__init__(seed)\n\n if values is None:\n raise ValueError(\"rv_sample.__init__(..., values=None,...)\")\n\n # cf generic freeze\n self._ctor_param = dict(\n a=a, b=b, name=name, badvalue=badvalue,\n moment_tol=moment_tol, values=values, inc=inc,\n longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)\n\n if badvalue is None:\n badvalue = nan\n self.badvalue = badvalue\n self.moment_tol = moment_tol\n self.inc = inc\n self.shapes = shapes\n self.vecentropy = self._entropy\n\n xk, pk = values\n\n if np.shape(xk) != np.shape(pk):\n raise ValueError(\"xk and pk must have the same shape.\")\n if np.less(pk, 0.0).any():\n raise ValueError(\"All elements of pk must be non-negative.\")\n if not np.allclose(np.sum(pk), 1):\n raise ValueError(\"The sum of provided pk is not 1.\")\n\n indx = np.argsort(np.ravel(xk))\n self.xk = np.take(np.ravel(xk), indx, 0)\n self.pk = np.take(np.ravel(pk), indx, 0)\n self.a = self.xk[0]\n self.b = self.xk[-1]\n\n self.qvals = np.cumsum(self.pk, axis=0)\n\n self.shapes = ' ' # bypass inspection\n self._construct_argparser(meths_to_inspect=[self._pmf],\n locscale_in='loc=0',\n # scale=1 for discrete RVs\n locscale_out='loc, 1')\n\n self._construct_docstrings(name, longname, extradoc)\n\n def _get_support(self, *args):\n \"\"\"Return the support of the (unscaled, unshifted) distribution.\n\n Parameters\n ----------\n arg1, arg2, ... : array_like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information).\n Returns\n -------\n a, b : numeric (float, or int or +/-np.inf)\n end-points of the distribution's support.\n \"\"\"\n return self.a, self.b\n\n def _pmf(self, x):\n return np.select([x == k for k in self.xk],\n [np.broadcast_arrays(p, x)[0] for p in self.pk], 0)\n\n def _cdf(self, x):\n xx, xxk = np.broadcast_arrays(x[:, None], self.xk)\n indx = np.argmax(xxk > xx, axis=-1) - 1\n return self.qvals[indx]\n\n def _ppf(self, q):\n qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)\n indx = argmax(sqq >= qq, axis=-1)\n return self.xk[indx]\n\n def _rvs(self, size=None, random_state=None):\n # Need to define it explicitly, otherwise .rvs() with size=None\n # fails due to explicit broadcasting in _ppf\n U = random_state.uniform(size=size)\n if size is None:\n U = np.array(U, ndmin=1)\n Y = self._ppf(U)[0]\n else:\n Y = self._ppf(U)\n return Y\n\n def _entropy(self):\n return entropy(self.pk)\n\n def generic_moment(self, n):\n n = asarray(n)\n return np.sum(self.xk**n[np.newaxis, ...] 
* self.pk, axis=0)\n\n\ndef _check_shape(argshape, size):\n \"\"\"\n This is a utility function used by `_rvs()` in the class geninvgauss_gen.\n It compares the tuple argshape to the tuple size.\n\n Parameters\n ----------\n argshape : tuple of integers\n Shape of the arguments.\n size : tuple of integers or integer\n Size argument of rvs().\n\n Returns\n -------\n The function returns two tuples, scalar_shape and bc.\n\n scalar_shape : tuple\n Shape to which the 1-d array of random variates returned by\n _rvs_scalar() is converted when it is copied into the\n output array of _rvs().\n\n bc : tuple of booleans\n bc is an tuple the same length as size. bc[j] is True if the data\n associated with that index is generated in one call of _rvs_scalar().\n\n \"\"\"\n scalar_shape = []\n bc = []\n for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],\n fillvalue=1):\n if sizedim > argdim or (argdim == sizedim == 1):\n scalar_shape.append(sizedim)\n bc.append(True)\n else:\n bc.append(False)\n return tuple(scalar_shape[::-1]), tuple(bc[::-1])\n\n\ndef get_distribution_names(namespace_pairs, rv_base_class):\n \"\"\"\n Collect names of statistical distributions and their generators.\n\n Parameters\n ----------\n namespace_pairs : sequence\n A snapshot of (name, value) pairs in the namespace of a module.\n rv_base_class : class\n The base class of random variable generator classes in a module.\n\n Returns\n -------\n distn_names : list of strings\n Names of the statistical distributions.\n distn_gen_names : list of strings\n Names of the generators of the statistical distributions.\n Note that these are not simply the names of the statistical\n distributions, with a _gen suffix added.\n\n \"\"\"\n distn_names = []\n distn_gen_names = []\n for name, value in namespace_pairs:\n if name.startswith('_'):\n continue\n if name.endswith('_gen') and issubclass(value, rv_base_class):\n distn_gen_names.append(name)\n if isinstance(value, rv_base_class):\n distn_names.append(name)\n return distn_names, distn_gen_names\n"
] | [
[
"scipy.special.rel_entr",
"numpy.sqrt",
"scipy.special.ive",
"numpy.asarray",
"numpy.cumsum",
"numpy.all",
"numpy.max",
"scipy._lib.doccer.docformat",
"numpy.any",
"numpy.place",
"scipy.misc.derivative",
"scipy.special.entr",
"scipy.special.chndtr",
"numpy.arange",
"numpy.less",
"numpy.atleast_1d",
"scipy._lib._util.check_random_state",
"numpy.argmax",
"numpy.count_nonzero",
"numpy.ravel",
"numpy.log",
"numpy.power",
"numpy.min",
"numpy.isnan",
"numpy.floor",
"numpy.broadcast_arrays",
"numpy.errstate",
"scipy.integrate.quad",
"numpy.find_common_type",
"numpy.extract",
"scipy.special.xlogy",
"numpy.sum",
"scipy._lib._util.getfullargspec_no_self",
"numpy.array",
"numpy.isfinite",
"numpy.ones",
"scipy.special.comb",
"numpy.vectorize",
"numpy.shape",
"scipy.optimize.brentq",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
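
Every record in this dump follows the same layout as the row that closes above: a repository name, parallel sequences of commit hashes, file paths, file contents and per-file API lists, plus a map of candidate library versions. The sketch below shows one way such a record could be loaded and its parallel fields sanity-checked. It is only an illustration: the JSON Lines serialization, the file name `records.jsonl`, the `iter_records` helper and the JSON key names are all assumptions, not something the dump itself specifies.

```python
import json
from typing import Iterator


def iter_records(path: str) -> Iterator[dict]:
    """Yield one dataset row per line, assuming a JSON Lines serialization."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            if line.strip():
                yield json.loads(line)


for record in iter_records("records.jsonl"):  # hypothetical file name
    # file_path, code and apis are assumed to be parallel sequences:
    # one entry per source file in the repository snapshot.
    assert len(record["file_path"]) == len(record["code"]) == len(record["apis"])
    for path, source, apis in zip(record["file_path"], record["code"], record["apis"]):
        print(record["repo_name"], path,
              f"{len(source.splitlines())} lines, {len(apis)} extracted API calls")
```
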
saikrishna-pallerla/efficientdet-pytorch | [
"dc7b790f537d28476a26af6f793acc4757becd0d"
] | [
"effdet/data/transforms.py"
] | [
"\"\"\" COCO transforms (quick and dirty)\n\nHacked together by Ross Wightman\n\"\"\"\nimport torch\nfrom PIL import Image\nimport numpy as np\nimport random\nimport math\n\nIMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)\nIMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)\nIMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)\nIMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)\n\n\nclass ImageToNumpy:\n\n def __call__(self, pil_img, annotations: dict):\n np_img = np.array(pil_img, dtype=np.uint8)\n if np_img.ndim < 3:\n np_img = np.expand_dims(np_img, axis=-1)\n np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW\n return np_img, annotations\n\n\nclass ImageToTensor:\n\n def __init__(self, dtype=torch.float32):\n self.dtype = dtype\n\n def __call__(self, pil_img, annotations: dict):\n np_img = np.array(pil_img, dtype=np.uint8)\n if np_img.ndim < 3:\n np_img = np.expand_dims(np_img, axis=-1)\n np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW\n return torch.from_numpy(np_img).to(dtype=self.dtype), annotations\n\n\ndef _pil_interp(method):\n if method == 'bicubic':\n return Image.BICUBIC\n elif method == 'lanczos':\n return Image.LANCZOS\n elif method == 'hamming':\n return Image.HAMMING\n else:\n # default bilinear, do we want to allow nearest?\n return Image.BILINEAR\n\n\n_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)\n\n\ndef clip_boxes_(boxes, img_size):\n height, width = img_size\n clip_upper = np.array([height, width] * 2, dtype=boxes.dtype)\n np.clip(boxes, 0, clip_upper, out=boxes)\n\n\ndef clip_boxes(boxes, img_size):\n clipped_boxes = boxes.copy()\n clip_boxes_(clipped_boxes, img_size)\n return clipped_boxes\n\n\ndef _size_tuple(size):\n if isinstance(size, int):\n return size, size\n else:\n assert len(size) == 2\n return size\n\n\nclass ResizePad:\n\n def __init__(self, target_size: int, interpolation: str = 'bilinear', fill_color: tuple = (0, 0, 0)):\n self.target_size = _size_tuple(target_size)\n self.interpolation = interpolation\n self.fill_color = fill_color\n\n def __call__(self, img, anno: dict):\n width, height = img.size\n\n img_scale_y = self.target_size[0] / height\n img_scale_x = self.target_size[1] / width\n img_scale = min(img_scale_y, img_scale_x)\n scaled_h = int(height * img_scale)\n scaled_w = int(width * img_scale)\n\n new_img = Image.new(\"RGB\", (self.target_size[1], self.target_size[0]), color=self.fill_color)\n interp_method = _pil_interp(self.interpolation)\n img = img.resize((scaled_w, scaled_h), interp_method)\n new_img.paste(img)\n\n if 'bbox' in anno:\n # FIXME haven't tested this path since not currently using dataset annotations for train/eval\n bbox = anno['bbox']\n bbox[:, :4] *= img_scale\n clip_boxes_(bbox, (scaled_h, scaled_w))\n valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)\n anno['bbox'] = bbox[valid_indices, :]\n anno['cls'] = anno['cls'][valid_indices]\n\n anno['img_scale'] = 1. 
/ img_scale # back to original\n\n return new_img, anno\n\n\nclass RandomResizePad:\n\n def __init__(self, target_size: int, scale: tuple = (0.1, 2.0), interpolation: str = 'random',\n fill_color: tuple = (0, 0, 0)):\n self.target_size = _size_tuple(target_size)\n self.scale = scale\n if interpolation == 'random':\n self.interpolation = _RANDOM_INTERPOLATION\n else:\n self.interpolation = _pil_interp(interpolation)\n self.fill_color = fill_color\n\n def _get_params(self, img):\n # Select a random scale factor.\n scale_factor = random.uniform(*self.scale)\n scaled_target_height = scale_factor * self.target_size[0]\n scaled_target_width = scale_factor * self.target_size[1]\n\n # Recompute the accurate scale_factor using rounded scaled image size.\n width, height = img.size\n img_scale_y = scaled_target_height / height\n img_scale_x = scaled_target_width / width\n img_scale = min(img_scale_y, img_scale_x)\n\n # Select non-zero random offset (x, y) if scaled image is larger than target size\n scaled_h = int(height * img_scale)\n scaled_w = int(width * img_scale)\n offset_y = scaled_h - self.target_size[0]\n offset_x = scaled_w - self.target_size[1]\n offset_y = int(max(0.0, float(offset_y)) * random.uniform(0, 1))\n offset_x = int(max(0.0, float(offset_x)) * random.uniform(0, 1))\n return scaled_h, scaled_w, offset_y, offset_x, img_scale\n\n def __call__(self, img, anno: dict):\n scaled_h, scaled_w, offset_y, offset_x, img_scale = self._get_params(img)\n\n if isinstance(self.interpolation, (tuple, list)):\n interpolation = random.choice(self.interpolation)\n else:\n interpolation = self.interpolation\n img = img.resize((scaled_w, scaled_h), interpolation)\n right, lower = min(scaled_w, offset_x + self.target_size[1]), min(scaled_h, offset_y + self.target_size[0])\n img = img.crop((offset_x, offset_y, right, lower))\n new_img = Image.new(\"RGB\", (self.target_size[1], self.target_size[0]), color=self.fill_color)\n new_img.paste(img)\n\n if 'bbox' in anno:\n # FIXME not fully tested\n bbox = anno['bbox'].copy() # FIXME copy for debugger inspection, back to inplace\n bbox[:, :4] *= img_scale\n box_offset = np.stack([offset_y, offset_x] * 2)\n bbox -= box_offset\n clip_boxes_(bbox, (scaled_h, scaled_w))\n valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)\n anno['bbox'] = bbox[valid_indices, :]\n anno['cls'] = anno['cls'][valid_indices]\n\n anno['img_scale'] = 1. 
/ img_scale # back to original\n\n return new_img, anno\n\n\nclass RandomFlip:\n\n def __init__(self, horizontal=True, vertical=False, prob=0.5):\n self.horizontal = horizontal\n self.vertical = vertical\n self.prob = prob\n\n def _get_params(self):\n do_horizontal = random.random() < self.prob if self.horizontal else False\n do_vertical = random.random() < self.prob if self.vertical else False\n return do_horizontal, do_vertical\n\n def __call__(self, img, annotations: dict):\n do_horizontal, do_vertical = self._get_params()\n width, height = img.size\n\n def _fliph(bbox):\n x_max = width - bbox[:, 1]\n x_min = width - bbox[:, 3]\n bbox[:, 1] = x_min\n bbox[:, 3] = x_max\n\n def _flipv(bbox):\n y_max = height - bbox[:, 0]\n y_min = height - bbox[:, 2]\n bbox[:, 0] = y_min\n bbox[:, 2] = y_max\n\n if do_horizontal and do_vertical:\n img = img.transpose(Image.ROTATE_180)\n if 'bbox' in annotations:\n _fliph(annotations['bbox'])\n _flipv(annotations['bbox'])\n elif do_horizontal:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if 'bbox' in annotations:\n _fliph(annotations['bbox'])\n elif do_vertical:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n if 'bbox' in annotations:\n _flipv(annotations['bbox'])\n\n return img, annotations\n\n\ndef resolve_fill_color(fill_color, img_mean=IMAGENET_DEFAULT_MEAN):\n if isinstance(fill_color, tuple):\n assert len(fill_color) == 3\n fill_color = fill_color\n else:\n try:\n int_color = int(fill_color)\n fill_color = (int_color,) * 3\n except ValueError:\n assert fill_color == 'mean'\n fill_color = tuple([int(round(255 * x)) for x in img_mean])\n return fill_color\n\n\nclass Compose:\n\n def __init__(self, transforms: list):\n self.transforms = transforms\n\n def __call__(self, img, annotations: dict):\n for t in self.transforms:\n img, annotations = t(img, annotations)\n return img, annotations\n\n\ndef transforms_coco_eval(\n img_size=224,\n interpolation='bilinear',\n use_prefetcher=False,\n fill_color='mean',\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD):\n\n fill_color = resolve_fill_color(fill_color, mean)\n\n image_tfl = [\n ResizePad(\n target_size=img_size, interpolation=interpolation, fill_color=fill_color),\n ImageToNumpy(),\n ]\n\n assert use_prefetcher, \"Only supporting prefetcher usage right now\"\n\n image_tf = Compose(image_tfl)\n return image_tf\n\n\ndef transforms_coco_train(\n img_size=224,\n interpolation='random',\n use_prefetcher=False,\n fill_color='mean',\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD):\n\n fill_color = resolve_fill_color(fill_color, mean)\n\n image_tfl = [\n RandomFlip(horizontal=True, prob=0.5),\n RandomResizePad(\n target_size=img_size, interpolation=interpolation, fill_color=fill_color),\n ImageToNumpy(),\n ]\n\n assert use_prefetcher, \"Only supporting prefetcher usage right now\"\n\n image_tf = Compose(image_tfl)\n return image_tf\n"
] | [
[
"numpy.expand_dims",
"numpy.clip",
"torch.from_numpy",
"numpy.stack",
"numpy.moveaxis",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jtwhite79/MetPy | [
"8f1880be1ee98c17cd00ae556324386d2a6301ac"
] | [
"metpy/calc/tests/test_basic.py"
] | [
"# Copyright (c) 2008-2015 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom metpy.units import units\nfrom metpy.testing import assert_almost_equal, assert_array_almost_equal\nfrom metpy.calc.basic import * # noqa\n\n\ndef test_wind_comps_basic():\n 'Test the basic wind component calculation.'\n speed = np.array([4, 4, 4, 4, 25, 25, 25, 25, 10.]) * units.mph\n dirs = np.array([0, 45, 90, 135, 180, 225, 270, 315, 360]) * units.deg\n s2 = np.sqrt(2.)\n\n u, v = get_wind_components(speed, dirs)\n\n true_u = np.array([0, -4 / s2, -4, -4 / s2, 0, 25 / s2, 25, 25 / s2, 0]) * units.mph\n true_v = np.array([-4, -4 / s2, 0, 4 / s2, 25, 25 / s2, 0, -25 / s2, -10]) * units.mph\n\n assert_array_almost_equal(true_u, u, 4)\n assert_array_almost_equal(true_v, v, 4)\n\n\ndef test_wind_comps_scalar():\n 'Test scalar wind components'\n u, v = get_wind_components(8 * units('m/s'), 150 * units.deg)\n assert_almost_equal(u, -4 * units('m/s'), 3)\n assert_almost_equal(v, 6.9282 * units('m/s'), 3)\n\n\ndef test_speed():\n 'Basic test of wind speed calculation'\n u = np.array([4., 2., 0., 0.]) * units('m/s')\n v = np.array([0., 2., 4., 0.]) * units('m/s')\n\n speed = get_wind_speed(u, v)\n\n s2 = np.sqrt(2.)\n true_speed = np.array([4., 2 * s2, 4., 0.]) * units('m/s')\n\n assert_array_almost_equal(true_speed, speed, 4)\n\n\ndef test_dir():\n 'Basic test of wind direction calculation'\n u = np.array([4., 2., 0., 0.]) * units('m/s')\n v = np.array([0., 2., 4., 0.]) * units('m/s')\n\n direc = get_wind_dir(u, v)\n\n true_dir = np.array([270., 225., 180., 270.]) * units.deg\n\n assert_array_almost_equal(true_dir, direc, 4)\n\n\ndef test_speed_dir_roundtrip():\n 'Convert from wind speed and direction to u,v and back'\n # Test each quadrant of the whole circle\n wspd = np.array([15., 5., 2., 10.]) * units.meters / units.seconds\n wdir = np.array([160., 30., 225., 350.]) * units.degrees\n\n u, v = get_wind_components(wspd, wdir)\n\n wdir_out = get_wind_dir(u, v)\n wspd_out = get_wind_speed(u, v)\n\n assert_array_almost_equal(wspd, wspd_out, 4)\n assert_array_almost_equal(wdir, wdir_out, 4)\n\n\ndef test_scalar_speed():\n 'Test wind speed with scalars'\n s = get_wind_speed(-3. * units('m/s'), -4. * units('m/s'))\n assert_almost_equal(s, 5. * units('m/s'), 3)\n\n\ndef test_scalar_dir():\n 'Test wind direction with scalars'\n d = get_wind_dir(3. * units('m/s'), 4. 
* units('m/s'))\n assert_almost_equal(d, 216.870 * units.deg, 3)\n\n\ndef test_windchill_scalar():\n 'Test wind chill with scalars'\n wc = windchill(-5 * units.degC, 35 * units('m/s'))\n assert_almost_equal(wc, -18.9357 * units.degC, 0)\n\n\ndef test_windchill_basic():\n 'Test the basic wind chill calculation.'\n temp = np.array([40, -10, -45, 20]) * units.degF\n speed = np.array([5, 55, 25, 15]) * units.mph\n\n wc = windchill(temp, speed)\n values = np.array([36, -46, -84, 6]) * units.degF\n assert_array_almost_equal(wc, values, 0)\n\n\ndef test_windchill_invalid():\n 'Test for values that should be masked.'\n temp = np.array([10, 51, 49, 60, 80, 81]) * units.degF\n speed = np.array([4, 4, 3, 1, 10, 39]) * units.mph\n\n wc = windchill(temp, speed)\n mask = np.array([False, True, True, True, True, True])\n assert_array_equal(wc.mask, mask)\n\n\ndef test_windchill_undefined_flag():\n 'Tests whether masking values can be disabled.'\n temp = units.Quantity(np.ma.array([49, 50, 49, 60, 80, 81]), units.degF)\n speed = units.Quantity(([4, 4, 3, 1, 10, 39]), units.mph)\n\n wc = windchill(temp, speed, mask_undefined=False)\n mask = np.array([False] * 6)\n assert_array_equal(wc.mask, mask)\n\n\ndef test_windchill_face_level():\n 'Tests using the face_level flag'\n temp = np.array([20, 0, -20, -40]) * units.degF\n speed = np.array([15, 30, 45, 60]) * units.mph\n\n wc = windchill(temp, speed, face_level_winds=True)\n values = np.array([3, -30, -64, -98]) * units.degF\n assert_array_almost_equal(wc, values, 0)\n\n\ndef test_heat_index_basic():\n 'Test the basic heat index calculation.'\n temp = np.array([80, 88, 92, 110]) * units.degF\n rh = np.array([40, 100, 70, 40]) * units.percent\n\n hi = heat_index(temp, rh)\n values = np.array([80, 121, 112, 136]) * units.degF\n assert_array_almost_equal(hi, values, 0)\n\n\ndef test_heat_index_scalar():\n 'Test heat index using scalars'\n hi = heat_index(96 * units.degF, 65 * units.percent)\n assert_almost_equal(hi, 121 * units.degF, 0)\n\n\ndef test_heat_index_invalid():\n 'Test for values that should be masked.'\n temp = np.array([80, 88, 92, 79, 30, 81]) * units.degF\n rh = np.array([40, 39, 2, 70, 50, 39]) * units.percent\n\n hi = heat_index(temp, rh)\n mask = np.array([False, True, True, True, True, True])\n assert_array_equal(hi.mask, mask)\n\n\ndef test_heat_index_undefined_flag():\n 'Tests whether masking values can be disabled.'\n temp = units.Quantity(np.ma.array([80, 88, 92, 79, 30, 81]), units.degF)\n rh = np.ma.array([40, 39, 2, 70, 50, 39]) * units.percent\n\n hi = heat_index(temp, rh, mask_undefined=False)\n mask = np.array([False] * 6)\n assert_array_equal(hi.mask, mask)\n\n\ndef test_heat_index_units():\n 'Test units coming out of heat index'\n temp = units.Quantity([35., 20.], units.degC)\n rh = 70 * units.percent\n hi = heat_index(temp, rh)\n assert_almost_equal(hi.to('degC'), units.Quantity([50.3405, np.nan], units.degC), 4)\n\n\ndef test_heat_index_ratio():\n 'Test giving humidity as number [0, 1]'\n temp = units.Quantity([35., 20.], units.degC)\n rh = 0.7\n hi = heat_index(temp, rh)\n assert_almost_equal(hi.to('degC'), units.Quantity([50.3405, np.nan], units.degC), 4)\n\n# class TestIrrad(object):\n# def test_basic(self):\n# 'Test the basic solar irradiance calculation.'\n# from datetime import date\n\n# d = date(2008, 9, 28)\n# lat = 35.25\n# hours = np.linspace(6,18,10)\n\n# s = solar_irradiance(lat, d, hours)\n# values = np.array([0., 344.1, 682.6, 933.9, 1067.6, 1067.6, 933.9,\n# 682.6, 344.1, 0.])\n# assert_array_almost_equal(s, 
values, 1)\n\n# def test_scalar(self):\n# from datetime import date\n# d = date(2008, 9, 28)\n# lat = 35.25\n# hour = 9.5\n# s = solar_irradiance(lat, d, hour)\n# assert_almost_equal(s, 852.1, 1)\n\n# def test_invalid(self):\n# 'Test for values that should be masked.'\n# from datetime import date\n# d = date(2008, 9, 28)\n# lat = 35.25\n# hours = np.linspace(0,22,12)\n# s = solar_irradiance(lat, d, hours)\n\n# mask = np.array([ True, True, True, True, False, False, False,\n# False, False, True, True, True])\n# assert_array_equal(s.mask, mask)\n\n\ndef test_pressure_to_heights_basic():\n 'Tests basic pressure to height calculation.'\n pressures = np.array([975.2, 987.5, 956., 943.]) * units.mbar\n heights = pressure_to_height_std(pressures)\n values = np.array([321.5, 216.5, 487.6, 601.7]) * units.meter\n assert_almost_equal(heights, values, 1)\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.sqrt",
"numpy.ma.array"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
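
Each record also carries a per-library map of candidate version strings; the row above, for instance, lists numpy candidates ranging from 1.7 up to 1.20 while leaving the other libraries unconstrained. As a hedged sketch of how records that actually constrain a given library could be filtered out, reusing the hypothetical `iter_records` helper and JSON Lines layout assumed earlier:

```python
from typing import Iterable, Iterator, Tuple


def records_with_versions(records: Iterable[dict], library: str) -> Iterator[Tuple[str, list]]:
    """Yield (repo_name, candidate versions) for rows that constrain `library`."""
    for record in records:
        # possible_versions is assumed to be a list holding one
        # {library: [version strings]} mapping per record.
        for version_map in record["possible_versions"]:
            versions = version_map.get(library, [])
            if versions:
                yield record["repo_name"], versions


# e.g. list repositories whose numpy version could be narrowed down at all
for repo, versions in records_with_versions(iter_records("records.jsonl"), "numpy"):
    print(repo, "->", ", ".join(versions))
```
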
LBJ-Wade/GALLUMI_public | [
"4529ab32ccfc281e5976f482fe556b672b8f464f"
] | [
"Scripts/Plotting/Posteriors_cosmo_model1/Posteriors_cosmo_model1_alternative_dust.py"
] | [
"import numpy as np\nfrom matplotlib import pyplot as plt\nimport glob\nfrom matplotlib import patches as mpatches\nimport scipy.ndimage\nfrom scipy.interpolate import PchipInterpolator\nplt.style.use(\"../template.mplstyle\")\n\n# purple - green - darkgoldenrod - blue - red\ncolors = ['purple', '#306B37', 'darkgoldenrod', '#3F7BB6', '#BF4145']\nlinestyles = [(0, (1,1.05)), (0, (3, 1, 1, 1)), (0, (1,3)), (0, (3,3.65)), (0, (3,2.772)), (0, (3, 1, 1, 1, 1, 1))]\n\n#########################################################################################\n\ndef ctr_level2d(histogram2d, lvl, infinite=False):\n hist = histogram2d.flatten()*1.\n hist.sort()\n cum_hist = np.cumsum(hist[::-1])\n cum_hist /= cum_hist[-1]\n\n alvl = np.searchsorted(cum_hist, lvl)[::-1]\n clist = [0]+[hist[-i] for i in alvl]+[hist.max()]\n if not infinite:\n return clist[1:]\n return clist\n\ndef get_hist2d(datax, datay, num_bins=40, weights=[None]):\n if not any(weights):\n weights = np.ones(len(datax))\n hist, bin_edgesx, bin_edgesy = np.histogram2d(datax, datay, bins=num_bins, weights=weights)\n bin_centresx = 0.5*(bin_edgesx[1:]+bin_edgesx[:-1])\n bin_centresy = 0.5*(bin_edgesy[1:]+bin_edgesy[:-1])\n return hist, bin_edgesx, bin_edgesy, bin_centresx, bin_centresy\n\ndef adjust_lightness(color, amount=0.5):\n import matplotlib.colors as mc\n import colorsys\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])\n\ndef plot_hist2d(datax, datay, ax, num_bins=30, weights=[None], color=None, zorder=0):\n if not any(weights):\n weights = np.ones(len(datax))\n if color == None:\n color=\"black\"\n\n hist, bin_edgesx, bin_edgesy, bin_centresx, bin_centresy = get_hist2d(datax, datay, num_bins=num_bins, weights=weights)\n\n interpolation_smoothing = 3.\n gaussian_smoothing = 0.5\n sigma = interpolation_smoothing * gaussian_smoothing\n\n interp_y_centers = scipy.ndimage.zoom(bin_centresy, interpolation_smoothing, mode='reflect')\n interp_x_centers = scipy.ndimage.zoom(bin_centresx,interpolation_smoothing, mode='reflect')\n interp_hist = scipy.ndimage.zoom(hist, interpolation_smoothing, mode='reflect')\n interp_smoothed_hist = scipy.ndimage.filters.gaussian_filter(interp_hist, [sigma,sigma], mode='reflect')\n\n ax.contourf(interp_x_centers, interp_y_centers, np.transpose(interp_smoothed_hist), colors=[adjust_lightness(color,1.4), adjust_lightness(color,0.8)], levels=ctr_level2d(interp_smoothed_hist.copy(), [0.68, 0.95]), zorder=zorder, alpha=0.45)\n ax.contour(interp_x_centers, interp_y_centers, np.transpose(interp_smoothed_hist), colors=[color, adjust_lightness(color,0.8)], linewidths=2., levels=ctr_level2d(interp_smoothed_hist.copy(), [0.68, 0.95]), zorder=zorder)\n\n\n##################################################################################################\n\nUVLF_Overzier = []\nUVLF_Bouwens = []\nUVLF_Casey = []\n\nfor filepath in glob.iglob('../../Data/UVLF_HST_ST_model1/*__*.txt'):\n data = np.loadtxt(filepath)\n UVLF_Overzier.append(data)\nfor filepath in glob.iglob('../../Data/UVLF_HST_ST_model1_Bouwens2016/*__*.txt'):\n data = np.loadtxt(filepath)\n UVLF_Bouwens.append(data)\nfor filepath in glob.iglob('../../Data/UVLF_HST_ST_model1_Casey2014/*__*.txt'):\n data = np.loadtxt(filepath)\n UVLF_Casey.append(data)\n\nUVLF_Overzier = np.vstack(np.array(UVLF_Overzier))\nUVLF_Bouwens = np.vstack(np.array(UVLF_Bouwens))\nUVLF_Casey = np.vstack(np.array(UVLF_Casey))\n\nbetadata = 
np.loadtxt(\"Beta_parameters.txt\", unpack=True)\nbetainterp = PchipInterpolator(betadata[0], betadata[1])\ndbetadMUVinterp = PchipInterpolator(betadata[0], betadata[2])\n\ndef betaAverage(z, MUV):\n if MUV < -19.5:\n return dbetadMUVinterp(z) * (MUV + 19.5) + betainterp(z)\n return (betainterp(z) + 2.33) * np.exp((dbetadMUVinterp(z) * (MUV + 19.5)) / (betainterp(z) + 2.33)) - 2.33\n\[email protected]\ndef AUV(z, MUV, index):\n if z < 2.5 or z > 8:\n return 0.\n sigmabeta = 0.34\n if index==0:\n return max(0., 4.54 + 0.2 * np.log(10) * (2.07**2) * (sigmabeta**2) + 2.07 * betaAverage(z, MUV)) # Overzier 2011\n if index==1:\n return max(0., 3.36 + 0.2 * np.log(10) * (2.04**2) * (sigmabeta**2) + 2.04 * betaAverage(z, MUV)) # Casey 2014\n if index==2:\n return max(0., 2.45 + 0.2 * np.log(10) * (1.1**2) * (sigmabeta**2) + 1.1 * betaAverage(z, MUV)) # Bouwens 2016\n\n\nplt.figure(figsize=(24.,6.))\nax1 = plt.subplot(131)\nax2 = plt.subplot(132)\nax3 = plt.subplot(133)\nax1.tick_params(axis='x', which='major', pad=6)\nax2.tick_params(axis='x', which='major', pad=6)\nax3.tick_params(axis='x', which='major', pad=6)\nax1.tick_params(axis='both', which='major', labelsize=26)\nax1.tick_params(axis='both', which='minor', labelsize=26)\nax2.tick_params(axis='both', which='major', labelsize=26)\nax2.tick_params(axis='both', which='minor', labelsize=26)\nax3.tick_params(axis='both', which='major', labelsize=26)\nax3.tick_params(axis='both', which='minor', labelsize=26)\n\nfor axis in ['top','bottom','left','right']:\n ax1.spines[axis].set_linewidth(2.2)\n ax2.spines[axis].set_linewidth(2.2)\n ax3.spines[axis].set_linewidth(2.2)\n\n###############\n\nax1.plot(MUV:=np.linspace(-23,-16, 100), AUV(6., MUV, 0), color=colors[3], lw=2.5)\nax1.plot(MUV:=np.linspace(-23,-16, 100), AUV(6., MUV, 1), linestyle=linestyles[2], color=colors[1], lw=3.)\nax1.plot(MUV:=np.linspace(-23,-16, 100), AUV(6., MUV, 2), linestyle=linestyles[3], color=colors[-1], lw=2.5)\nax1.set_xlabel(r'$M_\\mathrm{UV}$', labelpad=10, fontsize=30)\nax1.set_ylabel(r'$A_\\mathrm{UV}$', labelpad=12, fontsize=30)\nax1.set_xlim(-23, -16)\nax1.set_ylim(0., 1.3)\n\npatch_blue = mpatches.Patch(color=colors[3], lw=1.5, label=r\"$\\mathrm{Overzier\\ 2011}$\")\npatch_green = mpatches.Patch(color=colors[1], lw=1.5, label=r\"$\\mathrm{Casey\\ 2014}$\")\npatch_yellow = mpatches.Patch(color=colors[-1], lw=1.5, label=r\"$\\mathrm{Bouwens\\ 2016}$\")\nleg = ax1.legend(handles=[patch_blue, patch_green,patch_yellow], loc=\"upper right\", frameon=False, markerfirst=False, prop={'size': 21}, handlelength=1.9, handletextpad=0.5)\n\n###############\n\nplot_hist2d(datax=UVLF_Overzier[:,-7], datay=UVLF_Overzier[:,2], ax=ax2, num_bins=20, weights=UVLF_Overzier[:,0], color=colors[3], zorder=3)\nplot_hist2d(datax=UVLF_Bouwens[:,-7], datay=UVLF_Bouwens[:,2], ax=ax2, num_bins=20, weights=UVLF_Bouwens[:,0], color=colors[-1], zorder=2)\nplot_hist2d(datax=UVLF_Casey[:,-7], datay=UVLF_Casey[:,2], ax=ax2, num_bins=20, weights=UVLF_Casey[:,0], color=colors[1], zorder=1)\nax2.set_xlabel(r'$\\Omega_\\mathrm{m}$', labelpad=10, fontsize=30)\nax2.set_ylabel(r'$\\sigma_8$', labelpad=8, fontsize=30)\nax2.set_xlim(0.2, 0.4)\nax2.set_ylim(0.3, 1.3)\n\n###############\n\nplot_hist2d(datax=UVLF_Overzier[:,5], datay=UVLF_Overzier[:,2], ax=ax3, num_bins=20, weights=UVLF_Overzier[:,0], color=colors[3], zorder=3)\nplot_hist2d(datax=UVLF_Bouwens[:,5], datay=UVLF_Bouwens[:,2], ax=ax3, num_bins=20, weights=UVLF_Bouwens[:,0], color=colors[-1], zorder=2)\nplot_hist2d(datax=UVLF_Casey[:,5], 
datay=UVLF_Casey[:,2], ax=ax3, num_bins=20, weights=UVLF_Casey[:,0], color=colors[1], zorder=1)\nax3.set_ylabel(r'$\\sigma_8$', labelpad=8, fontsize=30)\nax3.set_xlabel(r'$n_\\mathrm{s}$', labelpad=10, fontsize=30)\nax3.set_xlim(0.7, 1.3)\nax3.set_ylim(0.3, 1.3)\n\nplt.savefig(\"Posteriors_cosmo_model1_alternative_dust.pdf\")\n"
] | [
[
"matplotlib.patches.Patch",
"numpy.log",
"matplotlib.colors.to_rgb",
"numpy.linspace",
"numpy.cumsum",
"matplotlib.pyplot.savefig",
"numpy.transpose",
"matplotlib.pyplot.subplot",
"scipy.interpolate.PchipInterpolator",
"numpy.histogram2d",
"numpy.searchsorted",
"numpy.array",
"matplotlib.pyplot.style.use",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
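
The API list is what makes rows like the one above searchable: it enumerates the fully qualified library calls detected in each file (numpy.cumsum, scipy.interpolate.PchipInterpolator, matplotlib.pyplot.savefig, and so on). A rough sketch, under the same JSON Lines and field-name assumptions as before, of tallying how often each call appears across the whole dump; a `Counter` is just one convenient way to do this, not something the dataset prescribes.

```python
from collections import Counter
from typing import Iterable


def count_api_calls(records: Iterable[dict]) -> Counter:
    """Tally fully qualified API names across every file of every record."""
    counts: Counter = Counter()
    for record in records:
        for file_apis in record["apis"]:  # one list of API strings per file
            counts.update(file_apis)
    return counts


# Print the ten most frequent calls, e.g. to see which numpy/scipy entry points dominate.
for name, n in count_api_calls(iter_records("records.jsonl")).most_common(10):
    print(f"{name}: {n}")
```
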
Yugeeth/chat-bot | [
"3198fb160f743c7be1f377d2febb889423da8c06"
] | [
"train.py"
] | [
"import numpy as np\r\nimport random\r\nimport json\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.utils.data import Dataset, DataLoader\r\n\r\nfrom nltk_utils import bag_of_words, tokenize, stem\r\nfrom model import NeuralNet\r\n\r\nwith open('intents.json', 'r') as f:\r\n intents = json.load(f)\r\n\r\nall_words = []\r\ntags = []\r\nxy = []\r\n# loop through each sentence in our intents patterns\r\nfor intent in intents['intents']:\r\n tag = intent['tag']\r\n # add to tag list\r\n tags.append(tag)\r\n for pattern in intent['patterns']:\r\n # tokenize each word in the sentence\r\n w = tokenize(pattern)\r\n # add to our words list\r\n all_words.extend(w)\r\n # add to xy pair\r\n xy.append((w, tag))\r\n\r\n# stem and lower each word\r\nignore_words = ['?', '.', '!']\r\nall_words = [stem(w) for w in all_words if w not in ignore_words]\r\n# remove duplicates and sort\r\nall_words = sorted(set(all_words))\r\ntags = sorted(set(tags))\r\n\r\nprint(len(xy), \"patterns\")\r\nprint(len(tags), \"tags:\", tags)\r\nprint(len(all_words), \"unique stemmed words:\", all_words)\r\n\r\n# create training data\r\nX_train = []\r\ny_train = []\r\nfor (pattern_sentence, tag) in xy:\r\n # X: bag of words for each pattern_sentence\r\n bag = bag_of_words(pattern_sentence, all_words)\r\n X_train.append(bag)\r\n # y: PyTorch CrossEntropyLoss needs only class labels, not one-hot\r\n label = tags.index(tag)\r\n y_train.append(label)\r\n\r\nX_train = np.array(X_train)\r\ny_train = np.array(y_train)\r\n\r\n# Hyper-parameters \r\nnum_epochs = 1000\r\nbatch_size = 8\r\nlearning_rate = 0.001\r\ninput_size = len(X_train[0])\r\nhidden_size = 8\r\noutput_size = len(tags)\r\nprint(input_size, output_size)\r\n\r\nclass ChatDataset(Dataset):\r\n\r\n def __init__(self):\r\n self.n_samples = len(X_train)\r\n self.x_data = X_train\r\n self.y_data = y_train\r\n\r\n # support indexing such that dataset[i] can be used to get i-th sample\r\n def __getitem__(self, index):\r\n return self.x_data[index], self.y_data[index]\r\n\r\n # we can call len(dataset) to return the size\r\n def __len__(self):\r\n return self.n_samples\r\n\r\ndataset = ChatDataset()\r\ntrain_loader = DataLoader(dataset=dataset,\r\n batch_size=batch_size,\r\n shuffle=True,\r\n num_workers=0)\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\nmodel = NeuralNet(input_size, hidden_size, output_size).to(device)\r\n\r\n# Loss and optimizer\r\ncriterion = nn.CrossEntropyLoss()\r\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\r\n\r\n# Train the model\r\nfor epoch in range(num_epochs):\r\n for (words, labels) in train_loader:\r\n words = words.to(device)\r\n labels = labels.to(dtype=torch.long).to(device)\r\n \r\n # Forward pass\r\n outputs = model(words)\r\n # if y would be one-hot, we must apply\r\n # labels = torch.max(labels, 1)[1]\r\n loss = criterion(outputs, labels)\r\n \r\n # Backward and optimize\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n \r\n if (epoch+1) % 100 == 0:\r\n print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')\r\n\r\n\r\nprint(f'final loss: {loss.item():.4f}')\r\n\r\ndata = {\r\n\"model_state\": model.state_dict(),\r\n\"input_size\": input_size,\r\n\"hidden_size\": hidden_size,\r\n\"output_size\": output_size,\r\n\"all_words\": all_words,\r\n\"tags\": tags\r\n}\r\n\r\nFILE = \"data.pth\"\r\ntorch.save(data, FILE)\r\n\r\nprint(f'training complete. file saved to {FILE}')"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"numpy.array",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TurkuNLP/paraphrase-classification | [
"625f0cf5223ecff9d25c2a4f558ca39fa5ecc794"
] | [
"para_averaging.py"
] | [
"import torch.nn.functional as F\nimport torch\nimport para_model\n\nclass ParaAvgModel(para_model.PARAModel):\n\n def __init__(self, **args):\n super().__init__(**args)\n # self.drop_layer=torch.nn.Dropout(p=0.2)\n self.cls_layer=torch.nn.Linear(self.bert.config.hidden_size*5, args['num_classes'])\n\n def forward(self, batch):\n input_ids = batch['input_ids']\n token_type_ids = batch['token_type_ids']\n attention_mask = batch['attention_mask']\n cls_mask = batch['cls_mask']\n sep1_mask = batch['sep1_mask']\n sep2_mask = batch['sep2_mask']\n left_mask = batch['left_mask']\n right_mask = batch['right_mask']\n enc = self.bert(input_ids=input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)[0] #BxS_LENxSIZE; BxSIZE\n cls = (enc*cls_mask.unsqueeze(-1)).sum(1) # enc.pooler_output\n sep1 = (enc*sep1_mask.unsqueeze(-1)).sum(1)\n sep2 = (enc*sep2_mask.unsqueeze(-1)).sum(1)\n left = (enc*left_mask.unsqueeze(-1)).sum(1) / left_mask.sum(-1).unsqueeze(-1)\n right = (enc*right_mask.unsqueeze(-1)).sum(1) / right_mask.sum(-1).unsqueeze(-1)\n catenated = torch.cat((cls, sep1, sep2, left, right), -1)\n # dropped = self.drop_layer(catenated)\n\n return self.cls_layer(catenated)"
] | [
[
"torch.nn.Linear",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
joewalter/mne-python | [
"b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc",
"b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc",
"b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc",
"b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc",
"b0629bea7f5e8e94d9e2e889f45a35f9657e6dbc"
] | [
"mne/viz/circle.py",
"mne/io/meas_info.py",
"mne/gui/tests/test_file_traits.py",
"mne/beamformer/tests/test_lcmv.py",
"mne/io/tests/test_reference.py"
] | [
"\"\"\"Functions to plot on circle as for connectivity\n\"\"\"\nfrom __future__ import print_function\n\n# Authors: Alexandre Gramfort <[email protected]>\n# Denis Engemann <[email protected]>\n# Martin Luessi <[email protected]>\n#\n# License: Simplified BSD\n\n\nfrom itertools import cycle\nfrom functools import partial\n\nimport numpy as np\n\nfrom .utils import plt_show\nfrom ..externals.six import string_types\n\n\ndef circular_layout(node_names, node_order, start_pos=90, start_between=True,\n group_boundaries=None, group_sep=10):\n \"\"\"Create layout arranging nodes on a circle.\n\n Parameters\n ----------\n node_names : list of str\n Node names.\n node_order : list of str\n List with node names defining the order in which the nodes are\n arranged. Must have the elements as node_names but the order can be\n different. The nodes are arranged clockwise starting at \"start_pos\"\n degrees.\n start_pos : float\n Angle in degrees that defines where the first node is plotted.\n start_between : bool\n If True, the layout starts with the position between the nodes. This is\n the same as adding \"180. / len(node_names)\" to start_pos.\n group_boundaries : None | array-like\n List of of boundaries between groups at which point a \"group_sep\" will\n be inserted. E.g. \"[0, len(node_names) / 2]\" will create two groups.\n group_sep : float\n Group separation angle in degrees. See \"group_boundaries\".\n\n Returns\n -------\n node_angles : array, shape=(len(node_names,))\n Node angles in degrees.\n \"\"\"\n n_nodes = len(node_names)\n\n if len(node_order) != n_nodes:\n raise ValueError('node_order has to be the same length as node_names')\n\n if group_boundaries is not None:\n boundaries = np.array(group_boundaries, dtype=np.int)\n if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):\n raise ValueError('\"group_boundaries\" has to be between 0 and '\n 'n_nodes - 1.')\n if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):\n raise ValueError('\"group_boundaries\" must have non-decreasing '\n 'values.')\n n_group_sep = len(group_boundaries)\n else:\n n_group_sep = 0\n boundaries = None\n\n # convert it to a list with indices\n node_order = [node_order.index(name) for name in node_names]\n node_order = np.array(node_order)\n if len(np.unique(node_order)) != n_nodes:\n raise ValueError('node_order has repeated entries')\n\n node_sep = (360. 
- n_group_sep * group_sep) / n_nodes\n\n if start_between:\n start_pos += node_sep / 2\n\n if boundaries is not None and boundaries[0] == 0:\n # special case when a group separator is at the start\n start_pos += group_sep / 2\n boundaries = boundaries[1:] if n_group_sep > 1 else None\n\n node_angles = np.ones(n_nodes, dtype=np.float) * node_sep\n node_angles[0] = start_pos\n if boundaries is not None:\n node_angles[boundaries] += group_sep\n\n node_angles = np.cumsum(node_angles)[node_order]\n\n return node_angles\n\n\ndef _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,\n n_nodes=0, node_angles=None,\n ylim=[9, 10]):\n \"\"\"Isolates connections around a single node when user left clicks a node.\n\n On right click, resets all connections.\"\"\"\n if event.inaxes != axes:\n return\n\n if event.button == 1: # left click\n # click must be near node radius\n if not ylim[0] <= event.ydata <= ylim[1]:\n return\n\n # all angles in range [0, 2*pi]\n node_angles = node_angles % (np.pi * 2)\n node = np.argmin(np.abs(event.xdata - node_angles))\n\n patches = event.inaxes.patches\n for ii, (x, y) in enumerate(zip(indices[0], indices[1])):\n patches[ii].set_visible(node in [x, y])\n fig.canvas.draw()\n elif event.button == 3: # right click\n patches = event.inaxes.patches\n for ii in range(np.size(indices, axis=1)):\n patches[ii].set_visible(True)\n fig.canvas.draw()\n\n\ndef plot_connectivity_circle(con, node_names, indices=None, n_lines=None,\n node_angles=None, node_width=None,\n node_colors=None, facecolor='black',\n textcolor='white', node_edgecolor='black',\n linewidth=1.5, colormap='hot', vmin=None,\n vmax=None, colorbar=True, title=None,\n colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),\n fontsize_title=12, fontsize_names=8,\n fontsize_colorbar=8, padding=6.,\n fig=None, subplot=111, interactive=True,\n node_linewidth=2., show=True):\n \"\"\"Visualize connectivity as a circular graph.\n\n Note: This code is based on the circle graph example by Nicolas P. Rougier\n http://www.labri.fr/perso/nrougier/coding/.\n\n Parameters\n ----------\n con : array\n Connectivity scores. Can be a square matrix, or a 1D array. If a 1D\n array is provided, \"indices\" has to be used to define the connection\n indices.\n node_names : list of str\n Node names. The order corresponds to the order in con.\n indices : tuple of arrays | None\n Two arrays with indices of connections for which the connections\n strenghts are defined in con. Only needed if con is a 1D array.\n n_lines : int | None\n If not None, only the n_lines strongest connections (strength=abs(con))\n are drawn.\n node_angles : array, shape=(len(node_names,)) | None\n Array with node positions in degrees. If None, the nodes are equally\n spaced on the circle. See mne.viz.circular_layout.\n node_width : float | None\n Width of each node in degrees. If None, the minimum angle between any\n two nodes is used as the width.\n node_colors : list of tuples | list of str\n List with the color to use for each node. If fewer colors than nodes\n are provided, the colors will be repeated. Any color supported by\n matplotlib can be used, e.g., RGBA tuples, named colors.\n facecolor : str\n Color to use for background. See matplotlib.colors.\n textcolor : str\n Color to use for text. See matplotlib.colors.\n node_edgecolor : str\n Color to use for lines around nodes. 
See matplotlib.colors.\n linewidth : float\n Line width to use for connections.\n colormap : str\n Colormap to use for coloring the connections.\n vmin : float | None\n Minimum value for colormap. If None, it is determined automatically.\n vmax : float | None\n Maximum value for colormap. If None, it is determined automatically.\n colorbar : bool\n Display a colorbar or not.\n title : str\n The figure title.\n colorbar_size : float\n Size of the colorbar.\n colorbar_pos : 2-tuple\n Position of the colorbar.\n fontsize_title : int\n Font size to use for title.\n fontsize_names : int\n Font size to use for node names.\n fontsize_colorbar : int\n Font size to use for colorbar.\n padding : float\n Space to add around figure to accommodate long labels.\n fig : None | instance of matplotlib.pyplot.Figure\n The figure to use. If None, a new figure with the specified background\n color will be created.\n subplot : int | 3-tuple\n Location of the subplot when creating figures with multiple plots. E.g.\n 121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See\n matplotlib.pyplot.subplot.\n interactive : bool\n When enabled, left-click on a node to show only connections to that\n node. Right-click shows all connections.\n node_linewidth : float\n Line with for nodes.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : instance of matplotlib.pyplot.Figure\n The figure handle.\n axes : instance of matplotlib.axes.PolarAxesSubplot\n The subplot handle.\n \"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.path as m_path\n import matplotlib.patches as m_patches\n\n n_nodes = len(node_names)\n\n if node_angles is not None:\n if len(node_angles) != n_nodes:\n raise ValueError('node_angles has to be the same length '\n 'as node_names')\n # convert it to radians\n node_angles = node_angles * np.pi / 180\n else:\n # uniform layout on unit circle\n node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)\n\n if node_width is None:\n # widths correspond to the minimum angle between two nodes\n dist_mat = node_angles[None, :] - node_angles[:, None]\n dist_mat[np.diag_indices(n_nodes)] = 1e9\n node_width = np.min(np.abs(dist_mat))\n else:\n node_width = node_width * np.pi / 180\n\n if node_colors is not None:\n if len(node_colors) < n_nodes:\n node_colors = cycle(node_colors)\n else:\n # assign colors using colormap\n node_colors = [plt.cm.spectral(i / float(n_nodes))\n for i in range(n_nodes)]\n\n # handle 1D and 2D connectivity information\n if con.ndim == 1:\n if indices is None:\n raise ValueError('indices has to be provided if con.ndim == 1')\n elif con.ndim == 2:\n if con.shape[0] != n_nodes or con.shape[1] != n_nodes:\n raise ValueError('con has to be 1D or a square matrix')\n # we use the lower-triangular part\n indices = np.tril_indices(n_nodes, -1)\n con = con[indices]\n else:\n raise ValueError('con has to be 1D or a square matrix')\n\n # get the colormap\n if isinstance(colormap, string_types):\n colormap = plt.get_cmap(colormap)\n\n # Make figure background the same colors as axes\n if fig is None:\n fig = plt.figure(figsize=(8, 8), facecolor=facecolor)\n\n # Use a polar axes\n if not isinstance(subplot, tuple):\n subplot = (subplot,)\n axes = plt.subplot(*subplot, polar=True, axisbg=facecolor)\n\n # No ticks, we'll put our own\n plt.xticks([])\n plt.yticks([])\n\n # Set y axes limit, add additional space if requested\n plt.ylim(0, 10 + padding)\n\n # Remove the black axes border which may obscure the labels\n axes.spines['polar'].set_visible(False)\n\n # Draw lines between 
connected nodes, only draw the strongest connections\n if n_lines is not None and len(con) > n_lines:\n con_thresh = np.sort(np.abs(con).ravel())[-n_lines]\n else:\n con_thresh = 0.\n\n # get the connections which we are drawing and sort by connection strength\n # this will allow us to draw the strongest connections first\n con_abs = np.abs(con)\n con_draw_idx = np.where(con_abs >= con_thresh)[0]\n\n con = con[con_draw_idx]\n con_abs = con_abs[con_draw_idx]\n indices = [ind[con_draw_idx] for ind in indices]\n\n # now sort them\n sort_idx = np.argsort(con_abs)\n con_abs = con_abs[sort_idx]\n con = con[sort_idx]\n indices = [ind[sort_idx] for ind in indices]\n\n # Get vmin vmax for color scaling\n if vmin is None:\n vmin = np.min(con[np.abs(con) >= con_thresh])\n if vmax is None:\n vmax = np.max(con)\n vrange = vmax - vmin\n\n # We want to add some \"noise\" to the start and end position of the\n # edges: We modulate the noise with the number of connections of the\n # node and the connection strength, such that the strongest connections\n # are closer to the node center\n nodes_n_con = np.zeros((n_nodes), dtype=np.int)\n for i, j in zip(indices[0], indices[1]):\n nodes_n_con[i] += 1\n nodes_n_con[j] += 1\n\n # initialize random number generator so plot is reproducible\n rng = np.random.mtrand.RandomState(seed=0)\n\n n_con = len(indices[0])\n noise_max = 0.25 * node_width\n start_noise = rng.uniform(-noise_max, noise_max, n_con)\n end_noise = rng.uniform(-noise_max, noise_max, n_con)\n\n nodes_n_con_seen = np.zeros_like(nodes_n_con)\n for i, (start, end) in enumerate(zip(indices[0], indices[1])):\n nodes_n_con_seen[start] += 1\n nodes_n_con_seen[end] += 1\n\n start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /\n float(nodes_n_con[start]))\n end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /\n float(nodes_n_con[end]))\n\n # scale connectivity for colormap (vmin<=>0, vmax<=>1)\n con_val_scaled = (con - vmin) / vrange\n\n # Finally, we draw the connections\n for pos, (i, j) in enumerate(zip(indices[0], indices[1])):\n # Start point\n t0, r0 = node_angles[i], 10\n\n # End point\n t1, r1 = node_angles[j], 10\n\n # Some noise in start and end point\n t0 += start_noise[pos]\n t1 += end_noise[pos]\n\n verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]\n codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,\n m_path.Path.LINETO]\n path = m_path.Path(verts, codes)\n\n color = colormap(con_val_scaled[pos])\n\n # Actual line\n patch = m_patches.PathPatch(path, fill=False, edgecolor=color,\n linewidth=linewidth, alpha=1.)\n axes.add_patch(patch)\n\n # Draw ring with colored nodes\n height = np.ones(n_nodes) * 1.0\n bars = axes.bar(node_angles, height, width=node_width, bottom=9,\n edgecolor=node_edgecolor, lw=node_linewidth,\n facecolor='.9', align='center')\n\n for bar, color in zip(bars, node_colors):\n bar.set_facecolor(color)\n\n # Draw node labels\n angles_deg = 180 * node_angles / np.pi\n for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):\n if angle_deg >= 270:\n ha = 'left'\n else:\n # Flip the label, so text is always upright\n angle_deg += 180\n ha = 'right'\n\n axes.text(angle_rad, 10.4, name, size=fontsize_names,\n rotation=angle_deg, rotation_mode='anchor',\n horizontalalignment=ha, verticalalignment='center',\n color=textcolor)\n\n if title is not None:\n plt.title(title, color=textcolor, fontsize=fontsize_title,\n axes=axes)\n\n if colorbar:\n sm = plt.cm.ScalarMappable(cmap=colormap,\n norm=plt.Normalize(vmin, vmax))\n 
sm.set_array(np.linspace(vmin, vmax))\n cb = plt.colorbar(sm, ax=axes, use_gridspec=False,\n shrink=colorbar_size,\n anchor=colorbar_pos)\n cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')\n cb.ax.tick_params(labelsize=fontsize_colorbar)\n plt.setp(cb_yticks, color=textcolor)\n\n # Add callback for interaction\n if interactive:\n callback = partial(_plot_connectivity_circle_onpick, fig=fig,\n axes=axes, indices=indices, n_nodes=n_nodes,\n node_angles=node_angles)\n\n fig.canvas.mpl_connect('button_press_event', callback)\n\n plt_show(show)\n return fig, axes\n",
"# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n# Teon Brooks <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom collections import Counter\nfrom copy import deepcopy\nfrom datetime import datetime as dt\nimport os.path as op\nimport re\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom .pick import channel_type\nfrom .constants import FIFF\nfrom .open import fiff_open\nfrom .tree import dir_tree_find\nfrom .tag import read_tag, find_tag\nfrom .proj import _read_proj, _write_proj, _uniquify_projs, _normalize_proj\nfrom .ctf_comp import read_ctf_comp, write_ctf_comp\nfrom .write import (start_file, end_file, start_block, end_block,\n write_string, write_dig_point, write_float, write_int,\n write_coord_trans, write_ch_info, write_name_list,\n write_julian, write_float_matrix)\nfrom .proc_history import _read_proc_history, _write_proc_history\nfrom ..utils import logger, verbose, warn\nfrom .. import __version__\nfrom ..externals.six import b, BytesIO, string_types, text_type\n\n\n_kind_dict = dict(\n eeg=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V),\n mag=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFF_UNIT_T),\n grad=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFF_UNIT_T_M),\n ref_meg=(FIFF.FIFFV_REF_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3,\n FIFF.FIFF_UNIT_T),\n misc=(FIFF.FIFFV_MISC_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_NONE),\n stim=(FIFF.FIFFV_STIM_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),\n eog=(FIFF.FIFFV_EOG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),\n ecg=(FIFF.FIFFV_ECG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),\n seeg=(FIFF.FIFFV_SEEG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V),\n bio=(FIFF.FIFFV_BIO_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V),\n ecog=(FIFF.FIFFV_ECOG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V),\n hbo=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBO, FIFF.FIFF_UNIT_MOL),\n hbr=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBR, FIFF.FIFF_UNIT_MOL)\n)\n\n\ndef _summarize_str(st):\n \"\"\"Aux function\"\"\"\n return st[:56][::-1].split(',', 1)[-1][::-1] + ', ...'\n\n\nclass Info(dict):\n \"\"\"Information about the recording.\n\n This data structure behaves like a dictionary. It contains all meta-data\n that is available for a recording.\n\n The attributes listed below are the possible dictionary entries:\n\n Attributes\n ----------\n bads : list of str\n List of bad (noisy/broken) channels, by name. These channels will by\n default be ignored by many processing steps.\n ch_names : list-like of str (read-only)\n The names of the channels.\n This object behaves like a read-only Python list. Behind the scenes\n it iterates over the channels dictionaries in `info['chs']`:\n `info['ch_names'][x] == info['chs'][x]['ch_name']`\n chs : list of dict\n A list of channel information structures.\n See: :ref:`faq` for details.\n comps : list of dict\n CTF software gradient compensation data.\n See: :ref:`faq` for details.\n custom_ref_applied : bool\n Whether a custom (=other than average) reference has been applied to\n the EEG data. This flag is checked by some algorithms that require an\n average reference to be set.\n events : list of dict\n Event list, usually extracted from the stim channels.\n See: :ref:`faq` for details.\n hpi_results : list of dict\n Head position indicator (HPI) digitization points and fit information\n (e.g., the resulting transform). 
See: :ref:`faq` for details.\n meas_date : list of int\n The first element of this list is a POSIX timestamp (milliseconds since\n 1970-01-01 00:00:00) denoting the date and time at which the\n measurement was taken. The second element is the number of\n microseconds.\n nchan : int\n Number of channels.\n projs : list of dict\n List of SSP operators that operate on the data.\n See: :ref:`faq` for details.\n sfreq : float\n Sampling frequency in Hertz.\n See: :ref:`faq` for details.\n acq_pars : str | None\n MEG system acquition parameters.\n acq_stim : str | None\n MEG system stimulus parameters.\n buffer_size_sec : float | None\n Buffer size (in seconds) when reading the raw data in chunks.\n ctf_head_t : dict | None\n The transformation from 4D/CTF head coordinates to Neuromag head\n coordinates. This is only present in 4D/CTF data.\n See: :ref:`faq` for details.\n description : str | None\n String description of the recording.\n dev_ctf_t : dict | None\n The transformation from device coordinates to 4D/CTF head coordinates.\n This is only present in 4D/CTF data.\n See: :ref:`faq` for details.\n dev_head_t : dict | None\n The device to head transformation.\n See: :ref:`faq` for details.\n dig : list of dict | None\n The Polhemus digitization data in head coordinates.\n See: :ref:`faq` for details.\n experimentor : str | None\n Name of the person that ran the experiment.\n file_id : dict | None\n The fif ID datastructure of the measurement file.\n See: :ref:`faq` for details.\n filename : str | None\n The name of the file that provided the raw data.\n highpass : float | None\n Highpass corner frequency in Hertz. Zero indicates a DC recording.\n hpi_meas : list of dict | None\n HPI measurements that were taken at the start of the recording\n (e.g. coil frequencies).\n hpi_subsystem : dict | None\n Information about the HPI subsystem that was used (e.g., event\n channel used for cHPI measurements).\n line_freq : float | None\n Frequency of the power line in Hertz.\n lowpass : float | None\n Lowpass corner frequency in Hertz.\n meas_id : dict | None\n The ID assigned to this measurement by the acquisition system or during\n file conversion.\n See: :ref:`faq` for details.\n proj_id : int | None\n ID number of the project the experiment belongs to.\n proj_name : str | None\n Name of the project the experiment belongs to.\n subject_info : dict | None\n Information about the subject.\n proc_history : list of dict | None | not present in dict\n The SSS info, the CTC correction and the calibaraions from the SSS\n processing logs inside of a raw file.\n See: :ref:`faq` for details.\n \"\"\"\n\n def copy(self):\n \"\"\"Copy the instance\n\n Returns\n -------\n info : instance of Info\n The copied info.\n \"\"\"\n return Info(deepcopy(self))\n\n def normalize_proj(self):\n \"\"\"(Re-)Normalize projection vectors after subselection\n\n Applying projection after sub-selecting a set of channels that\n were originally used to compute the original projection vectors\n can be dangerous (e.g., if few channels remain, most power was\n in channels that are no longer picked, etc.). By default, mne\n will emit a warning when this is done.\n\n This function will re-normalize projectors to use only the\n remaining channels, thus avoiding that warning. 
Only use this\n function if you're confident that the projection vectors still\n adequately capture the original signal of interest.\n \"\"\"\n _normalize_proj(self)\n\n def __repr__(self):\n \"\"\"Summarize info instead of printing all\"\"\"\n strs = ['<Info | %s non-empty fields']\n non_empty = 0\n for k, v in self.items():\n if k in ['bads', 'ch_names']:\n entr = (', '.join(b for ii, b in enumerate(v) if ii < 10)\n if v else '0 items')\n if len(entr) >= 56:\n # get rid of of half printed ch names\n entr = _summarize_str(entr)\n elif k == 'filename' and v:\n path, fname = op.split(v)\n entr = path[:10] + '.../' + fname\n elif k == 'projs' and v:\n entr = ', '.join(p['desc'] + ': o%s' %\n {0: 'ff', 1: 'n'}[p['active']] for p in v)\n if len(entr) >= 56:\n entr = _summarize_str(entr)\n elif k == 'meas_date' and np.iterable(v):\n # first entry in meas_date is meaningful\n entr = dt.fromtimestamp(v[0]).strftime('%Y-%m-%d %H:%M:%S')\n elif k == 'kit_system_id' and v is not None:\n from .kit.constants import SYSNAMES as KIT_SYSNAMES\n entr = '%i (%s)' % (v, KIT_SYSNAMES.get(v, 'unknown'))\n else:\n this_len = (len(v) if hasattr(v, '__len__') else\n ('%s' % v if v is not None else None))\n entr = (('%d items' % this_len) if isinstance(this_len, int)\n else ('%s' % this_len if this_len else ''))\n if entr:\n non_empty += 1\n entr = ' | ' + entr\n if k == 'chs':\n ch_types = [channel_type(self, idx) for idx in range(len(v))]\n ch_counts = Counter(ch_types)\n entr += \" (%s)\" % ', '.join(\"%s: %d\" % (ch_type.upper(), count)\n for ch_type, count\n in ch_counts.items())\n strs.append('%s : %s%s' % (k, str(type(v))[7:-2], entr))\n if k in ['sfreq', 'lowpass', 'highpass']:\n strs[-1] += ' Hz'\n strs_non_empty = sorted(s for s in strs if '|' in s)\n strs_empty = sorted(s for s in strs if '|' not in s)\n st = '\\n '.join(strs_non_empty + strs_empty)\n st += '\\n>'\n st %= non_empty\n return st\n\n def _check_consistency(self):\n \"\"\"Do some self-consistency checks and datatype tweaks\"\"\"\n missing = [bad for bad in self['bads'] if bad not in self['ch_names']]\n if len(missing) > 0:\n raise RuntimeError('bad channel(s) %s marked do not exist in info'\n % (missing,))\n\n chs = [ch['ch_name'] for ch in self['chs']]\n if len(self['ch_names']) != len(chs) or any(\n ch_1 != ch_2 for ch_1, ch_2 in zip(self['ch_names'], chs)) or \\\n self['nchan'] != len(chs):\n raise RuntimeError('info channel name inconsistency detected, '\n 'please notify mne-python developers')\n\n # make sure we have the proper datatypes\n for key in ('sfreq', 'highpass', 'lowpass'):\n if self.get(key) is not None:\n self[key] = float(self[key])\n\n # make sure channel names are unique\n unique_ids = np.unique(self['ch_names'], return_index=True)[1]\n if len(unique_ids) != self['nchan']:\n dups = set(self['ch_names'][x]\n for x in np.setdiff1d(range(self['nchan']), unique_ids))\n raise RuntimeError('Channel names are not unique, found '\n 'duplicates for: %s' % dups)\n\n def _update_redundant(self):\n \"\"\"Update the redundant entries\"\"\"\n self['ch_names'] = [ch['ch_name'] for ch in self['chs']]\n self['nchan'] = len(self['chs'])\n\n\ndef read_fiducials(fname):\n \"\"\"Read fiducials from a fiff file\n\n Parameters\n ----------\n fname : str\n The filename to read.\n\n Returns\n -------\n pts : list of dicts\n List of digitizer points (each point in a dict).\n coord_frame : int\n The coordinate frame of the points (one of\n mne.io.constants.FIFF.FIFFV_COORD_...)\n \"\"\"\n fid, tree, _ = fiff_open(fname)\n with fid:\n isotrak = 
dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)\n isotrak = isotrak[0]\n pts = []\n coord_frame = FIFF.FIFFV_COORD_UNKNOWN\n for k in range(isotrak['nent']):\n kind = isotrak['directory'][k].kind\n pos = isotrak['directory'][k].pos\n if kind == FIFF.FIFF_DIG_POINT:\n tag = read_tag(fid, pos)\n pts.append(tag.data)\n elif kind == FIFF.FIFF_MNE_COORD_FRAME:\n tag = read_tag(fid, pos)\n coord_frame = tag.data[0]\n\n if coord_frame == FIFF.FIFFV_COORD_UNKNOWN:\n err = (\"No coordinate frame was found in the file %r, it is probably \"\n \"not a valid fiducials file.\" % fname)\n raise ValueError(err)\n\n # coord_frame is not stored in the tag\n for pt in pts:\n pt['coord_frame'] = coord_frame\n\n return pts, coord_frame\n\n\ndef write_fiducials(fname, pts, coord_frame=0):\n \"\"\"Write fiducials to a fiff file\n\n Parameters\n ----------\n fname : str\n Destination file name.\n pts : iterator of dict\n Iterator through digitizer points. Each point is a dictionary with\n the keys 'kind', 'ident' and 'r'.\n coord_frame : int\n The coordinate frame of the points (one of\n mne.io.constants.FIFF.FIFFV_COORD_...)\n \"\"\"\n pts_frames = set((pt.get('coord_frame', coord_frame) for pt in pts))\n bad_frames = pts_frames - set((coord_frame,))\n if len(bad_frames) > 0:\n err = (\"Points have coord_frame entries that are incompatible with \"\n \"coord_frame=%i: %s.\" % (coord_frame, str(tuple(bad_frames))))\n raise ValueError(err)\n\n fid = start_file(fname)\n start_block(fid, FIFF.FIFFB_ISOTRAK)\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)\n for pt in pts:\n write_dig_point(fid, pt)\n\n end_block(fid, FIFF.FIFFB_ISOTRAK)\n end_file(fid)\n\n\ndef _read_dig_fif(fid, meas_info):\n \"\"\"Helper to read digitizer data from a FIFF file\"\"\"\n isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)\n dig = None\n if len(isotrak) == 0:\n logger.info('Isotrak not found')\n elif len(isotrak) > 1:\n warn('Multiple Isotrak found')\n else:\n isotrak = isotrak[0]\n dig = []\n for k in range(isotrak['nent']):\n kind = isotrak['directory'][k].kind\n pos = isotrak['directory'][k].pos\n if kind == FIFF.FIFF_DIG_POINT:\n tag = read_tag(fid, pos)\n dig.append(tag.data)\n dig[-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD\n return dig\n\n\ndef _read_dig_points(fname, comments='%', unit='auto'):\n \"\"\"Read digitizer data from a text file.\n\n If fname ends in .hsp or .esp, the function assumes digitizer files in [m],\n otherwise it assumes space-delimited text files in [mm].\n\n Parameters\n ----------\n fname : str\n The filepath of space delimited file with points.\n comments : str\n The character used to indicate the start of a comment;\n Default: '%'.\n unit : 'auto' | 'm' | 'cm' | 'mm'\n Unit of the digitizer files (hsp and elp). If not 'm', coordinates will\n be rescaled to 'm'. 
Default is 'auto', which assumes 'm' for *.hsp and\n *.elp files and 'mm' for *.txt files, corresponding to the known\n Polhemus export formats.\n\n Returns\n -------\n dig_points : np.ndarray, shape (n_points, 3)\n Array of dig points in [m].\n \"\"\"\n if unit not in ('auto', 'm', 'mm', 'cm'):\n raise ValueError('unit must be one of \"auto\", \"m\", \"mm\", or \"cm\"')\n\n _, ext = op.splitext(fname)\n if ext == '.elp' or ext == '.hsp':\n with open(fname) as fid:\n file_str = fid.read()\n value_pattern = \"\\-?\\d+\\.?\\d*e?\\-?\\d*\"\n coord_pattern = \"({0})\\s+({0})\\s+({0})\\s*$\".format(value_pattern)\n if ext == '.hsp':\n coord_pattern = '^' + coord_pattern\n points_str = [m.groups() for m in re.finditer(coord_pattern, file_str,\n re.MULTILINE)]\n dig_points = np.array(points_str, dtype=float)\n else:\n dig_points = np.loadtxt(fname, comments=comments, ndmin=2)\n if unit == 'auto':\n unit = 'mm'\n\n if dig_points.shape[-1] != 3:\n err = 'Data must be (n, 3) instead of %s' % (dig_points.shape,)\n raise ValueError(err)\n\n if unit == 'mm':\n dig_points /= 1000.\n elif unit == 'cm':\n dig_points /= 100.\n\n return dig_points\n\n\ndef _write_dig_points(fname, dig_points):\n \"\"\"Write points to text file\n\n Parameters\n ----------\n fname : str\n Path to the file to write. The kind of file to write is determined\n based on the extension: '.txt' for tab separated text file.\n dig_points : numpy.ndarray, shape (n_points, 3)\n Points.\n \"\"\"\n _, ext = op.splitext(fname)\n dig_points = np.asarray(dig_points)\n if (dig_points.ndim != 2) or (dig_points.shape[1] != 3):\n err = (\"Points must be of shape (n_points, 3), \"\n \"not %s\" % (dig_points.shape,))\n raise ValueError(err)\n\n if ext == '.txt':\n with open(fname, 'wb') as fid:\n version = __version__\n now = dt.now().strftime(\"%I:%M%p on %B %d, %Y\")\n fid.write(b(\"% Ascii 3D points file created by mne-python version \"\n \"{version} at {now}\\n\".format(version=version,\n now=now)))\n fid.write(b(\"% {N} 3D points, \"\n \"x y z per line\\n\".format(N=len(dig_points))))\n np.savetxt(fid, dig_points, delimiter='\\t', newline='\\n')\n else:\n msg = \"Unrecognized extension: %r. 
Need '.txt'.\" % ext\n raise ValueError(msg)\n\n\ndef _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None,\n dig_points=None, dig_ch_pos=None):\n \"\"\"Constructs digitizer info for the info.\n\n Parameters\n ----------\n nasion : array-like | numpy.ndarray, shape (3,) | None\n Point designated as the nasion point.\n lpa : array-like | numpy.ndarray, shape (3,) | None\n Point designated as the left auricular point.\n rpa : array-like | numpy.ndarray, shape (3,) | None\n Point designated as the right auricular point.\n hpi : array-like | numpy.ndarray, shape (n_points, 3) | None\n Points designated as head position indicator points.\n dig_points : array-like | numpy.ndarray, shape (n_points, 3)\n Points designed as the headshape points.\n dig_ch_pos : dict\n Dict of EEG channel positions.\n\n Returns\n -------\n dig : list\n List of digitizer points to be added to the info['dig'].\n \"\"\"\n dig = []\n if lpa is not None:\n lpa = np.asarray(lpa)\n if lpa.shape == (3,):\n dig.append({'r': lpa, 'ident': FIFF.FIFFV_POINT_LPA,\n 'kind': FIFF.FIFFV_POINT_CARDINAL,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n else:\n msg = ('LPA should have the shape (3,) instead of %s'\n % (lpa.shape,))\n raise ValueError(msg)\n if nasion is not None:\n nasion = np.asarray(nasion)\n if nasion.shape == (3,):\n dig.append({'r': nasion, 'ident': FIFF.FIFFV_POINT_NASION,\n 'kind': FIFF.FIFFV_POINT_CARDINAL,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n else:\n msg = ('Nasion should have the shape (3,) instead of %s'\n % (nasion.shape,))\n raise ValueError(msg)\n if rpa is not None:\n rpa = np.asarray(rpa)\n if rpa.shape == (3,):\n dig.append({'r': rpa, 'ident': FIFF.FIFFV_POINT_RPA,\n 'kind': FIFF.FIFFV_POINT_CARDINAL,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n else:\n msg = ('RPA should have the shape (3,) instead of %s'\n % (rpa.shape,))\n raise ValueError(msg)\n if hpi is not None:\n hpi = np.asarray(hpi)\n if hpi.shape[1] == 3:\n for idx, point in enumerate(hpi):\n dig.append({'r': point, 'ident': idx + 1,\n 'kind': FIFF.FIFFV_POINT_HPI,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n else:\n msg = ('HPI should have the shape (n_points, 3) instead of '\n '%s' % (hpi.shape,))\n raise ValueError(msg)\n if dig_points is not None:\n dig_points = np.asarray(dig_points)\n if dig_points.shape[1] == 3:\n for idx, point in enumerate(dig_points):\n dig.append({'r': point, 'ident': idx + 1,\n 'kind': FIFF.FIFFV_POINT_EXTRA,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n else:\n msg = ('Points should have the shape (n_points, 3) instead of '\n '%s' % (dig_points.shape,))\n raise ValueError(msg)\n if dig_ch_pos is not None:\n keys = sorted(dig_ch_pos.keys())\n for key in keys:\n dig.append({'r': dig_ch_pos[key], 'ident': int(key[-3:]),\n 'kind': FIFF.FIFFV_POINT_EEG,\n 'coord_frame': FIFF.FIFFV_COORD_HEAD})\n return dig\n\n\n@verbose\ndef read_info(fname, verbose=None):\n \"\"\"Read measurement info from a file\n\n Parameters\n ----------\n fname : str\n File name.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of Info\n Measurement information for the dataset.\n \"\"\"\n f, tree, _ = fiff_open(fname)\n with f as fid:\n info = read_meas_info(fid, tree)[0]\n return info\n\n\ndef read_bad_channels(fid, node):\n \"\"\"Read bad channels\n\n Parameters\n ----------\n fid : file\n The file descriptor.\n\n node : dict\n The node of the FIF tree that contains info on the bad channels.\n\n Returns\n -------\n bads : list\n A list of bad channel's 
names.\n \"\"\"\n nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS)\n\n bads = []\n if len(nodes) > 0:\n for node in nodes:\n tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST)\n if tag is not None and tag.data is not None:\n bads = tag.data.split(':')\n return bads\n\n\n@verbose\ndef read_meas_info(fid, tree, clean_bads=False, verbose=None):\n \"\"\"Read the measurement info\n\n Parameters\n ----------\n fid : file\n Open file descriptor.\n tree : tree\n FIF tree structure.\n clean_bads : bool\n If True, clean info['bads'] before running consistency check.\n Should only be needed for old files where we did not check bads\n before saving.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of Info\n Info on dataset.\n meas : dict\n Node in tree that contains the info.\n \"\"\"\n\n # Find the desired blocks\n meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)\n if len(meas) == 0:\n raise ValueError('Could not find measurement data')\n if len(meas) > 1:\n raise ValueError('Cannot read more that 1 measurement data')\n meas = meas[0]\n\n meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)\n if len(meas_info) == 0:\n raise ValueError('Could not find measurement info')\n if len(meas_info) > 1:\n raise ValueError('Cannot read more that 1 measurement info')\n meas_info = meas_info[0]\n\n # Read measurement info\n dev_head_t = None\n ctf_head_t = None\n dev_ctf_t = None\n meas_date = None\n highpass = None\n lowpass = None\n nchan = None\n sfreq = None\n chs = []\n experimenter = None\n description = None\n proj_id = None\n proj_name = None\n line_freq = None\n custom_ref_applied = False\n xplotter_layout = None\n kit_system_id = None\n for k in range(meas_info['nent']):\n kind = meas_info['directory'][k].kind\n pos = meas_info['directory'][k].pos\n if kind == FIFF.FIFF_NCHAN:\n tag = read_tag(fid, pos)\n nchan = int(tag.data)\n elif kind == FIFF.FIFF_SFREQ:\n tag = read_tag(fid, pos)\n sfreq = float(tag.data)\n elif kind == FIFF.FIFF_CH_INFO:\n tag = read_tag(fid, pos)\n chs.append(tag.data)\n elif kind == FIFF.FIFF_LOWPASS:\n tag = read_tag(fid, pos)\n lowpass = float(tag.data)\n elif kind == FIFF.FIFF_HIGHPASS:\n tag = read_tag(fid, pos)\n highpass = float(tag.data)\n elif kind == FIFF.FIFF_MEAS_DATE:\n tag = read_tag(fid, pos)\n meas_date = tag.data\n elif kind == FIFF.FIFF_COORD_TRANS:\n tag = read_tag(fid, pos)\n cand = tag.data\n\n if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \\\n cand['to'] == FIFF.FIFFV_COORD_HEAD:\n dev_head_t = cand\n elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \\\n cand['to'] == FIFF.FIFFV_COORD_HEAD:\n ctf_head_t = cand\n elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE and \\\n cand['to'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:\n dev_ctf_t = cand\n elif kind == FIFF.FIFF_EXPERIMENTER:\n tag = read_tag(fid, pos)\n experimenter = tag.data\n elif kind == FIFF.FIFF_DESCRIPTION:\n tag = read_tag(fid, pos)\n description = tag.data\n elif kind == FIFF.FIFF_PROJ_ID:\n tag = read_tag(fid, pos)\n proj_id = tag.data\n elif kind == FIFF.FIFF_PROJ_NAME:\n tag = read_tag(fid, pos)\n proj_name = tag.data\n elif kind == FIFF.FIFF_LINE_FREQ:\n tag = read_tag(fid, pos)\n line_freq = float(tag.data)\n elif kind in [FIFF.FIFF_MNE_CUSTOM_REF, 236]: # 236 used before v0.11\n tag = read_tag(fid, pos)\n custom_ref_applied = bool(tag.data)\n elif kind == FIFF.FIFF_XPLOTTER_LAYOUT:\n tag = read_tag(fid, pos)\n xplotter_layout = str(tag.data)\n elif kind == FIFF.FIFF_MNE_KIT_SYSTEM_ID:\n tag 
= read_tag(fid, pos)\n kit_system_id = int(tag.data)\n\n # Check that we have everything we need\n if nchan is None:\n raise ValueError('Number of channels is not defined')\n\n if sfreq is None:\n raise ValueError('Sampling frequency is not defined')\n\n if len(chs) == 0:\n raise ValueError('Channel information not defined')\n\n if len(chs) != nchan:\n raise ValueError('Incorrect number of channel definitions found')\n\n if dev_head_t is None or ctf_head_t is None:\n hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)\n if len(hpi_result) == 1:\n hpi_result = hpi_result[0]\n for k in range(hpi_result['nent']):\n kind = hpi_result['directory'][k].kind\n pos = hpi_result['directory'][k].pos\n if kind == FIFF.FIFF_COORD_TRANS:\n tag = read_tag(fid, pos)\n cand = tag.data\n if (cand['from'] == FIFF.FIFFV_COORD_DEVICE and\n cand['to'] == FIFF.FIFFV_COORD_HEAD and\n dev_head_t is None):\n dev_head_t = cand\n elif (cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and\n cand['to'] == FIFF.FIFFV_COORD_HEAD and\n ctf_head_t is None):\n ctf_head_t = cand\n\n # Locate the Polhemus data\n dig = _read_dig_fif(fid, meas_info)\n\n # Locate the acquisition information\n acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)\n acq_pars = None\n acq_stim = None\n if len(acqpars) == 1:\n acqpars = acqpars[0]\n for k in range(acqpars['nent']):\n kind = acqpars['directory'][k].kind\n pos = acqpars['directory'][k].pos\n if kind == FIFF.FIFF_DACQ_PARS:\n tag = read_tag(fid, pos)\n acq_pars = tag.data\n elif kind == FIFF.FIFF_DACQ_STIM:\n tag = read_tag(fid, pos)\n acq_stim = tag.data\n\n # Load the SSP data\n projs = _read_proj(fid, meas_info)\n\n # Load the CTF compensation data\n comps = read_ctf_comp(fid, meas_info, chs)\n\n # Load the bad channel list\n bads = read_bad_channels(fid, meas_info)\n\n #\n # Put the data together\n #\n if tree['id'] is not None:\n info = Info(file_id=tree['id'])\n else:\n info = Info(file_id=None)\n\n # Locate events list\n events = dir_tree_find(meas_info, FIFF.FIFFB_EVENTS)\n evs = list()\n for event in events:\n ev = dict()\n for k in range(event['nent']):\n kind = event['directory'][k].kind\n pos = event['directory'][k].pos\n if kind == FIFF.FIFF_EVENT_CHANNELS:\n ev['channels'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_EVENT_LIST:\n ev['list'] = read_tag(fid, pos).data\n evs.append(ev)\n info['events'] = evs\n\n # Locate HPI result\n hpi_results = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)\n hrs = list()\n for hpi_result in hpi_results:\n hr = dict()\n hr['dig_points'] = []\n for k in range(hpi_result['nent']):\n kind = hpi_result['directory'][k].kind\n pos = hpi_result['directory'][k].pos\n if kind == FIFF.FIFF_DIG_POINT:\n hr['dig_points'].append(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_DIGITIZATION_ORDER:\n hr['order'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_COILS_USED:\n hr['used'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_COIL_MOMENTS:\n hr['moments'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_FIT_GOODNESS:\n hr['goodness'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_FIT_GOOD_LIMIT:\n hr['good_limit'] = float(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_FIT_DIST_LIMIT:\n hr['dist_limit'] = float(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_FIT_ACCEPT:\n hr['accept'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_COORD_TRANS:\n hr['coord_trans'] = read_tag(fid, pos).data\n hrs.append(hr)\n info['hpi_results'] = hrs\n\n # Locate HPI Measurement\n hpi_meass = 
dir_tree_find(meas_info, FIFF.FIFFB_HPI_MEAS)\n hms = list()\n for hpi_meas in hpi_meass:\n hm = dict()\n for k in range(hpi_meas['nent']):\n kind = hpi_meas['directory'][k].kind\n pos = hpi_meas['directory'][k].pos\n if kind == FIFF.FIFF_CREATOR:\n hm['creator'] = text_type(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_SFREQ:\n hm['sfreq'] = float(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_NCHAN:\n hm['nchan'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_NAVE:\n hm['nave'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_HPI_NCOIL:\n hm['ncoil'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_FIRST_SAMPLE:\n hm['first_samp'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_LAST_SAMPLE:\n hm['last_samp'] = int(read_tag(fid, pos).data)\n hpi_coils = dir_tree_find(hpi_meas, FIFF.FIFFB_HPI_COIL)\n hcs = []\n for hpi_coil in hpi_coils:\n hc = dict()\n for k in range(hpi_coil['nent']):\n kind = hpi_coil['directory'][k].kind\n pos = hpi_coil['directory'][k].pos\n if kind == FIFF.FIFF_HPI_COIL_NO:\n hc['number'] = int(read_tag(fid, pos).data)\n elif kind == FIFF.FIFF_EPOCH:\n hc['epoch'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_SLOPES:\n hc['slopes'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_CORR_COEFF:\n hc['corr_coeff'] = read_tag(fid, pos).data\n elif kind == FIFF.FIFF_HPI_COIL_FREQ:\n hc['coil_freq'] = read_tag(fid, pos).data\n hcs.append(hc)\n hm['hpi_coils'] = hcs\n hms.append(hm)\n info['hpi_meas'] = hms\n\n subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)\n si = None\n if len(subject_info) == 1:\n subject_info = subject_info[0]\n si = dict()\n for k in range(subject_info['nent']):\n kind = subject_info['directory'][k].kind\n pos = subject_info['directory'][k].pos\n if kind == FIFF.FIFF_SUBJ_ID:\n tag = read_tag(fid, pos)\n si['id'] = int(tag.data)\n elif kind == FIFF.FIFF_SUBJ_HIS_ID:\n tag = read_tag(fid, pos)\n si['his_id'] = text_type(tag.data)\n elif kind == FIFF.FIFF_SUBJ_LAST_NAME:\n tag = read_tag(fid, pos)\n si['last_name'] = text_type(tag.data)\n elif kind == FIFF.FIFF_SUBJ_FIRST_NAME:\n tag = read_tag(fid, pos)\n si['first_name'] = text_type(tag.data)\n elif kind == FIFF.FIFF_SUBJ_MIDDLE_NAME:\n tag = read_tag(fid, pos)\n si['middle_name'] = text_type(tag.data)\n elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY:\n tag = read_tag(fid, pos)\n si['birthday'] = tag.data\n elif kind == FIFF.FIFF_SUBJ_SEX:\n tag = read_tag(fid, pos)\n si['sex'] = int(tag.data)\n elif kind == FIFF.FIFF_SUBJ_HAND:\n tag = read_tag(fid, pos)\n si['hand'] = int(tag.data)\n info['subject_info'] = si\n\n hpi_subsystem = dir_tree_find(meas_info, FIFF.FIFFB_HPI_SUBSYSTEM)\n hs = None\n if len(hpi_subsystem) == 1:\n hpi_subsystem = hpi_subsystem[0]\n hs = dict()\n for k in range(hpi_subsystem['nent']):\n kind = hpi_subsystem['directory'][k].kind\n pos = hpi_subsystem['directory'][k].pos\n if kind == FIFF.FIFF_HPI_NCOIL:\n tag = read_tag(fid, pos)\n hs['ncoil'] = int(tag.data)\n elif kind == FIFF.FIFF_EVENT_CHANNEL:\n tag = read_tag(fid, pos)\n hs['event_channel'] = text_type(tag.data)\n hpi_coils = dir_tree_find(hpi_subsystem, FIFF.FIFFB_HPI_COIL)\n hc = []\n for coil in hpi_coils:\n this_coil = dict()\n for j in range(coil['nent']):\n kind = coil['directory'][j].kind\n pos = coil['directory'][j].pos\n if kind == FIFF.FIFF_EVENT_BITS:\n tag = read_tag(fid, pos)\n this_coil['event_bits'] = np.array(tag.data)\n hc.append(this_coil)\n hs['hpi_coils'] = hc\n info['hpi_subsystem'] = hs\n\n # Read processing history\n _read_proc_history(fid, 
tree, info)\n\n # Make the most appropriate selection for the measurement id\n if meas_info['parent_id'] is None:\n if meas_info['id'] is None:\n if meas['id'] is None:\n if meas['parent_id'] is None:\n info['meas_id'] = info['file_id']\n else:\n info['meas_id'] = meas['parent_id']\n else:\n info['meas_id'] = meas['id']\n else:\n info['meas_id'] = meas_info['id']\n else:\n info['meas_id'] = meas_info['parent_id']\n\n info['experimenter'] = experimenter\n info['description'] = description\n info['proj_id'] = proj_id\n info['proj_name'] = proj_name\n\n if meas_date is None:\n meas_date = [info['meas_id']['secs'], info['meas_id']['usecs']]\n info['meas_date'] = meas_date\n\n info['sfreq'] = sfreq\n info['highpass'] = highpass if highpass is not None else 0.\n info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0\n info['line_freq'] = line_freq\n\n # Add the channel information and make a list of channel names\n # for convenience\n info['chs'] = chs\n\n #\n # Add the coordinate transformations\n #\n info['dev_head_t'] = dev_head_t\n info['ctf_head_t'] = ctf_head_t\n info['dev_ctf_t'] = dev_ctf_t\n if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None:\n from ..transforms import Transform\n head_ctf_trans = linalg.inv(ctf_head_t['trans'])\n dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])\n info['dev_ctf_t'] = Transform('meg', 'ctf_head', dev_ctf_trans)\n\n # All kinds of auxliary stuff\n info['dig'] = dig\n info['bads'] = bads\n info._update_redundant()\n if clean_bads:\n info['bads'] = [b for b in bads if b in info['ch_names']]\n info['projs'] = projs\n info['comps'] = comps\n info['acq_pars'] = acq_pars\n info['acq_stim'] = acq_stim\n info['custom_ref_applied'] = custom_ref_applied\n info['xplotter_layout'] = xplotter_layout\n info['kit_system_id'] = kit_system_id\n info._check_consistency()\n return info, meas\n\n\ndef write_meas_info(fid, info, data_type=None, reset_range=True):\n \"\"\"Write measurement info into a file id (from a fif file)\n\n Parameters\n ----------\n fid : file\n Open file descriptor.\n info : instance of Info\n The measurement info structure.\n data_type : int\n The data_type in case it is necessary. 
Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n\n Notes\n -----\n Tags are written in a particular order for compatibility with maxfilter.\n \"\"\"\n info._check_consistency()\n\n # Measurement info\n start_block(fid, FIFF.FIFFB_MEAS_INFO)\n\n for event in info['events']:\n start_block(fid, FIFF.FIFFB_EVENTS)\n if event.get('channels') is not None:\n write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event['channels'])\n if event.get('list') is not None:\n write_int(fid, FIFF.FIFF_EVENT_LIST, event['list'])\n end_block(fid, FIFF.FIFFB_EVENTS)\n\n # HPI Result\n for hpi_result in info['hpi_results']:\n start_block(fid, FIFF.FIFFB_HPI_RESULT)\n for d in hpi_result['dig_points']:\n write_dig_point(fid, d)\n if 'order' in hpi_result:\n write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER,\n hpi_result['order'])\n if 'used' in hpi_result:\n write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result['used'])\n if 'moments' in hpi_result:\n write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS,\n hpi_result['moments'])\n if 'goodness' in hpi_result:\n write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS,\n hpi_result['goodness'])\n if 'good_limit' in hpi_result:\n write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT,\n hpi_result['good_limit'])\n if 'dist_limit' in hpi_result:\n write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT,\n hpi_result['dist_limit'])\n if 'accept' in hpi_result:\n write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result['accept'])\n if 'coord_trans' in hpi_result:\n write_coord_trans(fid, hpi_result['coord_trans'])\n end_block(fid, FIFF.FIFFB_HPI_RESULT)\n\n # HPI Measurement\n for hpi_meas in info['hpi_meas']:\n start_block(fid, FIFF.FIFFB_HPI_MEAS)\n if hpi_meas.get('creator') is not None:\n write_string(fid, FIFF.FIFF_CREATOR, hpi_meas['creator'])\n if hpi_meas.get('sfreq') is not None:\n write_float(fid, FIFF.FIFF_SFREQ, hpi_meas['sfreq'])\n if hpi_meas.get('nchan') is not None:\n write_int(fid, FIFF.FIFF_NCHAN, hpi_meas['nchan'])\n if hpi_meas.get('nave') is not None:\n write_int(fid, FIFF.FIFF_NAVE, hpi_meas['nave'])\n if hpi_meas.get('ncoil') is not None:\n write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas['ncoil'])\n if hpi_meas.get('first_samp') is not None:\n write_int(fid, FIFF.FIFF_FIRST_SAMPLE, hpi_meas['first_samp'])\n if hpi_meas.get('last_samp') is not None:\n write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas['last_samp'])\n for hpi_coil in hpi_meas['hpi_coils']:\n start_block(fid, FIFF.FIFFB_HPI_COIL)\n if hpi_coil.get('number') is not None:\n write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil['number'])\n if hpi_coil.get('epoch') is not None:\n write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil['epoch'])\n if hpi_coil.get('slopes') is not None:\n write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil['slopes'])\n if hpi_coil.get('corr_coeff') is not None:\n write_float(fid, FIFF.FIFF_HPI_CORR_COEFF,\n hpi_coil['corr_coeff'])\n if hpi_coil.get('coil_freq') is not None:\n write_float(fid, FIFF.FIFF_HPI_COIL_FREQ,\n hpi_coil['coil_freq'])\n end_block(fid, FIFF.FIFFB_HPI_COIL)\n end_block(fid, FIFF.FIFFB_HPI_MEAS)\n\n # Polhemus data\n if info['dig'] is not None:\n start_block(fid, FIFF.FIFFB_ISOTRAK)\n for d in info['dig']:\n write_dig_point(fid, d)\n\n end_block(fid, FIFF.FIFFB_ISOTRAK)\n\n # megacq parameters\n if info['acq_pars'] is not None or info['acq_stim'] is not None:\n start_block(fid, FIFF.FIFFB_DACQ_PARS)\n if info['acq_pars'] is not None:\n write_string(fid, FIFF.FIFF_DACQ_PARS, 
info['acq_pars'])\n\n if info['acq_stim'] is not None:\n write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])\n\n end_block(fid, FIFF.FIFFB_DACQ_PARS)\n\n # Coordinate transformations if the HPI result block was not there\n if info['dev_head_t'] is not None:\n write_coord_trans(fid, info['dev_head_t'])\n\n if info['ctf_head_t'] is not None:\n write_coord_trans(fid, info['ctf_head_t'])\n\n if info['dev_ctf_t'] is not None:\n write_coord_trans(fid, info['dev_ctf_t'])\n\n # Projectors\n _write_proj(fid, info['projs'])\n\n # Bad channels\n if len(info['bads']) > 0:\n start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])\n end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n\n # General\n if info.get('experimenter') is not None:\n write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])\n if info.get('description') is not None:\n write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])\n if info.get('proj_id') is not None:\n write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])\n if info.get('proj_name') is not None:\n write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])\n if info.get('meas_date') is not None:\n write_int(fid, FIFF.FIFF_MEAS_DATE, info['meas_date'])\n write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])\n write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])\n if info['lowpass'] is not None:\n write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])\n if info['highpass'] is not None:\n write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])\n if info.get('line_freq') is not None:\n write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])\n if data_type is not None:\n write_int(fid, FIFF.FIFF_DATA_PACK, data_type)\n if info.get('custom_ref_applied'):\n write_int(fid, FIFF.FIFF_MNE_CUSTOM_REF, info['custom_ref_applied'])\n if info.get('xplotter_layout'):\n write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info['xplotter_layout'])\n\n # Channel information\n for k, c in enumerate(info['chs']):\n # Scan numbers may have been messed up\n c = deepcopy(c)\n c['scanno'] = k + 1\n # for float/double, the \"range\" param is unnecessary\n if reset_range is True:\n c['range'] = 1.0\n write_ch_info(fid, c)\n\n # Subject information\n if info.get('subject_info') is not None:\n start_block(fid, FIFF.FIFFB_SUBJECT)\n si = info['subject_info']\n if si.get('id') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])\n if si.get('his_id') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])\n if si.get('last_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])\n if si.get('first_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])\n if si.get('middle_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si['middle_name'])\n if si.get('birthday') is not None:\n write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])\n if si.get('sex') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])\n if si.get('hand') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])\n end_block(fid, FIFF.FIFFB_SUBJECT)\n\n if info.get('hpi_subsystem') is not None:\n hs = info['hpi_subsystem']\n start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)\n if hs.get('ncoil') is not None:\n write_int(fid, FIFF.FIFF_HPI_NCOIL, hs['ncoil'])\n if hs.get('event_channel') is not None:\n write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs['event_channel'])\n if hs.get('hpi_coils') is not None:\n for coil in hs['hpi_coils']:\n start_block(fid, FIFF.FIFFB_HPI_COIL)\n if 
coil.get('event_bits') is not None:\n write_int(fid, FIFF.FIFF_EVENT_BITS,\n coil['event_bits'])\n end_block(fid, FIFF.FIFFB_HPI_COIL)\n end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM)\n\n # CTF compensation info\n write_ctf_comp(fid, info['comps'])\n\n # KIT system ID\n if info.get('kit_system_id') is not None:\n write_int(fid, FIFF.FIFF_MNE_KIT_SYSTEM_ID, info['kit_system_id'])\n\n end_block(fid, FIFF.FIFFB_MEAS_INFO)\n\n # Processing history\n _write_proc_history(fid, info)\n\n\ndef write_info(fname, info, data_type=None, reset_range=True):\n \"\"\"Write measurement info in fif file.\n\n Parameters\n ----------\n fname : str\n The name of the file. Should end by -info.fif.\n info : instance of Info\n The measurement info structure\n data_type : int\n The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n \"\"\"\n fid = start_file(fname)\n start_block(fid, FIFF.FIFFB_MEAS)\n write_meas_info(fid, info, data_type, reset_range)\n end_block(fid, FIFF.FIFFB_MEAS)\n end_file(fid)\n\n\ndef _is_equal_dict(dicts):\n \"\"\"Aux function\"\"\"\n tests = zip(*[d.items() for d in dicts])\n is_equal = []\n for d in tests:\n k0, v0 = d[0]\n if (isinstance(v0, (list, np.ndarray)) and len(v0) > 0 and\n isinstance(v0[0], dict)):\n for k, v in d:\n is_equal.append((k0 == k) and _is_equal_dict(v))\n else:\n is_equal.append(all(np.all(k == k0) and\n (np.array_equal(v, v0) if isinstance(v, np.ndarray)\n else np.all(v == v0)) for k, v in d))\n return all(is_equal)\n\n\n@verbose\ndef _merge_dict_values(dicts, key, verbose=None):\n \"\"\"Merge things together\n\n Fork for {'dict', 'list', 'array', 'other'}\n and consider cases where one or all are of the same type.\n \"\"\"\n values = [d[key] for d in dicts]\n msg = (\"Don't know how to merge '%s'. Make sure values are \"\n \"compatible.\" % key)\n\n def _flatten(lists):\n return [item for sublist in lists for item in sublist]\n\n def _check_isinstance(values, kind, func):\n return func([isinstance(v, kind) for v in values])\n\n def _where_isinstance(values, kind):\n \"\"\"Aux function\"\"\"\n return np.where([isinstance(v, type) for v in values])[0]\n\n # list\n if _check_isinstance(values, list, all):\n lists = (d[key] for d in dicts)\n return (_uniquify_projs(_flatten(lists)) if key == 'projs'\n else _flatten(lists))\n elif _check_isinstance(values, list, any):\n idx = _where_isinstance(values, list)\n if len(idx) == 1:\n return values[int(idx)]\n elif len(idx) > 1:\n lists = (d[key] for d in dicts if isinstance(d[key], list))\n return _flatten(lists)\n # dict\n elif _check_isinstance(values, dict, all):\n is_qual = _is_equal_dict(values)\n if is_qual:\n return values[0]\n else:\n RuntimeError(msg)\n elif _check_isinstance(values, dict, any):\n idx = _where_isinstance(values, dict)\n if len(idx) == 1:\n return values[int(idx)]\n elif len(idx) > 1:\n raise RuntimeError(msg)\n # ndarray\n elif _check_isinstance(values, np.ndarray, all):\n is_qual = all(np.all(values[0] == x) for x in values[1:])\n if is_qual:\n return values[0]\n elif key == 'meas_date':\n logger.info('Found multiple entries for %s. 
'\n 'Setting value to `None`' % key)\n return None\n else:\n raise RuntimeError(msg)\n elif _check_isinstance(values, np.ndarray, any):\n idx = _where_isinstance(values, np.ndarray)\n if len(idx) == 1:\n return values[int(idx)]\n elif len(idx) > 1:\n raise RuntimeError(msg)\n # other\n else:\n unique_values = set(values)\n if len(unique_values) == 1:\n return list(values)[0]\n elif isinstance(list(unique_values)[0], BytesIO):\n logger.info('Found multiple StringIO instances. '\n 'Setting value to `None`')\n return None\n elif isinstance(list(unique_values)[0], string_types):\n logger.info('Found multiple filenames. '\n 'Setting value to `None`')\n return None\n else:\n raise RuntimeError(msg)\n\n\n@verbose\ndef _merge_info(infos, force_update_to_first=False, verbose=None):\n \"\"\"Merge multiple measurement info dictionaries.\n\n - Fields that are present in only one info object will be used in the\n merged info.\n - Fields that are present in multiple info objects and are the same\n will be used in the merged info.\n - Fields that are present in multiple info objects and are different\n will result in a None value in the merged info.\n - Channels will be concatenated. If multiple info objects contain\n channels with the same name, an exception is raised.\n\n Parameters\n ----------\n infos | list of instance of Info\n Info objects to merge into one info object.\n force_update_to_first : bool\n If True, force the fields for objects in `info` will be updated\n to match those in the first item. Use at your own risk, as this\n may overwrite important metadata.\n verbose : bool, str, int, or NonIe\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of Info\n The merged info object.\n \"\"\"\n for info in infos:\n info._check_consistency()\n if force_update_to_first is True:\n infos = deepcopy(infos)\n _force_update_info(infos[0], infos[1:])\n info = Info()\n info['chs'] = []\n for this_info in infos:\n info['chs'].extend(this_info['chs'])\n info._update_redundant()\n duplicates = set([ch for ch in info['ch_names']\n if info['ch_names'].count(ch) > 1])\n if len(duplicates) > 0:\n msg = (\"The following channels are present in more than one input \"\n \"measurement info objects: %s\" % list(duplicates))\n raise ValueError(msg)\n\n transforms = ['ctf_head_t', 'dev_head_t', 'dev_ctf_t']\n for trans_name in transforms:\n trans = [i[trans_name] for i in infos if i[trans_name]]\n if len(trans) == 0:\n info[trans_name] = None\n elif len(trans) == 1:\n info[trans_name] = trans[0]\n elif all(np.all(trans[0]['trans'] == x['trans']) and\n trans[0]['from'] == x['from'] and\n trans[0]['to'] == x['to']\n for x in trans[1:]):\n info[trans_name] = trans[0]\n else:\n msg = (\"Measurement infos provide mutually inconsistent %s\" %\n trans_name)\n raise ValueError(msg)\n\n # KIT system-IDs\n kit_sys_ids = [i['kit_system_id'] for i in infos if i['kit_system_id']]\n if len(kit_sys_ids) == 0:\n info['kit_system_id'] = None\n elif len(set(kit_sys_ids)) == 1:\n info['kit_system_id'] = kit_sys_ids[0]\n else:\n raise ValueError(\"Trying to merge channels from different KIT systems\")\n\n # other fields\n other_fields = ['acq_pars', 'acq_stim', 'bads', 'buffer_size_sec',\n 'comps', 'custom_ref_applied', 'description', 'dig',\n 'experimenter', 'file_id', 'filename', 'highpass',\n 'hpi_results', 'hpi_meas', 'hpi_subsystem', 'events',\n 'line_freq', 'lowpass', 'meas_date', 'meas_id',\n 'proj_id', 'proj_name', 'projs', 'sfreq',\n 'subject_info', 'sfreq', 
'xplotter_layout']\n for k in other_fields:\n info[k] = _merge_dict_values(infos, k)\n\n info._check_consistency()\n return info\n\n\ndef create_info(ch_names, sfreq, ch_types=None, montage=None):\n \"\"\"Create a basic Info instance suitable for use with create_raw\n\n Parameters\n ----------\n ch_names : list of str | int\n Channel names. If an int, a list of channel names will be created\n from :func:`range(ch_names) <range>`.\n sfreq : float\n Sample rate of the data.\n ch_types : list of str | str\n Channel types. If None, data are assumed to be misc.\n Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc',\n 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'hbr' or 'hbo'.\n If str, then all channels are assumed to be of the same type.\n montage : None | str | Montage | DigMontage | list\n A montage containing channel positions. If str or Montage is\n specified, the channel info will be updated with the channel\n positions. Default is None. If DigMontage is specified, the\n digitizer information will be updated. A list of unique montages,\n can be specifed and applied to the info. See also the documentation of\n :func:`mne.channels.read_montage` for more information.\n\n Returns\n -------\n info : instance of Info\n The measurement info.\n\n Notes\n -----\n The info dictionary will be sparsely populated to enable functionality\n within the rest of the package. Advanced functionality such as source\n localization can only be obtained through substantial, proper\n modifications of the info structure (not recommended).\n\n Note that the MEG device-to-head transform ``info['dev_head_t']`` will\n be initialized to the identity transform.\n \"\"\"\n if isinstance(ch_names, int):\n ch_names = list(np.arange(ch_names).astype(str))\n if not isinstance(ch_names, (list, tuple)):\n raise TypeError('ch_names must be a list, tuple, or int')\n sfreq = float(sfreq)\n if sfreq <= 0:\n raise ValueError('sfreq must be positive')\n nchan = len(ch_names)\n if ch_types is None:\n ch_types = ['misc'] * nchan\n if isinstance(ch_types, string_types):\n ch_types = [ch_types] * nchan\n if len(ch_types) != nchan:\n raise ValueError('ch_types and ch_names must be the same length '\n '(%s != %s)' % (len(ch_types), nchan))\n info = _empty_info(sfreq)\n info['meas_date'] = np.array([0, 0], np.int32)\n loc = np.concatenate((np.zeros(3), np.eye(3).ravel())).astype(np.float32)\n for ci, (name, kind) in enumerate(zip(ch_names, ch_types)):\n if not isinstance(name, string_types):\n raise TypeError('each entry in ch_names must be a string')\n if not isinstance(kind, string_types):\n raise TypeError('each entry in ch_types must be a string')\n if kind not in _kind_dict:\n raise KeyError('kind must be one of %s, not %s'\n % (list(_kind_dict.keys()), kind))\n kind = _kind_dict[kind]\n chan_info = dict(loc=loc.copy(), unit_mul=0, range=1., cal=1.,\n kind=kind[0], coil_type=kind[1],\n unit=kind[2], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,\n ch_name=name, scanno=ci + 1, logno=ci + 1)\n info['chs'].append(chan_info)\n info._update_redundant()\n if montage is not None:\n from ..channels.montage import (Montage, DigMontage, _set_montage,\n read_montage)\n if not isinstance(montage, list):\n montage = [montage]\n for montage_ in montage:\n if isinstance(montage_, (Montage, DigMontage)):\n _set_montage(info, montage_)\n elif isinstance(montage_, string_types):\n montage_ = read_montage(montage_)\n _set_montage(info, montage_)\n else:\n raise TypeError('Montage must be an instance of Montage, '\n 'DigMontage, a list of 
montages, or filepath, '\n 'not %s.' % type(montage))\n info._check_consistency()\n return info\n\n\nRAW_INFO_FIELDS = (\n 'acq_pars', 'acq_stim', 'bads', 'buffer_size_sec', 'ch_names', 'chs',\n 'comps', 'ctf_head_t', 'custom_ref_applied', 'description', 'dev_ctf_t',\n 'dev_head_t', 'dig', 'experimenter', 'events',\n 'file_id', 'filename', 'highpass', 'hpi_meas', 'hpi_results',\n 'hpi_subsystem', 'kit_system_id', 'line_freq', 'lowpass', 'meas_date',\n 'meas_id', 'nchan', 'proj_id', 'proj_name', 'projs', 'sfreq',\n 'subject_info', 'xplotter_layout',\n)\n\n\ndef _empty_info(sfreq):\n \"\"\"Create an empty info dictionary\"\"\"\n from ..transforms import Transform\n _none_keys = (\n 'acq_pars', 'acq_stim', 'buffer_size_sec', 'ctf_head_t', 'description',\n 'dev_ctf_t', 'dig', 'experimenter',\n 'file_id', 'filename', 'highpass', 'hpi_subsystem', 'kit_system_id',\n 'line_freq', 'lowpass', 'meas_date', 'meas_id', 'proj_id', 'proj_name',\n 'subject_info', 'xplotter_layout',\n )\n _list_keys = ('bads', 'chs', 'comps', 'events', 'hpi_meas', 'hpi_results',\n 'projs')\n info = Info()\n for k in _none_keys:\n info[k] = None\n for k in _list_keys:\n info[k] = list()\n info['custom_ref_applied'] = False\n info['dev_head_t'] = Transform('meg', 'head', np.eye(4))\n info['highpass'] = 0.\n info['sfreq'] = float(sfreq)\n info['lowpass'] = info['sfreq'] / 2.\n info._update_redundant()\n info._check_consistency()\n return info\n\n\ndef _force_update_info(info_base, info_target):\n \"\"\"Update target info objects with values from info base.\n\n Note that values in info_target will be overwritten by those in info_base.\n This will overwrite all fields except for: 'chs', 'ch_names', 'nchan'.\n\n Parameters\n ----------\n info_base : mne.Info\n The Info object you want to use for overwriting values\n in target Info objects.\n info_target : mne.Info | list of mne.Info\n The Info object(s) you wish to overwrite using info_base. These objects\n will be modified in-place.\n \"\"\"\n exclude_keys = ['chs', 'ch_names', 'nchan']\n info_target = np.atleast_1d(info_target).ravel()\n all_infos = np.hstack([info_base, info_target])\n for ii in all_infos:\n if not isinstance(ii, Info):\n raise ValueError('Inputs must be of type Info. '\n 'Found type %s' % type(ii))\n for key, val in info_base.items():\n if key in exclude_keys:\n continue\n for i_targ in info_target:\n i_targ[key] = val\n\n\ndef anonymize_info(info):\n \"\"\"Anonymize measurement information in place.\n\n Reset 'subject_info', 'meas_date', 'file_id', and 'meas_id' keys if they\n exist in ``info``.\n\n Parameters\n ----------\n info : dict, instance of Info\n Measurement information for the dataset.\n\n Returns\n -------\n info : instance of Info\n Measurement information for the dataset.\n\n Notes\n -----\n Operates in place.\n \"\"\"\n if not isinstance(info, Info):\n raise ValueError('self must be an Info instance.')\n if info.get('subject_info') is not None:\n del info['subject_info']\n info['meas_date'] = [0, 0]\n for key_1 in ('file_id', 'meas_id'):\n for key_2 in ('secs', 'msecs', 'usecs'):\n info[key_1][key_2] = 0\n return info\n",
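The module above defines the Info container together with its readers and writers (read_info, write_info, create_info, anonymize_info). Below is a minimal usage sketch, not part of the original dump: it assumes the module is importable at its historical location mne.io.meas_info and writes to a throwaway temporary file.

import os.path as op
import tempfile

import mne
# Assumed import path for the helpers defined above (historically
# mne/io/meas_info.py); adjust if the module lives elsewhere.
from mne.io.meas_info import read_info, write_info, anonymize_info

# Build a small Info object: three EEG channels sampled at 250 Hz.
info = mne.create_info(ch_names=['EEG 001', 'EEG 002', 'EEG 003'],
                       sfreq=250., ch_types='eeg')
print(info)  # __repr__ above summarizes the non-empty fields

# Round-trip the measurement info through a FIF file
# (write_info expects a name ending in -info.fif).
fname = op.join(tempfile.mkdtemp(), 'example-info.fif')
write_info(fname, info)
info_read = read_info(fname)
assert info_read['sfreq'] == info['sfreq']

# Reset identifying metadata (subject_info, meas_date, file/meas ids) in place.
anonymize_info(info_read)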
"# Author: Christian Brodbeck <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\n\nfrom numpy import array\nfrom numpy.testing import assert_allclose\nfrom nose.tools import assert_equal, assert_false, assert_raises, assert_true\n\nfrom mne.datasets import testing\nfrom mne.io.tests import data_dir as fiff_data_dir\nfrom mne.utils import (_TempDir, requires_mne, requires_freesurfer,\n requires_traits)\n\ndata_path = testing.data_path(download=False)\nsubjects_dir = os.path.join(data_path, 'subjects')\nbem_path = os.path.join(subjects_dir, 'sample', 'bem', 'sample-1280-bem.fif')\ninst_path = os.path.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc_raw.fif')\nfid_path = os.path.join(fiff_data_dir, 'fsaverage-fiducials.fif')\n\n\[email protected]_testing_data\n@requires_traits\ndef test_bem_source():\n \"\"\"Test SurfaceSource\"\"\"\n from mne.gui._file_traits import SurfaceSource\n\n bem = SurfaceSource()\n assert_equal(bem.points.shape, (0, 3))\n assert_equal(bem.tris.shape, (0, 3))\n\n bem.file = bem_path\n assert_equal(bem.points.shape, (642, 3))\n assert_equal(bem.tris.shape, (1280, 3))\n\n\[email protected]_testing_data\n@requires_traits\ndef test_fiducials_source():\n \"\"\"Test FiducialsSource\"\"\"\n from mne.gui._file_traits import FiducialsSource\n\n fid = FiducialsSource()\n fid.file = fid_path\n\n points = array([[-0.08061612, -0.02908875, -0.04131077],\n [0.00146763, 0.08506715, -0.03483611],\n [0.08436285, -0.02850276, -0.04127743]])\n assert_allclose(fid.points, points, 1e-6)\n\n fid.file = ''\n assert_equal(fid.points, None)\n\n\[email protected]_testing_data\n@requires_traits\ndef test_inst_source():\n \"\"\"Test InstSource\"\"\"\n from mne.gui._file_traits import InstSource\n\n inst = InstSource()\n assert_equal(inst.inst_fname, '-')\n\n inst.file = inst_path\n assert_equal(inst.inst_dir, os.path.dirname(inst_path))\n\n lpa = array([[-7.13766068e-02, 0.00000000e+00, 5.12227416e-09]])\n nasion = array([[3.72529030e-09, 1.02605611e-01, 4.19095159e-09]])\n rpa = array([[7.52676800e-02, 0.00000000e+00, 5.58793545e-09]])\n assert_allclose(inst.lpa, lpa)\n assert_allclose(inst.nasion, nasion)\n assert_allclose(inst.rpa, rpa)\n\n\[email protected]_testing_data\n@requires_traits\ndef test_subject_source():\n \"\"\"Test SubjectSelector\"\"\"\n from mne.gui._file_traits import MRISubjectSource\n\n mri = MRISubjectSource()\n mri.subjects_dir = subjects_dir\n assert_true('sample' in mri.subjects)\n mri.subject = 'sample'\n\n\[email protected]_testing_data\n@requires_traits\n@requires_mne\n@requires_freesurfer\ndef test_subject_source_with_fsaverage():\n \"\"\"Test SubjectSelector\"\"\"\n from mne.gui._file_traits import MRISubjectSource\n tempdir = _TempDir()\n\n mri = MRISubjectSource()\n assert_false(mri.can_create_fsaverage)\n assert_raises(RuntimeError, mri.create_fsaverage)\n\n mri.subjects_dir = tempdir\n assert_true(mri.can_create_fsaverage)\n mri.create_fsaverage()\n",
"import os.path as op\n\nfrom nose.tools import assert_true, assert_raises\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\nimport warnings\n\nimport mne\nfrom mne import compute_covariance\nfrom mne.datasets import testing\nfrom mne.beamformer import lcmv, lcmv_epochs, lcmv_raw, tf_lcmv\nfrom mne.beamformer._lcmv import _lcmv_source_power\nfrom mne.externals.six import advance_iterator\nfrom mne.utils import run_tests_if_main, slow_test\n\n\ndata_path = testing.data_path(download=False)\nfname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')\nfname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')\nfname_fwd = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')\nfname_fwd_vol = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc-meg-vol-7-fwd.fif')\nfname_event = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc_raw-eve.fif')\nlabel = 'Aud-lh'\nfname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)\n\nwarnings.simplefilter('always') # enable b/c these tests throw warnings\n\n\ndef read_forward_solution_meg(*args, **kwargs):\n fwd = mne.read_forward_solution(*args, **kwargs)\n return mne.pick_types_forward(fwd, meg=True, eeg=False)\n\n\ndef _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,\n epochs_preload=True, data_cov=True):\n \"\"\"Read in data used in tests\n \"\"\"\n label = mne.read_label(fname_label)\n events = mne.read_events(fname_event)\n raw = mne.io.read_raw_fif(fname_raw, preload=True, add_eeg_ref=False)\n forward = mne.read_forward_solution(fname_fwd)\n if all_forward:\n forward_surf_ori = read_forward_solution_meg(fname_fwd, surf_ori=True)\n forward_fixed = read_forward_solution_meg(fname_fwd, force_fixed=True,\n surf_ori=True)\n forward_vol = read_forward_solution_meg(fname_fwd_vol, surf_ori=True)\n else:\n forward_surf_ori = None\n forward_fixed = None\n forward_vol = None\n\n event_id, tmin, tmax = 1, tmin, tmax\n\n # Setup for reading the raw data\n raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bad channels\n # Set up pick list: MEG - bad channels\n left_temporal_channels = mne.read_selection('Left-temporal')\n picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,\n eog=True, ref_meg=False, exclude='bads',\n selection=left_temporal_channels)\n raw.pick_channels([raw.ch_names[ii] for ii in picks])\n raw.info.normalize_proj() # avoid projection warnings\n\n if epochs:\n # Read epochs\n epochs = mne.Epochs(\n raw, events, event_id, tmin, tmax, proj=True,\n baseline=(None, 0), preload=epochs_preload,\n reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6),\n add_eeg_ref=False)\n if epochs_preload:\n epochs.resample(200, npad=0, n_jobs=2)\n evoked = epochs.average()\n info = evoked.info\n else:\n epochs = None\n evoked = None\n info = raw.info\n\n noise_cov = mne.read_cov(fname_cov)\n with warnings.catch_warnings(record=True): # bad proj\n noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05,\n eeg=0.1, proj=True)\n if data_cov:\n with warnings.catch_warnings(record=True): # too few samples\n data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145)\n else:\n data_cov = None\n\n return raw, epochs, evoked, data_cov, noise_cov, label, forward,\\\n forward_surf_ori, forward_fixed, forward_vol\n\n\n@slow_test\[email protected]_testing_data\ndef test_lcmv():\n \"\"\"Test LCMV with evoked data and single trials\n \"\"\"\n raw, epochs, evoked, data_cov, noise_cov, label, 
forward,\\\n forward_surf_ori, forward_fixed, forward_vol = _get_data()\n\n for fwd in [forward, forward_vol]:\n stc = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01)\n stc.crop(0.02, None)\n\n stc_pow = np.sum(stc.data, axis=1)\n idx = np.argmax(stc_pow)\n max_stc = stc.data[idx]\n tmax = stc.times[np.argmax(max_stc)]\n\n assert_true(0.09 < tmax < 0.105, tmax)\n assert_true(0.9 < np.max(max_stc) < 3., np.max(max_stc))\n\n if fwd is forward:\n # Test picking normal orientation (surface source space only)\n stc_normal = lcmv(evoked, forward_surf_ori, noise_cov,\n data_cov, reg=0.01, pick_ori=\"normal\")\n stc_normal.crop(0.02, None)\n\n stc_pow = np.sum(np.abs(stc_normal.data), axis=1)\n idx = np.argmax(stc_pow)\n max_stc = stc_normal.data[idx]\n tmax = stc_normal.times[np.argmax(max_stc)]\n\n assert_true(0.04 < tmax < 0.11, tmax)\n assert_true(0.4 < np.max(max_stc) < 2., np.max(max_stc))\n\n # The amplitude of normal orientation results should always be\n # smaller than free orientation results\n assert_true((np.abs(stc_normal.data) <= stc.data).all())\n\n # Test picking source orientation maximizing output source power\n stc_max_power = lcmv(evoked, fwd, noise_cov, data_cov, reg=0.01,\n pick_ori=\"max-power\")\n stc_max_power.crop(0.02, None)\n stc_pow = np.sum(stc_max_power.data, axis=1)\n idx = np.argmax(stc_pow)\n max_stc = stc_max_power.data[idx]\n tmax = stc.times[np.argmax(max_stc)]\n\n assert_true(0.09 < tmax < 0.11, tmax)\n assert_true(0.8 < np.max(max_stc) < 3., np.max(max_stc))\n\n # Maximum output source power orientation results should be similar to\n # free orientation results\n assert_true((stc_max_power.data - stc.data < 1).all())\n\n # Test if fixed forward operator is detected when picking normal or\n # max-power orientation\n assert_raises(ValueError, lcmv, evoked, forward_fixed, noise_cov, data_cov,\n reg=0.01, pick_ori=\"normal\")\n assert_raises(ValueError, lcmv, evoked, forward_fixed, noise_cov, data_cov,\n reg=0.01, pick_ori=\"max-power\")\n\n # Test if non-surface oriented forward operator is detected when picking\n # normal orientation\n assert_raises(ValueError, lcmv, evoked, forward, noise_cov, data_cov,\n reg=0.01, pick_ori=\"normal\")\n\n # Test if volume forward operator is detected when picking normal\n # orientation\n assert_raises(ValueError, lcmv, evoked, forward_vol, noise_cov, data_cov,\n reg=0.01, pick_ori=\"normal\")\n\n # Now test single trial using fixed orientation forward solution\n # so we can compare it to the evoked solution\n stcs = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov,\n reg=0.01)\n stcs_ = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov,\n reg=0.01, return_generator=True)\n assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)\n\n epochs.drop_bad()\n assert_true(len(epochs.events) == len(stcs))\n\n # average the single trial estimates\n stc_avg = np.zeros_like(stcs[0].data)\n for this_stc in stcs:\n stc_avg += this_stc.data\n stc_avg /= len(stcs)\n\n # compare it to the solution using evoked with fixed orientation\n stc_fixed = lcmv(evoked, forward_fixed, noise_cov, data_cov, reg=0.01)\n assert_array_almost_equal(stc_avg, stc_fixed.data)\n\n # use a label so we have few source vertices and delayed computation is\n # not used\n stcs_label = lcmv_epochs(epochs, forward_fixed, noise_cov, data_cov,\n reg=0.01, label=label)\n\n assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)\n\n\[email protected]_testing_data\ndef test_lcmv_raw():\n \"\"\"Test LCMV with raw data\n \"\"\"\n raw, _, _, 
_, noise_cov, label, forward, _, _, _ =\\\n _get_data(all_forward=False, epochs=False, data_cov=False)\n\n tmin, tmax = 0, 20\n start, stop = raw.time_as_index([tmin, tmax])\n\n # use only the left-temporal MEG channels for LCMV\n data_cov = mne.compute_raw_covariance(raw, tmin=tmin, tmax=tmax)\n stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01,\n label=label, start=start, stop=stop)\n\n assert_array_almost_equal(np.array([tmin, tmax]),\n np.array([stc.times[0], stc.times[-1]]),\n decimal=2)\n\n # make sure we get an stc with vertices only in the lh\n vertno = [forward['src'][0]['vertno'], forward['src'][1]['vertno']]\n assert_true(len(stc.vertices[0]) == len(np.intersect1d(vertno[0],\n label.vertices)))\n assert_true(len(stc.vertices[1]) == 0)\n\n\[email protected]_testing_data\ndef test_lcmv_source_power():\n \"\"\"Test LCMV source power computation\n \"\"\"\n raw, epochs, evoked, data_cov, noise_cov, label, forward,\\\n forward_surf_ori, forward_fixed, forward_vol = _get_data()\n\n stc_source_power = _lcmv_source_power(epochs.info, forward, noise_cov,\n data_cov, label=label)\n\n max_source_idx = np.argmax(stc_source_power.data)\n max_source_power = np.max(stc_source_power.data)\n\n assert_true(max_source_idx == 0, max_source_idx)\n assert_true(0.4 < max_source_power < 2.4, max_source_power)\n\n # Test picking normal orientation and using a list of CSD matrices\n stc_normal = _lcmv_source_power(\n epochs.info, forward_surf_ori, noise_cov, data_cov,\n pick_ori=\"normal\", label=label)\n\n # The normal orientation results should always be smaller than free\n # orientation results\n assert_true((np.abs(stc_normal.data[:, 0]) <=\n stc_source_power.data[:, 0]).all())\n\n # Test if fixed forward operator is detected when picking normal\n # orientation\n assert_raises(ValueError, _lcmv_source_power, raw.info, forward_fixed,\n noise_cov, data_cov, pick_ori=\"normal\")\n\n # Test if non-surface oriented forward operator is detected when picking\n # normal orientation\n assert_raises(ValueError, _lcmv_source_power, raw.info, forward, noise_cov,\n data_cov, pick_ori=\"normal\")\n\n # Test if volume forward operator is detected when picking normal\n # orientation\n assert_raises(ValueError, _lcmv_source_power, epochs.info, forward_vol,\n noise_cov, data_cov, pick_ori=\"normal\")\n\n\[email protected]_testing_data\ndef test_tf_lcmv():\n \"\"\"Test TF beamforming based on LCMV\n \"\"\"\n label = mne.read_label(fname_label)\n events = mne.read_events(fname_event)\n raw = mne.io.read_raw_fif(fname_raw, preload=True, add_eeg_ref=False)\n forward = mne.read_forward_solution(fname_fwd)\n\n event_id, tmin, tmax = 1, -0.2, 0.2\n\n # Setup for reading the raw data\n raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels\n\n # Set up pick list: MEG - bad channels\n left_temporal_channels = mne.read_selection('Left-temporal')\n picks = mne.pick_types(raw.info, meg=True, eeg=False,\n stim=True, eog=True, exclude='bads',\n selection=left_temporal_channels)\n raw.pick_channels([raw.ch_names[ii] for ii in picks])\n raw.info.normalize_proj() # avoid projection warnings\n del picks\n\n # Read epochs\n epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,\n baseline=None, preload=False,\n reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6),\n add_eeg_ref=False)\n epochs.drop_bad()\n\n freq_bins = [(4, 12), (15, 40)]\n time_windows = [(-0.1, 0.1), (0.0, 0.2)]\n win_lengths = [0.2, 0.2]\n tstep = 0.1\n reg = 0.05\n\n source_power = []\n noise_covs = []\n for (l_freq, h_freq), win_length in 
zip(freq_bins, win_lengths):\n raw_band = raw.copy()\n raw_band.filter(l_freq, h_freq, method='iir', n_jobs=1,\n iir_params=dict(output='ba'))\n epochs_band = mne.Epochs(\n raw_band, epochs.events, epochs.event_id, tmin=tmin, tmax=tmax,\n baseline=None, proj=True, add_eeg_ref=False)\n with warnings.catch_warnings(record=True): # not enough samples\n noise_cov = compute_covariance(epochs_band, tmin=tmin, tmax=tmin +\n win_length)\n noise_cov = mne.cov.regularize(\n noise_cov, epochs_band.info, mag=reg, grad=reg, eeg=reg,\n proj=True)\n noise_covs.append(noise_cov)\n del raw_band # to save memory\n\n # Manually calculating source power in on frequency band and several\n # time windows to compare to tf_lcmv results and test overlapping\n if (l_freq, h_freq) == freq_bins[0]:\n for time_window in time_windows:\n with warnings.catch_warnings(record=True): # bad samples\n data_cov = compute_covariance(epochs_band,\n tmin=time_window[0],\n tmax=time_window[1])\n with warnings.catch_warnings(record=True): # bad proj\n stc_source_power = _lcmv_source_power(\n epochs.info, forward, noise_cov, data_cov,\n reg=reg, label=label)\n source_power.append(stc_source_power.data)\n\n with warnings.catch_warnings(record=True):\n stcs = tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep,\n win_lengths, freq_bins, reg=reg, label=label)\n\n assert_true(len(stcs) == len(freq_bins))\n assert_true(stcs[0].shape[1] == 4)\n\n # Averaging all time windows that overlap the time period 0 to 100 ms\n source_power = np.mean(source_power, axis=0)\n\n # Selecting the first frequency bin in tf_lcmv results\n stc = stcs[0]\n\n # Comparing tf_lcmv results with _lcmv_source_power results\n assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])\n\n # Test if using unsupported max-power orientation is detected\n assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax,\n tstep, win_lengths, freq_bins=freq_bins,\n pick_ori='max-power')\n\n # Test if incorrect number of noise CSDs is detected\n # Test if incorrect number of noise covariances is detected\n assert_raises(ValueError, tf_lcmv, epochs, forward, [noise_covs[0]], tmin,\n tmax, tstep, win_lengths, freq_bins)\n\n # Test if freq_bins and win_lengths incompatibility is detected\n assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax,\n tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins)\n\n # Test if time step exceeding window lengths is detected\n assert_raises(ValueError, tf_lcmv, epochs, forward, noise_covs, tmin, tmax,\n tstep=0.15, win_lengths=[0.2, 0.1], freq_bins=freq_bins)\n\n # Test correct detection of preloaded epochs objects that do not contain\n # the underlying raw object\n epochs_preloaded = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,\n baseline=(None, 0), preload=True,\n add_eeg_ref=False)\n epochs_preloaded._raw = None\n with warnings.catch_warnings(record=True): # not enough samples\n assert_raises(ValueError, tf_lcmv, epochs_preloaded, forward,\n noise_covs, tmin, tmax, tstep, win_lengths, freq_bins)\n\n with warnings.catch_warnings(record=True): # not enough samples\n # Pass only one epoch to test if subtracting evoked\n # responses yields zeros\n stcs = tf_lcmv(epochs[0], forward, noise_covs, tmin, tmax, tstep,\n win_lengths, freq_bins, subtract_evoked=True, reg=reg,\n label=label)\n\n assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))\n\n\nrun_tests_if_main()\n",
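The LCMV tests above reduce to a fairly short recipe. The following is a hedged, standalone sketch distilled from _get_data and test_lcmv (it assumes the mne testing dataset has been downloaded; file names, channel picks and parameters are the ones the test itself uses), not an official example:

import os.path as op
import warnings

import mne
from mne.beamformer import lcmv
from mne.datasets import testing

data_path = testing.data_path(download=False)
sample_dir = op.join(data_path, 'MEG', 'sample')
fname_raw = op.join(sample_dir, 'sample_audvis_trunc_raw.fif')
fname_cov = op.join(sample_dir, 'sample_audvis_trunc-cov.fif')
fname_fwd = op.join(sample_dir, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_event = op.join(sample_dir, 'sample_audvis_trunc_raw-eve.fif')

raw = mne.io.read_raw_fif(fname_raw, preload=True, add_eeg_ref=False)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # same bad channels as the test
left_temporal = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
                       ref_meg=False, exclude='bads', selection=left_temporal)
raw.pick_channels([raw.ch_names[ii] for ii in picks])
raw.info.normalize_proj()  # avoid projection warnings, as in the test

events = mne.read_events(fname_event)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.1, tmax=0.15, proj=True,
                    baseline=(None, 0), preload=True, add_eeg_ref=False)
evoked = epochs.average()

forward = mne.read_forward_solution(fname_fwd)
noise_cov = mne.read_cov(fname_cov)
with warnings.catch_warnings(record=True):  # few samples in the truncated file
    noise_cov = mne.cov.regularize(noise_cov, evoked.info, mag=0.05, grad=0.05,
                                   eeg=0.1, proj=True)
    data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145)

# Run the LCMV beamformer on the evoked response.
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01)
print(stc.data.shape, stc.times[0], stc.times[-1])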
"# Authors: Marijn van Vliet <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport warnings\nimport os.path as op\nimport numpy as np\n\nfrom nose.tools import assert_true, assert_equal, assert_raises\nfrom numpy.testing import assert_array_equal, assert_allclose\n\nfrom mne import (pick_channels, pick_types, Evoked, Epochs, read_events,\n set_eeg_reference, set_bipolar_reference,\n add_reference_channels)\nfrom mne.epochs import _BaseEpochs\nfrom mne.io import read_raw_fif\nfrom mne.io.constants import FIFF\nfrom mne.io.proj import _has_eeg_average_ref_proj\nfrom mne.io.reference import _apply_reference\nfrom mne.datasets import testing\nfrom mne.utils import run_tests_if_main\n\nwarnings.simplefilter('always') # enable b/c these tests throw warnings\n\ndata_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')\nfif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')\neve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif')\nave_fname = op.join(data_dir, 'sample_audvis_trunc-ave.fif')\n\n\ndef _test_reference(raw, reref, ref_data, ref_from):\n \"\"\"Test whether a reference has been correctly applied.\"\"\"\n # Separate EEG channels from other channel types\n picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')\n picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True,\n stim=True, exclude='bads')\n\n # Calculate indices of reference channesl\n picks_ref = [raw.ch_names.index(ch) for ch in ref_from]\n\n # Get data\n if isinstance(raw, Evoked):\n _data = raw.data\n _reref = reref.data\n else:\n _data = raw._data\n _reref = reref._data\n\n # Check that the ref has been properly computed\n assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2))\n\n # Get the raw EEG data and other channel data\n raw_eeg_data = _data[..., picks_eeg, :]\n raw_other_data = _data[..., picks_other, :]\n\n # Get the rereferenced EEG data\n reref_eeg_data = _reref[..., picks_eeg, :]\n reref_other_data = _reref[..., picks_other, :]\n\n # Undo rereferencing of EEG channels\n if isinstance(raw, _BaseEpochs):\n unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :]\n else:\n unref_eeg_data = reref_eeg_data + ref_data\n\n # Check that both EEG data and other data is the same\n assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15)\n assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15)\n\n\[email protected]_testing_data\ndef test_apply_reference():\n \"\"\"Test base function for rereferencing.\"\"\"\n raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)\n\n # Rereference raw data by creating a copy of original data\n reref, ref_data = _apply_reference(\n raw.copy(), ref_from=['EEG 001', 'EEG 002'])\n assert_true(reref.info['custom_ref_applied'])\n _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])\n\n # The CAR reference projection should have been removed by the function\n assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))\n\n # Test that disabling the reference does not break anything\n reref, ref_data = _apply_reference(raw, [])\n assert_array_equal(raw._data, reref._data)\n\n # Test that data is modified in place when copy=False\n reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'])\n assert_true(raw is reref)\n\n # Test re-referencing Epochs object\n raw = read_raw_fif(fif_fname, preload=False, add_eeg_ref=False)\n events = read_events(eve_fname)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n 
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True, add_eeg_ref=False)\n reref, ref_data = _apply_reference(\n epochs.copy(), ref_from=['EEG 001', 'EEG 002'])\n assert_true(reref.info['custom_ref_applied'])\n _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])\n\n # Test re-referencing Evoked object\n evoked = epochs.average()\n reref, ref_data = _apply_reference(\n evoked.copy(), ref_from=['EEG 001', 'EEG 002'])\n assert_true(reref.info['custom_ref_applied'])\n _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])\n\n # Test invalid input\n raw_np = read_raw_fif(fif_fname, preload=False, add_eeg_ref=False)\n assert_raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])\n\n\[email protected]_testing_data\ndef test_set_eeg_reference():\n \"\"\"Test rereference eeg data.\"\"\"\n raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)\n raw.info['projs'] = []\n\n # Test setting an average reference\n assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))\n reref, ref_data = set_eeg_reference(raw)\n assert_true(_has_eeg_average_ref_proj(reref.info['projs']))\n assert_true(ref_data is None)\n\n # Test setting an average reference when one was already present\n with warnings.catch_warnings(record=True): # weight tables\n reref, ref_data = set_eeg_reference(raw, copy=False)\n assert_true(ref_data is None)\n\n # Rereference raw data by creating a copy of original data\n reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)\n assert_true(reref.info['custom_ref_applied'])\n _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])\n\n # Test that data is modified in place when copy=False\n reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],\n copy=False)\n assert_true(raw is reref)\n\n\[email protected]_testing_data\ndef test_set_bipolar_reference():\n \"\"\"Test bipolar referencing.\"\"\"\n raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)\n reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar',\n {'kind': FIFF.FIFFV_EOG_CH,\n 'extra': 'some extra value'})\n assert_true(reref.info['custom_ref_applied'])\n\n # Compare result to a manual calculation\n a = raw.copy().pick_channels(['EEG 001', 'EEG 002'])\n a = a._data[0, :] - a._data[1, :]\n b = reref.copy().pick_channels(['bipolar'])._data[0, :]\n assert_allclose(a, b)\n\n # Original channels should be replaced by a virtual one\n assert_true('EEG 001' not in reref.ch_names)\n assert_true('EEG 002' not in reref.ch_names)\n assert_true('bipolar' in reref.ch_names)\n\n # Check channel information\n bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]\n an_info = reref.info['chs'][raw.ch_names.index('EEG 001')]\n for key in bp_info:\n if key == 'loc':\n assert_array_equal(bp_info[key], 0)\n elif key == 'coil_type':\n assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR)\n elif key == 'kind':\n assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH)\n else:\n assert_equal(bp_info[key], an_info[key])\n assert_equal(bp_info['extra'], 'some extra value')\n\n # Minimalist call\n reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002')\n assert_true('EEG 001-EEG 002' in reref.ch_names)\n\n # Set multiple references at once\n reref = set_bipolar_reference(\n raw,\n ['EEG 001', 'EEG 003'],\n ['EEG 002', 'EEG 004'],\n ['bipolar1', 'bipolar2'],\n [{'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'},\n {'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'}],\n )\n a = raw.copy().pick_channels(['EEG 
001', 'EEG 002', 'EEG 003', 'EEG 004'])\n a = np.array([a._data[0, :] - a._data[1, :],\n a._data[2, :] - a._data[3, :]])\n b = reref.copy().pick_channels(['bipolar1', 'bipolar2'])._data\n assert_allclose(a, b)\n\n # Test creating a bipolar reference that doesn't involve EEG channels:\n # it should not set the custom_ref_applied flag\n reref = set_bipolar_reference(raw, 'MEG 0111', 'MEG 0112',\n ch_info={'kind': FIFF.FIFFV_MEG_CH})\n assert_true(not reref.info['custom_ref_applied'])\n assert_true('MEG 0111-MEG 0112' in reref.ch_names)\n\n # Test a battery of invalid inputs\n assert_raises(ValueError, set_bipolar_reference, raw,\n 'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar')\n assert_raises(ValueError, set_bipolar_reference, raw,\n ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')\n assert_raises(ValueError, set_bipolar_reference, raw,\n 'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2'])\n assert_raises(ValueError, set_bipolar_reference, raw,\n 'EEG 001', 'EEG 002', 'bipolar',\n ch_info=[{'foo': 'bar'}, {'foo': 'bar'}])\n assert_raises(ValueError, set_bipolar_reference, raw,\n 'EEG 001', 'EEG 002', ch_name='EEG 003')\n\n\ndef _check_channel_names(inst, ref_names):\n \"\"\"Check channel names.\"\"\"\n if isinstance(ref_names, str):\n ref_names = [ref_names]\n\n # Test that the names of the reference channels are present in `ch_names`\n ref_idx = pick_channels(inst.info['ch_names'], ref_names)\n assert_true(len(ref_idx), len(ref_names))\n\n # Test that the names of the reference channels are present in the `chs`\n # list\n inst.info._check_consistency() # Should raise no exceptions\n\n\[email protected]_testing_data\ndef test_add_reference():\n \"\"\"Test adding a reference.\"\"\"\n raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n # check if channel already exists\n assert_raises(ValueError, add_reference_channels,\n raw, raw.info['ch_names'][0])\n # add reference channel to Raw\n raw_ref = add_reference_channels(raw, 'Ref', copy=True)\n assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)\n assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])\n _check_channel_names(raw_ref, 'Ref')\n\n orig_nchan = raw.info['nchan']\n raw = add_reference_channels(raw, 'Ref', copy=False)\n assert_array_equal(raw._data, raw_ref._data)\n assert_equal(raw.info['nchan'], orig_nchan + 1)\n _check_channel_names(raw, 'Ref')\n\n # for Neuromag fif's, the reference electrode location is placed in\n # elements [3:6] of each \"data\" electrode location\n assert_allclose(raw.info['chs'][-1]['loc'][:3],\n raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6)\n\n ref_idx = raw.ch_names.index('Ref')\n ref_data, _ = raw[ref_idx]\n assert_array_equal(ref_data, 0)\n\n # add reference channel to Raw when no digitization points exist\n raw = read_raw_fif(fif_fname, add_eeg_ref=False).crop(0, 1).load_data()\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n del raw.info['dig']\n\n raw_ref = add_reference_channels(raw, 'Ref', copy=True)\n\n assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)\n assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])\n _check_channel_names(raw_ref, 'Ref')\n\n orig_nchan = raw.info['nchan']\n raw = add_reference_channels(raw, 'Ref', copy=False)\n assert_array_equal(raw._data, raw_ref._data)\n assert_equal(raw.info['nchan'], orig_nchan + 1)\n _check_channel_names(raw, 'Ref')\n\n # Test adding an existing channel as reference channel\n assert_raises(ValueError, add_reference_channels, 
raw,\n raw.info['ch_names'][0])\n\n # add two reference channels to Raw\n raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)\n _check_channel_names(raw_ref, ['M1', 'M2'])\n assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)\n assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])\n assert_array_equal(raw_ref._data[-2:, :], 0)\n\n raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)\n _check_channel_names(raw, ['M1', 'M2'])\n ref_idx = raw.ch_names.index('M1')\n ref_idy = raw.ch_names.index('M2')\n ref_data, _ = raw[[ref_idx, ref_idy]]\n assert_array_equal(ref_data, 0)\n\n # add reference channel to epochs\n raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)\n events = read_events(eve_fname)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True, add_eeg_ref=False)\n # default: proj=True, after which adding a Ref channel is prohibited\n assert_raises(RuntimeError, add_reference_channels, epochs, 'Ref')\n\n # create epochs in delayed mode, allowing removal of CAR when re-reffing\n epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True, proj='delayed',\n add_eeg_ref=False)\n epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)\n # CAR after custom reference is an Error\n assert_raises(RuntimeError, epochs_ref.set_eeg_reference)\n\n assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)\n _check_channel_names(epochs_ref, 'Ref')\n ref_idx = epochs_ref.ch_names.index('Ref')\n ref_data = epochs_ref.get_data()[:, ref_idx, :]\n assert_array_equal(ref_data, 0)\n picks_eeg = pick_types(epochs.info, meg=False, eeg=True)\n assert_array_equal(epochs.get_data()[:, picks_eeg, :],\n epochs_ref.get_data()[:, picks_eeg, :])\n\n # add two reference channels to epochs\n raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)\n events = read_events(eve_fname)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n # create epochs in delayed mode, allowing removal of CAR when re-reffing\n epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True, proj='delayed',\n add_eeg_ref=False)\n with warnings.catch_warnings(record=True): # multiple set zero\n epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)\n assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)\n _check_channel_names(epochs_ref, ['M1', 'M2'])\n ref_idx = epochs_ref.ch_names.index('M1')\n ref_idy = epochs_ref.ch_names.index('M2')\n assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')\n assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')\n ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]\n assert_array_equal(ref_data, 0)\n picks_eeg = pick_types(epochs.info, meg=False, eeg=True)\n assert_array_equal(epochs.get_data()[:, picks_eeg, :],\n epochs_ref.get_data()[:, picks_eeg, :])\n\n # add reference channel to evoked\n raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)\n events = read_events(eve_fname)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n # create epochs in delayed mode, allowing removal of CAR when re-reffing\n epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True, proj='delayed',\n add_eeg_ref=False)\n evoked = epochs.average()\n evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)\n assert_equal(evoked_ref.data.shape[0], 
evoked.data.shape[0] + 1)\n _check_channel_names(evoked_ref, 'Ref')\n ref_idx = evoked_ref.ch_names.index('Ref')\n ref_data = evoked_ref.data[ref_idx, :]\n assert_array_equal(ref_data, 0)\n picks_eeg = pick_types(evoked.info, meg=False, eeg=True)\n assert_array_equal(evoked.data[picks_eeg, :],\n evoked_ref.data[picks_eeg, :])\n\n # add two reference channels to evoked\n raw = read_raw_fif(fif_fname, preload=True, add_eeg_ref=False)\n events = read_events(eve_fname)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n # create epochs in delayed mode, allowing removal of CAR when re-reffing\n epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True, proj='delayed',\n add_eeg_ref=False)\n evoked = epochs.average()\n with warnings.catch_warnings(record=True): # multiple set zero\n evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)\n assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)\n _check_channel_names(evoked_ref, ['M1', 'M2'])\n ref_idx = evoked_ref.ch_names.index('M1')\n ref_idy = evoked_ref.ch_names.index('M2')\n ref_data = evoked_ref.data[[ref_idx, ref_idy], :]\n assert_array_equal(ref_data, 0)\n picks_eeg = pick_types(evoked.info, meg=False, eeg=True)\n assert_array_equal(evoked.data[picks_eeg, :],\n evoked_ref.data[picks_eeg, :])\n\n # Test invalid inputs\n raw_np = read_raw_fif(fif_fname, preload=False, add_eeg_ref=False)\n assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])\n assert_raises(ValueError, add_reference_channels, raw, 1)\n\nrun_tests_if_main()\n"
] | [
[
"numpy.linspace",
"numpy.cumsum",
"matplotlib.pyplot.get_cmap",
"numpy.max",
"numpy.zeros_like",
"numpy.any",
"numpy.where",
"matplotlib.patches.PathPatch",
"numpy.unique",
"numpy.tril_indices",
"numpy.size",
"matplotlib.pyplot.subplot",
"numpy.diff",
"numpy.zeros",
"numpy.random.mtrand.RandomState",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.path.Path",
"matplotlib.pyplot.Normalize",
"matplotlib.pyplot.getp",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.xticks",
"numpy.abs",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.setp",
"numpy.diag_indices",
"matplotlib.pyplot.yticks"
],
[
"numpy.hstack",
"numpy.dot",
"numpy.iterable",
"numpy.array_equal",
"numpy.unique",
"numpy.asarray",
"numpy.arange",
"numpy.eye",
"numpy.atleast_1d",
"numpy.all",
"scipy.linalg.inv",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt",
"numpy.savetxt"
],
[
"numpy.array",
"numpy.testing.assert_allclose"
],
[
"numpy.abs",
"numpy.max",
"numpy.intersect1d",
"numpy.argmax",
"numpy.zeros_like",
"numpy.mean",
"numpy.array",
"numpy.sum",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jsaez8/qtt | [
"fa6497ace86a255f33a2192ba01d063d07d6895e"
] | [
"src/qtt/instrument_drivers/virtual_awg.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 31 13:04:09 2016\n\n@author: diepencjv\n\"\"\"\n\n# %%\nimport numpy as np\nimport scipy.signal\nimport logging\nimport warnings\n\nimport qcodes\nfrom qcodes import Instrument\nfrom qcodes.plots.pyqtgraph import QtPlot\nfrom qcodes.data.data_array import DataArray\nimport qtt\nimport qtt.utilities.tools\n\nlogger = logging.getLogger(__name__)\n# %%\n\n\nclass virtual_awg(Instrument):\n \"\"\"\n\n Attributes:\n _awgs (list): handles to instruments\n awg_map (dict)\n hardware (Instrument): contains AWG to plunger values\n corr (float): unknown\n delay_FPGA (float): time delay of signals going through fridge\n\n \"\"\"\n\n def __init__(self, name, instruments=[], awg_map=None, hardware=None, verbose=1, **kwargs):\n super().__init__(name, **kwargs)\n logger.info('initialize virtual_awg %s' % name)\n self._awgs = instruments\n self.awg_map = awg_map\n self.hardware = hardware\n self.verbose = verbose\n self.delay_FPGA = 2.0e-6 # should depend on filterboxes\n self.corr = .0 # legacy code, specific for FPGA board not used any more\n self.maxdatapts = 16e6 # This used to be set to the fpga maximum, but that maximum should not be handled here\n\n self.awg_seq = None\n if len(self._awgs) == 0 and self.verbose:\n print('no physical AWGs connected')\n elif len(self._awgs) == 1:\n self.awg_cont = self._awgs[0]\n self.awg_cont.set('run_mode', 'CONT')\n elif len(self._awgs) == 2 and 'awg_mk' in self.awg_map:\n self.awg_cont = self._awgs[self.awg_map['awg_mk'][0]]\n self.awg_cont.set('run_mode', 'CONT')\n self.awg_seq = self._awgs[(self.awg_map['awg_mk'][0] + 1) % 2]\n\n self._set_seq_mode(self.awg_seq)\n self.delay_AWG = self.hardware.parameters['delay_AWG'].get()\n else:\n raise Exception(\n 'Configuration of AWGs not supported by virtual_awg instrument')\n\n self.AWG_clock = 1e8\n self.ch_amp = 4.0\n for awg in self._awgs:\n awg.set('clock_freq', self.AWG_clock)\n awg.delete_all_waveforms_from_list()\n for i in range(1, 5):\n awg.set('ch%s_amp' % i, self.ch_amp)\n\n def _set_seq_mode(self, a):\n a.set('run_mode', 'SEQ')\n a.sequence_length.set(1)\n a.set_sqel_trigger_wait(1, 0)\n\n def get_idn(self):\n ''' Overrule because the default VISA command does not work '''\n IDN = {'vendor': 'QuTech', 'model': 'virtual_awg',\n 'serial': None, 'firmware': None}\n return IDN\n\n def awg_gate(self, gate):\n \"\"\" Return true of the gate can be controlled by the awg\n\n Args:\n gate ()\n \"\"\"\n if gate is None:\n return False\n\n if isinstance(gate, dict):\n # vector scan, assume we can do it fast if all components are fast\n return np.all([self.awg_gate(g) for g in gate])\n if self.awg_map is None:\n return False\n\n if gate in self.awg_map:\n return True\n else:\n return False\n\n def stop(self, verbose=0):\n ''' Stops all AWGs and turns of all channels '''\n for awg in self._awgs:\n awg.stop()\n for i in range(1, 5):\n awg.set('ch%d_state' % i, 0)\n\n if verbose:\n print('Stopped AWGs')\n\n def sweep_init(self, waveforms, period=1e-3, delete=True, samp_freq=None):\n ''' Send waveform(s) to gate(s)\n\n Arguments:\n waveforms (dict): the waveforms with the gates as keys\n period (float): period of the waveform in seconds\n\n Returns:\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n\n Example:\n --------\n >> sweep_info = sweep_init(waveforms)\n '''\n sweepgates = [g for g in waveforms]\n\n if delete:\n for awg in self._awgs:\n awg.delete_all_waveforms_from_list()\n\n awgs = [self._awgs[self.awg_map[g][0]] for g in 
sweepgates]\n if 'fpga_mk' in self.awg_map:\n marker_info = self.awg_map['fpga_mk']\n marker_delay = self.delay_FPGA\n marker_name = 'fpga_mk'\n elif 'm4i_mk' in self.awg_map:\n marker_info = self.awg_map['m4i_mk']\n if samp_freq is not None:\n pretrigger_period = 16 / samp_freq\n else:\n pretrigger_period = 0\n marker_delay = self.delay_FPGA + pretrigger_period\n marker_name = 'm4i_mk'\n\n awgs.append(self._awgs[marker_info[0]])\n\n sweep_info = dict()\n wave_len = len(waveforms[sweepgates[0]]['wave'])\n for g in sweepgates:\n sweep_info[self.awg_map[g]] = dict()\n sweep_info[self.awg_map[g]]['waveform'] = waveforms[g]['wave']\n sweep_info[self.awg_map[g]]['marker1'] = np.zeros(wave_len)\n sweep_info[self.awg_map[g]]['marker2'] = np.zeros(wave_len)\n if 'name' in waveforms[g]:\n sweep_info[self.awg_map[g]]['name'] = waveforms[g]['name']\n else:\n sweep_info[self.awg_map[g]]['name'] = 'waveform_%s' % g\n if marker_info[:2] == self.awg_map[g]:\n sweep_info[marker_info[:2]]['delay'] = marker_delay\n\n # marker points\n marker_points = np.zeros(wave_len)\n marker_points[int(marker_delay * self.AWG_clock):(int(marker_delay * self.AWG_clock) + wave_len // 20)] = 1.0\n\n if marker_info[:2] not in sweep_info:\n sweep_info[marker_info[:2]] = dict()\n sweep_info[marker_info[:2]]['waveform'] = np.zeros(wave_len)\n sweep_info[marker_info[:2]]['marker1'] = np.zeros(wave_len)\n sweep_info[marker_info[:2]]['marker2'] = np.zeros(wave_len)\n for g in sweepgates:\n marker_name += '_%s' % g\n sweep_info[marker_info[:2]]['name'] = marker_name\n sweep_info[marker_info[:2]]['delay'] = marker_delay\n\n sweep_info[marker_info[:2]]['marker%d' % marker_info[2]] = marker_points\n self._awgs[marker_info[0]].set(\n 'ch%i_m%i_low' % (marker_info[1], marker_info[2]), 0)\n self._awgs[marker_info[0]].set(\n 'ch%i_m%i_high' % (marker_info[1], marker_info[2]), 2.6)\n\n # awg marker\n if getattr(self, 'awg_seq', None) is not None:\n awg_info = self.awg_map['awg_mk']\n if awg_info[:2] not in sweep_info:\n awgs.append(self._awgs[awg_info[0]])\n sweep_info[awg_info[:2]] = dict()\n sweep_info[awg_info[:2]]['waveform'] = np.zeros(wave_len)\n sweep_info[awg_info[:2]]['marker1'] = np.zeros(wave_len)\n sweep_info[awg_info[:2]]['marker2'] = np.zeros(wave_len)\n sweep_info[awg_info[:2]]['name'] = 'awg_mk'\n\n awg_marker = np.zeros(wave_len)\n awg_marker[0:wave_len // 20] = 1\n awg_marker = np.roll(\n awg_marker, wave_len - int(self.delay_AWG * self.AWG_clock))\n sweep_info[awg_info[:2]]['marker%d' %\n self.awg_map['awg_mk'][2]] = awg_marker\n self._awgs[awg_info[0]].set(\n 'ch%i_m%i_low' % (awg_info[1], awg_info[2]), 0)\n self._awgs[awg_info[0]].set(\n 'ch%i_m%i_high' % (awg_info[1], awg_info[2]), 2.6)\n\n # send waveforms\n if delete:\n for sweep in sweep_info:\n try:\n self._awgs[sweep[0]].send_waveform_to_list(sweep_info[sweep]['waveform'], sweep_info[\n sweep]['marker1'], sweep_info[sweep]['marker2'], sweep_info[sweep]['name'])\n except Exception as ex:\n print(ex)\n print('sweep_info[sweep][waveform] %s' % (sweep_info[sweep]['waveform'].shape,))\n print('sweep_info[sweep][marker1] %s' % (sweep_info[sweep]['marker1'].shape,))\n print('sweep_info[sweep][marker2] %s' % (sweep_info[sweep]['marker2'].shape,))\n\n return sweep_info\n\n def sweep_run(self, sweep_info):\n ''' Activate AWG(s) and channel(s) for the sweep(s).\n\n Arguments:\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n for sweep in sweep_info:\n if hasattr(self, 'awg_seq') and self._awgs[sweep[0]] == self.awg_seq:\n 
self._awgs[sweep[0]].set_sqel_waveform(\n sweep_info[sweep]['name'], sweep[1], 1)\n self._awgs[sweep[0]].set_sqel_loopcnt_to_inf(1)\n self._awgs[sweep[0]].set_sqel_event_jump_target_index(\n sweep[1], 1)\n self._awgs[sweep[0]].set_sqel_event_jump_type(1, 'IND')\n else:\n self._awgs[sweep[0]].set(\n 'ch%i_waveform' % sweep[1], sweep_info[sweep]['name'])\n\n for sweep in sweep_info:\n self._awgs[sweep[0]].set('ch%i_state' % sweep[1], 1)\n\n awgnrs = set([sweep[0] for sweep in sweep_info])\n for nr in awgnrs:\n self._awgs[nr].run()\n\n def make_sawtooth(self, sweeprange, period, width=.95, repetitionnr=1, start_zero=False):\n '''Make a sawtooth with a decline width determined by width. Not yet scaled with\n awg_to_plunger value.\n\n Arguments:\n sweeprange (float): the range of voltages to sweep over\n period (float): the period of the triangular signal\n\n Returns:\n wave_raw (array): raw data which represents the waveform\n '''\n samplerate = 1. / self.AWG_clock\n tt = np.arange(0, period * repetitionnr + samplerate, samplerate)\n v_wave = float(sweeprange / ((self.ch_amp / 2.0)))\n wave_raw = (v_wave / 2) * scipy.signal.sawtooth(2 * np.pi * tt / period, width=width)\n# idx_zero = np.argmin(np.abs(wave_raw))\n# wave_raw = np.roll(wave_raw, wave_raw.size-idx_zero)\n if start_zero:\n o = int((wave_raw.size) * (1 - width) / 2)\n wave_raw = np.roll(wave_raw, o)\n\n return wave_raw\n\n def make_pulses(self, voltages, waittimes, reps=1, filtercutoff=None, mvrange=None):\n \"\"\"Make a pulse sequence with custom voltage levels and wait times at each level.\n\n Arguments:\n voltages (list of floats): voltage levels to be applied in the sequence\n waittimes (list of floats): duration of each pulse in the sequence\n reps (int): number of times to repeat the pulse sequence in the waveform\n filtercutoff (float): cutoff frequency of a 1st order butterworth filter to make the pulse steps smoother\n\n Returns:\n wave_raw (array): raw data which represents the waveform\n \"\"\"\n if len(waittimes) != len(voltages):\n raise Exception('Number of voltage levels must be equal to the number of wait times')\n samples = [int(x * self.AWG_clock) for x in waittimes]\n if mvrange is None:\n mvrange = [max(voltages), min(voltages)]\n v_wave = float((mvrange[0] - mvrange[1]) / self.ch_amp)\n v_prop = [2 * ((x - mvrange[1]) / (mvrange[0] - mvrange[1])) - 1 for x in voltages]\n wave_raw = np.concatenate([x * v_wave * np.ones(y) for x, y in zip(v_prop, samples)])\n if filtercutoff is not None:\n b, a = scipy.signal.butter(1, 0.5 * filtercutoff / self.AWG_clock, btype='low', analog=False, output='ba')\n wave_raw = scipy.signal.filtfilt(b, a, wave_raw)\n wave_raw = np.tile(wave_raw, reps)\n\n return wave_raw\n\n def check_frequency_waveform(self, period, width):\n \"\"\" Check whether a sawtooth waveform with specified period can be generated \"\"\"\n old_sr = self.AWG_clock\n new_sr = 5 / (period * (1 - width))\n if (new_sr) > old_sr:\n warnings.warn('awg sampling frequency %.1f MHz is too low for signal requested (sr %.1f [MHz], period %.1f [ms])' % (\n old_sr / 1e6, new_sr / 1e6, 1e3 * period), UserWarning)\n return new_sr\n\n def sweep_gate(self, gate, sweeprange, period, width=.95, wave_name=None, delete=True):\n ''' Send a sawtooth signal with the AWG to a gate to sweep. 
Also\n send a marker to the measurement instrument.\n\n Args:\n gate (string): the name of the gate to sweep\n sweeprange (float): the range of voltages to sweep over\n period (float): the period of the triangular signal\n\n Returns:\n waveform (dict): The waveform being send with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n\n Example:\n >>> waveform, sweep_info = sweep_gate('P1',sweeprange=60,period=1e-3)\n '''\n\n self.check_frequency_waveform(period, width)\n self.check_amplitude(gate, sweeprange)\n\n start_zero = True\n waveform = dict()\n wave_raw = self.make_sawtooth(sweeprange, period, width, start_zero=start_zero)\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % gate].get()\n wave = wave_raw / awg_to_plunger\n waveform[gate] = dict()\n waveform[gate]['wave'] = wave\n if wave_name is None:\n waveform[gate]['name'] = 'sweep_%s' % gate\n else:\n waveform[gate]['name'] = wave_name\n sweep_info = self.sweep_init(waveform, period, delete)\n self.sweep_run(sweep_info)\n waveform['width'] = width\n waveform['start_zero'] = start_zero\n waveform['sweeprange'] = sweeprange\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['period'] = period\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def sweep_gate_virt(self, gate_comb, sweeprange, period, width=.95, delete=True):\n ''' Send a sawtooth signal with the AWG to a linear combination of\n gates to sweep. Also send a marker to the measurement instrument.\n\n Arguments:\n gate_comb (dict): the gates to sweep and the coefficients as values\n sweeprange (float): the range of voltages to sweep over\n period (float): the period of the triangular signal\n\n Returns:\n waveform (dict): The waveform being send with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n\n self.check_frequency_waveform(period, width)\n\n waveform = dict()\n for g in gate_comb:\n self.check_amplitude(g, gate_comb[g] * sweeprange)\n for g in gate_comb:\n wave_raw = self.make_sawtooth(sweeprange, period, width)\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()\n wave = wave_raw * gate_comb[g] / awg_to_plunger\n waveform[g] = dict()\n waveform[g]['wave'] = wave\n waveform[g]['name'] = 'sweep_%s' % g\n\n sweep_info = self.sweep_init(waveform, period, delete)\n self.sweep_run(sweep_info)\n waveform['width'] = width\n waveform['sweeprange'] = sweeprange\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['period'] = period\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def sweepandpulse_gate(self, sweepdata, pulsedata, wave_name=None, delete=True, shift_zero=True):\n ''' Makes and outputs a waveform which overlays a sawtooth signal to sweep\n a gate, with a pulse sequence. A marker is sent to the measurement instrument\n at the start of the waveform.\n IMPORTANT: The function offsets the voltages values so that the last point is 0 V on all gates (i.e. 
it centers the pulse sequence on the last point)\n\n Args:\n sweepdata (dict): inputs for the sawtooth (gate, sweeprange, period, width).\n See sweep_gate for more info.\n pulsedata (dict): inputs for the pulse sequence (gate_voltages, waittimes).\n See pulse_gates for more info.\n\n Returns:\n waveform (dict): The waveform being sent with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n\n sweepgate = sweepdata['gate']\n sweeprange = sweepdata['sweeprange']\n period = sweepdata['period']\n width = sweepdata.get('width', 0.95)\n\n gate_voltages = pulsedata['gate_voltages'].copy()\n if shift_zero:\n for g in gate_voltages:\n gate_voltages[g] = [x - gate_voltages[g][-1] for x in gate_voltages[g]]\n waittimes = pulsedata['waittimes']\n filtercutoff = pulsedata.get('filtercutoff', None)\n\n pulsesamp = [int(round(x * self.AWG_clock)) for x in waittimes]\n sawsamp = int(round(period * width * self.AWG_clock))\n pulsereps = int(np.ceil(self.AWG_clock * period * width / sum(pulsesamp)))\n allvoltages = np.concatenate([v for v in gate_voltages.values()])\n mvrange = [max(allvoltages), min(allvoltages)]\n\n self.check_frequency_waveform(period, width)\n\n waveform = dict()\n wave_sweep = self.make_sawtooth(sweeprange, period, width)\n for g in gate_voltages:\n self.check_amplitude(g, sweeprange + (mvrange[0] - mvrange[1]))\n for g in gate_voltages:\n wave_raw = self.make_pulses(gate_voltages[g], waittimes, reps=pulsereps,\n filtercutoff=filtercutoff, mvrange=mvrange)\n wave_raw = wave_raw[:sawsamp]\n wave_raw = np.pad(wave_raw, (0, len(wave_sweep) - len(wave_raw)), 'edge')\n if sweepgate == g:\n wave_raw += wave_sweep\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()\n wave = wave_raw / awg_to_plunger\n waveform[g] = dict()\n waveform[g]['wave'] = wave\n if wave_name is None:\n waveform[g]['name'] = 'sweepandpulse_%s' % g\n else:\n waveform[g]['name'] = wave_name\n sweep_info = self.sweep_init(waveform, period, delete)\n self.sweep_run(sweep_info)\n waveform['width'] = width\n waveform['sweeprange'] = sweeprange\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['period'] = period\n waveform['pulse_voltages'] = gate_voltages\n waveform['pulse_waittimes'] = waittimes\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def sweep_process(self, data, waveform, Naverage=1, direction='forwards', start_offset=1):\n \"\"\" Process the data returned by reading out based on the shape of\n the sawtooth send with the AWG.\n\n Args:\n data (list or Nxk array): the data (N is the number of samples)\n waveform (dict): contains the wave and the sawtooth width\n Naverage (int): number of times the signal was averaged\n direction (string): option to use backwards signal i.o. 
forwards\n\n Returns:\n data_processed (array): The data after dropping part of it.\n\n Example:\n >> data_processed = sweep_process(data, waveform, 25)\n \"\"\"\n width = waveform['width']\n\n if isinstance(data, list):\n data = np.array(data)\n\n if direction == 'forwards':\n end = int(np.floor(width * data.shape[0] - 1))\n data_processed = data[start_offset:end]\n elif direction == 'backwards':\n begin = int(np.ceil(width * data.shape[0] + 1))\n data_processed = data[begin:]\n data_processed = data_processed[::-1]\n\n data_processed = np.array(data_processed) / Naverage\n\n return data_processed\n\n def sweep_2D(self, samp_freq, sweepgates, sweepranges, resolution, width=.95, comp=None, delete=True):\n ''' Send sawtooth signals to the sweepgates which effectively do a 2D\n scan.\n\n The first sweepgate is the fast changing gate (on the horizontal axis).\n\n Arguments:\n samp_freq (float): sampling frequency of the measurement instrument in Hertz.\n sweepgates (list): two strings with names of gates to sweep\n sweepranges (list): two floats for sweepranges in milliVolts\n\n Returns:\n waveform (dict): The waveforms being send with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n# JP: I think FPGA exceptions should not be handled by awg\n# if resolution[0] * resolution[1] > self.maxdatapts:\n# raise Exception('resolution is set higher than FPGA memory allows')\n\n if self.corr != 0:\n raise Exception('please do not use the .corr setting any more')\n error_corr = resolution[0] * self.corr\n period_horz = resolution[0] / samp_freq + error_corr\n period_vert = resolution[1] * period_horz\n\n self.check_frequency_waveform(period_horz, width)\n for g, r in zip(sweepgates, sweepranges):\n self.check_amplitude(g, r)\n\n waveform = dict()\n # horizontal waveform\n wave_horz_raw = self.make_sawtooth(\n sweepranges[0], period_horz, repetitionnr=resolution[1])\n awg_to_plunger_horz = self.hardware.parameters[\n 'awg_to_%s' % sweepgates[0]].get()\n wave_horz = wave_horz_raw / awg_to_plunger_horz\n waveform[sweepgates[0]] = dict()\n waveform[sweepgates[0]]['wave'] = wave_horz\n waveform[sweepgates[0]]['name'] = 'sweep_2D_horz_%s' % sweepgates[0]\n\n # vertical waveform\n wave_vert_raw = self.make_sawtooth(sweepranges[1], period_vert)\n awg_to_plunger_vert = self.hardware.parameters[\n 'awg_to_%s' % sweepgates[1]].get()\n wave_vert = wave_vert_raw / awg_to_plunger_vert\n waveform[sweepgates[1]] = dict()\n waveform[sweepgates[1]]['wave'] = wave_vert\n waveform[sweepgates[1]]['name'] = 'sweep_2D_vert_%s' % sweepgates[1]\n\n if comp is not None:\n for g in comp:\n if g not in sweepgates:\n waveform[g] = dict()\n waveform[g]['wave'] = comp[g]['vert'] * \\\n wave_vert + comp[g]['horz'] * wave_horz\n waveform[g]['name'] = 'sweep_2D_comp_%s' % g\n else:\n raise Exception('Can not compensate a sweepgate')\n\n sweep_info = self.sweep_init(waveform, period=period_vert, delete=delete, samp_freq=samp_freq)\n self.sweep_run(sweep_info)\n\n waveform['width_horz'] = width\n waveform['sweeprange_horz'] = sweepranges[0]\n waveform['width_vert'] = width\n waveform['sweeprange_vert'] = sweepranges[1]\n waveform['resolution'] = resolution\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['period'] = period_vert\n waveform['period_horz'] = period_horz\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def sweep_2D_virt(self, samp_freq, gates_horz, gates_vert, 
sweepranges, resolution, width=.95, delete=True):\n ''' Send sawtooth signals to the linear combinations of gates set by\n gates_horz and gates_vert which effectively do a 2D scan of two virtual\n gates.\n\n The horizontal direction is the direction where the AWG signal is changing fastest. It is the first element in the resolution and sweepranges.\n\n Arguments:\n samp_freq (float): sampling frequency of the measurement instrument in Hertz.\n gates_horz (dict): the gates for the horizontal direction and their coefficients\n gates_vert (dict): the gates for the vertical direction and their coefficients\n sweepranges (list): two floats for sweepranges in milliVolts\n resolution (list): two ints for numbers of pixels\n\n Returns:\n waveform (dict): The waveforms being send with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n# JP: I think FPGA exceptions should not be handled by awg\n# if resolution[0] * resolution[1] > self.maxdatapts:\n# raise Exception('resolution is set higher than memory allows')\n\n error_corr = resolution[0] * self.corr\n period_horz = resolution[0] / samp_freq + error_corr\n period_vert = resolution[1] * period_horz\n\n new_sr = self.check_frequency_waveform(period_horz, width)\n # self.reset_AWG(new_sr)\n\n waveform = dict()\n # horizontal virtual gate\n for g in gates_horz:\n self.check_amplitude(g, sweepranges[0] * gates_horz[g])\n for g in gates_horz:\n wave_raw = self.make_sawtooth(sweepranges[0], period_horz, repetitionnr=resolution[1])\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()\n wave = wave_raw * gates_horz[g] / awg_to_plunger\n waveform[g] = dict()\n waveform[g]['wave'] = wave\n waveform[g]['name'] = 'sweep_2D_virt_%s' % g\n\n # vertical virtual gate\n for g in gates_vert:\n self.check_amplitude(g, sweepranges[1] * gates_vert[g])\n for g in gates_vert:\n wave_raw = self.make_sawtooth(sweepranges[1], period_vert)\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()\n wave = wave_raw * gates_vert[g] / awg_to_plunger\n if g in waveform:\n waveform[g]['wave'] = waveform[g]['wave'] + wave\n else:\n waveform[g] = dict()\n waveform[g]['wave'] = wave\n waveform[g]['name'] = 'sweep_2D_virt_%s' % g\n\n # TODO: Implement compensation of sensing dot plunger\n\n sweep_info = self.sweep_init(waveform, period=period_vert, delete=delete, samp_freq=samp_freq)\n self.sweep_run(sweep_info)\n\n waveform['width_horz'] = width\n waveform['sweeprange_horz'] = sweepranges[0]\n waveform['width_vert'] = width\n waveform['sweeprange_vert'] = sweepranges[1]\n waveform['resolution'] = resolution\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['period'] = period_vert\n waveform['period_horz'] = period_horz\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def sweep_2D_process(self, data, waveform, diff_dir=None):\n ''' Process data from sweep_2D\n\n Arguments:\n data (list): the raw measured data\n waveform (dict): The waveforms that was sent with the AWG.\n\n Returns:\n data_processed (list): the processed data\n '''\n width_horz = waveform['width_horz']\n width_vert = waveform['width_vert']\n resolution = waveform['resolution']\n\n # split up the fpga data in chunks of horizontal sweeps\n chunks_ch1 = [data[x:x + resolution[0]] for x in range(0, len(data), resolution[0])]\n chunks_ch1 = [chunks_ch1[i][1:int(width_horz * len(chunks_ch1[i]))] for i in range(0, len(chunks_ch1))]\n 
data_processed = chunks_ch1[:int(width_vert * len(chunks_ch1))]\n\n if diff_dir is not None:\n data_processed = qtt.utilities.tools.diffImageSmooth(data_processed, dy=diff_dir, sigma=1)\n\n return data_processed\n\n def pulse_gates(self, gate_voltages, waittimes, reps=1, filtercutoff=None, reset_to_zero=False, delete=True):\n ''' Send a pulse sequence with the AWG that can span over any gate space.\n Sends a marker to measurement instrument at the start of the sequence.\n Only works with physical gates.\n\n Arguments:\n gate_voltages (dict): keys are gates to apply the sequence to, and values\n are arrays with the voltage levels to be applied in the sequence\n waittimes (list of floats): duration of each pulse in the sequence\n reset_to_zero (bool): if True, the function offsets the voltages values so that the last point is 0V\n on all gates (i.e. it centers the pulse sequence on the last point).\n\n Returns:\n waveform (dict): The waveform being send with the AWG.\n sweep_info (dict): the keys are tuples of the awgs and channels to activate\n '''\n\n period = sum(waittimes)\n if reset_to_zero:\n for g in gate_voltages:\n gate_voltages[g] = [x - gate_voltages[g][-1] for x in gate_voltages[g]]\n allvoltages = np.concatenate([v for v in gate_voltages.values()])\n mvrange = [max(allvoltages), min(allvoltages)]\n waveform = dict()\n for g in gate_voltages:\n wave_raw = self.make_pulses(gate_voltages[g], waittimes, reps=reps,\n filtercutoff=filtercutoff, mvrange=mvrange)\n awg_to_plunger = self.hardware.parameters['awg_to_%s' % g].get()\n wave = wave_raw / awg_to_plunger\n waveform[g] = dict()\n waveform[g]['wave'] = wave\n waveform[g]['name'] = 'pulses_%s' % g\n\n sweep_info = self.sweep_init(waveform, period, delete)\n self.sweep_run(sweep_info)\n waveform['voltages'] = gate_voltages\n waveform['samplerate'] = 1 / self.AWG_clock\n waveform['waittimes'] = waittimes\n for channels in sweep_info:\n if 'delay' in sweep_info[channels]:\n waveform['markerdelay'] = sweep_info[channels]['delay']\n\n return waveform, sweep_info\n\n def reset_AWG(self, clock=1e8):\n \"\"\" Reset AWG to videomode and scanfast \"\"\"\n self.AWG_clock = clock\n for a in self._awgs:\n a.clock_freq.set(clock)\n a.trigger_mode.set('CONT')\n a.trigger_source.set('INT')\n\n for ii in range(1, 5):\n f = getattr(a, 'ch%d_amp' % ii)\n val = f()\n if val != 4.0:\n warnings.warn('AWG channel %d output not at 4.0 V' % ii)\n if self.awg_seq is not None:\n self._set_seq_mode(self.awg_seq)\n\n def set_amplitude(self, amplitude):\n \"\"\" Set the AWG peak-to-peak amplitude for all channels\n\n Args:\n amplitude (float): peak-to-peak amplitude (V)\n\n \"\"\"\n if amplitude < 0.02:\n warnings.warn('Trying to set AWG amplitude too low, setting it to minimum (20mV)')\n amplitude = 0.02\n elif amplitude > 4.5:\n warnings.warn('Trying to set AWG amplitude too high, setting it to maximum (4.5V)')\n amplitude = 4.5\n\n # tektronics 5014 has precision of 1mV\n self.ch_amp = round(amplitude, 3)\n for awg in self._awgs:\n for i in range(1, 5):\n awg.set('ch%s_amp' % i, self.ch_amp)\n\n def check_amplitude(self, gate, mvrange):\n \"\"\" Calculates the lowest allowable AWG peak-to-peak amplitude based on the\n ranges to be applied to the gates. 
If the AWG amplitude is too low, it gives\n a warning and increases the amplitude.\n\n Args:\n gate (str): name of the gate to check\n mvrange (float): voltage range, in mV, that the gate needs to reach\n \"\"\"\n min_amp = mvrange / self.hardware.parameters['awg_to_%s' % gate].get()\n if min_amp > 4:\n raise(Exception('Sweep range of gate %s is larger than maximum allowed by the AWG' % gate))\n if self.ch_amp < min_amp:\n min_amp = np.ceil(min_amp * 10) / 10\n self.set_amplitude(min_amp)\n warnings.warn('AWG amplitude too low for this range, setting to %.1f' % min_amp)\n\n# %%\n\n\ndef plot_wave_raw(wave_raw, samplerate=None, station=None):\n ''' Plot the raw wave\n\n Arguments:\n wave_raw (array): raw data which represents the waveform\n\n Returns:\n plot (QtPlot): the plot showing the data\n '''\n if samplerate is None:\n if station is None:\n raise Exception('There is no station')\n samplerate = 1 / station.awg.getattr('AWG_clock')\n else:\n samplerate = samplerate\n horz_var = np.arange(0, len(wave_raw) * samplerate, samplerate)\n x = DataArray(name='time(s)', label='time (s)',\n preset_data=horz_var, is_setpoint=True)\n y = DataArray(\n label='sweep value (mV)', preset_data=wave_raw, set_arrays=(x,))\n plot = QtPlot(x, y)\n\n return plot\n\n\ndef sweep_2D_process(data, waveform, diff_dir=None):\n ''' Process data from sweep_2D\n\n Arguments:\n data (list): the raw measured data\n waveform (dict): The waveforms that was sent with the AWG.\n\n Returns:\n data_processed (list): the processed data\n '''\n width_horz = waveform['width_horz']\n width_vert = waveform['width_vert']\n resolution = waveform['resolution']\n\n # split up the fpga data in chunks of horizontal sweeps\n chunks_ch1 = [data[x:x + resolution[0]] for x in range(0, len(data), resolution[0])]\n chunks_ch1 = [chunks_ch1[i][1:int(width_horz * len(chunks_ch1[i]))] for i in range(0, len(chunks_ch1))]\n data_processed = chunks_ch1[:int(width_vert * len(chunks_ch1))]\n\n if diff_dir is not None:\n data_processed = qtt.utilities.tools.diffImageSmooth(data_processed, dy=diff_dir, sigma=1)\n\n return data_processed\n"
] | [
[
"numpy.arange",
"numpy.tile",
"numpy.ones",
"numpy.ceil",
"numpy.floor",
"numpy.array",
"numpy.zeros",
"numpy.roll"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MarkusHaak/fieldbioinformatics | [
"3d291477a3d84968816c8e57e6078fc80135f422"
] | [
"artic/deprecated/plot_amplicon_depth.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"\nPlot the mean read depth per amplicon.\n\nThis has been written for use in the ARTIC pipeline so there are no file checks - it assumes the following:\n * the primer scheme is in ARTIC format\n * the input depth files are in the format: `chrom\\treadgroup\\tposition\\tdepth\n * readgroup equates to primer pool\n * the primer pairs in the scheme are sorted by amplicon number (i.e. readgroups are interleaved)\n * depth values are provided for all positions (see output of make_depth_mask.py for expected format)\n\n\"\"\"\nfrom .vcftagprimersites import read_bed_file\nimport sys\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport os\n\n\nos.environ['QT_QPA_PLATFORM'] = 'offscreen'\nimport seaborn as sns\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\ndef go(args):\n\n # get the primer scheme\n primerScheme = read_bed_file(args.primerScheme)\n\n # number the amplicons in the scheme and link them to primer start site\n ampliconCounter = 1\n\n # store the amplicon number and starts by read group dict\n rgAmplicons = {}\n rgStarts = {}\n\n # process the primers by readgroup\n for primer in primerScheme:\n poolName = primer['PoolName']\n if poolName not in rgAmplicons:\n rgAmplicons[poolName] = []\n rgStarts[poolName] = []\n if primer['direction'] == '+':\n rgAmplicons[poolName].append(ampliconCounter)\n rgStarts[poolName].append(primer['start'])\n ampliconCounter += 1\n\n # for pandas cut func to create bins, we need to add an extra value to the starts (just use inf)\n for startList in rgStarts.values():\n startList.append(np.inf)\n\n # process the depth files\n dfs = {}\n for depthFile in args.depthFiles:\n\n # read in the depth file\n df = pd.read_csv(depthFile, sep='\\t', header=None,\n names=['refName', 'readGroup',\n 'position', 'depth'],\n dtype={'refName': str, 'readGroup': str,\n 'position': int, 'depth': int},\n usecols=(0, 1, 2, 3),)\n\n # check that there aren't too many positions in the depth data for plotting\n # assert len(df.index) < 30000, \"error: too many data points to plot\"\n\n # check all ref positions have a depth value\n startPos = df[\"position\"][0]\n endPos = df[\"position\"][df.index[-1]]\n assert len(df.index) == ((endPos - startPos) +\n 1), \"error: depth needs to be reported at all positions\"\n\n # check the primer scheme contains the readgroup\n rgList = df.readGroup.unique()\n assert len(rgList) == 1, \"error: depth file has %d readgroups, need 1 (%s)\" % (\n len(rgList), depthFile)\n rg = rgList[0]\n assert rg in rgAmplicons, \"error: readgroup not found in provided primer scheme (%s)\" % (\n rg)\n\n # get the amplicon starts for this readgroup\n amplicons = sorted(rgAmplicons[rg])\n starts = sorted(rgStarts[rg])\n\n # bin read depths by amplicon for this readgroup\n df['amplicon'] = pd.cut(\n x=df['position'], bins=starts, labels=amplicons)\n\n # store the mean of each bin\n bins = (df.groupby(['amplicon'])[\n 'depth'].mean()).rename(depthFile.name)\n\n # add to the pile\n assert rg not in dfs, \"error: readgroup present in multiple files (%s)\" % (\n rg)\n dfs[rg] = bins\n\n # combine the series data from each input file\n newDF = pd.concat(dfs, axis=1)\n newDF.sort_index(axis=0, inplace=True)\n newDF.reset_index(inplace=True)\n\n # melt the DF for seaborn\n newDF = newDF.melt(\"amplicon\", var_name=\"read group\",\n value_name=\"mean amplicon read depth\")\n newDF = newDF.dropna()\n\n # plot the bar\n g = sns.catplot(data=newDF,\n x=\"amplicon\",\n y=\"mean amplicon read depth\",\n 
hue=\"read group\",\n height=4,\n aspect=3,\n kind=\"bar\",\n dodge=False,\n legend=False)\n g.set(yscale=\"log\")\n g.fig.suptitle(args.sampleID)\n plt.legend(loc='upper right')\n plt.xticks(rotation=45, size=6)\n plt.savefig(args.outFilePrefix + \"-barplot.png\")\n plt.close()\n\n # plot the box\n g = sns.catplot(data=newDF,\n x=\"read group\",\n y=\"mean amplicon read depth\",\n kind=\"box\")\n g.fig.suptitle(args.sampleID)\n plt.savefig(args.outFilePrefix + \"-boxplot.png\")\n plt.close()\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--primerScheme', required=True,\n help='the ARTIC primer scheme')\n parser.add_argument('--sampleID', required=True,\n help='the sample ID for the provided depth files')\n parser.add_argument('--outFilePrefix', default=\"./amplicon-depth\",\n help='the prefix to give the output plot file')\n parser.add_argument(\n \"depthFiles\", type=argparse.FileType('r'), nargs='+', help='the depth files produced by make_depth_mask.py')\n args = parser.parse_args()\n go(args)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.concat",
"pandas.read_csv",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"pandas.cut",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xticks"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
cperales/pygsom | [
"ac4d4818f441d862cb5183e1d2ea814e3f805759"
] | [
"gsom.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015 Philipp Ludwig <[email protected]>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF\nOR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\"\"\"@package GSOM\n\nThis is an implementation of the growing self-organizing map.\n\nDifferent possible approaches for the GSOM have been presented in the past\nby various researchers. To make things clear, this implementation is based\non the one described in the work of:\n\nAlahakoon, Damminda, S. Halgamuge, and Bala Srinivasan:\n\"Dynamic self-organizing maps with controlled growth for knowledge discovery.\"\nNeural Networks, IEEE Transactions on 11.3 (2000): 601-614.\n\nSadly, this article is not as comprehensive as desirable. Therefore this\nimplementation should not be taken as a reference, but as a best-effort\nversion. Some details of the algorithm have been assembled based on the\nwork of Mengxue Cao et. al, who described their approach within their work:\n\n\"Growing Self-Organizing Map Approach for Semantic Acquisition Modeling\nway within their work\"\n\nRefer to both papers for further details.\n\nAdditionally, this algorithm picks up some of the aspects proposed in the\nwork of:\n\nAndreas Nürnberger and Marcin Detyniecki:\n\"Externally growing self-organizing maps and its application to e-mail\n database visualization and exploration\"\n\"\"\"\nfrom math import log, exp\nimport itertools\nimport math\nimport random\nimport scipy\n\n\nclass GSOMNode:\n \"\"\" Represents one node in a growing SOM. \"\"\"\n R = random.Random()\n\n def __init__(self, dim, x, y, data):\n \"\"\" Initialize this node. \"\"\"\n # Create a weight vector of the given dimension:\n # Initialize the weight vector with random values between 0 and 1.\n self.weights = scipy.array([self.R.random() for _ in range(dim)])\n\n # Remember the error occuring at this particular node\n self.error = 0.0\n\n # Holds the number of the iteration during the node has been inserted.\n self.it = 0\n\n # Holds the number of the last iteration where the node has won.\n self.last_it = 0\n\n # Holds the best-matching data.\n self.data = data\n self.last_changed = 0\n\n # This node has no neighbours yet.\n self.right = None\n self.left = None\n self.up = None\n self.down = None\n\n # Copy the given coordinates.\n self.x, self.y = x, y\n\n def adjust_weights(self, target, learn_rate):\n \"\"\" Adjust the weights of this node. 
\"\"\"\n for w in range(0, len(target)):\n self.weights[w] += learn_rate * (target[w] - self.weights[w])\n\n def is_boundary(self):\n \"\"\" Check if this node is at the boundary of the map. \"\"\"\n if not self.right: return True\n if not self.left: return True\n if not self.up: return True\n if not self.down: return True\n return False\n\n\nclass GSOM:\n \"\"\" Represents a growing self-organizing map. \"\"\"\n\n @staticmethod\n def _distance(v1, v2):\n \"\"\" Calculate the euclidean distance between two scipy arrays.\"\"\"\n dist = 0.0\n for v, w in zip(v1, v2):\n dist += pow(v - w, 2)\n return dist\n\n def _find_bmu(self, vec):\n \"\"\" Find the best matching unit within the map for the given input_\n vector. \"\"\"\n dist=float(\"inf\")\n winner = False\n for node in self.nodes:\n d = self._distance(vec, node.weights)\n if d < dist:\n dist = d\n winner = node\n return winner\n\n def _find_similar_boundary(self, node):\n \"\"\" Find the most similar boundary node to the given node. \"\"\"\n dist = float(\"inf\")\n winner = False\n for boundary in self.nodes:\n if not boundary.is_boundary(): continue\n if boundary == node: continue\n\n d = self._distance(node.weights, boundary.weights)\n if d < dist:\n dist = d\n winner = node\n\n return winner\n\n def __init__(self, X, y, spread_factor=0.5):\n \"\"\" Initializes this GSOM using the given data. \"\"\"\n # Assign the data\n self.data = []\n for fn, t in zip(X, y):\n arr = scipy.array([t])\n self.data.append([fn, arr])\n\n # Determine the dimension of the data.\n self.dim = len(self.data[0][0])\n\n # Calculate the growing threshold:\n self._GT = -self.dim * math.log(spread_factor, 2)\n\n # Create the 4 starting Nodes.\n self.nodes = []\n n00 = GSOMNode(dim=self.dim, x=0, y=0, data=self.data)\n n01 = GSOMNode(self.dim, 0, 1, self.data)\n n10 = GSOMNode(self.dim, 1, 0, self.data)\n n11 = GSOMNode(self.dim, 1, 1, self.data)\n self.nodes.extend([n00, n01, n10, n11])\n\n # Create starting topology\n n00.right = n10\n n00.up = n01\n n01.right = n11\n n01.down = n00\n n10.up = n11\n n10.left = n00\n n11.left = n01\n n11.down = n10\n\n # Set properties\n self.it = 0 # Current iteration\n self.max_it = len(self.data)\n self.num_it = 1000 # Total iterations\n self.init_lr = 0.1 # Initial value of the learning rate\n self.alpha = 0.1\n self.output = open(\"gsom.csv\", \"w\")\n\n def train(self):\n # Select the next input_.\n input_ = random.choice(self.data)[1]\n input_ = random.choice(self.data)[0]\n\n # Calculate the learn rate.\n # Note that the learning rate, according to the original paper,\n # is reseated for every new input_.\n learn_rate = self.init_lr * self.alpha * (1 - 1.5/len(self.nodes))\n\n # We now present the input_ several times to the network.\n # It is unclear what's a good number here, since no publication\n # took the effort to name a value. 
However, the implementation\n # provided by Arkadi Kagan presents the input_ 20 times, so we\n # will copy that here.\n recalc_nodes = []\n for _ in range(20):\n # Find the best matching unit\n BMU = self._find_bmu(input_)\n BMU.last_it = self.it\n\n # Adapt the weights of the direct topological neighbours\n neighbours = []\n neighbours.append(BMU)\n if BMU.left: neighbours.append(BMU.left)\n if BMU.right: neighbours.append(BMU.right)\n if BMU.up: neighbours.append(BMU.up)\n if BMU.down: neighbours.append(BMU.down)\n\n if BMU not in recalc_nodes: recalc_nodes.append(BMU)\n\n for node in neighbours:\n node.adjust_weights(input_, learn_rate)\n if node not in recalc_nodes: recalc_nodes.append(node)\n\n # Calculate the error.\n err = self._distance(BMU.weights, input_)\n\n # Add the error to the node.\n growing, nodes = self._node_add_error(BMU, err)\n if growing: recalc_nodes.extend(nodes)\n\n # Count the iteration\n self.it += 1\n\n # Re-Calc representative data elements for changed nodes.\n used_data = []\n for node in self.nodes:\n used_data.append(node.data)\n\n for node in recalc_nodes:\n dist = float(\"inf\")\n winner = False\n winner_fn = False\n\n for fn, point in self.data:\n # if fn in used_data: continue\n\n d = self._distance(point, node.weights)\n if(d < dist):\n dist = d\n winner = point\n winner_fn = fn\n\n if node.data != winner_fn:\n node.data = winner_fn\n node.last_changed = self.it\n self.output.write(str(node.x) + \",\" + str(node.y)\\\n + \",change\\n\")\n used_data.append(winner_fn)\n\n # Remove unused nodes.\n self._remove_unused_nodes()\n\n def _node_add_error(self, node, error):\n \"\"\" Add the given error to the error value of the given node.\n\n This will also take care of growing the map (if necessary) and\n distributing the error along the neighbours (if necessary) \"\"\"\n node.error += error\n\n # Consider growing\n if node.error > self._GT:\n if not node.is_boundary():\n # Find the boundary node which is most similar to this node.\n node = self._find_similar_boundary(node)\n if not node:\n print(\"GSOM: Error: No free boundary node found!\")\n\n \"\"\" Old method:\n # Distribute the error along the neighbours.\n # Since this is not a boundary node, this node must have\n # 4 neighbours.\n node.error = 0.5 * self._GT\n node.left.error += 0.25 * node.left.error\n node.right.error += 0.25 * node.right.error\n node.up.error += 0.25 * node.up.error\n node.down.error += 0.25 * node.down.error\n \"\"\"\n nodes = self._grow(node)\n return True, nodes\n\n return False, 0\n\n def _grow(self, node):\n \"\"\" Grow this GSOM. 
\"\"\"\n # We grow this GSOM at every possible direction.\n nodes = []\n if node.left is None:\n nn = self._insert(node.x - 1, node.y, node)\n nodes.append(nn)\n print(\"Growing left at: (\" + str(node.x) + \",\" + str(node.y)\\\n + \") -> (\" + str(nn.x) + \", \" + str(nn.y) + \")\")\n\n if node.right is None:\n nn = self._insert(node.x + 1, node.y, node)\n nodes.append(nn)\n print(\"Growing right at: (\" + str(node.x) + \",\" + str(node.y)\\\n + \") -> (\" + str(nn.x) + \", \" + str(nn.y) + \")\")\n\n if node.up is None:\n nn = self._insert(node.x, node.y + 1, node)\n nodes.append(nn)\n print(\"Growing up at: (\" + str(node.x) + \",\" + str(node.y) +\\\n \") -> (\" + str(nn.x) + \", \" + str(nn.y) + \")\")\n\n if node.down is None:\n nn = self._insert(node.x, node.y - 1, node)\n nodes.append(nn)\n print(\"Growing down at: (\" + str(node.x) + \",\" + str(node.y) +\\\n \") -> (\" + str(nn.x) + \", \" + str(nn.y) + \")\")\n return nodes\n\n def _insert(self, x, y, init_node):\n # Create new node\n new_node = GSOMNode(self.dim, x, y, self.data)\n self.nodes.append(new_node)\n\n # Save the number of the current iteration. We need this to prune\n # this node later (if neccessary).\n new_node.it = new_node.last_it = self.it\n\n # Create the connections to possible neighbouring nodes.\n for node in self.nodes:\n # Left, Right, Up, Down\n if node.x == x - 1 and node.y == y:\n new_node.left = node\n node.right = new_node\n if node.x == x + 1 and node.y == y:\n new_node.right = node\n node.left = new_node\n if node.x == x and node.y == y + 1:\n new_node.up = node\n node.down = new_node\n if node.x == x and node.y == y - 1:\n new_node.down = node\n node.up = new_node\n\n # Calculate new weights, look for a neighbour.\n neigh = new_node.left\n if neigh is None: neigh = new_node.right\n if neigh is None: neigh = new_node.up\n if neigh is None: neigh = new_node.down\n if neigh is None: print(\"_insert: No neighbour found!\")\n\n for i in range(0, len(new_node.weights)):\n new_node.weights[i] = 2 * init_node.weights[i] - neigh.weights[i]\n\n return new_node\n\n\n def _remove_unused_nodes(self):\n \"\"\" Remove all nodes from the GSOM that have not been used. \"\"\"\n to_remove = []\n\n # Iterate over all nodes.\n for node in self.nodes:\n # Different rules for nodes that have been used or not.\n iterations_not_won = self.it - node.last_it\n\n # If we have 50 nodes, every node is allowed not to win 50 times\n # in a row. This means every node must be picked at least once.\n if iterations_not_won < len(self.nodes) * 4.0 * (1 + self.it/len(self.data)) : continue\n\n\n # First, remove the connections to the neighbouring nodes.\n if node.left: node.left.right = None\n if node.up: node.up.down = None\n if node.down: node.down.up = None\n if node.right: node.right.left = None\n\n # Save this node for removing.\n to_remove.append(node)\n\n # Now remove all marked nodes.\n for node in to_remove:\n print(\"Removing node @ \" + str(node.x) + \", \" + str(node.y) + \\\n \" - Current it: \" + str(self.it) + \" - Last time won: \" +\\\n str(node.last_it))\n if node.data:\n self.output.write(node.data + \",\" + str(node.x)+\",\"+str(node.y)\\\n + \",remove\\n\")\n self.nodes.remove(node)\n\n"
] | [
[
"scipy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eEcoLiDAR/lcMacroPipeline | [
"91709f93ef53a3e453f0ce967e1094688688f684"
] | [
"tests/test_grid.py"
] | [
"from pathlib import Path\nimport unittest\nimport numpy as np\nimport pylas\n\nfrom laserfarm.grid import Grid\n\ntry:\n import matplotlib\n matplotlib_available = True\nexcept ModuleNotFoundError:\n matplotlib_available = False\n\nif matplotlib_available:\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n\n\nclass TestValidGridSetup(unittest.TestCase):\n def setUp(self):\n self.grid = Grid()\n self.grid.setup(0., 0., 20., 20., 5)\n\n def test_gridMins(self):\n np.testing.assert_allclose(self.grid.grid_mins, [0., 0.])\n\n def test_gridMaxs(self):\n np.testing.assert_allclose(self.grid.grid_maxs, [20., 20.])\n\n def test_gridWidth(self):\n np.testing.assert_allclose(self.grid.grid_width, 20.)\n\n def test_tileWidth(self):\n np.testing.assert_allclose(self.grid.tile_width, 4.)\n\n def test_tileIndexForPoint(self):\n np.testing.assert_array_equal(self.grid.get_tile_index(0.1, 0.2),\n (0, 0))\n\n def test_tileIndexForArray(self):\n np.testing.assert_array_equal(self.grid.get_tile_index((0.1, 19.9),\n (0.2, 19.8)),\n ((0, 0), (4, 4)))\n\n def test_tileBoundsForPoint(self):\n np.testing.assert_array_equal(self.grid.get_tile_bounds(0, 0),\n ((0., 0.), (4., 4.)))\n\n def test_tileBoundsForArray(self):\n np.testing.assert_array_equal(self.grid.get_tile_bounds((0, 0),\n (0, 1)),\n (((0., 0.), (0., 4.)),\n ((4., 4.), (4., 8.))))\n\n\nclass TestInvalidGridSetup(unittest.TestCase):\n def test_fractionalNumberOfTilesGrid(self):\n with self.assertRaises(ValueError):\n grid = Grid()\n grid.setup(0., 0., 20., 20., 0.1)\n\n def test_zeroNumberOfTilesGrid(self):\n with self.assertRaises(ValueError):\n grid = Grid()\n grid.setup(0., 0., 20., 20., 0)\n\n def test_zeroWidthGrid(self):\n with self.assertRaises(ValueError):\n grid = Grid()\n grid.setup(0., 0., 0., 20., 5)\n\n def test_rectangularGrid(self):\n with self.assertRaises(ValueError):\n grid = Grid()\n grid.setup(0., 0., 10., 20., 5)\n\n\nclass TestRealGridValid(unittest.TestCase):\n _test_dir = 'test_tmp_dir'\n _test_data_dir = 'testdata'\n _test_tile_idx = [101, 101]\n\n _test_file_name = 'C_43FN1_1_1.LAZ'\n _min_x = -113107.8100\n _min_y = 214783.8700\n _max_x = 398892.1900\n _max_y = 726783.87\n _n_tiles_sides = 256\n\n plot = False\n\n def setUp(self):\n self.grid = Grid()\n self.grid.setup(min_x=self._min_x,\n min_y=self._min_y,\n max_x=self._max_x,\n max_y=self._max_y,\n n_tiles_side=self._n_tiles_sides)\n self._test_data_path = Path(self._test_data_dir).joinpath(self._test_file_name)\n self.points = _read_points_from_file(str(self._test_data_path))\n\n def test_isPointInTile(self):\n x_pts, y_pts = self.points.T\n mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,\n *self._test_tile_idx)\n self.assertTrue(np.all(mask_valid_points))\n\n\nclass TestRealGridLowPrecision(TestRealGridValid):\n \"\"\"\n The following tile has been obtained by using large scale parameters (0.1)\n in the PDAL LAS writer. 
Some points thus fall outside the tile boundary\n when read from the file.\n \"\"\"\n _test_file_name = 'C_43FN1_1.LAZ'\n\n def test_isPointInTile(self):\n x_pts, y_pts = self.points.T\n mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,\n *self._test_tile_idx)\n if self.plot and matplotlib_available:\n _plot_points_and_tile(self.grid,\n self.points[~mask_valid_points],\n self._test_tile_idx,\n self._test_data_path.with_suffix('.png').name)\n self.assertFalse(np.all(mask_valid_points))\n\n def test_isPointInTileWithPrecision(self):\n x_pts, y_pts = self.points.T\n precision = np.abs(np.rint(self._max_y) - self._max_y)\n mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,\n *self._test_tile_idx,\n precision=precision)\n self.assertTrue(np.all(mask_valid_points))\n\n\nclass TestRealGridLowPrecisionRoundedOrigin(TestRealGridValid):\n \"\"\"\n The following tile has been obtained by rounding off the coordinates\n of the origin and by using the default scale parameters (0.01) in the PDAL\n LAS writer.\n \"\"\"\n _test_file_name = 'C_43FN1_1.LAZ'\n _test_tile_idx = [101, 101]\n\n _min_x = -113108.00\n _min_y = 214784.00\n _max_x = 398892.00\n _max_y = 726784.00\n\n def test_isPointInTile(self):\n x_pts, y_pts = self.points.T\n mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,\n *self._test_tile_idx)\n if self.plot and matplotlib_available:\n _plot_points_and_tile(self.grid,\n self.points[~mask_valid_points],\n self._test_tile_idx,\n self._test_data_path.with_suffix('.png').name)\n self.assertFalse(np.all(mask_valid_points))\n\n def test_isPointInTileWithPrecision(self):\n x_pts, y_pts = self.points.T\n mask_valid_points = self.grid.is_point_in_tile(x_pts, y_pts,\n *self._test_tile_idx,\n precision=0.01)\n self.assertTrue(np.all(mask_valid_points))\n\n\ndef _read_points_from_file(filename):\n file = pylas.read(filename)\n return np.column_stack((file.x, file.y))\n\n\ndef _plot_points_and_tile(grid, points, tile_indices, filename=None):\n \"\"\"\n Plot points\n\n :param grid: grid object\n :param points: (Nx2) array containing X,Y coordinates of the points\n :param tile_indices: [N_x, N_y], where N_i is the integer tile index along\n dimension i\n :param filename: optional, path where to save plot\n \"\"\"\n # plot points\n x_pts, y_pts = points.T\n plt.scatter(x_pts, y_pts, color='r')\n # plot tile\n tile_mins, tile_maxs = grid.get_tile_bounds(*tile_indices)\n line = np.array((tile_mins,\n [tile_mins[0], tile_maxs[1]],\n tile_maxs,\n [tile_maxs[0], tile_mins[1]],\n tile_mins))\n x, y = line.T\n plt.plot(x, y, color='k')\n # add tile label\n x_cntr, y_cntr = (tile_mins + tile_maxs) / 2.\n plt.text(x_cntr, y_cntr, '({}, {})'.format(*tile_indices),\n horizontalalignment='center',\n verticalalignment='center')\n if filename is not None:\n plt.savefig(filename, dpi=300)\n else:\n plt.show()\n plt.close(plt.figure())\n"
] | [
[
"matplotlib.pyplot.scatter",
"matplotlib.use",
"numpy.rint",
"matplotlib.pyplot.show",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.all",
"numpy.column_stack",
"numpy.array",
"numpy.testing.assert_allclose",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
madhawav/plan2scene | [
"cc3481f503fc096d1a50ea4fbcc668b2a3b75fb5",
"cc3481f503fc096d1a50ea4fbcc668b2a3b75fb5"
] | [
"code/src/plan2scene/texture_gen/custom_ops/noise.py",
"code/src/plan2scene/texture_prop/graph_util.py"
] | [
"# Code adapted from https://github.com/henzler/neuraltexture/blob/master/code/custom_ops/noise/noise.py\n\nfrom torch import nn\nfrom torch.autograd import Function\nimport plan2scene.texture_gen.utils.neural_texture_helper as utils_nt\nimport noise_cuda\nimport torch\nimport numpy as np\nfrom torch.autograd import gradcheck\n\n\nclass NoiseFunction(Function):\n @staticmethod\n def forward(ctx, position, seed):\n ctx.save_for_backward(position, seed)\n noise = noise_cuda.forward(position, seed)\n return noise\n\n @staticmethod\n def backward(ctx, grad_noise):\n position, seed = ctx.saved_tensors\n d_position_bilinear = noise_cuda.backward(position, seed)\n\n d_position = torch.stack([torch.zeros_like(d_position_bilinear), d_position_bilinear], dim=0)\n\n return grad_noise.unsqueeze(2) * d_position, None\n\n\nclass Noise(nn.Module):\n def __init__(self):\n super(Noise, self).__init__()\n\n def forward(self, position, seed):\n noise = NoiseFunction.apply(position.contiguous(), seed.contiguous())\n return noise\n",
"from plan2scene.config_manager import ConfigManager\nfrom plan2scene.common.residence import House, Room\nimport torch\n\n\ndef get_house_graph(conf: ConfigManager, house: House, surface_maskout_map: dict, key=\"prop\"):\n \"\"\"\n Generates node embeddings and edge pairs for a given house.\n :param conf: ConfigManager\n :param house: House to generate graph\n :param surface_maskout_map: Dictionary of surfaces to be dropped from input. {room_index: [list of surfaces. e.g. 'floor', 'wall', 'ceiling']}\n :return: Pair of node embeddings tensor and edge indices tensor\n \"\"\"\n combined_emb_dim = conf.texture_gen.combined_emb_dim\n node_embeddings = []\n surface_embeddings = []\n for room_index, room in house.rooms.items():\n assert isinstance(room, Room)\n # Room Type\n node_embedding = []\n rt_embedding = torch.zeros(len(conf.room_types))\n for rt in room.types:\n rt_embedding[conf.room_types.index(rt)] = 1.0\n node_embedding.append(rt_embedding.view(1, -1))\n\n short_embeddings = []\n for surf in conf.surfaces:\n if room_index in surface_maskout_map and surf in surface_maskout_map[room_index]: # Masked out\n short_surf_embedding = torch.zeros((combined_emb_dim,)).view(1, -1)\n surf_present = False\n elif key not in room.surface_embeddings[surf]: # Unobserved\n short_surf_embedding = torch.zeros((combined_emb_dim,)).view(1, -1)\n surf_present = False\n else:\n short_surf_embedding = room.surface_embeddings[surf][key].detach()\n surf_present = True\n\n surf_embedding = torch.cat([torch.tensor([[surf_present]], dtype=torch.float32), short_surf_embedding],\n dim=1)\n node_embedding.append(surf_embedding)\n short_embeddings.append(short_surf_embedding)\n del surf_embedding\n del surf_present\n del short_surf_embedding\n\n if conf.texture_prop.graph_generator.include_enable_in_target:\n assert False\n\n surface_embeddings.append(\n torch.cat(short_embeddings, dim=0).unsqueeze(0))\n node_embedding_tensor = torch.cat(node_embedding, dim=1)\n node_embeddings.append(node_embedding_tensor)\n\n node_embeddings_tensor = torch.cat(node_embeddings, dim=0)\n surface_embeddings_tensor = torch.cat(surface_embeddings, dim=0)\n\n edge_indices = []\n for r1_index, r2_index in house.door_connected_room_pairs:\n if r2_index >= 0:\n edge_indices.append([r1_index, r2_index])\n edge_indices_tensor = torch.tensor(edge_indices, dtype=torch.long)\n\n return node_embeddings_tensor, edge_indices_tensor, surface_embeddings_tensor\n\n\ndef generate_target_and_mask(conf: ConfigManager, house: House, target_surface_map: dict, include_target: bool, key=\"prop\") -> tuple:\n \"\"\"\n Generates y and y_mask tensors for a given house, targetting surfaces that are indicated.\n :param conf: Config Manager\n :param house: House\n :param target_surface_map: Dictionary of room surfaces to include in mask and target. {room_index: [list of surfaces. e.g. 
'floor', 'wall', 'ceiling']}\n :param include_target: Pass true to populate target embeddings of each node.\n :return: Pair of target tensor [node_count, surface_count, emb] and masks tensor [node_count, surface_count].\n \"\"\"\n combined_emb_dim = conf.texture_gen.combined_emb_dim\n updated_combined_emb_dim = combined_emb_dim\n if conf.texture_prop.graph_generator.include_enable_in_target:\n updated_combined_emb_dim += 1\n\n if include_target:\n room_targets = []\n else:\n room_targets = None\n room_masks = []\n for room_index, room in house.rooms.items():\n if room_index not in target_surface_map:\n # Unlisted room\n if include_target:\n room_target = torch.zeros([1, len(conf.surfaces), updated_combined_emb_dim], dtype=torch.float)\n room_targets.append(room_target)\n room_masks.append(torch.zeros([1, len(conf.surfaces)], dtype=torch.bool))\n continue\n\n if include_target:\n room_target = [] # surface_count * [1, 1, combined_dim]\n\n room_mask = [] # surface_count\n\n for surf in conf.surfaces:\n if room_index in target_surface_map and surf in target_surface_map[room_index]:\n if include_target:\n surf_target = room.surface_embeddings[surf][key].detach().unsqueeze(0)\n surf_mask = True\n else:\n if include_target:\n surf_target = torch.zeros([1, 1, combined_emb_dim], dtype=torch.float)\n surf_mask = False\n\n if include_target:\n if conf.texture_prop.graph_generator.include_enable_in_target:\n surf_target = torch.cat([torch.tensor([[[surf_mask]]], dtype=torch.float32), surf_target], dim=2)\n room_target.append(surf_target)\n room_mask.append(surf_mask)\n\n\n if include_target:\n room_target_tensor = torch.cat(room_target, dim=1)\n room_targets.append(room_target_tensor)\n\n room_mask_tensor = torch.tensor(room_mask, dtype=torch.bool).unsqueeze(0)\n room_masks.append(room_mask_tensor)\n\n if include_target:\n room_targets_tensor = torch.cat(room_targets, dim=0)\n else:\n room_targets_tensor = None\n\n room_masks_tensor = torch.cat(room_masks, dim=0)\n return room_targets_tensor, room_masks_tensor"
] | [
[
"torch.zeros_like"
],
[
"torch.tensor",
"torch.zeros",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liloganle/Reinforcement-Learning | [
"29ffb74a1c8e506c544245c9aff37e958e503f26",
"29ffb74a1c8e506c544245c9aff37e958e503f26",
"29ffb74a1c8e506c544245c9aff37e958e503f26"
] | [
"Chapter9/Figure9-1.py",
"Chapter8/Figure8-8.py",
"Chapter5/Figure5-1.py"
] | [
"# -*- coding:utf-8 -*-\n\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n\nclass RandomWalk(object):\n def __init__(self, num_states=1000, groups=10, alpha=2e-5):\n self.num_states = num_states # the number of states\n self.groups = groups # the number of groups\n self.alpha = alpha # the step size\n\n self.group_value = np.zeros(groups) # the value of each group\n self.group_size = int(num_states / groups) # the size of each group\n\n self.states = np.arange(1, num_states+1) # all states except terminal state\n self.start_state = int(num_states / 2) # the start state\n self.end_state = [0, num_states + 1] # the terminal states\n self.action = [-1, 1] # right:1, left:-1\n self.neighbors = 100 # the neighboring states\n\n def select_action(self):\n \"\"\"to select randomly an action\"\"\"\n if np.random.binomial(1, 0.5):\n return self.action[1] # select right action\n else:\n return self.action[0] # select left action\n\n def find_next_state(self, state, action):\n \"\"\"to get the next state and reward\"\"\"\n move_step = np.random.randint(1, self.neighbors+1) # the step size of moving\n move_step *= action\n next_state = state + move_step # the next state\n next_state = max(min(next_state, self.end_state[1]), 0)\n\n if next_state == self.end_state[0]: # terminating on the left\n reward = -1\n elif next_state == self.end_state[1]: # terminating on the right\n reward = 1\n else:\n reward = 0\n return next_state, reward\n\n def get_state_value(self, state):\n \"\"\"to get the state value except for terminal states\"\"\"\n group_idx = (state - 1) // self.group_size\n return self.group_value[group_idx]\n \n def update_group_value(self, state, delta):\n \"\"\"to update the group_value\"\"\"\n group_idx = (state - 1) // self.group_size\n self.group_value[group_idx] += delta\n\n def gradient_monte_carlo(self, state_distribution):\n \"\"\" the gradient-descent version of Monte Carlo state-value prediction\"\"\"\n state = self.start_state # initialize the state\n trajectory = [state] # track the transition state\n\n while state not in self.end_state:\n action = self.select_action() # select an action\n next_state, reward = self.find_next_state(state, action) # get the next state and reward\n trajectory.append(next_state) # record the transition state\n state = next_state\n\n for stat in trajectory[:-1]:\n delta = self.alpha * (reward - self.get_state_value(stat))\n self.update_group_value(stat, delta)\n state_distribution[stat] += 1\n\n\ndef dp_compute_value(test_class):\n \"\"\"using Dynamic programming to find the true state values\"\"\"\n value = np.arange(-test_class.end_state[1], test_class.end_state[1] + 1, 2) / test_class.end_state[1]\n print(\"Starting computing......\")\n while True:\n value_temp = value.copy()\n for state in test_class.states:\n value[state] = 0\n for act in test_class.action:\n for step in range(1, test_class.neighbors + 1):\n step *= act\n next_state = state + step\n next_state = max(min(next_state, test_class.end_state[1]), 0)\n # update the value\n value[state] += 1/(2*test_class.neighbors)*value[next_state]\n if np.linalg.norm(value - value_temp) < 0.001:\n break\n print(\"Completed!!!\")\n return value\n\n\nif __name__ == \"__main__\":\n episodes = 100000\n test_exam = RandomWalk()\n\n true_value = dp_compute_value(test_class=test_exam)\n distribution = np.zeros(test_exam.num_states + len(test_exam.end_state))\n for itr in tqdm(range(episodes)):\n test_exam.gradient_monte_carlo(distribution)\n\n distribution /= np.sum(distribution)\n 
state_value = [test_exam.get_state_value(stat) for stat in test_exam.states]\n\n plt.figure(1)\n plt.plot(test_exam.states, true_value[1:-1], label=\"True value\")\n plt.plot(test_exam.states, state_value, label=\"Approximate MC value\")\n plt.xlabel(\"State\")\n plt.ylabel(\"Value\")\n plt.legend()\n plt.savefig(\"./images/Figure9-1-1.png\")\n plt.show()\n\n plt.figure(2)\n plt.plot(test_exam.states, distribution[1:-1], label=\"State Distribution\")\n plt.xlabel(\"State\")\n plt.ylabel(\"Distribution\")\n plt.legend()\n plt.savefig(\"./images/Figure9-1-2.png\")\n plt.show()\n\n plt.close()\n print(\"Completed!!!You can check it in 'images' directory\")\n\n\n",
"# -*- coding:utf-8 -*-\n\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n\ndef greedy_algorithm(q_value):\n \"\"\"\n the greedy algorithm\n :param q_value: state value or action value\n :return:\n \"\"\"\n max_value = np.max(q_value)\n return np.random.choice([act for act, val in enumerate(q_value) if val == max_value])\n\n\nclass TrajectorySampling(object):\n def __init__(self, num_states, branches):\n self.num_states = num_states\n self.branches = branches\n\n self.actions = [0, 1] # two actions for every states\n self.epsilon = 0.1 # epsilon-greedy algorithm\n self.terminal_pro = 0.1 # the probability of transition to the terminal state\n self.discount = 1 #\n self.iterations = 20000 # the number of iterations\n\n # the transition probability matrix:(current state, action, next state)\n self.transition = np.random.randint(num_states, size=(num_states, len(self.actions), branches))\n # reward:(current state, action, next state)\n self.reward = np.random.randn(num_states, len(self.actions), branches)\n\n def epsilon_greedy(self, q_value):\n \"\"\"\n the epsilon-greedy algorithm\n :param q_value:state value or action value\n :return:\n \"\"\"\n random_num = np.random.rand()\n if random_num < self.epsilon:\n return np.random.choice(self.actions)\n else:\n max_value = np.max(q_value)\n return np.random.choice([act for act, val in enumerate(q_value) if val == max_value])\n\n def find_next_state(self, state, action):\n \"\"\"\n to find the next state\n :param state: the current state\n :param action: taking action in the current state\n :return: next state and reward\n \"\"\"\n random_num = np.random.rand()\n if random_num < self.epsilon:\n return self.num_states, 0\n next_state = np.random.randint(self.branches)\n return self.transition[state, action, next_state], self.reward[state, action, next_state]\n\n def compute_value(self, q_value):\n \"\"\"\n using Monte Carlo method to compute the state value or action value under greedy policy\n :param q_value: the state value or action value\n :return:\n \"\"\"\n runs = 1000\n returns = np.zeros(runs)\n for run in range(runs):\n state = 0\n reward = 0\n time = 0\n while state < self.num_states:\n action = greedy_algorithm(q_value[state])\n state, rew = self.find_next_state(state, action)\n reward += np.power(self.discount, time) * rew\n time += 1\n returns[run] = reward\n return np.mean(returns)\n\n def uniform_case(self, interval_time):\n start_state_value = []\n q_value = np.zeros((self.num_states, len(self.actions)))\n for it in tqdm(range(self.iterations)):\n state = it // len(self.actions) % self.num_states\n action = it % len(self.actions)\n next_state_all = self.transition[state, action, :]\n\n q_value[state, action] = (1 - self.terminal_pro)*(self.reward[state, action, :] +\n np.max(q_value[next_state_all, :], axis=1)).mean()\n if it % interval_time == 0:\n estimate_value = self.compute_value(q_value)\n start_state_value.append([it, estimate_value])\n return zip(*start_state_value)\n\n def on_policy_case(self, interval_time):\n start_state_value = []\n q_value = np.zeros((self.num_states, len(self.actions)))\n state = 0 # the start state\n for it in tqdm(range(self.iterations)):\n action = self.epsilon_greedy(q_value[state]) # to select an action under epsilon-policy\n next_state, _ = self.find_next_state(state, action) # feedback the next state and reward\n next_state_all = self.transition[state, action, :] # all possible next state under this state-action\n\n q_value[state, action] = (1 - 
self.terminal_pro)*(self.reward[state, action, :] +\n np.max(q_value[next_state_all, :], axis=1)).mean()\n if next_state == self.num_states:\n next_state = 0\n if it % interval_time == 0:\n estimate_value = self.compute_value(q_value)\n start_state_value.append([it, estimate_value])\n state = next_state\n return zip(*start_state_value)\n\n\nif __name__ == \"__main__\":\n num_stat = [1000, 10000]\n branch = [[1, 3, 10], [1]]\n\n num_tasks = 30 # average across 30 tasks\n ticks = 200 # number of evaluation points\n i = 1\n for states, branches in zip(num_stat, branch):\n plt.figure(i)\n for b in branches:\n all_tasks = [TrajectorySampling(states, b) for _ in range(num_tasks)]\n uniform_values = []\n on_policy_values = []\n for task in all_tasks:\n step, value = task.uniform_case(interval_time=task.iterations/ticks)\n uniform_values.append(value)\n step, value = task.on_policy_case(interval_time=task.iterations/ticks)\n on_policy_values.append(value)\n uniform_values = np.mean(np.asarray(uniform_values), axis=0)\n on_policy_values = np.mean(np.asarray(on_policy_values), axis=0)\n plt.plot(step, uniform_values, label=r\"b=%d, uniform\" % b)\n plt.plot(step, on_policy_values, label=r\"b=%d, on policy\" % b)\n plt.title(\"%d States\" % states)\n plt.xlabel(\"Computation time, in expected updates\")\n plt.ylabel(\"Value of start state under greedy policy\")\n plt.legend()\n plt.savefig(\"./images/Figure8-8-\"+str(i)+\".png\")\n plt.show()\n plt.close()\n i += 1\n print(\"Completed!!! You can check it in the 'images' directory\")\n",
"# -*- coding:utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Blackjack(object):\n def __init__(self):\n self.action_hit = 1\n self.action_stop = 0\n self.actions = [self.action_hit, self.action_stop]\n self.policy_player = np.ones(22, dtype=int)\n self.policy_dealer = np.ones(22, dtype=int)\n\n self.policy_player[20:] = self.action_stop\n self.policy_dealer[17:] = self.action_stop\n\n def target_policy_player(self, sum_player):\n return self.policy_player[sum_player]\n\n def behavior_policy_player(self):\n rand_num = np.random.binomial(1, 0.5)\n if rand_num:\n return self.action_hit\n else:\n return self.action_stop\n\n def get_card(self):\n card_num = np.random.randint(1, 14)\n return min(card_num, 10)\n\n def initial_game(self, initial_state=None):\n sum_player = 0 #sum of player's card\n usable_ace_player = False # type: bool #whether player count ace as 11 or not\n card1_dealer = 0 # the first card of dealer is showing card\n card2_dealer = 0 # the second card of deale\n\n if initial_state is None:\n num_of_ace = 0 # the number of ace card\n # if the sum of player's card less than 12, always hits\n while sum_player < 12:\n card = self.get_card()\n # if getting an ace card, counting it as 11\n if card == 1:\n num_of_ace += 1\n card = 11\n usable_ace_player = True\n sum_player += card\n if sum_player > 21:\n sum_player -= 10\n if num_of_ace == 1:\n usable_ace_player = False\n\n card1_dealer += self.get_card()\n card2_dealer += self.get_card()\n else:\n sum_player, usable_ace_player, card1_dealer = initial_state\n card2_dealer = self.get_card()\n state = [sum_player, usable_ace_player, card1_dealer] # initialize the state of game\n return state, card2_dealer\n\n def generate_episode(self, initial_state=None, initial_action=None):\n sum_dealer = 0 # sum of dealer's card\n trajectory_player = [] # the player's trajectory\n usable_ace_dealer = False #type: bool #whether dealer count ace as 11 or not\n\n state, card2_dealer = self.initial_game(initial_state)\n [sum_player, usable_ace_player, card1_dealer] = state\n # @sum_player: sum of player's card\n # @usable_ace_player: type: bool #whether player count ace as 11 or not\n # @card1_dealer: the first card of dealer is showing card\n # @card2_dealer: the second card of dealer\n\n if card1_dealer == 1 and card2_dealer == 1:\n sum_dealer += 11 + 1\n usable_ace_dealer = True\n elif card1_dealer == 1 and card2_dealer != 1:\n sum_dealer += 11 + card2_dealer\n usable_ace_dealer = True\n elif card1_dealer != 1 and card2_dealer == 1:\n sum_dealer += card1_dealer + 11\n usable_ace_dealer = True\n else:\n sum_dealer += card1_dealer + card2_dealer\n\n\n while True:\n if initial_action is not None:\n action = initial_action\n initial_action = None\n else:\n action = self.policy_player[sum_player] #get an action base on current sum of player's card\n trajectory_player.append([(sum_player, usable_ace_player, card1_dealer), action])\n\n if action == self.action_stop:\n break\n\n new_card = self.get_card()\n if new_card == 1 and sum_player+11 < 21:\n sum_player += 11\n usable_ace_player = True\n else:\n sum_player += new_card\n if sum_player > 21:\n if usable_ace_player:\n sum_player -= 10\n usable_ace_player = False\n else:\n return state, -1, trajectory_player\n\n\n while True:\n action = self.policy_dealer[sum_dealer] #get an action base on current sum of dealer's card\n if action == self.action_stop:\n break\n new_card = self.get_card()\n if new_card == 1 and sum_dealer+11 < 21:\n sum_dealer += 11\n usable_ace_dealer = True\n 
else:\n sum_dealer += new_card\n\n if sum_dealer > 21:\n if usable_ace_dealer:\n sum_dealer -= 10\n usable_ace_dealer = False\n else:\n return state, 1, trajectory_player\n\n if sum_player > sum_dealer:\n return state, 1, trajectory_player\n elif sum_player == sum_dealer:\n return state, 0, trajectory_player\n else:\n return state, -1, trajectory_player\n\n def first_visit_MC(self, episodes=10000):\n state_usable_ace = np.zeros((10, 10))\n state_usable_ace_count = np.ones(state_usable_ace.shape)\n state_no_usable_ace = np.zeros((10, 10))\n state_no_usable_ace_count = np.ones(state_no_usable_ace.shape)\n\n for ite in np.arange(episodes):\n _, reward, trajectory = self.generate_episode()\n for (sum_player, usable_ace, dealer_showing), _ in trajectory:\n sum_player -= 12\n dealer_showing -= 1\n if usable_ace:\n state_usable_ace[sum_player, dealer_showing] += reward\n state_usable_ace_count[sum_player, dealer_showing] += 1\n else:\n state_no_usable_ace[sum_player, dealer_showing] += reward\n state_no_usable_ace_count[sum_player, dealer_showing] += 1\n return state_usable_ace/state_usable_ace_count, state_no_usable_ace/state_no_usable_ace_count\n\n\ndef draw_image(states_mat, lables):\n\n print(\"Starting drawing......\")\n x_size, y_size = states_mat[0].shape\n x_axis = np.arange(1, x_size + 1)\n y_axis = np.arange(12, y_size + 12)\n x_axis, y_axis = np.meshgrid(x_axis, y_axis)\n nums = np.arange(len(lables))\n\n fig = plt.figure(figsize=(10, 10))\n for state, lable, num in zip(states_mat, lables, nums):\n ax = fig.add_subplot(221+num, projection='3d')\n ax.plot_wireframe(X=x_axis, Y=y_axis, Z=state)\n plt.xlabel(\"Dealer showing\")\n plt.ylabel(\"Player sum\")\n plt.title(lable)\n\n plt.savefig(\"./images/Figure5-1.jpg\", bbox=\"tight\")\n plt.close()\n print(\"Completed !!! You can check it in the 'images' directory.\")\n\n\nif __name__ == \"__main__\":\n blackjack_game = Blackjack()\n print(\"The Blackjack Game is starting......\")\n first_usable_ace, first_no_usable_ace = blackjack_game.first_visit_MC(episodes=10000)\n second_usable_ace, second_no_usable_ace = blackjack_game.first_visit_MC(episodes=500000)\n print(\"Game Over !!!\\n\")\n\n states = [first_usable_ace, second_usable_ace,\n first_no_usable_ace, second_no_usable_ace]\n lables = [\"Usable Ace, 10000 episodes\", \"Usable Ace, 500000 episodes\",\n \"No Usable Ace, 10000 episodes\", \"No Usable Ace, 500000 episodes\"]\n draw_image(states, lables)\n\n\n\n\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.linalg.norm",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.random.randint",
"matplotlib.pyplot.close",
"numpy.random.binomial",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.random.choice",
"numpy.asarray",
"matplotlib.pyplot.figure",
"numpy.power",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.ylabel",
"numpy.mean",
"numpy.random.rand",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.random.randint"
],
[
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"numpy.random.randint",
"matplotlib.pyplot.close",
"numpy.random.binomial",
"matplotlib.pyplot.xlabel",
"numpy.meshgrid",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
timelyportfolio/bokeh | [
"6cecb7211277b9d838039d0eb15e50a10f9ac3d1",
"6cecb7211277b9d838039d0eb15e50a10f9ac3d1",
"a976a85535cf137c6238ce9e90b41ab14ae8ce22",
"a976a85535cf137c6238ce9e90b41ab14ae8ce22",
"a976a85535cf137c6238ce9e90b41ab14ae8ce22",
"a976a85535cf137c6238ce9e90b41ab14ae8ce22"
] | [
"sphinx/source/tutorial/solutions/les_mis.py",
"examples/glyphs/prim_server.py",
"bokeh/charts/builder/tests/test_area_builder.py",
"bokeh/properties.py",
"examples/plotting/file/ajax_source.py",
"tests/glyphs/Text.py"
] | [
"import numpy as np\n\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.models import HoverTool, ColumnDataSource\nfrom bokeh.sampledata.les_mis import data\n\n# EXERCISE: try out different sort orders for the names\nnodes = data['nodes']\nnames = [node['name'] for node in sorted(data['nodes'], key=lambda x: x['group'])]\n\n# store the links information in numpy\nN = len(nodes)\ncounts = np.empty((N, N))\nfor link in data['links']:\n counts[link['source'], link['target']] = link['value']\n counts[link['target'], link['source']] = link['value']\n\n# We will use these colors to color each group by a different color\ncolormap = [\n \"#444444\", \"#a6cee3\", \"#1f78b4\", \"#b2df8a\", \"#33a02c\", \"#fb9a99\",\n \"#e31a1c\", \"#fdbf6f\", \"#ff7f00\", \"#cab2d6\", \"#6a3d9a\"\n]\n\n# set up some data to plot! We will need to have values for every pair of names. The\n# co-occurrence count for a given pair of names is in `count[i,j]`. The strategy is\n# to color each rect by the group, and set its alpha based on the count.\nxname = []\nyname = []\ncolor = []\nalpha = []\nfor i, n1 in enumerate(nodes):\n for j, n2 in enumerate(nodes):\n xname.append(n1['name'])\n yname.append(n2['name'])\n\n a = min(counts[i,j]/4.0, 0.9) + 0.1\n alpha.append(a)\n\n if n1['group'] == n2['group']:\n color.append(colormap[n1['group']])\n else:\n color.append('lightgrey')\n\n# EXERCISE: output static HTML file\noutput_file(\"les_mis.html\")\n\n# EXERCISE: create a ColumnDataSource to hold the xnames, ynames, colors, alphas,\n# and counts. NOTE: the counts array is 2D and will need to be flattened\nsource = ColumnDataSource(\n data=dict(\n xname=xname,\n yname=yname,\n colors=color,\n alphas=alpha,\n count=counts.flatten(),\n )\n)\n\n# create a new figure\np = figure(title=\"Les Mis Occurrences (one at a time)\",\n x_axis_location=\"above\", tools=\"resize,hover\",\n x_range=list(reversed(names)), y_range=names,\n plot_width=800, plot_height=800)\n\n# EXERCISE: use the `p.rect` renderer to render a categorical heatmap of all the\n# data. Experiment with the widths and heights (use categorical percentage\n# unite) as well as colors and alphas.\np.rect('xname', 'yname', 0.9, 0.9, source=source,\n color='colors', alpha='alphas', line_color=None)\n\n# EXERCISE: use p.grid, p.axis, etc. to style the plot. Some suggestions:\n# - remove the axis and grid lines\n# - remove the major ticks\n# - make the tick labels smaller\n# - set the x-axis orientation to vertical, or angled\np.grid.grid_line_color = None\np.axis.axis_line_color = None\np.axis.major_tick_line_color = None\np.axis.major_label_text_font_size = \"5pt\"\np.axis.major_label_standoff = 0\np.xaxis.major_label_orientation = np.pi/3\n\n# EXERCISE: configure the hover tool to display both names as well as\n# the count value as tooltips\nhover = p.select(dict(type=HoverTool))\nhover.tooltips = [\n ('names', '@yname, @xname'),\n ('count', '@count'),\n]\n\n# EXERCISE: show the plot\nshow(p)\n",
"from __future__ import print_function\n\nimport numpy as np\n\nfrom bokeh.browserlib import view\nfrom bokeh.document import Document\nfrom bokeh.models.glyphs import *\nfrom bokeh.models import (\n Plot, Range1d, LinearAxis, Grid, ColumnDataSource, PanTool, WheelZoomTool\n)\nfrom bokeh.session import Session\n\ndocument = Document()\nsession = Session()\nsession.use_doc('prim_server')\nsession.load_document(document)\n\nx = np.arange(1,6)\ny = np.arange(5, 0, -1)\n\nsource = ColumnDataSource(data=dict(x=x,y=y))\n\nxdr = Range1d(start=0, end=10)\nydr = Range1d(start=0, end=10)\n\ndef make_plot(name, glyph):\n plot = Plot(x_range=xdr, y_range=ydr, min_border=80)\n\n plot.add_glyph(source, glyph)\n\n xaxis = LinearAxis()\n plot.add_layout(xaxis, 'below')\n\n yaxis = LinearAxis()\n plot.add_layout(yaxis, 'left')\n\n plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\n plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\n plot.add_tools(PanTool(), WheelZoomTool())\n\n document.add(plot)\n session.store_document(document)\n\nmake_plot('annular_wedge', AnnularWedge(x=\"x\", y=\"y\", inner_radius=0.2, outer_radius=0.5, start_angle=0.8, end_angle=3.8))\nmake_plot('annulus', Annulus(x=\"x\", y=\"y\", inner_radius=0.2, outer_radius=0.5))\nmake_plot('arc', Arc(x=\"x\", y=\"y\", radius=0.4, start_angle=0.8, end_angle=3.8))\nmake_plot('circle', Circle(x=\"x\", y=\"y\", radius=1))\nmake_plot('oval', Oval(x=\"x\", y=\"y\", width=0.5, height=0.8, angle=-0.6))\nmake_plot('ray', Ray(x=\"x\", y=\"y\", length=25, angle=0.6))\nmake_plot('rect', Rect(x=\"x\", y=\"y\", width=0.5, height=0.8, angle=-0.6))\nmake_plot('text', Text(x=\"x\", y=\"y\", text={\"value\":\"foo\"}, angle=0.6))\nmake_plot('wedge', Wedge(x=\"x\", y=\"y\", radius=0.5, start_angle=0.9, end_angle=3.2))\n\nlink = session.object_link(document.context)\nprint(\"please visit %s to see plots\" % link)\nview(link)\n",
"\"\"\" This is the Bokeh charts testing interface.\n\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import\n\nfrom collections import OrderedDict\nimport unittest\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nimport pandas as pd\n\nfrom bokeh.charts import Area\nfrom bokeh.models import DataRange1d, Range1d\n\nfrom bokeh.charts.builder.tests._utils import create_chart\n\n#-----------------------------------------------------------------------------\n# Classes and functions\n#-----------------------------------------------------------------------------\n\nclass TestAreaBuilder(unittest.TestCase):\n\n def test_supported_input(self):\n xyvalues = OrderedDict(\n python=[2, 3, 7, 5, 26],\n pypy=[12, 33, 47, 15, 126],\n jython=[22, 43, 10, 25, 26],\n )\n\n # prepare some data to check tests results...\n zeros = np.zeros(5)\n x = np.array([4,3,2,1,0,0,1,2,3,4])\n y_jython = np.hstack((zeros, np.array(xyvalues['jython'])))\n y_pypy = np.hstack((zeros, np.array(xyvalues['pypy'])))\n y_python = np.hstack((zeros, np.array(xyvalues['python'])))\n\n data_keys = ['x', 'y_jython', 'y_pypy', 'y_python']\n for _xy in [xyvalues, dict(xyvalues), pd.DataFrame(xyvalues)]:\n area = create_chart(Area, _xy)\n builder = area._builders[0]\n self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))\n self.assertListEqual(sorted(builder._data.keys()), data_keys)\n assert_array_equal(builder._data['x'], x)\n assert_array_equal(builder._data['y_jython'], y_jython)\n assert_array_equal(builder._data['y_pypy'], y_pypy)\n assert_array_equal(builder._data['y_python'], y_python)\n\n self.assertIsInstance(area.x_range, DataRange1d)\n self.assertEqual(area.x_range.sources[0].source, builder._source.columns('x').source)\n self.assertIsInstance(area.y_range, Range1d)\n assert_array_almost_equal(area.y_range.start, -12.6, decimal=4)\n assert_array_almost_equal(area.y_range.end, 138.6, decimal=4)\n self.assertEqual(builder._source._data, builder._data)\n\n data_keys = ['x', 'y_0', 'y_1', 'y_2']\n lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]\n y_0, y_1, y_2 = y_python, y_pypy, y_jython\n for _xy in [lvalues, np.array(lvalues)]:\n area = create_chart(Area, _xy)\n builder = area._builders[0]\n\n self.assertEqual(builder._groups, ['0', '1', '2'])\n self.assertListEqual(sorted(builder._data.keys()), data_keys)\n assert_array_equal(builder._data['x'], x)\n assert_array_equal(builder._data['y_0'], y_0)\n assert_array_equal(builder._data['y_1'], y_1)\n assert_array_equal(builder._data['y_2'], y_2)\n\n self.assertIsInstance(area.x_range, DataRange1d)\n self.assertEqual(area.x_range.sources[0].source, builder._source.columns('x').source)\n self.assertIsInstance(area.y_range, Range1d)\n assert_array_almost_equal(area.y_range.start, -12.6, decimal=4)\n assert_array_almost_equal(area.y_range.end, 138.6, decimal=4)\n self.assertEqual(builder._source._data, builder._data)\n",
"\"\"\" Properties are objects that can be assigned as class level\nattributes on Bokeh models, to provide automatic serialization\nand validation.\n\nFor example, the following defines a model that has integer,\nstring, and list[float] properties::\n\n class Model(HasProps):\n foo = Int\n bar = String\n baz = List(Float)\n\nThe properties of this class can be initialized by specifying\nkeyword arguments to the initializer::\n\n m = Model(foo=10, bar=\"a str\", baz=[1,2,3,4])\n\nBut also by setting the attributes on an instance::\n\n m.foo = 20\n\nAttempts to set a property to a value of the wrong type will\nresult in a ``ValueError`` exception::\n\n >>> m.foo = 2.3\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/Users/bryan/work/bokeh/bokeh/properties.py\", line 585, in __setattr__\n super(HasProps, self).__setattr__(name, value)\n File \"/Users/bryan/work/bokeh/bokeh/properties.py\", line 159, in __set__\n raise e\n File \"/Users/bryan/work/bokeh/bokeh/properties.py\", line 152, in __set__\n self.validate(value)\n File \"/Users/bryan/work/bokeh/bokeh/properties.py\", line 707, in validate\n (nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))\n ValueError: expected a value of type int8, int16, int32, int64 or int, got 2.3 of type float\n\nAdditionally, properties know how to serialize themselves,\nto be understood by BokehJS.\n\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport re\nimport types\nimport difflib\nimport datetime\nimport dateutil.parser\nimport collections\nfrom importlib import import_module\nfrom copy import copy\nimport inspect\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom six import integer_types, string_types, add_metaclass, iteritems\nimport numpy as np\n\nfrom . import enums, colors\nfrom .utils import nice_join\n\nbokeh_integer_types = (np.int8, np.int16, np.int32, np.int64) + integer_types\n\n# used to indicate properties that are not set (vs null, None, etc)\nclass _NotSet(object):\n pass\n\nclass DeserializationError(Exception):\n pass\n\nclass Property(object):\n \"\"\" Base class for all type properties. \"\"\"\n\n def __init__(self, default=None, help=None):\n \"\"\" This is how the descriptor is created in the class declaration \"\"\"\n if isinstance(default, types.FunctionType): # aka. 
lazy value\n self.validate(default())\n else:\n self.validate(default)\n\n self._default = default\n self.__doc__ = help\n self.alternatives = []\n\n # This gets set by the class decorator at class creation time\n self.name = \"unnamed\"\n\n def __str__(self):\n return self.__class__.__name__\n\n @property\n def _name(self):\n return \"_\" + self.name\n\n @property\n def default(self):\n if not isinstance(self._default, types.FunctionType):\n return copy(self._default)\n else:\n value = self._default()\n self.validate(value)\n return value\n\n @classmethod\n def autocreate(cls, name=None):\n \"\"\" Called by the metaclass to create a\n new instance of this descriptor\n if the user just assigned it to a property without trailing\n parentheses.\n \"\"\"\n return cls()\n\n def matches(self, new, old):\n # XXX: originally this code warned about not being able to compare values, but that\n # doesn't make sense, because most comparisons involving numpy arrays will fail with\n # ValueError exception, thus warning about inevitable.\n try:\n if new is None or old is None:\n return new is old # XXX: silence FutureWarning from NumPy\n else:\n return new == old\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception as e:\n logger.debug(\"could not compare %s and %s for property %s (Reason: %s)\", new, old, self.name, e)\n return False\n\n def from_json(self, json, models=None):\n return json\n\n def transform(self, value):\n return value\n\n def validate(self, value):\n pass\n\n def is_valid(self, value):\n try:\n self.validate(value)\n except ValueError:\n return False\n else:\n return True\n\n def _get(self, obj):\n if not hasattr(obj, self._name):\n setattr(obj, self._name, self.default)\n return getattr(obj, self._name)\n\n def __get__(self, obj, owner=None):\n if obj is not None:\n return self._get(obj)\n elif owner is not None:\n return self\n else:\n raise ValueError(\"both 'obj' and 'owner' are None, don't know what to do\")\n\n def __set__(self, obj, value):\n try:\n self.validate(value)\n except ValueError as e:\n for tp, converter in self.alternatives:\n if tp.is_valid(value):\n value = converter(value)\n break\n else:\n raise e\n else:\n value = self.transform(value)\n\n old = self.__get__(obj)\n obj._changed_vars.add(self.name)\n if self._name in obj.__dict__ and self.matches(value, old):\n return\n setattr(obj, self._name, value)\n obj._dirty = True\n if hasattr(obj, '_trigger'):\n if hasattr(obj, '_block_callbacks') and obj._block_callbacks:\n obj._callback_queue.append((self.name, old, value))\n else:\n obj._trigger(self.name, old, value)\n\n def __delete__(self, obj):\n if hasattr(obj, self._name):\n delattr(obj, self._name)\n\n @property\n def has_ref(self):\n return False\n\n def accepts(self, tp, converter):\n tp = ParameterizedProperty._validate_type_param(tp)\n self.alternatives.append((tp, converter))\n return self\n\n def __or__(self, other):\n return Either(self, other)\n\nclass DataSpec(Property):\n \"\"\" Because the BokehJS glyphs support a fixed value or a named\n field for most data fields, we capture that in this descriptor.\n Fields can have a fixed value, or be a name that is looked up\n on the datasource (usually as a column or record array field).\n Numerical data can also have units of screen or data space.\n\n We mirror the JS convention in this Python descriptor. 
For details,\n see renderers/properties.coffee in BokehJS, and specifically the\n select() function.\n\n There are multiple ways to set a DataSpec, illustrated below with comments\n and example code.\n\n Setting DataSpecs\n\n\n Simple example::\n\n class Foo(HasProps):\n x = DataSpec(\"x\", units=\"data\")\n\n f = Foo()\n f.x = \"fieldname\" # Use the datasource field named \"fieldname\"\n f.x = 12 # A fixed value of 12\n\n Can provide a dict with the fields explicitly named::\n\n f.width = {\"name\": \"foo\"}\n f.size = {\"name\": \"foo\", \"units\": \"screen\"}\n\n Reading DataSpecs\n\n\n In the cases when the dataspec is set to just a field name or a\n fixed value, then those are returned. If the no values have\n been set, then the value of to_dict() is returned.\n\n In all cases, to determine the full dict that will be used to\n represent this dataspec, use the to_dict() method.\n\n Implementation\n\n\n The DataSpec instance is stored in the class dict, and acts as a\n descriptor. Thus, it is shared between all instances of the class.\n Instance-specific data is stored in the instance dict, in a private\n variable named _[attrname]. This stores the actual value that the\n user last set (and does not exist if the user has not yet set the\n value).\n\n \"\"\"\n\n def __init__(self, field=None, units=\"data\", min_value=None, default=_NotSet, help=None):\n \"\"\"\n Parameters\n ==========\n **field** is the string name of a data column to look up.\n **units** is either \"data\" or \"screen\"\n \"\"\"\n # Don't use .name because the HasProps metaclass uses that to\n # store the attribute name on this descriptor.\n if field is None or isinstance(field, string_types):\n self.field = field\n else:\n raise ValueError(\"'field' must be a string or None, got %r\" % field)\n\n self.units = units\n self._default = default\n self.min_value = min_value\n self.__doc__ = help\n\n @classmethod\n def autocreate(cls, name=None):\n # In this case, use the name the user assigned this DataSpec to\n # as the default field name.\n d = cls(field=name)\n return d\n\n def _get(self, obj):\n \"\"\" Try to implement a \"natural\" interface: if the user just set\n simple values or field names, the getter just returns those.\n However, if the user has also overridden the \"units\" or \"default\"\n settings, then a dictionary is returned.\n \"\"\"\n if hasattr(obj, self._name):\n setval = getattr(obj, self._name)\n if isinstance(setval, string_types):\n # A string representing the field\n return setval\n elif not isinstance(setval, dict):\n # Typically a number presenting the fixed value\n return setval\n else:\n return self.to_dict(obj)\n else:\n # If the user hasn't set anything\n if self.field is not None:\n return self.field\n if self.default != _NotSet:\n return self.default\n # XXX: implicit `return None` or unreachable?\n\n def to_dict(self, obj):\n # Build the complete dict\n setval = getattr(obj, self._name, None)\n if isinstance(setval, string_types):\n d = {\"field\": setval, \"units\": self.units}\n elif isinstance(setval, dict):\n d = {\"units\": self.units}\n d.update(setval)\n elif setval is not None:\n # a fixed value of some sort; no need to store the default value\n d = {\"value\": setval, \"units\": self.units}\n else:\n # If the user never set a value\n if self.field is not None:\n d = {\"field\": self.field, \"units\": self.units}\n elif self.default != _NotSet:\n d = {\"value\": self.default, \"units\": self.units}\n else:\n d = {}\n\n if \"value\" in d and self.min_value is not None:\n if 
d[\"value\"] < self.min_value:\n raise ValueError(\"value must be greater than %s\" % str(self.min_value))\n return d\n\n def __repr__(self):\n return \"DataSpec(field=%r, units=%r)\" % (self.field, self.units)\n\n\nclass ColorSpec(DataSpec):\n \"\"\" Subclass of DataSpec for specifying colors.\n\n Although this serves the same role as a DataSpec, its usage is somewhat\n different because:\n\n * Specifying a fixed value is much more common\n * Strings can be both field identifiers or refer to one of the SVG\n Named Colors (or be a hex value starting with \"#\")\n * There are no units\n\n For colors, because we support named colors and hex values prefaced\n with a \"#\", when we are handed a string value, there is a little\n interpretation: if the value is one of the 147 SVG named colors or\n it starts with a \"#\", then it is interpreted as a value. Otherwise,\n it is treated as a field name.\n\n If a 3-tuple is provided, then it is treated as an RGB (0..255).\n If a 4-tuple is provided, then it is treated as an RGBa (0..255), with\n alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)\n\n Unlike DataSpec, ColorSpecs do not have a \"units\" property.\n\n When reading out a ColorSpec, it returns a tuple, hex value, or\n field name\n\n There are two common use cases for ColorSpec: setting a constant value,\n and indicating a field name to look for on the datasource:\n\n >>> class Bar(HasProps):\n ... col = ColorSpec(default=\"green\")\n ... col2 = ColorSpec(\"colorfield\")\n\n >>> b = Bar()\n >>> b.col = \"red\" # sets a fixed value of red\n >>> b.col\n \"red\"\n >>> b.col = \"myfield\" # Use the datasource field named \"myfield\"\n >>> b.col\n \"myfield\"\n\n For more examples, see tests/test_glyphs.py\n \"\"\"\n\n NAMEDCOLORS = set(colors.__colors__)\n\n def __init__(self, field_or_value=None, field=None, value=None, default=_NotSet, help=None):\n # The fancy footwork below is so we auto-interpret the first positional\n # parameter as either a field or a fixed value. If either \"field\" or\n # \"value\" are then supplied as keyword arguments, then those will\n # override the inferred value from the positional argument.\n\n self.field = field\n self._default = default\n self.value = value\n self.__doc__ = help\n\n if field_or_value is not None:\n if self.isconst(field_or_value):\n self.value = field_or_value\n else:\n self.field = field_or_value\n\n if not (self.field is None or isinstance(self.field, string_types)):\n raise ValueError(\"'field' must be a string or None, got %r\" % self.field)\n\n # We need to distinguish if the user ever explicitly sets the attribute; if\n # they explicitly set it to None, we should pass on None in the dict.\n self._isset = False\n\n @classmethod\n def isconst(cls, arg):\n \"\"\" Returns True if the argument is a literal color. 
Check for a\n well-formed hexadecimal color value.\n \"\"\"\n return isinstance(arg, string_types) and \\\n ((len(arg) == 7 and arg[0] == \"#\") or arg in cls.NAMEDCOLORS)\n\n def _formattuple(self, colortuple):\n if isinstance(colortuple, tuple):\n if len(colortuple) == 3:\n return \"rgb%r\" % (colortuple,)\n else:\n return \"rgba%r\" % (colortuple,)\n else:\n return colortuple\n\n def _get(self, obj):\n # One key difference in ColorSpec.__get__ from the base class is\n # that we do not call self.to_dict() in any circumstance, because\n # this could lead to formatting color tuples as \"rgb(R,G,B)\" instead\n # of keeping them as tuples.\n if hasattr(obj, self._name):\n setval = getattr(obj, self._name)\n if self.isconst(setval) or isinstance(setval, tuple):\n # Fixed color value\n return setval\n elif isinstance(setval, string_types):\n return setval\n elif setval is None:\n return None\n else:\n # setval should be a dict at this point\n assert(isinstance(setval, dict))\n return setval\n else:\n if self.value is not None:\n return self.value\n if self.default != _NotSet:\n return self.default\n else:\n return self.field\n\n def __set__(self, obj, arg):\n self._isset = True\n if isinstance(arg, tuple):\n if len(arg) in (3, 4):\n # RGB or RGBa\n pass\n else:\n raise RuntimeError(\"Invalid tuple being assigned to ColorSpec; must be length 2, 3, or 4.\")\n elif isinstance(arg, colors.Color):\n arg = arg.to_css()\n super(ColorSpec, self).__set__(obj, arg)\n\n def to_dict(self, obj):\n setval = getattr(obj, self._name, None)\n if self.default != _NotSet and not self._isset:\n setval = self.default\n if setval is not None:\n if self.isconst(setval):\n # Hexadecimal or named color\n return {\"value\": setval}\n elif isinstance(setval, tuple):\n # RGB or RGBa\n # TODO: Should we validate that alpha is between 0..1?\n return {\"value\": self._formattuple(setval)}\n elif isinstance(setval, string_types):\n return {\"field\": setval}\n elif isinstance(setval, dict):\n # this is considerably simpler than the DataSpec case because\n # there are no units involved, and we've handled all of the\n # value cases above.\n return setval.copy()\n else:\n if self._isset:\n if self.value is None:\n return {\"value\": None}\n else:\n return {\"value\": getattr(obj, self._name, self.value)}\n else:\n if self.value:\n return {\"value\": self.value}\n return {\"field\": self.field}\n\n def __repr__(self):\n return \"ColorSpec(field=%r)\" % self.field\n\nclass Include(object):\n \"\"\" Include other properties from mixin Models, with a given prefix. 
\"\"\"\n\n def __init__(self, delegate, help=\"\", use_prefix=True):\n if not (isinstance(delegate, type) and issubclass(delegate, HasProps)):\n raise ValueError(\"expected a subclass of HasProps, got %r\" % delegate)\n\n self.delegate = delegate\n self.help = help\n self.use_prefix = use_prefix\n\nclass MetaHasProps(type):\n def __new__(cls, class_name, bases, class_dict):\n names = set()\n names_with_refs = set()\n container_names = set()\n\n # First pre-process to handle all the Includes\n includes = {}\n removes = set()\n for name, prop in class_dict.items():\n if not isinstance(prop, Include):\n continue\n\n delegate = prop.delegate\n if prop.use_prefix:\n prefix = re.sub(\"_props$\", \"\", name) + \"_\"\n else:\n prefix = \"\"\n\n for subpropname in delegate.class_properties(withbases=False):\n fullpropname = prefix + subpropname\n subprop = delegate.lookup(subpropname)\n if isinstance(subprop, Property):\n # If it's an actual instance, then we need to make a copy\n # so two properties don't write to the same hidden variable\n # inside the instance.\n subprop = copy(subprop)\n if \"%s\" in prop.help:\n doc = prop.help % subpropname.replace('_', ' ')\n else:\n doc = prop.help\n try:\n includes[fullpropname] = subprop(help=doc)\n except TypeError:\n includes[fullpropname] = subprop\n subprop.__doc__ = doc\n # Remove the name of the Include attribute itself\n removes.add(name)\n\n # Update the class dictionary, taking care not to overwrite values\n # from the delegates that the subclass may have explicitly defined\n for key, val in includes.items():\n if key not in class_dict:\n class_dict[key] = val\n for tmp in removes:\n del class_dict[tmp]\n\n dataspecs = {}\n for name, prop in class_dict.items():\n if isinstance(prop, Property):\n prop.name = name\n if prop.has_ref:\n names_with_refs.add(name)\n elif isinstance(prop, ContainerProperty):\n container_names.add(name)\n names.add(name)\n if isinstance(prop, DataSpec):\n dataspecs[name] = prop\n\n elif isinstance(prop, type) and issubclass(prop, Property):\n # Support the user adding a property without using parens,\n # i.e. 
using just the Property subclass instead of an\n # instance of the subclass\n newprop = prop.autocreate(name=name)\n class_dict[name] = newprop\n newprop.name = name\n names.add(name)\n\n # Process dataspecs\n if issubclass(prop, DataSpec):\n dataspecs[name] = newprop\n\n class_dict[\"__properties__\"] = names\n class_dict[\"__properties_with_refs__\"] = names_with_refs\n class_dict[\"__container_props__\"] = container_names\n if dataspecs:\n class_dict[\"_dataspecs\"] = dataspecs\n return type.__new__(cls, class_name, bases, class_dict)\n\ndef accumulate_from_subclasses(cls, propname):\n s = set()\n for c in inspect.getmro(cls):\n if issubclass(c, HasProps):\n s.update(getattr(c, propname))\n return s\n\n@add_metaclass(MetaHasProps)\nclass HasProps(object):\n\n def __init__(self, **properties):\n super(HasProps, self).__init__()\n self._changed_vars = set()\n\n for name, value in properties.items():\n setattr(self, name, value)\n\n def __setattr__(self, name, value):\n props = sorted(self.properties())\n\n if name.startswith(\"_\") or name in props:\n super(HasProps, self).__setattr__(name, value)\n else:\n matches, text = difflib.get_close_matches(name.lower(), props), \"similar\"\n\n if not matches:\n matches, text = props, \"possible\"\n\n raise AttributeError(\"unexpected attribute '%s' to %s, %s attributes are %s\" %\n (name, self.__class__.__name__, text, nice_join(matches)))\n\n def clone(self):\n \"\"\" Returns a duplicate of this object with all its properties\n set appropriately. Values which are containers are shallow-copied.\n \"\"\"\n return self.__class__(**self.changed_properties_with_values())\n\n @classmethod\n def lookup(cls, name):\n return getattr(cls, name)\n\n @classmethod\n def properties_with_refs(cls):\n \"\"\" Returns a set of the names of this object's properties that\n have references. We traverse the class hierarchy and\n pull together the full list of properties.\n \"\"\"\n if not hasattr(cls, \"__cached_allprops_with_refs\"):\n s = accumulate_from_subclasses(cls, \"__properties_with_refs__\")\n cls.__cached_allprops_with_refs = s\n return cls.__cached_allprops_with_refs\n\n @classmethod\n def properties_containers(cls):\n \"\"\" Returns a list of properties that are containers\n \"\"\"\n if not hasattr(cls, \"__cached_allprops_containers\"):\n s = accumulate_from_subclasses(cls, \"__container_props__\")\n cls.__cached_allprops_containers = s\n return cls.__cached_allprops_containers\n\n @classmethod\n def properties(cls):\n \"\"\" Returns a set of the names of this object's properties. We\n traverse the class hierarchy and pull together the full\n list of properties.\n \"\"\"\n if not hasattr(cls, \"__cached_allprops\"):\n s = cls.class_properties()\n cls.__cached_allprops = s\n return cls.__cached_allprops\n\n @classmethod\n def dataspecs(cls):\n \"\"\" Returns a set of the names of this object's dataspecs (and\n dataspec subclasses). 
Traverses the class hierarchy.\n \"\"\"\n if not hasattr(cls, \"__cached_dataspecs\"):\n dataspecs = set()\n for c in reversed(inspect.getmro(cls)):\n if hasattr(c, \"_dataspecs\"):\n dataspecs.update(c._dataspecs.keys())\n cls.__cached_dataspecs = dataspecs\n return cls.__cached_dataspecs\n\n @classmethod\n def dataspecs_with_refs(cls):\n dataspecs = {}\n for c in reversed(inspect.getmro(cls)):\n if hasattr(c, \"_dataspecs\"):\n dataspecs.update(c._dataspecs)\n return dataspecs\n\n def changed_vars(self):\n \"\"\" Returns which variables changed since the creation of the object,\n or the last called to reset_changed_vars().\n \"\"\"\n return set.union(self._changed_vars, self.properties_with_refs(),\n self.properties_containers())\n\n def reset_changed_vars(self):\n self._changed_vars = set()\n\n def properties_with_values(self):\n return dict([ (attr, getattr(self, attr)) for attr in self.properties() ])\n\n def changed_properties(self):\n return self.changed_vars()\n\n def changed_properties_with_values(self):\n return dict([ (attr, getattr(self, attr)) for attr in self.changed_properties() ])\n\n @classmethod\n def class_properties(cls, withbases=True):\n if withbases:\n return accumulate_from_subclasses(cls, \"__properties__\")\n else:\n return set(cls.__properties__)\n\n def set(self, **kwargs):\n \"\"\" Sets a number of properties at once \"\"\"\n for kw in kwargs:\n setattr(self, kw, kwargs[kw])\n\n def pprint_props(self, indent=0):\n \"\"\" Prints the properties of this object, nicely formatted \"\"\"\n for key, value in self.properties_with_values().items():\n print(\"%s%s: %r\" % (\" \"*indent, key, value))\n\nclass PrimitiveProperty(Property):\n \"\"\" A base class for simple property types. Subclasses should\n define a class attribute ``_underlying_type`` that is a tuple\n of acceptable type values for the property.\n\n \"\"\"\n\n _underlying_type = None\n\n def validate(self, value):\n super(PrimitiveProperty, self).validate(value)\n\n if not (value is None or isinstance(value, self._underlying_type)):\n raise ValueError(\"expected a value of type %s, got %s of type %s\" %\n (nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))\n\n def from_json(self, json, models=None):\n if json is None or isinstance(json, self._underlying_type):\n return json\n else:\n expected = nice_join([ cls.__name__ for cls in self._underlying_type ])\n raise DeserializationError(\"%s expected %s, got %s\" % (self, expected, json))\n\nclass Bool(PrimitiveProperty):\n \"\"\" Boolean type property. \"\"\"\n _underlying_type = (bool,)\n\nclass Int(PrimitiveProperty):\n \"\"\" Signed integer type property. \"\"\"\n _underlying_type = bokeh_integer_types\n\nclass Float(PrimitiveProperty):\n \"\"\" Floating point type property. \"\"\"\n _underlying_type = (float, ) + bokeh_integer_types\n\nclass Complex(PrimitiveProperty):\n \"\"\" Complex floating point type property. \"\"\"\n _underlying_type = (complex, float) + bokeh_integer_types\n\nclass String(PrimitiveProperty):\n \"\"\" String type property. 
\"\"\"\n _underlying_type = string_types\n\nclass Regex(String):\n \"\"\" Regex type property validates that text values match the\n given regular expression.\n \"\"\"\n def __init__(self, regex, default=None, help=None):\n self.regex = re.compile(regex)\n super(Regex, self).__init__(default=default, help=help)\n\n def validate(self, value):\n super(Regex, self).validate(value)\n\n if not (value is None or self.regex.match(value) is not None):\n raise ValueError(\"expected a string matching %r pattern, got %r\" % (self.regex.pattern, value))\n\n def __str__(self):\n return \"%s(%r)\" % (self.__class__.__name__, self.regex.pattern)\n\nclass ParameterizedProperty(Property):\n \"\"\" Base class for Properties that have type parameters, e.g.\n ``List(String)``.\n\n \"\"\"\n\n @staticmethod\n def _validate_type_param(type_param):\n if isinstance(type_param, type):\n if issubclass(type_param, Property):\n return type_param()\n else:\n type_param = type_param.__name__\n elif isinstance(type_param, Property):\n return type_param\n\n raise ValueError(\"expected a property as type parameter, got %s\" % type_param)\n\n @property\n def type_params(self):\n raise NotImplementedError(\"abstract method\")\n\n @property\n def has_ref(self):\n return any(type_param.has_ref for type_param in self.type_params)\n\nclass ContainerProperty(ParameterizedProperty):\n \"\"\" Base class for Container-like type properties. \"\"\"\n pass\n\nclass Seq(ContainerProperty):\n \"\"\" Sequence (list, tuple) type property.\n\n \"\"\"\n\n def _is_seq(self, value):\n return isinstance(value, collections.Container) and not isinstance(value, collections.Mapping)\n\n def _new_instance(self, value):\n return value\n\n def __init__(self, item_type, default=None, help=None):\n self.item_type = self._validate_type_param(item_type)\n super(Seq, self).__init__(default=default, help=help)\n\n @property\n def type_params(self):\n return [self.item_type]\n\n def validate(self, value):\n super(Seq, self).validate(value)\n\n if value is not None:\n if not (self._is_seq(value) and all(self.item_type.is_valid(item) for item in value)):\n raise ValueError(\"expected an element of %s, got %r\" % (self, value))\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.item_type)\n\n def from_json(self, json, models=None):\n if json is None:\n return None\n elif isinstance(json, list):\n return self._new_instance([ self.item_type.from_json(item, models) for item in json ])\n else:\n raise DeserializationError(\"%s expected a list or None, got %s\" % (self, json))\n\nclass List(Seq):\n \"\"\" Python list type property.\n\n \"\"\"\n\n def __init__(self, item_type, default=[], help=None):\n super(List, self).__init__(item_type, default=default, help=help)\n\n def _is_seq(self, value):\n return isinstance(value, list)\n\nclass Array(Seq):\n \"\"\" NumPy array type property.\n\n \"\"\"\n\n def _is_seq(self, value):\n import numpy as np\n return isinstance(value, np.ndarray)\n\n def _new_instance(self, value):\n return np.array(value)\n\nclass Dict(ContainerProperty):\n \"\"\" Python dict type property.\n\n If a default value is passed in, then a shallow copy of it will be\n used for each new use of this property.\n\n \"\"\"\n\n def __init__(self, keys_type, values_type, default={}, help=None):\n self.keys_type = self._validate_type_param(keys_type)\n self.values_type = self._validate_type_param(values_type)\n super(Dict, self).__init__(default=default, help=help)\n\n @property\n def type_params(self):\n return [self.keys_type, 
self.values_type]\n\n def validate(self, value):\n super(Dict, self).validate(value)\n\n if value is not None:\n if not (isinstance(value, dict) and \\\n all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))):\n raise ValueError(\"expected an element of %s, got %r\" % (self, value))\n\n def __str__(self):\n return \"%s(%s, %s)\" % (self.__class__.__name__, self.keys_type, self.values_type)\n\n def from_json(self, json, models=None):\n if json is None:\n return None\n elif isinstance(json, dict):\n return { self.keys_type.from_json(key, models): self.values_type.from_json(value, models) for key, value in iteritems(json) }\n else:\n raise DeserializationError(\"%s expected a dict or None, got %s\" % (self, json))\n\nclass Tuple(ContainerProperty):\n \"\"\" Tuple type property. \"\"\"\n def __init__(self, tp1, tp2, *type_params, **kwargs):\n self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))\n super(Tuple, self).__init__(default=kwargs.get(\"default\"), help=kwargs.get(\"help\"))\n\n @property\n def type_params(self):\n return self._type_params\n\n def validate(self, value):\n super(Tuple, self).validate(value)\n\n if value is not None:\n if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \\\n all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))):\n raise ValueError(\"expected an element of %s, got %r\" % (self, value))\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(map(str, self.type_params)))\n\n def from_json(self, json, models=None):\n if json is None:\n return None\n elif isinstance(json, list):\n return tuple(type_param.from_json(item, models) for type_param, item in zip(self.type_params, json))\n else:\n raise DeserializationError(\"%s expected a list or None, got %s\" % (self, json))\n\nclass Instance(Property):\n \"\"\" Instance type property, for references to other Models in the object\n graph.\n\n \"\"\"\n def __init__(self, instance_type, default=None, help=None):\n if not isinstance(instance_type, (type,) + string_types):\n raise ValueError(\"expected a type or string, got %s\" % instance_type)\n\n if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):\n raise ValueError(\"expected a subclass of HasProps, got %s\" % instance_type)\n\n self._instance_type = instance_type\n\n super(Instance, self).__init__(default=default, help=help)\n\n @property\n def instance_type(self):\n if isinstance(self._instance_type, str):\n module, name = self._instance_type.rsplit(\".\", 1)\n self._instance_type = getattr(import_module(module, \"bokeh\"), name)\n\n return self._instance_type\n\n @property\n def has_ref(self):\n return True\n\n def validate(self, value):\n super(Instance, self).validate(value)\n\n if value is not None:\n if not isinstance(value, self.instance_type):\n raise ValueError(\"expected an instance of type %s, got %s of type %s\" %\n (self.instance_type.__name__, value, type(value).__name__))\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.instance_type.__name__)\n\n def from_json(self, json, models=None):\n if json is None:\n return None\n elif isinstance(json, dict):\n from .plot_object import PlotObject\n if issubclass(self.instance_type, PlotObject):\n if models is None:\n raise DeserializationError(\"%s can't deserialize without models\" % self)\n else:\n model = models.get(json[\"id\"])\n\n if model is not None:\n return model\n else:\n raise 
DeserializationError(\"%s failed to deserilize reference to %s\" % (self, json))\n else:\n attrs = {}\n\n for name, value in iteritems(json):\n prop = self.instance_type.lookup(name)\n attrs[name] = prop.from_json(value, models)\n\n # XXX: this doesn't work when Instance(Superclass) := Subclass()\n # Serialization dict must carry type information to resolve this.\n return self.instance_type(**attrs)\n else:\n raise DeserializationError(\"%s expected a dict or None, got %s\" % (self, json))\n\nclass This(Property):\n \"\"\" A reference to an instance of the class being defined. \"\"\"\n pass\n\n# Fake types, ABCs\nclass Any(Property):\n \"\"\" Any type property accepts any values. \"\"\"\n pass\n\nclass Function(Property):\n \"\"\" Function type property. \"\"\"\n pass\n\nclass Event(Property):\n \"\"\" Event type property. \"\"\"\n pass\n\nclass Interval(ParameterizedProperty):\n ''' Range type property ensures values are contained inside a given interval. '''\n def __init__(self, interval_type, start, end, default=None, help=None):\n self.interval_type = self._validate_type_param(interval_type)\n self.interval_type.validate(start)\n self.interval_type.validate(end)\n self.start = start\n self.end = end\n super(Interval, self).__init__(default=default, help=help)\n\n @property\n def type_params(self):\n return [self.interval_type]\n\n def validate(self, value):\n super(Interval, self).validate(value)\n\n if not (value is None or self.interval_type.is_valid(value) and value >= self.start and value <= self.end):\n raise ValueError(\"expected a value of type %s in range [%s, %s], got %r\" % (self.interval_type, self.start, self.end, value))\n\n def __str__(self):\n return \"%s(%s, %r, %r)\" % (self.__class__.__name__, self.interval_type, self.start, self.end)\n\nclass Byte(Interval):\n ''' Byte type property. '''\n def __init__(self, default=0, help=None):\n super(Byte, self).__init__(Int, 0, 255, default=default, help=help)\n\nclass Either(ParameterizedProperty):\n \"\"\" Takes a list of valid properties and validates against them in succession. \"\"\"\n\n def __init__(self, tp1, tp2, *type_params, **kwargs):\n self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))\n default = kwargs.get(\"default\", self._type_params[0].default)\n help = kwargs.get(\"help\")\n super(Either, self).__init__(default=default, help=help)\n\n @property\n def type_params(self):\n return self._type_params\n\n def validate(self, value):\n super(Either, self).validate(value)\n\n if not (value is None or any(param.is_valid(value) for param in self.type_params)):\n raise ValueError(\"expected an element of either %s, got %r\" % (nice_join(self.type_params), value))\n\n def transform(self, value):\n for param in self.type_params:\n try:\n return param.transform(value)\n except ValueError:\n pass\n\n raise ValueError(\"Could not transform %r\" % value)\n\n def from_json(self, json, models=None):\n for tp in self.type_params:\n try:\n return tp.from_json(json, models)\n except DeserializationError:\n pass\n else:\n raise DeserializationError(\"%s couldn't deserialize %s\" % (self, json))\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(map(str, self.type_params)))\n\n def __or__(self, other):\n return self.__class__(*(self.type_params + [other]), default=self._default, help=self.help)\n\nclass Enum(Property):\n \"\"\" An Enum with a list of allowed values. 
The first value in the list is\n the default value, unless a default is provided with the \"default\" keyword\n argument.\n \"\"\"\n def __init__(self, enum, *values, **kwargs):\n if not (not values and isinstance(enum, enums.Enumeration)):\n enum = enums.enumeration(enum, *values)\n\n self.allowed_values = enum._values\n\n default = kwargs.get(\"default\", enum._default)\n help = kwargs.get(\"help\")\n super(Enum, self).__init__(default=default, help=help)\n\n def validate(self, value):\n super(Enum, self).validate(value)\n\n if not (value is None or value in self.allowed_values):\n raise ValueError(\"invalid value for %s: %r; allowed values are %s\" % (self.name, value, nice_join(self.allowed_values)))\n\n def __str__(self):\n return \"%s(%s)\" % (self.__class__.__name__, \", \".join(map(repr, self.allowed_values)))\n\nclass Auto(Enum):\n\n def __init__(self):\n super(Auto, self).__init__(\"auto\")\n\n def __str__(self):\n return self.__class__.__name__\n\n# Properties useful for defining visual attributes\nclass Color(Either):\n \"\"\" Accepts color definition in a variety of ways, and produces an\n appropriate serialization of its value for whatever backend.\n\n For colors, because we support named colors and hex values prefaced\n with a \"#\", when we are handed a string value, there is a little\n interpretation: if the value is one of the 147 SVG named colors or\n it starts with a \"#\", then it is interpreted as a value.\n\n If a 3-tuple is provided, then it is treated as an RGB (0..255).\n If a 4-tuple is provided, then it is treated as an RGBa (0..255), with\n alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)\n \"\"\"\n\n def __init__(self, default=None, help=None):\n types = (Enum(enums.NamedColor),\n Regex(\"^#[0-9a-fA-F]{6}$\"),\n Tuple(Byte, Byte, Byte),\n Tuple(Byte, Byte, Byte, Percent))\n super(Color, self).__init__(*types, default=default, help=help)\n\n def __str__(self):\n return self.__class__.__name__\n\nclass Align(Property):\n pass\n\nclass DashPattern(Either):\n \"\"\" Dash type property.\n\n Express patterns that describe line dashes. ``DashPattern`` values\n can be specified in a variety of ways:\n\n * An enum: \"solid\", \"dashed\", \"dotted\", \"dotdash\", \"dashdot\"\n * a tuple or list of integers in the `HTML5 Canvas dash specification style`_.\n Note that if the list of integers has an odd number of elements, then\n it is duplicated, and that duplicated list becomes the new dash list.\n\n To indicate that dashing is turned off (solid lines), specify the empty\n list [].\n\n .. _HTML5 Canvas dash specification style: http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/#dash-list\n\n \"\"\"\n\n _dash_patterns = {\n \"solid\": [],\n \"dashed\": [6],\n \"dotted\": [2,4],\n \"dotdash\": [2,4,6,4],\n \"dashdot\": [6,4,2,4],\n }\n\n def __init__(self, default=[], help=None):\n types = Enum(enums.DashPattern), Regex(r\"^(\\d+(\\s+\\d+)*)?$\"), Seq(Int)\n super(DashPattern, self).__init__(*types, default=default, help=help)\n\n def transform(self, value):\n value = super(DashPattern, self).transform(value)\n\n if isinstance(value, string_types):\n try:\n return self._dash_patterns[value]\n except KeyError:\n return [int(x) for x in value.split()]\n else:\n return value\n\n def __str__(self):\n return self.__class__.__name__\n\nclass Size(Float):\n \"\"\" Size type property.\n\n .. 
note::\n ``Size`` is equivalent to an unsigned int.\n\n \"\"\"\n def validate(self, value):\n super(Size, self).validate(value)\n\n if not (value is None or 0.0 <= value):\n raise ValueError(\"expected a non-negative number, got %r\" % value)\n\nclass Percent(Float):\n \"\"\" Percentage type property.\n\n Percents are useful for specifying alphas and coverage and extents; more\n semantically meaningful than Float(0..1).\n\n \"\"\"\n def validate(self, value):\n super(Percent, self).validate(value)\n\n if not (value is None or 0.0 <= value <= 1.0):\n raise ValueError(\"expected a value in range [0, 1], got %r\" % value)\n\nclass Angle(Float):\n \"\"\" Angle type property. \"\"\"\n pass\n\nclass Date(Property):\n \"\"\" Date (not datetime) type property.\n\n \"\"\"\n def __init__(self, default=datetime.date.today(), help=None):\n super(Date, self).__init__(default=default, help=help)\n\n def validate(self, value):\n super(Date, self).validate(value)\n\n if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):\n raise ValueError(\"expected a date, string or timestamp, got %r\" % value)\n\n def transform(self, value):\n value = super(Date, self).transform(value)\n\n if isinstance(value, (float,) + bokeh_integer_types):\n try:\n value = datetime.date.fromtimestamp(value)\n except ValueError:\n value = datetime.date.fromtimestamp(value/1000)\n elif isinstance(value, string_types):\n value = dateutil.parser.parse(value).date()\n\n return value\n\nclass Datetime(Property):\n \"\"\" Datetime type property.\n\n \"\"\"\n\n def __init__(self, default=datetime.date.today(), help=None):\n super(Datetime, self).__init__(default=default, help=help)\n\n def validate(self, value):\n super(Datetime, self).validate(value)\n\n if (isinstance(value, (datetime.datetime, datetime.date, np.datetime64))):\n return\n try:\n import pandas\n if isinstance(value, (pandas.Timestamp)):\n return\n except ImportError:\n pass\n\n raise ValueError(\"Expected a datetime instance, got %r\" % value)\n\n def transform(self, value):\n value = super(Datetime, self).transform(value)\n return value\n # Handled by serialization in protocol.py for now\n\n\nclass RelativeDelta(Dict):\n \"\"\" RelativeDelta type property for time deltas.\n\n \"\"\"\n\n def __init__(self, default={}, help=None):\n keys = Enum(\"years\", \"months\", \"days\", \"hours\", \"minutes\", \"seconds\", \"microseconds\")\n values = Int\n super(RelativeDelta, self).__init__(keys, values, default=default, help=help)\n\n def __str__(self):\n return self.__class__.__name__\n",
"import numpy as np\n\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.models.sources import AjaxDataSource\nfrom bokeh.models.ranges import Range1d\n\noutput_file(\"ajax_source.html\", title=\"ajax_source.py example\")\nsource = AjaxDataSource(data_url='http://localhost:5050/data',\n polling_interval=1000)\np = figure()\np.circle('x', 'y', source=source)\nshow(p)\n\nfrom flask import Flask, jsonify\nfrom bokeh.server.crossdomain import crossdomain\n\napp = Flask(__name__)\n\[email protected]('/data', methods=['GET', 'OPTIONS'])\n@crossdomain(origin=\"*\", methods=['GET', 'POST'], headers=None)\ndef hello_world():\n return jsonify(x=np.random.random(5).tolist(), y=np.random.random(5).tolist())\n\nif __name__ == \"__main__\":\n app.run(port=5050)\n",
"import numpy as np\n\nfrom bokeh.document import Document\nfrom bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid\nfrom bokeh.models.glyphs import Text\nfrom bokeh.plotting import show\n\nN = 9\nx = np.linspace(-2, 2, N)\ny = x**2\na = \"abcdefghijklmnopqrstuvwxyz\"\ntext = [a[i*3:i*3+3] for i in range(N)]\n\nsource = ColumnDataSource(dict(x=x, y=y, text=text))\n\nxdr = DataRange1d(sources=[source.columns(\"x\")])\nydr = DataRange1d(sources=[source.columns(\"y\")])\n\nplot = Plot(\n title=None, x_range=xdr, y_range=ydr, plot_width=300, plot_height=300,\n h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None)\n\nglyph = Text(x=\"x\", y=\"y\", text=\"text\", angle=0.3, text_color=\"#96deb3\")\nplot.add_glyph(source, glyph)\n\nxaxis = LinearAxis()\nplot.add_layout(xaxis, 'below')\n\nyaxis = LinearAxis()\nplot.add_layout(yaxis, 'left')\n\nplot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\nplot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\ndoc = Document()\ndoc.add(plot)\n\nshow(plot)"
] | [
[
"numpy.empty"
],
[
"numpy.arange"
],
[
"pandas.DataFrame",
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.array"
],
[
"numpy.random.random"
],
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aristoteleo/scribe-py | [
"ea28d2b588f8648b9ce1679fe18c3142aee2aa58"
] | [
"Scribe/other_estimators.py"
] | [
"import pandas\nimport numpy as np\nfrom multiprocessing import Pool\n\n\ndef __individual_corr(id1, id2, x, y):\n return (id1, id2, corr(x, y)[0])\n\n\ndef __individual_mi(id1, id2, x, y):\n return (id1, id2, mi(x, y))\n\n\ndef corr(self, number_of_processes=1):\n \"\"\"Calculate pairwise correlation over the data\n\n Arguments\n ---------\n self: 'class causal_model object'\n An instance of a causal_model class object. This object can be converted from an AnnData object through\n load_anndata function.\n number_of_processes: `int` (Default: 1)\n Number of processes to use.\n\n Returns\n ---------\n corr_results: 'pd.core.frame.DataFrame'\n The correlation network inferred.\n \"\"\"\n self.corr_results = pandas.DataFrame({node_id: [np.nan for i in self.node_ids] for node_id in self.node_ids}, index=self.node_ids)\n if number_of_processes > 1: temp_input = []\n\n for id1 in self.node_ids:\n for id2 in self.node_ids:\n\n if id1 == id2: continue\n\n if number_of_processes == 1:\n self.corr_results.loc[id1, id2] = __individual_corr((id1, id2, self.expression_concatenated.loc[id1], self.expression_concatenated.loc[id2]))[2]\n else:\n temp_input.append((id1, id2, self.expression_concatenated.loc[id1], self.expression_concatenated.loc[id2]))\n\n if number_of_processes > 1:\n tmp_results = Pool(number_of_processes).map(__individual_corr, temp_input)\n for t in tmp_results: self.corr_results.loc[t[0], t[1]] = t[2]\n\n return self.corr_results\n\n\ndef mi(self, number_of_processes=1):\n \"\"\"Calculate pairwise mutual information over the data\n\n Arguments\n ---------\n self: 'class causal_model object'\n An instance of a causal_model class object. This object can be converted from an AnnData object through\n load_anndata function.\n number_of_processes: `int` (Default: 1)\n Number of processes to use.\n\n Returns\n ---------\n mi_results: 'pd.core.frame.DataFrame'\n The mutual information network inferred.\n \"\"\"\n self.mi_results = pandas.DataFrame({node_id: [np.nan for i in self.node_ids] for node_id in self.node_ids}, index=self.node_ids)\n if number_of_processes > 1: temp_input = []\n\n for id1 in self.node_ids:\n for id2 in self.node_ids:\n\n if id1 == id2: continue\n\n if number_of_processes == 1:\n self.mi_results.loc[id1, id2] = __individual_mi((id1, id2,[[i] for i in self.expression_concatenated.loc[id1]],[[j] for j in self.expression_concatenated.loc[id2]] ))[2]\n else:\n temp_input.append((id1, id2,[[i] for i in self.expression_concatenated.loc[id1]],[[j] for j in self.expression_concatenated.loc[id2]] ))\n\n if number_of_processes > 1:\n tmp_results = Pool(number_of_processes).map(__individual_mi, temp_input)\n for t in tmp_results: self.mi_results.loc[t[0], t[1]] = t[2]\n\n return self.mi_results\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
frankilepro/LiTeFlow | [
"d07105ea00ad29b701e1b100d9cda2297eef19de"
] | [
"liteflow/input.py"
] | [
"\"\"\"Utilities for input pipelines.\"\"\"\n\nimport tensorflow as tf\n\n\ndef shuffle(tensors,\n capacity=32,\n min_after_dequeue=16,\n num_threads=1,\n dtypes=None,\n shapes=None,\n seed=None,\n shared_name=None,\n name='shuffle'):\n \"\"\"Wrapper around a `tf.RandomShuffleQueue` creation.\n\n Return a dequeue op that dequeues elements from `tensors` in a\n random order, through a `tf.RandomShuffleQueue` -- see for further\n documentation.\n\n Arguments:\n tensors: an iterable of tensors.\n capacity: (Optional) the capacity of the queue; default value set to 32.\n num_threads: (Optional) the number of threads to be used fo the queue runner;\n default value set to 1.\n min_after_dequeue: (Optional) minimum number of elements to remain in the\n queue after a `dequeue` or `dequeu_many` has been performend,\n in order to ensure better mixing of elements; default value set to 16.\n dtypes: (Optional) list of `DType` objects, one for each tensor in `tensors`;\n if not provided, will be inferred from `tensors`.\n shapes: (Optional) list of shapes, one for each tensor in `tensors`.\n seed: (Optional) seed for random shuffling.\n shared_name: (Optional) If non-empty, this queue will be shared under\n the given name across multiple sessions.\n name: Optional name scope for the ops.\n\n Returns:\n The tuple of tensors that was randomly dequeued from `tensors`.\n \"\"\"\n\n tensors = list(tensors)\n with tf.name_scope(name, values=tensors):\n dtypes = dtypes or list([t.dtype for t in tensors])\n queue = tf.RandomShuffleQueue(\n seed=seed,\n shared_name=shared_name,\n name='random_shuffle_queue',\n dtypes=dtypes,\n shapes=shapes,\n capacity=capacity,\n min_after_dequeue=min_after_dequeue)\n enqueue = queue.enqueue(tensors)\n runner = tf.train.QueueRunner(queue, [enqueue] * num_threads)\n tf.train.add_queue_runner(runner)\n dequeue = queue.dequeue()\n return dequeue\n\n\ndef shuffle_batch(tensors,\n batch_size,\n capacity=32,\n num_threads=1,\n min_after_dequeue=16,\n dtypes=None,\n shapes=None,\n seed=None,\n enqueue_many=False,\n dynamic_pad=True,\n allow_smaller_final_batch=False,\n shared_name=None,\n name='shuffle_batch'):\n \"\"\"Create shuffled and padded batches of tensors in `tensors`.\n\n Dequeue elements from `tensors` shuffling, batching and dynamically\n padding them. First a `tf.RandomShuffleQueue` is created and fed with\n `tensors` (using the `dket.input.shuffle` function); the dequeued tensors\n shapes are then set and fed into a `tf.train.batch` function that provides\n batching and dynamic padding.\n\n\n Arguments:\n tensors: an iterable of tensors.\n batch_size: an `int` representing th batch size.\n capacity: (Optional) the capacity of the queues; default value set to 32.\n num_threads: (Optional) the number of threads to be used fo the queue runner;\n default value set to 1.\n min_after_dequeue: (Optional) minimum number of elements to remain in the\n shuffling queue after a `dequeue` or `dequeu_many` has been performend,\n in order to ensure better mixing of elements; default value set to 16.\n dtypes: (Optional) list of `DType` objects, one for each tensor in `tensors`;\n if not provided, will be inferred from `tensors`.\n shapes: (Optional) list of shapes, one for each tensor in `tensors`.\n seed: (Optional) seed for random shuffling.\n enqueue_many: Whether each tensor in tensors is a single example.\n dynamic_pad: Boolean. 
Allow variable dimensions in input shapes.\n The given dimensions are padded upon dequeue so that tensors within\n a batch have the same shapes.\n allow_smaller_final_batch: (Optional) Boolean. If True, allow the final\n batch to be smaller if there are insufficient items left in the queue.\n shared_name: if set, the queues will be shared under the given name\n across different sessions.\n name: scope name for the given ops.\n\n Returns:\n A batch of tensors from `tensors`, shuffled and padded.\n \"\"\"\n\n tensors = list(tensors)\n with tf.name_scope(name, values=tensors):\n dtypes = dtypes or list([t.dtype for t in tensors])\n shapes = shapes or list([t.get_shape() for t in tensors])\n inputs = shuffle(tensors,\n seed=seed,\n dtypes=dtypes,\n capacity=capacity,\n num_threads=num_threads,\n min_after_dequeue=min_after_dequeue,\n shared_name=shared_name,\n name='shuffle')\n\n # fix the shapes\n for tensor, shape in zip(inputs, shapes):\n tensor.set_shape(shape)\n\n minibatch = tf.train.batch(\n tensors=inputs,\n batch_size=batch_size,\n num_threads=num_threads,\n capacity=capacity,\n dynamic_pad=dynamic_pad,\n allow_smaller_final_batch=allow_smaller_final_batch,\n shared_name=shared_name,\n enqueue_many=enqueue_many,\n name='batch')\n return minibatch\n"
] | [
[
"tensorflow.train.QueueRunner",
"tensorflow.train.add_queue_runner",
"tensorflow.RandomShuffleQueue",
"tensorflow.name_scope",
"tensorflow.train.batch"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Nickwangpeng/tsfresh | [
"48118627d9d4644906613e25b077ce2ec82ca2f9"
] | [
"tsfresh/feature_selection/relevance.py"
] | [
"# -*- coding: utf-8 -*-\n# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)\n# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016\n\"\"\"\nContains a feature selection method that evaluates the importance of the different extracted features. To do so,\nfor every feature the influence on the target is evaluated by an univariate tests and the p-Value is calculated.\nThe methods that calculate the p-values are called feature selectors.\n\nAfterwards the Benjamini Hochberg procedure which is a multiple testing procedure decides which features to keep and\nwhich to cut off (solely based on the p-values).\n\"\"\"\n\nfrom multiprocessing import Pool\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom functools import partial, reduce\n\nfrom tsfresh import defaults\nfrom tsfresh.feature_selection.benjamini_hochberg_test import benjamini_hochberg_test\nfrom tsfresh.feature_selection.significance_tests import target_binary_feature_real_test, \\\n target_real_feature_binary_test, target_real_feature_real_test, target_binary_feature_binary_test\nfrom tsfresh.utilities.distribution import initialize_warnings_in_workers\n\n\ndef calculate_relevance_table(X, y, ml_task='auto', n_jobs=defaults.N_PROCESSES,\n show_warnings=defaults.SHOW_WARNINGS, chunksize=defaults.CHUNKSIZE,\n test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,\n test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,\n test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,\n test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,\n fdr_level=defaults.FDR_LEVEL, hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT):\n \"\"\"\n Calculate the relevance table for the features contained in feature matrix `X` with respect to target vector `y`.\n The relevance table is calculated for the intended machine learning task `ml_task`.\n\n To accomplish this for each feature from the input pandas.DataFrame an univariate feature significance test\n is conducted. Those tests generate p values that are then evaluated by the Benjamini Hochberg procedure to\n decide which features to keep and which to delete.\n\n We are testing\n\n :math:`H_0` = the Feature is not relevant and should not be added\n\n against\n\n :math:`H_1` = the Feature is relevant and should be kept\n\n or in other words\n\n :math:`H_0` = Target and Feature are independent / the Feature has no influence on the target\n\n :math:`H_1` = Target and Feature are associated / dependent\n\n When the target is binary this becomes\n\n :math:`H_0 = \\\\left( F_{\\\\text{target}=1} = F_{\\\\text{target}=0} \\\\right)`\n\n :math:`H_1 = \\\\left( F_{\\\\text{target}=1} \\\\neq F_{\\\\text{target}=0} \\\\right)`\n\n Where :math:`F` is the distribution of the target.\n\n In the same way we can state the hypothesis when the feature is binary\n\n :math:`H_0 = \\\\left( T_{\\\\text{feature}=1} = T_{\\\\text{feature}=0} \\\\right)`\n\n :math:`H_1 = \\\\left( T_{\\\\text{feature}=1} \\\\neq T_{\\\\text{feature}=0} \\\\right)`\n\n Here :math:`T` is the distribution of the target.\n\n TODO: And for real valued?\n\n :param X: Feature matrix in the format mentioned before which will be reduced to only the relevant features.\n It can contain both binary or real-valued features at the same time.\n :type X: pandas.DataFrame\n\n :param y: Target vector which is needed to test which features are relevant. 
Can be binary or real-valued.\n :type y: pandas.Series or numpy.ndarray\n\n :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.\n Defaults to `'auto'`, meaning the intended task is inferred from `y`.\n If `y` has a boolean, integer or object dtype, the task is assumend to be classification,\n else regression.\n :type ml_task: str\n\n :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature\n (currently unused)\n :type test_for_binary_target_binary_feature: str\n\n :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature\n :type test_for_binary_target_real_feature: str\n\n :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused)\n :type test_for_real_target_binary_feature: str\n\n :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)\n :type test_for_real_target_real_feature: str\n\n :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant\n features among all created features.\n :type fdr_level: float\n\n :param hypotheses_independent: Can the significance of the features be assumed to be independent?\n Normally, this should be set to False as the features are never\n independent (e.g. mean and median)\n :type hypotheses_independent: bool\n\n :param n_jobs: Number of processes to use during the p-value calculation\n :type n_jobs: int\n\n :param show_warnings: Show warnings during the p-value calculation (needed for debugging of calculators).\n :type show_warnings: bool\n\n :param chunksize: The size of one chunk that is submitted to the worker\n process for the parallelisation. Where one chunk is defined as a\n singular time series for one id and one kind. If you set the chunksize\n to 10, then it means that one task is to calculate all features for 10\n time series. If it is set it to None, depending on distributor,\n heuristics are used to find the optimal chunksize. If you get out of\n memory exceptions, you can try it with the dask distributor and a\n smaller chunksize.\n :type chunksize: None or int\n\n :return: A pandas.DataFrame with each column of the input DataFrame X as index with information on the significance\n of this particular feature. 
The DataFrame has the columns\n \"Feature\",\n \"type\" (binary, real or const),\n \"p_value\" (the significance of this feature as a p-value, lower means more significant)\n \"relevant\" (True if the Benjamini Hochberg procedure rejected the null hypothesis [the feature is\n not relevant] for this feature)\n :rtype: pandas.DataFrame\n \"\"\"\n if ml_task not in ['auto', 'classification', 'regression']:\n raise ValueError('ml_task must be one of: \\'auto\\', \\'classification\\', \\'regression\\'')\n elif ml_task == 'auto':\n ml_task = infer_ml_task(y)\n\n with warnings.catch_warnings():\n if not show_warnings:\n warnings.simplefilter(\"ignore\")\n else:\n warnings.simplefilter(\"default\")\n\n if n_jobs == 0:\n map_function = map\n else:\n pool = Pool(processes=n_jobs, initializer=initialize_warnings_in_workers, initargs=(show_warnings,))\n map_function = partial(pool.map, chunksize=chunksize)\n\n relevance_table = pd.DataFrame(index=pd.Series(X.columns, name='feature'))\n relevance_table['feature'] = relevance_table.index\n relevance_table['type'] = pd.Series(\n map_function(get_feature_type, [X[feature] for feature in relevance_table.index]),\n index=relevance_table.index\n )\n table_real = relevance_table[relevance_table.type == 'real'].copy()\n table_binary = relevance_table[relevance_table.type == 'binary'].copy()\n\n table_const = relevance_table[relevance_table.type == 'constant'].copy()\n table_const['p_value'] = np.NaN\n table_const['relevant'] = False\n\n if not table_const.empty:\n warnings.warn(\"[test_feature_significance] Constant features: {}\"\n .format(\", \".join(table_const.feature)), RuntimeWarning)\n\n if len(table_const) == len(relevance_table):\n if n_jobs != 0:\n pool.close()\n pool.terminate()\n pool.join()\n return table_const\n\n if ml_task == 'classification':\n tables = []\n for label in y.unique():\n _test_real_feature = partial(target_binary_feature_real_test, y=(y == label),\n test=test_for_binary_target_real_feature)\n _test_binary_feature = partial(target_binary_feature_binary_test, y=(y == label))\n tmp = _calculate_relevance_table_for_implicit_target(\n table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent,\n fdr_level, map_function\n )\n tables.append(tmp)\n relevance_table = combine_relevance_tables(tables)\n elif ml_task == 'regression':\n _test_real_feature = partial(target_real_feature_real_test, y=y)\n _test_binary_feature = partial(target_real_feature_binary_test, y=y)\n relevance_table = _calculate_relevance_table_for_implicit_target(\n table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent,\n fdr_level, map_function\n )\n\n if n_jobs != 0:\n pool.close()\n pool.terminate()\n pool.join()\n\n relevance_table = pd.concat([relevance_table, table_const], axis=0)\n\n if sum(relevance_table['relevant']) == 0:\n warnings.warn(\n \"No feature was found relevant for {} for fdr level = {} (which corresponds to the maximal percentage \"\n \"of irrelevant features, consider using an higher fdr level or add other features.\"\n .format(ml_task, fdr_level), RuntimeWarning)\n\n return relevance_table\n\n\ndef _calculate_relevance_table_for_implicit_target(table_real, table_binary, X, test_real_feature, test_binary_feature,\n hypotheses_independent, fdr_level, map_function):\n table_real['p_value'] = pd.Series(\n map_function(test_real_feature, [X[feature] for feature in table_real.index]),\n index=table_real.index\n )\n table_binary['p_value'] = pd.Series(\n 
map_function(test_binary_feature, [X[feature] for feature in table_binary.index]),\n index=table_binary.index\n )\n relevance_table = pd.concat([table_real, table_binary])\n return benjamini_hochberg_test(relevance_table, hypotheses_independent, fdr_level)\n\n\ndef infer_ml_task(y):\n \"\"\"\n Infer the machine learning task to select for.\n The result will be either `'regression'` or `'classification'`.\n If the target vector only consists of integer typed values or objects, we assume the task is `'classification'`.\n Else `'regression'`.\n\n :param y: The target vector y.\n :type y: pandas.Series\n :return: 'classification' or 'regression'\n :rtype: str\n \"\"\"\n if y.dtype.kind in np.typecodes['AllInteger'] or y.dtype == np.object:\n ml_task = 'classification'\n else:\n ml_task = 'regression'\n\n return ml_task\n\n\ndef combine_relevance_tables(relevance_tables):\n \"\"\"\n Create a combined relevance table out of a list of relevance tables,\n aggregating the p-values and the relevances.\n\n :param relevance_tables: A list of relevance tables\n :type relevance_tables: List[pd.DataFrame]\n :return: The combined relevance table\n :rtype: pandas.DataFrame\n \"\"\"\n def _combine(a, b):\n a.relevant |= b.relevant\n a.p_value = a.p_value.combine(b.p_value, min, 1)\n return a\n\n return reduce(_combine, relevance_tables)\n\n\ndef get_feature_type(feature_column):\n \"\"\"\n For a given feature, determine if it is real, binary or constant.\n Here binary means that only two unique values occur in the feature.\n\n :param feature_column: The feature column\n :type feature_column: pandas.Series\n :return: 'constant', 'binary' or 'real'\n \"\"\"\n n_unique_values = len(set(feature_column.values))\n if n_unique_values == 1:\n return 'constant'\n elif n_unique_values == 2:\n return 'binary'\n else:\n return 'real'\n"
] | [
[
"pandas.concat",
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
moonieann/welib | [
"0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52",
"0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52",
"0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52",
"0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52",
"0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52",
"0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52",
"0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52"
] | [
"welib/FEM/reduction.py",
"welib/FEM/utils.py",
"welib/tools/external/setup.py",
"welib/fast/olaf.py",
"welib/tools/spectral.py",
"welib/FEM/fem_beam.py",
"welib/weio/tecplot_file.py"
] | [
"import numpy as np\n\nfrom welib.system.eva import eig\n\n\ndef CraigBampton(MM, KK, Ileader, nModesCB=None, Ifollow=None, F=None, DD=None, fullModesOut=False): \n \"\"\"\n Performs the CraigBampton (CB) reduction of a system given some input master dofs index\n and a number of modes. Reduced matrices, and Guyan and Craig-Bampton modes are returned.\n \n INPUTS\n Ileader : index of leader DOFs\n nModesCB: number of CB modes to keep\n MM, KK : Maff and stiffness matrix\n \n INPUTS (Optional)\n nModesCB: number of CB modes to keep. Default: all\n Ifollow: indices of follower DOFs. Default: complementary set to Ileader\n fullModesOut: if true, the Guyan and CB modes\n \n OUTPUTS\n fc: critical frequency\n Mr,Kr,Fr,Dr: reduced mass, stiffness, force and damping matrices\n \n AUTHOR: E. Branlard\n \"\"\"\n \n # --- Input cleanup\n Ileader = np.asarray(Ileader).ravel()\n # --- Optional arguments\n if Ifollow is None:\n # Then we take the complementary to Ileader\n Iall = np.arange(len(MM))\n Ifollow = [i for i in Iall if i not in Ileader]\n else:\n Ifollow = np.asarray(Ifollow).ravel()\n if nModesCB is None:\n nModesCB=len(Ifollow)\n\n # Partitioning - NOTE: leaders will be first in reduced matrix Mr and Kr\n Mll= MM[np.ix_(Ileader, Ileader)]\n Kll= KK[np.ix_(Ileader, Ileader)]\n Mff= MM[np.ix_(Ifollow, Ifollow)]\n Kff= KK[np.ix_(Ifollow, Ifollow)]\n Mlf= MM[np.ix_(Ileader, Ifollow)]\n Klf= KK[np.ix_(Ileader, Ifollow)]\n \n # --- Solve for Guyan modes\n Kff1Kfl = np.linalg.solve(Kff,(np.transpose(Klf))) # Kss1Ksm=Kss\\(Kms');\n #Kff1Kfl = np.linalg.inv(Kff).dot(Klf.T)\n Kff1Kfl = np.linalg.lstsq(Kff,Klf.T, rcond=None)[0]\n Phi_G = - Kff1Kfl;\n\n # --- Solve EVP for constrained system\n Phi_CB, Lambda_CB = eig(Kff,Mff)\n Omega2 = np.diag(Lambda_CB).copy()\n Omega2[Omega2<0]=0.0\n f_CB = np.sqrt(Omega2)/(2*np.pi)\n # --- Taking only thefirst few modes\n Phi_CB = Phi_CB[:,:nModesCB]\n Lambda_CB = Lambda_CB[:,:nModesCB]\n f_CB = f_CB[:nModesCB]\n # --- Using the T matrix:\n # # T=[eye(nm) zeros(nm,nModesCB); -Kff1Kfl Phi_CB];\n # # MM=[Mll Mlf; Mlf' Mff];\n # # KK=[Kll Klf; Klf' Kff];\n # # Mr=T' * MM * T;\n # # Kr=T' * KK * T;\n\n # --- Building reduced matrices\n #Mr11=Mmm-(Kss1Ksm')*Mms' - Mms*Kss1Ksm + (Kss1Ksm')*Mss*Kss1Ksm;\n #Kr11=Kmm-Kms*Kss1Ksm;\n #Mr12=(Mms-(Kss1Ksm')*Mss)*Psic;\n Mr11 = Mll - (np.transpose(Kff1Kfl)).dot(np.transpose(Mlf)) - Mlf.dot(Kff1Kfl) + (np.transpose(Kff1Kfl)).dot(Mff).dot(Kff1Kfl)\n Kr11 = Kll - Klf.dot(Kff1Kfl)\n Mr12 = (Mlf - (np.transpose(Kff1Kfl)).dot(Mff)).dot(Phi_CB)\n ZZ = np.zeros((len(Ileader),nModesCB))\n\n # --- Guyan frequencies\n Phi_G2, Lambda_G = eig(Kr11,Mr11)\n Omega2 = np.diag(Lambda_G).copy()\n Omega2[Omega2<0]=0.0\n f_G = np.sqrt(Omega2)/(2*np.pi)\n\n # Building reduced matrix \n Mr = np.block( [ [Mr11 , Mr12 ], [ Mr12.T, np.eye(nModesCB) ] ])\n Kr = np.block( [ [Kr11 , ZZ ], [ ZZ.T , Lambda_CB[:nModesCB,:]] ])\n\n # --- Augmenting modes so that they have the same dimension as MM\n # Add \"1\" for Guyan modes, and \"0\" for CB modes\n if fullModesOut:\n Phi_G, Phi_CB = augmentModes(Ileader, Phi_G, Phi_CB, Ifollow=Ifollow)\n\n if DD is not None:\n raise NotImplementedError('Not done')\n if F is not None:\n raise NotImplementedError('Not done')\n\n return Mr, Kr, Phi_G, Phi_CB, f_G, f_CB\n\n\ndef augmentModes(Ileader, Phi_G, Phi_CB, Ifollow=None):\n \"\"\" \n Augment Guyan and Craig Bampton modes, so as to return full DOF vectors\n going back to the original size\n \"\"\"\n # --- Augment modes so that they go back to same size after BC\n nl = 
len(Ileader)\n nall = nl+Phi_G.shape[0]\n nf = nall-nl\n if Ifollow is None:\n Iall = np.arange(nall)\n Ifollow = list(np.setdiff1d(Iall, Ileader))\n # Guyan\n Phi_G_aug = np.zeros((nall, nl))\n Phi_G_aug[Ileader,:] = np.eye(nl)\n Phi_G_aug[Ifollow,:] = Phi_G\n # \n Phi_CB_aug = np.zeros((nall, Phi_CB.shape[1]))\n Phi_CB_aug[Ileader,:] = 0\n Phi_CB_aug[Ifollow,:] = Phi_CB\n\n return Phi_G_aug, Phi_CB_aug\n\n\n\nif __name__=='__main__':\n np.set_printoptions(linewidth=500)\n L = 100\n EI = 1868211939147.334\n Maff = L * 8828.201296825122\n KK = EI / (L ** 3) * np.array([[12,6 * L,- 12,6 * L],[6 * L,4 * L ** 2,- 6 * L,2 * L ** 2],[- 12,- 6 * L,12,- 6 * L],[6 * L,2 * L ** 2,- 6 * L,4 * L ** 2]])\n MM = Maff / 420 * np.array([[156,22 * L,54,- 13 * L],[22 * L,4 * L ** 2,13 * L,- 3 * L ** 2],[54,13 * L,156,- 22 * L],[- 13 * L,- 3 * L ** 2,- 22 * L,4 * L ** 2]])\n print(MM)\n Mr,Kr,Phi_G,Phi_CB,f_CB,f_G = CraigBampton(MM,KK,[2], nModesCB=2)\n print(Mr)\n print(Kr)\n print(Phi_G)\n print(Phi_CB)\n print(f_CB)\n ## --- Solve EVA\n __,Lambda = eig(Kr,Mr)\n f= np.sqrt(np.sort(np.diag(Lambda)))/(2*np.pi)\n print(f)\n# f = np.sqrt(Omega2) / (2 * pi)\n# for i in np.arange(1,np.amin(8,Mr.shape[1-1])+1).reshape(-1):\n# print('f%d=%8.3f Rayleigh Ratio=%.5f\\n' % (i,f(i),(f(i) / fc) ** 2))\n\n\n",
"import numpy as np\n\ndef skew(x):\n x=np.asarray(x).ravel()\n \"\"\" Returns the skew symmetric matrix M, such that: cross(x,v) = M v \"\"\"\n return np.array([[0, -x[2], x[1]],[x[2],0,-x[0]],[-x[1],x[0],0]])\n\n\n# !> Computes directional cosine matrix DirCos\n# !! Transforms from element to global coordinates: xg = DC.xe, Kg = DC.Ke.DC^t\n# !! Assumes that the element main direction is along ze.\n# !! NOTE that this is the transpose of what is normally considered the Direction Cosine Matrix \n# SUBROUTINE GetDirCos(P1, P2, DirCos, L_out, ErrStat, ErrMsg)\n# REAL(ReKi) , INTENT(IN ) :: P1(3), P2(3) ! (x,y,z) global positions of two nodes making up an element\n# REAL(FEKi) , INTENT( OUT) :: DirCos(3, 3) ! calculated direction cosine matrix\n# REAL(ReKi) , INTENT( OUT) :: L_out ! length of element\n# INTEGER(IntKi), INTENT( OUT) :: ErrStat ! Error status of the operation\n# CHARACTER(*), INTENT( OUT) :: ErrMsg ! Error message if ErrStat /= ErrID_None\n# REAL(FEKi) :: Dx, Dy, Dz, Dxy,L! distances between nodes\n# ErrMsg = \"\"\n# ErrStat = ErrID_None\n# \n# Dx=P2(1)-P1(1)\n# Dy=P2(2)-P1(2)\n# Dz=P2(3)-P1(3)\n# Dxy = sqrt( Dx**2 + Dy**2 )\n# L = sqrt( Dx**2 + Dy**2 + Dz**2)\n# \n# IF ( EqualRealNos(L, 0.0_FEKi) ) THEN\n# ErrMsg = ' Same starting and ending location in the element.'\n# ErrStat = ErrID_Fatal\n# RETURN\n# ENDIF\n# \n# IF ( EqualRealNos(Dxy, 0.0_FEKi) ) THEN \n# DirCos=0.0_FEKi ! whole matrix set to 0\n# IF ( Dz < 0) THEN !x is kept along global x\n# DirCos(1, 1) = 1.0_FEKi\n# DirCos(2, 2) = -1.0_FEKi\n# DirCos(3, 3) = -1.0_FEKi\n# ELSE\n# DirCos(1, 1) = 1.0_ReKi\n# DirCos(2, 2) = 1.0_ReKi\n# DirCos(3, 3) = 1.0_ReKi\n# ENDIF \n# ELSE\n# DirCos(1, 1) = Dy/Dxy\n# DirCos(1, 2) = +Dx*Dz/(L*Dxy)\n# DirCos(1, 3) = Dx/L\n# \n# DirCos(2, 1) = -Dx/Dxy\n# DirCos(2, 2) = +Dz*Dy/(L*Dxy)\n# DirCos(2, 3) = Dy/L\n# \n# DirCos(3, 1) = 0.0_FEKi\n# DirCos(3, 2) = -Dxy/L\n# DirCos(3, 3) = +Dz/L\n# ENDIF\n# L_out= real(L, ReKi)\n# \n# END SUBROUTINE GetDirCos\n# !------------------------------------------------------------------------------------------------------\n# !> Rigid transformation matrix between DOFs of node j and k where node j is the leader node.\n# SUBROUTINE GetRigidTransformation(Pj, Pk, TRigid, ErrStat, ErrMsg)\n# REAL(ReKi), INTENT(IN ) :: Pj(3) ! (x,y,z) positions of leader node\n# REAL(ReKi), INTENT(IN ) :: Pk(3) ! (x,y,z) positions of follower node\n# REAL(ReKi), INTENT( OUT) :: TRigid(6,6) ! Transformation matrix such that xk = T.xj\n# INTEGER(IntKi), INTENT( OUT) :: ErrStat ! Error status of the operation\n# CHARACTER(*), INTENT( OUT) :: ErrMsg ! Error message if ErrStat /= ErrID_None\n# ! Local\n# !REAL(ReKi) :: L ! length of element\n# !REAL(ReKi) :: DirCos(3, 3) ! direction cosine matrix\n# !REAL(ReKi) :: R0(3,3) \n# integer(IntKi) :: I\n# ErrStat = ErrID_None\n# ErrMsg = \"\"\n# \n# ! --- Formulation using Delta of Global coordinates\n# Trigid=0; do I = 1,6; Trigid(I,I) = 1; enddo\n# Trigid ( 1, 5 ) = (Pk(3) - Pj(3))\n# Trigid ( 1, 6 ) = -(Pk(2) - Pj(2))\n# Trigid ( 2, 4 ) = -(Pk(3) - Pj(3))\n# Trigid ( 2, 6 ) = (Pk(1) - Pj(1))\n# Trigid ( 3, 4 ) = (Pk(2) - Pj(2))\n# Trigid ( 3, 5 ) = -(Pk(1) - Pj(1))\n# \n# ! --- Formulation bty transforming the \"local\" matrix into a global one\n# !call GetDirCos(Pj, Pk, R0, L, ErrStat, ErrMsg)\n# !TRigid = 0 ; do I = 1,6; TRigid(I,I) = 1; enddo\n# !TRigid (1, 5) = L\n# !TRigid (2, 4) = -L\n# !TRigid(1:3,4:6) = matmul( R0 , matmul(TRigid(1:3,4:6), transpose(R0)) )\n# \n# ! 
--- Formulation using L and Rotation matrix\n# !TRigid = 0; do I = 1,6; TRigid(I,I) = 1; enddo\n# !TRigid ( 1, 5 ) = L*R0(3,3)\n# !TRigid ( 1, 6 ) = -L*R0(2,3)\n# !TRigid ( 2, 4 ) = -L*R0(3,3)\n# !TRigid ( 2, 6 ) = L*R0(1,3)\n# !TRigid ( 3, 4 ) = L*R0(2,3)\n# !TRigid ( 3, 5 ) = -L*R0(1,3)\n# END SUBROUTINE GetRigidTransformation\n# SUBROUTINE RigidTransformationLine(dx,dy,dz,iLine,Line)\n# real(ReKi), INTENT(IN) :: dx,dy,dz\n# integer(IntKi) , INTENT(IN) :: iLine \n# Real(ReKi), dimension(6), INTENT(OUT) :: Line\n# SELECT CASE (iLine)\n# CASE (1); Line = (/1.0_ReKi, 0.0_ReKi, 0.0_ReKi, 0.0_ReKi, dz, -dy/)\n# CASE (2); Line = (/0.0_ReKi, 1.0_ReKi, 0.0_ReKi, -dz, 0.0_ReKi, dx/)\n# CASE (3); Line = (/0.0_ReKi, 0.0_ReKi, 1.0_ReKi, dy, -dx, 0.0_ReKi/)\n# CASE (4); Line = (/0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 1.0_ReKi, 0.0_ReKi, 0.0_ReKi/)\n# CASE (5); Line = (/0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 1.0_ReKi, 0.0_ReKi/)\n# CASE (6); Line = (/0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 0.0_ReKi, 1.0_ReKi/)\n# CASE DEFAULT\n# Line=-99999999_ReKi\n# print*,'Error in RigidTransformationLine'\n# STOP\n# ! ErrStat = ErrID_Fatal\n# ! ErrMsg = 'Error calculating transformation matrix TI '\n# ! return\n# END SELECT\n# END SUBROUTINE\n\n",
"from distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nimport numpy\n\nsetup(\n cmdclass = {'build_ext': build_ext},\n ext_modules = [Extension(\"lic_internal\", [\"lic_internal.pyx\"],include_dirs=[numpy.get_include()])]\n)\n\n",
"\"\"\"\nTools to work with OLAF the vortex code implemented in openfast\n\"\"\"\nimport numpy as np\n\n\ndef OLAFParams(omega_rpm, deltaPsiDeg=6, nNWrot=2, nFWrot=10, nFWrotFree=3, nPerRot=None, totalRot=None, show=True):\n \"\"\" \n Computes recommended time step and wake length based on the rotational speed in RPM\n\n INPUTS:\n - omega_rpm: rotational speed in RPM\n - deltaPsiDeg : azimuthal discretization in deg\n - nNWrot : number of near wake rotations\n - nFWrot : total number of far wake rotations\n - nFWrotFree : number of far wake rotations that are free\n\n deltaPsiDeg - nPerRot\n 5 72 \n 6 60 \n 7 51.5 \n 8 45 \n \"\"\"\n omega_rpm = np.asarray(omega_rpm)\n omega = omega_rpm*2*np.pi/60\n T = 2*np.pi/omega\n if nPerRot is not None:\n dt_wanted = np.around(T/nPerRot,5)\n else:\n dt_wanted = np.around(deltaPsiDeg/(6*omega_rpm),5)\n nPerRot = int(2*np.pi /(deltaPsiDeg*np.pi/180))\n\n nNWPanel = nNWrot*nPerRot\n nFWPanel = nFWrot*nPerRot\n nFWPanelFree = nFWrotFree*nPerRot\n\n if totalRot is None:\n totalRot = (nNWrot + nFWrot)*3 # going three-times through the entire wake\n\n tMax = dt_wanted*nPerRot*totalRot\n\n if show:\n print(dt_wanted , ' dt')\n print(int (nNWPanel ), ' nNWPanel ({} rotations)'.format(nNWrot))\n print(int (nFWPanel ), ' FarWakeLength ({} rotations)'.format(nFWrot))\n print(int (nFWPanelFree), ' FreeFarWakeLength ({} rotations)'.format(nFWrotFree))\n print(tMax , ' Tmax ({} rotations)'.format(totalRot))\n\n return dt_wanted, tMax, nNWPanel, nFWPanel, nFWPanelFree\n\n\nif __name__ == '__main__':\n OLAFParams(omega_rpm = 4.87558, deltaPsiDeg=6, show=True)\n",
"# Tools for spectral analysis of a real valued signal.\n#\n# The functions in this file were adapted from the python package scipy according to the following license:\n# \n# License: \n# Copyright 2001, 2002 Enthought, Inc.\n# All rights reserved.\n# \n# Copyright 2003-2013 SciPy Developers.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n# \n# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n# Neither the name of Enthought nor the names of the SciPy Developers may be used to endorse or promote products derived from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom six import string_types\n\n__all__ = ['fft_wrap','welch', 'psd', 'fft_amplitude']\n__all__ += ['pwelch', 'csd', 'coherence']\n__all__ += ['fnextpow2']\n__all__ += ['hann','hamming','boxcar','general_hamming','get_window']\n__all__ += ['TestSpectral']\n\n\n# --------------------------------------------------------------------------------}\n# --- FFT wrap\n# --------------------------------------------------------------------------------{\ndef fft_wrap(t,y,dt=None, output_type='amplitude',averaging='None',averaging_window='hamming',detrend=False,nExp=None):\n \"\"\" \n Wrapper to compute FFT amplitude or power spectra, with averaging.\n INPUTS:\n output_type : amplitude, PSD, f x PSD\n averaging_method : None, Welch\n averaging_window : Hamming, Hann, Rectangular\n OUTPUTS:\n frq: vector of frequencies\n Y : Amplitude spectrum, PSD, or f * PSD\n Info: a dictionary of info values\n \"\"\"\n\n # Formatting inputs\n output_type = output_type.lower()\n averaging = averaging.lower()\n averaging_window = averaging_window.lower()\n y = np.asarray(y)\n y = y[~np.isnan(y)]\n n = len(y) \n\n if dt is None:\n dtDelta0 = t[1]-t[0]\n # Hack to use a constant dt\n dt = (np.max(t)-np.min(t))/(n-1)\n if dtDelta0 !=dt:\n print('[WARN] dt from tmax-tmin different from dt from t2-t1' )\n Fs = 1/dt\n if averaging=='none':\n frq, PSD, Info = psd(y, fs=Fs, detrend=detrend, return_onesided=True)\n elif averaging=='welch':\n # --- Welch - PSD\n #overlap_frac=0.5\n #return fnextpow2(np.sqrt(len(x)/(1-overlap_frac)))\n nFFTAll=fnextpow2(n)\n if nExp is None:\n nExp=int(np.log(nFFTAll)/np.log(2))-1\n nPerSeg=2**nExp\n if nPerSeg>n:\n print('[WARN] Power of 2 value was too high and was reduced. 
Disable averaging to use the full spectrum.');\n nExp=int(np.log(nFFTAll)/np.log(2))-1\n nPerSeg=2**nExp\n if averaging_window=='hamming':\n window = hamming(nPerSeg, True)# True=Symmetric, like matlab\n elif averaging_window=='hann':\n window = hann(nPerSeg, True)\n elif averaging_window=='rectangular':\n window = boxcar(nPerSeg)\n else:\n raise Exception('Averaging window unknown {}'.format(averaging_window))\n frq, PSD, Info = pwelch(y, fs=Fs, window=window, detrend=detrend)\n Info.nExp = nExp\n else:\n raise Exception('Averaging method unknown {}'.format(averaging))\n\n # --- Formatting output\n if output_type=='amplitude':\n deltaf = frq[1]-frq[0]\n Y = np.sqrt(PSD*2*deltaf)\n # NOTE: the above should be the same as:Y=abs(Y[range(nhalf)])/n;Y[1:-1]=Y[1:-1]*2;\n elif output_type=='psd': # one sided\n Y = PSD\n elif output_type=='f x psd':\n Y = PSD*frq\n else:\n raise NotImplementedError('Contact developer')\n if detrend:\n frq= frq[1:]\n Y = Y[1:]\n return frq, Y, Info\n\n\n\n# --------------------------------------------------------------------------------}\n# --- Spectral simple (averaging below) \n# --------------------------------------------------------------------------------{\ndef fft_amplitude(y, fs=1.0, detrend ='constant', return_onesided=True):\n \"\"\" Returns FFT amplitude of signal \"\"\"\n frq, PSD, Info = psd(y, fs=fs, detrend=detrend, return_onesided=return_onesided)\n deltaf = frq[1]-frq[0]\n Y = np.sqrt(PSD*2*deltaf)\n return frq, Y, Info\n\ndef psd(y, fs=1.0, detrend ='constant', return_onesided=True):\n \"\"\" Perform PSD without averaging \"\"\"\n if not return_onesided:\n raise NotImplementedError('Double sided todo')\n\n if detrend is None:\n detrend=False\n\n if detrend=='constant' or detrend==True:\n m=np.mean(y);\n else:\n m=0;\n\n n = len(y) \n if n%2==0:\n nhalf = int(n/2+1)\n else:\n nhalf = int((n+1)/2)\n\n frq = np.arange(nhalf)*fs/n;\n Y = np.fft.rfft(y-m) #Y = np.fft.fft(y) \n PSD = abs(Y[range(nhalf)])**2 /(n*fs) # PSD\n PSD[1:-1] = PSD[1:-1]*2;\n class InfoClass():\n pass\n Info = InfoClass();\n Info.df = frq[1]-frq[0]\n Info.fMax = frq[-1]\n Info.LFreq = len(frq)\n Info.LSeg = len(Y)\n Info.LWin = len(Y)\n Info.LOvlp = 0\n Info.nFFT = len(Y)\n Info.nseg = 1\n return frq, PSD, Info\n\n\n# --------------------------------------------------------------------------------}\n# --- Windows \n# --------------------------------------------------------------------------------{\n\"\"\"The suite of window functions.\"\"\"\ndef fnextpow2(x):\n return 2**np.ceil( np.log(x)*0.99999999999/np.log(2));\n\ndef fDefaultWinLen(x,overlap_frac=0.5):\n return fnextpow2(np.sqrt(len(x)/(1-overlap_frac)))\n\ndef fDefaultWinLenMatlab(x):\n return np.fix((len(x)-3)*2./9.)\n\ndef _len_guards(M):\n \"\"\"Handle small or incorrect window lengths\"\"\"\n if int(M) != M or M < 0:\n raise ValueError('Window length M must be a non-negative integer')\n return M <= 1\n\ndef _extend(M, sym):\n \"\"\"Extend window by 1 sample if needed for DFT-even symmetry\"\"\"\n if not sym:\n return M + 1, True\n else:\n return M, False\n\ndef _truncate(w, needed):\n \"\"\"Truncate window by 1 sample if needed for DFT-even symmetry\"\"\"\n if needed:\n return w[:-1]\n else:\n return w\n\ndef general_cosine(M, a, sym=True):\n if _len_guards(M):\n return np.ones(M)\n M, needs_trunc = _extend(M, sym)\n\n fac = np.linspace(-np.pi, np.pi, M)\n w = np.zeros(M)\n for k in range(len(a)):\n w += a[k] * np.cos(k * fac)\n\n return _truncate(w, needs_trunc)\n\n\ndef boxcar(M, sym=True):\n \"\"\"Return a boxcar or 
rectangular window.\n\n Also known as a rectangular window or Dirichlet window, this is equivalent\n to no window at all.\n \"\"\"\n if _len_guards(M):\n return np.ones(M)\n M, needs_trunc = _extend(M, sym)\n\n w = np.ones(M, float)\n\n return _truncate(w, needs_trunc)\n\ndef hann(M, sym=True): # same as hanning(*args, **kwargs):\n return general_hamming(M, 0.5, sym)\n\n\ndef general_hamming(M, alpha, sym=True):\n r\"\"\"Return a generalized Hamming window.\n The generalized Hamming window is constructed by multiplying a rectangular\n window by one period of a cosine function [1]_.\n w(n) = \\alpha - \\left(1 - \\alpha\\right) \\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n \"\"\"\n return general_cosine(M, [alpha, 1. - alpha], sym)\n\n\ndef hamming(M, sym=True):\n r\"\"\"Return a Hamming window.\n The Hamming window is a taper formed by using a raised cosine with\n non-zero endpoints, optimized to minimize the nearest side lobe.\n w(n) = 0.54 - 0.46 \\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n \"\"\"\n return general_hamming(M, 0.54, sym)\n\n_win_equiv_raw = {\n ('boxcar', 'box', 'ones', 'rect', 'rectangular'): (boxcar, False),\n ('hamming', 'hamm', 'ham'): (hamming, False),\n ('hanning', 'hann', 'han'): (hann, False),\n}\n\n# Fill dict with all valid window name strings\n_win_equiv = {}\nfor k, v in _win_equiv_raw.items():\n for key in k:\n _win_equiv[key] = v[0]\n\n# Keep track of which windows need additional parameters\n_needs_param = set()\nfor k, v in _win_equiv_raw.items():\n if v[1]:\n _needs_param.update(k)\n\n\ndef get_window(window, Nx, fftbins=True):\n \"\"\"\n Return a window.\n\n Parameters\n ----------\n window : string, float, or tuple\n The type of window to create. See below for more details.\n Nx : int\n The number of samples in the window.\n fftbins : bool, optional\n If True (default), create a \"periodic\" window, ready to use with\n `ifftshift` and be multiplied by the result of an FFT (see also\n `fftpack.fftfreq`).\n If False, create a \"symmetric\" window, for use in filter design.\n \"\"\"\n sym = not fftbins\n try:\n beta = float(window)\n except (TypeError, ValueError):\n args = ()\n if isinstance(window, tuple):\n winstr = window[0]\n if len(window) > 1:\n args = window[1:]\n elif isinstance(window, string_types):\n if window in _needs_param:\n raise ValueError(\"The '\" + window + \"' window needs one or \"\n \"more parameters -- pass a tuple.\")\n else:\n winstr = window\n else:\n raise ValueError(\"%s as window type is not supported.\" %\n str(type(window)))\n\n try:\n winfunc = _win_equiv[winstr]\n except KeyError:\n raise ValueError(\"Unknown window type.\")\n\n params = (Nx,) + args + (sym,)\n else:\n winfunc = kaiser\n params = (Nx, beta, sym)\n\n return winfunc(*params)\n\n\n\n\n\n\n# --------------------------------------------------------------------------------}\n# --- Helpers \n# --------------------------------------------------------------------------------{\ndef odd_ext(x, n, axis=-1):\n \"\"\"\n Odd extension at the boundaries of an array\n Generate a new ndarray by making an odd extension of `x` along an axis.\n \"\"\"\n if n < 1:\n return x\n if n > x.shape[axis] - 1:\n raise ValueError((\"The extension length n (%d) is too big. 
\" +\n \"It must not exceed x.shape[axis]-1, which is %d.\")\n % (n, x.shape[axis] - 1))\n left_end = axis_slice(x, start=0, stop=1, axis=axis)\n left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)\n right_end = axis_slice(x, start=-1, axis=axis)\n right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)\n ext = np.concatenate((2 * left_end - left_ext,\n x,\n 2 * right_end - right_ext),\n axis=axis)\n return ext\n\n\ndef even_ext(x, n, axis=-1):\n \"\"\"\n Even extension at the boundaries of an array\n Generate a new ndarray by making an even extension of `x` along an axis.\n \"\"\"\n if n < 1:\n return x\n if n > x.shape[axis] - 1:\n raise ValueError((\"The extension length n (%d) is too big. \" +\n \"It must not exceed x.shape[axis]-1, which is %d.\")\n % (n, x.shape[axis] - 1))\n left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)\n right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)\n ext = np.concatenate((left_ext,\n x,\n right_ext),\n axis=axis)\n return ext\n\n\ndef const_ext(x, n, axis=-1):\n \"\"\"\n Constant extension at the boundaries of an array\n Generate a new ndarray that is a constant extension of `x` along an axis.\n The extension repeats the values at the first and last element of\n the axis.\n \"\"\"\n if n < 1:\n return x\n left_end = axis_slice(x, start=0, stop=1, axis=axis)\n ones_shape = [1] * x.ndim\n ones_shape[axis] = n\n ones = np.ones(ones_shape, dtype=x.dtype)\n left_ext = ones * left_end\n right_end = axis_slice(x, start=-1, axis=axis)\n right_ext = ones * right_end\n ext = np.concatenate((left_ext,\n x,\n right_ext),\n axis=axis)\n return ext\n\n\ndef zero_ext(x, n, axis=-1):\n \"\"\"\n Zero padding at the boundaries of an array\n Generate a new ndarray that is a zero padded extension of `x` along\n an axis.\n \"\"\"\n if n < 1:\n return x\n zeros_shape = list(x.shape)\n zeros_shape[axis] = n\n zeros = np.zeros(zeros_shape, dtype=x.dtype)\n ext = np.concatenate((zeros, x, zeros), axis=axis)\n return ext\n\ndef signaltools_detrend(data, axis=-1, type='linear', bp=0):\n \"\"\"\n Remove linear trend along axis from data.\n\n Parameters\n ----------\n data : array_like\n The input data.\n axis : int, optional\n The axis along which to detrend the data. By default this is the\n last axis (-1).\n type : {'linear', 'constant'}, optional\n The type of detrending. If ``type == 'linear'`` (default),\n the result of a linear least-squares fit to `data` is subtracted\n from `data`.\n If ``type == 'constant'``, only the mean of `data` is subtracted.\n bp : array_like of ints, optional\n A sequence of break points. 
If given, an individual linear fit is\n performed for each part of `data` between two break points.\n Break points are specified as indices into `data`.\n\n Returns\n -------\n ret : ndarray\n The detrended input data.\n \"\"\"\n if type not in ['linear', 'l', 'constant', 'c']:\n raise ValueError(\"Trend type must be 'linear' or 'constant'.\")\n data = np.asarray(data)\n dtype = data.dtype.char\n if dtype not in 'dfDF':\n dtype = 'd'\n if type in ['constant', 'c']:\n #print('Removing mean')\n ret = data - np.expand_dims(np.mean(data, axis), axis)\n return ret\n else:\n #print('Removing linear?')\n dshape = data.shape\n N = dshape[axis]\n bp = sort(unique(r_[0, bp, N]))\n if np.any(bp > N):\n raise ValueError(\"Breakpoints must be less than length \"\n \"of data along given axis.\")\n Nreg = len(bp) - 1\n # Restructure data so that axis is along first dimension and\n # all other dimensions are collapsed into second dimension\n rnk = len(dshape)\n if axis < 0:\n axis = axis + rnk\n newdims = r_[axis, 0:axis, axis + 1:rnk]\n newdata = reshape(np.transpose(data, tuple(newdims)),\n (N, _prod(dshape) // N))\n newdata = newdata.copy() # make sure we have a copy\n if newdata.dtype.char not in 'dfDF':\n newdata = newdata.astype(dtype)\n # Find leastsq fit and remove it for each piece\n for m in range(Nreg):\n Npts = bp[m + 1] - bp[m]\n A = ones((Npts, 2), dtype)\n A[:, 0] = cast[dtype](np.arange(1, Npts + 1) * 1.0 / Npts)\n sl = slice(bp[m], bp[m + 1])\n coef, resids, rank, s = np.linalg.lstsq(A, newdata[sl])\n newdata[sl] = newdata[sl] - dot(A, coef)\n # Put data back in original shape.\n tdshape = take(dshape, newdims, 0)\n ret = np.reshape(newdata, tuple(tdshape))\n vals = list(range(1, rnk))\n olddims = vals[:axis] + [0] + vals[axis:]\n ret = np.transpose(ret, tuple(olddims))\n return ret\n\n\n\n# --------------------------------------------------------------------------------}\n# --- Spectral Averaging\n# --------------------------------------------------------------------------------{\n\"\"\"Tools for spectral analysis. \"\"\"\n\ndef welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,\n detrend='constant', return_onesided=True, scaling='density',\n axis=-1):\n \"\"\"Interface identical to scipy.signal \"\"\"\n\n if detrend==True:\n detrend='constant'\n\n freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis)\n return freqs, Pxx.real\n\n#>>>>\ndef pwelch(x, window='hamming', noverlap=None, nfft=None, fs=1.0, nperseg=None, \n detrend=False, return_onesided=True, scaling='density',\n axis=-1):\n r\"\"\"\n NOTE: interface and default options modified to match matlab's implementation\n >> detrend: default to False\n >> window : default to 'hamming'\n >> window: if an integer, use 'hamming(window, sym=True)'\n\n\n Estimate power spectral density using Welch's method.\n\n Welch's method [1]_ computes an estimate of the power spectral\n density by dividing the data into overlapping segments, computing a\n modified periodogram for each segment and averaging the\n periodograms.\n\n Parameters\n ----------\n x : array_like\n Time series of measurement values\n fs : float, optional\n Sampling frequency of the `x` time series. Defaults to 1.0.\n window : str or tuple or array_like, optional\n Desired window to use. If `window` is a string or tuple, it is\n passed to `get_window` to generate the window values, which are\n DFT-even by default. See `get_window` for a list of windows and\n required parameters. 
If `window` is array_like it will be used\n directly as the window and its length must be nperseg. Defaults\n to a Hann window.\n nperseg : int, optional\n Length of each segment. Defaults to None, but if window is str or\n tuple, is set to 256, and if window is array_like, is set to the\n length of the window.\n noverlap : int, optional\n Number of points to overlap between segments. If `None`,\n ``noverlap = nperseg // 2``. Defaults to `None`.\n nfft : int, optional\n Length of the FFT used, if a zero padded FFT is desired. If\n `None`, the FFT length is `nperseg`. Defaults to `None`.\n detrend : str or function or `False`, optional\n Specifies how to detrend each segment. If `detrend` is a\n string, it is passed as the `type` argument to the `detrend`\n function. If it is a function, it takes a segment and returns a\n detrended segment. If `detrend` is `False`, no detrending is\n done. Defaults to 'constant'.\n return_onesided : bool, optional\n If `True`, return a one-sided spectrum for real data. If\n `False` return a two-sided spectrum. Note that for complex\n data, a two-sided spectrum is always returned.\n scaling : { 'density', 'spectrum' }, optional\n Selects between computing the power spectral density ('density')\n where `Pxx` has units of V**2/Hz and computing the power\n spectrum ('spectrum') where `Pxx` has units of V**2, if `x`\n is measured in V and `fs` is measured in Hz. Defaults to\n 'density'\n axis : int, optional\n Axis along which the periodogram is computed; the default is\n over the last axis (i.e. ``axis=-1``).\n\n Returns\n -------\n f : ndarray\n Array of sample frequencies.\n Pxx : ndarray\n Power spectral density or power spectrum of x.\n\n See Also\n --------\n periodogram: Simple, optionally modified periodogram\n lombscargle: Lomb-Scargle periodogram for unevenly sampled data\n\n Notes\n -----\n An appropriate amount of overlap will depend on the choice of window\n and on your requirements. For the default Hann window an overlap of\n 50% is a reasonable trade off between accurately estimating the\n signal power, while not over counting any of the data. Narrower\n windows may require a larger overlap.\n\n If `noverlap` is 0, this method is equivalent to Bartlett's method\n [2]_.\n\n .. versionadded:: 0.12.0\n\n References\n ----------\n .. [1] P. Welch, \"The use of the fast Fourier transform for the\n estimation of power spectra: A method based on time averaging\n over short, modified periodograms\", IEEE Trans. Audio\n Electroacoust. vol. 15, pp. 70-73, 1967.\n .. [2] M.S. Bartlett, \"Periodogram Analysis and Continuous Spectra\",\n Biometrika, vol. 37, pp. 
1-16, 1950.\n\n \"\"\"\n import math\n def fnextpow2(x):\n return 2**math.ceil( math.log(x)*0.99999999999/math.log(2));\n\n # MANU >>> CHANGE OF DEFAULT OPTIONS\n # MANU - If a length is provided use symmetric hamming window\n if type(window)==int:\n window=hamming(window, True) \n # MANU - do not use 256 as default\n if isinstance(window, string_types) or isinstance(window, tuple):\n if nperseg is None:\n if noverlap is None:\n overlap_frac=0.5\n elif noverlap == 0:\n overlap_frac=0\n else:\n raise NotImplementedError('TODO noverlap set but not nperseg')\n #nperseg = 256 # then change to default\n nperseg=fnextpow2(math.sqrt(x.shape[-1]/(1-overlap_frac)));\n\n # MANU accepting true as detrend\n if detrend==True:\n detrend='constant'\n\n freqs, Pxx, Info = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,\n return_onesided, scaling, axis)\n\n return freqs, Pxx.real, Info\n\n\ndef csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,\n detrend='constant', return_onesided=True, scaling='density', axis=-1):\n r\"\"\"\n Estimate the cross power spectral density, Pxy, using Welch's\n method.\n \"\"\"\n\n freqs, _, Pxy, Info = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,\n detrend, return_onesided, scaling, axis,\n mode='psd')\n\n # Average over windows.\n if len(Pxy.shape) >= 2 and Pxy.size > 0:\n if Pxy.shape[-1] > 1:\n Pxy = Pxy.mean(axis=-1)\n else:\n Pxy = np.reshape(Pxy, Pxy.shape[:-1])\n\n return freqs, Pxy, Info\n\n\n\ndef coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,\n nfft=None, detrend='constant', axis=-1):\n r\"\"\"\n Estimate the magnitude squared coherence estimate, Cxy, of\n discrete-time signals X and Y using Welch's method.\n\n ``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power\n spectral density estimates of X and Y, and `Pxy` is the cross\n spectral density estimate of X and Y.\n \"\"\"\n\n freqs, Pxx, Infoxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)\n _, Pyy, Infoyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)\n _, Pxy, Infoxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)\n\n Cxy = np.abs(Pxy)**2 / Pxx / Pyy\n\n return freqs, Cxy, Infoxx\n\n\ndef _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,\n nfft=None, detrend='constant', return_onesided=True,\n scaling='spectrum', axis=-1, mode='psd', boundary=None,\n padded=False):\n \"\"\" Calculate various forms of windowed FFTs for PSD, CSD, etc. 
\"\"\"\n if mode not in ['psd', 'stft']:\n raise ValueError(\"Unknown value for mode %s, must be one of: \"\n \"{'psd', 'stft'}\" % mode)\n \n\n\n\n\n boundary_funcs = {'even': even_ext,\n 'odd': odd_ext,\n 'constant': const_ext,\n 'zeros': zero_ext,\n None: None}\n\n if boundary not in boundary_funcs:\n raise ValueError(\"Unknown boundary option '{0}', must be one of: {1}\"\n .format(boundary, list(boundary_funcs.keys())))\n\n # If x and y are the same object we can save ourselves some computation.\n same_data = y is x\n\n if not same_data and mode != 'psd':\n raise ValueError(\"x and y must be equal if mode is 'stft'\")\n\n axis = int(axis)\n\n # Ensure we have np.arrays, get outdtype\n x = np.asarray(x)\n if not same_data:\n y = np.asarray(y)\n outdtype = np.result_type(x, y, np.complex64)\n else:\n outdtype = np.result_type(x, np.complex64)\n\n if not same_data:\n # Check if we can broadcast the outer axes together\n xouter = list(x.shape)\n youter = list(y.shape)\n xouter.pop(axis)\n youter.pop(axis)\n try:\n outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape\n except ValueError:\n raise ValueError('x and y cannot be broadcast together.')\n\n if same_data:\n if x.size == 0:\n return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)\n else:\n if x.size == 0 or y.size == 0:\n outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)\n emptyout = np.rollaxis(np.empty(outshape), -1, axis)\n return emptyout, emptyout, emptyout\n\n if x.ndim > 1:\n if axis != -1:\n x = np.rollaxis(x, axis, len(x.shape))\n if not same_data and y.ndim > 1:\n y = np.rollaxis(y, axis, len(y.shape))\n\n # Check if x and y are the same length, zero-pad if necessary\n if not same_data:\n if x.shape[-1] != y.shape[-1]:\n if x.shape[-1] < y.shape[-1]:\n pad_shape = list(x.shape)\n pad_shape[-1] = y.shape[-1] - x.shape[-1]\n x = np.concatenate((x, np.zeros(pad_shape)), -1)\n else:\n pad_shape = list(y.shape)\n pad_shape[-1] = x.shape[-1] - y.shape[-1]\n y = np.concatenate((y, np.zeros(pad_shape)), -1)\n\n if nperseg is not None: # if specified by user\n nperseg = int(nperseg)\n if nperseg < 1:\n raise ValueError('nperseg must be a positive integer')\n\n # parse window; if array like, then set nperseg = win.shape\n win, nperseg = _triage_segments(window, nperseg,input_length=x.shape[-1])\n\n if nfft is None:\n nfft = nperseg\n elif nfft < nperseg:\n raise ValueError('nfft must be greater than or equal to nperseg.')\n else:\n nfft = int(nfft)\n\n if noverlap is None:\n noverlap = nperseg//2\n else:\n noverlap = int(noverlap)\n if noverlap >= nperseg:\n raise ValueError('noverlap must be less than nperseg.')\n nstep = nperseg - noverlap\n\n # Padding occurs after boundary extension, so that the extended signal ends\n # in zeros, instead of introducing an impulse at the end.\n # I.e. 
if x = [..., 3, 2]\n # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]\n # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]\n\n if boundary is not None:\n ext_func = boundary_funcs[boundary]\n x = ext_func(x, nperseg//2, axis=-1)\n if not same_data:\n y = ext_func(y, nperseg//2, axis=-1)\n\n if padded:\n # Pad to integer number of windowed segments\n # I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg\n nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg\n zeros_shape = list(x.shape[:-1]) + [nadd]\n x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)\n if not same_data:\n zeros_shape = list(y.shape[:-1]) + [nadd]\n y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)\n\n # Handle detrending and window functions\n if not detrend:\n def detrend_func(d):\n return d\n elif not hasattr(detrend, '__call__'):\n def detrend_func(d):\n return signaltools_detrend(d, type=detrend, axis=-1)\n elif axis != -1:\n # Wrap this function so that it receives a shape that it could\n # reasonably expect to receive.\n def detrend_func(d):\n d = np.rollaxis(d, -1, axis)\n d = detrend(d)\n return np.rollaxis(d, axis, len(d.shape))\n else:\n detrend_func = detrend\n\n if np.result_type(win,np.complex64) != outdtype:\n win = win.astype(outdtype)\n\n if scaling == 'density':\n scale = 1.0 / (fs * (win*win).sum())\n elif scaling == 'spectrum':\n scale = 1.0 / win.sum()**2\n else:\n raise ValueError('Unknown scaling: %r' % scaling)\n\n if mode == 'stft':\n scale = np.sqrt(scale)\n\n if return_onesided:\n if np.iscomplexobj(x):\n sides = 'twosided'\n #warnings.warn('Input data is complex, switching to ' 'return_onesided=False')\n else:\n sides = 'onesided'\n if not same_data:\n if np.iscomplexobj(y):\n sides = 'twosided'\n #warnings.warn('Input data is complex, switching to return_onesided=False')\n else:\n sides = 'twosided'\n\n if sides == 'twosided':\n raise Exception('NOT IMPLEMENTED')\n #freqs = fftpack.fftfreq(nfft, 1/fs)\n elif sides == 'onesided':\n freqs = np.fft.rfftfreq(nfft, 1/fs)\n\n # Perform the windowed FFTs\n result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)\n\n if not same_data:\n # All the same operations on the y data\n result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,\n sides)\n result = np.conjugate(result) * result_y\n elif mode == 'psd':\n result = np.conjugate(result) * result\n\n result *= scale\n if sides == 'onesided' and mode == 'psd':\n if nfft % 2:\n result[..., 1:] *= 2\n else:\n # Last point is unpaired Nyquist freq point, don't double\n result[..., 1:-1] *= 2\n\n time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,\n nperseg - noverlap)/float(fs)\n if boundary is not None:\n time -= (nperseg/2) / fs\n\n result = result.astype(outdtype)\n\n # All imaginary parts are zero anyways\n if same_data and mode != 'stft':\n result = result.real\n\n # Output is going to have new last axis for time/window index, so a\n # negative axis index shifts down one\n if axis < 0:\n axis -= 1\n\n # Roll frequency axis back to axis where the data came from\n result = np.rollaxis(result, -1, axis)\n\n # TODO\n class InfoClass():\n pass\n Info = InfoClass();\n Info.df=freqs[1]-freqs[0]\n Info.fMax=freqs[-1]\n Info.LFreq=len(freqs)\n Info.LSeg=nperseg\n Info.LWin=len(win)\n Info.LOvlp=noverlap\n Info.nFFT=nfft\n Info.nseg=-1\n #print('df:{:.3f} - fm:{:.2f} - nseg:{} - Lf:{:5d} - Lseg:{:5d} - Lwin:{:5d} - Lovlp:{:5d} - Nfft:{:5d} - Lsig:{}'.format(freqs[1]-freqs[0],freqs[-1],-1,len(freqs),nperseg,len(win),noverlap,nfft,x.shape[-1]))\n return 
freqs, time, result, Info\n\n\ndef _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):\n \"\"\" Calculate windowed FFT \"\"\"\n # Created strided array of data segments\n if nperseg == 1 and noverlap == 0:\n result = x[..., np.newaxis]\n else:\n # http://stackoverflow.com/a/5568169\n step = nperseg - noverlap\n shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)\n strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])\n result = np.lib.stride_tricks.as_strided(x, shape=shape,\n strides=strides)\n\n # Detrend each data segment individually\n result = detrend_func(result)\n\n # Apply window by multiplication\n result = win * result\n\n # Perform the fft. Acts on last axis by default. Zero-pads automatically\n if sides == 'twosided':\n raise Exception('NOT IMPLEMENTED')\n #func = fftpack.fft\n else:\n result = result.real\n func = np.fft.rfft\n result = func(result, n=nfft)\n\n return result\n\ndef _triage_segments(window, nperseg,input_length):\n \"\"\"\n Parses window and nperseg arguments for spectrogram and _spectral_helper.\n This is a helper function, not meant to be called externally.\n \"\"\"\n\n #parse window; if array like, then set nperseg = win.shape\n if isinstance(window, string_types) or isinstance(window, tuple):\n # if nperseg not specified\n if nperseg is None:\n nperseg = 256 # then change to default\n if nperseg > input_length:\n print('nperseg = {0:d} is greater than input length '\n ' = {1:d}, using nperseg = {1:d}'\n .format(nperseg, input_length))\n nperseg = input_length\n win = get_window(window, nperseg)\n else:\n win = np.asarray(window)\n if len(win.shape) != 1:\n raise ValueError('window must be 1-D')\n if input_length < win.shape[-1]:\n raise ValueError('window is longer than input signal')\n if nperseg is None:\n nperseg = win.shape[0]\n elif nperseg is not None:\n if nperseg != win.shape[0]:\n raise ValueError(\"value specified for nperseg is different from\"\n \" length of window\")\n\n return win, nperseg\n\n\n\n\n\n\n# --------------------------------------------------------------------------------}\n# --- Unittests\n# --------------------------------------------------------------------------------{\nimport unittest\n\nclass TestSpectral(unittest.TestCase):\n\n def test_fft_amplitude(self):\n dt=0.1\n t=np.arange(0,10,dt);\n f0=1;\n A=5;\n y=A*np.sin(2*np.pi*f0*t)\n f,Y,_=fft_amplitude(y,fs=1/dt,detrend=False)\n i=np.argmax(Y)\n self.assertAlmostEqual(Y[i],A)\n self.assertAlmostEqual(f[i],f0)\n \nif __name__ == '__main__':\n unittest.main()\n\n",
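A standalone numerical check (numpy only, example signal) of the one-sided PSD and amplitude conventions used by `psd` and `fft_amplitude` above: for a pure sine of amplitude A that falls on a frequency bin, sqrt(2 * df * PSD) recovers A at the peak.

import numpy as np

dt, f0, A = 0.01, 5.0, 3.0
t = np.arange(0, 10, dt)                    # 1000 samples, exactly 50 periods of f0
y = A * np.sin(2 * np.pi * f0 * t)
n = len(y)
nhalf = n // 2 + 1 if n % 2 == 0 else (n + 1) // 2
frq = np.arange(nhalf) / (n * dt)           # frequency vector [Hz]
Y = np.fft.rfft(y - np.mean(y))
PSD = np.abs(Y[:nhalf]) ** 2 * dt / n       # same normalization as psd() above
PSD[1:-1] *= 2                              # one-sided: double all but DC/Nyquist
df = frq[1] - frq[0]
i = np.argmax(PSD)
assert abs(frq[i] - f0) <= df               # peak at the sine frequency
assert abs(np.sqrt(2 * df * PSD[i]) - A) / A < 0.05   # amplitude recovered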
"\"\"\" \nClasses and tools to easily set up a FEM model made of beam elements\n\n\nReferences:\n\n\n [2] Richard Schwertassek, Oskar Wallrapp\n \"Dynamik Flexibler Mehrkoerpersysteme : Methoden Der Mechanik \n\n\n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport scipy\nfrom welib.FEM.utils import skew \nfrom welib.system.eva import eig\n\n# --------------------------------------------------------------------------------}\n# --- Main wrapper functions \n# --------------------------------------------------------------------------------{\ndef cbeam(xNodes, m, EIx=None, EIy=None, EIz=None, EA=None, A=None, Kt=None, E=None, G=None, phi=None, \n element='frame3d', nel=None,\n BC='clamped-free', M_root=None, M_tip=None, K_root=None, K_tip=None\n ):\n \"\"\" \n Returns finite element model of a continuous beam\n For uniform or straight beams, the beam is assumed to be along the x direction.\n \n NOTE: input values can be vectors or scalars.\n If they are scalars, then a beam with constant properties and of length L=xNodes is used;\n If they are vectors, values per element are required\n then linear interpolation is used. The dimension of the inputs does not need to match nel\n\n INPUTS\n - xNodes: define beam length, beam spanwise positions or beam nodes, either:\n - (scalar) Beam length, for uniform beam [m]\n - (1xn) Span vector of the beam (for straight beams) [m]\n - (2xn) Nodes positions x,z along the beam for 2d beam [m]\n - (3xn) Nodes positions x,y,z along the beam for 3d beam [m]\n\n - m : (n) Mass per length along the beam, at nodes [kg/m]\n\n - A : (n) Beam cross section area along the beam, at nodes [m^2]\n\n - EIx : (n) Elastic Modulus times Second Moment of Area of cross section, at nodes [Nm2]\n - EIy : (n) Elastic Modulus times Second Moment of Area of cross section, at nodes [Nm2]\n - EIz : (n) Elastic Modulus times Second Moment of Area of cross section, at nodes [Nm2]\n\n - Kt : (n) Torsion constant, at nodes [m^4]\n\n - G : (scalar) Shear modulus. Steel: 79.3 [Pa] [N/m^2]\n - E : (scalar) Elastic (Young) modulus\n\n - phi : (1xn) rotation of principal axes wrt mean line (tangent) of the beam [rad], at nodes\n\n - element: specify the element type to use along the beam: \n 'frame3d'\n 'frame3dlin'\n 'beam2d'\n\n - nel : Number of elements. If provided Structural propeties and nodes will be interpolated to match nel. 
\n Otherwise, the length of xNodes determines the discretization\n\n - BC: string defining boundary condition:\n -'clamped-free': clamped at root, free at tip\n -'free-free': free at root, free at tip\n\n - M_root/tip: (6x6) additional rigid body mass matrix at beam ends\n\n - K_root/tip: (6x6) additional stiffness matrix at beam ends\n \n OUTPUTS\n FEM: dictionary with keys:\n - MM: (nDOF x nDOF) Mass matrix (before BC)\n - KK: (nDOF x nDOF) Stiffness matrix (before BC)\n - MMr: (nr x nr) Mass matrix (after BC)\n - KKr: (nr x nr) Stiffness matrix (after BC)\n - Tr: (n x nr) Boundary condition transformation matrix\n - xNodes : (3 x nel+1) Nodes locations\n - Q : (nr x nr) Normalized Modes\n - modeNames : (<=nr) Identified modes names\n - freq : (nr) Frequencies\n \"\"\"\n # --- Assembly full FEM system\n MM_, KK_, xNodes, DCM, Elem2Nodes, Nodes2DOF, Elem2DOF=cbeam_assembly(xNodes,m,EIx=EIx,EIy=EIy,EIz=EIz,EA=EA,A=A,E=E,G=G,Kt=Kt,phi=phi,nel=nel,element=element)\n\n # --- Apply boundary conditions (clamped at root, free at tip)\n MM, KK, Tr, IFull2BC, IBC2Full = applyBC(MM_, KK_, Elem2Nodes, Nodes2DOF, BC=BC, K_root=K_root, M_root=M_root, K_tip=K_tip, M_tip=M_tip)\n\n # --- Compute modes and frequencies\n [Q, freq]= eig(KK, MM, freq_out=True)\n\n # --- Compute modes and frequencies\n Q = insertBCinModes(Q, Tr)\n Q, modeNames = identifyAndNormalizeModes(Q, nModes=20)\n\n # --- Return a dictionary\n FEM={'xNodes':xNodes, 'MM':MM, 'KK':KK, 'MM_full':MM_,'KK_full':KK_,'Tr':Tr,\n 'IFull2BC':IFull2BC, 'IBC2Full':IBC2Full,\n 'Elem2Nodes':Elem2Nodes, 'Nodes2DOF':Nodes2DOF, 'Elem2DOF':Elem2DOF,\n 'Q':Q,'freq':freq, 'modeNames':modeNames}\n return FEM\n\n\n\n# --------------------------------------------------------------------------------}\n# --- Craig Bampton \n# --------------------------------------------------------------------------------{\ndef CB_topNode(FEM, nCB=0, element='frame3d', main_axis='x'):\n \"\"\"\n Perform a Craig-Bampton reduction assume the top node is constrained\n \"\"\"\n from welib.FEM.reduction import CraigBampton\n MM = FEM['MM']\n KK = FEM['KK']\n xNodes = FEM['xNodes']\n # Find top node DOF\n IDOF_tip = FEM['Nodes2DOF'][FEM['Elem2Nodes'][-1,:][1],:] # NOTE: index in full system\n Ileader=FEM['IFull2BC'][IDOF_tip] # NOTE: index in system with BC\n # --- Craig-Bampton reduction\n MMr, KKr, Phi_G, Phi_CB, f_G, f_CB = CraigBampton(MM, KK, Ileader, nModesCB=nCB, Ifollow=None, F=None, DD=None, fullModesOut=True)\n\n CB=dict()\n CB['MM'] = MMr\n CB['KK'] = KKr\n CB['Phi_G'] = Phi_G\n CB['Phi_CB'] = Phi_CB\n CB['f_G'] = f_G\n CB['f_CB'] = f_CB\n\n\n # Insert Boundary conditions back in mode\n Q_G = insertBCinModes(Phi_G, FEM['Tr'])\n Q_CB = insertBCinModes(Phi_CB, FEM['Tr'])\n\n # Identify modes for convenience\n _, names_G= identifyAndNormalizeModes(Q_G, element=element, normalize=False)\n _, names_CB= identifyAndNormalizeModes(Q_CB, element=element, normalize=False)\n\n if main_axis!='x':\n # Perform permutations\n raise NotImplementedError()\n\n if element =='frame3d':\n DN = ['ux','uy','uz','tx','ty','tz']\n else:\n raise NotImplementedError()\n\n # --- Create dataframe and mode dict for Guyan modes\n MN = ['G{}'.format(i+1) for i in np.arange(Q_G.shape[1])]\n M=FEM['xNodes'][0,:]\n Modes_G=dict()\n for i,mn in enumerate(names_G):\n ModeComp = [Q_G[0::6,i],Q_G[1::6,i], Q_G[2::6,i], Q_G[3::6,i], Q_G[4::6,i], Q_G[5::6,i]]\n Modes_G[mn]=dict()\n Modes_G[mn]['label'] = names_G[i]\n Modes_G[mn]['comp'] = np.column_stack(ModeComp)\n Modes_G[mn]['raw'] = Q_G[:,i]\n\n M= 
np.column_stack([M]+ModeComp)\n colnames=['x']+[m+'_'+d for m in MN for d in DN]\n df_G=pd.DataFrame(data=M, columns=colnames)\n\n # --- Create dataframe and mode dict for CB modes\n MN = ['CB{}'.format(i+1) for i in np.arange(Q_CB.shape[1])]\n M=FEM['xNodes'][0,:]\n Modes_CB=dict()\n for i,mn in enumerate(names_CB):\n ModeComp = [Q_CB[0::6,i],Q_CB[1::6,i], Q_CB[2::6,i], Q_CB[3::6,i], Q_CB[4::6,i], Q_CB[5::6,i]]\n Modes_CB[mn]=dict()\n Modes_CB[mn]['label'] = names_CB[i]\n Modes_CB[mn]['comp'] = np.column_stack(ModeComp)\n Modes_CB[mn]['raw'] = Q_CB[:,i]\n M= np.column_stack([M]+ModeComp)\n colnames=['x']+[m+'_'+d for m in MN for d in DN]\n df_CB=pd.DataFrame(data=M, columns=colnames)\n\n return Q_G, Q_CB, df_G, df_CB, Modes_G, Modes_CB, CB\n\n\n\n# --------------------------------------------------------------------------------}\n# --- Helpers, consider adding to utils \n# --------------------------------------------------------------------------------{\ndef rigidBodyMassMatrixAtP(m=None, J_G=None, Ref2COG=None):\n \"\"\" \n Rigid body mass matrix (6x6) at a given reference point: \n the center of gravity (if Ref2COG is None) \n\n\n INPUTS:\n - m/tip: (scalar) body mass \n default: None, no mass\n - J_G: (3-vector or 3x3 matrix), diagonal coefficients or full inertia matrix\n with respect to COG of body! \n The inertia is transferred to the reference point if Ref2COG is not None\n default: None \n - Ref2COG: (3-vector) x,y,z position of center of gravity (COG) with respect to a reference point\n default: None, at first/last node.\n OUTPUTS:\n - M66 (6x6) : rigid body mass matrix at COG or given point \n \"\"\"\n # Default values\n if m is None: m=0\n if Ref2COG is None: Ref2COG=(0,0,0)\n if J_G is None: J_G=np.zeros((3,3))\n if len(J_G.flatten()==3): J_G = np.eye(3).dot(J_G)\n\n M66 = np.zeros((6,6))\n x,y,z = Ref2COG\n Jxx,Jxy,Jxz = J_G[0,:]\n _ ,Jyy,Jyz = J_G[1,:]\n _ ,_ ,Jzz = J_G[2,:]\n M66[0, :] =[ m , 0 , 0 , 0 , z*m , -y*m ]\n M66[1, :] =[ 0 , m , 0 , -z*m , 0 , x*m ]\n M66[2, :] =[ 0 , 0 , m , y*m , -x*m , 0 ]\n M66[3, :] =[ 0 , -z*m , y*m , Jxx + m*(y**2+z**2) , Jxy - m*x*y , Jxz - m*x*z ]\n M66[4, :] =[ z*m , 0 , -x*m , Jxy - m*x*y , Jyy + m*(x**2+z**2) , Jyz - m*y*z ]\n M66[5, :] =[ -y*m , x*m , 0 , Jxz - m*x*z , Jyz - m*y*z , Jzz + m*(x**2+y**2) ]\n return M66\n\ndef LinearDOFMapping(nElem, nNodesPerElem, nDOFperNode):\n \"\"\" \n returns the mappings from nodes to DOF and element to nodes and DOF\n for a structure with the same type of elements, assuming nodes are one after the other\n \"\"\"\n nNodes = (nNodesPerElem-1)*nElem+1 # total number of nodes in system\n Nodes2DOF=np.zeros((nNodes,nDOFperNode), dtype=int)\n for i in np.arange(nNodes):\n Nodes2DOF[i,:]=np.arange(i*6, (i+1)*6) \n Elem2DOF=np.zeros((nElem,nDOFperNode*nNodesPerElem),dtype=int)\n for i in np.arange(nElem):\n Elem2DOF[i,:]=np.concatenate((Nodes2DOF[i,:], Nodes2DOF[i+1,:]))\n Elem2Nodes=np.zeros((nElem,nNodesPerElem), dtype=int)\n for i in np.arange(nElem):\n Elem2Nodes[i,:]=(i,i+1)\n return Elem2Nodes, Nodes2DOF, Elem2DOF\n\ndef ElementDOFIndex(iel,nnel,ndof):\n \"\"\"\n Compute system dofs associated with each element in one- dimensional problem\n \n INPUTS:\n DOFindex - system dof vector associated with element \"iel\"\n iel - element number whose system dofs are to be determined\n nnel - number of nodes per element\n ndof - number of dofs per node \n \"\"\"\n edof = nnel*ndof \n iStart = (iel)*(nnel-1)*ndof\n DOFindex=iStart+np.arange(0,edof)\n return DOFindex\n\ndef BuildGlobalMatrix(KK, Ke, index):\n 
\"\"\"Assembly of element matrices into the system matrix\n INPUTS\n KK - system matrix\n Ke - element matrix\n index - d.o.f. vector associated with an element\n \"\"\"\n for i,ii in enumerate(index):\n for j,jj in enumerate(index):\n KK[ii,jj] += Ke[i,j]\n #\n #KK[np.ix_(index,index)] += Ke\n return KK\n\n\n# --------------------------------------------------------------------------------}\n# --- Multi purpose assembly method \n# --------------------------------------------------------------------------------{\ndef cbeam_assembly(xNodes, m, EIx=None, EIy=None, EIz=None, EA=None, A=None, Kt=None, E=None, G=None, phi=None, element='frame3d',nel=None):\n \"\"\" \n Returns the mass and stiffness FEM matrix of a beam represented with nel Frame elements \n\n For uniform or straight beams, the beam is assumed to be along the x direction.\n \n NOTE: input values can be vectors or scalars.\n If they are scalars, then a beam with constant properties and of length L=xNodes is used;\n If they are vectors, values per element are required\n then linear interpolation is used. The dimension of the inputs does not need to match nel\n\n See also Matlab function fBeamMatrices3D_Frame6DOF\n \n INPUTS\n xNodes: define beam length, beam spanwise positions or beam nodes, either:\n - (scalar) Beam length, for uniform beam [m]\n - (1xn) Span vector of the beam (for straight beams) [m]\n - (2xn) Nodes positions x,z along the beam for 2d beam [m]\n - (3xn) Nodes positions x,y,z along the beam for 3d beam [m]\n\n m : (n) Mass per length along the beam, at nodes [kg/m]\n\n A : (n) Beam cross section area along the beam, at nodes [m^2]\n\n EIx : (n) Elastic Modulus times Second Moment of Area of cross section, at nodes [Nm2]\n EIy : (n) Elastic Modulus times Second Moment of Area of cross section, at nodes [Nm2]\n EIz : (n) Elastic Modulus times Second Moment of Area of cross section, at nodes [Nm2]\n\n Kt : (n) Torsion constant, at nodes [m^4]\n\n G : (scalar) Shear modulus. Steel: 79.3 [Pa] [N/m^2]\n E : (scalar) Elastic (Young) modulus\n\n phi : (1xn) rotation of principal axes wrt mean line (tangent) of the beam [rad], at nodes\n\n element: specify the element type to use along the beam: \n 'frame3d'\n 'frame3dlin'\n 'beam2d'\n\n nel : Number of elements. If provided Structural propeties and nodes will be interpolated to match nel. 
\n Otherwise, the length of xNodes determines the discretization\n \n OUTPUTS\n MM: (nDOF x nDOF) Mass matrix\n KK: (nDOF x nDOF) Stiffness matrix\n x : (1 x nel) Span vector\n\n \"\"\"\n # --- Consistency checks\n if element in ['frame3d','frame3dlin']:\n if EIx is None:\n raise Exception('For frame3d*, provide EIx')\n if EIy is None:\n raise Exception('For frame3d*, provide EIy')\n if EIz is None and (E is None or Iz is None):\n raise Exception('For frame3d*, provide EIz')\n if EA is None and (E is None or A is None):\n raise Exception('For frame3d*, provide EA')\n #if A is None:\n # raise Exception('For frame3d*, provide A')\n #if Kt is None:\n # raise Exception('For frame3d*, provide Kt')\n #if E is None:\n # raise Exception('For frame3d*, provide E')\n else:\n raise NotImplementedError('Element type: {}'.format(element))\n\n # --- Default values\n if E is None: E = 211e9 # Young modulus\n if G is None: G = E/2/(1+0.3) # Young modulus\n if EIz is None: EIz=EIy\n if A is None: A= m*0+100 # Area, TODO\n if EA is None: EA=E*A\n if Kt is None: Kt= m*0+100 # Saint Venant torsion, TODO\n\n if not hasattr(xNodes,'__len__'):\n xNodes=[xNodes]\n xNodes = np.asarray(xNodes)\n if len(xNodes)==1:\n xNodes0=xNodes\n # Constant beam properties\n xNodes=np.zeros((3,2))\n xNodes[0,:] =[0, xNodes0[0]] # Beam directed about x\n EIx = np.array([1, 1])*EIx\n EIy = np.array([1, 1])*EIy\n EIz = np.array([1, 1])*EIz\n EA = np.array([1, 1])*EA \n Kt = np.array([1, 1])*Kt\n A = np.array([1, 1])*A \n m = np.array([1, 1])*m \n elif len(xNodes.shape)==1:\n xNodes0=xNodes\n xNodes=np.zeros((3,len(xNodes)))\n xNodes[0,:]=xNodes0\n\n\n # --- Create node locations if user specified nElem\n le0 = np.sqrt((xNodes[0,1:]-xNodes[0,0:-1])**2+(xNodes[1,1:]-xNodes[1,0:-1])**2+(xNodes[2,1:]-xNodes[2,0:-1])**2)\n s_span0 = np.concatenate(([0],np.cumsum(le0)))\n\n if nel is None:\n # we will use xNodes provided by the user\n nel=xNodes.shape[0]-1\n interp_needed=False\n else:\n # We create elements with linear spacing along the curvilinear span\n xNodes0=xNodes\n xNodes=np.zeros((3,nel+1))\n s_span = np.linspace(0,s_span0[-1],nel+1)\n xNodes[0,:] = np.interp(s_span, s_span0, xNodes0[0,:])\n xNodes[1,:] = np.interp(s_span, s_span0, xNodes0[1,:])\n xNodes[2,:] = np.interp(s_span, s_span0, xNodes0[2,:])\n interp_needed=True\n\n # Recompute spanwise\n le = np.sqrt((xNodes[0,1:]-xNodes[0,0:-1])**2+(xNodes[1,1:]-xNodes[1,0:-1])**2+(xNodes[2,1:]-xNodes[2,0:-1])**2)\n s_span = np.concatenate(([0],np.cumsum(le)))\n s_span_mid = s_span[:-1]+np.diff(s_span)/2\n\n # --- Interpolate properties based on curvilinear length along the beam to get nel Elements\n if interp_needed or element=='frame3d':\n # NOTE: frame3d needs values at mid points\n \n if element=='frame3dlin':\n # then we interpolate at nodes\n s_span_e = s_span\n else:\n # we interpolate at element (mid-point)\n s_span_e = s_span_mid\n EIx = np.interp(s_span_e, s_span0, EIx)\n EIy = np.interp(s_span_e, s_span0, EIy)\n EIz = np.interp(s_span_e, s_span0, EIz)\n EA = np.interp(s_span_e, s_span0, EA)\n Kt = np.interp(s_span_e, s_span0, Kt)\n m = np.interp(s_span_e, s_span0, m)\n A = np.interp(s_span_e, s_span0, A)\n\n if element=='frame3d':\n return cbeam_assembly_frame3d(xNodes, E, G, m, EIx, EIy, EIz, Kt, EA, A, phi=None)\n else:\n raise NotImplementedError()\n\n\n# --------------------------------------------------------------------------------}\n# --- Assembly dedicated to frame3d (data per element)\n# 
--------------------------------------------------------------------------------{\ndef cbeam_assembly_frame3d(xNodes, E, G, me, EIxe, EIye, EIze, Kte, EAe, Ae, phi=None):\n \"\"\"\n Assembly a FEM model of a beam made of n elements (n+1 nodes)\n Node positions are given in 3D\n Element properties are given for each elements (n)\n\n INPUTS\n xNodes: (3x n+1) Nodes positions x,y,z along the beam for 3d beam [m]\n G : (scalar or n) Shear modulus. Steel: 79.3 [Pa] [N/m^2]\n E : (scalar or n) Elastic (Young) modulus\n me : (n) Mass per length of elements [kg/m]\n A : (n) Beam cross section area along the beam, for elements [m^2]\n EIy : (n) Elastic Modulus times Second Moment of Area of cross section [Nm2]\n EIz : (n) Elastic Modulus times Second Moment of Area of cross section [Nm2]\n EIz : (n) Elastic Modulus times Second Moment of Area of cross section [Nm2]\n Kt : (n) Torsion constant [m^4]\n phi : (n) rotation of principal axes wrt mean line (tangent) of the beam [rad]\n\n\n nel : Number of elements. If provided Structural propeties and nodes will be interpolated to match nel. \n Otherwise, the length of xNodes determines the discretization\n \n OUTPUTS\n MM: (nDOF x nDOF) Mass matrix\n KK: (nDOF x nDOF) Stiffness matrix\n x : (1 x nel) Span vector\n \"\"\"\n from .frame3d import frame3d_KeMe\n\n nElem = len(me) # Number of elements\n nDOFperNode = 6 # Degrees of Freedom per Node\n nNodesPerElem = 2 # Number of nodes per element\n nNodes = (nNodesPerElem-1)*nElem+1 # total number of nodes in system\n nDOF = nNodes*nDOFperNode # total system dofs\n\n if np.any(xNodes[1,:]!=0):\n raise NotImplementedError('Only straight beam along x supported')\n if np.any(xNodes[2,:]!=0):\n raise NotImplementedError('Only straight beam along x supported')\n\n if np.isscalar(E): E=me*0 + E\n if np.isscalar(G): G=me*0 + G\n\n\n # --- Coordinates system / direction cosine of each element\n DCM = elementDCMfromBeamNodes(xNodes,phi=phi)\n\n # --- Mapping DOF/Nodes/Elem, for consistency with advanced FEM\n Elem2Nodes, Nodes2DOF, Elem2DOF = LinearDOFMapping(nElem, nNodesPerElem, nDOFperNode)\n\n # --- Assembly\n MM = np.zeros((nDOF,nDOF))\n KK = np.zeros((nDOF,nDOF))\n # Loop on elements\n for i in np.arange(nElem):\n DOFindex=ElementDOFIndex(i,nNodesPerElem,nDOFperNode) # 1 x ndof*nnel\n DOFindex=Elem2DOF[i,:]\n #print(DOFindex)\n #print(Elem2DOF[i,:])\n P1 = xNodes[0,i]\n P2 = xNodes[0,i+1]\n Le = np.linalg.norm(P2-P1)\n Me = Le*me[i]\n # --- Element matrix\n Ke,Me,Kg = frame3d_KeMe(E[i],G[i],Kte[i],EAe[i],EIxe[i],EIye[i],EIze[i],Le,Ae[i],Me,T=0,R=None)\n # --- Build global matrices\n MM = BuildGlobalMatrix(MM, Me, DOFindex)\n KK = BuildGlobalMatrix(KK, Ke, DOFindex)\n\n return MM, KK, xNodes, DCM, Elem2Nodes, Nodes2DOF, Elem2DOF\n\n\n# --------------------------------------------------------------------------------}\n# --- Continuous Beam - Frame3d linear formulation\n# --------------------------------------------------------------------------------{\ndef cbeam_assembly_frame3dlin(xNodes, m, Iy, Iz=None, A=None, Kv=None, E=None, G=None, phi=None):\n \"\"\"\n Assemble a FEM system for a continuous beam using frame3d linear elements\n Elements are assumed to be connected continuously from 1st node to last\n\n xNodes: (3 x nNodes) position of the nodes.\n m : (nNodes) linear mass per length\n\n phi : rotation of principal axes wrt mean line (tangent) of the beam [rad]\n \n \"\"\"\n from welib.FEM.frame3dlin import frame3dlin_KeMe\n import scipy \n\n assert(xNodes.shape[0]==3)\n\n nNodes = xNodes.shape[1]\n nElem = 
nNodes-1\n nqe = 12 # Number of DOF per element\n nqk = int(nqe/2) # Number of DOF per nodes\n nDOF_tot = nNodes*nqk # Total number of DOF without constraint (BC)\n\n # --- Default values\n if Iz is None: Iz=Iy\n if E is None: E = m*0+211e9 # Young modulus\n if G is None: G = E/2/(1+0.3) # Young modulus\n if A is None: A= m*0+100 # Area\n if Kv is None: Kv= m*0+100 # Saint Venant torsion\n\n # --- Coordinates system / direction cosine of each element\n # Putting \"z\" along x\n DCM = elementDCMfromBeamNodes(xNodes,phi=phi)\n\n # --- Distribution of DOFs on nodes and elements\n Nodes2DOF=np.zeros((nNodes,6), dtype=int)\n for i in np.arange(nNodes):\n Nodes2DOF[i,:]=np.arange( i*6, (i+1)*6) \n Elem2DOF=np.zeros((nElem,12),dtype=int)\n for i in np.arange(nElem):\n Elem2DOF[i,:]=np.concatenate((Nodes2DOF[i,:], Nodes2DOF[i+1,:]))\n Elem2Nodes=np.zeros((nElem,2), dtype=int)\n for i in np.arange(nElem):\n Elem2Nodes[i,:]=(i,i+1)\n\n # --- Element mass matrices\n Me = np.zeros((12,12,nElem))\n Ke = np.zeros((12,12,nElem))\n for ie in np.arange(nElem):\n dx= (xNodes[:,ie+1]-xNodes[:,ie]).reshape(3,1)\n le = np.linalg.norm(dx) # element length\n iNode1, iNode2 = Elem2Nodes[ie,:]\n me1 = m[iNode1]*le # m l = rho * A * l\n me2 = m[iNode2]*le\n A1 = A[iNode1]\n A2 = A[iNode2]\n Kv1 = Kv[iNode1]\n Kv2 = Kv[iNode2]\n Iy1 = Iy[iNode1]\n Iy2 = Iy[iNode2]\n Iz1 = Iz[iNode1]\n Iz2 = Iz[iNode2]\n ke,me = frame3dlin_KeMe(E,G,Kv1,Kv2,A1,A2,Iy1,Iy2,Iz1,Iz2,le,me1,me2, R=None)\n #ke,me= frame3dlin_KeMe(me1, me2, le)\n Me[:,:,ie]=me\n Ke[:,:,ie]=ke\n\n # --- Assembly\n MM = np.zeros((nDOF_tot,nDOF_tot))\n KK = np.zeros((nDOF_tot,nDOF_tot))\n for ie in np.arange(nElem):\n IDOF = Elem2DOF[ie,:]\n R = DCM[:,:,ie]\n RR = scipy.linalg.block_diag(R,R,R,R)\n Mg = (RR.T).dot(Me[:,:,ie]).dot(RR)\n Kg = (RR.T).dot(Ke[:,:,ie]).dot(RR)\n MM[np.ix_(IDOF,IDOF)] += Mg\n KK[np.ix_(IDOF,IDOF)] += Kg\n\n return MM, KK, xNodes, DCM, Elem2Nodes, Nodes2DOF, Elem2DOF\n\ndef cbeam_frame3dlin_Kg(Tload, xNodes, Elem2Nodes, Elem2DOF, DCM, E, A, FEMmodel='frame3d_lin'):\n \"\"\" \n Geometric stiffness due a load Tload on all the DOFs\n \"\"\"\n from welib.FEM.frame3dlin import frame3dlin_Kg # TODO switch between implementation\n\n nDOF_tot = len(Tload)\n nElem = Elem2Nodes.shape[0]\n Kg= np.zeros((nDOF_tot,nDOF_tot))\n\n # --- Element mass matrices\n for ie in np.arange(nElem):\n # Going from load in global to load in local\n IDOF = Elem2DOF[ie,:]\n R = DCM[:,:,ie]\n RR = scipy.linalg.block_diag(R,R,R,R)\n Te = RR.dot(Tload[IDOF])\n # Element geometrical stiffness matrix in global \n dx = (xNodes[:,ie+1]-xNodes[:,ie]).reshape(3,1)\n L = np.linalg.norm(dx) # element length\n iNode1, iNode2 = Elem2Nodes[ie,:]\n A1 = A[iNode1]\n A2 = A[iNode2]\n Kge_gl = frame3dlin_Kg(E,A1,A2,L,Te[0],Te[6],R=DCM[:,:,ie])\n # Assembly\n Kg[np.ix_(IDOF,IDOF)] += Kge_gl\n return Kg\n\n\n\n\n\n\n# --------------------------------------------------------------------------------}\n# --- \n# --------------------------------------------------------------------------------{\ndef applyBC(MM, KK, Elem2Nodes, Nodes2DOF, BC=None, BC_root=[0,0,0,0,0,0], BC_tip=[1,1,1,1,1,1],\n M_root=None, K_root=None, Mass_root=None, COG_root=None, Inertia_root=None,\n M_tip=None, K_tip=None, Mass_tip=None, COG_tip=None, Inertia_tip=None, \n ):\n \"\"\" \n Apply simple boundary conditions at tip and root\n\n INPUTS:\n - MM, KK: (n x n) mass matrix, stiffness matrix\n\n - either:\n - BC: string defining the boundary condition\n 'clamped-free': clamped at root, free at tip\n 'free-free': 
free at root, free at tip\n or\n - BC_root/tip: 6-array for the BC of each DOF\n \"0\" = fixed\n \"1\" = free\n default: cantilever, root clamped and tip free\n\n - M_tip/root: (6x6) mass matrix to add at beam ends\n - K_tip/root: (6x6) stiffness matrix to add at beam ends\n\n - Mass_root/tip: (scalar) additional point mass to add at beam ends. \n default: None, no mass\n - COG_root/tip: (3-vector) x,y,z position of point mass wrt. the first/last node. \n default: None, at first/last node.\n - Inertia_root/tip: (3-vector or 3x3 matrix), diagonal coefficients or full inertia matrix\n with respect to COG! \n default: None \n\n OUTPUTS:\n Mr, Kr : (nr x nr) reduced mass and stiffness matrix\n Tr : (n x nr) reduction matrix such that Mr = Tr' MM Tr\n IFull2BC: (n) Mapping from index of full matrix to matrix where DOF have been removed\n IBC2Full: (nr) Mapping from index of reduced matrix to full matrix \n \n \"\"\"\n if BC is not None:\n if BC=='clamped-free':\n BC_root = [0,0,0,0,0,0]\n BC_tip = [1,1,1,1,1,1]\n elif BC=='free-free':\n BC_root = [1,1,1,1,1,1]\n BC_tip = [1,1,1,1,1,1]\n\n\n nDOF_tot = MM.shape[0]\n IDOF_All = np.arange(0,nDOF_tot)\n\n # Tip and root degrees of freedom\n IDOF_root = Nodes2DOF[Elem2Nodes[0,:][0] ,:]\n IDOF_tip = Nodes2DOF[Elem2Nodes[-1,:][1],:]\n\n # --- Insert tip/root inertias\n if M_root is None:\n M_root= rigidBodyMassMatrixAtP(Mass_root, Inertia_root, COG_root)\n if M_tip is None:\n M_tip = rigidBodyMassMatrixAtP(Mass_tip, Inertia_tip, COG_tip)\n\n MM[np.ix_(IDOF_root, IDOF_root)] += M_root\n MM[np.ix_(IDOF_tip, IDOF_tip)] += M_tip\n\n # --- Insert tip/root stiffness\n if K_root is not None:\n KK[np.ix_(IDOF_root, IDOF_root)] += K_root\n if K_tip is not None:\n KK[np.ix_(IDOF_tip, IDOF_tip)] += K_tip\n\n # --- Boundary condition transformation matrix (removes row/columns)\n Tr=np.eye(nDOF_tot)\n\n # Root and Tip BC\n IDOF_removed = [i for i,iBC in zip(IDOF_root, BC_root) if iBC==0]\n IDOF_removed += [i for i,iBC in zip(IDOF_tip, BC_tip) if iBC==0]\n Tr = np.delete(Tr, IDOF_removed, axis=1) # removing columns\n\n Mr = (Tr.T).dot(MM).dot(Tr)\n Kr = (Tr.T).dot(KK).dot(Tr)\n\n # --- Create mapping from M to Mr\n nDOF_r = Mr.shape[0]\n IDOF_BC = list(np.setdiff1d(IDOF_All, IDOF_removed))\n IFull2BC = np.zeros(nDOF_tot,dtype=int)\n IBC2Full = np.zeros(nDOF_r,dtype=int)\n k=0\n for i in IDOF_All:\n if i in IDOF_removed:\n IFull2BC[i]=-1\n else:\n IFull2BC[i]=k\n IBC2Full[k]=i\n k+=1\n\n return Mr, Kr, Tr, IFull2BC, IBC2Full\n\n\n\n# --------------------------------------------------------------------------------}\n# --- \n# --------------------------------------------------------------------------------{\ndef generalizedMassMatrix(xNodes, MM, Se):\n \"\"\" \n Generalized mass matrix from a FEM representation when the structure is undeflected.\n xNodes: Position of the nodes (3 x nNodes)\n MM: FEM Mass Matrix (nDOF x nDOF)\n Se: FEM Modes (nDOF x nModes) (elastic modes, e)\n \n \"\"\"\n dpn=6 # Number of DOF per nodes\n\n assert(xNodes.shape[0]==3)\n nDOF=MM.shape[0]\n\n # --- Rigid body modes (t: translation, r:rotation)\n St = np.zeros((nDOF, 3))\n Sr = np.zeros((nDOF, 3))\n for i in np.arange(xNodes.shape[1]):\n R= skew(xNodes[:,i])\n St[i*dpn : i*dpn+3, :]= np.eye(3)\n Sr[i*dpn : i*dpn+3, :]= -R\n Sr[i*dpn+3 : i*dpn+6, :]= np.eye(3)\n # Se: Selected modes (e:elastic)\n\n # --- Generalized mass matrix\n # Rigid body part # Different Notations:\n Mtt = (St.T).dot(MM).dot(St) # Mxx, mE\n J0 = (Sr.T).dot(MM).dot(Sr) # Mrr, Mtt, I0\n Mrt = (Sr.T).dot(MM).dot(St) # 
Mrt, Mxt, mc0\n # Flexible part\n Mgt = (Se.T).dot(MM).dot(St) # Mgt, Mgx, Mxg', Ct0\n Mgr = (Se.T).dot(MM).dot(Sr) # Mgr, Mgt, Mtg', Cr0\n Mgg = (Se.T).dot(MM).dot(Se) # Mgg, Me\n return Mtt, J0, Mrt, Mgt, Mgr, Mgg, St, Sr\n\n\ndef shapeIntegrals(xNodes, Nodes2DOF, Elem2Nodes, Elem2DOF, DCM, m, Se, Sr, Tr):\n \"\"\" \n Compute main shape integrals from FEM implementation\n (frame3dlin for C3 for now) \n\n See [2] for equations and details\n\n Inspired by a matlab implementation by J. Geilser:\n https://github.com/jgeisler0303/FEMBeam\n \n \"\"\"\n from welib.FEM.frame3dlin import frame3dlin_Mcross\n\n # init\n nElem = Elem2Nodes.shape[0]\n nNodes = xNodes.shape[1]\n nShapes = Se.shape[1]\n nDOF_tot = Se.shape[0]\n\n # --- C3 Element mass matrices\n C3 = np.zeros((3,3,12,12,nElem))\n for ie in np.arange(nElem):\n dx= (xNodes[:,ie+1]-xNodes[:,ie]).reshape(3,1)\n le = np.linalg.norm(dx) # element length\n iNode1, iNode2 = Elem2Nodes[ie,:]\n me1 = m[iNode1]*le # m l = rho * A * l\n me2 = m[iNode2]*le\n c3 = frame3dlin_Mcross(le,me1,me2)\n C3[:,:,:,:,ie]=c3\n\n # --- Term for second order Cr (Mgr) terms and Oe\n # [2] (5.252) p. 233, (6.401) p. 338\n KFr= np.zeros((3,nDOF_tot,nDOF_tot))\n Kr =np.zeros((3,nShapes,nShapes))\n for ia in np.arange(3):\n for ie in np.arange(nElem):\n lmn= [0,1,2]\n for l in np.arange(3):\n m_= lmn[1];\n n_= lmn[2];\n IDOF = Elem2DOF[ie,:]\n R = DCM[:,:,ie]\n RR = scipy.linalg.block_diag(R,R,R,R)\n Gamma = DCM[:,:,ie]\n\n KFr[ia][np.ix_(IDOF,IDOF)] += (RR.T).dot( -C3[m_, n_,:,:,ie] + C3[n_, m_,:,:,ie]).dot(RR) * Gamma[l, ia]\n lmn= np.roll(lmn,-1) #circshift(lmn, [0 -1]);\n # [2] (6.483) p. 367\n Kr[ia,:,:]= (Se.T).dot(KFr[ia]).dot(Se)\n\n # --- Terms useful for 0th order of Gr, and 1st order of J\n # [2] (6.490) p. 368; (6.531) p. 379 or (6.515) p. 375\n C4= np.zeros((3, 3, nShapes))\n for l in np.arange(nShapes):\n for ia in np.arange(3):\n for ib in np.arange(3):\n C4[ia, ib, l]= -(Sr[:, ia].T).dot(KFr[ib]).dot(Se[:, l]);\n\n # --- \n # (5.268) S. 237\n KFom_ab= np.zeros((3,3, nDOF_tot, nDOF_tot)) # = C6\n for ia in np.arange(3):\n for ib in np.arange(3):\n for l in np.arange(3):\n for m in np.arange(3):\n for ie in np.arange(nElem):\n IDOF = Elem2DOF[ie,:]\n R = DCM[:,:,ie]\n RR = scipy.linalg.block_diag(R,R,R,R)\n Gamma = DCM[:,:,ie]\n if l==m:\n m_= l+1;\n if m_>2: m_= 0\n n_= m_+1;\n if n_>2: n_= 0\n Xi= -(C3[m_, m_,:,:,ie]+C3[n_, n_,:,:,ie]) # [2] (5.266) p. 236\n else:\n Xi= C3[m, l,:,:,ie];\n Madd = (RR.T).dot(Xi).dot(RR) * Gamma[l, ia]*Gamma[m, ib]\n KFom_ab[ia, ib][np.ix_(IDOF,IDOF)] += Madd\n\n # --- DOF undisplaced values\n ZF0= np.zeros((nDOF_tot,1))\n for iNode in np.arange(nNodes):\n IDOF=Nodes2DOF[iNode][:3] # translational DOF only\n ZF0[IDOF,0]= xNodes[:,iNode];\n\n # --- [2] (5.271) p. 237\n KFom = np.zeros((6,nDOF_tot, nDOF_tot))\n Kom = np.zeros((6,nShapes,nShapes))\n Kom0 = np.zeros((nShapes, 6))\n Kom0_= np.zeros((Tr.shape[1], 6));\n for i in np.arange(6):\n if i<3:\n KFom[i]= KFom_ab[i, i]\n else:\n a= i-3;\n b= a+1;\n if b>2: b= 0\n KFom[i]= KFom_ab[a, b] + KFom_ab[a, b].T\n Kom[i]= (Se.T).dot(KFom[i]).dot(Se);\n Kom0 [:, i]= (Se.T).dot(KFom[i]).dot(ZF0).ravel()\n Kom0_[:, i]= (Tr.T).dot(KFom[i]).dot(ZF0).ravel()\n\n return C3, Kr, C4, KFom_ab, Kom, Kom0, Kom0_ \n\n\ndef geometricalStiffening(xNodes, Kinv, Tr, Se, Nodes2DOF, Elem2Nodes, Elem2DOF, DCM, E, A, Kom0_=None, Ct0_=None):\n \"\"\" \n Axial stiffening terms\n See [2] 6.330 S. 
319\n \n \"\"\"\n def geo_stiff_wrap(Tload):\n return cbeam_frame3dlin_Kg(Tload, xNodes, Elem2Nodes, Elem2DOF, DCM, E, A)\n\n nDOF_tot = Kinv.shape[0]\n nNodes = Nodes2DOF.shape[0]\n iMaxDim = np.argmax(np.max(np.abs(xNodes),axis=1)-np.min(np.abs(xNodes),axis=1)) \n\n # Stiffness from tip load\n Fend_ax = np.zeros((nDOF_tot, 1))\n iNode=nNodes-1 # Load node\n DOF=Nodes2DOF[iNode,:]\n Fend_ax[DOF[iMaxDim], 0]= 1 # Unit loads at tip\n \n # All axial stiffening contributions\n GKg=dict()\n GKg['Fend'] = (Se.T).dot( geo_stiff_wrap( -Kinv.dot(Fend_ax)) ).dot(Se)\n GKg['t_ax'] = (Se.T).dot( geo_stiff_wrap( -Kinv.dot(Tr.dot(Ct0_[:, iMaxDim])))).dot(Se)\n GKg['omxx'] = (Se.T).dot( geo_stiff_wrap( -Kinv.dot(Tr.dot(Kom0_[:, 0]))) ).dot(Se) \n GKg['omyy'] = (Se.T).dot( geo_stiff_wrap( -Kinv.dot(Tr.dot(Kom0_[:, 1]))) ).dot(Se) \n GKg['omzz'] = (Se.T).dot( geo_stiff_wrap( -Kinv.dot(Tr.dot(Kom0_[:, 2]))) ).dot(Se) \n GKg['omxy'] = (Se.T).dot( geo_stiff_wrap( -Kinv.dot(Tr.dot(Kom0_[:, 3]))) ).dot(Se) \n GKg['omxz'] = (Se.T).dot( geo_stiff_wrap( -Kinv.dot(Tr.dot(Kom0_[:, 4]))) ).dot(Se) \n GKg['omyz'] = (Se.T).dot( geo_stiff_wrap( -Kinv.dot(Tr.dot(Kom0_[:, 5]))) ).dot(Se) \n\n return GKg\n\n\n# TODO verify that these are DCM and not the transpose\ndef elementDCMfromBeamNodes(xNodes, phi=None):\n \"\"\" Generate element Direction cosine matricse (DCM) \n from a set of ordered node coordinates defining a beam mean line\n\n INPUTS:\n xNodes: 3 x nNodes\n phi (optional): nNodes angles about mean line to rotate the section axes\n OUTPUTS:\n DCM: 3 x 3 x (nNodes-1)\n \"\"\"\n def null(a, rtol=1e-5):\n u, s, v = np.linalg.svd(a)\n rank = (s > rtol*s[0]).sum()\n return v[rank:].T.copy()\n\n assert(xNodes.shape[0]==3)\n nElem=xNodes.shape[1]-1\n DCM = np.zeros((3,3,nElem))\n for i in np.arange(nElem):\n dx= (xNodes[:,i+1]-xNodes[:,i]).reshape(3,1)\n le = np.linalg.norm(dx) # element length\n e1 = dx/le # tangent vector\n if i==0:\n e1_last = e1\n e2_last = null(e1.T)[:,0].reshape(3,1) # x,z-> y , y-> -x \n # normal vector\n de1 = e1 - e1_last\n if np.linalg.norm(de1)<1e-8:\n e2 = e2_last\n else:\n e2 = de1/np.linalg.norm(de1)\n # Rotation about e1\n if phi is not None:\n R = np.cos(phi[i])*np.eye(3) + np.sin(phi[i])*skew(e1) + (1-np.cos(phi[i]))*e1.dot(e1.T);\n e2 = R.dot(e2)\n # Third vector\n e3=np.cross(e1.ravel(),e2.ravel()).reshape(3,1)\n DCM[:,:,i]= np.column_stack((e1,e2,e3)).T;\n e1_last= e1\n e2_last= e2\n return DCM\n\n\n\n# --------------------------------------------------------------------------------}\n# --- Mode tools \n# --------------------------------------------------------------------------------{\ndef modeNorms(q, iDOFstart=0, nDOF=6):\n \"\"\" \n Return norms of components of a mode\n Norm is computed as sum(abs())\n q: mode vector\n iDOFStart: where to start in mode vector\n nDOF: number of DOF per node typically 6 for 3D and 2/3 for 2D\n \"\"\"\n MaxMag=np.zeros(nDOF)\n for i in np.arange(nDOF): \n MaxMag[i] = np.sum(np.abs(q[iDOFstart+i::nDOF]))\n return MaxMag\n\ndef normalize_to_last(Q, Imodes, iDOFStart=0, nDOF=6):\n for iimode, imode in enumerate(Imodes):\n mag = modeNorms(Q[:,imode], iDOFStart, nDOF)[:int(nDOF/2)]\n iMax= np.argmax(mag);\n v_= Q[iDOFStart+iMax::nDOF, imode];\n Q[:, imode]= Q[:, imode]/v_[-1]\n return Q\n\ndef orthogonalizeModePair(Q1, Q2, iDOFStart=0, nDOF=6):\n # Find magnitudes to see in which direction the mode is the most\n mag = modeNorms(Q1, iDOFStart, nDOF)[:int(nDOF/2)]\n idx= np.argsort(mag)[-1::-1]\n k11 = sum(Q1[iDOFStart+idx[0]-1::nDOF]);\n k12 = 
sum(Q1[iDOFStart+idx[1]-1::nDOF]);\n k21 = sum(Q2[iDOFStart+idx[0]-1::nDOF]);\n k22 = sum(Q2[iDOFStart+idx[1]-1::nDOF]);\n Q1_ = k11*Q1 + k21*Q2\n Q2_ = k12*Q1 + k22*Q2\n return Q1_, Q2_\n\ndef insertBCinModes(Qr, Tr):\n \"\"\"\n Qr : (nr x nr) reduced modes\n Tr : (n x nr) reduction matrix such that Mr = Tr' MM Tr\n \"\"\"\n return Tr.dot(Qr)\n\ndef identifyAndNormalizeModes(Q, nModes=None, element='frame3d', normalize=True):\n \"\"\" \n Attempts to identify and normalized the first `nModes` modes\n Modes are normalized by last values unless this value is too small compared to the max\n in which case the max is used.\n Mode names are returned of the form [u,v][x,y,z][n]\n where \"u\": displacements, \"v\": slopes, and \"n\" is the mode number in that direction\n \"\"\"\n if nModes is None: nModes=Q.shape[1]\n if element in ['frame3d','frame3dlin']:\n nDOF=6\n sDOF=['ux','uy','uz','vx','vy','vz']\n\n cDOF=np.zeros(nDOF,dtype=int) # Counter on Modes in each DOF\n modeNames=[]\n\n for i in np.arange(nModes):\n q=Q[:,i]\n mag = modeNorms(q, iDOFstart=0, nDOF=nDOF)\n idx= np.argsort(mag)[-1::-1]\n iMax = idx[0]\n U = Q[iMax::nDOF,i]\n # Detect rigid body mode (0 or NaN frequencies), component constant and non-zero\n rigid=False\n for idof in np.arange(nDOF):\n Ui = Q[idof::nDOF,i]\n Umax = max(abs(Ui))\n if Umax>1e-6:\n if len(np.unique(np.around(Ui/Umax,3)))==1:\n icst=idof\n rigid=True\n break\n # Mode name\n if rigid:\n mode_name =sDOF[iMax]+'_'+sDOF[icst]+'_rigid'\n else:\n cDOF[iMax]+=1\n mode_name = sDOF[iMax]+str(cDOF[iMax])\n modeNames.append(mode_name)\n\n #if sDOF[iMax] in ['vy','vz']:\n # print('Mode {} has strong slope, double check identification'.format(i))\n #print('>>>Mode',i, 'name:',mode_name, mag)\n\n # Normalization by max or last\n Umax = max(abs(U))\n Ulast = abs(U[-1])\n if Ulast*100< Umax: # some tuning factor if last is close to 0\n # Normalize by max\n fact = Umax*np.sign(U[-1])\n else:\n # Normalize by last\n fact = Ulast*np.sign(U[-1])\n if normalize:\n Q[:,i]= Q[:,i]/fact\n return Q, modeNames\n\n\n\n\n\n\n\n\n# --------------------------------------------------------------------------------}\n# --- \n# --------------------------------------------------------------------------------{\nif __name__=='__main__':\n pass\n",
"\"\"\" \nRead/Write TecPto ascii files\nsea read_tecplot documentation below\n\nPart of weio library: https://github.com/ebranlard/weio\n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport os\nimport struct\n\ntry:\n from .file import File, EmptyFileError, WrongFormatError, BrokenFormatError\nexcept:\n EmptyFileError = type('EmptyFileError', (Exception,),{})\n WrongFormatError = type('WrongFormatError', (Exception,),{})\n BrokenFormatError = type('BrokenFormatError', (Exception,),{})\n File=dict\n\n\n\nKeywords=['title','variables','zone','text','geometry','datasetauxdata','customlabels','varauxdata']\n# --------------------------------------------------------------------------------}\n# --- Helper functions \n# --------------------------------------------------------------------------------{\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n \n try:\n import unicodedata\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n \n return False\n\n\ndef _process_merged_line(line, section, dict_out):\n n = len(section)\n line = line[n:].strip()\n if section=='title':\n dict_out[section]=line\n elif section=='variables':\n line = line.replace('=','').strip()\n line = line.replace(',',' ').strip()\n line = line.replace(' ',' ').strip()\n line = line.replace('[','_[').strip()\n line = line.replace('(','_(').strip()\n line = line.replace('__','_').strip()\n if line.find('\"')==0:\n line = line.replace('\" \"',',')\n line = line.replace('\"','')\n sp=line.split(',')\n else:\n sp=line.split()\n dict_out[section]=sp\n elif section=='datasetauxdata':\n if section not in dict_out.keys():\n dict_out[section]={} # initialixe an empty directory\n sp = line.split('=')\n key = sp[0]\n value = sp[1].replace('\"','').strip()\n if is_number(value):\n value=float(value)\n dict_out[section][key]=value\n\n elif section=='zone':\n if section not in dict_out.keys():\n dict_out[section]={} # initialixe an empty directory\n sp = line.split('=')\n key = sp[0]\n value = sp[1].replace('\"','').strip()\n if is_number(value):\n value=float(value)\n dict_out[section][key]=value\n \n else:\n print('!!! 
Reading of section not implemented:')\n print('Processing section {}:'.format(section),line)\n dict_out[section]=line\n\ndef read_tecplot(filename, dict_out={}):\n \"\"\" Reads a tecplot file\n Limited support:\n - title optional\n - variables mandatory\n - Lines may be continued to next line, stopping when a predefined keyword is detected\n For now, assumes that only one section of numerical data is present\n \"\"\"\n\n merged_line=''\n current_section=''\n variables=[]\n with open(filename, \"r\") as f:\n dfs = [] # list of dataframes\n iline=0\n while True:\n line= f.readline().strip()\n iline+=1\n if not line:\n break\n l=line.lower().strip()\n # Comment\n if l[0]=='#':\n continue\n new_section = [k for k in Keywords if l.find(k)==0 ]\n\n if len(new_section)==1:\n # --- Start of a new section\n # First, process the previous section\n if len(merged_line)>0: \n _process_merged_line(merged_line, current_section, dict_out)\n # Then start the new section\n current_section=new_section[0]\n merged_line =line\n elif len(current_section)==0:\n raise WrongFormatError('No section detected')\n else:\n if current_section=='title' or current_section=='variables':\n # OK\n pass\n else:\n if 'variables' not in dict_out.keys():\n raise WrongFormatError('The `variables` section should be present')\n sp = l.split()\n if is_number(sp[0]):\n if len(merged_line)>0: \n _process_merged_line(merged_line, current_section, dict_out)\n # --- Special case of numerical values outside of zone\n f.close()\n M = np.loadtxt(filename, skiprows = iline-1)\n if M.shape[1]!=len(dict_out['variables']):\n raise BrokenFormatError('Number of columns of data does not match number of variables')\n dict_out['data']=M\n break\n else:\n # --- Continuation of previous section\n merged_line +=' '+line\n return dict_out\n\n\nclass TecplotFile(File):\n\n @staticmethod\n def defaultExtensions():\n return ['.dat']\n\n @staticmethod\n def formatName():\n return 'Tecplot ASCII file'\n\n def __init__(self,filename=None,**kwargs):\n self.filename = None\n if filename:\n self.read(filename=filename,**kwargs)\n\n def read(self, filename=None):\n \"\"\" read a tecplot ascii file\n sea `read_tecplot` documentation above\n \"\"\"\n if filename:\n self.filename = filename\n if not self.filename:\n raise Exception('No filename provided')\n if not os.path.isfile(self.filename):\n raise OSError(2,'File not found:',self.filename)\n if os.stat(self.filename).st_size == 0:\n raise EmptyFileError('File is empty:',self.filename)\n\n try:\n read_tecplot(filename,self)\n except BrokenFormatError:\n raise \n except WrongFormatError:\n raise \n except Exception as e: \n raise WrongFormatError('Tecplot dat File {}: '.format(self.filename)+e.args[0])\n\n def write(self, filename=None, precision=None):\n \"\"\" Write tecplot ascii file \"\"\"\n if filename:\n self.filename = filename\n if not self.filename:\n raise Exception('No filename provided')\n\n with open(self.filename, mode='w') as f: \n if 'title' in self.keys():\n f.write('TITLE = {}\\n'.format(self['title']))\n f.write('VARIABLES = ' + ','.join(['\"{}\"'.format(col) for col in self['variables'] ]) + '\\n')\n for k in Keywords[2:]:\n if k in self.keys():\n f.write('{} = {}\\n'.format(k,self[k]))\n # Data\n if 'data' in self.keys():\n for row in self['data']:\n srow = np.array2string(row, edgeitems=0, separator=' ', precision=precision)\n f.write(srow[1:-1]+'\\n')\n\n\n def __repr__(self):\n s='<{} object> with keys:\\n'.format(type(self).__name__)\n for k,v in self.items():\n s+=' - {}: 
{}\\n'.format(k,v)\n        return s\n\n    def toDataFrame(self):\n        return pd.DataFrame(data=self['data'],columns=self['variables'])\n\nif __name__=='__main__':\n    # Example usage of the class defined above; 'TecplotASCII.dat' is a hypothetical local file path\n    tp = TecplotFile('TecplotASCII.dat')\n    print(tp)\n    df = tp.toDataFrame()\n    print(df)\n"
] | [
[
"numpy.diag",
"numpy.ix_",
"numpy.sqrt",
"numpy.asarray",
"numpy.arange",
"numpy.eye",
"numpy.set_printoptions",
"numpy.setdiff1d",
"numpy.linalg.lstsq",
"numpy.block",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
],
[
"numpy.asarray",
"numpy.array"
],
[
"numpy.get_include"
],
[
"numpy.asarray",
"numpy.around"
],
[
"numpy.rollaxis",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.concatenate",
"numpy.lib.stride_tricks.as_strided",
"numpy.max",
"numpy.mean",
"numpy.any",
"numpy.iscomplexobj",
"numpy.conjugate",
"numpy.reshape",
"numpy.arange",
"numpy.fft.rfftfreq",
"numpy.sin",
"numpy.argmax",
"numpy.zeros",
"numpy.log",
"numpy.min",
"numpy.isnan",
"numpy.linalg.lstsq",
"numpy.abs",
"numpy.fft.rfft",
"numpy.cos",
"numpy.ones",
"numpy.result_type",
"numpy.empty"
],
[
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.around",
"numpy.cumsum",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.any",
"numpy.roll",
"numpy.linalg.svd",
"numpy.ix_",
"numpy.arange",
"numpy.eye",
"numpy.sin",
"numpy.argmax",
"numpy.diff",
"numpy.interp",
"numpy.column_stack",
"numpy.zeros",
"numpy.delete",
"numpy.argsort",
"numpy.array",
"numpy.abs",
"scipy.linalg.block_diag",
"numpy.linalg.norm",
"numpy.setdiff1d",
"numpy.cos",
"numpy.sign",
"numpy.isscalar"
],
[
"numpy.loadtxt",
"numpy.array2string",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
c4dt/mlbench-core | [
"8a5cf6e00ff4535b2aea23b213241858a5ee5f00"
] | [
"mlbench_core/optim/pytorch/fp_optimizers.py"
] | [
"# import ctypes\nimport logging\nimport math\n\nimport torch\nimport torch.distributed as dist\nfrom torch.nn.utils import clip_grad_norm_\n\nfrom mlbench_core.utils.pytorch.distributed import (\n AllReduceAggregation,\n AllReduceAggregationHVD,\n)\n\ntry:\n from apex.optimizers import FusedAdam\n from apex import amp\nexcept ImportError as e:\n pass\n\nlogger = logging.getLogger(\"mlbench\")\n\n\nclass FP16Optimizer:\n \"\"\"\n Mixed precision optimizer with dynamic loss scaling and backoff.\n https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor\n\n Args:\n fp16_model (`obj`:torch.nn.Module): model (previously casted to half)\n world_size (int): Distributed world size\n use_cuda (bool): Use cuda tensors for aggregation\n use_horovod (bool): Use Horovod for aggregation\n by_layer (bool): Aggregate by layer\n grad_clip (float): coefficient for gradient clipping, max L2 norm of the gradients\n loss_scale (int): initial loss scale\n dls_downscale (int): loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients\n dls_upscale (int): loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully\n dls_upscale_interval (int): interval for loss scale upscaling\n average_models (bool): Average the models\n \"\"\"\n\n def __init__(\n self,\n fp16_model,\n world_size,\n use_cuda=False,\n use_horovod=False,\n by_layer=False,\n grad_clip=float(\"inf\"),\n loss_scale=1024,\n dls_downscale=2,\n dls_upscale=2,\n dls_upscale_interval=128,\n average_models=True,\n ):\n self.use_cuda = use_cuda\n\n self.fp16_model = fp16_model\n self.fp16_params, self.fp32_params = self.initialize_flat_fp32_weight()\n self.since_last_invalid = 0\n self.loss_scale = loss_scale\n self.dls_downscale = dls_downscale\n self.dls_upscale = dls_upscale\n self.dls_upscale_interval = dls_upscale_interval\n self.grad_clip = grad_clip\n self.world_size = dist.get_world_size()\n\n self.optimizer = None\n\n if use_horovod:\n self.agg = AllReduceAggregationHVD(\n world_size=world_size, use_cuda=use_cuda\n ).agg_grad(by_layer=by_layer)\n else:\n self.agg = AllReduceAggregation(\n world_size=world_size, use_cuda=use_cuda\n ).agg_grad(by_layer=by_layer)\n\n if average_models:\n self.agg_mode = \"avg\"\n else:\n raise NotImplementedError(\"Only average model is supported right now.\")\n\n def set_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n # Flattening master weight\n def initialize_flat_fp32_weight(self):\n \"\"\" Initializes the model's parameters in fp32 and fp16\n\n Returns:\n (torch.Tensor, torch.Tensor): The Parametrs in fp16 and fp32\n \"\"\"\n # Set all gradients to None\n for p in self.fp16_model.parameters():\n p.grad = None\n\n # Count number of parameters per layer\n nelem = 0\n for p in self.fp16_model.parameters():\n nelem += p.numel()\n fp32_params = torch.empty(\n nelem,\n dtype=torch.float32,\n device=torch.device(\"cuda\" if self.use_cuda else \"cpu\"),\n )\n fp16_params = torch.empty(\n nelem,\n dtype=torch.float16,\n device=torch.device(\"cuda\" if self.use_cuda else \"cpu\"),\n )\n\n pointer = 0\n for p in self.fp16_model.parameters():\n nelem = p.numel()\n fp32_params[pointer : pointer + nelem].copy_(p.data.view(-1))\n fp16_params[pointer : pointer + nelem].copy_(p.data.view(-1))\n pointer += nelem\n\n fp32_params = torch.nn.Parameter(fp32_params, requires_grad=True)\n fp32_params.grad = torch.autograd.Variable(\n fp32_params.data.new(*fp32_params.size())\n )\n\n 
fp16_params = torch.nn.Parameter(fp16_params, requires_grad=True)\n fp16_params.grad = torch.autograd.Variable(\n fp16_params.data.new(*fp16_params.size())\n )\n\n return fp16_params, fp32_params\n\n @staticmethod\n def fp16_to_fp32_flat_grad(fp32_params, fp16_model):\n \"\"\" Copies the parameters in `fp16_model` into `fp32_params` in-place\n\n Args:\n fp32_params (torch.Tensor): Parameters in fp32\n fp16_model (torch.nn.Module): Model in fp16\n\n \"\"\"\n pointer = 0\n for p in fp16_model.parameters():\n nelem = p.numel()\n fp32_params.grad.data[pointer : pointer + nelem].copy_(p.grad.data.view(-1))\n pointer += nelem\n\n @staticmethod\n def fp32_to_fp16_grads(fp16_model, fp32_params):\n \"\"\" Copies the parameters in `fp32_params` into `fp16_model` in-place\n\n Args:\n fp16_model (torch.nn.Module): Model in fp16\n fp32_params (torch.Tensor): Parameters in fp32\n\n \"\"\"\n pointer = 0\n for p in fp16_model.parameters():\n nelem = p.numel()\n p.data.view(-1).copy_(fp32_params.data[pointer : pointer + nelem])\n pointer += nelem\n\n def backward_loss(self, loss):\n \"\"\" Scales and performs backward on the given loss\n\n Args:\n loss (torch.nn.Module): The loss\n\n \"\"\"\n loss *= self.loss_scale\n loss.backward()\n\n def step(self, closure=None):\n \"\"\"\n Performs one step of the optimizer.\n Applies loss scaling, computes gradients in fp16, converts gradients to\n fp32, inverts scaling and applies optional gradient norm clipping.\n If gradients are finite, it applies update to fp32 master weights and\n copies updated parameters to fp16 model for the next iteration. If\n gradients are not finite, it skips the batch and adjusts scaling factor\n for the next iteration.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model and returns the loss.\n \"\"\"\n\n scaling_factor = self.loss_scale\n\n # Aggregate gradients\n self.agg(self.fp16_model, self.agg_mode)\n # Cast fp16 params to fp32 for optimizer\n self.fp16_to_fp32_flat_grad(self.fp32_params, self.fp16_model)\n\n if scaling_factor != 1.0:\n self.fp32_params.grad.data /= scaling_factor\n norm = clip_grad_norm_([self.fp32_params], self.grad_clip)\n\n updated = False\n if math.isfinite(norm):\n self.optimizer.step(closure=closure)\n self.fp32_to_fp16_grads(self.fp16_model, self.fp32_params)\n self.since_last_invalid += 1\n updated = True\n else:\n self.loss_scale /= self.dls_downscale\n self.since_last_invalid = 0\n logger.info(f\"Skipped batch, new scale: {self.loss_scale}\")\n\n if self.since_last_invalid >= self.dls_upscale_interval:\n self.loss_scale *= self.dls_upscale\n self.loss_scale = min(self.loss_scale, 8192.0)\n self.since_last_invalid = 0\n\n for p in self.fp16_model.parameters():\n p.grad = None\n\n return updated\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n\nclass FP32Optimizer:\n \"\"\"\n Standard optimizer, computes backward and applies weight update.\n\n Args:\n model (`obj`:torch.nn.Module): model\n world_size (int): Distributed world size\n use_cuda (bool): Use cuda tensors for aggregation\n by_layer (bool): Aggregate by layer\n grad_clip (float): coefficient for gradient clipping, max L2 norm of the gradients\n average_models (bool): Average the models\n \"\"\"\n\n def __init__(\n self,\n model,\n world_size,\n use_cuda=False,\n by_layer=False,\n grad_clip=None,\n average_models=True,\n ):\n self.model = model\n self.grad_clip = grad_clip\n self.optimizer = None\n self.agg = AllReduceAggregation(\n world_size=world_size, use_cuda=use_cuda\n ).agg_grad(by_layer=by_layer)\n if 
average_models:\n self.agg_mode = \"avg\"\n else:\n raise NotImplementedError(\"Only average model is supported right now.\")\n\n def set_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n def step(self, closure=None):\n \"\"\"\n Performs one step of the optimizer.\n \"\"\"\n if self.grad_clip != float(\"inf\"):\n clip_grad_norm_(self.model.parameters(), self.grad_clip)\n\n self.agg(self.model, self.agg_mode)\n self.optimizer.step(closure=closure)\n return True\n\n def backward_loss(self, loss):\n loss.backward()\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n\nclass AMPOptimizer:\n \"\"\"\n Optimizer compatible with AMP.\n Uses AMP to apply loss scaling, computes backward and applies weight\n update.\n\n Args:\n model (`obj`:torch.nn.Module): model\n grad_clip (float): coefficient for gradient clipping, max L2 norm of the gradients\n loss_scale (int): initial loss scale\n dls_upscale_interval (int): interval for loss scale upscaling\n average_models (bool): Average the models\n world_size (int): Distributed world size\n use_cuda (bool): Use cuda tensors for aggregation\n by_layer (bool): Aggregate by layer\n use_horovod (bool): Use Horovod for aggregation\n \"\"\"\n\n def __init__(\n self,\n model,\n grad_clip=None,\n loss_scale=8192,\n dls_upscale_interval=128,\n average_models=True,\n world_size=1,\n use_cuda=False,\n by_layer=False,\n use_horovod=False,\n ):\n self.model = model\n self.grad_clip = grad_clip\n self.optimizer = None\n loss_scaler = amp._amp_state.loss_scalers[0]\n loss_scaler._loss_scale = loss_scale\n loss_scaler._scale_seq_len = dls_upscale_interval\n\n if average_models:\n self.agg_mode = \"avg\"\n else:\n raise NotImplementedError(\"Only average model is supported right now.\")\n\n if use_horovod:\n self.agg = AllReduceAggregationHVD(\n world_size=world_size, use_cuda=use_cuda\n ).agg_grad(by_layer=by_layer)\n else:\n self.agg = AllReduceAggregation(\n world_size=world_size, use_cuda=use_cuda\n ).agg_grad(by_layer=by_layer)\n\n def set_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n def backward_loss(self, loss):\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n\n def step(self, closure=None):\n \"\"\"\n Performs one step of the optimizer.\n \"\"\"\n if self.grad_clip != float(\"inf\"):\n clip_grad_norm_(amp.master_params(self.optimizer), self.grad_clip)\n\n self.agg(self.model, self.agg_mode)\n self.optimizer.step(closure=closure)\n return True\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n"
] | [
[
"torch.nn.utils.clip_grad_norm_",
"torch.device",
"torch.distributed.get_world_size",
"torch.nn.Parameter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yj1990/sec_mmf | [
"72a8c0d5a6aadb4362c07a5606c70e51b08a53cd"
] | [
"secmmf/mmf_data_loader/form_parsers.py"
] | [
"import pandas as pd\nimport bs4 as bs\nimport untangle as ut\nimport requests\nimport urllib.request as rq\nfrom collections import OrderedDict\n\nfrom secmmf.mmf_data_loader.utils import get_edgar_url\n\nclass N_MFP2:\n\n def __init__(self):\n self.select_cols()\n\n def born(self, tag):\n # if tag is a single-node tag contains a navigable string, return a list with that string\n # if tag has multiple element, needs to further born them\n childs = []\n for x in tag:\n if (x != '\\n') & (type(x) != bs.element.Comment):\n childs.append(x)\n return childs\n\n def dive(self, root, surname=''):\n name = surname + root.name\n sons = []\n for son in self.born(root):\n if type(son) == bs.element.NavigableString:\n text = ': '.join([name, son])\n sons.append(text)\n elif type(son) == bs.element.Tag:\n sons.extend(self.dive(son, surname=name + '_'))\n return sons\n\n def teach(self, root):\n sons = []\n for son in self.born(root):\n if len(self.born(son)) == 1:\n sons.append((son.name, son.get_text().replace('\\n', '')))\n elif len(self.born(son)) > 1:\n for grandson in self.born(son):\n sons.append((son.name + '_' + grandson.name,\n grandson.get_text().replace('\\n', '')))\n return sons\n\n def teach_rec(self, root):\n sons = []\n for son in self.born(root):\n if len(self.born(son)) == 1:\n sons.append((son.name, son.get_text().replace('\\n', '')))\n elif len(self.born(son)) > 1:\n sons.append(teach_rec(son))\n return sons\n\n def parse(self, url='https://www.sec.gov/Archives/edgar/data/759667/000070217219000020/primary_doc.xml'):\n\n stubs = self.stubs\n #_tonum = self._tonum\n #series_level_names = self.series_level_names\n #class_level_names = self.class_level_names\n\n source = rq.urlopen(url).read()\n soup = bs.BeautifulSoup(source, 'xml')\n\n # parse XML info into a list of dictionaries\n mmf = []\n for tag in self.born(soup.formData):\n if tag.name in ['classLevelInfo', 'generalInfo', 'seriesLevelInfo']:\n mmf.append((tag.name, self.teach(tag)))\n\n general_series_class = []\n general_series = mmf[0][1] + mmf[1][1]\n\n for i, x in enumerate(general_series):\n if x[0] == 'numberOfSharesOutstanding':\n y = list(x)\n y[0] = 'series_numberOfSharesOutstanding'\n general_series[i] = tuple(y)\n\n for x in mmf[2:]:\n general_series_class.append(OrderedDict(general_series + x[1]))\n\n df = pd.DataFrame(general_series_class)\n if 'nameOfPersonDescExpensePay' in df.columns:\n df.drop(columns='nameOfPersonDescExpensePay', inplace=True)\n\n # rename those columns that have reversed patterns\n namemap = []\n for x in ['weeklyGrossRedemptions', 'weeklyGrossSubscriptions']:\n namemap.append(dict([('fridayWeek' + str(i + 1) + '_' + x,\n x + '_' + 'fridayWeek' + str(i + 1)) for i in range(5)]))\n for x in ['totalValueDailyLiquidAssets', 'percentageDailyLiquidAssets']:\n namemap.append(dict([(x + '_' + 'fridayDay' + str(i + 1),\n x + '_' + 'fridayWeek' + str(i + 1)) for i in range(5)]))\n\n for i in range(4):\n df = df.rename(columns=namemap[i])\n\n # make data wide to long on weekly holding statistics\n df = pd.wide_to_long(df, stubnames=self.stubs,\n i='classesId', j='week', sep='_', suffix='\\w+')\n df.reset_index(inplace=True)\n df['week'] = df['week'].apply(\n lambda x: int(x.replace('fridayWeek', '')))\n\n #df = df[['week']+series_level_names+class_level_names]\n\n # change the type of numeric data to float\n #df[_tonum] = df[_tonum].astype(dtype = float)\n\n return df\n\n def parse_csv(self, url):\n source = get_edgar_url(url).content\n soup = bs.BeautifulSoup(source, 'xml')\n return 
self.dive(soup.formData)\n\n def select_cols(self):\n\n self.stubs = ['totalValueDailyLiquidAssets', 'percentageDailyLiquidAssets',\n 'totalValueWeeklyLiquidAssets', 'percentageWeeklyLiquidAssets',\n 'netAssetValue', 'netAssetPerShare',\n 'weeklyGrossRedemptions', 'weeklyGrossSubscriptions']\n\n self._tonum = ['totalShareClassesInSeries',\n 'averagePortfolioMaturity',\n 'averageLifeMaturity',\n 'cash',\n 'totalValuePortfolioSecurities',\n 'amortizedCostPortfolioSecurities',\n 'totalValueOtherAssets',\n 'totalValueLiabilities',\n 'netAssetOfSeries',\n 'numberOfSharesOutstanding',\n 'stablePricePerShare',\n 'sevenDayGrossYield',\n 'minInitialInvestment',\n 'netAssetsOfClass',\n 'totalForTheMonthReported_weeklyGrossSubscriptions',\n 'totalForTheMonthReported_weeklyGrossRedemptions',\n 'sevenDayNetYield'] + self.stubs\n\n self.series_level_names = ['reportDate',\n 'cik',\n 'seriesId',\n 'totalShareClassesInSeries',\n 'finalFilingFlag',\n 'fundAcqrdOrMrgdWthAnthrFlag',\n 'securitiesActFileNumber',\n 'adviser_adviserName',\n 'adviser_adviserFileNumber',\n 'indpPubAccountant_name',\n 'indpPubAccountant_city',\n 'indpPubAccountant_stateCountry',\n 'administrator',\n 'transferAgent_name',\n 'transferAgent_cik',\n 'transferAgent_fileNumber',\n 'feederFundFlag',\n 'masterFundFlag',\n 'seriesFundInsuCmpnySepAccntFlag',\n 'moneyMarketFundCategory',\n 'fundExemptRetailFlag',\n 'averagePortfolioMaturity',\n 'averageLifeMaturity',\n 'totalValueDailyLiquidAssets',\n 'totalValueWeeklyLiquidAssets',\n 'percentageDailyLiquidAssets',\n 'percentageWeeklyLiquidAssets',\n 'cash',\n 'totalValuePortfolioSecurities',\n 'amortizedCostPortfolioSecurities',\n 'totalValueOtherAssets',\n 'totalValueLiabilities',\n 'netAssetOfSeries',\n 'series_numberOfSharesOutstanding',\n 'stablePricePerShare',\n 'sevenDayGrossYield',\n 'netAssetValue']\n self.class_level_names = ['classesId',\n 'minInitialInvestment',\n 'netAssetsOfClass',\n 'numberOfSharesOutstanding',\n 'netAssetPerShare',\n 'weeklyGrossSubscriptions',\n 'weeklyGrossRedemptions',\n 'totalForTheMonthReported_weeklyGrossSubscriptions',\n 'totalForTheMonthReported_weeklyGrossRedemptions',\n 'sevenDayNetYield',\n 'personPayForFundFlag']\n"
] | [
[
"pandas.wide_to_long",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
wjwainwright/Capstone | [
"a2ea661079ece6ff5008f4399b3f0f6d32c598d3"
] | [
"IsoFit.py"
] | [
"try:\n runCount += 1\nexcept:\n isoIn = False\n clIn = False\n cataIn = False\n closePlots = False\n resultsIn = False\n clusterList = []\n clusters=[]\n isochrones = []\n isoList = []\n catalogue = []\n runCount = 1\n\nclass resultClusterObj:\n def __init__(self,cl):\n import numpy as np\n \n #Automatically populates variables based on those from the cluster it was given, except the data arrays\n global properties\n \n #List of all of the variables defined for the cluster cl, strips out the __functions__\n properties = [a for a in dir(cl) if not a.startswith('_')]\n for prop in properties:\n #Saves all 'number' type variables to the memory of the result cluster object\n if eval(f\"type(cl.{prop})\") == float or eval(f\"type(cl.{prop})\") == np.float64 or eval(f\"type(cl.{prop})\") == int:\n exec(f\"self.{prop} = float(cl.{prop})\")\n elif eval(f\"type(cl.{prop})\") == str:\n exec(f\"self.{prop} = cl.{prop}\")\n \n #Manually defined properties\n self.name = cl.name\n self.clType = cl.clType\n\nclass clusterObj:\n def __init__(self,name='genericCluster',basedir='clusters/',brightThreshold=15):\n #Declare instance variables\n self.basedir = basedir\n self.dataPath = self.basedir + f\"{name}/data/\"\n self.imgPath = self.basedir + f\"{name}/plots/\"\n self.unfilteredWide = []\n self.unfilteredNarrow = []\n self.filtered = []\n self.mag = []\n self.iso = []\n self.condensed = []\n self.condensed0 = []\n self.condensedInit=[]\n self.unfilteredBright = []\n self.filteredBright = []\n self.brightmag = []\n self.distFiltered = []\n self.binaries = []\n self.stars = []\n self.brightThreshold = brightThreshold\n self.mean_par = 0\n self.stdev_par = 0\n self.mean_ra = 0\n self.mean_dec = 0\n self.stdev_ra = 0\n self.stdev_dec = 0\n self.mean_pmra = 0\n self.stdev_pmra = 0\n self.mean_pmdec = 0\n self.stdev_pmdec = 0\n self.mean_a_g = 0\n self.stdev_a_g = 0\n self.mean_e_bp_rp = 0\n self.stdev_e_bp_rp = 0\n self.mean_par_over_ra = 0\n self.stdev_par_over_ra = 0\n self.dist_mod = 0\n self.turnPoint = 0\n self.reddening = 0\n self.radDist = 0\n self.massLoaded = False\n \n #Catalogued properties\n self.name = name\n self.clType = \"None\"\n self.pmra_min = -99\n self.pmra_max = -99\n self.pmdec_min = -99\n self.pmdec_max = -99\n self.par_min = -99\n self.par_max = -99\n self.cltpx = -99\n self.cltpy = -99\n self.noise_cutoff = -99\n \n #Check directory locations\n import os\n if not os.path.isdir(self.dataPath):\n os.mkdir(self.dataPath)\n if not os.path.isdir(self.imgPath):\n os.mkdir(self.imgPath)\n if not os.path.isdir(f\"{self.imgPath}/png\"):\n os.mkdir(f\"{self.imgPath}/png\")\n\n\n#Gaia DR2 Implementation\n# class starObj:\n# def __init__(self,name,ra,ra_err,dec,dec_err,par,par_err,par_over_err,pmra,pmra_err,pmdec,pmdec_err,ra_dec_corr,ra_par_corr,ra_pmra_corr,ra_pmdec_corr,dec_par_corr,dec_pmra_corr,dec_pmdec_corr,par_pmra_corr,par_pmdec_corr,pmra_pmdec_corr,astro_n_obs,astro_n_good_obs,astro_n_bad_obs,astro_gof,astro_chi2,astro_noise,astro_noise_sig,astro_match_obs,astro_sigma5d,match_obs,g_mag,b_mag,r_mag,b_r,b_g,g_r,radvel,radvel_err,variable,teff,a_g,e_bp_rp,lum):\n# #Declare instance variables\n# self.name = name\n# self.ra = float(ra)\n# self.ra_err = float(ra_err)\n# self.dec = float(dec)\n# self.dec_err = float(dec_err)\n# self.par = float(par)\n# self.par_err = float(par_err)\n# self.par_over_err = float(par_over_err)\n# self.pmra = float(pmra)\n# self.pmra_err = float(pmra_err)\n# self.pmdec = float(pmdec)\n# self.pmdec_err = float(pmdec_err)\n# self.ra_dec_corr = 
float(ra_dec_corr)\n# self.ra_par_corr = float(ra_par_corr)\n# self.ra_pmra_corr = float(ra_pmra_corr)\n# self.ra_pmdec_corr = float(ra_pmdec_corr)\n# self.dec_par_corr = float(dec_par_corr)\n# self.dec_pmra_corr = float(dec_pmra_corr)\n# self.dec_pmdec_corr = float(dec_pmdec_corr)\n# self.par_pmra_corr = float(par_pmra_corr)\n# self.par_pmdec_corr = float(par_pmdec_corr)\n# self.pmra_pmdec_corr = float(pmra_pmdec_corr)\n# self.astro_n_obs = float(astro_n_obs)\n# self.astro_n_good_obs = float(astro_n_good_obs)\n# self.astro_n_bad_obs = float(astro_n_bad_obs)\n# self.astro_gof = float(astro_gof)\n# self.astro_chi2 = float(astro_chi2)\n# self.astro_noise = float(astro_noise)\n# self.astro_noise_sig = float(astro_noise_sig)\n# self.astro_match_obs = float(astro_match_obs)\n# self.astro_sigma5d = float(astro_sigma5d)\n# self.match_obs = float(match_obs)\n# self.g_mag = float(g_mag)\n# self.b_mag = float(b_mag)\n# self.r_mag = float(r_mag)\n# self.b_r = float(b_r)\n# self.b_g = float(b_g)\n# self.g_r = float(g_r)\n# self.radvel = float(radvel)\n# self.radvel_err = float(radvel_err)\n# self.variable = variable\n# self.teff = float(teff)\n# self.a_g = float(a_g)\n# self.e_bp_rp = float(e_bp_rp)\n# self.lum = float(lum)\n# self.member = 0\n# self.binary = 0\n# self.radDist = 0\n \n# self.par_over_ra = float(par)/float(ra)\n# self.par_over_dec = float(par)/float(dec)\n# self.par_over_pmra = float(par)/float(pmra)\n# self.par_over_pmdec = float(par)/float(pmdec)\n \n# self.vosaPoints = []\n# self.excess = 0\n\n#Gaia DR3 implementation\nclass starObj:\n def __init__(self,name,source_id,ra,ra_err,dec,dec_err,par,par_err,par_over_err,pmra,pmra_err,pmdec,pmdec_err, #Basic astrometrics\n ra_dec_corr,ra_par_corr,ra_pmra_corr,ra_pmdec_corr,dec_par_corr,dec_pmra_corr,dec_pmdec_corr,par_pmra_corr,par_pmdec_corr,pmra_pmdec_corr, #Correlations\n astro_n_obs,astro_n_good_obs,astro_n_bad_obs,astro_gof,astro_chi2,astro_noise,astro_noise_sig,astro_nu_eff, #Assorted astrometric properties\n pseudocolor,pseudocolor_err,ra_pseudocolor_corr,dec_pseudocolor_corr,par_pseudocolor_corr,pmra_pseudoclor_corr,pmdec_pseudocolor_corr, #Pseudocolor\n astro_sigma5d,duplicated_source, #More assorted properties\n g_flux,g_flux_err,g_mag, #Gaia_G\n b_flux,b_flux_err,b_mag, #Gaia_BP\n r_flux,r_flux_err,r_mag, #Gaia_RP\n b_over_r_excess,b_r,b_g,g_r, #Color indices and excess\n radvel,radvel_err,radvel_num_transits,radvel_teff,radvel_feh, #Template Teff and Fe/H used to calculate the radvel\n l,b,long,lat): #Galactic l and b, ecliptic long and lat\n import numpy as np\n #Declare instance variables\n self.name = name\n self.source_id = source_id\n self.ra = float(ra)\n self.ra_err = float(ra_err)\n self.dec = float(dec)\n self.dec_err = float(dec_err)\n self.par = float(par)\n self.par_err = float(par_err)\n self.par_over_err = float(par_over_err)\n self.pmra = float(pmra)\n self.pmra_err = float(pmra_err)\n self.pmdec = float(pmdec)\n self.pmdec_err = float(pmdec_err)\n \n self.ra_dec_corr = float(ra_dec_corr)\n self.ra_par_corr = float(ra_par_corr)\n self.ra_pmra_corr = float(ra_pmra_corr)\n self.ra_pmdec_corr = float(ra_pmdec_corr)\n self.dec_par_corr = float(dec_par_corr)\n self.dec_pmra_corr = float(dec_pmra_corr)\n self.dec_pmdec_corr = float(dec_pmdec_corr)\n self.par_pmra_corr = float(par_pmra_corr)\n self.par_pmdec_corr = float(par_pmdec_corr)\n self.pmra_pmdec_corr = float(pmra_pmdec_corr)\n \n self.astro_n_obs = float(astro_n_obs)\n self.astro_n_good_obs = float(astro_n_good_obs)\n self.astro_n_bad_obs = 
float(astro_n_bad_obs)\n self.astro_gof = float(astro_gof)\n self.astro_chi2 = float(astro_chi2)\n self.astro_noise = float(astro_noise)\n self.astro_noise_sig = float(astro_noise_sig)\n self.astro_nu_eff = float(astro_nu_eff)\n \n self.astro_sigma5d = float(astro_sigma5d)\n self.duplicated_source = bool(duplicated_source)\n \n self.g_flux = float(g_flux)\n self.g_flux_err = float(g_flux_err)\n self.g_mag = float(g_mag)\n \n self.b_flux = float(b_flux)\n self.b_flux_err = float(b_flux_err)\n self.b_mag = float(b_mag)\n \n self.r_flux = float(r_flux)\n self.r_flux_err = float(r_flux_err)\n self.r_mag = float(r_mag)\n \n self.b_over_r_excess = float(b_over_r_excess)\n self.b_r = float(b_r)\n self.b_g = float(b_g)\n self.g_r = float(g_r)\n \n self.radvel = float(radvel)\n self.radvel_err = float(radvel_err)\n self.radvel_num_transits=float(radvel_num_transits)\n self.radvel_teff = float(radvel_teff)\n self.radvel_feh = float(radvel_feh)\n \n self.l = float(l)\n self.b = float(b)\n self.long = float(long)\n self.lat = float(lat)\n \n self.member = 0\n self.binary = 0\n self.radDist = 0\n \n self.par_over_ra = float(par)/float(ra)\n self.par_over_dec = float(par)/float(dec)\n self.par_over_pmra = float(par)/float(pmra)\n self.par_over_pmdec = float(par)/float(pmdec)\n \n self.normRA = self.ra*np.cos(self.dec*np.pi/180)\n \n self.vosaPoints = []\n self.excess = 0\n\n\n\nclass isochroneObj:\n def __init__(self,age=404,feh=404,afe=404,y=404,basedir='isochrones/',subdir='processed',isodir=''):\n #Declare instance variables\n self.basedir = basedir\n self.subdir = subdir\n self.isodir = isodir\n self.starList = []\n self.age = age\n self.feh = feh\n self.afe = afe\n self.y = y\n self.name = f\"feh_{feh}_afe_{afe}_age_{age}_y_{y}\"\n self.distance = 0\n self.coeff = []\n self.g = []\n self.br = []\n\n\nclass fakeStarObj:\n def __init__(self,g_mag,b_mag,r_mag):\n #Declare instance variables\n self.g_mag = g_mag\n self.b_mag = b_mag\n self.r_mag = r_mag\n self.b_r = self.b_mag-self.r_mag\n self.b_g = self.b_mag-self.g_mag\n self.g_r = self.g_mag-self.r_mag\n self.score = 0\n\nclass mistStar:\n def __init__(self,properties):\n #Declare instance variables\n \n for prop,val in properties:\n if \"inf\" in str(val):\n val = 50\n exec(f\"self.{prop} = {val}\")\n\n\nclass condensedPoint:\n def __init__(self,b_r,g_mag,weight):\n self.b_r = b_r\n self.g_mag = g_mag\n self.weight = weight\n\n\nclass vosaPoint:\n def __init__(self,filterID,wavelength,obs_flux,obs_error,flux,flux_error,excess):\n self.filterID = filterID\n self.wavelength = wavelength\n self.obs_flux = obs_flux\n self.obs_error = obs_error\n self.flux = flux\n self.flux_error = flux_error\n self.excess = excess\n\n\nclass cataloguedCluster():\n def __init__(self,name,clType,pmra_min,pmra_max,pmdec_min,pmdec_max,par_min,par_max,cltpx,cltpy,noise_cutoff):\n #Catalogued properties\n self.name = str(name)\n self.clType = str(clType)\n self.pmra_min = float(pmra_min)\n self.pmra_max = float(pmra_max)\n self.pmdec_min = float(pmdec_min)\n self.pmdec_max = float(pmdec_max)\n self.par_min = float(par_min)\n self.par_max = float(par_max)\n self.cltpx = float(cltpx)\n self.cltpy = float(cltpy)\n self.noise_cutoff = float(noise_cutoff)\n\n\n\n\n\n\nclass Datum:\n from matplotlib import colors as mcolors\n colorin = mcolors.to_rgba(\"red\")\n colorout = mcolors.to_rgba(\"blue\")\n\n def __init__(self, x, y, include=False):\n self.x = x\n self.y = y\n if include:\n self.color = self.colorin\n else:\n self.color = self.colorout\n\n\nclass LassoManager:\n \n\n 
def __init__(self, ax, data, cluster):\n from matplotlib.collections import RegularPolyCollection\n \n self.axes = ax\n self.canvas = ax.figure.canvas\n self.data = data\n self.cluster = cluster\n\n self.Nxy = len(data)\n\n facecolors = [d.color for d in data]\n self.xys = [(d.x, d.y) for d in data]\n self.collection = RegularPolyCollection(\n 6, sizes=(5,),\n facecolors=facecolors,\n offsets=self.xys,\n transOffset=ax.transData)\n\n ax.add_collection(self.collection)\n\n self.cid = self.canvas.mpl_connect('button_press_event', self.on_press)\n\n def callback(self, verts):\n from matplotlib import path\n global coords\n global clusters\n \n cluster = clusters[self.cluster.name]\n \n facecolors = self.collection.get_facecolors()\n p = path.Path(verts)\n ind = p.contains_points(self.xys)\n \n cluster.binaries = []\n \n for i in range(len(self.xys)):\n if ind[i]:\n facecolors[i] = Datum.colorin\n star = cluster.filtered[[a.b_r for a in cluster.filtered].index(self.xys[i][0])]\n cluster.binaries.append(star)\n else:\n facecolors[i] = Datum.colorout\n self.canvas.draw_idle()\n self.canvas.widgetlock.release(self.lasso)\n del self.lasso\n\n def on_press(self, event):\n from matplotlib.widgets import Lasso\n \n if self.canvas.widgetlock.locked():\n return\n if event.inaxes is None:\n return\n self.lasso = Lasso(event.inaxes,\n (event.xdata, event.ydata),\n self.callback)\n # acquire a lock on the widget drawing\n self.canvas.widgetlock(self.lasso)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef clusterCatalogue(types='all'):\n import numpy as np\n import pandas as pd\n global data\n global catalogue\n global cataIn\n \n data = pd.read_csv(\"catalogue.csv\",sep=',',dtype=str)\n data = data.to_numpy(dtype=str)\n cata = []\n for row in data:\n cata.append(cataloguedCluster(*row))\n \n if types == 'all':\n catalogue = cata\n \n cataIn = True\n return\n \n\n\ndef readClusters(cList=[\"M67\"],basedir=\"clusters/\",smRad=0.35):\n #Imports\n import numpy as np\n import pandas as pd\n global clusterList\n global clusters\n global stars\n global clIn\n global catalogue\n \n try:\n if clIn and len(clusterList) > 0:\n for clname in cList:\n if clname in clusters:\n unloadClusters([clname])\n except:\n clusterList=[]\n \n #Check the cluster catalogue to load the catalogued properties\n if not cataIn:\n clusterCatalogue()\n \n #Loop through clusters\n for clname in cList:\n #Create cluster objects\n cluster = clusterObj(name=clname,basedir=basedir)\n \n reference = None\n \n for cl in catalogue:\n if str(cl.name) == str(clname):\n reference = cl\n print(f\"Catalogue match for {clname} found\")\n break\n if reference == None:\n print(f\"Catalogue match for {clname} was not found, please create one\")\n continue\n\n #Filter all of the methods out of the properties list\n properties = [a for a in dir(reference) if not a.startswith('_')]\n print(properties)\n #exec(f\"print(reference.{properties[1]})\")\n #print(properties)\n \n #Now we have a list of all the attributes assigned to the catalogue (the self.variables)\n for p in properties:\n prop = getattr(reference,p)\n #print(prop)\n exec(f\"cluster.{p} = prop\")\n try:\n if prop <= -98:\n print(f\"{clname} does not have a specified catalogue value for {p}\")\n except:\n continue\n \n\n # if cluster.name == 'NGC752' or cluster.name == 'NGC188':\n # cluster.brightThreshold=18\n \n # if \"M67\" in clname:\n # cluster.type = \"open\"\n # if \"M35\" in clname:\n # cluster.type = \"open\"\n # if \"NGC188\" in clname:\n # cluster.type = \"open\"\n # if \"NGC752\" in 
clname:\n # cluster.type = \"open\"\n # if \"IC4651\" in clname:\n # cluster.type = \"open\"\n # if \"NGC2451\" in clname:\n # cluster.type = \"open\"\n # if \"AlphaPer\" in clname:\n # cluster.type = \"open\"\n # if \"M12\" in clname:\n # cluster.type = \"globular\"\n # if \"M3\" in clname:\n # cluster.type = \"globular\"\n # if \"M5\" in clname:\n # cluster.type = \"globular\"\n # if \"M15\" in clname:\n # cluster.type = \"globular\"\n # if \"M53\" in clname:\n # cluster.type = \"globular\"\n # if \"NGC6426\" in clname:\n # cluster.type = \"globular\"\n # if \"NGC6934\" in clname:\n # cluster.type = \"globular\"\n \n \"\"\"\n #Generate wide-field star list\n starlist = np.genfromtxt(cluster.dataPath+\"narrow.csv\", delimiter=\",\", skip_header=1, usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17))\n starlist = preFilter(starlist)\n for s in starlist:\n star = starObj(s[0],s[1],s[2],s[3],s[4],s[5],s[6],s[7],s[8],s[9],s[10],s[11],s[12],s[13],s[14],s[15],s[16],s[17])\n cluster.unfilteredNarrow.append(star) \n \"\"\"\n \n #Generate narrow-field star list\n starlist = pd.read_csv(cluster.dataPath+\"wide.csv\",sep=',',dtype=str)\n stars = pd.read_csv(cluster.dataPath+\"wide.csv\",sep=',',dtype=str)\n starlist = starlist.to_numpy(dtype=str)\n #starlist = np.genfromtxt(cluster.dataPath+\"wide.csv\", delimiter=\",\", skip_header=1)\n print(f\"{clname} initial length: {len(starlist)}\")\n starlist = preFilter(starlist)\n print(f\"{clname} post-prefiltered length: {len(starlist)}\")\n \n ramean = np.mean([float(x) for x in starlist[:,1]])\n decmean = np.mean([float(x) for x in starlist[:,3]])\n \n \n for s in starlist:\n star = starObj(*s)\n cluster.unfilteredWide.append(star)\n \n if np.less_equal(star.g_mag,cluster.brightThreshold):\n cluster.unfilteredBright.append(star)\n \n # if np.less_equal(np.sqrt(((star.ra-ramean)*np.cos(np.pi/180*star.dec))**2+(star.dec-decmean)**2),smRad):\n # cluster.unfilteredNarrow.append(star)\n clusterList.append(cluster)\n calcStats(cluster,mode='narrow')\n \n if not 'YSO' in clname:\n rmOutliers()\n clIn = True\n toDict()\n\n\ndef pad(string, pads):\n spl = string.split(',')\n return '\\n'.join([','.join(spl[i:i+pads]) for i in range(0,len(spl),pads)])\n\n\ndef readIso(basedir='isochrones/',subdir='MIST_raw/'):\n #Important note: The ages are rounded to a few decimal places in the Gyr range\n #This has the effect of making it such that a few dozen isochrones in the kyr range \n #are overwritten because they all round to the same value. 
I found this to be an issue\n #worth overlooking given that a cluster of that age hasn't been identified yet\n \n \n #Imports\n import os\n import re\n \n global isochrone_headers\n global isoList\n global isoIn\n \n path = basedir + subdir\n \n isoList = []\n \n for fn in os.listdir(path):\n \n #Read in file\n main = open(path+fn).read()\n main = main.split(\"\\n\")\n \n #Relevant variables from headers\n N_iso = int(main[7].split(\"=\")[1])\n index = 13\n \n varList = re.sub(\"\\s+\", \",\", main[5].strip()).split(\",\")\n afe = varList[4]\n feh = varList[3]\n y = varList[1]\n z = varList[2]\n v_vcrit = varList[5]\n \n #Column labels\n #Replace any number of spaces with a single comma, then replace a few problematic phrases and split the list by commas\n isochrone_headers = re.sub(\"\\s+\", \",\", main[12].replace(\"2MASS\",\"TwoMASS\").replace(\"[Fe/H]\",\"feh\").strip()).split(\",\")[1:]\n \n for idx in range(0,N_iso):\n N_stars = int(re.sub(\"\\s+\", \",\" , main[index-3].split(\"=\")[1]).split(\",\")[1])\n \n #print(f\"Iso = {idx} N_stars = {N_stars}\")\n \n #Populate a single isochrone\n stars = []\n for i in range(index,index+N_stars):\n #Send the header and values to the mistStar object\n #print(f\"i = {i}\")\n values = [float(a) for a in re.sub(\"\\s+\", \",\" , main[i].strip()).split(\",\")]\n properties = zip(isochrone_headers,values)\n stars.append(mistStar(properties))\n #Create the isochrone from the list of stars\n age = round(10**values[1]/1e9,3)\n iso = isochroneObj(age,feh,afe,y)\n iso.starList = stars\n iso.br = [star.Gaia_BP_EDR3-star.Gaia_RP_EDR3 for star in stars]\n iso.g = [star.Gaia_G_EDR3 for star in stars]\n isoList.append(iso)\n \n index += N_stars + 5\n \n isoIn = True\n toDict()\n \n\n\ndef checkIsoDupes():\n global isochrones\n global isoList\n \n names = []\n for iso in isoList:\n if iso.name in names:\n print(iso.name)\n else:\n names.append(iso.name)\n\n\ndef processIso(basedir='isochrones/',subdir='raw/'):\n #Imports\n import os\n import re\n \n path = basedir + subdir\n \n for fn in os.listdir(path):\n main = open(path+fn).read()\n part = main.split('\\n\\n\\n')\n part[0] = part[0].split('#----------------------------------------------------')[3].split('\\n',1)[1]\n \n for a in range(len(part)):\n temp = part[a].split('#AGE=')[1].split(' EEPS=')[0]\n age = temp.strip()\n \n out = part[a].split('\\n',2)[2]\n out = re.sub(\"\\s+\", \",\", out.strip())\n out = pad(out,8)\n \n filename = f\"{basedir}processed/\"+fn.split('.')[0]+'/'+age+\".csv\"\n \n os.makedirs(os.path.dirname(filename), exist_ok=True) \n with open(filename,\"w\") as f:\n f.write(out)\n\n\ndef readIsochrones(basedir='isochrones/',subdir='processed/'):\n #Imports\n import os\n import numpy as np\n global isoList\n global isoIn\n \n isoList=[]\n \n for folder in os.listdir(basedir+subdir):\n for fn in os.listdir(basedir+subdir+folder):\n \n #Get the age and metallicities of the isochrones\n ageStr = fn.split('.csv')[0]\n fehStr = folder.split('feh')[1].split('afe')[0]\n afeStr = folder.split('afe')[1].split('y')[0]\n if 'y' in folder:\n yStr = folder.split('y')[1]\n else:\n yStr = '0'\n \n feh = float(fehStr[1]+fehStr[2])/10\n afe = float(afeStr[1])/10\n age = float(ageStr)\n y = int(yStr)\n \n if fehStr[0] == 'm':\n feh = feh*-1\n if afeStr[0] == 'm':\n afe = afe*-1\n \n #Debug\n #print(f\"folder:{folder} fn:{fn} fehStr:{fehStr} feh:{feh} afeStr:{afeStr} afe:{afe} ageStr:{ageStr} age:{age}\")\n \n #Create isochone object\n iso = 
isochroneObj(age=age,feh=feh,afe=afe,y=y,basedir=basedir,subdir=subdir,isodir=folder+'/')\n \n isoArr = np.genfromtxt(basedir+subdir+folder+\"/\"+fn, delimiter=\",\")\n for s in isoArr:\n star = fakeStarObj(s[5],s[6],s[7])\n iso.starList.append(star)\n iso.br.append(s[6]-s[7])\n iso.g.append(s[5])\n \n isoList.append(iso)\n isoIn = True\n toDict()\n\ndef preFilter(starList):\n #Imports\n import numpy as np\n \n final = []\n #Columns to be checked for NaN values. If an NaN is present in this column, the entry(star) is discarded from the \"unfiltered\" list\n #2-12 is the astrometry\n #42,45,48 are the g,bp,rp magnitudes\n #50-52 are the color indices\n cols = list(range(2,13))+[42]+[45]+[48]+list(range(50,53))\n \n #Filters out NaN values except for the last two columns\n for n,s in enumerate(starList):\n dump = False\n for c in cols:\n if np.isnan(float(s[c])):\n dump = True\n if not dump:\n final.append(starList[n])\n \n #Reshapes array \n final = np.array(final)\n \n return final\n\ndef rmOutliers():\n #Imports\n global clusterList\n import numpy as np\n \n for cluster in clusterList:\n \n if cluster.clType.lower() == \"globular\":\n scale = 4\n else:\n scale = 1.5\n \n #Variables\n pmthreshold = 5\n pmpthreshold = 50\n parthreshold = 5\n posthreshold = 5\n toRemove=[]\n \n #print(cluster.mean_pmra,cluster.mean_pmdec,cluster.stdev_pmra,cluster.stdev_pmdec)\n #print(len(cluster.unfilteredWide))\n \n #Classifies outliers\n for star in cluster.unfilteredWide:\n if cluster.name == \"NGC188\":\n if star.ra > 100:\n toRemove.append(star)\n #print(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),star.pmra,star.pmdec)\n if np.greater(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),pmthreshold) or np.greater(np.sqrt(((star.ra-cluster.mean_ra)*np.cos(np.pi/180*star.dec))**2+(star.dec-cluster.mean_dec)**2),posthreshold) or np.greater(abs(star.par),parthreshold):\n #if np.greater(np.sqrt((star.pmra-cluster.mean_pmra)**2+(star.pmdec-cluster.mean_pmdec)**2),threshold):\n toRemove.append(star)\n \n #Removes the outliers from the array\n for rm in toRemove:\n cluster.unfilteredWide.remove(rm)\n try:\n cluster.unfilteredNarrow.remove(rm)\n except ValueError:\n pass\n \n #print(len(cluster.unfilteredWide))\n\ndef calcStats(cluster,mode='filtered'):\n #Imports\n import numpy as np\n \n #Reads in all the values for a cluster\n par=[]\n par_err=[]\n ra=[]\n dec=[]\n pmra=[]\n pmdec=[]\n gmag = []\n br = []\n # a_g=[]\n # e_bp_rp=[]\n \n loopList=[]\n \n checkLoaded([cluster])\n \n if type(cluster) == str:\n cluster = clusters[cluster]\n \n if mode == 'bright':\n loopList = cluster.filteredBright\n elif mode == 'narrow':\n loopList = cluster.unfilteredNarrow\n elif mode == 'filtered':\n loopList = cluster.filtered\n \n for star in loopList:\n par.append(star.par)\n par_err.append(star.par_err)\n pmra.append(star.pmra)\n pmdec.append(star.pmdec)\n ra.append(star.ra)\n dec.append(star.dec)\n gmag.append(star.g_mag)\n br.append(star.b_r)\n \n # if not np.isnan(star.a_g) and not star.a_g == 0:\n # a_g.append(star.a_g)\n # if not np.isnan(star.e_bp_rp) and not star.e_bp_rp == 0:\n # e_bp_rp.append(star.e_bp_rp)\n \n #Calculate the statistics\n cluster.mean_par = np.mean(par[:])\n cluster.mean_ra = np.mean(ra[:])\n cluster.mean_dec = np.mean(dec[:])\n cluster.stdev_ra = np.std(ra[:])\n cluster.stdev_dec = np.std(dec[:])\n cluster.stdev_par = np.std(par[:])\n cluster.mean_pmra = np.mean(pmra[:])\n 
cluster.stdev_pmra = np.std(pmra[:])\n cluster.mean_pmdec = np.mean(pmdec[:])\n cluster.stdev_pmdec = np.std(pmdec[:])\n # cluster.mean_a_g = np.mean(a_g[:])\n # cluster.stdev_a_g = np.std(a_g[:])\n # cluster.mean_e_bp_rp = np.mean(e_bp_rp[:])\n # cluster.stdev_e_bp_rp = np.std(e_bp_rp[:])\n cluster.mean_par_over_ra = np.mean([x/y for x,y in zip(par,ra)])\n cluster.stdev_par_over_ra = np.std([x/y for x,y in zip(par,ra)])\n cluster.mean_par_err = np.mean(par_err[:])\n \n cluster.dist_mod = 5*np.log10(1000/cluster.mean_par)-5\n \n for star in loopList:\n star.radDist = np.sqrt((star.ra-cluster.mean_ra)**2+(star.dec-cluster.mean_dec)**2)\n star.normRadDist = np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-cluster.mean_ra*np.cos(cluster.mean_dec*np.pi/180))**2+(star.dec-cluster.mean_dec)**2)\n\n\ndef saveClusters(cList):\n #Imports\n import dill\n \n saveResults(cList)\n #Creates a pickle file with all of the saved instances\n for cl in cList:\n cluster = clusters[cl]\n #print(cluster.name,id(cluster))\n with open(f\"{cluster.dataPath}filtered.pk1\", 'wb') as output:\n dill.dump(cluster, output)\n\n\ndef saveIsochrones():\n #Imports\n import dill\n global clusterList\n \n #Creates a pickle file with all of the saved instances\n for iso in isoList:\n with open(f\"{iso.basedir}pickled/{iso.name}.pk1\", 'wb') as output:\n dill.dump(iso, output)\n\n \ndef loadClusters(clusterNames=[\"M67\"],basedir='clusters/'):\n #Imports\n import dill\n global clusterList\n global clusters\n global clIn\n \n for clusterName in clusterNames:\n if clusterName in clusters:\n unloadClusters([clusterName])\n #Reads in instances from the saved pickle file\n with open(f\"{basedir}{clusterName}/data/filtered.pk1\",'rb') as input:\n cluster = dill.load(input)\n clusterList.append(cluster)\n clIn = True\n toDict()\n\n\ndef loadIsochrones(basedir='isochrones/'):\n #Imports\n import dill\n import os\n global isoList\n global isoIn\n \n isoList=[]\n \n for fn in os.listdir(basedir+\"pickled/\"):\n #Reads in instances from the saved pickle file\n with open(f\"{basedir}pickled/{fn}\",'rb') as input:\n iso = dill.load(input)\n isoList.append(iso)\n isoIn = True\n toDict()\n\n\ndef unloadClusters(cList=['all']):\n #Imports\n global clusterList\n global clusters\n \n if 'all' in cList:\n cList = [cluster.name for cluster in clusterList]\n \n for cl in cList:\n cluster = clusters[cl]\n \n clusterList.remove(cluster)\n clusters.pop(cl)\n del cluster\n \n\ndef dataProcess(cList,load=False,fit=True,unload=True,plotting=True,member=True,save=True,close=True):\n #This method is largely intended for re-processing a bulk batch of clusters that have already been processed before,\n #meaning they already have condensed point lists or you are already aware of their fitting quality\n \n #Imports\n import matplotlib.pyplot as plt\n global clusterList\n global clusters\n global closePlots\n \n if not isoIn:\n loadIsochrones()\n \n \n loadList = [\"M15\",\"M12\",\"M39\",\"M46\",\"M67\",\"NGC188\",\"NGC2355\",\"NGC2158\",\"IC4651\",\"NGC6791\",\"NGC2360\",\"NGC2204\"]\n \n for cl in cList:\n \n if cl in loadList:\n condensing = \"load\"\n else:\n condensing = \"auto\"\n \n if load:\n loadClusters([cl])\n else:\n readClusters([cl])\n turboFilter([cl])\n \n if close:\n plt.close('all') \n \n \n if fit:\n turboFit([cl],condensing=condensing)\n if plotting:\n plot([cl],['pos','pm','cmd','quiver','iso'])\n if close:\n plt.close('all') \n \n if member:\n proxyMatch([cl])\n boundedStats([cl],saveCl=False,unloadCl=False)\n membership(cl,mode='filtered')\n 
membership(cl,mode='bounded',N=75)\n plt.close('all')\n \n if save:\n saveClusters([cl])\n saveResults([cl])\n if unload:\n unloadClusters([cl])\n \n\n\n\ndef turboFilter(cl=[\"all\"]):\n #Imports\n global clusterList\n \n cList = checkLoaded(cl)\n \n for clus in cList:\n cluster = clusters[clus]\n \n cluster.filteredBright,cluster.brightmag = pmFilter(cluster.unfilteredBright,cluster.name)\n print(f\"==========================={cluster.name}===========================\")\n print(f\"bright unf/pm fil: {len(cluster.unfilteredBright)} / {len(cluster.filteredBright)}\")\n calcStats(cluster,mode='bright')\n distFilter(cluster)\n print(f\"dist(all): {len(cluster.distFiltered)}\")\n cluster.filtered,cluster.mag = pmFilter(cluster.distFiltered,cluster.name)\n \n \n #Manual filtering of extraneous points\n cluster.filtered,cluster.mag = manualFilter(cluster)\n \n \n print(f\"pm(all): {len(cluster.filtered)}\")\n \n customPlot('b_r','g_mag',cluster.name,'filtered',iso=True,square=False,color='astro_sigma5d')\n \n magnitude = cutNoise(cluster)\n print(f\"noise cutoff: mag {magnitude} length {len(cluster.filtered)}\")\n \n customPlot('b_r','g_mag',cluster.name,'filtered',iso=True,square=False,color='astro_sigma5d')\n \n \"\"\"\n for i in range(10):\n print(f\"{cluster.filtered[i].b_r} {cluster.mag[i,0]}\")\n \"\"\"\n \n calcStats(cluster,mode='filtered')\n setFlag()\n\n\ndef manualFilter(cluster):\n #This exists to remove any points that may or may not be relevant to the cluster but are prohibiting the fit from happening\n \n if \"M35\" in cluster.name:\n filtered = [star for star in cluster.filtered if star.g_mag > 9 or star.b_r < 1]\n return filtered,magList(filtered)\n else:\n return cluster.filtered,cluster.mag\n\ndef magList(filtered):\n import numpy as np\n \n mag = np.empty((0,2))\n \n for star in filtered:\n mag = np.r_[mag,[[star.b_r,star.g_mag]]]\n\n\ndef pmFilter(starList,name):\n #Imports\n import numpy as np\n \n filtered = []\n mag = np.empty((0,2))\n cluster = clusters[name]\n assert cluster.name == name\n \n #Apply an elliptical filter to the proper motion space \n pmra_width = (cluster.pmra_max-cluster.pmra_min)/2\n pmdec_width = (cluster.pmdec_max-cluster.pmdec_min)/2\n pmra_center = cluster.pmra_min+pmra_width\n pmdec_center = cluster.pmdec_min+pmdec_width\n \n print(pmra_center,pmdec_center)\n \n for star in starList:\n if (star.pmra-pmra_center)**2/pmra_width**2 + (star.pmdec-pmdec_center)**2/pmdec_width**2 <= 1:\n filtered.append(star)\n mag = np.r_[mag,[[star.b_r,star.g_mag]]]\n \n assert len(filtered) > 1\n print(len(filtered))\n \n return filtered,mag\n\n\ndef distFilter(cluster):\n #Imports\n import numpy as np\n \n \n if cluster.par_min == 0 or cluster.par_max == 0:\n threshold = 1.5*cluster.mean_par\n \n print(f\"{cluster.name} filtered using mean parallax\")\n for star in cluster.unfilteredWide:\n if not np.greater(np.abs(star.par-cluster.mean_par),threshold*cluster.stdev_par):\n cluster.distFiltered.append(star)\n else:\n print(f\"{cluster.name} filtered using min & max parallax values\")\n for star in cluster.unfilteredWide:\n if star.par > cluster.par_min and star.par < cluster.par_max:\n cluster.distFiltered.append(star)\n\n\n\ndef cutNoise(cluster):\n #Imports\n import numpy as np\n \n stars = sorted(cluster.filtered,key=lambda x: x.g_mag)\n new = []\n newMag = np.empty((0,2))\n \n if cluster.noise_cutoff <= -98:\n threshold = 1\n print(f\"{cluster.name} noise cutoff undefined, using default\")\n else:\n threshold = cluster.noise_cutoff\n \n bad = 0\n badCut = 5\n 
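#Walk the stars from brightest to faintest; stars whose astro_sigma5d exceeds the threshold are skipped,
#and after badCut such stars the scan stops, dropping everything fainter. The g magnitude at the cut is returned.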
for i,s in enumerate(stars):\n if s.astro_sigma5d > threshold:\n bad += 1\n if bad >= badCut:\n break\n else:\n new.append(s)\n newMag = np.r_[newMag,[[s.b_r,s.g_mag]]]\n \n cluster.filtered = new\n cluster.mag = newMag\n return s.g_mag\n\n\ndef turboFit(cl=[\"all\"],condensing='auto',weighting='pos',tp=\"catalogue\",minScore=0.001):\n #Typical use cases are auto, pos, catalogue --OR-- manual, equal, catalogue\n #Imports\n import time\n global clusterList\n \n cList = checkLoaded(cl)\n \n print(\"=========================Fitting=========================\")\n t0 = time.time()\n \n status = condense(cList,condensing,weighting,tp,minScore)\n if status == \"Suspended\":\n return\n \n for cluster in cList:\n redFitting(cluster,minScore,weighting)\n \n \n t1 = time.time()\n \n print(f\"Total {cluster.name} fit runtime: {t1-t0} seconds\")\n \n\n\ndef redFitting(cluster,minScore,weighting):\n #Imports\n import numpy as np\n import math\n from sys import stdout\n from time import sleep\n global clusterList\n \n if type(cluster) == str:\n cluster = clusters[cluster]\n \n cluster.iso = []\n \n redMin = 0\n redMax = 0.7\n step = 0.05\n \n redList = [round(x,2) for x in np.arange(redMin,redMax+step,step)]\n \n for reddening in redList:\n stdout.write(f\"\\rCurrent reddening value for {cluster.name}: {reddening:.2f} / ({redList[0]:.2f}->{redList[-1]:.2f})\")\n shapeFit(cluster,reddening,minScore,weighting)\n stdout.flush()\n sleep(0.1)\n \n cluster.iso = sorted(cluster.iso,key=lambda x: x[1])\n best = float(cluster.iso[0][2])\n \n print(f\"\\nCoarse-step reddening for {cluster.name}: {best}\")\n \n subMin = best - 0.05\n subMax = best + 0.05\n substep = 0.01\n \n if subMin < 0:\n subMin = 0\n \n subList = [round(x,2) for x in np.arange(subMin,subMax+substep,substep) if not round(x,2) in redList and round(x,2) > subMin and round(x,2) < subMax]\n \n for reddening in subList:\n stdout.write(f\"\\rCurrent fine-step reddening value for {cluster.name}: {reddening:.2f} / ({subList[0]:.2f}->{subList[-1]:.2f})\")\n shapeFit(cluster,reddening,minScore,weighting)\n stdout.flush()\n sleep(0.1)\n \n cluster.iso = sorted(cluster.iso,key=lambda x: x[1])\n \n cluster.reddening = float(cluster.iso[0][2])\n cluster.fit_age = float(isochrones[cluster.iso[0][0]].age)\n cluster.fit_feh = float(isochrones[cluster.iso[0][0]].feh)\n cluster.fit_afe = float(isochrones[cluster.iso[0][0]].afe)\n cluster.fit_y = float(isochrones[cluster.iso[0][0]].y)\n \n #Unrelated properties but I needed somewhere to assign them\n setattr(cluster,'meanDist',1000/cluster.mean_par)\n \n meanL = np.mean([a.l*np.pi/180 for a in cluster.filtered])\n galDist = 8000 #pc\n gd = cluster.meanDist**2 + galDist**2 - 2*cluster.meanDist*galDist*np.cos(meanL)\n setattr(cluster,'meanGalacticDist',gd**0.5)\n \n print(f\"\\nReddening for {cluster.name}: {best}\")\n\n\ndef shapeFit(cluster,reddening,minScore,weighting):\n #Imports\n import numpy as np\n import shapely.geometry as geom\n global isoList\n \n \n conversion = 2.1\n \n isoFitList = np.empty((0,3))\n for iso in isoList:\n isoLine = geom.LineString(tuple(zip([x+reddening for x in iso.br],[x+cluster.dist_mod+conversion*reddening for x in iso.g])))\n dist = []\n for star in cluster.condensed:\n starPt = geom.Point(star.b_r,star.g_mag)\n #print(starPt.distance(isoLine))\n pointDist = np.abs(starPt.distance(isoLine))*star.weight\n if pointDist < minScore*star.weight:\n pointDist = minScore*star.weight\n dist.append(pointDist**2)\n isoScore = np.sum(dist[:])\n #print(isoScore,dist)\n 
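#isoScore is the sum over condensed points of (weight * max(|distance to isochrone|, minScore))**2, so lower scores mean better fits.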
#print(list(geom.shape(isoLine).coords))\n isoFitList = np.r_[isoFitList,[[iso.name,float(isoScore),float(reddening)]]]\n #compareInstances(iso,cluster.iso[-1][0])\n #print(isoScore)\n cluster.iso.extend(isoFitList)\n #best = cluster.iso[1][0]\n #specificPlot(cluster.name,best.name,reddening)\n #print(f\"\\nFirst point of best fit: {best.br[0]+reddening},{best.g[0]+conversion*reddening+cluster.dist_mod}\")\n\n \ndef onclick(x,y,fig,ax,cluster,minScore,weighting,newList):\n def func(event):\n import matplotlib.pyplot as plt\n global coords\n \n ix, iy = event.xdata, event.ydata\n \n if str(event.button) == \"MouseButton.RIGHT\":\n for i,(cx,cy) in enumerate(coords):\n if abs(ix-cx) <= 0.075 and abs(iy-cy) <= 0.25:\n coords.pop(i)\n ax.clear()\n ax.scatter(x,y,s=0.5,color='dimgray')\n ax.invert_yaxis()\n ax.scatter([a[0] for a in coords],[a[1] for a in coords],c='red',s=10)\n plt.gcf().canvas.draw_idle()\n \n if str(event.button) == \"MouseButton.LEFT\":\n coords.append((ix, iy))\n ax.scatter(ix,iy,c='red',s=10)\n plt.gcf().canvas.draw_idle()\n \n if str(event.button) == \"MouseButton.MIDDLE\":\n fig.canvas.mpl_disconnect(cid)\n plt.close(fig)\n updateCondensed(cluster,minScore,weighting,newList)\n \n if len(coords) >= 100:\n fig.canvas.mpl_disconnect(cid)\n plt.close(fig)\n updateCondensed(cluster,minScore,weighting,newList)\n \n \n return\n return func\n\n\ndef updateCondensed(cluster,minScore,weighting,newList):\n #Imports\n import numpy as np\n global coords\n \n condensed = []\n for point in coords:\n if cluster.clType.lower() == \"globular\" or weighting.lower() == \"equal\":\n weight = 1\n else:\n #Automatic weighting scheme currently unsupported for manual condensed point definition,\n #but the framework is here to be able to insert it without having to worry about it being\n #passed around from function to function\n weight = 1\n condensed.append(condensedPoint(point[0],point[1],weight))\n \n if cluster.reddening == 0:\n cluster.condensed0 = condensed\n cluster.condensed = condensed\n \n np.savetxt(f\"{cluster.dataPath}condensed.csv\",coords,delimiter=',')\n \n redFitting(cluster,minScore,weighting)\n if len(newList) > 0:\n turboFit(newList,'manual',weighting,'catalogue',minScore)\n return\n\n\ndef find_nearest(array, value):\n #Imports\n import numpy as np\n \n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return array[idx]\n\n\ndef testCluster(name='feh_0.00_afe_0.00_age_0.141_y_0.2703'):\n #Imports\n import numpy as np\n global clusterList\n global clIn\n \n iso = isochrones[name]\n test = clusterObj('test')\n filtered = [starObj('fake',0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,a.Gaia_G_EDR3,0,0,0,0,0,0,0,a.Gaia_BP_EDR3-a.Gaia_RP_EDR3,0,0,0,0,0,0,0,0,0,0,0) for a in iso.starList]\n test.filtered = filtered\n \n mag = np.empty((0,2))\n for star in test.filtered:\n mag = np.r_[mag,[[star.b_r,star.g_mag]]]\n test.mag = mag\n \n if not 'test' in clusters:\n clusterList.append(test)\n else:\n idx = clusterList.index(clusters['test'])\n clusterList.pop(idx)\n clusterList.append(test)\n clIn = True\n toDict()\n\ndef condense(cList,condensing,weighting,tp,minScore=0.001):\n #Imports\n import numpy as np\n global isoList\n global mag\n \n \n for cluster in cList:\n \n if type(cluster) == str:\n cluster = clusters[cluster]\n cList[cList.index(cluster.name)] = cluster\n \n \n #Creates mag arrays to be used in place of the filtered star objects\n mag = cluster.mag[:,:]\n mag[mag[:,1].argsort()]\n gmag = list(mag[:,1])\n gmin = 
mag[0,1]\n gmax = mag[-1,1]\n div = 50\n seg = (gmax-gmin)/div\n minpoints = 1\n \n #The array that will become the condensed points list\n condensed = np.empty((0,3))\n turnPoints = []\n \n \n if condensing.lower() == \"load\":\n global pts\n pts = np.genfromtxt(f\"{cluster.dataPath}condensed.csv\",delimiter=',')\n condensed = []\n for point in pts:\n #Missing alternate weighting schemes, but can be imlemented *here*\n condensed.append(condensedPoint(point[0],point[1],1))\n cluster.condensed = condensed\n cluster.condensed0 = condensed\n continue\n \n #Manual point definition\n if condensing.lower() == \"manual\":\n import matplotlib.pyplot as plt\n global cid\n global coords\n coords = []\n \n if len(cList) == 1:\n newList = []\n else:\n newList = cList[cList.index(cluster)+1:]\n \n x,y = mag[:,0],mag[:,1]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(x,y,s=0.25,color='dimgray')\n ax.invert_yaxis()\n \n hook = onclick(x,y,fig,ax,cluster,minScore,weighting,newList)\n cid = fig.canvas.mpl_connect('button_press_event', hook) \n \n return \"Suspended\"\n \n \n \n \n \n \n #Vertically stacked slices in brightness\n for i in range(div):\n sliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]\n #print(np.array(sliced).shape)\n \n #Skip forseen problems with empty arrays\n if len(sliced) < minpoints:\n continue\n condensed = np.r_[condensed,[[np.median(sliced[:,0]),np.median(sliced[:,1]),0]]]\n \n condensed = condensed[::-1]\n \n \n \n #Uses defined turning points in the cluster catalogue\n if tp.lower() == \"catalogue\":\n if cluster.cltpx <= -98 and cluster.cltpy <= -98:\n tp == \"auto\"\n \n #If no turning point is found, or auto is specified, then this section of code\n #attempts to find the turning point through steep gradient changes in the main sequence\n if tp.lower() == \"auto\":\n #Criteria for the line that forms the basis of the gradient change method\n start = 4\n end = 11\n theta_crit = 5\n \n #Creates a slope-intercept fit for the lower main sequence\n basex = [a[0] for a in condensed[start:end]]\n basey = [a[1] for a in condensed[start:end]]\n base = np.polyfit(basex,basey,1)\n \n #Travels up the main sequence\n for i,point in enumerate(condensed):\n if i == start:\n continue\n #Creates a fit line between the start point and the current point\n x = [point[0],condensed[start,0]]\n y = [point[1],condensed[start,1]]\n lin = np.polyfit(x,y,1)\n \n #Calculates an angle between the new line and the lower main sequence\n point[2] = 180/np.pi*np.arctan(abs( (base[0]-lin[0])/(1+base[0]*lin[0]) ))\n \n #If the angle between the two lines is large enough, the point is considered\n #to be a candidate turning point, and is appended to the list of candidates\n if point[2] > theta_crit and i > end:\n turnPoints.append(point)\n \n \n #Analysis plot showing the theta value for each condensed point\n import matplotlib.pyplot as plt\n plt.figure()\n plt.scatter(condensed[:,0],condensed[:,1],c=condensed[:,2])\n plt.set_cmap('brg')\n plt.gca().invert_yaxis()\n clb = plt.colorbar()\n clb.ax.set_title(\"Theta\")\n plt.savefig(f'condensed_{cluster.name}')\n \n #If no automatic turning point is found, ends the method here\n if len(turnPoints) == 0:\n print(\"No turning point identified for {cluster.name}\")\n return\n else:\n #Identifies the proper turning point as a 5% color offset of the dimmest turning point candidate\n turnPoints = sorted(turnPoints,key=lambda x: x[1])\n tp = turnPoints[-1]\n tp[0] = tp[0] - 0.05*np.abs(tp[0])\n 
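#Store the auto-detected turning point (already nudged ~5% bluer above; plotted later as '95% of Turning Point').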
cluster.turnPoint = tp\n \n #Stores the condensed point list\n cl = []\n for point in condensed:\n cl.append(condensedPoint(point[0],point[1],point[2]))\n \n cluster.condensedInit = cl\n # [ B-R , G , Theta ]\n print(f\"{cluster.name} Turning Point: {cluster.turnPoint}\")\n \n \n \n \n \n \n \n #Assuming the undefined catch for manual would be caught the first time around\n if tp.lower() == \"catalogue\":\n cluster.turnPoint = [cluster.cltpx,cluster.cltpy]\n \n if cluster.clType.lower() == \"open\":\n #Recalc with the turnPoint limit enforced - Ignore blue stragglers\n condensed = np.empty((0,3))\n condensed_giant = np.empty((0,3))\n yList = []\n \n #Vertically stacked slices in brightness\n for i in range(div):\n rawSliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]\n \n sliced = np.empty((0,2))\n sliced_giant = np.empty((0,2))\n for point in rawSliced:\n #print(point)\n if point[0] >= cluster.turnPoint[0]:\n sliced = np.r_[sliced,[[point[0],point[1]]]]\n else:\n sliced_giant = np.r_[sliced_giant,[[point[0],point[1]]]]\n \n #Skip forseen problems with empty arrays\n if len(sliced) > 0:\n x = np.median(sliced[:,0])\n y = np.median(sliced[:,1])\n yList.append(y)\n condensed = np.r_[condensed,[[x,y,1]]]\n if len(sliced_giant) > 3:\n xg = np.median(sliced_giant[:,0])\n yg = np.median(sliced_giant[:,1])\n condensed_giant = np.r_[condensed_giant,[[xg,yg,1]]]\n \n \n #New turning point found from the reduced data set\n newTP = find_nearest(yList,cluster.turnPoint[1])\n \n index = 0\n \n for i,point in enumerate(condensed):\n if newTP == point[1]:\n index = i\n #print(f\"{point} found to be TP\")\n break\n assert not index == 0\n \n \n #Binary star list\n tpcut = index + 3\n \n xset = condensed[tpcut:-1,0]\n yset = condensed[tpcut:-1,1]\n #print(cluster.name,yset)\n fit = np.polyfit(xset,yset,1)\n \n #Distance from the main sequence linear fit\n for star in cluster.filtered: \n x0 = star.b_r\n y0 = star.g_mag\n dist = abs( y0 - fit[0]*x0 - fit[1] ) / np.sqrt(fit[0]**2 + 1)\n star.distance_MS = dist\n \n if dist > 0.05 and y0 < fit[0]*x0+fit[1] and x0 > xset[0] and y0 > condensed[index,1]:\n cluster.binaries.append(star)\n star.binary = 1\n else:\n star.binary = 0\n \n \n \n \n #Fit weight parameters\n N = len(condensed)\n beta = -2\n \n index = index - 7\n \n for i,point in enumerate(condensed):\n #point[2] = 5/(1+np.abs(index-i))\n if weighting.lower() == 'pos':\n point[2] = np.exp(beta*((i-index)/N)**2)\n \n \n # if cluster.type == \"globular\":\n # condensed = np.vstack((condensed,condensed_giant))\n \n condensed = condensed[::-1]\n \n\n \n cl = []\n coords = []\n for point in condensed:\n cl.append(condensedPoint(point[0],point[1],point[2]))\n coords.append((point[0],point[1]))\n \n np.savetxt(f\"{cluster.dataPath}condensed.csv\",coords,delimiter=',')\n \n if cluster.reddening == 0:\n cluster.condensed0 = cl\n cluster.condensed = cl\n \n\n# def checkLoaded(cList):\n \n# needsLoading = []\n# loaded = []\n \n# for cl in cList:\n# if not cl in clusters:\n# needsLoading.append(cl)\n# else:\n# loaded.append(cl)\n \n# return loaded,needsLoading()\n \n\n\ndef toDict():\n #Imports\n global clusterList\n global clusters\n global isoList\n global isochrones\n global resultList\n global results\n global clIn\n global isoIn\n global resultsIn\n \n if clIn:\n clName = []\n \n for cluster in clusterList:\n clName.append(cluster.name)\n clusters = dict(zip(clName,clusterList))\n \n if isoIn:\n \n isoName = []\n \n for iso in isoList:\n 
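#Mirror isoList into the name-keyed isochrones dict, matching what is done for clusters above.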
isoName.append(iso.name)\n isochrones = dict(zip(isoName,isoList))\n \n if resultsIn:\n resName=[]\n \n for res in resultList:\n resName.append(res.name)\n results = dict(zip(resName,resultList))\n\n\ndef plot(cList=['all'],modes=['pos','pm','cmd','quiver','iso'],closePlots=False):\n #Imports\n import matplotlib.pyplot as plt\n from matplotlib.patches import Rectangle\n import numpy as np\n import os\n global clusterList\n \n cList = checkLoaded(cList)\n \n for cl in cList:\n \n cluster = clusters[cl]\n \n if not os.path.isdir(f\"{cluster.imgPath}/png\"):\n os.mkdir(f\"{cluster.imgPath}/png\")\n \n #Position plots\n if 'pos' in modes:\n \n unfra=[star.ra for star in cluster.unfilteredWide]\n unfdec=[star.dec for star in cluster.unfilteredWide]\n ra=[star.ra for star in cluster.filtered]\n dec=[star.dec for star in cluster.filtered]\n \n unfnormra=[star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide]\n normra=[star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered]\n \n #Unfiltered position plot\n plt.figure(f\"{cluster.name}_ra_dec_unfiltered\")\n plt.xlabel('RA (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Unfiltered\")\n plt.scatter(unfra[:],unfdec[:],s=0.5,c='dimgray')\n plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered.png\",dpi=500)\n \n #Filtered position plot\n plt.figure(f\"{cluster.name}_ra_dec_filtered\")\n plt.xlabel('RA (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Filtered\")\n plt.scatter(ra[:],dec[:],s=0.5,c='midnightblue')\n plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_filtered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_filtered.png\",dpi=500)\n \n #Position overlay\n plt.figure(f\"{cluster.name}_ra_dec_overlay\")\n plt.xlabel('RA (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Overlay\")\n plt.scatter(unfra[:],unfdec[:],s=0.5,c='lightgray')\n plt.scatter(ra[:],dec[:],s=1,c='midnightblue')\n plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_overlay.png\",dpi=500)\n \n \n #Normalized\n #NormRA = RA*cos(DEC)\n \n #Unfiltered normalized position plot\n plt.figure(f\"{cluster.name}_ra_dec_unfiltered_normalized\")\n plt.xlabel('RA*cos(DEC) (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Unfiltered Normalized\")\n plt.scatter(unfnormra[:],unfdec[:],s=0.5,c='dimgray')\n #plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_normalized.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_normalized.png\",dpi=500)\n \n #Filtered normalized position plot\n plt.figure(f\"{cluster.name}_ra_dec_filtered_normalized\")\n plt.xlabel('RA*cos(DEC) (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Filtered Normalized\")\n plt.scatter(normra[:],dec[:],s=0.5,c='midnightblue')\n #plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_filtered_normalized.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_filtered_normalized.png\",dpi=500)\n \n #Position overlay normalized\n plt.figure(f\"{cluster.name}_ra_dec_overlay_normalized\")\n plt.xlabel('RA*cos(DEC) (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Overlay Normalized\")\n plt.scatter(unfnormra[:],unfdec[:],s=0.5,c='lightgray')\n 
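#Filtered members drawn on top of the unfiltered field in the RA*cos(DEC) frame.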
plt.scatter(normra[:],dec[:],s=1,c='midnightblue')\n #plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_overlay_normalized.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_overlay_normalized.png\",dpi=500)\n \n #Proper motion plots\n if 'pm' in modes:\n \n unfpmra=[star.pmra for star in cluster.unfilteredWide]\n unfpmdec=[star.pmdec for star in cluster.unfilteredWide]\n pmra=[star.pmra for star in cluster.filtered]\n pmdec=[star.pmdec for star in cluster.filtered]\n \n unfpara=[star.par for star in cluster.unfilteredWide]\n para=[star.par for star in cluster.filtered]\n \n x0 = cluster.pmra_min\n x1 = cluster.pmra_max\n y0 = cluster.pmdec_min\n y1 = cluster.pmdec_max\n width = x1-x0\n scale = 5\n subscale = 2\n xmin = x0-scale*width\n xmax = x1+scale*width\n ymin = y0-scale*width\n ymax = y1+scale*width\n sxmin = x0-subscale*width\n sxmax = x1+subscale*width\n symin = y0-subscale*width\n symax = y1+subscale*width\n \n \n #Unfiltered proper motion plot\n plt.figure(f\"{cluster.name}_pm_unfiltered\")\n plt.xlabel(r'PMRA ($mas*yr^{-1}$)')\n plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')\n plt.title(f\"{cluster.name} Unfiltered\")\n plt.scatter(unfpmra[:],unfpmdec[:],s=0.5,c='dimgray')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_unfiltered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_unfiltered.png\",dpi=500)\n plt.xlim([sxmin,sxmax])\n plt.ylim([symin,symax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_unfiltered_closeup.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_unfiltered_closeup.png\",dpi=500)\n \n #Filtered proper motion plot\n plt.figure(f\"{cluster.name}_pm_filtered\")\n plt.xlabel(r'PMRA ($mas*yr^{-1}$)')\n plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')\n plt.title(f\"{cluster.name} Filtered\")\n plt.scatter(pmra[:],pmdec[:],s=0.5,c='midnightblue')\n # plt.xlim([xmin,xmax])\n # plt.ylim([ymin,ymax])\n plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_filtered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_filtered.png\",dpi=500)\n \n #Proper motion overlay\n plt.figure(f\"{cluster.name}_pm_overlay\")\n plt.xlabel(r'PMRA ($mas*yr^{-1}$)')\n plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')\n plt.title(f\"{cluster.name} Overlay\")\n plt.scatter(unfpmra[:],unfpmdec[:],s=0.5,c='lightgray')\n plt.scatter(pmra[:],pmdec[:],s=1,c='midnightblue')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_overlay.png\",dpi=500)\n plt.xlim([sxmin,sxmax])\n plt.ylim([symin,symax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_overlay_closeup.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_overlay_closeup.png\",dpi=500)\n \n #Unfiltered PM/Parallax\n plt.figure(f\"{cluster.name}_pm_over_parallax_unfiltered\")\n plt.xlabel('PMRA / Parallax')\n plt.ylabel('PMDEC / Parallax')\n plt.title(f\"{cluster.name} Unfiltered\")\n plt.scatter([a/b for a,b in zip(unfpmra,unfpara)],[a/b for a,b in zip(unfpmdec,unfpara)],s=0.5,c='dimgray')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_over_parallax_unfiltered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_over_parallax_unfiltered.png\",dpi=500)\n \n #Unfiltered PM*Parallax\n 
plt.figure(f\"{cluster.name}_pm_times_parallax_unfiltered\")\n plt.xlabel('PMRA * Parallax')\n plt.ylabel('PMDEC * Parallax')\n plt.title(f\"{cluster.name} Unfiltered\")\n plt.scatter([a*b for a,b in zip(unfpmra,unfpara)],[a*b for a,b in zip(unfpmdec,unfpara)],s=0.5,c='dimgray')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n # plt.axis(\"square\")\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_pm_times_parallax_unfiltered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_pm_times_parallax_unfiltered.png\",dpi=500)\n \n \n #CMD plots\n if 'cmd' in modes:\n \n unfgmag=[star.g_mag for star in cluster.unfilteredWide]\n unf_b_r=[star.b_r for star in cluster.unfilteredWide]\n gmag=[star.g_mag for star in cluster.filtered]\n b_r=[star.b_r for star in cluster.filtered]\n \n bright_b_r = [x.b_r for x in cluster.filteredBright]\n bright_gmag = [x.g_mag for x in cluster.filteredBright]\n par_b_r = [x.b_r for x in cluster.distFiltered]\n par_gmag = [x.g_mag for x in cluster.distFiltered]\n \n #Reddening Correction\n plt.figure(f\"{cluster.name}_reddening_CMD\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('G Mag')\n plt.title(f\"{cluster.name} Reddening = {cluster.reddening:.2f}\")\n plt.scatter(b_r[:],gmag[:],s=0.5,c='dimgray',label='Observed')\n plt.arrow(b_r[int(len(b_r)/2)]-cluster.reddening,gmag[int(len(gmag)/2)]-2.1*cluster.reddening,cluster.reddening,2.1*cluster.reddening,color='red')\n plt.scatter([s-cluster.reddening for s in b_r[:]],[s-2.1*cluster.reddening for s in gmag[:]],s=1,c='midnightblue',label='Corrected')\n plt.legend()\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_reddening_CMD.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_reddening_CMD.png\",dpi=500)\n \n #Unfiltered CMD plot\n plt.figure(f\"{cluster.name}_CMD_unfiltered\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Unfiltered\")\n plt.scatter(unf_b_r[:],unfgmag[:],s=0.5,c='dimgray')\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_unfiltered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_unfiltered.png\",dpi=500)\n \n #Filtered CMD plot\n plt.figure(f\"{cluster.name}_CMD_filtered\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Parallax & Proper Motion Filtered\")\n plt.scatter(b_r[:],gmag[:],s=0.5,c='midnightblue')\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_filtered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_filtered.png\",dpi=500)\n \n #CMD overlay\n plt.figure(f\"{cluster.name}_CMD_overlay\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Overlay\")\n plt.scatter(unf_b_r[:],unfgmag[:],s=0.5,c='dimgray')\n plt.scatter(b_r[:],gmag[:],s=1,c='midnightblue')\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_overlay.png\",dpi=500)\n \n #Condensed CMD overlay\n plt.figure(f\"{cluster.name}_condensed_CMD_overlay\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Condensed Overlay\")\n plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening for s in gmag],s=0.5,c='dimgray',label='Data')\n plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening for s in cluster.condensed],s=5,c='red',label='Proxy Points')\n try:\n plt.axvline(x=cluster.turnPoint[0] - 
cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')\n except:\n print(f\"No turning point found for {cluster.name}\")\n plt.legend()\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_condensed_CMD_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_condensed_CMD_overlay.png\",dpi=500)\n \n #Weighted CMD overlay\n plt.figure(f\"{cluster.name}_weighted_CMD_overlay\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Weighted Overlay\")\n plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening for s in gmag],s=0.5,c='dimgray',label='Data')\n plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening for s in cluster.condensed],s=5,c=[s.weight for s in cluster.condensed],label='Proxy Points')\n try:\n plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')\n except:\n print(f\"No turning point found for {cluster.name}\")\n plt.set_cmap('brg')\n clb = plt.colorbar()\n clb.ax.set_title(\"Weight\")\n plt.legend()\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_weighted_CMD_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_weighted_CMD_overlay.png\",dpi=500)\n \n \n #Initial Condensed CMD overlay\n plt.figure(f\"{cluster.name}_initial_condensed_CMD_overlay\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Initial Condensed Overlay\")\n plt.scatter(b_r,gmag,s=0.5,c='dimgray',label='Data')\n plt.scatter([s.b_r for s in cluster.condensedInit],[s.g_mag for s in cluster.condensedInit],s=5,c='red',label='Proxy Points')\n try:\n plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')\n except:\n print(f\"No turning point found for {cluster.name}\")\n plt.legend()\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_initial_condensed_CMD_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_initial_condensed_CMD_overlay.png\",dpi=500)\n \n #Brightness-PM Filtered CMD plot\n plt.figure(f\"{cluster.name}_CMD_bright_filtered\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Bright-Only Proper Motion Filtered\")\n plt.scatter(bright_b_r[:],bright_gmag[:],s=0.5,c='midnightblue')\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_bright_filtered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_bright_filtered.png\",dpi=500)\n \n #Parallax Filtered CMD plot\n plt.figure(f\"{cluster.name}_CMD_parallax_filtered\")\n plt.gca().invert_yaxis()\n plt.xlabel('BP-RP')\n plt.ylabel('Apparent G Mag')\n plt.title(f\"{cluster.name} Parallax Filtered\")\n plt.scatter(par_b_r[:],par_gmag[:],s=0.5,c='midnightblue')\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_parallax_filtered.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_parallax_filtered.png\",dpi=500)\n \n \n if 'quiver' in modes:\n \n unfra=[star.ra for star in cluster.unfilteredWide]\n unfdec=[star.dec for star in cluster.unfilteredWide]\n unfpmra=[star.pmra for star in cluster.unfilteredWide]\n unfpmdec=[star.pmdec for star in cluster.unfilteredWide]\n \n x0 = min([s.ra for s in cluster.filtered])\n x1 = max([s.ra for s in cluster.filtered])\n y0 = min([s.dec for s in cluster.filtered])\n y1 = max([s.dec for s in cluster.filtered])\n width = 
x1-x0\n scale = 0.25\n xmin = x0+scale*width\n xmax = x1-scale*width\n ymin = y0+scale*width\n ymax = y1-scale*width\n \n #Unfiltered position quiver plot\n plt.figure(f\"{cluster.name}_ra_dec_unfiltered_quiver\")\n plt.xlabel('RA (Deg)')\n plt.ylabel('DEC (Deg)')\n plt.title(f\"{cluster.name} Unfiltered\")\n ax = plt.gca()\n ax.quiver(unfra[:],unfdec[:],unfpmra[:],unfpmdec[:],color='midnightblue',width=0.003,scale=400,scale_units='width')\n plt.axis(\"square\")\n plt.gcf().set_size_inches(10,10)\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_pm_quiver.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_pm_quiver.png\",dpi=500)\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_pm_quiver_zoom.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_pm_quiver_zoom.png\",dpi=500)\n \n \n #Isochrone plots\n if 'iso' in modes:\n \n gmag=[star.g_mag for star in cluster.filtered]\n b_r=[star.b_r for star in cluster.filtered]\n isochrone = isochrones[cluster.iso[0][0]]\n \n #Isochrone best fit\n plt.figure(f\"{cluster.name}_Iso_best\")\n plt.gca().invert_yaxis()\n plt.xlabel('Dereddened BP-RP')\n plt.ylabel('Corrected Absolute G Mag')\n plt.title(f\"{cluster.name} Isochrone Best Fit\")\n plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening-cluster.dist_mod for s in gmag],s=0.5,c='dimgray',label='Cluster')\n \n isoLabels = isochrone.name.split('_')\n isoLabel = r\"$[\\frac{Fe}{H}]$\" + \"=\" + isoLabels[1] + \"\\n\" \\\n + r\"$[\\frac{\\alpha}{Fe}]$\" + \"=\" + isoLabels[3] + \"\\n\" \\\n + r\"$[Y]$\" + \"=\" + isoLabels[7] + \"\\n\" \\\n + \"Age\" + \"=\" + isoLabels[5] + \" Gyr\"\n \n plt.plot(isochrone.br,isochrone.g,c='midnightblue',label=isoLabel)\n plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening-cluster.dist_mod for s in cluster.condensed],s=5,c='red',label='Cluster Proxy')\n extra = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\n h,l = plt.gca().get_legend_handles_labels()\n h.insert(0,extra)\n l.insert(0,f\"Reddening: {cluster.reddening}\")\n plt.legend(h,l)\n plt.savefig(f\"{cluster.imgPath}{cluster.name}_CMD_Iso_BestFit.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{cluster.name}_CMD_Iso_BestFit.png\",dpi=500)\n \n #Membership plots\n if 'membership' in modes:\n proxyMatch([cl])\n boundedStats([cl],saveCl=False,unloadCl=False)\n membership(cl,mode='filtered')\n membership(cl,mode='bounded',N=50)\n \n #3D Position plots\n if '3D' in modes:\n \n A = [a.ra * np.pi/180 for a in cluster.filtered]\n B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]\n C = [1/(1000*c.par) for c in cluster.filtered]\n \n x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]\n y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]\n z = [c*np.sin(b) for b,c in zip(B,C)]\n \n r = [np.sqrt(a**2+b**2) for a,b in zip(x,y)]\n theta = [np.arctan(b/a) for a,b in zip(x,y)]\n \n plt.figure(f\"{cluster.name}_3D_Position\")\n ax = plt.axes(projection='3d')\n ax.scatter3D(x,y,z)\n ax.scatter(0,0,0,color='red')\n scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)\n \n if closePlots:\n plt.close('all')\n\n\n\n# def Plot3D(cList):\n# #Imports\n# import matplotlib.pyplot as plt\n# import numpy as np\n# global clusterList\n \n# needsLoading=[]\n \n# plt.figure(f\"3D_Position_Ensemble\")\n# ax = 
plt.axes(projection='3d')\n \n \n# for cl in cList:\n# if not cl in clusters:\n# needsLoading.append(cl)\n \n# if not len(needsLoading) == 0:\n# loadClusters(needsLoading)\n \n# for cl in cList:\n# cluster = clusters[cl]\n \n# A = [a.ra * np.pi/180 for a in cluster.filtered]\n# B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]\n# C = [1/(0.001*c.par) for c in cluster.filtered]\n \n# #Flatten radially\n# C = [np.mean(C)]*len(C)\n \n# x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]\n# y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]\n# z = [c*np.sin(b) for b,c in zip(B,C)]\n \n# #Force Cluster to origin\n# # x = [a-np.mean(x) for a in x]\n# # y = [a-np.mean(y) for a in y]\n# # z = [a-np.mean(z) for a in z]\n \n# ax.scatter3D(x,y,z,label=cluster.name)\n \n# scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])\n# ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)\n# #ax.scatter(0,0,0,color='black')\n# plt.legend()\n\n\ndef yso_lookup():\n #Imports\n from astroquery.simbad import Simbad\n import numpy as np\n import os\n import re\n \n global names\n global sect\n global results\n global ra\n global dec\n \n main = open(\"Excess Examples/YSO_object_list.dat\").read()\n main = main.split(\"\\n\")[:-1]\n \n #Get the names of all of the objects identified\n names = []\n ra = []\n dec = []\n validNames = []\n for row in main:\n sect = re.split('\\s+',row)\n if sect[0] == '':\n sect = sect[1:]\n if sect[2] == 'none':\n continue\n \n name = sect[2]\n \n blacklist = ['A','Ab','AB','ABC','B','AaB']\n for entry in sect[3:]:\n if '.' in entry or entry in blacklist:\n break\n name = name + \" \" + entry\n \n names.append(name)\n \n #Perform a SIMBAD query for the identified objects\n results = []\n for name in names:\n result = Simbad.query_object(name)\n if not type(result) == type(None):\n results.append(result)\n validNames.append(name.replace(' ',''))\n \n ra1 = str(result.columns['RA']).split('\\n')[-1]\n ra1 = re.split('\\s+',ra1)\n \n if '' in ra1:\n ra.append('---')\n else:\n ra.append(str(round(float(ra1[0])*15+float(ra1[1])/4+float(ra1[2])/240,5)))\n \n dec1 = str(result.columns['DEC']).split('\\n')[-1]\n dec1 = re.split('\\s+',dec1)\n if '' in dec1:\n dec.append('---')\n else:\n dec.append(str(round(float(dec1[0])+float(dec1[1])/60+float(dec1[2])/3600,5)))\n \n #Create a text file in the VOSA readable format\n VOSAdata = []\n gaiadata = []\n for i in range(len(validNames)):\n line1 = f\"{validNames[i]} {ra[i]} {dec[i]} --- --- --- --- --- --- ---\"\n line2 = f\"{ra[i]} {dec[i]}\"\n VOSAdata.append(line1)\n if '-' in line2:\n continue\n gaiadata.append(line2)\n np.savetxt(\"Excess Examples/yso_vosa_output.txt\",VOSAdata,fmt=\"%s\")\n np.savetxt(\"Excess Examples/yso_gaia_output.txt\",gaiadata,fmt=\"%s\")\n \n\n\ndef exportVOSA(cl):\n #Imports\n import numpy as np\n \n if not cl in clusters:\n loadClusters([cl])\n \n cluster = clusters[cl]\n \n #objname RA DEC DIS Av Filter Flux Error PntOpts ObjOpts\n data = []\n for star in cluster.filtered:\n name = star.name.replace(\" \",\"\")\n line = f\"{name} {star.ra} {star.dec} {1000/star.par} --- --- --- --- --- ---\"\n data.append(line)\n np.savetxt(f\"{cluster.dataPath}{cluster.name}_VOSA.txt\",data,fmt=\"%s\")\n\n\ndef readSED(cList=['all'],printMissing=False):\n #imports\n import numpy as np\n import re\n import os\n \n cList = checkLoaded(cList)\n \n for cl in cList:\n\n cluster = clusters[cl]\n \n objPath = cluster.dataPath + \"vosa_results/objects/\"\n \n names = []\n for star in cluster.filtered:\n 
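#Flatten each star name (drop spaces and the DR2/EDR3/DR3 tag) so it can be matched against the VOSA object folder names below.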
flat = star.name.replace(\" \",\"\").replace(\"DR2\",\"\").replace(\"EDR3\",\"\").replace(\"DR3\",\"\")\n names.append(flat)\n star.flatName = flat\n cluster.stars = dict(zip(names,cluster.filtered))\n \n idx = 0\n newStars = dict()\n \n #Each star in a cluster has its own folder, and each folder contains several data sets\n for folder in os.listdir(objPath):\n \n fileName = folder.replace(\"DR2\",\"\").replace(\"EDR3\",\"\").replace(\"DR3\",\"\")\n #Weed out VOSA stars not in current filtered members list\n if not fileName in cluster.stars:\n if printMissing:\n print(f\"{fileName} is missing from filtered list, skipping it...\")\n continue\n \n main = open(objPath+folder+\"/sed/\"+folder+\".sed.dat\").read()\n main = main.split(\"\\n\")\n data = main[10:-1]\n \n #Create a list of measurement object pointers to attach to the stars later\n measurements = []\n \n #Convert every line of the data set into a vosaPoint object\n for row in data:\n sect = re.split('\\s+',row)[1:-1]\n measurements.append(vosaPoint(str(sect[0]),float(sect[1]),float(sect[2]),float(sect[3]),float(sect[4]),float(sect[5]),float(sect[6])))\n \n cluster.stars[fileName].vosaPoints = measurements\n #Weed out cluster.stars members who do not have a vosa table\n newStars[fileName] = cluster.stars[fileName]\n \n idx += 1\n \n cluster.stars = newStars\n \n \n \ndef checkBinary(cl):\n import numpy as np\n import matplotlib.pyplot as plt\n \n checkLoaded([cl])\n cluster = clusters[cl]\n \n global lman\n \n \n data = [Datum(star.b_r,star.g_mag) for star in cluster.filtered]\n \n # ax = plt.axes(xlim=(cluster.min_b_r-0.25,cluster.max_b_r+0.25), ylim=(cluster.min_g_mag-1,cluster.max_g_mag+1),autoscale_on=False)\n ax = plt.axes(xlim=(0, 2.5), ylim=(8, 20), autoscale_on=False)\n \n ax.invert_yaxis()\n ax.set_title('Lasso points using left mouse button')\n\n lman = LassoManager(ax, data,cluster)\n\n plt.show()\n \n \n\ndef vosaBinaries(cl):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n import os\n \n checkLoaded([cl])\n \n cluster = clusters[cl]\n \n if not os.path.isdir(f\"{cluster.imgPath}vosaBinaries/\"):\n os.mkdir(f\"{cluster.imgPath}vosaBinaries/\")\n \n \n for star in cluster.stars.values():\n if not star.binary == 1:\n return\n \n\n\ndef excessIR(cl,plot=True):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n import os\n \n checkLoaded([cl])\n \n cluster = clusters[cl]\n \n if not os.path.isdir(f\"{cluster.imgPath}excessIR/\"):\n os.mkdir(f\"{cluster.imgPath}excessIR/\")\n \n \n for star in cluster.stars.values():\n \n excess = False\n \n for vp in star.vosaPoints:\n \n if vp.excess > 0:\n excess = True\n \n if excess:\n \n #print(f\"{star.name} has {len(star.vosaPoints)} VOSA points\")\n \n star.hasExcess = 1\n \n if plot:\n plt.figure(f'{cluster.name} - {star.name}')\n plt.title(f'{cluster.name} : {star.name}')\n \n ax = plt.gca()\n ax.set_yscale('log')\n ax.set_xscale('log')\n plt.ylabel(r'Flux ($ergs^{-1}cm^{-2}\\AA^{-1}$)')\n plt.xlabel(r'Wavelength ($\\AA$)')\n \n plt.scatter([a.wavelength for a in star.vosaPoints],[a.flux for a in star.vosaPoints])\n \n plt.savefig(f\"{cluster.imgPath}excessIR/{star.name}.pdf\")\n plt.savefig(f\"{cluster.imgPath}excessIR/{star.name}.png\",dpi=500)\n\n\n\n\ndef proxyMatch(cList,plot=False):\n #Imports\n import matplotlib.pyplot as plt\n import numpy as np\n \n checkLoaded(cList) \n \n for cl in cList:\n cluster = clusters[cl]\n \n iso = isochrones[cluster.iso[0][0]]\n isoPoints = []\n for pt in iso.starList:\n isoPoints.append(pt)\n # if 
pt.Gaia_G_EDR3+cluster.dist_mod > cluster.turnPoint[1]:\n # isoPoints.append(pt)\n \n for star in cluster.filtered:\n minDist = 0.2\n smallestDist = 10\n vertCutoff = 1\n minPoint = None\n for point in isoPoints:\n dist = abs(point.Gaia_BP_EDR3-point.Gaia_RP_EDR3-star.b_r+cluster.reddening)\n if dist < minDist:\n if abs(point.Gaia_G_EDR3+cluster.dist_mod - star.g_mag + 2.1*cluster.reddening) < vertCutoff:\n minDist = dist\n minPoint = point\n elif dist < smallestDist:\n smallestDist = dist\n try:\n assert minDist < 0.2\n except:\n print(f\"[{cluster.name}] Star too distant from isochrone to make a good proxy: BP-RP: {star.b_r} | G: {star.g_mag} | Dist: {smallestDist}\")\n star.proxyMass = 0\n star.proxyLogTemp = 0\n star.proxyFeH = 0\n star.proxyLogAge = 0\n star.proxy = None\n continue\n \n #print(minDist)\n star.proxyMass = minPoint.star_mass\n star.proxyLogTemp = minPoint.log_Teff\n star.proxyFeH = minPoint.feh\n star.proxyLogAge = minPoint.log10_isochrone_age_yr\n star.proxy = minPoint\n \n cluster.massLoaded = True\n cluster.meanProxyMass = np.mean([a.proxyMass for a in cluster.filtered])\n cluster.totalProxyMass = np.sum([a.proxyMass for a in cluster.filtered])\n \n cluster.min_g_mag = min([a.g_mag for a in cluster.filtered])\n cluster.max_g_mag = max([a.g_mag for a in cluster.filtered])\n cluster.min_b_r = min([a.b_r for a in cluster.filtered])\n cluster.max_b_r = max([a.b_r for a in cluster.filtered])\n # if plot:\n # plt.figure(f\"{cluster.name}_proxy_fit\")\n \n \n\n\n\ndef variableHistogram(cl,var):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n \n checkLoaded([cl])\n \n cluster = clusters[cl]\n \n plt.figure()\n plt.title(f\"{cluster.name} Histogram of {var}\")\n plt.xlabel(f\"{var}\")\n plt.ylabel(\"Count\")\n plt.hist([eval(f\"a.{var}\") for a in cluster.filtered],bins='auto')\n\n\ndef varHist2D(cl,var1,var2,color='default',listType='filtered'):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n \n checkLoaded([cl])\n \n \n #Check allowed entries\n allowedTypes = ['filtered','unfilteredWide','unfilteredBright,filteredBright,binaries']\n if not listType in allowedTypes:\n print(f\"{listType} is not a valid list type, defaulting to filtered\")\n listType = \"filtered\"\n \n \n cluster = clusters[cl]\n \n plt.figure(figsize=(8,8))\n \n #Axis size and spacing\n left, width = 0.1, 0.65\n bottom, height = 0.1, 0.65\n spacing = 0.005\n rect_scatter = [left, bottom, width, height]\n rect_histx = [left, bottom + height + spacing, width, 0.2]\n rect_histy = [left + width + spacing, bottom, 0.2, height]\n \n ax_scatter = plt.axes(rect_scatter)\n ax_scatter.tick_params(direction='in', top=True, right=True)\n ax_histx = plt.axes(rect_histx)\n ax_histx.tick_params(direction='in', labelbottom=False)\n ax_histy = plt.axes(rect_histy)\n ax_histy.tick_params(direction='in', labelleft=False)\n \n x = [eval(f\"a.{var1}\") for a in eval(f\"cluster.{listType}\")]\n y = [eval(f\"a.{var2}\") for a in eval(f\"cluster.{listType}\")]\n \n if color == 'default':\n ax_scatter.scatter(x, y, s=5)\n else:\n colorMap = plt.get_cmap('coolwarm')#.reversed()\n ax_scatter.scatter(x, y, s=5, c=[eval(f\"a.{color}\") for a in eval(f\"cluster.{listType}\")], cmap = colorMap)\n # clb = plt.colorbar(ax_scatter)\n # clb.ax.set_title(f\"{color}\")\n \n ax_histx.hist(x,bins='auto')\n ax_histy.hist(y,bins='auto',orientation='horizontal')\n \n ax_histx.set_title(f\"Histogram of {listType} {cluster.name} in {var1} and {var2}\")\n ax_scatter.set_xlabel(f\"{var1}\")\n 
ax_scatter.set_ylabel(f\"{var2}\")\n \n\n\n\n\n\ndef Plot3D(cList=['all'],showEarth=True,flatten=True):\n #Imports\n import plotly.express as px\n import plotly.io as pio\n import numpy as np\n global clusterList\n \n pio.renderers.default='browser'\n \n fig = px.scatter_3d()\n \n if showEarth:\n fig.add_scatter3d(x=[0],y=[0],z=[0],marker=dict(color='lightblue'),name=\"Earth\")\n \n cList = checkLoaded(cList)\n \n big = []\n \n for cl in cList:\n cluster = clusters[cl]\n \n A = [a.ra * np.pi/180 for a in cluster.filtered]\n B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]\n C = [1/(0.001*c.par) for c in cluster.filtered]\n \n #Flatten radially\n if flatten:\n C = [np.mean(C)]*len(C)\n \n x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]\n y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]\n z = [c*np.sin(b) for b,c in zip(B,C)]\n \n #Force Cluster to origin\n # x = [a-np.mean(x) for a in x]\n # y = [a-np.mean(y) for a in y]\n # z = [a-np.mean(z) for a in z]\n \n fig.add_scatter3d(x=x,y=y,z=z,name=cl,mode=\"markers\",marker=dict(size=2))\n \n big.append(np.amax(x))\n big.append(np.amax(y))\n big.append(np.amax(z))\n \n\n #fig.layout.scene = dict(aspectmode=\"manual\",aspectratio=dict(x=1,y=1,z=1))\n #fig.update_layout(scene=dict(aspectmode=\"cube\",xaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)]),yaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)]),zaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)])))\n fig.update_layout(scene=dict(aspectmode=\"cube\",xaxis=dict(showbackground=False),yaxis=dict(showbackground=False),zaxis=dict(showbackground=False,visible=False)))\n \n fig.show()\n\n\ndef specificPlot(cl,iso,reddening,score):\n #Imports\n import matplotlib.pyplot as plt\n from matplotlib.patches import Rectangle\n import os\n \n checkLoaded([cl])\n \n cluster = clusters[f\"{cl}\"]\n isochrone = isochrones[f\"{iso}\"]\n \n #These are displayed on the plot\n # score = 0\n reddening = float(reddening)\n \n #Directory for saving plot outputs\n if not os.path.isdir(\"SpecificPlots/pdf/\"):\n os.makedirs(\"SpecificPlots/pdf/\")\n if not os.path.isdir(\"SpecificPlots/png/\"):\n os.makedirs(\"SpecificPlots/png/\")\n \n # #Find the score of the associated isochrone\n # for chrone in cluster.iso:\n # if chrone[0] == iso and chrone[2] == reddening:\n # score = chrone[1]\n # break\n \n #Plots the CMD and the isochrone, with all of the points adjusted to reddening, extinction, and distance modulus\n plt.figure()\n plt.gca().invert_yaxis()\n plt.xlabel('B-R')\n plt.ylabel('G Mag')\n plt.title(f\"{cl} {iso}\")\n plt.scatter([s.b_r for s in cluster.filtered],[s.g_mag for s in cluster.filtered],s=0.05,c='dimgray',label='Cluster')\n plt.plot([x + reddening for x in isochrone.br],[x+cluster.dist_mod+2.1*reddening for x in isochrone.g],c='midnightblue',label=f\"Score: {float(score):.7f}\")\n plt.scatter([s.b_r for s in cluster.condensed],[s.g_mag for s in cluster.condensed],s=5,c=[s.weight for s in cluster.condensed],label='Cluster Proxy')\n \n #Colors the points by their fitting weight\n plt.set_cmap('brg')\n clb = plt.colorbar()\n clb.ax.set_title(\"Weight\")\n \n #Label for the reddening\n extra = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False, edgecolor='none', linewidth=0)\n h,l = plt.gca().get_legend_handles_labels()\n h.insert(0,extra)\n l.insert(0,f\"Reddening: {reddening}\")\n plt.legend(h,l)\n \n #Save figure output to disk\n plt.savefig(f\"SpecificPlots/pdf/Requested_Plot_{cl}_{iso}_Reddening_{reddening}.pdf\")\n 
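#PNG copy of the same figure; both file names encode the cluster, isochrone and requested reddening.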
plt.savefig(f\"SpecificPlots/png/Requested_Plot_{cl}_{iso}_Reddening_{reddening}.png\",dpi=500)\n\n\ndef plotRange(cl,a,b):\n global clusters\n \n checkLoaded([cl])\n \n #Plots the top fitting isochrones over the range a to b for a given cluster\n #Does this by calling the specificPlot() method for each isochrone over the range\n for isochrone in clusters[f\"{cl}\"].iso[a:b]:\n specificPlot(cl,isochrones[isochrone[0]].name,isochrone[2],isochrone[1])\n\ndef getIsoScore(cl,iso,red,output=True):\n #Return the score for a given cluster's isochrone fit\n for i in cl.iso:\n if i[0] == iso.name and float(i[2]) == red:\n return i[1]\n if output:\n print(f\"No score found for {cl.name} | {iso.name} | {red}\")\n return 0\n\n\ndef onkey(x,y,cx,cy,fig,ax,cluster,iso,reddening):\n global curIso\n global curReddening\n curIso = iso\n curReddening = reddening\n \n def func(event):\n import matplotlib.patches as patches\n global curIso\n global curReddening\n global isochrones\n \n key = str(event.key)\n #print(key)\n \n ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == curIso.feh]\n fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == curIso.age]\n \n age_index = ageSorted.index(curIso)\n feh_index = fehSorted.index(curIso)\n \n #Move up or down in the desired variable space, with wrap-around at the ends of the lists\n if key == \"w\":\n #Increase metallicity\n try:\n curIso = fehSorted[feh_index+1]\n feh_index = feh_index+1\n except:\n curIso = fehSorted[0]\n feh_index = 0\n if key == \"s\":\n #Decrease metallicity\n curIso = fehSorted[feh_index-1]\n feh_index = feh_index-1\n if feh_index < 0:\n feh_index = len(fehSorted)+feh_index\n if key == \"a\":\n #Increase age\n curIso = ageSorted[age_index-1]\n age_index = age_index-1\n if age_index < 0:\n age_index = len(ageSorted)+age_index\n if key == \"d\":\n #Decrease age\n try:\n curIso = ageSorted[age_index+1]\n age_index = age_index+1\n except:\n curIso = ageSorted[0]\n age_index = 0\n if key == \"q\":\n #Decrease metallicity\n curReddening = round(curReddening-0.01,2)\n if key == \"e\":\n #Increase metalicity\n curReddening = round(curReddening+0.01,2)\n if key == \"r\":\n #Reset to originally requested isochrone\n curIso = iso\n ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == curIso.feh]\n fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == curIso.age]\n age_index = ageSorted.index(curIso)\n feh_index = fehSorted.index(curIso)\n if key == \" \":\n #Print currently highlighted isochrone to console\n score = getIsoScore(cluster,curIso,curReddening)\n fig.savefig(f\"Jamboree Images/frames/{curIso.name}.png\",dpi=500)\n print(f\"{curIso.name} | {curReddening} | {score}\")\n \n score = getIsoScore(cluster,curIso,curReddening,output=False)\n \n #Replots everything with the updated isochrone\n ax.clear()\n ax.scatter(x,y,s=0.25,color='dimgray')\n ax.scatter(cx,cy,s=4,color='red')\n ax.plot([a.Gaia_BP_EDR3-a.Gaia_RP_EDR3+curReddening for a in curIso.starList],[a.Gaia_G_EDR3+cluster.dist_mod+2.1*curReddening for a in curIso.starList],color='darkblue')\n ax.set_title(f\"{curIso.name}\\n {curReddening}\\n {score}\")\n ax.set_xlabel(\"Apparent BP-RP\")\n ax.set_ylabel(\"Apparent G Mag\")\n ax.invert_yaxis()\n \n \n #Progress bar indicators for the interactive plot\n \n #Sets the dimensons of the boxes\n x0,x1 = ax.get_xlim()\n y0,y1 = ax.get_ylim()\n margin = 0.01\n width = 0.05 * (x1-x0)\n height = 0.6 * (y1-y0)\n xmargin = margin * (x1-x0)\n ymargin = 
margin * (y1-y0)\n \n \n #The two main progress bars\n rect1 = patches.Rectangle((x1-width-xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n rect2 = patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n #rect3 = patches.Rectangle((x1-3*width-3*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n \n ax.add_patch(rect1)\n ax.add_patch(rect2)\n #ax.add_patch(rect3)\n \n #The segments filling up the progress bars\n n = len(ageSorted)\n #Adds cells bottom to top\n for i in range(n):\n offset = i*height/n\n alpha = 0.25\n if i == age_index:\n color = 'red'\n else:\n color = 'black'\n #Age progress bar\n ax.add_patch(patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))\n n = len(fehSorted)\n for i in range(n):\n offset = i*height/n\n alpha = 0.25\n if i == feh_index:\n color = 'red'\n else:\n color = 'black'\n #Metallicity progress bar\n ax.add_patch(patches.Rectangle((x1-1*width-1*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))\n \n fig.canvas.draw_idle()\n \n \n return func\n\ndef interactivePlot(cl,iso=0,reddening=\"auto\"):\n #Imports\n import matplotlib.pyplot as plt\n import matplotlib.patches as patches\n global clusters\n global isochrones\n global kid\n \n checkLoaded([cl])\n \n cluster = clusters[f\"{cl}\"]\n \n #Select the starting isochrone based on user input\n if type(iso) == str:\n isochrone = isochrones[f\"{iso}\"]\n elif type(iso) == int:\n assert iso >= 0\n isochrone = isochrones[cluster.iso[iso][0]]\n else:\n print(\"Invalid declaration of 'iso'\")\n return\n name = isochrone.name\n \n #Get the reddening if not manually defined\n if reddening == \"auto\":\n reddening = cluster.reddening\n assert type(reddening) == float or type(reddening) == int\n \n score = getIsoScore(cluster,isochrone,reddening)\n \n # #Sorted and secondary-sorted isochrone lists\n # ageSorted = sorted(isoList,key=lambda x: (x.age,x.feh))\n # fehSorted = sorted(isoList,key=lambda x: (x.feh,x.age))\n ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == isochrone.feh]\n fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == isochrone.age]\n age_index = ageSorted.index(isochrone)\n feh_index = fehSorted.index(isochrone)\n \n \n #Coordinate lists to plot in addition to the isochrones\n x,y = cluster.mag[:,0],cluster.mag[:,1]\n cx,cy = [s.b_r for s in cluster.condensed],[s.g_mag for s in cluster.condensed]\n \n \n #Systematically remove some of the conflicting default keymaps in Pyplot\n letters = ['w','s','a','d','q','e','r']\n for letter in letters:\n #Finds all keymap references in the rcParams\n for param in [key for key in plt.rcParams if key.startswith(\"keymap\") ]:\n try:\n plt.rcParams[param].remove(letter)\n except:\n continue\n \n \n #Initialize the plot that will be updated every time\n fig = plt.figure(f\"Interactive plot of {cl}\")\n ax = fig.add_subplot(111)\n ax.scatter(x,y,s=0.25,color='dimgray')\n ax.scatter(cx,cy,s=4,color='red')\n ax.plot([a.Gaia_BP_EDR3-a.Gaia_RP_EDR3+reddening for a in isochrone.starList],[a.Gaia_G_EDR3+cluster.dist_mod+2.1*reddening for a in isochrone.starList],color='darkblue')\n ax.set_title(f\"{name}\\n {reddening}\\n {score}\")\n ax.set_xlabel(\"Apparent BP-RP\")\n ax.set_ylabel(\"Apparent G Mag\")\n ax.invert_yaxis()\n \n x0,x1 = 
ax.get_xlim()\n y0,y1 = ax.get_ylim()\n margin = 0.01\n width = 0.05 * (x1-x0)\n height = 0.6 * (y1-y0)\n xmargin = margin * (x1-x0)\n ymargin = margin * (y1-y0)\n \n \n rect1 = patches.Rectangle((x1-width-xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n rect2 = patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n #rect3 = patches.Rectangle((x1-3*width-3*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)\n \n ax.add_patch(rect1)\n ax.add_patch(rect2)\n #ax.add_patch(rect3)\n \n n = len(ageSorted)\n #Adds cells bottom to top\n for i in range(n):\n offset = i*height/n\n alpha = 0.25\n if i == age_index:\n color = 'red'\n else:\n color = 'black'\n ax.add_patch(patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))\n n = len(fehSorted)\n for i in range(n):\n offset = i*height/n\n alpha = 0.25\n if i == feh_index:\n color = 'red'\n else:\n color = 'black'\n ax.add_patch(patches.Rectangle((x1-1*width-1*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))\n \n #Launch the key_press listener\n hook = onkey(x,y,cx,cy,fig,ax,cluster,isochrone,reddening)\n kid = fig.canvas.mpl_connect('key_press_event',hook)\n\n\ndef printList(cList,varList):\n \n cList = checkLoaded(cList)\n \n for cl in cList:\n cluster = clusters[cl]\n for a in varList:\n clStr = f\"[{cl}] {a} =\"\n exec(f\"print(clStr,cluster.{a})\")\n\ndef statRange(cl,a,b):\n import numpy as np\n global clusters\n \n checkLoaded([cl])\n if not isoIn:\n loadIsochrones()\n \n ages = []\n fehs = []\n ys = []\n reds = []\n \n #Computes the mean age, metallicity, and reddening for the top fitting isochrones over the range a to b for a given cluster\n #For example, a=0, b=10 will average the top 10 isochrone fits\n for isochrone in clusters[cl].iso[a:b]:\n iso = isochrones[isochrone[0]]\n print(f\"{iso.name} Reddening:{isochrone[2]}\")\n ages.append(float(iso.age))\n fehs.append(float(iso.feh))\n ys.append(float(iso.y))\n reds.append(float(isochrone[2]))\n \n \n print(f\"[{cl}] Mean age= {np.mean(ages)} Mean feh= {np.mean(fehs)} Mean y= {np.mean(ys)} Mean Reddening= {np.mean(reds)}\")\n \n\n \ndef setFlag():\n #Imports\n global clusterlist\n \n #Goes back and sets membership flags for all of the clusters loaded in memory to ensure that this tag can be used later\n #This takes place automatically after running turboFilter()\n #Example use case for this variable is in the customPlot() method\n for cluster in clusterList:\n for star in cluster.filtered:\n for unfStar in cluster.unfilteredWide:\n if star == unfStar:\n unfStar.member = 1\n \ndef customPlot(var1,var2,clname,mode='filtered',iso=False,square=True,color='default',title='default',close=False,save=True):\n #Imports\n import matplotlib.pyplot as plt\n global closePlots\n \n #Load the cluster if it isn't yet\n checkLoaded([clname])\n cluster = clusters[f\"{clname}\"]\n \n \n #Set the list of stars to be used for the given cluster\n #Using a mode not specified will return a referenced before assignment error\n if mode == 'filtered':\n starlist = cluster.filtered\n elif mode == 'unfiltered':\n starlist = cluster.unfilteredWide\n elif mode == 'bright_filtered':\n starlist = cluster.filteredBright\n elif mode == 'dist_filtered':\n starlist = cluster.distFiltered\n elif mode == 'bright_unfiltered':\n starlist = 
cluster.unfilteredBright\n elif mode == 'duo':\n starlist = cluster.unfilteredWide \n starlistF = cluster.filtered\n elif mode == 'binary':\n starlist = cluster.binaries\n elif mode == 'duoBinary':\n starlist = cluster.filtered\n starlistF = cluster.binaries\n elif mode == 'duoBright':\n starlist = cluster.unfilteredBright\n starlistF = cluster.filteredBright\n elif mode == 'duoDist':\n starlist = cluster.distFiltered\n starlistF = cluster.filtered\n elif mode == 'condensed':\n starlist = cluster.condensed\n elif mode == 'duoCondensed':\n starlist = cluster.filtered\n starlistF = cluster.condensed\n elif mode == 'bounded':\n starlist = cluster.bounded\n elif mode == 'duoBounded':\n starlist = cluster.filtered\n starlistF = cluster.bounded\n else:\n print(\"No preset star list configuration found with that alias\")\n return\n \n #Basic plot features with axis labels and a title\n plt.figure()\n if title == 'default':\n plt.title(f\"{clname} {mode} | {var1} vs {var2} | {color} color\")\n else:\n plt.title(f\"{title}\")\n plt.xlabel(f\"{var1}\".upper())\n plt.ylabel(f\"{var2}\".upper())\n \n #Plots differently depending on the mode\n #The color tag can be used to add distinction of a third variable while limited to two axes\n #If unspecified, filtered starlist with midnight blue coloring will be the result\n if iso:\n plt.gca().invert_yaxis()\n if 'duo' in mode:\n #plt.scatter([eval(f\"x.{var1}\") for x in starlist],[eval(f\"y.{var2}\") for y in starlist],s=[0.1+a.member*1.4 for a in starlist],c=[list(('lightgray',eval('z.par')))[z.member] for z in starlist])\n plt.scatter([eval(f\"x.{var1}\") for x in starlist],[eval(f\"y.{var2}\") for y in starlist],s=2,c='gray')\n if color == 'default': \n plt.scatter([eval(f\"x.{var1}\") for x in starlistF],[eval(f\"y.{var2}\") for y in starlistF],s=2.5,c='red')\n else:\n plt.scatter([eval(f\"x.{var1}\") for x in starlistF],[eval(f\"y.{var2}\") for y in starlistF],s=2.5,c=[eval(f\"z.{color}\") for z in starlistF])\n plt.set_cmap('brg')\n clb = plt.colorbar()\n clb.ax.set_title(f\"{color}\")\n else:\n if color == 'default': \n plt.scatter([eval(f\"x.{var1}\") for x in starlist],[eval(f\"y.{var2}\") for y in starlist],s=1,c='midnightblue')\n else:\n plt.scatter([eval(f\"x.{var1}\") for x in starlist],[eval(f\"y.{var2}\") for y in starlist],s=2,c=[eval(f\"z.{color}\") for z in starlist])\n plt.set_cmap('cool')\n clb = plt.colorbar()\n clb.ax.set_title(f\"{color}\")\n \n #By default, squares the axes to avoid misinformation from stretched axes\n #Turn this off and iso to true for a color magnitude diagram\n if square:\n plt.axis(\"square\")\n \n if save:\n plt.savefig(f\"SpecificPlots/pdf/{clname}_{mode}_{var1}_{var2}.pdf\")\n plt.savefig(f\"SpecificPlots/png/{clname}_{mode}_{var1}_{var2}.png\",dpi=500)\n \n if close or closePlots:\n plt.close()\n if save:\n print(f\"Custom Plot {clname}_{mode}_{var1}_{var2} saved and closed\")\n else:\n print(f\"Custom Plot {clname}_{mode}_{var1}_{var2} closed\")\n\ndef splitMS(clname='M67',slope=3,offset=12.2):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n \n checkLoaded([clname])\n cluster = clusters[clname]\n \n xlist = [s.b_r for s in cluster.filtered]\n ylist = [s.g_mag for s in cluster.filtered]\n \n x = np.linspace(1,2,100)\n \n #Create a diagram showing the lower edge and upper edge of the main sequence, which in theory are separated by 0.75mag\n plt.figure()\n plt.title('Main and Binary Sequences')\n plt.xlabel('B-R')\n plt.ylabel('Apparent G Mag')\n 
plt.scatter(xlist,ylist,s=0.5,label='Filtered Star Data')\n plt.plot(x,[slope*a + offset for a in x],color='r',label='Main Sequence')\n plt.plot(x,[slope*a + offset - 0.75 for a in x],'--',color='r',label='MS shifted 0.75 mag')\n plt.xlim(0.6,2.2)\n plt.ylim(13,19)\n plt.legend()\n plt.gca().invert_yaxis()\n plt.savefig(f\"SpecificPlots/png/{clname}_MS_Spread.png\",dpi=500)\n plt.savefig(f\"SpecificPlots/pdf/{clname}_MS_Spread.pdf\")\n\n\ndef kingProfile(r,K,R):\n \n return K*(1+r**2/R**2)**(-1)\n\ndef kingError(r,K,R,dK,dR):\n import numpy as np\n \n dfdK = (1+r**2/R**2)**(-1)\n dfdR = 2*K*r**2*R*(r**2+R**2)**(-2)\n return np.sqrt((dfdK*dK)**2 + (dfdR*dR)**2)\n\ndef densityProfile(r,K,R):\n import numpy as np\n \n #The exponential that is fit for the membership profile\n #R is a characteristic radius, typically negative but the absolute value is used for comparison\n #K is a scalar constant\n return K*np.exp(-1*r/R)\n\ndef densityError(r,K,R,dK,dR):\n import numpy as np\n \n dfdK = abs(np.exp(-1*r/R))\n dfdR = abs(K*r/(R**2)*np.exp(-1*r/R))\n return np.sqrt((dfdK*dK)**2 + (dfdR*dR)**2)\n \n\ndef toIntensity(mag):\n msun = -26.74 #apparent magnitude\n Isun = 1360 #w/m^)\n \n return Isun*10**( 0.4*(msun-mag) )\n\n\ndef membership(clname='M67',N=100,mode='filtered',numPercentileBins=5,percentile=0.2,delta=5,normalize=True):\n #Imports\n import numpy as np\n import matplotlib.pyplot as plt\n from matplotlib.patches import Circle\n import scipy.optimize as so\n import scipy.stats as st\n import math\n \n global volume\n \n checkLoaded([clname])\n cluster = clusters[clname]\n \n mode = mode.lower()\n \n #Default mode is filtered, but unfiltered data can be processed\n if \"filtered\" in mode:\n starList = cluster.filtered\n elif \"bounded\" in mode:\n starList = cluster.bounded\n else:\n starList = cluster.unfilteredWide\n \n #Load mass estimates from isochrone fitting\n if not cluster.massLoaded:\n proxyMatch([cluster.name])\n assert cluster.massLoaded\n assert len(starList) > 0\n \n #Assign x and y lists based on normalization or not\n if normalize:\n starX = [a.ra*np.cos(a.dec*np.pi/180) for a in starList]\n starY = [a.dec for a in starList]\n mode = mode + \"_normalized\"\n else:\n starX = [a.ra for a in starList]\n starY = [a.dec for a in starList]\n \n #Determine bounds of the field of view (post-filtering)\n xmax = max(starX)\n ymax = max(starY)\n x0 = np.mean(starX)\n y0 = np.mean(starY)\n newN = N\n \n #Determine radius of the field of view\n rx = xmax-x0\n ry = ymax-y0\n #r = np.mean([rx,ry])\n radiusFOV = ry\n #Using the mean ra and dec radius caused problems with clusters\n #like NGC188, which are close to the celestial pole and have\n #a very stretched mapping to the RA DEC space\n \n ringBins = list(np.linspace(0,radiusFOV,N))\n \n #The bins are divided up such that 50% of the bins are located in the inner 25% of the cluster radius\n #The remaining 50% of the bins are divided from 25% to 100% of the radius\n rings = list(np.linspace(0,radiusFOV/4,math.ceil(N/2)))\n ring2 = list(np.linspace(radiusFOV/4,radiusFOV,math.floor(N/2)+1))\n ring2 = ring2[1:-1]\n rings.extend(ring2)\n \n x=rings[:-1]\n # for i in range(0,len(rings[:-1])):\n # x.append((rings[i+1]+rings[i])/2)\n counts = list(np.zeros(N-1,dtype=int))\n masses = list(np.zeros(N-1,dtype=int))\n \n rads=[]\n for star in starList:\n #Radial distance from the mean RA and Dec of the cluster\n if normalize:\n rads.append(np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2))\n else:\n 
rads.append(np.sqrt((star.ra-x0)**2+(star.dec-y0)**2))\n #Find the nearest ring to the star\n r = find_nearest(rings, rads[-1])\n i = rings.index(r)\n #Check bounds\n if i < len(counts):\n #If outside last ring, add to that count\n if r > rads[-1]:\n counts[i-1] += 1\n masses [i-1] += star.proxyMass\n else:\n counts[i] += 1\n masses [i] += star.proxyMass\n #Worth noting here that the way that this is set up, the rings don't actually mark the bounds of the bins but rather the midpoints.\n #There is no check to see if you are exterior or interior to the nearest ring, but rather what ring you are nearest to,\n #so the rings mark the midpoints of their bins not the boundaries\n \n \n #Histogram of the counts in each radial bin\n plt.figure(f\"{clname}_membership_{mode}\")\n plt.hist(rads,bins=ringBins)\n plt.xlabel(\"Radius (deg)\")\n plt.ylabel(\"Number of Stars\")\n plt.title(f\"{clname} Membership\")\n plt.savefig(f\"{cluster.imgPath}{clname}_membership_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_membership_{mode}.png\",dpi=500)\n\n #Calculates the volume of each region bounded by two concentric rings and the number density of the stars counted in those regions\n volume = []\n for i in range(0,len(rings[:-1])):\n volume.append(np.pi*(rings[i+1]**2-rings[i]**2))\n numDensity = [a/b for a,b in zip(counts,volume)]\n massDensity = [a/b for a,b in zip(masses,volume)]\n error_num = [np.sqrt(a)/b for a,b in zip(counts,volume)]\n error_mass = [np.sqrt(a)/b for a,b in zip(masses,volume)]\n \n for i in range(0,len(error_num)):\n if error_num[i] < 0.1:\n error_num[i] = 0.1\n\n #Cut out the inner 5% because overbinning in the center of a circle doesn't help\n x = x[math.ceil(N/20):-1]\n counts = counts[math.ceil(N/20):-1]\n numDensity = numDensity[math.ceil(N/20):-1]\n massDensity = massDensity[math.ceil(N/20):-1]\n error_num = error_num[math.ceil(N/20):-1]\n error_mass = error_mass[math.ceil(N/20):-1]\n\n #Further filter the data based on outliers, either extremely low density or extremely big jumps in density from bin to bin\n i = 0\n numSmall = 0\n numGrad = 0\n while i < len(x)-1:\n if numDensity[i] < 0.5 or numDensity[i] < numDensity[i+1]/delta or massDensity[i] < 0.1:\n x.pop(i)\n counts.pop(i)\n numDensity.pop(i)\n massDensity.pop(i)\n error_num.pop(i)\n error_mass.pop(i)\n numSmall += 1\n newN -= 1\n elif abs(numDensity[i]) > abs(numDensity[i+1])*delta:# or abs(numDensity[i]) < abs(numDensity[i-1])/3:\n x.pop(i)\n counts.pop(i)\n numDensity.pop(i)\n massDensity.pop(i)\n error_num.pop(i)\n error_mass.pop(i)\n numGrad += 1\n newN -= 1\n else:\n i += 1\n if numDensity[-1] < 0.01 or massDensity[-1] < 0.01:\n x.pop(-1)\n counts.pop(-1)\n numDensity.pop(-1)\n massDensity.pop(-1)\n error_num.pop(-1)\n error_mass.pop(-1)\n numSmall += 1\n newN -= 1\n \n \n print(f\"[{cluster.name}] Removed {numSmall} points with too small of a density and {numGrad} points with too extreme of a delta\")\n\n\n\n #========= Number Density =========\n \n #Number density vs radial bin plot\n plt.figure(f\"{clname}_density_{mode}\")\n plt.errorbar(x,numDensity,yerr=error_num,ls='None')\n plt.scatter(x,numDensity)\n plt.xlabel(\"Radius (deg)\")\n plt.ylabel(r\"Surface Number Density ($deg^{-2}$)\")\n plt.title(f\"{clname} {mode.capitalize()} Number Density\".replace(\"_normalized\",' Normalized'))\n \n #Fit an exponential curve to the density plot based on the densityProfile function defined above\n \n if \"NGC2355\" in cluster.name:\n p0=[5000,0.1]\n else:\n p0=[5000,0.1]\n \n #print([b/a for a,b in 
zip(numDensity,error_num)])\n \n fit,var = so.curve_fit(kingProfile,x,numDensity,p0,maxfev=1000)\n \n #Std. Dev. from variance\n err = np.sqrt(var[1][1])\n err_coeff = np.sqrt(var[0][0])\n \n scale = np.abs(fit[1]*3600/206265)/(cluster.mean_par/1000)\n #scaleVar = (3600/206265)*(err/(cluster.mean_par/1000) ) + 2*fit[1]/(cluster.mean_par_err/1000)\n scaleVar = np.abs(scale*np.sqrt((var[1][1]/fit[1])**2 + (cluster.mean_par_err/cluster.mean_par)**2))\n \n #Scale radius from count in parsecs\n setattr(cluster,f\"scaleRad_{mode}\",scale)\n setattr(cluster,f\"scaleRad_err_{mode}\",scaleVar)\n #Scale radius from count in degrees\n setattr(cluster,f\"scaleAngle_{mode}\",abs(fit[1]))\n setattr(cluster,f\"scaleAngle_err_{mode}\",err)\n setattr(cluster,f\"numDensity_coeff_{mode}\",fit[0])\n setattr(cluster,f\"numDensity_coeff_err_{mode}\",err_coeff)\n\n \n #Plot the curve fit \n numLabel = ( f\"N={newN} ({mode.capitalize()})\".replace(\"_normalized\",' Normalized')+\"\\n\" \n + fr\"K={fit[0]:.3f} $\\pm$ {err_coeff:.3f}\" + \"\\n\" \n + fr\"$\\rho$={np.abs(fit[1]):.3f}$\\degree$ $\\pm$ {err:.3f}$\\degree$\"+ \"\\n\" \n + fr\"R={scale:.3f}pc $\\pm$ {scaleVar:.3f}pc\" )\n \n plt.plot(x,[kingProfile(a,*fit) for a in x],color='red',label=numLabel)\n plt.fill_between(x,[kingProfile(a,*fit)-kingError(a,fit[0],fit[1],err_coeff,err) for a in x],[kingProfile(a,*fit)+kingError(a,fit[0],fit[1],err_coeff,err) for a in x],label=r'$1\\sigma$',edgecolor='none',alpha=0.8,facecolor='salmon')\n plt.legend(fontsize=8,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_numDensity_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_numDensity_{mode}.png\",dpi=500)\n plt.yscale('log')\n plt.savefig(f\"{cluster.imgPath}{clname}_numDensity_log_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_numDensity_log_{mode}.png\",dpi=500)\n \n \n #Double plot for bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_density_filtered\")\n \n plt.title(f\"{clname} Overlaid Number Density\")\n plt.errorbar(x,numDensity,yerr=error_num,ls='None',color='midnightblue')\n plt.scatter(x,numDensity,color='midnightblue')\n plt.plot(x,[kingProfile(a,*fit) for a in x],color='darkred',label=numLabel)\n plt.fill_between(x,[kingProfile(a,*fit)-kingError(a,fit[0],fit[1],err_coeff,err) for a in x],[kingProfile(a,*fit)+kingError(a,fit[0],fit[1],err_coeff,err) for a in x],edgecolor='none',alpha=0.8,facecolor='salmon')\n plt.legend(fontsize=8,loc='upper right')\n plt.yscale('linear')\n plt.savefig(f\"{cluster.imgPath}{clname}_numDensity_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_numDensity_overlay.png\",dpi=500)\n plt.yscale('log')\n plt.savefig(f\"{cluster.imgPath}{clname}_numDensity_log_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_numDensity_log_overlay.png\",dpi=500)\n \n #========= Mass Density =========\n \n #Mass density vs radial bin plot\n plt.figure(f\"{clname}_mass_density_{mode}\")\n plt.errorbar(x,massDensity,yerr=error_mass,ls='None')\n plt.scatter(x,massDensity)\n plt.xlabel(\"Radius (deg)\")\n plt.ylabel(r\"Surface Mass Density ($M_{\\odot}*deg^{-2}$)\")\n plt.title(f\"{clname} {mode.capitalize()} Mass Density\".replace(\"_normalized\",' Normalized'))\n \n #Fit an exponential curve to the density plot based on the densityProfile function defined above\n fit_mass,var_mass = so.curve_fit(kingProfile,x,massDensity,p0,maxfev=1000)\n \n #Std. Dev. 
from variance\n err_mass = np.sqrt(var[1][1])\n err_mass_coeff = np.sqrt(var[0][0])\n \n scale_mass = np.abs(fit_mass[1]*3600/206265)/(cluster.mean_par/1000)\n #scaleVar_mass = (3600/206265)*(err_mass/(cluster.mean_par/1000) ) + 2*fit_mass[1]/(cluster.mean_par_err/1000)\n scaleVar_mass = np.abs(scale_mass*np.sqrt((var_mass[1][1]/fit_mass[1])**2 + (cluster.mean_par_err/cluster.mean_par)**2))\n \n #Scale radius from mass in parsecs\n setattr(cluster,f\"scaleRad_mass_{mode}\",scale_mass)\n setattr(cluster,f\"scaleRad_mass_err_{mode}\",scaleVar_mass)\n #Scale radius from mass in degrees\n setattr(cluster,f\"scaleAngle_mass_{mode}\",abs(fit_mass[1]))\n setattr(cluster,f\"scaleAngle_mass_err_{mode}\",err_mass)\n setattr(cluster,f\"massDensity_coeff_{mode}\",fit_mass[0])\n setattr(cluster,f\"massDensity_coeff_err_{mode}\",err_mass_coeff)\n \n #Plot the curve fit\n massLabel = ( f\"N={newN} ({mode.capitalize()})\".replace(\"_normalized\",' Normalized')+\"\\n\" \n + fr\"K={fit_mass[0]:.3f} $\\pm$ {err_mass_coeff:.3f}\" + \"\\n\" \n + fr\"$\\rho$={np.abs(fit_mass[1]):.3f}$\\degree$ $\\pm$ {err_mass:.3f}$\\degree$\"+ \"\\n\" \n + fr\"R={scale_mass:.3f}pc $\\pm$ {scaleVar_mass:.3f}pc\" )\n \n plt.plot(x,[kingProfile(a,*fit_mass) for a in x],color='red',label=massLabel)\n plt.fill_between(x,[kingProfile(a,*fit_mass)-kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],[kingProfile(a,*fit_mass)+kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],label=r'$1\\sigma$',edgecolor='none',alpha=0.8,facecolor='salmon')\n plt.legend(fontsize=8,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_massDensity_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massDensity_{mode}.png\",dpi=500)\n plt.yscale('log')\n plt.savefig(f\"{cluster.imgPath}{clname}_massDensity_log_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massDensity_log_{mode}.png\",dpi=500)\n \n #Double plot for bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_mass_density_filtered\")\n \n plt.title(f\"{clname} Overlaid Mass Density\")\n plt.errorbar(x,massDensity,yerr=error_mass,ls='None',color='midnightblue')\n plt.scatter(x,massDensity,color='midnightblue')\n plt.plot(x,[kingProfile(a,*fit_mass) for a in x],color='darkred',label=massLabel)\n plt.fill_between(x,[kingProfile(a,*fit_mass)-kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],[kingProfile(a,*fit_mass)+kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],edgecolor='none',alpha=0.8,facecolor='salmon')\n plt.legend(fontsize=8,loc='upper right')\n plt.yscale('linear')\n plt.savefig(f\"{cluster.imgPath}{clname}_massDensity_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massDensity_overlay.png\",dpi=500)\n plt.yscale('log')\n plt.savefig(f\"{cluster.imgPath}{clname}_massDensity_log_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massDensity_log_overlay.png\",dpi=500)\n \n \n #========= Average Mass =========\n \n averageMass = [a/b for a,b in zip(massDensity,numDensity)]\n \n xDist = [np.abs(a*3600/206265)/(cluster.mean_par/1000) for a in x]\n \n #Average Mass plot\n plt.figure(f\"{clname}_average_mass_{mode}\")\n plt.scatter(xDist,averageMass,label=fr\"N={newN} ({mode.capitalize()})\".replace(\"_normalized\",' Normalized')+\"\\n\"+f\"{numPercentileBins} Percentile Bins\")\n plt.xlabel(\"Distance from Center (pc)\")\n plt.ylabel(r\"Average Stellar Mass ($M_{\\odot}$)\")\n plt.title(f\"{clname} {mode.capitalize()} Average 
Mass\".replace(\"_normalized\",' Normalized'))\n \n \n #Split average mass data into numPercentileBins number of bins\n if \"filtered\" in mode:\n cluster.pMin = xDist[0]\n cluster.pMax = xDist[-1]\n \n pBins = np.linspace(cluster.pMin,cluster.pMax,numPercentileBins+1)\n xBins = []\n for i in range(len(pBins)-1):\n xBins.append((pBins[i]+pBins[i+1])/2)\n pBins = np.delete(pBins,0)\n pBins = np.delete(pBins,-1)\n for b in pBins:\n plt.axvline(x=b,color='black',linestyle='--')\n \n binned = []\n for n in range(numPercentileBins):\n binned.append([])\n \n #Assign the average mass data points to the bins\n for i in range(len(xDist)):\n #Finds the nearest xBin to each x value and sorts the corresponding averageMass into that bin\n val = find_nearest(xBins,xDist[i])\n idx = xBins.index(val)\n binned[idx].append(averageMass[i])\n \n #Creates arrays that are numPercentileBins long that store the standard and quantile means of the points in those bins\n quantileMean = []\n binMean = []\n meanBins = []\n for b in binned:\n if len(b) == 0:\n continue\n binSorted = sorted(b)\n #Finds the index of the lower percentile marker (ex. 20%)\n lower = binSorted.index(find_nearest(binSorted, np.quantile(b,percentile)))\n #Finds the index of the upper percentile marker (ex. 80%)\n upper = binSorted.index(find_nearest(binSorted, np.quantile(b,1-percentile)))\n #Means between lower and upper percentile markers\n quantileMean.append(np.mean(binSorted[lower:upper+1]))\n #Standard Mean\n binMean.append(np.mean(b))\n #Bins\n meanBins.append(xBins[binned.index(b)])\n \n try:\n fit, var = so.curve_fit(kingProfile,xDist,[kingProfile(a,*fit_mass)/kingProfile(a,*fit) for a in x])\n residual_coeff, residual_scaleAngle = fit[0],fit[1]\n except:\n print(f\"Unable to fit the residuals for {cluster.name}\")\n residual_coeff, residual_scaleAngle = -99, -99\n \n massFit = st.linregress(meanBins,quantileMean)\n fitslope, intercept, rval, pval, fitslope_err, intercept_err = massFit.slope, massFit.intercept, massFit.rvalue, massFit.pvalue, massFit.stderr, massFit.intercept_stderr\n residual_scaleRad = np.abs(residual_scaleAngle*3600/206265)/(cluster.mean_par/1000)\n \n setattr(cluster,f\"residual_coeff_{mode}\",residual_coeff)\n setattr(cluster,f\"residual_scaleAngle_{mode}\",residual_scaleAngle)\n setattr(cluster,f\"residual_scaleRad_{mode}\",residual_scaleRad)\n \n setattr(cluster,f\"mass_slope_{mode}\",fitslope)\n setattr(cluster,f\"mass_slope_err_{mode}\",fitslope_err)\n setattr(cluster,f\"mass_intercept_{mode}\",intercept)\n setattr(cluster,f\"mass_intercept_err_{mode}\",intercept_err)\n setattr(cluster,f\"mass_fit_r2_{mode}\",rval**2)\n setattr(cluster,f\"mass_fit_p_{mode}\",pval)\n \n fitLabel = ( fr\"Slope = {fitslope:.3f} $\\pm$ {fitslope_err:.3f}\" + \"\\n\" \n + fr\"Intercept = {intercept:.3f} $\\pm$ {intercept_err:.3f}\" + \"\\n\" \n + fr\"$r^2$ = {rval**2:.3f} ({mode.capitalize()})\".replace(\"_normalized\",' Normalized'))\n \n #Plot the quantile and standard means on the existing average mass plot\n plt.scatter(meanBins,quantileMean,color='red',label=f'Interquartile Mean ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.plot(xDist,[fitslope*a+intercept for a in xDist],color='red',label=fitLabel)\n #plt.scatter(meanBins,binMean,color='dimgray',label=f'{mode.capitalize()} Standard Mean')\n plt.legend(fontsize=8,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_averageMass_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_averageMass_{mode}.png\",dpi=500)\n \n \n #Double plot for 
bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_average_mass_filtered\")\n \n plt.title(f\"{clname} Overlaid Average Mass\")\n plt.scatter(xDist,averageMass,color='midnightblue',label=fr\"N={newN} ({mode.capitalize()})\".replace(\"_normalized\",' Normalized')+\"\\n\"+f\"{numPercentileBins} Percentile Bins\")\n plt.plot(xDist,[fitslope*a+intercept for a in xDist],color='darkred',label=fitLabel)\n plt.scatter(meanBins,quantileMean,color='darkred',label=f'Interquartile Mean ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n #plt.scatter(meanBins,binMean,color='black',label=f'{mode.capitalize()} Standard Mean')\n plt.legend(fontsize=8,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_averageMass_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_averageMass_overlay.png\",dpi=500)\n \n #========= Radius Plot =========\n plt.figure(f\"{clname}_characteristic_radius_{mode}\")\n if normalize:\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA*cos(Dec) (Deg)\")\n else:\n plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA (Deg)\")\n pltRad = abs(getattr(cluster,f\"scaleAngle_{mode}\"))\n outline1 = Circle([x0,y0],1*pltRad,color='red',fill=False,ls='--',label=fr\"$\\rho$={1*pltRad:0.3f}$\\degree$\",alpha=0.7)\n outline2 = Circle([x0,y0],5*pltRad,color='red',fill=False,ls='--',label=fr\"5$\\rho$={5*pltRad:0.3f}$\\degree$\",alpha=0.7)\n #outline3 = Circle([x0,y0],10*abs(getattr(cluster,f\"scaleAngle_{mode}\")),color='red',fill=False,ls='--',label=fr\"10$\\rho$={3*abs(fit[1]):0.3f}$\\degree$\",alpha=0.7)\n plt.gca().add_patch(outline1)\n plt.gca().add_patch(outline2)\n #plt.gca().add_patch(outline3)\n plt.legend(fontsize=10,loc='upper right')\n plt.axis('square')\n \n plt.ylabel(\"DEC (Deg)\")\n plt.title(f\"{clname} {mode.capitalize()} Characteristic Radius\".replace(\"_normalized\",' Normalized'))\n plt.gcf().set_size_inches(8,8)\n plt.savefig(f\"{cluster.imgPath}{clname}_radialMembership_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_radialMembership_{mode}.png\",dpi=500)\n \n if \"M67\" in clname and \"filtered\" in mode:\n plt.figure(f\"{clname}_rings_{mode}\")\n if normalize:\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA*cos(Dec) (Deg)\")\n \n else:\n plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA (Deg)\")\n \n \n for i in range(0,len(rings)):\n outline = Circle([x0,y0],rings[i],color='red',fill=False)\n plt.gca().add_patch(outline)\n \n 
plt.legend(fontsize=10,loc='upper right')\n plt.axis('square')\n \n plt.ylabel(\"DEC (Deg)\")\n plt.title(f\"{clname} Radial Bins\")\n plt.gcf().set_size_inches(8,8)\n plt.savefig(f\"SpecificPlots/pdf/{clname}_radialBins_{mode}.pdf\".replace(\"_filtered\",''))\n plt.savefig(f\"SpecificPlots/png/{clname}_radialBins_{mode}.png\".replace(\"_filtered\",''),dpi=500)\n plt.xlim(x0-0.15,x0+0.15)\n plt.ylim(y0-0.15,y0+0.15)\n plt.savefig(f\"SpecificPlots/pdf/{clname}_radialBins_center_{mode}.pdf\".replace(\"_filtered\",''))\n plt.savefig(f\"SpecificPlots/png/{clname}_radialBins_center_{mode}.png\".replace(\"_filtered\",''),dpi=500)\n \n \n #========= Stars by Mass =========\n massList = []\n innerMassList = []\n for star in starList:\n massList.append(star.proxyMass)\n if normalize:\n if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerMassList.append(star.proxyMass)\n else:\n if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerMassList.append(star.proxyMass)\n \n mBins = np.arange(min(massList),max(massList)+0.1,0.1)\n inBins = np.arange(min(innerMassList),max(innerMassList)+0.1,0.1)\n plt.figure(f\"{clname}_mass_frequency_{mode}\")\n plt.xlabel(r\"Stellar Mass ($M_{\\odot}$)\")\n plt.ylabel(\"Number of Stars\")\n plt.title(f\"{clname} {mode.capitalize()} Mass Frequency\".replace(\"_normalized\",' Normalized'))\n plt.hist(massList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'))\n plt.hist(innerMassList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_massFrequency_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massFrequency_{mode}.png\",dpi=500)\n\n #Double plot for bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_mass_frequency_filtered\")\n plt.title(f\"{clname} Overlaid Mass Frequency\")\n plt.hist(massList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'),color='red')\n plt.hist(innerMassList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_massFrequency_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_massFrequency_overlay.png\",dpi=500)\n \n \n #========= Stars by Magnitude =========\n magList = []\n innerMagList = []\n for star in starList:\n magList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)\n if normalize:\n if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerMagList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)\n else:\n if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerMagList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)\n \n mBins = np.arange(min(magList),max(magList)+0.1,0.1)\n inBins = np.arange(min(innerMagList),max(innerMagList)+0.1,0.1)\n plt.figure(f\"{clname}_mag_frequency_{mode}\")\n plt.xlabel(r\"Absolute G Mag\")\n plt.ylabel(\"Number of Stars\")\n plt.title(f\"{clname} {mode.capitalize()} Absolute Magnitude Frequency\".replace(\"_normalized\",' Normalized'))\n plt.hist(magList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'))\n 
plt.hist(innerMagList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_magFrequency_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_magFrequency_{mode}.png\",dpi=500)\n\n #Double plot for bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_mag_frequency_filtered\")\n plt.title(f\"{clname} Overlaid Absolute Magnitude Frequency\")\n plt.hist(magList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'),color='red')\n plt.hist(innerMagList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_magFrequency_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_magFrequency_overlay.png\",dpi=500)\n \n #========= Stars by Color =========\n colorList = []\n innerColorList = []\n for star in starList:\n colorList.append(star.b_r-cluster.reddening)\n if normalize:\n if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerColorList.append(star.b_r-cluster.reddening)\n else:\n if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f\"scaleAngle_{mode}\"):\n innerColorList.append(star.b_r-cluster.reddening)\n \n mBins = np.arange(min(colorList),max(colorList)+0.1,0.1)\n inBins = np.arange(min(innerColorList),max(innerColorList)+0.1,0.1)\n plt.figure(f\"{clname}_color_frequency_{mode}\")\n plt.xlabel(r\"Dereddened BP-RP\")\n plt.ylabel(\"Number of Stars\")\n plt.title(f\"{clname} {mode.capitalize()} Dereddened Color Index Frequency\".replace(\"_normalized\",' Normalized'))\n plt.hist(colorList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'))\n plt.hist(innerColorList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_colorFrequency_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_colorFrequency_{mode}.png\",dpi=500)\n\n #Double plot for bounded regions\n if \"bounded\" in mode:\n plt.figure(f\"{clname}_color_frequency_filtered\")\n plt.title(f\"{clname} Overlaid Dereddened Color Index Frequency\")\n plt.hist(colorList,bins=mBins,label=f\"Total {mode.capitalize()}\".replace(\"_normalized\",' Normalized'),color='red')\n plt.hist(innerColorList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace(\"_normalized\",' Normalized'))\n plt.legend(fontsize=10,loc='upper right')\n plt.savefig(f\"{cluster.imgPath}{clname}_colorFrequency_overlay.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_colorFrequency_overlay.png\",dpi=500)\n \n \n \n #========= Other Radii ========= \n massSum = np.sum([star.proxyMass for star in starList])\n intensitySum = np.sum([toIntensity(star.g_mag) for star in starList])\n \n curMassSum = 0\n curIntSum = 0\n massFound = False\n intFound = False\n \n if normalize:\n setattr(cluster,f\"medianRad_{mode}\",np.median([np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000) for star in starList]))\n setattr(cluster,f\"medianAngle_{mode}\",np.median([star.normRadDist for star in starList]))\n radialStarList = sorted(starList,key=lambda x: x.normRadDist)\n \n for star in radialStarList:\n 
curMassSum += star.proxyMass\n curIntSum += toIntensity(star.g_mag)\n \n if curMassSum > massSum/2 and not massFound:\n setattr(cluster,f\"halfMassRad_{mode}\",np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000))\n setattr(cluster,f\"halfMassAngle_{mode}\",star.normRadDist)\n massFound = True\n if curIntSum > intensitySum/2 and not intFound:\n setattr(cluster,f\"halfLightRad_{mode}\",np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000))\n setattr(cluster,f\"halfLightAngle_{mode}\",star.normRadDist)\n intFound = True\n if massFound and intFound:\n break\n \n plt.figure(f\"{clname}_other_radii_{mode}\")\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA*cos(Dec) (deg)\")\n else:\n setattr(cluster,f\"medianRad_{mode}\",np.median([np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000) for star in starList]))\n setattr(cluster,f\"medianAngle_{mode}\",np.median([star.radDist for star in starList]))\n radialStarList = sorted(starList,key=lambda x: x.radDist)\n \n for star in radialStarList:\n curMassSum += star.proxyMass\n curIntSum += toIntensity(star.g_mag)\n \n if curMassSum > massSum/2 and not massFound:\n setattr(cluster,f\"halfMassRad_{mode}\",np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000))\n setattr(cluster,f\"halfMassAngle_{mode}\",star.radDist)\n massFound = True\n if curIntSum > intensitySum/2 and not intFound:\n setattr(cluster,f\"halfLightRad_{mode}\",np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000))\n setattr(cluster,f\"halfLightAngle_{mode}\",star.radDist)\n intFound = True\n if massFound and intFound:\n break\n \n plt.figure(f\"{clname}_other_radii_{mode}\")\n plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')\n plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')\n plt.xlabel(\"RA (deg)\")\n \n medRad = getattr(cluster,f\"medianRad_{mode}\")\n medAngle = getattr(cluster,f\"medianAngle_{mode}\")\n mRad = getattr(cluster,f\"halfMassRad_{mode}\")\n mAngle = getattr(cluster,f\"halfMassAngle_{mode}\")\n lRad = getattr(cluster,f\"halfLightRad_{mode}\")\n lAngle = getattr(cluster,f\"halfLightAngle_{mode}\")\n print(medAngle)\n outline1 = Circle([x0,y0],medAngle,color='red',fill=False,ls='--',label=fr\"Median Star Distance = {medAngle:.3f}$\\degree$, {medRad:.3f}pc\",alpha=1)\n outline2 = Circle([x0,y0],mAngle,color='darkgreen',fill=False,ls='--',label=fr\"Half Mass Radius = {mAngle:.3f}$\\degree$, {mRad:.3f}pc\",alpha=1)\n outline3 = Circle([x0,y0],lAngle,color='purple',fill=False,ls='--',label=fr\"Half Light Radius = {lAngle:.3f}$\\degree$, {lRad:.3f}pc\",alpha=1)\n plt.gca().add_patch(outline1)\n plt.gca().add_patch(outline2)\n plt.gca().add_patch(outline3)\n plt.legend(fontsize=10,loc='upper right')\n plt.axis('square')\n plt.ylabel(\"DEC (Deg)\")\n plt.title(f\"{clname} {mode.capitalize()} Various Radii\".replace(\"_normalized\",' Normalized'))\n plt.gcf().set_size_inches(8,8)\n plt.savefig(f\"{cluster.imgPath}{clname}_otherRadii_{mode}.pdf\")\n plt.savefig(f\"{cluster.imgPath}png/{clname}_otherRadii_{mode}.png\",dpi=500)\n \n \n\ndef checkLoaded(cList):\n if 'all' in cList:\n cList = [c.name 
for c in clusterList]\n else:\n for cl in cList:\n if not cl in clusters:\n loadClusters([cl])\n \n return cList\n\ndef saveResults(cList,outdir=\"results\"):\n #Imports\n import numpy as np\n import dill\n import os\n global clusters\n global clusterList\n \n checkLoaded(cList)\n \n #Check and create the relevant directory paths to save/load the results\n if not os.path.isdir(f\"{outdir}/\"):\n os.mkdir(f\"{outdir}/\")\n if not os.path.isdir(f\"{outdir}/pickled/\"):\n os.mkdir(f\"{outdir}/pickled/\")\n \n else:\n for cl in cList:\n cluster = clusters[cl]\n #Creates a \"result cluster\" object from the cluster, effectively just stripping away lists\n rCl = resultClusterObj(cluster)\n #Pickle the result cluster object\n with open(f\"{outdir}/pickled/{cluster.name}.pk1\", 'wb') as output:\n dill.dump(rCl, output)\n \n #Store variables into an array to be printed as csv\n properties = [a for a in dir(rCl) if not a.startswith('_')]\n res = [getattr(rCl,p) for p in properties]\n #Stack into an array of 2 rows with variable names and values\n fin = np.vstack((properties,res))\n np.savetxt(f\"{outdir}/{cluster.name}.csv\",fin,delimiter=',',fmt='%s')\n\ndef loadResults(filter=\"None\",indir=\"results\"):\n #Imports\n import numpy as np\n import dill\n import os\n global resultList\n global resultsIn\n \n assert os.path.isdir(\"results/\")\n resultList = []\n for fn in os.listdir(indir+\"/pickled/\"):\n #Reads in instances from the saved pickle file\n with open(f\"{indir}/pickled/{fn}\",'rb') as input:\n res = dill.load(input)\n resultList.append(res)\n resultsIn = True\n toDict()\n\ndef refreshProperties(cList=['all']):\n import numpy as np\n global catalogue\n global clusterList\n global clusters\n \n clusterCatalogue()\n checkLoaded(cList)\n \n #Loop through clusters\n for cluster in cList:\n \n reference = None\n \n for cl in catalogue:\n if str(cl.name) == str(cluster.name):\n reference = cl\n print(f\"Catalogue match for {cluster.name} found\")\n break\n if reference == None:\n print(f\"Catalogue match for {cluster.name} was not found, please create one\")\n continue\n\n #Filter all of the methods out of the properties list\n properties = [a for a in dir(reference) if not a.startswith('_')]\n #print(properties)\n #exec(f\"print(reference.{properties[1]})\")\n #print(properties)\n \n #Now we have a list of all the attributes assigned to the catalogue (the self.variables)\n for p in properties:\n prop = getattr(reference,p)\n #print(prop)\n exec(f\"cluster.{p} = prop\")\n try:\n if prop <= -98:\n print(f\"{cluster.name} does not have a specified catalogue value for {p}\")\n except:\n continue\n \n #Additional properties that may be useful\n for star in cluster.filtered:\n star.normRA = star.pmra*np.cos(star.dec*np.pi/180)\n \n print(f\"{cluster.name} properties refreshed from catalogue\")\n\n\n \n\ndef statPlot(statX,statY,population=\"open\",color=\"default\",square=True,invertY=False,logX=False,logY=False,pointLabels=True,linFit=False,directory='default'):\n #Create plots of stat X vs stat Y across a population of clusters, similar to customPlot()\n #Can be set to use a custom list of clusters, or all clusters of a given type\n #\n import matplotlib\n import matplotlib.pyplot as plt\n import numpy as np\n from scipy.stats import linregress\n global clusters\n global clusterList\n global catalogue\n global resultsIn\n global resultList\n \n \n if not resultsIn:\n loadResults()\n \n #Filter out incorrect inputs\n if type(population) == str:\n population = population.lower()\n try:\n assert 
population == \"open\" or population == \"globular\"\n except:\n print(\"Specified population type not recognized\")\n else:\n try:\n assert type(population) == list\n assert type(population[0]) == str\n except:\n print(\"Population type given is not valid, must be either a list of cluster name strings or a single string \\'open\\' or \\'closed\\'\")\n return\n try:\n assert len(population) > 1\n except:\n print(\"Population statistic plots cannot be made with fewer than 2 clusters given\")\n return\n \n \n #Load cluster information from cList\n #This is going to involve using the resultCluster object to read data from each cluster folder in the cList\n cList = []\n banList = ['NGC2204']\n if type(population) == str:\n for res in resultList:\n if res.clType.lower() == population and not res.name in banList:\n cList.append(res)\n else:\n for res in resultList:\n if res.name in population:\n cList.append(res)\n \n if statX.lower() == \"b_r\" and statY.lower() == \"g_mag\":\n #Corrected CMD overlay\n \n NUM_COLORS = len(cList)\n cm = plt.get_cmap('nipy_spectral')\n \n \n plt.figure(\"uncorrected\")\n plt.title(\"Cluster Overlay\")\n plt.xlabel(\"Observed B-R\")\n plt.ylabel(\"Apparent G Mag\")\n plt.gca().invert_yaxis()\n plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])\n \n plt.figure(\"unshifted\")\n plt.title(\"Corrected Cluster Overlay\")\n plt.xlabel(\"Dereddened B-R\")\n plt.ylabel(\"Absolute G Mag\")\n plt.gca().invert_yaxis()\n plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])\n \n plt.figure(\"shifted\")\n plt.title(\"Corrected Cluster Overlay - Offset\")\n plt.xlabel(\"Dereddened B-R\")\n plt.ylabel(\"Absolute G Mag\")\n plt.gca().invert_yaxis()\n plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])\n \n index = 0\n offset = 2.5\n for cluster in cList:\n try:\n path = cluster.dataPath\n except:\n path = f\"clusters/{cluster.name}/data/\"\n \n condensed = np.genfromtxt(f\"{path}condensed.csv\",delimiter=\",\")\n cluster.condensed = condensed\n \n #Adjust by cluster.reddening and cluster.dist_mod\n x1 = [a[0] for a in condensed]\n y1 = [a[1] for a in condensed]\n x2 = [a[0]-cluster.reddening for a in condensed]\n y2 = [a[1]-2.1*cluster.reddening-cluster.dist_mod for a in condensed]\n x3 = [a[0]-cluster.reddening for a in condensed]\n y3 = [a[1]-2.1*cluster.reddening-cluster.dist_mod+index*offset for a in condensed]\n \n index += 1\n \n plt.figure(\"uncorrected\")\n plt.scatter(x1,y1,label=f\"{cluster.name}\")\n \n plt.figure(\"unshifted\")\n plt.axvline(x=1.6,ymax=0.5,color='black',linestyle='--')\n plt.axhline(y=4,xmin=0.59,color='black',linestyle='--')\n plt.scatter(x2,y2,label=f\"{cluster.name}\")\n \n plt.figure(\"shifted\")\n plt.scatter(x3,y3,label=f\"{cluster.name}\")\n plt.axvline(x=1.6,color='black',linestyle='--')\n \n # if 'NGC2301' in cluster.name:\n # for a,b in zip(x2,y2):\n # print(f\"{a},{b}\")\n \n \n plt.figure(\"uncorrected\")\n plt.legend(fontsize=10,loc='upper right')\n plt.gcf().set_size_inches(8,6)\n plt.savefig(f\"results/plots/pdf/{population}_clusters_stacked_cmd_apparent.pdf\")\n plt.savefig(f\"results/plots/png/{population}_clusters_stacked_cmd_apparent.png\",dpi=500)\n \n plt.figure(\"unshifted\")\n plt.legend(fontsize=10,loc='upper right')\n plt.gcf().set_size_inches(8,6)\n plt.savefig(f\"results/plots/pdf/{population}_clusters_stacked_cmd_absolute.pdf\")\n plt.savefig(f\"results/plots/png/{population}_clusters_stacked_cmd_absolute.png\",dpi=500)\n 
\n plt.figure(\"shifted\")\n plt.legend(fontsize=10,loc='upper right')\n plt.gcf().set_size_inches(8,6)\n plt.savefig(f\"results/plots/pdf/{population}_clusters_stacked_cmd_shifted.pdf\")\n plt.savefig(f\"results/plots/png/{population}_clusters_stacked_cmd_shifted.png\",dpi=500)\n \n \n \n else:\n x = [getattr(a, statX) for a in cList]\n y = [getattr(a, statY) for a in cList]\n \n plt.figure()\n plt.xlabel(f\"{statX}\")\n plt.ylabel(f\"{statY}\")\n if pointLabels:\n for cluster in cList:\n plt.scatter(getattr(cluster, statX),getattr(cluster, statY),label=cluster.name)\n plt.legend(fontsize=\"small\")\n else:\n plt.scatter(x,y)\n \n if linFit:\n reg = linregress(x,y)\n plt.plot(x,[reg[0]*a+reg[1] for a in x])\n \n plt.savefig(f\"SpecificPlots/pdf/{population}_{statX}_{statY}.pdf\")\n plt.savefig(f\"SpecificPlots/png/{population}_{statX}_{statY}.png\",dpi=500)\n \n return\n\ndef ageMassFit(t,m0,k):\n import numpy as np\n \n return 1 + m0*np.exp(-1*k*t)\n\ndef extinctionLaw(d,M0):\n import numpy as np\n \n return M0 -2.5*np.log10(1/(4*np.pi*d**2))\n\ndef resultPlots():\n #Imports\n import matplotlib.pyplot as plt\n import numpy as np\n from scipy.stats import linregress\n from scipy.optimize import curve_fit\n global clusters\n global clusterList\n global catalogue\n global resultsIn\n global resultList\n \n \n if not resultsIn:\n loadResults()\n \n #Select open clusters from resultList\n banList = ['NGC2204']\n cList = []\n for res in resultList:\n if res.clType.lower() == \"open\" and not res.name in banList:\n cList.append(res)\n \n \n #Filtered mass versus age\n fname = \"mass_vs_age_filtered\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters\")\n plt.xlabel(\"Fit Age (Gyr)\")\n plt.ylabel(r\"Mean Cluster Member Mass ($M_{\\odot}$)\")\n plt.scatter([c.fit_age for c in cList],[c.meanProxyMass for c in cList])\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n \n #Bounded mass versus age\n fname = \"mass_vs_age_bounded\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters - BR-RP Limit Enforced\")\n plt.xlabel(\"Fit Age (Gyr)\")\n plt.ylabel(r\"Mean Cluster Member Mass ($M_{\\odot}$)\")\n \n x,y = [c.fit_age for c in cList],[c.meanBoundedProxyMass for c in cList]\n plt.scatter(x,y)\n fit,var = curve_fit(ageMassFit,x,y,p0=[8,1],maxfev=1000)\n xr = list(np.linspace(min(x),max(x),101))\n \n fitLabel = fr\"$y = 1+{fit[0]:.3f}e^{{-{fit[1]:.3f}t}}$\" + \"\\n\" + fr\"Uncertainties = $\\pm{var[0][0]:.3f}, \\pm{var[1][1]:.3f}$\"\n \n plt.plot(xr,[ageMassFit(a,fit[0],fit[1]) for a in xr],label=fitLabel)\n plt.legend()\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n \n #Mass intercept versus age\n fname = \"mass_intercept_vs_age_bounded\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters - BR-RP Limit Enforced\")\n plt.xlabel(\"Fit Age (Gyr)\")\n plt.ylabel(r\"Mean Stellar Mass in Core ($M_{\\odot}$)\")\n \n x,y = [c.fit_age for c in cList],[c.mass_intercept_bounded for c in cList]\n plt.scatter(x,y)\n fit,var = curve_fit(ageMassFit,x,y,p0=[8,1],maxfev=1000)\n xr = list(np.linspace(min(x),max(x),101))\n \n fitLabel = fr\"$y = 1+{fit[0]:.3f}e^{{-{fit[1]:.3f}t}}$\" + \"\\n\" + fr\"Uncertainties = $\\pm{var[0][0]:.3f}, \\pm{var[1][1]:.3f}$\"\n \n plt.plot(xr,[ageMassFit(a,fit[0],fit[1]) for a in xr],label=fitLabel)\n plt.legend()\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n \n 
#Mass slope versus age\n fname = \"mass_slop_vs_age_bounded\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters - BR-RP Limit Enforced\")\n plt.xlabel(\"Fit Age (Gyr)\")\n plt.ylabel(r\"IQM Stellar Mass Dropoff ($\\frac{M_{\\odot}}{pc}$)\")\n \n x,y = [c.fit_age for c in cList],[c.mass_slope_bounded for c in cList]\n plt.scatter(x,y)\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n \n #Magnitude versus distance (Extinction law)\n fname = \"mag_vs_dist_bounded\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters - BR-RP Limit Enforced\")\n plt.xlabel(\"Cluster Distance from Earth (pc)\")\n plt.ylabel(r\"Mean Apparent G Magnitude\")\n \n x,y = [c.meanDist for c in cList],[c.mean_bounded_g_mag for c in cList]\n plt.scatter(x,y)\n fit,var = curve_fit(extinctionLaw,x,y,maxfev=1000)\n xr = list(np.linspace(min(x),max(x),101))\n plt.plot(xr,[extinctionLaw(a,fit[0]) for a in xr],label=\"Inverse Square Law \\n\" + fr\" $M_0 = {fit[0]:.3f} \\pm {var[0][0]:.3f}$\")\n plt.gca().invert_yaxis()\n plt.legend()\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n #Bounded fraction versus distance\n fname = \"bounded_fraction_vs_dist\"\n plt.figure(fname)\n plt.title(f\"{len(cList)} Open Clusters - BR-RP Limit Enforced\")\n plt.xlabel(\"Cluster Distance from Earth (pc)\")\n plt.ylabel(\"Fraction Unaffected by BP-RP Limit\")\n \n x,y = [c.meanDist for c in cList],[c.fractionBounded for c in cList]\n plt.scatter(x,y)\n plt.savefig(f\"results/plots/pdf/{fname}.pdf\")\n plt.savefig(f\"results/plots/png/{fname}.png\",dpi=500)\n \n \n #Radii\n plt.figure()\n plt.scatter([c.meanGalacticDist for c in cList],[c.halfLightRad_bounded/c.medianRad_bounded for c in cList])\n\n\n \ndef boundedStats(cList,xmax=1.6,saveCl=True,unloadCl=True):\n import numpy as np\n global clusters\n global subList\n for cl in cList:\n checkLoaded([cl])\n cluster = clusters[cl]\n \n subList = [star for star in cluster.filtered if not (star.b_r-cluster.reddening > xmax and star.g_mag > cluster.cltpy)]\n \n cluster.bounded = subList\n \n #Windowed properties (over the xmin to xmax range)\n cluster.meanBoundedProxyMass = np.mean([a.proxyMass for a in subList])\n cluster.totalBoundedProxyMass = np.sum([a.proxyMass for a in subList])\n cluster.numBounded = len(subList)\n cluster.fractionBounded = len(subList)/len(cluster.filtered)\n cluster.mean_bounded_b_r = np.mean([a.b_r for a in subList])\n cluster.mean_bounded_g_mag = np.mean([a.g_mag for a in subList])\n \n if saveCl:\n saveClusters([cl])\n saveResults([cl])\n if unloadCl:\n unloadClusters([cl])\n \n \n \n\n\ndef tryFits(fitVar='fit_age'):\n from scipy.stats import linregress\n \n global resultsIn\n global resultList\n global props\n global r2\n \n if not resultsIn:\n loadResults()\n \n cList = []\n for res in resultList:\n if res.clType.lower() == \"open\":\n cList.append(res)\n \n if 'all' in fitVar:\n #List of plottable variables\n props = dir(cList[0])\n props = [a for a in props if not '__' in a]\n propList = [a for a in props if type(getattr(cList[0],a)) == float]\n propList.remove('turnPoint')\n \n \n r2 = []\n \n for pr in propList:\n #List of plottable variables\n props = dir(cList[0])\n props = [a for a in props if not '__' in a]\n props = [a for a in props if type(getattr(cList[0],a)) == float]\n props.remove('turnPoint')\n props.remove(pr)\n \n for prop in props:\n \n x = [getattr(a, pr) for a in cList]\n y = [getattr(a, prop) for a 
in cList]\n \n reg = linregress(x,y)\n r2.append((pr,prop,reg[2]**2))\n \n r2 = sorted(r2,key = lambda x: x[2],reverse=True)\n \n print(\"Top 100 r^2 values:\")\n for r in r2[:200]:\n print(f\"{r[0]} | {r[1]} | {r[2]}\")\n \n \n else:\n #List of plottable variables\n props = dir(cList[0])\n props = [a for a in props if not '__' in a]\n props = [a for a in props if type(getattr(cList[0],a)) == float]\n props.remove('turnPoint')\n props.remove(fitVar)\n \n r2 = []\n for prop in props:\n \n x = [getattr(a, fitVar) for a in cList]\n y = [getattr(a, prop) for a in cList]\n \n reg = linregress(x,y)\n r2.append((prop,reg[2]**2))\n \n r2 = sorted(r2,key = lambda x: x[1],reverse=True)\n \n print(\"Top 20 r^2 values:\")\n for r in r2[:20]:\n print(f\"{r[0]} | {r[1]}\")\n \n \n\ndef prelimPlot(cl):\n import matplotlib.pyplot as plt\n \n cluster = clusters[cl]\n plt.scatter([a.ra for a in cluster.unfilteredWide],[a.dec for a in cluster.unfilteredWide],s=0.1)\n plt.figure()\n plt.scatter([a.pmra for a in cluster.unfilteredWide],[a.pmdec for a in cluster.unfilteredWide],s=0.1)\n # plt.figure()\n # plt.scatter([a.pmra for a in cluster.unfilteredWide],[a.pmdec for a in cluster.unfilteredWide],s=0.1,c=[a.par for a in cluster.unfilteredWide])\n # plt.set_cmap('cool')\n # clb = plt.colorbar()\n plt.figure()\n plt.scatter([a.b_r for a in cluster.unfilteredWide],[a.g_mag for a in cluster.unfilteredWide],s=0.1)\n plt.gca().invert_yaxis()\n # plt.figure()\n # plt.scatter([a.par for a in cluster.unfilteredWide],[a.par for a in cluster.unfilteredWide],s=0.1,c=[(a.pmra**2 + a.pmdec**2)**0.5 for a in cluster.unfilteredWide])\n # plt.set_cmap('cool')\n \n \n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.polyfit",
"numpy.amax",
"numpy.sqrt",
"numpy.linspace",
"numpy.arctan",
"matplotlib.colors.to_rgba",
"numpy.asarray",
"numpy.vstack",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.mean",
"numpy.exp",
"scipy.optimize.curve_fit",
"matplotlib.pyplot.gca",
"pandas.read_csv",
"matplotlib.collections.RegularPolyCollection",
"numpy.arange",
"matplotlib.pyplot.gcf",
"numpy.sin",
"numpy.std",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.less_equal",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.widgets.Lasso",
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.patches.Rectangle",
"matplotlib.path.Path",
"numpy.median",
"matplotlib.patches.Circle",
"matplotlib.pyplot.savefig",
"numpy.genfromtxt",
"numpy.quantile",
"numpy.delete",
"scipy.stats.linregress",
"numpy.log10",
"numpy.savetxt",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.hist",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.axhline",
"numpy.abs",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.yscale",
"numpy.cos",
"matplotlib.pyplot.set_cmap",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
aripekka/tbcalc | [
"a0337db245f5391bfa9a42123994832c299b1fbe"
] | [
"tests/test_tensor_transform.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nTests for the tensor transform functions. Run with pytest.\n\nCreated on Sat May 9 00:09:00 2020\n\n@author: aripekka\n\"\"\"\n\nimport sys\nimport os.path\nimport numpy as np\n\nsys.path.insert(1, os.path.join(os.path.dirname(__file__),'..'))\n\nfrom tbcalc.transverse_deformation import * \nfrom tbcalc import cartesian_tensors_to_cylindrical\n\nfrom pyTTE import TTcrystal, Quantity\n\ndef test_isotropic_circular():\n\n #Calculate the reference stresses and strains as implemented in the \n #deprecated sbcalc package\n\n E = 165\n nu = 0.22\n\n thickness = 0.1\n\n Rx = 1000.0\n Ry = 500.0\n\n R = np.sqrt(Rx*Ry)\n \n L = 100.0 \n \n x=np.linspace(-L/2,L/2,150)\n X,Y=np.meshgrid(x,x)\n\n RR = np.sqrt(X**2 + Y**2)\n PHI = np.arctan2(Y,X)\n\n stress, strain, P_imp = isotropic_circular(Rx, Ry, L, thickness, nu, E)\n\n stress_cyl = cartesian_tensors_to_cylindrical(stress)\n strain_cyl = cartesian_tensors_to_cylindrical(strain)\n\n\n stress_cyl_ref = {}\n stress_cyl_ref['rr'] = E/(16*R**2)*(L**2/4-RR**2)+stress['xx'](X,Y)*0\n stress_cyl_ref['phiphi'] = E/(16*R**2)*(L**2/4-3*RR**2)+stress['xx'](X,Y)*0\n stress_cyl_ref['rphi'] = stress['xx'](X,Y)*0\n stress_cyl_ref['phir'] = stress['xx'](X,Y)*0\n\n strain_cyl_ref = {}\n strain_cyl_ref['rr'] = 1/(16*R**2)*((1-nu)*L**2/4-(1-3*nu)*RR**2)+stress['xx'](X,Y)*0\n strain_cyl_ref['phiphi'] = 1/(16*R**2)*((1-nu)*L**2/4-(3-nu)*RR**2)+stress['xx'](X,Y)*0\n strain_cyl_ref['rphi'] = stress['xx'](X,Y)*0\n strain_cyl_ref['phir'] = stress['xx'](X,Y)*0\n strain_cyl_ref['zphi'] = stress['xx'](X,Y)*0\n strain_cyl_ref['phiz'] = stress['xx'](X,Y)*0\n strain_cyl_ref['rz'] = stress['xx'](X,Y)*0\n strain_cyl_ref['zr'] = stress['xx'](X,Y)*0\n strain_cyl_ref['zz'] = nu/(4*R**2)*(RR**2-L**2/8)+stress['xx'](X,Y)*0\n\n meps = np.finfo(np.float).eps #m\n \n for i in ['r','phi']:\n for j in ['r','phi']:\n assert np.all(np.logical_or(np.abs(stress_cyl_ref[i+j] - stress_cyl[i+j](RR,PHI)) < meps,\n np.logical_and(np.isnan(stress_cyl_ref[i+j]), np.isnan(stress_cyl[i+j](RR,PHI)))))\n\n for i in ['r','phi','z']:\n for j in ['r','phi','z']:\n assert np.all(np.logical_or(np.abs(strain_cyl_ref[i+j] - strain_cyl[i+j](RR,PHI)) < meps,\n np.logical_and(np.isnan(strain_cyl_ref[i+j]), np.isnan(strain_cyl[i+j](RR,PHI)))))"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"numpy.isnan",
"numpy.finfo",
"numpy.arctan2",
"numpy.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liyunze-coding/Trigger-Me-Elmo-2 | [
"6950ffa4bfd264e213626f1ab3cff249fbab36da"
] | [
"app.py"
] | [
"from flask import Flask, render_template, request, jsonify\nimport base64\nimport logging\nimport numpy as np\nfrom deepface import DeepFace\nfrom PIL import Image\nfrom io import BytesIO\nimport subprocess\nimport os\nimport cv2\nimport random\nimport webbrowser\n\napp = Flask(__name__)\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\nfaceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nerror_path = {'race': {'asian': 0, 'indian': 0, 'black': 0, 'white': 0,\n 'middle eastern': 0, 'latino hispanic': 0}, 'dominant_race': '?'}\ndirectory = 'static/img'\n\nif 'img' not in os.listdir('static/'):\n os.mkdir(directory)\n\nfor f in os.listdir(directory):\n os.remove(os.path.join(directory, f))\n\n\ndef generate_random_string():\n numbers = '1234567890'\n res = ''.join(random.choice(numbers) for _ in range(10))\n return f'{directory}/{res}.png'\n\n\[email protected]('/')\ndef main():\n return render_template('index.html')\n\n\[email protected]('/photocap')\ndef photo_cap():\n photo_base64 = request.args.get('photo')\n\n _, encoded = photo_base64.split(\",\", 1)\n binary_data = base64.b64decode(encoded)\n\n f = BytesIO()\n f.write(binary_data)\n f.seek(0)\n image = Image.open(f)\n image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, 1.3, 5)\n\n for (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)\n fn = generate_random_string()\n\n cv2.imwrite(fn, image)\n try:\n obj = DeepFace.analyze(image, actions=['race'])\n obj['filename'] = fn\n return jsonify(obj)\n\n except ValueError:\n other_json = error_path\n other_json['filename'] = fn\n\n return jsonify(other_json)\n\n except Exception as e:\n print(e)\n other_json = error_path\n other_json['filename'] = fn\n\n return jsonify(other_json)\n\n\nif __name__ == \"__main__\":\n # p = subprocess.Popen(['python -m SimpleHTTPServer'], shell=True) #Only for macOS\n webbrowser.open_new('http://127.0.0.1:8000/')\n app.run(host='localhost', port=8000, debug=True)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pyrito/SpeechSplit | [
"ee70ee77e54d5b7cd1b39e7bef1cb96ae78f8beb"
] | [
"solver.py"
] | [
"from torch.utils.tensorboard.summary import hparams\nfrom model import Generator_3 as Generator\nfrom model import InterpLnr\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport pickle\n\nfrom utils import pad_seq_to_2, quantize_f0_torch, quantize_f0_numpy\n\n# use demo data for simplicity\n# make your own validation set as needed\nvalidation_pt = pickle.load(open('assets/demo.pkl', \"rb\"))\n\nclass Solver(object):\n \"\"\"Solver for training\"\"\"\n\n def __init__(self, vcc_loader, config, hparams):\n \"\"\"Initialize configurations.\"\"\"\n\n # Data loader.\n self.vcc_loader = vcc_loader\n self.hparams = hparams\n\n # Training configurations.\n self.num_iters = config.num_iters\n self.g_lr = config.g_lr\n self.beta1 = config.beta1\n self.beta2 = config.beta2\n self.resume_iters = config.resume_iters\n \n # Miscellaneous.\n self.use_tensorboard = config.use_tensorboard\n self.use_cuda = torch.cuda.is_available()\n self.device = torch.device('cuda:{}'.format(config.device_id) if self.use_cuda else 'cpu')\n\n # Directories.\n self.log_dir = config.log_dir\n self.sample_dir = config.sample_dir\n self.model_save_dir = config.model_save_dir\n\n # Step size.\n self.log_step = config.log_step\n self.sample_step = config.sample_step\n self.model_save_step = config.model_save_step\n \n\n # Build the model and tensorboard.\n self.build_model()\n if self.use_tensorboard:\n self.build_tensorboard()\n\n \n def build_model(self): \n self.G = Generator(self.hparams)\n \n self.Interp = InterpLnr(self.hparams)\n \n self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n self.print_network(self.G, 'G')\n \n self.G.to(self.device)\n self.Interp.to(self.device)\n\n \n def print_network(self, model, name):\n \"\"\"Print out the network information.\"\"\"\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))\n \n \n def print_optimizer(self, opt, name):\n print(opt)\n print(name)\n \n \n def restore_model(self, resume_iters):\n print('Loading the trained models from step {}...'.format(resume_iters))\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))\n g_checkpoint = torch.load(G_path, map_location=lambda storage, loc: storage)\n self.G.load_state_dict(g_checkpoint['model'])\n self.g_optimizer.load_state_dict(g_checkpoint['optimizer'])\n self.g_lr = self.g_optimizer.param_groups[0]['lr']\n \n \n def build_tensorboard(self):\n \"\"\"Build a tensorboard logger.\"\"\"\n from torch.utils.tensorboard import SummaryWriter\n self.writer = SummaryWriter(self.log_dir)\n \n\n def reset_grad(self):\n \"\"\"Reset the gradient buffers.\"\"\"\n self.g_optimizer.zero_grad()\n \n def encode_context(self):\n # Set data loader.\n data_loader = self.vcc_loader\n \n # Fetch fixed inputs for debugging.\n data_iter = iter(data_loader)\n \n # Start encoding from scratch or resume from checkpoint.\n start_iters = 0\n if self.resume_iters:\n print('Resuming ...')\n start_iters = self.resume_iters\n self.num_iters += self.resume_iters\n self.restore_model(self.resume_iters)\n # self.print_optimizer(self.g_optimizer, 'G_optimizer')\n \n \n # Print logs in specified order\n keys = ['G/loss_id']\n \n # Start encoding.\n print('Start encoding...')\n start_time = time.time()\n\n encoded_audio = {}\n # May need this if looping doesn't work: \n # for i in 
max(range(start_iters, self.num_iters), len(self.vcc_loader)):\n print(len(self.vcc_loader))\n count = 0\n for i, (x_real_org, emb_org, f0_org, len_org, id_org) in enumerate(self.vcc_loader):\n\n # =================================================================================== #\n # 1. Send input data to device #\n # =================================================================================== #\n \n # x_real_org = x_real_org.to(self.device)\n # emb_org = emb_org.to(self.device)\n # len_org = len_org.to(self.device)\n # f0_org = f0_org.to(self.device)\n \n \n # =================================================================================== #\n # 2. Encode using the generator #\n # =================================================================================== #\n \n self.G = self.G.eval()\n\n pad = 8 - ((len_org[0] + 1) % 8)\n encode_length = len_org[0] + 1 + pad\n print(id_org)\n x_real_pad, _ = pad_seq_to_2(x_real_org, encode_length)\n # len_org = torch.tensor([val_sub[k][2]]).to(self.device) \n f0_org_pad, _ = pad_seq_to_2(f0_org, encode_length) # np.pad(f0_org, (0, 512-len_org[0]), 'constant', constant_values=(0, 0))\n assert x_real_pad.shape[1] == f0_org_pad.shape[1]\n f0_quantized = quantize_f0_numpy(np.squeeze(f0_org_pad))[0]\n f0_onehot = f0_quantized[np.newaxis, :, :]\n f0_org_val = torch.from_numpy(f0_onehot).to(self.device) \n x_real_pad = torch.from_numpy(x_real_pad).to(self.device) \n x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)\n code_content, code_pitch, code_rhythm, speaker_emb = self.G.forward_encode(x_f0, x_real_pad, emb_org)\n\n # code_content, code_pitch, code_rhythm, speaker_emb = self.G.forward_encode(x_f0_intrp_org, x_real_org, emb_org)\n # print(f'content: {code_content}')\n\n encoded_audio[id_org[0]] = code_content\n et = time.time() - start_time\n et = str(datetime.timedelta(seconds=et))[:-7]\n log = \"Elapsed [{}], Audio file[{}/{}]\".format(et, i+1, len(self.vcc_loader))\n print(log)\n count += 1\n if count % 100 == 0:\n with open(f'assets/encoded-{self.hparams.encode_mode}-{count}.pkl', 'wb') as f:\n pickle.dump(encoded_audio, f)\n del encoded_audio\n encoded_audio = {}\n\n\n\n#=====================================================================================================================\n \n \n \n def train(self):\n # Set data loader.\n data_loader = self.vcc_loader\n \n # Fetch fixed inputs for debugging.\n data_iter = iter(data_loader)\n \n # Start training from scratch or resume training.\n start_iters = 0\n if self.resume_iters:\n print('Resuming ...')\n start_iters = self.resume_iters\n self.num_iters += self.resume_iters\n self.restore_model(self.resume_iters)\n self.print_optimizer(self.g_optimizer, 'G_optimizer')\n \n # Learning rate cache for decaying.\n g_lr = self.g_lr\n print ('Current learning rates, g_lr: {}.'.format(g_lr))\n \n # Print logs in specified order\n keys = ['G/loss_id']\n \n # Start training.\n print('Start training...')\n start_time = time.time()\n \n for i in range(start_iters, self.num_iters):\n\n # =================================================================================== #\n # 1. 
Preprocess input data #\n # =================================================================================== #\n\n # Fetch real images and labels.\n try:\n x_real_org, emb_org, f0_org, len_org = next(data_iter)\n except:\n data_iter = iter(data_loader)\n x_real_org, emb_org, f0_org, len_org = next(data_iter)\n \n x_real_org = x_real_org.to(self.device)\n emb_org = emb_org.to(self.device)\n len_org = len_org.to(self.device)\n f0_org = f0_org.to(self.device)\n \n \n # =================================================================================== #\n # 2. Train the generator #\n # =================================================================================== #\n \n self.G = self.G.train()\n \n # Identity mapping loss\n x_f0 = torch.cat((x_real_org, f0_org), dim=-1)\n x_f0_intrp = self.Interp(x_f0, len_org) \n f0_org_intrp = quantize_f0_torch(x_f0_intrp[:,:,-1])[0]\n x_f0_intrp_org = torch.cat((x_f0_intrp[:,:,:-1], f0_org_intrp), dim=-1)\n \n x_identic = self.G(x_f0_intrp_org, x_real_org, emb_org)\n g_loss_id = F.mse_loss(x_real_org, x_identic, reduction='mean') \n \n # Backward and optimize.\n g_loss = g_loss_id\n self.reset_grad()\n g_loss.backward()\n self.g_optimizer.step()\n\n # Logging.\n loss = {}\n loss['G/loss_id'] = g_loss_id.item()\n \n\n # =================================================================================== #\n # 4. Miscellaneous #\n # =================================================================================== #\n\n # Print out training information.\n if (i+1) % self.log_step == 0:\n et = time.time() - start_time\n et = str(datetime.timedelta(seconds=et))[:-7]\n log = \"Elapsed [{}], Iteration [{}/{}]\".format(et, i+1, self.num_iters)\n for tag in keys:\n log += \", {}: {:.8f}\".format(tag, loss[tag])\n print(log)\n\n if self.use_tensorboard:\n for tag, value in loss.items():\n self.writer.add_scalar(tag, value, i+1)\n \n \n # Save model checkpoints.\n if (i+1) % self.model_save_step == 0:\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))\n torch.save({'model': self.G.state_dict(),\n 'optimizer': self.g_optimizer.state_dict()}, G_path)\n print('Saved model checkpoints into {}...'.format(self.model_save_dir)) \n \n\n # Validation.\n if (i+1) % self.sample_step == 0:\n self.G = self.G.eval()\n with torch.no_grad():\n loss_val = []\n for val_sub in validation_pt:\n emb_org_val = torch.from_numpy(val_sub[1]).to(self.device) \n for k in range(2, 3):\n x_real_pad, _ = pad_seq_to_2(val_sub[k][0][np.newaxis,:,:], 192)\n len_org = torch.tensor([val_sub[k][2]]).to(self.device) \n f0_org = np.pad(val_sub[k][1], (0, 192-val_sub[k][2]), 'constant', constant_values=(0, 0))\n f0_quantized = quantize_f0_numpy(f0_org)[0]\n f0_onehot = f0_quantized[np.newaxis, :, :]\n f0_org_val = torch.from_numpy(f0_onehot).to(self.device) \n x_real_pad = torch.from_numpy(x_real_pad).to(self.device) \n x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)\n x_identic_val = self.G(x_f0, x_real_pad, emb_org_val)\n g_loss_val = F.mse_loss(x_real_pad, x_identic_val, reduction='sum')\n loss_val.append(g_loss_val.item())\n val_loss = np.mean(loss_val) \n print('Validation loss: {}'.format(val_loss))\n if self.use_tensorboard:\n self.writer.add_scalar('Validation_loss', val_loss, i+1)\n \n\n # plot test samples\n if (i+1) % self.sample_step == 0:\n self.G = self.G.eval()\n with torch.no_grad():\n for val_sub in validation_pt:\n emb_org_val = torch.from_numpy(val_sub[1]).to(self.device) \n for k in range(2, 3):\n x_real_pad, _ = 
pad_seq_to_2(val_sub[k][0][np.newaxis,:,:], 192)\n len_org = torch.tensor([val_sub[k][2]]).to(self.device) \n f0_org = np.pad(val_sub[k][1], (0, 192-val_sub[k][2]), 'constant', constant_values=(0, 0))\n f0_quantized = quantize_f0_numpy(f0_org)[0]\n f0_onehot = f0_quantized[np.newaxis, :, :]\n f0_org_val = torch.from_numpy(f0_onehot).to(self.device) \n x_real_pad = torch.from_numpy(x_real_pad).to(self.device) \n x_f0 = torch.cat((x_real_pad, f0_org_val), dim=-1)\n x_f0_F = torch.cat((x_real_pad, torch.zeros_like(f0_org_val)), dim=-1)\n x_f0_C = torch.cat((torch.zeros_like(x_real_pad), f0_org_val), dim=-1)\n \n x_identic_val = self.G(x_f0, x_real_pad, emb_org_val)\n x_identic_woF = self.G(x_f0_F, x_real_pad, emb_org_val)\n x_identic_woR = self.G(x_f0, torch.zeros_like(x_real_pad), emb_org_val)\n x_identic_woC = self.G(x_f0_C, x_real_pad, emb_org_val)\n \n melsp_gd_pad = x_real_pad[0].cpu().numpy().T\n melsp_out = x_identic_val[0].cpu().numpy().T\n melsp_woF = x_identic_woF[0].cpu().numpy().T\n melsp_woR = x_identic_woR[0].cpu().numpy().T\n melsp_woC = x_identic_woC[0].cpu().numpy().T\n \n min_value = np.min(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC]))\n max_value = np.max(np.hstack([melsp_gd_pad, melsp_out, melsp_woF, melsp_woR, melsp_woC]))\n \n # fig, (ax1,ax2,ax3,ax4,ax5) = plt.subplots(5, 1, sharex=True)\n # im1 = ax1.imshow(melsp_gd_pad, aspect='auto', vmin=min_value, vmax=max_value)\n # im2 = ax2.imshow(melsp_out, aspect='auto', vmin=min_value, vmax=max_value)\n # im3 = ax3.imshow(melsp_woC, aspect='auto', vmin=min_value, vmax=max_value)\n # im4 = ax4.imshow(melsp_woR, aspect='auto', vmin=min_value, vmax=max_value)\n # im5 = ax5.imshow(melsp_woF, aspect='auto', vmin=min_value, vmax=max_value)\n # plt.savefig(f'{self.sample_dir}/{i+1}_{val_sub[0]}_{k}.png', dpi=150)\n # plt.close(fig) "
] | [
[
"numpy.hstack",
"numpy.pad",
"torch.cat",
"torch.load",
"numpy.squeeze",
"torch.zeros_like",
"torch.from_numpy",
"torch.tensor",
"torch.nn.functional.mse_loss",
"numpy.mean",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
snsnlou/mars | [
"6b8eec162eccc8bb980a98ca2cf1e6a4b866d302",
"6b8eec162eccc8bb980a98ca2cf1e6a4b866d302",
"6b8eec162eccc8bb980a98ca2cf1e6a4b866d302",
"6b8eec162eccc8bb980a98ca2cf1e6a4b866d302",
"6b8eec162eccc8bb980a98ca2cf1e6a4b866d302"
] | [
"mars/dataframe/datastore/tests/test_datastore_execute.py",
"mars/tensor/datasource/diag.py",
"mars/dataframe/indexing/setitem.py",
"mars/tensor/datasource/eye.py",
"mars/tensor/base/isin.py"
] | [
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport pandas as pd\n\nimport mars.dataframe as md\nfrom mars.config import option_context\nfrom mars.dataframe import DataFrame\nfrom mars.deploy.local.core import new_cluster\nfrom mars.session import new_session\nfrom mars.tests.core import TestBase, flaky\n\ntry:\n import vineyard\nexcept ImportError:\n vineyard = None\ntry:\n import sqlalchemy\nexcept ImportError:\n sqlalchemy = None\ntry:\n import pyarrow as pa\nexcept ImportError:\n pa = None\ntry:\n import fastparquet\nexcept ImportError:\n fastparquet = None\n\n_exec_timeout = 120 if 'CI' in os.environ else -1\n\n\nclass Test(TestBase):\n def setUp(self):\n super().setUp()\n self.ctx, self.executor = self._create_test_context()\n\n def testToCSVExecution(self):\n index = pd.RangeIndex(100, 0, -1, name='index')\n raw = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100)\n }, index=index)\n df = DataFrame(raw, chunk_size=33)\n\n with tempfile.TemporaryDirectory() as base_path:\n # DATAFRAME TESTS\n # test one file with dataframe\n path = os.path.join(base_path, 'out.csv')\n\n r = df.to_csv(path)\n self.executor.execute_dataframe(r)\n\n result = pd.read_csv(path, dtype=raw.dtypes.to_dict())\n result.set_index('index', inplace=True)\n pd.testing.assert_frame_equal(result, raw)\n\n # test multi files with dataframe\n path = os.path.join(base_path, 'out-*.csv')\n r = df.to_csv(path)\n self.executor.execute_dataframe(r)\n\n dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),\n dtype=raw.dtypes.to_dict())\n for i in range(4)]\n result = pd.concat(dfs, axis=0)\n result.set_index('index', inplace=True)\n pd.testing.assert_frame_equal(result, raw)\n pd.testing.assert_frame_equal(dfs[1].set_index('index'), raw.iloc[33: 66])\n\n with self.ctx:\n # test df with unknown shape\n df2 = DataFrame(raw, chunk_size=(50, 2))\n df2 = df2[df2['col1'] < 1]\n path2 = os.path.join(base_path, 'out2.csv')\n r = df2.to_csv(path2)\n self.executor.execute_dataframes([r])\n\n result = pd.read_csv(path2, dtype=raw.dtypes.to_dict())\n result.set_index('index', inplace=True)\n pd.testing.assert_frame_equal(result, raw)\n\n # SERIES TESTS\n series = md.Series(raw.col1, chunk_size=33)\n\n # test one file with series\n path = os.path.join(base_path, 'out.csv')\n r = series.to_csv(path)\n self.executor.execute_dataframe(r)\n\n result = pd.read_csv(path, dtype=raw.dtypes.to_dict())\n result.set_index('index', inplace=True)\n pd.testing.assert_frame_equal(result, raw.col1.to_frame())\n\n # test multi files with series\n path = os.path.join(base_path, 'out-*.csv')\n r = series.to_csv(path)\n self.executor.execute_dataframe(r)\n\n dfs = [pd.read_csv(os.path.join(base_path, f'out-{i}.csv'),\n dtype=raw.dtypes.to_dict())\n for i in range(4)]\n result = pd.concat(dfs, axis=0)\n result.set_index('index', inplace=True)\n 
pd.testing.assert_frame_equal(result, raw.col1.to_frame())\n pd.testing.assert_frame_equal(dfs[1].set_index('index'), raw.col1.to_frame().iloc[33: 66])\n\n @unittest.skipIf(sqlalchemy is None, 'sqlalchemy not installed')\n def testToSQL(self):\n index = pd.RangeIndex(100, 0, -1, name='index')\n raw = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.random.choice(['a', 'b', 'c'], (100,)),\n 'col3': np.arange(100).astype('int64'),\n }, index=index)\n\n with tempfile.TemporaryDirectory() as d:\n table_name1 = 'test_table'\n table_name2 = 'test_table2'\n uri = 'sqlite:///' + os.path.join(d, 'test.db')\n\n engine = sqlalchemy.create_engine(uri)\n\n # test write dataframe\n df = DataFrame(raw, chunk_size=33)\n r = df.to_sql(table_name1, con=engine)\n self.executor.execute_dataframe(r)\n\n written = pd.read_sql(table_name1, con=engine, index_col='index') \\\n .sort_index(ascending=False)\n pd.testing.assert_frame_equal(raw, written)\n\n # test write with existing table\n with self.assertRaises(ValueError):\n df.to_sql(table_name1, con=uri).execute()\n\n # test write series\n series = md.Series(raw.col1, chunk_size=33)\n with engine.connect() as conn:\n r = series.to_sql(table_name2, con=conn)\n self.executor.execute_dataframe(r)\n\n written = pd.read_sql(table_name2, con=engine, index_col='index') \\\n .sort_index(ascending=False)\n pd.testing.assert_frame_equal(raw.col1.to_frame(), written)\n\n @unittest.skipIf(vineyard is None, 'vineyard not installed')\n @flaky(max_runs=3)\n def testToVineyard(self):\n def run_with_given_session(session, **kw):\n ipc_socket = os.environ.get('VINEYARD_IPC_SOCKET', '/tmp/vineyard/vineyard.sock')\n with option_context({'vineyard.socket': ipc_socket}):\n df1 = DataFrame(pd.DataFrame(np.arange(12).reshape(3, 4), columns=['a', 'b', 'c', 'd']),\n chunk_size=2)\n object_id = df1.to_vineyard().execute(session=session, **kw).fetch(session=session)\n df2 = md.from_vineyard(object_id)\n\n df1_value = df1.execute(session=session, **kw).fetch(session=session)\n df2_value = df2.execute(session=session, **kw).fetch(session=session)\n pd.testing.assert_frame_equal(\n df1_value.reset_index(drop=True), df2_value.reset_index(drop=True))\n\n with new_session().as_default() as session:\n run_with_given_session(session)\n\n with new_cluster(scheduler_n_process=2, worker_n_process=2,\n shared_memory='20M', web=False) as cluster:\n with new_session(cluster.endpoint).as_default() as session:\n run_with_given_session(session, timeout=_exec_timeout)\n\n @unittest.skipIf(pa is None, 'pyarrow not installed')\n def testToParquetArrowExecution(self):\n raw = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.arange(100),\n 'col3': np.random.choice(['a', 'b', 'c'], (100,)),\n })\n df = DataFrame(raw, chunk_size=33)\n\n with tempfile.TemporaryDirectory() as base_path:\n # DATAFRAME TESTS\n path = os.path.join(base_path, 'out-*.parquet')\n r = df.to_parquet(path)\n self.executor.execute_dataframe(r)\n\n read_df = md.read_parquet(path)\n result = self.executor.execute_dataframe(read_df, concat=True)[0]\n result = result.sort_index()\n pd.testing.assert_frame_equal(result, raw)\n\n read_df = md.read_parquet(path)\n result = self.executor.execute_dataframe(read_df, concat=True)[0]\n result = result.sort_index()\n pd.testing.assert_frame_equal(result, raw)\n\n # test read_parquet then to_parquet\n read_df = md.read_parquet(path)\n r = read_df.to_parquet(path)\n self.executor.execute_dataframes([r])\n\n # test partition_cols\n path = os.path.join(base_path, 'out-partitioned')\n r = 
df.to_parquet(path, partition_cols=['col3'])\n self.executor.execute_dataframe(r)\n\n read_df = md.read_parquet(path)\n result = self.executor.execute_dataframe(read_df, concat=True)[0]\n result['col3'] = result['col3'].astype('object')\n pd.testing.assert_frame_equal(result.sort_values('col1').reset_index(drop=True),\n raw.sort_values('col1').reset_index(drop=True))\n\n @unittest.skipIf(fastparquet is None, 'fastparquet not installed')\n def testToParquetFastParquetExecution(self):\n raw = pd.DataFrame({\n 'col1': np.random.rand(100),\n 'col2': np.arange(100),\n 'col3': np.random.choice(['a', 'b', 'c'], (100,)),\n })\n df = DataFrame(raw, chunk_size=33)\n\n with tempfile.TemporaryDirectory() as base_path:\n # test fastparquet\n path = os.path.join(base_path, 'out-fastparquet-*.parquet')\n r = df.to_parquet(path, engine='fastparquet', compression='gzip')\n self.executor.execute_dataframe(r)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ...core import TilesError\nfrom ...serialize import KeyField, Int32Field\nfrom ...utils import check_chunks_unknown_shape\nfrom ..core import TENSOR_TYPE\nfrom ...lib.sparse import diag as sparse_diag\nfrom ...lib.sparse.core import issparse, get_array_module, get_sparse_module\nfrom ...lib import sparse\nfrom ..core import TensorOrder\nfrom ..array_utils import create_array\nfrom .core import TensorHasInput\nfrom .zeros import TensorZeros\nfrom .array import tensor\n\n\ndef _get_diag_shape(v_shape, k):\n size_0, size_1 = 0, 0\n if k > 0:\n size_1 += k\n elif k < 0:\n size_0 -= k\n size = min(v_shape[0] - size_0, v_shape[1] - size_1)\n return size,\n\n\nclass TensorDiagBase(object):\n __slots__ = ()\n\n def to_chunk_op(self, *args):\n op = self.copy().reset_key()\n k, = args\n op._k = k\n return op\n\n @classmethod\n def _get_nsplits(cls, op):\n raise NotImplementedError\n\n @classmethod\n def _get_chunk(cls, op, chunk_k, chunk_shape, chunk_idx):\n raise NotImplementedError\n\n @classmethod\n def tile(cls, op):\n if op.inputs:\n check_chunks_unknown_shape(op.inputs, TilesError)\n tensor = op.outputs[0]\n\n # op can be TensorDiag or TensorEye\n k = op.k\n nsplits = op._get_nsplits(op)\n\n fx = lambda x, y: x - y + k\n cum_size = [np.cumsum(s).tolist() for s in nsplits]\n out_chunks = []\n for out_idx in itertools.product(*[range(len(s)) for s in nsplits]):\n i, j = out_idx\n ld_pos = cum_size[0][i] - 1, cum_size[1][j] - nsplits[1][j]\n ru_pos = cum_size[0][i] - nsplits[0][i], cum_size[1][j] - 1\n\n ld_fx = fx(*ld_pos)\n ru_fx = fx(*ru_pos)\n\n chunk_shape = (nsplits[0][i], nsplits[1][j])\n if (ld_fx > 0 and ru_fx > 0) or (ld_fx < 0 and ru_fx < 0):\n # does not cross, fill with zeros\n chunk_op = TensorZeros(dtype=op.dtype, gpu=op.gpu, sparse=op.sparse)\n chunk = chunk_op.new_chunk(None, shape=chunk_shape, index=out_idx)\n else:\n lu_pos = ru_pos[0], ld_pos[1]\n chunk_k = fx(*lu_pos)\n chunk = op._get_chunk(op, chunk_k, chunk_shape, out_idx)\n\n out_chunks.append(chunk)\n\n new_op = op.copy()\n return new_op.new_tensors(op.inputs, tensor.shape, chunks=out_chunks,\n nsplits=nsplits)\n\n\nclass TensorDiag(TensorDiagBase, TensorHasInput):\n _op_type_ = OperandDef.TENSOR_DIAG\n\n _input = KeyField('input')\n _k = Int32Field('k')\n\n def __init__(self, k=None, **kw):\n super().__init__(_k=k, **kw)\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n if self.dtype is None:\n self._dtype = self.input.dtype\n\n def to_chunk_op(self, *args):\n return TensorDiagBase.to_chunk_op(self, *args)\n\n @classmethod\n def _get_nsplits(cls, op):\n assert op.input.ndim == 1\n k = op.k\n nsplits_1d = op.input.nsplits[0]\n nsplit_0, nsplit_1 = list(nsplits_1d), list(nsplits_1d)\n if k > 0:\n nsplit_0.append(k)\n nsplit_1.insert(0, k)\n elif k < 0:\n nsplit_0.insert(0, 
abs(k))\n nsplit_1.append(abs(k))\n return nsplit_0, nsplit_1\n\n @classmethod\n def _get_chunk(cls, op, chunk_k, chunk_shape, chunk_idx):\n assert chunk_shape[0] == chunk_shape[1]\n input_idx = chunk_idx[1] if op.k < 0 else chunk_idx[0]\n input_chunk = op.inputs[0].cix[input_idx, ]\n op = TensorDiag(k=chunk_k, dtype=op.dtype, gpu=op.gpu, sparse=op.sparse)\n return op.new_chunk([input_chunk], shape=chunk_shape, index=chunk_idx)\n\n def __call__(self, v, shape, chunk_size=None):\n return self.new_tensor([v], shape, raw_chunk_size=chunk_size,\n order=TensorOrder.C_ORDER)\n\n @classmethod\n def tile(cls, op):\n tensor = op.outputs[0]\n\n v = op.input\n k = op.k\n idx = itertools.count(0)\n if v.ndim == 2:\n check_chunks_unknown_shape(op.inputs, TilesError)\n chunks = []\n nsplit = []\n\n fx = lambda x, y: x - y + k\n in_nsplits = v.nsplits\n cum_size = [np.cumsum(s).tolist() for s in in_nsplits]\n for c in v.chunks:\n i, j = c.index\n ld_pos = cum_size[0][i] - 1, cum_size[1][j] - in_nsplits[1][j]\n ru_pos = cum_size[0][i] - in_nsplits[0][i], cum_size[1][j] - 1\n\n ld_fx = fx(*ld_pos)\n ru_fx = fx(*ru_pos)\n\n if (ld_fx > 0 and ru_fx > 0) or (ld_fx < 0 and ru_fx < 0):\n continue\n\n lu_pos = ru_pos[0], ld_pos[1]\n chunk_k = fx(*lu_pos)\n\n chunk_shape = _get_diag_shape(c.shape, chunk_k)\n chunk_idx = (next(idx),)\n chunk_op = op.to_chunk_op(chunk_k)\n chunk = chunk_op.new_chunk([c], shape=chunk_shape,\n index=chunk_idx, order=tensor.order)\n nsplit.append(chunk_shape[0])\n chunks.append(chunk)\n\n new_op = op.copy()\n return new_op.new_tensors(op.inputs, op.outputs[0].shape, order=tensor.order,\n chunks=chunks, nsplits=(tuple(nsplit),))\n else:\n return super().tile(op)\n\n @property\n def k(self):\n return getattr(self, '_k', 0)\n\n @classmethod\n def execute(cls, ctx, op):\n chunk = op.outputs[0]\n if op.sparse:\n ctx[chunk.key] = sparse.diag(ctx[op.inputs[0].key], k=op.k, gpu=op.gpu)\n else:\n ctx[chunk.key] = create_array(op)(\n 'diag', ctx[op.inputs[0].key], k=op.k)\n\n\ndef diag(v, k=0, sparse=None, gpu=False, chunk_size=None):\n \"\"\"\n Extract a diagonal or construct a diagonal tensor.\n\n See the more detailed documentation for ``mt.diagonal`` if you use this\n function to extract a diagonal and wish to write to the resulting tensor\n\n Parameters\n ----------\n v : array_like\n If `v` is a 2-D tensor, return its `k`-th diagonal.\n If `v` is a 1-D tensor, return a 2-D tensor with `v` on the `k`-th\n diagonal.\n k : int, optional\n Diagonal in question. The default is 0. 
Use `k>0` for diagonals\n above the main diagonal, and `k<0` for diagonals below the main\n diagonal.\n sparse: bool, optional\n Create sparse tensor if True, False as default\n gpu : bool, optional\n Allocate the tensor on GPU if True, False as default\n chunk_size : int or tuple of int or tuple of ints, optional\n Desired chunk size on each dimension\n\n Returns\n -------\n out : Tensor\n The extracted diagonal or constructed diagonal tensor.\n\n See Also\n --------\n diagonal : Return specified diagonals.\n diagflat : Create a 2-D array with the flattened input as a diagonal.\n trace : Sum along diagonals.\n triu : Upper triangle of a tensor.\n tril : Lower triangle of a tensor.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> x = mt.arange(9).reshape((3,3))\n >>> x.execute()\n array([[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]])\n\n >>> mt.diag(x).execute()\n array([0, 4, 8])\n >>> mt.diag(x, k=1).execute()\n array([1, 5])\n >>> mt.diag(x, k=-1).execute()\n array([3, 7])\n\n >>> mt.diag(mt.diag(x)).execute()\n array([[0, 0, 0],\n [0, 4, 0],\n [0, 0, 8]])\n\n \"\"\"\n if not isinstance(v, TENSOR_TYPE):\n tensor_v = tensor(v)\n if tensor_v.issparse():\n xps = get_sparse_module(tensor_v.data)\n v = xps.csr_matrix((tensor_v.op.data, tensor_v.op.indices, tensor_v.op.indptr),\n tensor_v.shape)\n diag_v = sparse_diag(v, k=k)\n else:\n v = tensor(v).op.data\n diag_v = get_array_module(v).diag(v, k=k)\n sparse = sparse if sparse is not None else issparse(v)\n return tensor(diag_v, gpu=gpu, sparse=sparse, chunk_size=chunk_size)\n\n sparse = sparse if sparse is not None else v.issparse()\n\n if v.ndim == 1:\n shape = (v.size + abs(k),) * 2\n elif v.ndim == 2:\n shape = _get_diag_shape(v.shape, k)\n else:\n raise ValueError('Input must be 1- or 2-d.')\n\n op = TensorDiag(k, dtype=v.dtype, gpu=gpu, sparse=sparse)\n return op(v, shape)\n",
"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_list_like\n\nfrom ... import opcodes\nfrom ...core import OutputType, TilesError\nfrom ...serialize import KeyField, AnyField\nfrom ...tensor.core import TENSOR_TYPE\nfrom ..core import SERIES_TYPE, DataFrame\nfrom ..initializer import Series as asseries\nfrom ..operands import DataFrameOperand, DataFrameOperandMixin\nfrom ..utils import parse_index\n\n\nclass DataFrameSetitem(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = opcodes.INDEXSETVALUE\n\n _target = KeyField('target')\n _indexes = AnyField('indexes')\n _value = AnyField('value')\n\n def __init__(self, target=None, indexes=None, value=None, output_types=None, **kw):\n super().__init__(_target=target, _indexes=indexes,\n _value=value, _output_types=output_types, **kw)\n if self.output_types is None:\n self.output_types = [OutputType.dataframe]\n\n @property\n def target(self):\n return self._target\n\n @property\n def indexes(self):\n return self._indexes\n\n @property\n def value(self):\n return self._value\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._target = self._inputs[0]\n if len(inputs) > 1:\n self._value = self._inputs[-1]\n\n @staticmethod\n def _is_scalar_tensor(t):\n return isinstance(t, TENSOR_TYPE) and t.ndim == 0\n\n def __call__(self, target: DataFrame, value):\n raw_target = target\n\n inputs = [target]\n if np.isscalar(value):\n value_dtype = np.array(value).dtype\n elif self._is_scalar_tensor(value):\n inputs.append(value)\n value_dtype = value.dtype\n else:\n if isinstance(value, (pd.Series, SERIES_TYPE)):\n value = asseries(value)\n value_dtype = value.dtype\n elif is_list_like(value) or isinstance(value, TENSOR_TYPE):\n value = asseries(value, index=target.index)\n value_dtype = value.dtype\n else: # pragma: no cover\n raise TypeError('Wrong value type, could be one of scalar, Series or tensor')\n\n if target.shape[0] == 0:\n # target empty, reindex target first\n target = target.reindex(value.index)\n inputs[0] = target\n elif value.index_value.key != target.index_value.key:\n # need reindex when target df is not empty and index different\n value = value.reindex(target.index)\n inputs.append(value)\n\n index_value = target.index_value\n dtypes = target.dtypes.copy(deep=True)\n dtypes.loc[self._indexes] = value_dtype\n columns_value = parse_index(dtypes.index, store_data=True)\n ret = self.new_dataframe(inputs, shape=(target.shape[0], len(dtypes)),\n dtypes=dtypes, index_value=index_value,\n columns_value=columns_value)\n raw_target.data = ret.data\n\n @classmethod\n def tile(cls, op):\n out = op.outputs[0]\n target = op.target\n value = op.value\n col = op.indexes\n columns = target.columns_value.to_pandas()\n is_value_scalar = np.isscalar(value) or cls._is_scalar_tensor(value)\n\n if not is_value_scalar:\n # check if all chunk's index_value are identical\n target_chunk_index_values = [c.index_value for 
c in target.chunks\n if c.index[1] == 0]\n value_chunk_index_values = [v.index_value for v in value.chunks]\n is_identical = len(target_chunk_index_values) == len(target_chunk_index_values) and \\\n all(c.key == v.key for c, v in zip(target_chunk_index_values, value_chunk_index_values))\n if not is_identical:\n # do rechunk\n if any(np.isnan(s) for s in target.nsplits[0]) or \\\n any(np.isnan(s) for s in value.nsplits[0]): # pragma: no cover\n raise TilesError('target or value has unknown chunk shape')\n\n value = value.rechunk({0: target.nsplits[0]})._inplace_tile()\n\n out_chunks = []\n nsplits = [list(ns) for ns in target.nsplits]\n if col not in columns:\n nsplits[1][-1] += 1\n column_chunk_shape = target.chunk_shape[1]\n # append to the last chunk on columns axis direction\n for c in target.chunks:\n if c.index[-1] != column_chunk_shape - 1:\n # not effected, just output\n out_chunks.append(c)\n else:\n chunk_op = op.copy().reset_key()\n if pd.api.types.is_scalar(value):\n chunk_inputs = [c]\n elif is_value_scalar:\n chunk_inputs = [c, value.chunks[0]]\n else:\n value_chunk = value.cix[c.index[0], ]\n chunk_inputs = [c, value_chunk]\n\n dtypes = c.dtypes.copy(deep=True)\n dtypes.loc[out.dtypes.index[-1]] = out.dtypes.iloc[-1]\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=(c.shape[0], c.shape[1] + 1),\n dtypes=dtypes,\n index_value=c.index_value,\n columns_value=parse_index(dtypes.index, store_data=True),\n index=c.index)\n out_chunks.append(chunk)\n else:\n # replace exist column\n for c in target.chunks:\n if col in c.dtypes:\n chunk_inputs = [c]\n if not np.isscalar(value):\n chunk_inputs.append(value.cix[c.index[0], ])\n chunk_op = op.copy().reset_key()\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=c.shape,\n dtypes=c.dtypes,\n index_value=c.index_value,\n columns_value=c.columns_value,\n index=c.index)\n out_chunks.append(chunk)\n else:\n out_chunks.append(c)\n\n params = out.params\n params['nsplits'] = tuple(tuple(ns) for ns in nsplits)\n params['chunks'] = out_chunks\n new_op = op.copy()\n return new_op.new_tileables(op.inputs, kws=[params])\n\n @classmethod\n def execute(cls, ctx, op):\n target = ctx[op.target.key].copy()\n value = ctx[op.value.key] if not np.isscalar(op.value) else op.value\n target[op.indexes] = value\n ctx[op.outputs[0].key] = target\n\n\ndef dataframe_setitem(df, col, value):\n op = DataFrameSetitem(target=df, indexes=col, value=value)\n return op(df, value)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ...serialize import Int32Field, StringField\nfrom ...config import options\nfrom ..utils import decide_chunk_sizes, get_order\nfrom .diag import TensorDiagBase\nfrom .core import TensorNoInput\nfrom ...lib import sparse\nfrom ..array_utils import create_array\n\n\nclass TensorEye(TensorNoInput, TensorDiagBase):\n _op_type_ = OperandDef.TENSOR_EYE\n\n _k = Int32Field('k')\n _order = StringField('order')\n\n def __init__(self, k=None, dtype=None, order=None, **kw):\n dtype = np.dtype(dtype or 'f8')\n super().__init__(_k=k, dtype=dtype, _order=order, **kw)\n\n @property\n def k(self):\n return getattr(self, '_k', 0)\n\n @property\n def order(self):\n return self._order\n\n @classmethod\n def _get_nsplits(cls, op):\n tensor = op.outputs[0]\n chunk_size = tensor.extra_params.raw_chunk_size or options.chunk_size\n return decide_chunk_sizes(tensor.shape, chunk_size, tensor.dtype.itemsize)\n\n @classmethod\n def _get_chunk(cls, op, chunk_k, chunk_shape, chunk_idx):\n chunk_op = TensorEye(k=chunk_k, dtype=op.dtype, gpu=op.gpu, sparse=op.sparse)\n return chunk_op.new_chunk(None, shape=chunk_shape, index=chunk_idx)\n\n @classmethod\n def tile(cls, op):\n return TensorDiagBase.tile(op)\n\n @classmethod\n def execute(cls, ctx, op):\n chunk = op.outputs[0]\n if op.sparse:\n ctx[chunk.key] = sparse.eye(chunk.shape[0], M=chunk.shape[1], k=op.k,\n dtype=op.dtype, gpu=op.gpu)\n else:\n ctx[chunk.key] = create_array(op)(\n 'eye', chunk.shape[0], M=chunk.shape[1], k=op.k,\n dtype=op.dtype, order=op.order)\n\n\ndef eye(N, M=None, k=0, dtype=None, sparse=False, gpu=False, chunk_size=None, order='C'):\n \"\"\"\n Return a 2-D tensor with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n M : int, optional\n Number of columns in the output. 
If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned tensor.\n sparse: bool, optional\n Create sparse tensor if True, False as default\n gpu : bool, optional\n Allocate the tensor on GPU if True, False as default\n chunk_size : int or tuple of int or tuple of ints, optional\n Desired chunk size on each dimension\n order : {'C', 'F'}, optional\n Whether the output should be stored in row-major (C-style) or\n column-major (Fortran-style) order in memory.\n\n Returns\n -------\n I : Tensor of shape (N,M)\n An tensor where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n\n See Also\n --------\n identity : (almost) equivalent function\n diag : diagonal 2-D tensor from a 1-D tensor specified by the user.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.eye(2, dtype=int).execute()\n array([[1, 0],\n [0, 1]])\n >>> mt.eye(3, k=1).execute()\n array([[ 0., 1., 0.],\n [ 0., 0., 1.],\n [ 0., 0., 0.]])\n\n \"\"\"\n if M is None:\n M = N\n\n shape = (N, M)\n tensor_order = get_order(order, None, available_options='CF',\n err_msg=\"only 'C' or 'F' order is permitted\")\n op = TensorEye(k, dtype=dtype, gpu=gpu, sparse=sparse, order=order)\n return op(shape, chunk_size=chunk_size, order=tensor_order)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ...core import TilesError\nfrom ...serialize import KeyField, BoolField\nfrom ...utils import check_chunks_unknown_shape\nfrom ..operands import TensorOperand, TensorOperandMixin\nfrom ..datasource import tensor as astensor\nfrom ..array_utils import as_same_device, device\nfrom ..core import TensorOrder\nfrom .ravel import ravel\n\n\nclass TensorIsIn(TensorOperand, TensorOperandMixin):\n _op_type_ = OperandDef.ISIN\n\n _element = KeyField('element')\n _test_elements = KeyField('test_elements')\n _assume_unique = BoolField('assume_unique')\n _invert = BoolField('invert')\n\n def __init__(self, assume_unique=None, invert=None, dtype=None, **kw):\n dtype = np.dtype(bool) if dtype is None else dtype\n super().__init__(_assume_unique=assume_unique, _invert=invert,\n dtype=dtype, **kw)\n\n @property\n def element(self):\n return self._element\n\n @property\n def test_elements(self):\n return self._test_elements\n\n @property\n def assume_unique(self):\n return self._assume_unique\n\n @property\n def invert(self):\n return self._invert\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._element = self._inputs[0]\n self._test_elements = self._inputs[1]\n\n def __call__(self, element, test_elements):\n element, test_elements = astensor(element), ravel(astensor(test_elements))\n\n return self.new_tensor([element, test_elements], element.shape, order=TensorOrder.C_ORDER)\n\n @classmethod\n def tile(cls, op):\n in_tensor = op.element\n test_elements = op.test_elements\n out_tensor = op.outputs[0]\n\n if len(test_elements.chunks) != 1:\n check_chunks_unknown_shape([test_elements], TilesError)\n test_elements = test_elements.rechunk(len(test_elements))._inplace_tile()\n test_elements_chunk = test_elements.chunks[0]\n\n out_chunks = []\n for c in in_tensor.chunks:\n chunk_op = op.copy().reset_key()\n out_chunk = chunk_op.new_chunk([c, test_elements_chunk], shape=c.shape,\n index=c.index, order=out_tensor.order)\n out_chunks.append(out_chunk)\n\n new_op = op.copy()\n return new_op.new_tensors([in_tensor, test_elements], out_tensor.shape,\n order=out_tensor.order, chunks=out_chunks,\n nsplits=in_tensor.nsplits)\n\n @classmethod\n def execute(cls, ctx, op):\n (element, test_elements), device_id, xp = as_same_device(\n [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)\n\n with device(device_id):\n ctx[op.outputs[0].key] = xp.isin(element, test_elements,\n assume_unique=op.assume_unique,\n invert=op.invert)\n\n\ndef isin(element, test_elements, assume_unique=False, invert=False):\n \"\"\"\n Calculates `element in test_elements`, broadcasting over `element` only.\n Returns a boolean array of the same shape as `element` that is True\n where an element of `element` is in `test_elements` and False otherwise.\n\n Parameters\n ----------\n element : array_like\n 
Input tensor.\n test_elements : array_like\n The values against which to test each value of `element`.\n This argument is flattened if it is a tensor or array_like.\n See notes for behavior with non-array-like parameters.\n assume_unique : bool, optional\n If True, the input tensors are both assumed to be unique, which\n can speed up the calculation. Default is False.\n invert : bool, optional\n If True, the values in the returned tensor are inverted, as if\n calculating `element not in test_elements`. Default is False.\n ``mt.isin(a, b, invert=True)`` is equivalent to (but faster\n than) ``mt.invert(mt.isin(a, b))``.\n\n Returns\n -------\n isin : Tensor, bool\n Has the same shape as `element`. The values `element[isin]`\n are in `test_elements`.\n\n See Also\n --------\n in1d : Flattened version of this function.\n\n Notes\n -----\n\n `isin` is an element-wise function version of the python keyword `in`.\n ``isin(a, b)`` is roughly equivalent to\n ``mt.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.\n\n `element` and `test_elements` are converted to tensors if they are not\n already. If `test_elements` is a set (or other non-sequence collection)\n it will be converted to an object tensor with one element, rather than a\n tensor of the values contained in `test_elements`. This is a consequence\n of the `tensor` constructor's way of handling non-sequence collections.\n Converting the set to a list usually gives the desired behavior.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> element = 2*mt.arange(4).reshape((2, 2))\n >>> element.execute()\n array([[0, 2],\n [4, 6]])\n >>> test_elements = [1, 2, 4, 8]\n >>> mask = mt.isin(element, test_elements)\n >>> mask.execute()\n array([[ False, True],\n [ True, False]])\n >>> element[mask].execute()\n array([2, 4])\n >>> mask = mt.isin(element, test_elements, invert=True)\n >>> mask.execute()\n array([[ True, False],\n [ False, True]])\n >>> element[mask]\n array([0, 6])\n\n Because of how `array` handles sets, the following does not\n work as expected:\n\n >>> test_set = {1, 2, 4, 8}\n >>> mt.isin(element, test_set).execute()\n array([[ False, False],\n [ False, False]])\n\n Casting the set to a list gives the expected result:\n\n >>> mt.isin(element, list(test_set)).execute()\n array([[ False, True],\n [ True, False]])\n \"\"\"\n op = TensorIsIn(assume_unique, invert)\n return op(element, test_elements)\n"
] | [
[
"pandas.concat",
"numpy.random.choice",
"pandas.RangeIndex",
"numpy.arange",
"pandas.testing.assert_frame_equal",
"numpy.random.rand",
"pandas.read_sql"
],
[
"numpy.cumsum"
],
[
"numpy.isnan",
"pandas.api.types.is_scalar",
"pandas.api.types.is_list_like",
"numpy.isscalar",
"numpy.array"
],
[
"numpy.dtype"
],
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.24"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
candleinwindsteve/Stratipy | [
"ea505df1e4830141c590922d654edfbde498b924",
"ea505df1e4830141c590922d654edfbde498b924"
] | [
"stratipy/filtering_diffusion.py",
"stratipy/nbs_cluster.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\nimport sys\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg import norm\nfrom scipy.io import loadmat, savemat\nfrom nbs_class import Ppi, Patient\nfrom subprocess import call\n# import h5py\nimport os\nimport glob\nimport time\nimport datetime\n\n# NOTE mutationProfileDiffusion -> propagation\n# mutationProfile -> M, PPIAdjacencyMatrix -> adj, dataFolder -> result_folder\n# PPI_influence_min -> ppi_influence_min, PPI_influence_max-> ppi_influence_max\n# PPI_influence()-> calcul_ppi_influence(), PPI_influence -> ppi_influence\n# influenceDistance->influence_distance\n# influenceMat -> ppi_influence, PPIneighboorsMax -> ngh_max,\n# bestInfluencers -> best_influencers\n# filteredGenes -> deg0, keepSingletons -> keep_singletons\n# mutationsMin -> min_mutation, mutationsMax -> mutationsMax\n# newnet -> ppi_ngh, netFinal -> ppi_final, mutFinal -> mut_final\n# filteredPatients -> filtered_patients\n\n\n# @profile\ndef propagation(M, adj, alpha=0.7, tol=10e-6): # TODO equation, M, alpha\n \"\"\"Network propagation iterative process\n\n Iterative algorithm for apply propagation using random walk on a network:\n Initialize::\n X1 = M\n\n Repeat::\n X2 = alpha * X1.A + (1-alpha) * M\n X1 = X2\n\n Until::\n norm(X2-X1) < tol\n\n Where::\n A : degree-normalized adjacency matrix\n\n Parameters\n ----------\n M : sparse matrix\n Data matrix to be diffused.\n\n adj : sparse matrix\n Adjacency matrice.\n\n alpha : float, default: 0.7\n Diffusion/propagation factor with 0 <= alpha <= 1.\n For alpha = 0 : no diffusion.\n For alpha = 1 :\n\n tol : float, default: 10e-6\n Convergence threshold.\n\n Returns\n -------\n X2 : sparse matrix\n Smoothed matrix.\n \"\"\"\n print(' ==== propagation ==== ')\n\n n = adj.shape[0]\n # diagonal = 1 -> degree\n # TODO to set diagonal = 0 before applying eye\n adj = adj+sp.eye(n, dtype=np.float32)\n\n d = sp.dia_matrix((np.array(adj.sum(axis=0))**-1, [0]),\n shape=(n, n),\n dtype=np.float32)\n A = adj.dot(d)\n\n X1 = M.astype(np.float32)\n X2 = alpha * X1.dot(A) + (1-alpha) * M\n i = 0\n while norm(X2-X1) > tol:\n X1 = X2\n X2 = alpha * X1.dot(A) + (1-alpha) * M\n i += 1\n print('Propagation iteration = {} ----- {}'.format(\n i, datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n return X2\n\n\n# @profile\ndef compare_ij_ji(ppi, out_min=True, out_max=True):\n \"\"\"Helper function for calcul_ppi_influence\n\n In most cases the influence (propagation) is not symmetric. We have to\n compare weight (a_ij) and (a_ji) for all pairs in order to obtain symmetric\n matrix/matrices. 
2 choices available: minimum or maximum weight.\n a = min [(a_ij),(a_ji)]\n a = max [(a_ij),(a_ji)]\n Minimum weight is chosen to avoid Hubs phenomenon.\n\n Parameters\n ----------\n ppi : sparse matrix\n Matrice to apply comparison.\n\n out_min, out_max : boolean, default: True\n Minimum and/or maximum weight is chosen.\n\n Returns\n -------\n ppi_min, ppi_max : sparse matrix\n Symmertric matrix with minimum and/or maximum weight.\n \"\"\"\n # TODO matrice type of ppi\n n = ppi.shape[0]\n ppi = ppi.tolil() # need \"lil_matrix\" for reshape\n # transpose to compare ppi(ij) and ppi(ji)\n ppi_transp = sp.lil_matrix.transpose(ppi)\n # reshape to 1D matrix\n ppi_1d = ppi.reshape((1, n**2))\n ppi_1d_transp = ppi_transp.reshape((1, n**2))\n\n # reshapeto original size matrix after comparison (min/max)\n if out_min and out_max:\n ppi_min = (sp.coo_matrix.tolil(\n sp.coo_matrix.min(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0))\n ).reshape((n, n)).astype(np.float32)\n ppi_max = (sp.coo_matrix.tolil(\n sp.coo_matrix.max(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0))\n ).reshape((n, n)).astype(np.float32)\n\n print('ppi_min', type(ppi_min), ppi_min.dtype, ppi_min.shape)\n print('ppi_max', type(ppi_max), ppi_max.dtype, ppi_max.shape)\n return ppi_min, ppi_max\n\n elif out_min:\n ppi_min = (sp.coo_matrix.tolil(\n sp.coo_matrix.min(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0,\n dtype=np.float32))).reshape((n, n))\n return ppi_min\n\n elif out_max:\n ppi_max = (sp.coo_matrix.tolil(\n sp.coo_matrix.max(sp.vstack([ppi_1d, ppi_1d_transp]), axis=0,\n dtype=np.float32))).reshape((n, n))\n return ppi_max\n else:\n print('You have to choice Min or Max') # TODO change error message\n\n\n# @profile\ndef calcul_final_influence(M, adj, result_folder, influence_weight='min',\n simplification=True, compute=False, overwrite=False,\n alpha=0.7, tol=10e-6):\n \"\"\"Compute network influence score\n\n Network propagation iterative process is applied on PPI. (1) The network\n influence distance matrix and (2) influence matrices based on minimum /\n maximum weight are saved as MATLAB-style files (.mat).\n - (1) : 'influence_distance_alpha={}_tol={}.mat'\n in 'influence_distance' directory\n - (2) : 'ppi_influence_alpha={}_tol={}.mat'\n in 'ppi_influence' directory\n Where {} are parameter values. The directories will be automatically\n created if not exist.\n\n If compute=False, the latest data of directory will be taken into\n account:\n - latest data with same parameters (alpha and tol)\n - if not exist, latest data of directory but with differents parameters\n\n Parameters\n ----------\n M : sparse matrix\n Data matrix to be diffused.\n\n adj : sparse matrix\n Adjacency matrice.\n\n result_folder : str\n Path to create a new directory for save new files. If you want to creat\n in current directory, enter '/directory_name'. 
Absolute path is also\n supported.\n\n influence_weight :\n\n simplification : boolean, default: True\n\n compute : boolean, default: False\n If True, new network influence score will be computed.\n If False, the latest network influence score will be taken into\n account.\n\n overwrite : boolean, default: False\n If True, new network influence score will be computed even if the file\n which same parameters already exists in the directory.\n\n alpha : float, default: 0.7\n Diffusion (propagation) factor with 0 <= alpha <= 1.\n For alpha = 0 : no diffusion.\n For alpha = 1 :\n\n tol : float, default: 10e-6\n Convergence threshold.\n\n Returns\n -------\n final_influence : sparse matrix\n Smoothed PPI influence matrices based on minimum / maximum weight.\n \"\"\"\n influence_distance_directory = result_folder + 'influence_distance/'\n influence_distance_file = (\n influence_distance_directory +\n 'influence_distance_alpha={}_tol={}.mat'.format(alpha, tol))\n #######\n final_influence_directory = result_folder + 'final_influence/'\n final_influence_file = (\n final_influence_directory +\n 'final_influence_simp={}_alpha={}_tol={}.mat'.format(\n simplification, alpha, tol))\n #######\n\n existance_same_param = os.path.exists(final_influence_file)\n # TODO overwrite condition\n\n # check if same parameters file exists in directory\n if existance_same_param:\n final_influence_data = loadmat(final_influence_file)\n if influence_weight == 'min':\n final_influence = final_influence_data['final_influence_min']\n else:\n final_influence = final_influence_data['final_influence_max']\n print('final influence matrix', type(final_influence), final_influence.shape)\n print('***** Same parameters file of FINAL INFLUENCE already exists ***** {}'\n .format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n else:\n if compute:\n start = time.time()\n\n # check if influence distance file exists\n existance_same_influence = os.path.exists(influence_distance_file)\n if existance_same_influence:\n influence_data = loadmat(influence_distance_file)\n influence = influence_data['influence_distance']\n print('***** Same parameters file of INFLUENCE DISTANCE already exists ***** {}'\n .format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n else:\n influence = propagation(M, adj, alpha, tol)\n print('influence', type(influence), influence.dtype)\n\n # save influence distance before simplification with parameters' values in filename\n os.makedirs(influence_distance_directory, exist_ok=True) # NOTE For Python ≥ 3.2\n print(' ==== Start to save INFLUENCE DISTANCE ==== {}'\n .format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n start_save = time.time()\n savemat(influence_distance_file,\n {'influence_distance': influence,\n 'alpha': alpha},\n do_compression=True)\n end_save = time.time()\n print(\"---------- save time = {} ---------- {}\"\n .format(datetime.timedelta(seconds=end_save - start_save),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n # simplification: multiply by PPI adjacency matrix\n if simplification:\n influence = influence.multiply(sp.lil_matrix(adj))\n # -> influence as csr_matrix\n else:\n print(\"---------- No simplification ----------\")\n pass\n\n # compare influence[i,j] and influence[j,i] => min/max => final influence\n start_ij = time.time()\n final_influence_min, final_influence_max = compare_ij_ji(\n influence, out_min=True, out_max=True)\n end_ij = time.time()\n print(\"---------- compare ij/ji = {} ---------- {}\"\n 
.format(datetime.timedelta(seconds=end_ij - start_ij),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n # save final influence with parameters' values in filename\n os.makedirs(final_influence_directory, exist_ok=True)\n\n print(' ==== Start to save FINAL INFLUENCE ==== {}'\n .format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n start_save = time.time()\n savemat(final_influence_file,\n {'final_influence_min': final_influence_min,\n 'final_influence_max': final_influence_max,\n 'alpha': alpha}, do_compression=True)\n end_save = time.time()\n print(\"---------- save time = {} ---------- {}\"\n .format(datetime.timedelta(seconds=end_save - start_save),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n if influence_weight == 'min':\n final_influence = final_influence_min\n else:\n final_influence = final_influence_max\n\n end = time.time()\n print(\"---------- Influence = {} ---------- {}\"\n .format(datetime.timedelta(seconds=end-start),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n # take most recent file\n else:\n for x in final_influence_file, influence_distance_directory:\n print(x)\n newest_file = max(glob.iglob(x + '*.mat'),\n key=os.path.getctime)\n final_influence_data = loadmat(newest_file)\n if x == final_influence_directory:\n if influence_weight == 'min':\n final_influence = final_influence_data['final_influence_min']\n else:\n final_influence = final_influence_data['final_influence_max']\n return final_influence\n\n\n# @profile\ndef best_neighboors(ppi_filt, final_influence, ngh_max):\n \"\"\"Helper function for filter_ppi_patients\n\n Keeps only the connections with the best influencers.\n\n Parameters\n ----------\n ppi_filt : sparse matrix\n Filtration from ppi_total : only genes in PPI are considered.\n\n final_influence :\n Smoothed PPI influence matrices based on minimum or maximum weight.\n\n ngh_max : int\n Number of best influencers in PPI.\n\n Returns\n -------\n ppi_ngh : sparse matrix\n PPI with only best influencers.\n \"\"\"\n ngh_max = ngh_max + 1 # central protein included\n final_influence = final_influence.todense()\n print(type(final_influence))\n ppi_filt = ppi_filt.todense()\n ppi_ngh = np.zeros(ppi_filt.shape, dtype=np.float32)\n print('ppi_ngh', ppi_ngh.shape)\n for i in range(ppi_filt.shape[0]):\n best_influencers = np.argpartition(-final_influence[i, :], ngh_max)[:ngh_max]\n #NOTE different result if same value exists several times\n # best_influencers2 = np.argpartition(final_influence[i, :], -ngh_max)[-ngh_max:]\n ppi_ngh[i, best_influencers] = ppi_filt[i, best_influencers]\n ppi_ngh = np.max(np.dstack((ppi_ngh, ppi_ngh.T)), axis=2)\n print('ppi_ngh ', ppi_ngh.dtype)\n # too stringent if np.min\n return sp.csc_matrix(ppi_ngh)\n\n\n# @profile\ndef filter_ppi_patients(ppi_total, mut_total, ppi_filt, final_influence, ngh_max,\n keep_singletons=False,\n min_mutation=10, max_mutation=2000):\n \"\"\"Keeping only the connections with the best influencers and Filtering some\n patients based on mutation number\n\n 'the 11 most influential neighbors of each gene in the network as\n determined by network influence distance were used'\n 'Only mutation data generated using the Illumina GAIIx platform were\n retained for subsequent analy- sis, and patients with fewer than 10\n mutations were discarded.'\n\n Parameters\n ----------\n ppi_total : sparse matrix\n Built from all sparse sub-matrices (AA, ... 
, CC).\n\n mut_total : sparse matrix\n Patients' mutation profiles of all genes (rows: patients,\n columns: genes of AA, BB and CC).\n\n ppi_filt : sparse matrix\n Filtration from ppi_total : only genes in PPI are considered.\n\n final_influence :\n Smoothed PPI influence matrices based on minimum or maximum weight.\n\n ngh_max : int\n Number of best influencers in PPI.\n\n keep_singletons : boolean, default: False\n If True, proteins not annotated in PPI (genes founded only in patients'\n mutation profiles) will be also considered.\n If False, only annotated proteins in PPI will be considered.\n\n min_mutation, max_mutation : int\n Numbers of lowest mutations and highest mutations per patient.\n\n Returns\n -------\n ppi_final, mut_final : sparse matrix\n PPI and mutation profiles after filtering.\n \"\"\"\n # n = final_influence.shape[0]\n # final_influence = index_to_sym_matrix(n, final_influence)\n\n ppi_ngh = best_neighboors(ppi_filt, final_influence, ngh_max)\n print('ppi_ngh ', ppi_ngh.dtype)\n deg0 = Ppi(ppi_total).deg == 0 # True if protein degree = 0\n\n if keep_singletons:\n ppi_final = sp.bmat([\n [ppi_ngh, sp.csc_matrix((ppi_ngh.shape[0], sum(deg0)))],\n [sp.csc_matrix((sum(deg0), ppi_ngh.shape[0])),\n sp.csc_matrix((sum(deg0), sum(deg0)))]\n ]) # -> COO matrix\n # mut_final=sp.bmat([[mut_total[:,deg0==False],mut_total[:,deg0==True]]])\n mut_final = mut_total\n else:\n ppi_final = ppi_ngh\n mut_final = mut_total[:, Ppi(ppi_total).deg > 0]\n\n # filtered_patients = np.array([k < min_mutation or k > max_mutation for k in Patient(mut_final).mut_per_patient])\n # mut_final = mut_final[filtered_patients == False, :]\n\n # to avoid worse comparison '== False'\n mut_final = mut_final[np.array([min_mutation < k < max_mutation for k in\n Patient(mut_final).mut_per_patient])]\n\n print(\"Removing %i patients with less than %i or more than %i mutations\" %\n (mut_total.shape[0]-mut_final.shape[0], min_mutation, max_mutation))\n print(\"New adjacency matrix:\", ppi_final.shape)\n print(\"New mutation profile matrix:\", mut_final.shape)\n\n return ppi_final, mut_final\n\n\n# @profile\ndef quantile_norm_mean(anarray):\n \"\"\"Helper function for propagation_profile\n\n Forces the observations/variables to have identical intensity distribution.\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n A = np.squeeze(np.asarray(anarray.T))\n AA = np.zeros_like(A)\n I = np.argsort(A, axis=0)\n AA[I, np.arange(A.shape[1])] = np.mean(A[I, np.arange(A.shape[1])],\n axis=1)[:, np.newaxis]\n return AA.T\n\n\n# @profile\ndef quantile_norm_median(anarray):\n A = np.squeeze(np.asarray(anarray.T))\n AA = np.zeros_like(A)\n I = np.argsort(A, axis=0)\n AA[I, np.arange(A.shape[1])] = np.median(A[I, np.arange(A.shape[1])],\n axis=1)[:, np.newaxis]\n return AA.T\n\n\n# @profile\ndef propagation_profile(mut_raw, adj, alpha, tol, qn):\n # TODO error messages\n start = time.time()\n if alpha > 0:\n # TODO verification of same parameter file\n mut_propag = propagation(mut_raw, adj, alpha, tol).todense()\n mut_propag[np.isnan(mut_propag)] = 0\n if qn == 'mean':\n mut_type = 'mean_qn'\n mut_propag = quantile_norm_mean(mut_propag)\n elif qn == 'median':\n mut_type = 'median_qn'\n mut_propag = quantile_norm_median(mut_propag)\n else:\n mut_type = 'diff'\n\n end = time.time()\n print(\"---------- Propagation on {} mutation profile = {} ---------- {}\"\n .format(mut_type,\n datetime.timedelta(seconds=end-start),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n return mut_type, mut_propag\n\n 
else:\n mut_type = 'raw'\n mut_raw = mut_raw.todense()\n\n end = time.time()\n print(\"---------- Propagation on {} mutation profile = {} ---------- {}\"\n .format(mut_type,\n datetime.timedelta(seconds=end-start),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n return mut_type, mut_raw\n",
"#!/usr/bin/env python\n# coding: utf-8\nimport sys\nimport os.path\nimport importlib # NOTE for python >= Python3.4\nimport load_data\nimport formatting_data\nimport filtering_diffusion\nimport clustering\nimport hierarchical_clustering\nimport scipy.sparse as sp\nimport numpy as np\nimport time\nimport datetime\nfrom sklearn.grid_search import ParameterGrid\nfrom scipy.io import loadmat, savemat\nimport os\nfrom memory_profiler import profile\n \ni = int(sys.argv[1])-1\n\n# TODO PPI type param\nparam_grid = {'data_folder': ['../data/'],\n # 'patient_data': ['TCGA_UCEC'],\n 'patient_data': ['Faroe'],\n # 'patient_data': ['TCGA_UCEC', 'SIMONS'],\n 'ppi_data': ['STRING', 'Y2H'],\n 'influence_weight': ['min'],\n 'simplification': [True],\n 'compute': [True],\n 'overwrite': [False],\n # 'alpha': [0, 0.3, 0.5, 0.7, 1],\n # 'alpha': [0.7, 0.8, 0.9],\n 'alpha': [0.7],\n 'tol': [10e-3],\n 'ngh_max': [11],\n 'keep_singletons': [False],\n # 'min_mutation': [10],\n 'min_mutation': [0],\n 'max_mutation': [200000],\n 'qn': [None, 'mean', 'median'],\n # 'qn': [None],\n 'n_components': [2],\n # 'n_components': range(2, 10),\n # 'n_permutations': [1000],\n 'n_permutations': [1000],\n 'run_bootstrap': [True],\n 'run_consensus': [True],\n # 'lambd': [0, 1, 200],\n 'lambd': [0, 1],\n 'tol_nmf': [1e-3],\n 'linkage_method': ['ward']\n # 'linkage_method': ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']\n }\n\n# 'lambd': range(0, 2)\n\n# NOTE sys.stdout.flush()\n\n@profile\ndef all_functions(params):\n\n if alpha == 0 and qn is not None:\n print('############ PASS ############')\n pass\n\n else:\n result_folder = data_folder + 'result_' + patient_data + '_' + ppi_data + '/'\n print(result_folder)\n print(\"alpha =\", alpha)\n print(\"QN =\", qn)\n print(\"k =\", n_components)\n print(\"lambda =\", lambd)\n print(\"Patients data =\", patient_data)\n print(\"PPI network =\", ppi_data)\n\n # ------------ load_data.py ------------\n print(\"------------ load_data.py ------------\")\n if patient_data == 'TCGA_UCEC':\n (patient_id, mutation_profile, gene_id_patient,\n gene_symbol_profile) = load_data.load_TCGA_UCEC_patient_data(\n data_folder)\n\n elif patient_data == 'Faroe':\n mutation_profile, gene_id_patient = load_data.load_Faroe_Islands_data(\n data_folder)\n\n if ppi_data == 'STRING':\n gene_id_ppi, network = load_data.load_PPI_String(\n data_folder, ppi_data)\n\n elif ppi_data == 'Y2H':\n gene_id_ppi, network = load_data.load_PPI_Y2H(\n data_folder, ppi_data)\n\n # ------------ formatting_data.py ------------\n print(\"------------ formatting_data.py ------------\")\n (network, mutation_profile,\n idx_ppi, idx_mut, idx_ppi_only, idx_mut_only) = (\n formatting_data.classify_gene_index(\n network, mutation_profile, gene_id_ppi, gene_id_patient))\n\n (ppi_total, mut_total, ppi_filt, mut_filt) = (\n formatting_data.all_genes_in_submatrices(\n network, idx_ppi, idx_mut, idx_ppi_only, idx_mut_only,\n mutation_profile))\n\n # ------------ filtering_diffusion.py ------------\n print(\"------------ filtering_diffusion.py ------------\")\n final_influence = (\n filtering_diffusion.calcul_final_influence(\n sp.eye(ppi_filt.shape[0], dtype=np.float32), ppi_filt,\n result_folder, influence_weight, simplification,\n compute, overwrite, alpha, tol))\n\n ppi_final, mut_final = filtering_diffusion.filter_ppi_patients(\n ppi_total, mut_total, ppi_filt, final_influence, ngh_max,\n keep_singletons, min_mutation, max_mutation)\n\n mut_type, mut_propag = filtering_diffusion.propagation_profile(\n 
mut_final, ppi_filt, alpha, tol, qn)\n\n # ------------ clustering.py ------------\n print(\"------------ clustering.py ------------\")\n genes_clustering, patients_clustering = (clustering.bootstrap(\n result_folder, mut_type, mut_propag, ppi_final,\n influence_weight, simplification,\n alpha, tol, keep_singletons, ngh_max, min_mutation, max_mutation,\n n_components, n_permutations,\n run_bootstrap, lambd, tol_nmf))\n\n distance_genes, distance_patients = clustering.consensus_clustering(\n result_folder, genes_clustering, patients_clustering,\n influence_weight, simplification, mut_type,\n alpha, tol, keep_singletons, ngh_max, min_mutation, max_mutation,\n n_components, n_permutations, run_consensus, lambd, tol_nmf)\n\n # ------------ hierarchical_clustering.py ------------\n print(\"------------ hierarchical_clustering.py ------------\")\n # if alpha > 0:\n # if qn == 'mean':\n # mut_type = 'mean_qn'\n # elif qn == 'median':\n # mut_type = 'median_qn'\n # else:\n # mut_type = 'diff'\n # else:\n # mut_type = 'raw'\n # print(\"mutation type =\", mut_type)\n #\n # consensus_directory = result_folder+'consensus_clustering/'\n # consensus_mut_type_directory = consensus_directory + mut_type + '/'\n #\n # hierarchical_directory = result_folder+'hierarchical_clustering/'\n # os.makedirs(hierarchical_directory, exist_ok=True)\n # hierarchical_mut_type_directory = hierarchical_directory + mut_type + '/'\n # os.makedirs(hierarchical_mut_type_directory, exist_ok=True)\n #\n # if lambd > 0:\n # consensus_factorization_directory = (consensus_mut_type_directory + 'gnmf/')\n # hierarchical_factorization_directory = (hierarchical_mut_type_directory + 'gnmf/')\n #\n # else:\n # consensus_factorization_directory = (consensus_mut_type_directory + 'nmf/')\n # hierarchical_factorization_directory = (hierarchical_mut_type_directory + 'nmf/')\n # os.makedirs(hierarchical_factorization_directory, exist_ok=True)\n #\n # consensus_file = (consensus_factorization_directory +\n # 'consensus_weight={}_simp={}_alpha={}_tol={}_singletons={}_ngh={}_minMut={}_maxMut={}_comp={}_permut={}_lambd={}_tolNMF={}.mat'\n # .format(influence_weight, simplification, alpha, tol,\n # keep_singletons, ngh_max,\n # min_mutation, max_mutation,\n # n_components, n_permutations, lambd, tol_nmf))\n #\n # consensus_data = loadmat(consensus_file)\n # distance_patients = consensus_data['distance_patients']\n\n hierarchical_clustering.distance_patients_from_consensus_file(\n result_folder, distance_patients, patient_data, ppi_data, mut_type,\n influence_weight, simplification, alpha, tol, keep_singletons,\n ngh_max, min_mutation, max_mutation, n_components, n_permutations,\n lambd, tol_nmf, linkage_method)\n\nif (sys.version_info < (3, 2)):\n raise \"Must be using Python ≥ 3.2\"\n\nstart = time.time()\n\nparams = list(ParameterGrid(param_grid))\nprint(params[i])\n\nfor k in params[i].keys():\n exec(\"%s = %s\" % (k, 'params[i][k]'))\n\nall_functions(params[i])\n\nend = time.time()\nprint('---------- ONE STEP = {} ---------- {}'\n .format(datetime.timedelta(seconds=end-start),\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n"
] | [
[
"scipy.sparse.csc_matrix",
"scipy.sparse.eye",
"numpy.asarray",
"numpy.arange",
"numpy.isnan",
"scipy.io.loadmat",
"numpy.dstack",
"scipy.sparse.linalg.norm",
"numpy.zeros_like",
"numpy.argpartition",
"scipy.sparse.vstack",
"scipy.sparse.lil_matrix.transpose",
"numpy.argsort",
"scipy.io.savemat",
"numpy.zeros",
"scipy.sparse.lil_matrix"
],
[
"scipy.sparse.eye",
"sklearn.grid_search.ParameterGrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
MohammadWasil/Self-Driving-Car | [
"9ef5b77e1268623c11e4c39d5c8e1e990caee273",
"9ef5b77e1268623c11e4c39d5c8e1e990caee273"
] | [
"Self Driving Car/Python with Tensorflow/driveSDC.py",
"Self Driving Car/Python with Tensorflow/CNN_Model.py"
] | [
"import socket\r\n\r\nfrom tensorflow.keras.models import load_model\r\n\r\n\r\nfrom PIL import ImageGrab\r\nimport numpy as np\r\nimport cv2\r\nimport os\r\n\r\n#Load the model.\r\nmodel = load_model(r\"D:\\Unity Game\\Self Driving Car\\SDCProgram\\Best Models\\data-003.h5\") \t# Directory to load the model\r\n\r\n\r\n# Socket Tcp Connection.\r\nhost = \"127.0.0.1\"\r\nport = 25001 # Port number\r\n#data = \"1,1,11\" # Data to be send\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP connection\r\nprint(\"starting connection\")\r\ntry:\r\n sock.connect((host, port)) #To connect ot the given port.\r\n print(\"Connected\")\r\n \r\nexcept:\r\n print(\"Might happen socket is closed!\")\r\n#######\r\n\r\ndef send_data(steering_angle, throttle):\r\n data_01 = str(steering_angle)\r\n data_02 = str(throttle)\r\n data = data_01 + ',' + data_02\r\n sock.sendall(data.encode(\"utf-8\")) # To send the data\r\n\r\nsteeringAngleList = []\r\nvelocityList = []\r\nthrottleList = []\r\n\r\nsteeringAngle = 0\r\nvelocity = 0\r\nthrottle = 0\r\n\r\narr1=[]\r\narr2=[]\r\narr3=[]\r\nsplitted_data = []\r\nreply=[]\r\ndef socketConnection():\r\n global globalsteeringAngle\r\n global velocity\r\n global throttle\r\n try:\r\n #data = \"1,0\"\r\n \r\n reply = sock.recv(2048).decode(\"utf-8\") # To receive the data\r\n #######send_data(reply)\r\n #print(\"Actual data received is: \", reply)\r\n \r\n splitted_data = reply.split(',')\r\n #print(\"after splitting the data: \", splitted_data)\r\n arr1.append(splitted_data[0])\r\n arr2.append(splitted_data[1])\r\n arr3.append(splitted_data[2])\r\n \r\n steeringAngle = float(splitted_data[0])\r\n velocity = float(splitted_data[1])\r\n throttle = float(splitted_data[2])\r\n \r\n except:\r\n print(\"Exception\")\r\n \r\n steeringAngleList = np.array(arr1) \r\n velocityList = np.array(arr2)\r\n throttleList = np.array(arr3)\r\n\r\n return steeringAngleList, velocityList, throttleList, steeringAngle, velocity, throttle\r\n\r\n\r\nfilename = r\"D:\\ML\\Unity-ML\\Drive SDC.csv\" \t#Directory to save your current Data in a csv file.\r\n\r\ndef csv_file(steer_Angle, velocity, throttle):\r\n \r\n #print(\"Writing to csv file!\")\r\n f = open(filename, \"w\")\r\n f.write(\"{},{},{}\\n\".format(\"Steerring Angle\", \"Current Velocity\", \"Throttle\"))\r\n \r\n for x in zip( steer_Angle, velocity, throttle):\r\n f.write(\"{},{},{}\\n\".format(x[0], x[1], x[2]))\r\n \r\n f.close()\r\n\r\n############################# \r\nMAX_SPEED = 25\r\nMIN_SPEED = 10\r\nspeed_limit = MAX_SPEED\r\n\r\ndef preprocess(image):\r\n return cv2.resize(image, (200, 66), cv2.INTER_AREA)\r\n\r\n\r\ndef drive(image, steering_angle, velocity, throttle):\r\n\r\n try:\r\n image = np.asarray(image) # from PIL image to numpy array\r\n image = preprocess(image) # apply the preprocessing\r\n image = np.array([image]) # the model expects 4D array\r\n \r\n steering_angle = float(model.predict(image, batch_size=1))\r\n steering_angle = (steering_angle/10)\r\n global speed_limit\r\n if velocity > speed_limit:\r\n speed_limit = MIN_SPEED # slow down\r\n else:\r\n speed_limit = MAX_SPEED\r\n throttle = 1.0 - steering_angle**2 - (velocity/speed_limit)**2\r\n\r\n print('{} {} {}'.format(steering_angle, throttle, velocity))\r\n steering_angle = (steering_angle*10)\r\n send_data(steering_angle, throttle)\r\n \r\n except Exception as e:\r\n print(\"Exception Occured\", e)\r\n \r\nnum = 0 \r\npath = r\"D:\\ML\\Unity-ML\\Drive SDC\" # Destination/path to which all the current images will be saved \r\nwhile 
(True):\r\n num = num + 1\r\n imageName = 'Wasil'+ str(num) + '.png' # Name of the images.\r\n #collecting current data\r\n strAngl, vlcty, thrttl, steeringAngle, velocity, throttle = socketConnection()\r\n image = np.array(ImageGrab.grab(bbox=(0, 120, 750, 540))) # Taking the screebshot and adding in the array\r\n \r\n csv_file(strAngl, vlcty, thrttl)\r\n cv2.imwrite(os.path.join(path, imageName), image) # Trying to save the image in the exact same directory.\r\n \r\n\r\n drive(image, steeringAngle, velocity, throttle)\r\n\r\n\"\"\"\r\n### NOTE: divide steering angle by 10.\r\n\"\"\"",
"\r\nimport pandas as p\r\nimport cv2\r\nfrom sklearn import model_selection\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.models import Sequential#, Input\r\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\r\nfrom tensorflow.keras.layers import Lambda, Conv2D\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint\r\n\r\nfrom PIL import Image\r\nimport numpy as np\r\n\r\nimage_input_array = []\r\n\r\ndef LoadData(): \r\n image_input_array2 = np.zeros((4536, 66, 200,3)) # Replace the value of 4536 with the number of images, you are going to train.\r\n URL = r\"D:\\ML\\Unity-ML\\sdcdata_1.csv\"\t\t # Load your csv file.\r\n url_image = r\"D:\\\\ML\\\\Unity-ML\\\\SDC\\\\\" # path of training images.\r\n \r\n data = p.read_csv(URL)\r\n \r\n image_input = data['Image Directory']\r\n steering_Angle = data['Steering Angle'].values\r\n \r\n for i in range(0,len(image_input)):\r\n #print(\"Proccessing image: \", i)\r\n \r\n URL_image = image_input[i]\r\n #print(URL_image)\r\n # addd path to variable URL_image\r\n image_input_array = Image.open(url_image +URL_image)\r\n image_input_list = np.array(image_input_array) \r\n #print(image_input_list.shape) \r\n \r\n image_input_list2 = cv2.resize(image_input_list, dsize=(200, 66), interpolation=cv2.INTER_CUBIC)\r\n #print(image_input_list2.shape)\r\n \r\n image_input_list2 = np.expand_dims(image_input_list2, axis=0)\r\n #print(image_input_list2.shape) \r\n #print(len(image_input_list2))\r\n \r\n image_input_array2[i, :, :, :] = image_input_list2\r\n #print(image_input_array2.shape)\r\n #print(len(image_input_array2))\r\n #image_input_list2.show()\r\n \r\n if i % 100 == 0:\r\n print(\"\\r\", end='')\r\n print(\"Image Processed: \", i,end = '', flush = False)\r\n \r\n #print(image_input_array.)\r\n print(\"Processng image Done!\")\r\n print(image_input_array2.shape)\r\n #image_input_array2 = np.array(image_input_array3)\r\n #image_input_list = np.expand_dims(image_input_list, axis=0)\r\n '''\r\n print(image_input_list.shape)\r\n \r\n for i in range(0,10):\r\n image_input_array2[i,:,:,:] = image_input_list\r\n ''' \r\n #split(image_input)\r\n \r\n #image_input_list.resize((2116,420,750,3))\r\n \r\n '''\r\n arrs = [np.random.random((420, 750, 3))\r\n for i in range(len(image_input_list))]\r\n\r\n image_input_list = np.array(arrs)\r\n \r\n new_image = np.ones((1,420,750,3)) \r\n # lets jsut say you have two Images \r\n old_image = np.reshape(image_input_list , (1,420,750,3))\r\n new_image = np.reshape(new_image , (2115,420,750,3))\r\n image_input_list = np.append( new_image , old_image , axis = 0)\r\n '''\r\n \r\n #print(image_input_list.shape)\r\n #print(len(image_input_list))\r\n \r\n validation_size = 0.15 # validation is 0.15, so the size of the X and Y validaion will be 15% of the X and Y(actual size of the array)\r\n seed = 7\r\n \r\n #image_input_list = image_input_list.reshape(1, 420, 750, 3, )\r\n #print(\"size is: \",image_input_list.shape)\r\n \r\n # This splits the dataset, so that we can use some data for training, some for testing.\r\n X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(image_input_array2, steering_Angle, test_size=validation_size, random_state=seed)\r\n \r\n '''\r\n for i in range(0,1693): # 0, 1693\r\n print(\"Proccessing X_train image: \", i)\r\n URL_image = image_input[i]\r\n image_input_array = PImage.open(URL_image)\r\n X_train = np.array(image_input_array) \r\n \r\n Y_train = data[' Steerring Angle'].values\r\n \r\n 
#print(X_train.shape) # 420, 750, 3\r\n #print(Y_train.shape)\r\n \r\n #print(len(X_train))\r\n #image_input_array.show()\r\n\r\n for i in range(1693,len(image_input)): #1693, length\r\n print(\"Proccessing X_validation image: \", i)\r\n URL_image = image_input[i]\r\n image_input_array = PImage.open(URL_image)\r\n X_validation = np.array(image_input_array) \r\n \r\n Y_validation = data[' Steerring Angle'].values\r\n \r\n #print(X_validation.shape) # 420, 750, 3\r\n #print(Y_validation.shape)\r\n \r\n #print(len(X_validation))\r\n #mage_input_array.show()\r\n '''\r\n \t\t\t\t # If the actual image and steering data is 2116, then... \r\n print(X_train.shape) # the Size is 1692 which is about 80% of actual image data. 1692/2116 * 100 = 79.9621% ~ 80%\r\n print(Y_train.shape) # the size is 1692 which is about 80% of actual steering data. 1692/2116 * 100 = 79.9621% ~ 80%\r\n print(X_validation.shape) # the size is 424 which is about 20% of actual image data. 424/2116 * 100 = 20.0378% ~ 20%\r\n print(Y_validation.shape) # the size is 424 which is about 20% of actual steering data. 424/2116 * 100 = 20.0378% ~ 20%\r\n \r\n return X_train, X_validation, Y_train, Y_validation\r\n\r\ndef buildModel(image_train):\r\n #print(\"building our model\")\r\n model = Sequential()\r\n model.add(Lambda(lambda x : x/127.5-1.0, input_shape = (66,200,3) ))\r\n model.add(Conv2D(24, (5, 5), activation = \"elu\", strides=(2,2)))\r\n model.add(Conv2D(36, (5, 5), activation = \"elu\", strides=(2,2)))\r\n model.add(Conv2D(48, (5, 5), activation = \"elu\", strides=(2,2)))\r\n model.add(Conv2D(64, (5, 5), activation = \"elu\"))\r\n #model.add(Conv2D(64, (5, 5), activation = \"elu\"))\r\n model.add(Dropout(0.5))\r\n model.add(Flatten())\r\n model.add(Dense(100, activation='elu'))\r\n model.add(Dense(50, activation='elu'))\r\n model.add(Dense(10, activation='elu'))\r\n model.add(Dense(1, activation='elu'))\r\n model.summary()\r\n \r\n return model\r\n\r\ndef train(model, image_train, image_valiation, steer_train, steer_validation):\r\n checkpoints = ModelCheckpoint('data-{epoch:03d}.h5', monitor='val_loss', verbose=0, save_best_only=True, mode='auto') # You can change the name of the model, by replacing \"data\" with your preferred name.\r\n \r\n model.compile(loss='mean_squared_error', optimizer=Adam(lr = 0.001))\r\n \r\n model.fit(image_train, steer_train, epochs=60, callbacks=[checkpoints],validation_data=(image_valiation, steer_validation))\r\n \r\nimage_train, image_valiation, steer_train, steer_validation = LoadData()\r\nmodel = buildModel(image_train)\r\ntrain(model, image_train, image_valiation, steer_train, steer_validation)\r\n"
] | [
[
"numpy.asarray",
"tensorflow.keras.models.load_model",
"numpy.array"
],
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"pandas.read_csv",
"numpy.expand_dims",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dropout",
"numpy.array",
"numpy.zeros",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
fluxtransport/fiasco | [
"9d70d8bdb03197be1ddfd433e1392e214a1468e8",
"9d70d8bdb03197be1ddfd433e1392e214a1468e8"
] | [
"fiasco/element.py",
"fiasco/fiasco.py"
] | [
"\"\"\"\nClasses and functions for element-level operations\n\"\"\"\nimport numpy as np\nimport astropy.units as u\nimport plasmapy\n\nimport fiasco\n\n__all__ = ['Element']\n\n\nclass Element(fiasco.IonCollection):\n \"\"\"\n Collection of all ions for a particular element.\n\n The `Element` object provides a way to logically group together ions of the same\n element. This provides an easy way to compute element-level derived quantities such\n as the ionization fraction as a function of temperature.\n\n Parameters\n ----------\n element_name : `str`, `int`\n Symbol, atomic number, or full name of the element\n temperature : `~astropy.units.Quantity`\n\n See Also\n --------\n fiasco.Ion : All the same keyword arguments can also be passed here.\n \"\"\"\n\n @u.quantity_input\n def __init__(self, element_name, temperature: u.K, **kwargs):\n if type(element_name) is str:\n element_name = element_name.capitalize()\n Z = plasmapy.atomic.atomic_number(element_name)\n ion_list = []\n for i in range(Z + 1):\n ion = fiasco.Ion(f'{Z} {i+1}', temperature, **kwargs)\n ion_list.append(ion)\n\n super().__init__(*ion_list)\n\n @property\n def atomic_symbol(self):\n return self[0].atomic_symbol\n\n @property\n def atomic_number(self):\n return self[0].atomic_number\n\n @property\n def element_name(self):\n return self[0].element_name\n\n @property\n def abundance(self):\n return self[0].abundance\n\n def _rate_matrix(self):\n rate_matrix = np.zeros(self.temperature.shape+(self.atomic_number+1, self.atomic_number+1))\n rate_unit = self[0].ionization_rate().unit\n rate_matrix = rate_matrix * rate_unit\n for i in range(1, self.atomic_number):\n rate_matrix[:, i, i] = -(self[i].ionization_rate() + self[i].recombination_rate())\n rate_matrix[:, i, i-1] = self[i-1].ionization_rate()\n rate_matrix[:, i, i+1] = self[i+1].recombination_rate()\n rate_matrix[:, 0, 0] = -(self[0].ionization_rate() + self[0].recombination_rate())\n rate_matrix[:, 0, 1] = self[1].recombination_rate()\n rate_matrix[:, -1, -1] = -(self[-1].ionization_rate() + self[-1].recombination_rate())\n rate_matrix[:, -1, -2] = self[-2].ionization_rate()\n\n return rate_matrix\n\n def equilibrium_ionization(self, **kwargs):\n \"\"\"\n Calculate the ionization fraction, in equilibrium, for all ions of the element.\n\n Calculate the population fractions for every ion of this element as a function of\n temperature, assuming ionization equilibrium.\n\n Parameters\n ----------\n rate_matrix : `~astropy.units.Quantity`, optional\n :math:`Z+1` by :math:`Z+1` matrix of ionization and recombination rates. If not\n given, this will be computed automatically.\n\n See Also\n --------\n fiasco.Ion.ionization_rate\n fiasco.Ion.recombination_rate\n \"\"\"\n rate_matrix = kwargs.get('rate_matrix', None)\n if rate_matrix is None:\n rate_matrix = self._rate_matrix()\n # Solve system of equations using singular value decomposition\n _, _, V = np.linalg.svd(rate_matrix.value)\n # Select columns of V with smallest eigenvalues (returned in descending order)\n # NOTE: must take the absolute value as the SVD solution is only accurate up\n # to the sign. 
We require that the solutions must be positive.\n ioneq = np.fabs(V[:, -1, :])\n ioneq /= ioneq.sum(axis=1)[:, np.newaxis]\n\n return u.Quantity(ioneq)\n\n def __getitem__(self, value):\n if type(value) is str:\n el, ion = value.split()\n if '+' in ion:\n value = int(ion.strip('+'))\n else:\n value = int(ion) - 1\n return super().__getitem__(value)\n\n def __repr__(self):\n ion_list = '\\n'.join([i.ion_name for i in self._ion_list])\n return f\"\"\"Element\n-------\n{self.atomic_symbol} ({self.atomic_number}) -- {self.element_name}\n\nAvailable Ions\n--------------\n{ion_list}\"\"\"\n",
"\"\"\"\nPackage-level functions\n\"\"\"\nimport warnings\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\nimport astropy.units as u\nimport plasmapy.atomic\nfrom plasmapy.atomic.exceptions import InvalidParticleError\n\nimport fiasco\nfrom fiasco.io import DataIndexer\n\n__all__ = ['list_elements', 'list_ions', 'proton_electron_ratio']\n\n\ndef list_elements(hdf5_dbase_root, sort=True):\n \"\"\"\n List all available elements in the CHIANTI database.\n \"\"\"\n elements = []\n root = DataIndexer.create_indexer(hdf5_dbase_root, '/')\n for f in root.fields:\n try:\n elements.append(plasmapy.atomic.atomic_symbol(f.capitalize()))\n except InvalidParticleError:\n continue\n if sort:\n elements = sorted(elements, key=lambda x: plasmapy.atomic.atomic_number(x))\n return elements\n\n\ndef list_ions(hdf5_dbase_root, sort=True):\n \"\"\"\n List all available ions in the CHIANTI database\n \"\"\"\n root = DataIndexer(hdf5_dbase_root, '/')\n # NOTE: get the list from the index if possible. This is ~30x faster\n ions = root['ion_index']\n if ions is None:\n ions = []\n for f in root.fields:\n try:\n el = plasmapy.atomic.atomic_symbol(f.capitalize())\n for i in root[f].fields:\n if f == i.split('_')[0]:\n ions.append(f\"{el} {i.split('_')[1]}\")\n except InvalidParticleError:\n continue\n # Optional because adds significant overhead\n if sort:\n ions = sorted(ions, key=lambda x: (plasmapy.atomic.atomic_number(x.split()[0]),\n int(x.split()[1])))\n # NOTE: when grabbing straight from the index and not sorting, the result will be\n # a numpy array. Cast to a list to make sure the return type is consistent for\n # all possible inputs\n return ions.tolist() if type(ions) == np.ndarray else ions\n\n\[email protected]_input\ndef proton_electron_ratio(temperature: u.K, **kwargs):\n \"\"\"\n Calculate ratio between proton and electron densities as a function of temperature\n according to Eq. 7 of [1]_.\n\n Parameters\n ----------\n temperature : `~astropy.units.Quantity`\n\n See Also\n --------\n fiasco.Ion : Accepts same keyword arguments for setting database and dataset names\n\n References\n ----------\n .. [1] Young, P. et al., 2003, ApJS, `144 135 <http://adsabs.harvard.edu/abs/2003ApJS..144..135Y>`_\n \"\"\"\n h_2 = fiasco.Ion('H +1', temperature, **kwargs)\n numerator = h_2.abundance * h_2._ioneq[h_2._dset_names['ioneq_filename']]['ionization_fraction']\n denominator = u.Quantity(np.zeros(numerator.shape))\n for el_name in list_elements(h_2.hdf5_dbase_root):\n el = fiasco.Element(el_name, temperature, **kwargs)\n abundance = el.abundance\n if abundance is None:\n warnings.warn(f'Not including {el.atomic_symbol}. Abundance not available.')\n continue\n for ion in el:\n ioneq = ion._ioneq[ion._dset_names['ioneq_filename']]['ionization_fraction']\n if ioneq is None:\n warnings.warn(f'Not including {ion.ion_name}. Ionization fraction not available.')\n continue\n denominator += ioneq * abundance * ion.charge_state\n\n ratio = numerator / denominator\n interp = interp1d(ion._ioneq[ion._dset_names['ioneq_filename']]['temperature'].value,\n ratio.value,\n kind='linear',\n bounds_error=False,\n fill_value=(ratio[0], ratio[-1]))\n\n return u.Quantity(interp(temperature))\n"
] | [
[
"numpy.linalg.svd",
"numpy.zeros",
"numpy.fabs"
],
[
"scipy.interpolate.interp1d",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
yigitozgumus/Polimi_Thesis | [
"711c1edcf1fdb92fc6c15bf5ab1be141c13995c3",
"711c1edcf1fdb92fc6c15bf5ab1be141c13995c3",
"711c1edcf1fdb92fc6c15bf5ab1be141c13995c3"
] | [
"models/new/sencebgan.py",
"trainers/bigan_trainer.py",
"trainers/sencebgan_denoiser_trainer.py"
] | [
"import tensorflow as tf\n\nfrom base.base_model import BaseModel\nfrom utils.alad_utils import get_getter\nimport utils.alad_utils as sn\n\n\nclass SENCEBGAN(BaseModel):\n def __init__(self, config):\n super(SENCEBGAN, self).__init__(config)\n self.build_model()\n self.init_saver()\n\n def build_model(self):\n ############################################################################################\n # INIT\n ############################################################################################\n # Kernel initialization for the convolutions\n if self.config.trainer.init_type == \"normal\":\n self.init_kernel = tf.random_normal_initializer(mean=0.0, stddev=0.02)\n elif self.config.trainer.init_type == \"xavier\":\n self.init_kernel = tf.contrib.layers.xavier_initializer(\n uniform=False, seed=None, dtype=tf.float32\n )\n # Placeholders\n self.is_training_gen = tf.placeholder(tf.bool)\n self.is_training_dis = tf.placeholder(tf.bool)\n self.is_training_enc_g = tf.placeholder(tf.bool)\n self.is_training_enc_r = tf.placeholder(tf.bool)\n self.feature_match1 = tf.placeholder(tf.float32)\n self.feature_match2 = tf.placeholder(tf.float32)\n self.image_input = tf.placeholder(\n tf.float32, shape=[None] + self.config.trainer.image_dims, name=\"x\"\n )\n self.noise_tensor = tf.placeholder(\n tf.float32, shape=[None, self.config.trainer.noise_dim], name=\"noise\"\n )\n ############################################################################################\n # MODEL\n ############################################################################################\n self.logger.info(\"Building training graph...\")\n with tf.variable_scope(\"SENCEBGAN\"):\n # First training part\n # G(z) ==> x'\n with tf.variable_scope(\"Generator_Model\"):\n self.image_gen = self.generator(self.noise_tensor)\n # Discriminator outputs\n with tf.variable_scope(\"Discriminator_Model\"):\n self.embedding_real, self.decoded_real = self.discriminator(\n self.image_input, do_spectral_norm=self.config.trainer.do_spectral_norm\n )\n self.embedding_fake, self.decoded_fake = self.discriminator(\n self.image_gen, do_spectral_norm=self.config.trainer.do_spectral_norm\n )\n # Second training part\n # E(x) ==> z'\n with tf.variable_scope(\"Encoder_G_Model\"):\n self.image_encoded = self.encoder_g(self.image_input)\n # G(z') ==> G(E(x)) ==> x''\n with tf.variable_scope(\"Generator_Model\"):\n self.image_gen_enc = self.generator(self.image_encoded)\n # Discriminator outputs\n with tf.variable_scope(\"Discriminator_Model\"):\n self.embedding_enc_fake, self.decoded_enc_fake = self.discriminator(\n self.image_gen_enc, do_spectral_norm=self.config.trainer.do_spectral_norm\n )\n self.embedding_enc_real, self.decoded_enc_real = self.discriminator(\n self.image_input, do_spectral_norm=self.config.trainer.do_spectral_norm\n )\n with tf.variable_scope(\"Discriminator_Model_XX\"):\n self.im_logit_real, self.im_f_real = self.discriminator_xx(\n self.image_input,\n self.image_input,\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n self.im_logit_fake, self.im_f_fake = self.discriminator_xx(\n self.image_input,\n self.image_gen_enc,\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n # Third training part\n with tf.variable_scope(\"Encoder_G_Model\"):\n self.image_encoded_r = self.encoder_g(self.image_input)\n\n with tf.variable_scope(\"Generator_Model\"):\n self.image_gen_enc_r = self.generator(self.image_encoded_r)\n\n with tf.variable_scope(\"Encoder_R_Model\"):\n self.image_ege = 
self.encoder_r(self.image_gen_enc_r)\n\n with tf.variable_scope(\"Discriminator_Model_ZZ\"):\n self.z_logit_real, self.z_f_real = self.discriminator_zz(\n self.image_encoded_r,\n self.image_encoded_r,\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n self.z_logit_fake, self.z_f_fake = self.discriminator_zz(\n self.image_encoded_r,\n self.image_ege,\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n\n ############################################################################################\n # LOSS FUNCTIONS\n ############################################################################################\n with tf.name_scope(\"Loss_Functions\"):\n with tf.name_scope(\"Generator_Discriminator\"):\n # Discriminator Loss\n if self.config.trainer.mse_mode == \"norm\":\n self.disc_loss_real = tf.reduce_mean(\n self.mse_loss(\n self.decoded_real,\n self.image_input,\n mode=\"norm\",\n order=self.config.trainer.order,\n )\n )\n self.disc_loss_fake = tf.reduce_mean(\n self.mse_loss(\n self.decoded_fake,\n self.image_gen,\n mode=\"norm\",\n order=self.config.trainer.order,\n )\n )\n elif self.config.trainer.mse_mode == \"mse\":\n self.disc_loss_real = self.mse_loss(\n self.decoded_real,\n self.image_input,\n mode=\"mse\",\n order=self.config.trainer.order,\n )\n self.disc_loss_fake = self.mse_loss(\n self.decoded_fake,\n self.image_gen,\n mode=\"mse\",\n order=self.config.trainer.order,\n )\n self.loss_discriminator = (\n tf.math.maximum(self.config.trainer.disc_margin - self.disc_loss_fake, 0)\n + self.disc_loss_real\n )\n # Generator Loss\n pt_loss = 0\n if self.config.trainer.pullaway:\n pt_loss = self.pullaway_loss(self.embedding_fake)\n self.loss_generator = self.disc_loss_fake + self.config.trainer.pt_weight * pt_loss\n # New addition to enforce visual similarity\n delta_noise = self.embedding_real - self.embedding_fake\n delta_flat = tf.layers.Flatten()(delta_noise)\n loss_noise_gen = tf.reduce_mean(tf.norm(delta_flat, ord=2, axis=1, keepdims=False))\n self.loss_generator += 0.1 * loss_noise_gen\n\n with tf.name_scope(\"Encoder_G\"):\n if self.config.trainer.mse_mode == \"norm\":\n self.loss_enc_rec = tf.reduce_mean(\n self.mse_loss(\n self.image_gen_enc,\n self.image_input,\n mode=\"norm\",\n order=self.config.trainer.order,\n )\n )\n self.loss_enc_f = tf.reduce_mean(\n self.mse_loss(\n self.decoded_enc_real,\n self.decoded_enc_fake,\n mode=\"norm\",\n order=self.config.trainer.order,\n )\n )\n elif self.config.trainer.mse_mode == \"mse\":\n self.loss_enc_rec = tf.reduce_mean(\n self.mse_loss(\n self.image_gen_enc,\n self.image_input,\n mode=\"mse\",\n order=self.config.trainer.order,\n )\n )\n self.loss_enc_f = tf.reduce_mean(\n self.mse_loss(\n self.embedding_enc_real,\n self.embedding_enc_fake,\n mode=\"mse\",\n order=self.config.trainer.order,\n )\n )\n self.loss_encoder_g = (\n self.loss_enc_rec + self.config.trainer.encoder_f_factor * self.loss_enc_f\n )\n if self.config.trainer.enable_disc_xx:\n self.enc_xx_real = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.im_logit_real, labels=tf.zeros_like(self.im_logit_real)\n )\n self.enc_xx_fake = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.im_logit_fake, labels=tf.ones_like(self.im_logit_fake)\n )\n self.enc_loss_xx = tf.reduce_mean(self.enc_xx_real + self.enc_xx_fake)\n self.loss_encoder_g += self.enc_loss_xx\n\n with tf.name_scope(\"Encoder_R\"):\n if self.config.trainer.mse_mode == \"norm\":\n self.loss_encoder_r = tf.reduce_mean(\n self.mse_loss(\n self.image_ege,\n self.image_encoded_r,\n 
mode=\"norm\",\n order=self.config.trainer.order,\n )\n )\n\n elif self.config.trainer.mse_mode == \"mse\":\n self.loss_encoder_r = tf.reduce_mean(\n self.mse_loss(\n self.image_ege,\n self.image_encoded_r,\n mode=\"mse\",\n order=self.config.trainer.order,\n )\n )\n\n if self.config.trainer.enable_disc_zz:\n self.enc_zz_real = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.z_logit_real, labels=tf.zeros_like(self.z_logit_real)\n )\n self.enc_zz_fake = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.z_logit_fake, labels=tf.ones_like(self.z_logit_fake)\n )\n self.enc_loss_zz = tf.reduce_mean(self.enc_zz_real + self.enc_zz_fake)\n self.loss_encoder_r += self.enc_loss_zz\n\n if self.config.trainer.enable_disc_xx:\n with tf.name_scope(\"Discriminator_XX\"):\n self.loss_xx_real = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.im_logit_real, labels=tf.ones_like(self.im_logit_real)\n )\n self.loss_xx_fake = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.im_logit_fake, labels=tf.zeros_like(self.im_logit_fake)\n )\n self.dis_loss_xx = tf.reduce_mean(self.loss_xx_real + self.loss_xx_fake)\n if self.config.trainer.enable_disc_zz:\n with tf.name_scope(\"Discriminator_ZZ\"):\n self.loss_zz_real = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.z_logit_real, labels=tf.ones_like(self.z_logit_real)\n )\n self.loss_zz_fake = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=self.z_logit_fake, labels=tf.zeros_like(self.z_logit_fake)\n )\n self.dis_loss_zz = tf.reduce_mean(self.loss_zz_real + self.loss_zz_fake)\n\n ############################################################################################\n # OPTIMIZERS\n ############################################################################################\n with tf.name_scope(\"Optimizers\"):\n self.generator_optimizer = tf.train.AdamOptimizer(\n self.config.trainer.standard_lr_gen,\n beta1=self.config.trainer.optimizer_adam_beta1,\n beta2=self.config.trainer.optimizer_adam_beta2,\n )\n self.encoder_g_optimizer = tf.train.AdamOptimizer(\n self.config.trainer.standard_lr_enc,\n beta1=self.config.trainer.optimizer_adam_beta1,\n beta2=self.config.trainer.optimizer_adam_beta2,\n )\n self.encoder_r_optimizer = tf.train.AdamOptimizer(\n self.config.trainer.standard_lr_enc,\n beta1=self.config.trainer.optimizer_adam_beta1,\n beta2=self.config.trainer.optimizer_adam_beta2,\n )\n self.discriminator_optimizer = tf.train.AdamOptimizer(\n self.config.trainer.standard_lr_dis,\n beta1=self.config.trainer.optimizer_adam_beta1,\n beta2=self.config.trainer.optimizer_adam_beta2,\n )\n # Collect all the variables\n all_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n # Generator Network Variables\n self.generator_vars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Generator_Model\")\n ]\n # Discriminator Network Variables\n self.discriminator_vars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Discriminator_Model\")\n ]\n # Discriminator Network Variables\n self.encoder_g_vars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Encoder_G_Model\")\n ]\n self.encoder_r_vars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Encoder_R_Model\")\n ]\n self.dxxvars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Discriminator_Model_XX\")\n ]\n self.dzzvars = [\n v for v in all_variables if v.name.startswith(\"SENCEBGAN/Discriminator_Model_ZZ\")\n ]\n # Generator Network Operations\n self.gen_update_ops = tf.get_collection(\n 
tf.GraphKeys.UPDATE_OPS, scope=\"SENCEBGAN/Generator_Model\"\n )\n # Discriminator Network Operations\n self.disc_update_ops = tf.get_collection(\n tf.GraphKeys.UPDATE_OPS, scope=\"SENCEBGAN/Discriminator_Model\"\n )\n self.encg_update_ops = tf.get_collection(\n tf.GraphKeys.UPDATE_OPS, scope=\"SENCEBGAN/Encoder_G_Model\"\n )\n\n self.encr_update_ops = tf.get_collection(\n tf.GraphKeys.UPDATE_OPS, scope=\"SENCEBGAN/Encoder_R_Model\"\n )\n self.update_ops_dis_xx = tf.get_collection(\n tf.GraphKeys.UPDATE_OPS, scope=\"SENCEBGAN/Discriminator_Model_XX\"\n )\n self.update_ops_dis_zz = tf.get_collection(\n tf.GraphKeys.UPDATE_OPS, scope=\"SENCEBGAN/Discriminator_Model_ZZ\"\n )\n with tf.control_dependencies(self.gen_update_ops):\n self.gen_op = self.generator_optimizer.minimize(\n self.loss_generator,\n var_list=self.generator_vars,\n global_step=self.global_step_tensor,\n )\n with tf.control_dependencies(self.disc_update_ops):\n self.disc_op = self.discriminator_optimizer.minimize(\n self.loss_discriminator, var_list=self.discriminator_vars\n )\n with tf.control_dependencies(self.encg_update_ops):\n self.encg_op = self.encoder_g_optimizer.minimize(\n self.loss_encoder_g,\n var_list=self.encoder_g_vars,\n global_step=self.global_step_tensor,\n )\n with tf.control_dependencies(self.encr_update_ops):\n self.encr_op = self.encoder_r_optimizer.minimize(\n self.loss_encoder_r,\n var_list=self.encoder_r_vars,\n global_step=self.global_step_tensor,\n )\n if self.config.trainer.enable_disc_xx:\n with tf.control_dependencies(self.update_ops_dis_xx):\n self.disc_op_xx = self.discriminator_optimizer.minimize(\n self.dis_loss_xx, var_list=self.dxxvars\n )\n if self.config.trainer.enable_disc_zz:\n with tf.control_dependencies(self.update_ops_dis_zz):\n self.disc_op_zz = self.discriminator_optimizer.minimize(\n self.dis_loss_zz, var_list=self.dzzvars\n )\n # Exponential Moving Average for Estimation\n self.dis_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)\n maintain_averages_op_dis = self.dis_ema.apply(self.discriminator_vars)\n\n self.gen_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)\n maintain_averages_op_gen = self.gen_ema.apply(self.generator_vars)\n\n self.encg_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)\n maintain_averages_op_encg = self.encg_ema.apply(self.encoder_g_vars)\n\n self.encr_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)\n maintain_averages_op_encr = self.encr_ema.apply(self.encoder_r_vars)\n\n if self.config.trainer.enable_disc_xx:\n self.dis_xx_ema = tf.train.ExponentialMovingAverage(\n decay=self.config.trainer.ema_decay\n )\n maintain_averages_op_dis_xx = self.dis_xx_ema.apply(self.dxxvars)\n\n if self.config.trainer.enable_disc_zz:\n self.dis_zz_ema = tf.train.ExponentialMovingAverage(\n decay=self.config.trainer.ema_decay\n )\n maintain_averages_op_dis_zz = self.dis_zz_ema.apply(self.dzzvars)\n\n with tf.control_dependencies([self.disc_op]):\n self.train_dis_op = tf.group(maintain_averages_op_dis)\n\n with tf.control_dependencies([self.gen_op]):\n self.train_gen_op = tf.group(maintain_averages_op_gen)\n\n with tf.control_dependencies([self.encg_op]):\n self.train_enc_g_op = tf.group(maintain_averages_op_encg)\n\n with tf.control_dependencies([self.encr_op]):\n self.train_enc_r_op = tf.group(maintain_averages_op_encr)\n\n if self.config.trainer.enable_disc_xx:\n with tf.control_dependencies([self.disc_op_xx]):\n self.train_dis_op_xx = 
tf.group(maintain_averages_op_dis_xx)\n\n if self.config.trainer.enable_disc_zz:\n with tf.control_dependencies([self.disc_op_zz]):\n self.train_dis_op_zz = tf.group(maintain_averages_op_dis_zz)\n\n ############################################################################################\n # TESTING\n ############################################################################################\n self.logger.info(\"Building Testing Graph...\")\n with tf.variable_scope(\"SENCEBGAN\"):\n with tf.variable_scope(\"Discriminator_Model\"):\n self.embedding_q_ema, self.decoded_q_ema = self.discriminator(\n self.image_input,\n getter=get_getter(self.dis_ema),\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n with tf.variable_scope(\"Generator_Model\"):\n self.image_gen_ema = self.generator(\n self.embedding_q_ema, getter=get_getter(self.gen_ema)\n )\n with tf.variable_scope(\"Discriminator_Model\"):\n self.embedding_rec_ema, self.decoded_rec_ema = self.discriminator(\n self.image_gen_ema,\n getter=get_getter(self.dis_ema),\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n # Second Training Part\n with tf.variable_scope(\"Encoder_G_Model\"):\n self.image_encoded_ema = self.encoder_g(\n self.image_input, getter=get_getter(self.encg_ema)\n )\n\n with tf.variable_scope(\"Generator_Model\"):\n self.image_gen_enc_ema = self.generator(\n self.image_encoded_ema, getter=get_getter(self.gen_ema)\n )\n with tf.variable_scope(\"Discriminator_Model\"):\n self.embedding_enc_fake_ema, self.decoded_enc_fake_ema = self.discriminator(\n self.image_gen_enc_ema,\n getter=get_getter(self.dis_ema),\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n self.embedding_enc_real_ema, self.decoded_enc_real_ema = self.discriminator(\n self.image_input,\n getter=get_getter(self.dis_ema),\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n if self.config.trainer.enable_disc_xx:\n with tf.variable_scope(\"Discriminator_Model_XX\"):\n self.im_logit_real_ema, self.im_f_real_ema = self.discriminator_xx(\n self.image_input,\n self.image_input,\n getter=get_getter(self.dis_xx_ema),\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n self.im_logit_fake_ema, self.im_f_fake_ema = self.discriminator_xx(\n self.image_input,\n self.image_gen_enc_ema,\n getter=get_getter(self.dis_xx_ema),\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n # Third training part\n with tf.variable_scope(\"Encoder_G_Model\"):\n self.image_encoded_r_ema = self.encoder_g(self.image_input)\n\n with tf.variable_scope(\"Generator_Model\"):\n self.image_gen_enc_r_ema = self.generator(self.image_encoded_r_ema)\n\n with tf.variable_scope(\"Encoder_R_Model\"):\n self.image_ege_ema = self.encoder_r(self.image_gen_enc_r_ema)\n\n with tf.variable_scope(\"Discriminator_Model\"):\n self.embedding_encr_fake_ema, self.decoded_encr_fake_ema = self.discriminator(\n self.image_gen_enc_r_ema,\n getter=get_getter(self.dis_ema),\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n self.embedding_encr_real_ema, self.decoded_encr_real_ema = self.discriminator(\n self.image_input,\n getter=get_getter(self.dis_ema),\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n\n if self.config.trainer.enable_disc_zz:\n with tf.variable_scope(\"Discriminator_Model_ZZ\"):\n self.z_logit_real_ema, self.z_f_real_ema = self.discriminator_zz(\n self.image_encoded_r_ema,\n self.image_encoded_r_ema,\n getter=get_getter(self.dis_zz_ema),\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n 
self.z_logit_fake_ema, self.z_f_fake_ema = self.discriminator_zz(\n self.image_encoded_r_ema,\n self.image_ege_ema,\n getter=get_getter(self.dis_zz_ema),\n do_spectral_norm=self.config.trainer.do_spectral_norm,\n )\n\n with tf.name_scope(\"Testing\"):\n with tf.name_scope(\"Image_Based\"):\n delta = self.image_input - self.image_gen_enc_ema\n self.rec_residual = -delta\n delta_flat = tf.layers.Flatten()(delta)\n img_score_l1 = tf.norm(\n delta_flat, ord=2, axis=1, keepdims=False, name=\"img_loss__1\"\n )\n self.img_score_l1 = tf.squeeze(img_score_l1)\n\n delta = self.decoded_enc_fake_ema - self.decoded_enc_real_ema\n delta_flat = tf.layers.Flatten()(delta)\n img_score_l2 = tf.norm(\n delta_flat, ord=2, axis=1, keepdims=False, name=\"img_loss__2\"\n )\n self.img_score_l2 = tf.squeeze(img_score_l2)\n \n with tf.name_scope(\"Noise_Based\"):\n\n delta = self.image_encoded_r_ema - self.image_ege_ema\n delta_flat = tf.layers.Flatten()(delta)\n final_score_1 = tf.norm(\n delta_flat, ord=2, axis=1, keepdims=False, name=\"final_score_1\"\n )\n self.final_score_1 = tf.squeeze(final_score_1)\n self.score_comb_im = (\n 1 * self.img_score_l1\n + self.feature_match1 * self.final_score_1\n )\n delta = self.image_encoded_r_ema - self.embedding_enc_fake_ema\n delta_flat = tf.layers.Flatten()(delta)\n final_score_2 = tf.norm(\n delta_flat, ord=2, axis=1, keepdims=False, name=\"final_score_2\"\n )\n self.final_score_2 = tf.squeeze(final_score_2)\n\n delta = self.embedding_encr_real_ema - self.embedding_encr_fake_ema\n delta_flat = tf.layers.Flatten()(delta)\n final_score_3 = tf.norm(\n delta_flat, ord=2, axis=1, keepdims=False, name=\"final_score_3\"\n )\n self.final_score_3 = tf.squeeze(final_score_3)\n\n # Combo 1\n self.score_comb_z = (\n (1 - self.feature_match2) * self.final_score_2\n + self.feature_match2 * self.final_score_3\n )\n\n # Combo 2\n\n\n if self.config.trainer.enable_disc_xx:\n\n delta = self.im_f_real_ema - self.im_f_fake_ema\n delta_flat = tf.layers.Flatten()(delta)\n final_score_4 = tf.norm(\n delta_flat, ord=1, axis=1, keepdims=False, name=\"final_score_4\"\n )\n self.final_score_4 = tf.squeeze(final_score_4)\n\n delta = self.z_f_real_ema - self.z_f_fake_ema\n delta_flat = tf.layers.Flatten()(delta)\n final_score_6 = tf.norm(\n delta_flat, ord=1, axis=1, keepdims=False, name=\"final_score_6\"\n )\n self.final_score_6 = tf.squeeze(final_score_6)\n\n ############################################################################################\n # TENSORBOARD\n ############################################################################################\n if self.config.log.enable_summary:\n with tf.name_scope(\"train_summary\"):\n with tf.name_scope(\"dis_summary\"):\n tf.summary.scalar(\"loss_disc\", self.loss_discriminator, [\"dis\"])\n tf.summary.scalar(\"loss_disc_real\", self.disc_loss_real, [\"dis\"])\n tf.summary.scalar(\"loss_disc_fake\", self.disc_loss_fake, [\"dis\"])\n if self.config.trainer.enable_disc_xx:\n tf.summary.scalar(\"loss_dis_xx\", self.dis_loss_xx, [\"enc_g\"])\n if self.config.trainer.enable_disc_zz:\n tf.summary.scalar(\"loss_dis_zz\", self.dis_loss_zz, [\"enc_r\"])\n with tf.name_scope(\"gen_summary\"):\n tf.summary.scalar(\"loss_generator\", self.loss_generator, [\"gen\"])\n with tf.name_scope(\"enc_summary\"):\n tf.summary.scalar(\"loss_encoder_g\", self.loss_encoder_g, [\"enc_g\"])\n tf.summary.scalar(\"loss_encoder_r\", self.loss_encoder_r, [\"enc_r\"])\n with tf.name_scope(\"img_summary\"):\n tf.summary.image(\"input_image\", self.image_input, 1, 
[\"img_1\"])\n tf.summary.image(\"reconstructed\", self.image_gen, 1, [\"img_1\"])\n # From discriminator in part 1\n tf.summary.image(\"decoded_real\", self.decoded_real, 1, [\"img_1\"])\n tf.summary.image(\"decoded_fake\", self.decoded_fake, 1, [\"img_1\"])\n # Second Stage of Training\n tf.summary.image(\"input_enc\", self.image_input, 1, [\"img_2\"])\n tf.summary.image(\"reconstructed\", self.image_gen_enc, 1, [\"img_2\"])\n # From discriminator in part 2\n tf.summary.image(\"decoded_enc_real\", self.decoded_enc_real, 1, [\"img_2\"])\n tf.summary.image(\"decoded_enc_fake\", self.decoded_enc_fake, 1, [\"img_2\"])\n # Testing\n tf.summary.image(\"input_image\", self.image_input, 1, [\"test\"])\n tf.summary.image(\"reconstructed\", self.image_gen_enc_r_ema, 1, [\"test\"])\n tf.summary.image(\"residual\", self.rec_residual, 1, [\"test\"])\n\n self.sum_op_dis = tf.summary.merge_all(\"dis\")\n self.sum_op_gen = tf.summary.merge_all(\"gen\")\n self.sum_op_enc_g = tf.summary.merge_all(\"enc_g\")\n self.sum_op_enc_r = tf.summary.merge_all(\"enc_r\")\n self.sum_op_im_1 = tf.summary.merge_all(\"img_1\")\n self.sum_op_im_2 = tf.summary.merge_all(\"img_2\")\n self.sum_op_im_test = tf.summary.merge_all(\"test\")\n self.sum_op = tf.summary.merge([self.sum_op_dis, self.sum_op_gen])\n\n ###############################################################################################\n # MODULES\n ###############################################################################################\n def generator(self, noise_input, getter=None):\n with tf.variable_scope(\"Generator\", custom_getter=getter, reuse=tf.AUTO_REUSE):\n net_name = \"Layer_1\"\n with tf.variable_scope(net_name):\n x_g = tf.layers.Dense(\n units=2 * 2 * 256, kernel_initializer=self.init_kernel, name=\"fc\"\n )(noise_input)\n x_g = tf.layers.batch_normalization(\n x_g,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_gen,\n name=\"batch_normalization\",\n )\n x_g = tf.nn.leaky_relu(\n features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name=\"relu\"\n )\n x_g = tf.reshape(x_g, [-1, 2, 2, 256])\n net_name = \"Layer_2\"\n with tf.variable_scope(net_name):\n x_g = tf.layers.Conv2DTranspose(\n filters=128,\n kernel_size=5,\n strides=2,\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"conv2t\",\n )(x_g)\n x_g = tf.layers.batch_normalization(\n x_g,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_gen,\n name=\"batch_normalization\",\n )\n x_g = tf.nn.leaky_relu(\n features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name=\"relu\"\n )\n net_name = \"Layer_3\"\n with tf.variable_scope(net_name):\n x_g = tf.layers.Conv2DTranspose(\n filters=64,\n kernel_size=5,\n strides=2,\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"conv2t\",\n )(x_g)\n x_g = tf.layers.batch_normalization(\n x_g,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_gen,\n name=\"batch_normalization\",\n )\n x_g = tf.nn.leaky_relu(\n features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name=\"relu\"\n )\n net_name = \"Layer_4\"\n with tf.variable_scope(net_name):\n x_g = tf.layers.Conv2DTranspose(\n filters=32,\n kernel_size=5,\n strides=2,\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"conv2t\",\n )(x_g)\n x_g = tf.layers.batch_normalization(\n x_g,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_gen,\n name=\"batch_normalization\",\n )\n x_g = tf.nn.leaky_relu(\n features=x_g, 
alpha=self.config.trainer.leakyReLU_alpha, name=\"relu\"\n )\n net_name = \"Layer_5\"\n with tf.variable_scope(net_name):\n x_g = tf.layers.Conv2DTranspose(\n filters=1,\n kernel_size=5,\n strides=2,\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"conv2t\",\n )(x_g)\n x_g = tf.tanh(x_g, name=\"tanh\")\n return x_g\n\n def discriminator(self, image_input, getter=None, do_spectral_norm=False):\n layers = sn if do_spectral_norm else tf.layers\n with tf.variable_scope(\"Discriminator\", custom_getter=getter, reuse=tf.AUTO_REUSE):\n with tf.variable_scope(\"Encoder\"):\n x_e = tf.reshape(\n image_input,\n [-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],\n )\n net_name = \"Layer_1\"\n with tf.variable_scope(net_name):\n x_e = layers.conv2d(\n x_e,\n filters=32,\n kernel_size=5,\n strides=2,\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"conv\",\n )\n x_e = tf.nn.leaky_relu(\n features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name=\"leaky_relu\"\n )\n # 14 x 14 x 64\n net_name = \"Layer_2\"\n with tf.variable_scope(net_name):\n x_e = layers.conv2d(\n x_e,\n filters=64,\n kernel_size=5,\n padding=\"same\",\n strides=2,\n kernel_initializer=self.init_kernel,\n name=\"conv\",\n )\n x_e = tf.layers.batch_normalization(\n x_e,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_dis,\n )\n x_e = tf.nn.leaky_relu(\n features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name=\"leaky_relu\"\n )\n # 7 x 7 x 128\n net_name = \"Layer_3\"\n with tf.variable_scope(net_name):\n x_e = layers.conv2d(\n x_e,\n filters=128,\n kernel_size=5,\n padding=\"same\",\n strides=2,\n kernel_initializer=self.init_kernel,\n name=\"conv\",\n )\n x_e = tf.layers.batch_normalization(\n x_e,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_dis,\n )\n x_e = tf.nn.leaky_relu(\n features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name=\"leaky_relu\"\n )\n # 4 x 4 x 256\n x_e = tf.layers.Flatten()(x_e)\n net_name = \"Layer_4\"\n with tf.variable_scope(net_name):\n x_e = layers.dense(\n x_e,\n units=self.config.trainer.noise_dim,\n kernel_initializer=self.init_kernel,\n name=\"fc\",\n )\n\n embedding = x_e\n with tf.variable_scope(\"Decoder\"):\n net = tf.reshape(embedding, [-1, 1, 1, self.config.trainer.noise_dim])\n net_name = \"layer_1\"\n with tf.variable_scope(net_name):\n net = tf.layers.Conv2DTranspose(\n filters=256,\n kernel_size=5,\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"tconv1\",\n )(net)\n net = tf.layers.batch_normalization(\n inputs=net,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_dis,\n name=\"tconv1/bn\",\n )\n net = tf.nn.relu(features=net, name=\"tconv1/relu\")\n\n net_name = \"layer_2\"\n with tf.variable_scope(net_name):\n net = tf.layers.Conv2DTranspose(\n filters=128,\n kernel_size=5,\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"tconv2\",\n )(net)\n net = tf.layers.batch_normalization(\n inputs=net,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_dis,\n name=\"tconv2/bn\",\n )\n net = tf.nn.relu(features=net, name=\"tconv2/relu\")\n\n net_name = \"layer_3\"\n with tf.variable_scope(net_name):\n net = tf.layers.Conv2DTranspose(\n filters=64,\n kernel_size=5,\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"tconv3\",\n )(net)\n net = tf.layers.batch_normalization(\n inputs=net,\n 
momentum=self.config.trainer.batch_momentum,\n training=self.is_training_dis,\n name=\"tconv3/bn\",\n )\n net = tf.nn.relu(features=net, name=\"tconv3/relu\")\n net_name = \"layer_4\"\n with tf.variable_scope(net_name):\n net = tf.layers.Conv2DTranspose(\n filters=32,\n kernel_size=5,\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"tconv4\",\n )(net)\n net = tf.layers.batch_normalization(\n inputs=net,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_dis,\n name=\"tconv4/bn\",\n )\n net = tf.nn.relu(features=net, name=\"tconv4/relu\")\n net_name = \"layer_5\"\n with tf.variable_scope(net_name):\n net = tf.layers.Conv2DTranspose(\n filters=1,\n kernel_size=5,\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"tconv5\",\n )(net)\n decoded = tf.nn.tanh(net, name=\"tconv5/tanh\")\n return embedding, decoded\n\n def encoder_g(self, image_input, getter=None):\n with tf.variable_scope(\"Encoder_G\", custom_getter=getter, reuse=tf.AUTO_REUSE):\n x_e = tf.reshape(\n image_input,\n [-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],\n )\n net_name = \"Layer_1\"\n with tf.variable_scope(net_name):\n x_e = tf.layers.Conv2D(\n filters=64,\n kernel_size=5,\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"conv\",\n )(x_e)\n x_e = tf.layers.batch_normalization(\n x_e,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_enc_g,\n )\n x_e = tf.nn.leaky_relu(\n features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name=\"leaky_relu\"\n )\n net_name = \"Layer_2\"\n with tf.variable_scope(net_name):\n x_e = tf.layers.Conv2D(\n filters=128,\n kernel_size=5,\n padding=\"same\",\n strides=(2, 2),\n kernel_initializer=self.init_kernel,\n name=\"conv\",\n )(x_e)\n x_e = tf.layers.batch_normalization(\n x_e,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_enc_g,\n )\n x_e = tf.nn.leaky_relu(\n features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name=\"leaky_relu\"\n )\n net_name = \"Layer_3\"\n with tf.variable_scope(net_name):\n x_e = tf.layers.Conv2D(\n filters=256,\n kernel_size=5,\n padding=\"same\",\n strides=(2, 2),\n kernel_initializer=self.init_kernel,\n name=\"conv\",\n )(x_e)\n x_e = tf.layers.batch_normalization(\n x_e,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_enc_g,\n )\n x_e = tf.nn.leaky_relu(\n features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name=\"leaky_relu\"\n )\n x_e = tf.layers.Flatten()(x_e)\n net_name = \"Layer_4\"\n with tf.variable_scope(net_name):\n x_e = tf.layers.Dense(\n units=self.config.trainer.noise_dim,\n kernel_initializer=self.init_kernel,\n name=\"fc\",\n )(x_e)\n return x_e\n\n def encoder_r(self, image_input, getter=None):\n with tf.variable_scope(\"Encoder_R\", custom_getter=getter, reuse=tf.AUTO_REUSE):\n x_e = tf.reshape(\n image_input,\n [-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],\n )\n net_name = \"Layer_1\"\n with tf.variable_scope(net_name):\n x_e = tf.layers.Conv2D(\n filters=64,\n kernel_size=5,\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"conv\",\n )(x_e)\n x_e = tf.layers.batch_normalization(\n x_e,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_enc_r,\n )\n x_e = tf.nn.leaky_relu(\n features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name=\"leaky_relu\"\n )\n net_name = \"Layer_2\"\n with 
tf.variable_scope(net_name):\n x_e = tf.layers.Conv2D(\n filters=128,\n kernel_size=5,\n padding=\"same\",\n strides=(2, 2),\n kernel_initializer=self.init_kernel,\n name=\"conv\",\n )(x_e)\n x_e = tf.layers.batch_normalization(\n x_e,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_enc_r,\n )\n x_e = tf.nn.leaky_relu(\n features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name=\"leaky_relu\"\n )\n net_name = \"Layer_3\"\n with tf.variable_scope(net_name):\n x_e = tf.layers.Conv2D(\n filters=256,\n kernel_size=5,\n padding=\"same\",\n strides=(2, 2),\n kernel_initializer=self.init_kernel,\n name=\"conv\",\n )(x_e)\n x_e = tf.layers.batch_normalization(\n x_e,\n momentum=self.config.trainer.batch_momentum,\n training=self.is_training_enc_r,\n )\n x_e = tf.nn.leaky_relu(\n features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name=\"leaky_relu\"\n )\n x_e = tf.layers.Flatten()(x_e)\n net_name = \"Layer_4\"\n with tf.variable_scope(net_name):\n x_e = tf.layers.Dense(\n units=self.config.trainer.noise_dim,\n kernel_initializer=self.init_kernel,\n name=\"fc\",\n )(x_e)\n return x_e\n\n # Regularizer discriminator for the Generator Encoder\n def discriminator_xx(self, img_tensor, recreated_img, getter=None, do_spectral_norm=False):\n \"\"\" Discriminator architecture in tensorflow\n\n Discriminates between (x, x) and (x, rec_x)\n Args:\n img_tensor:\n recreated_img:\n getter: for exponential moving average during inference\n reuse: sharing variables or not\n do_spectral_norm:\n \"\"\"\n layers = sn if do_spectral_norm else tf.layers\n with tf.variable_scope(\"Discriminator_xx\", reuse=tf.AUTO_REUSE, custom_getter=getter):\n net = tf.concat([img_tensor, recreated_img], axis=1)\n net_name = \"layer_1\"\n with tf.variable_scope(net_name):\n net = layers.conv2d(\n net,\n filters=64,\n kernel_size=4,\n strides=2,\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"conv1\",\n )\n net = tf.nn.leaky_relu(\n features=net, alpha=self.config.trainer.leakyReLU_alpha, name=\"conv2/leaky_relu\"\n )\n net = tf.layers.dropout(\n net,\n rate=self.config.trainer.dropout_rate,\n training=self.is_training_enc_g,\n name=\"dropout\",\n )\n with tf.variable_scope(net_name, reuse=True):\n weights = tf.get_variable(\"conv1/kernel\")\n\n net_name = \"layer_2\"\n with tf.variable_scope(net_name):\n net = layers.conv2d(\n net,\n filters=128,\n kernel_size=4,\n strides=2,\n padding=\"same\",\n kernel_initializer=self.init_kernel,\n name=\"conv2\",\n )\n net = tf.nn.leaky_relu(\n features=net, alpha=self.config.trainer.leakyReLU_alpha, name=\"conv2/leaky_relu\"\n )\n net = tf.layers.dropout(\n net,\n rate=self.config.trainer.dropout_rate,\n training=self.is_training_enc_g,\n name=\"dropout\",\n )\n net = tf.layers.Flatten()(net)\n\n intermediate_layer = net\n\n net_name = \"layer_3\"\n with tf.variable_scope(net_name):\n net = tf.layers.dense(net, units=1, kernel_initializer=self.init_kernel, name=\"fc\")\n logits = tf.squeeze(net)\n\n return logits, intermediate_layer\n\n # Regularizer discriminator for the Reconstruction Encoder\n\n def discriminator_zz(self, noise_tensor, recreated_noise, getter=None, do_spectral_norm=False):\n \"\"\" Discriminator architecture in tensorflow\n\n Discriminates between (z, z) and (z, rec_z)\n Args:\n noise_tensor:\n recreated_noise:\n getter: for exponential moving average during inference\n reuse: sharing variables or not\n do_spectral_norm:\n \"\"\"\n layers = sn if do_spectral_norm else tf.layers\n\n with 
tf.variable_scope(\"Discriminator_zz\", reuse=tf.AUTO_REUSE, custom_getter=getter):\n y = tf.concat([noise_tensor, recreated_noise], axis=-1)\n\n net_name = \"y_layer_1\"\n with tf.variable_scope(net_name):\n y = layers.dense(y, units=64, kernel_initializer=self.init_kernel, name=\"fc\")\n y = tf.nn.leaky_relu(features=y, alpha=self.config.trainer.leakyReLU_alpha)\n y = tf.layers.dropout(\n y,\n rate=self.config.trainer.dropout_rate,\n training=self.is_training_enc_r,\n name=\"dropout\",\n )\n\n net_name = \"y_layer_2\"\n with tf.variable_scope(net_name):\n y = layers.dense(y, units=32, kernel_initializer=self.init_kernel, name=\"fc\")\n y = tf.nn.leaky_relu(features=y, alpha=self.config.trainer.leakyReLU_alpha)\n y = tf.layers.dropout(\n y,\n rate=self.config.trainer.dropout_rate,\n training=self.is_training_enc_r,\n name=\"dropout\",\n )\n\n intermediate_layer = y\n\n net_name = \"y_layer_3\"\n with tf.variable_scope(net_name):\n y = layers.dense(y, units=1, kernel_initializer=self.init_kernel, name=\"fc\")\n logits = tf.squeeze(y)\n\n return logits, intermediate_layer\n\n ###############################################################################################\n # CUSTOM LOSSES\n ###############################################################################################\n def mse_loss(self, pred, data, mode=\"norm\", order=2):\n if mode == \"norm\":\n delta = pred - data\n delta = tf.layers.Flatten()(delta)\n loss_val = tf.norm(delta, ord=order, axis=1, keepdims=False)\n elif mode == \"mse\":\n loss_val = tf.reduce_mean(tf.squared_difference(pred, data))\n return loss_val\n\n def pullaway_loss(self, embeddings):\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))\n normalized_embeddings = embeddings / norm\n similarity = tf.matmul(normalized_embeddings, normalized_embeddings, transpose_b=True)\n batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)\n pt_loss = (tf.reduce_sum(similarity) - batch_size) / (batch_size * (batch_size - 1))\n return pt_loss\n\n def init_saver(self):\n self.saver = tf.train.Saver(max_to_keep=self.config.log.max_to_keep)\n",
"from base.base_train import BaseTrain\nfrom tqdm import tqdm\nimport numpy as np\nfrom time import sleep\nfrom time import time\nfrom utils.evaluations import do_roc, save_results\n\n\nclass BIGANTrainer(BaseTrain):\n def __init__(self, sess, model, data, config, summarizer):\n super(BIGANTrainer, self).__init__(sess, model, data, config, summarizer)\n self.batch_size = self.config.data_loader.batch_size\n self.noise_dim = self.config.trainer.noise_dim\n self.img_dims = self.config.trainer.image_dims\n # Inititalize the train Dataset Iterator\n self.sess.run(self.data.iterator.initializer)\n # Initialize the test Dataset Iterator\n self.sess.run(self.data.test_iterator.initializer)\n if self.config.data_loader.validation:\n self.sess.run(self.data.valid_iterator.initializer)\n self.best_valid_loss = 0\n self.nb_without_improvements = 0\n\n def train_epoch(self):\n begin = time()\n # Attach the epoch loop to a variable\n loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))\n # Define the lists for summaries and losses\n gen_losses = []\n disc_losses = []\n enc_losses = []\n summaries = []\n\n # Get the current epoch counter\n cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)\n image = self.data.image\n for _ in loop:\n loop.set_description(\"Epoch:{}\".format(cur_epoch + 1))\n loop.refresh() # to show immediately the update\n sleep(0.01)\n gen, dis, enc, sum_g, sum_d = self.train_step(image, cur_epoch)\n gen_losses.append(gen)\n disc_losses.append(dis)\n enc_losses.append(enc)\n summaries.append(sum_g)\n summaries.append(sum_d)\n self.logger.info(\"Epoch {} terminated\".format(cur_epoch))\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries)\n # Check for reconstruction\n if cur_epoch % self.config.log.frequency_test == 0:\n noise = np.random.normal(\n loc=0.0, scale=1.0, size=[self.config.data_loader.test_batch, self.noise_dim]\n )\n image_eval = self.sess.run(image)\n feed_dict = {\n self.model.image_input: image_eval,\n self.model.noise_tensor: noise,\n self.model.is_training: False,\n }\n reconstruction = self.sess.run(self.model.sum_op_im, feed_dict=feed_dict)\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=[reconstruction])\n # Get the means of the loss values to display\n gen_m = np.mean(gen_losses)\n dis_m = np.mean(disc_losses)\n enc_m = np.mean(enc_losses)\n self.logger.info(\n \"Epoch: {} | time = {} s | loss gen= {:4f} | loss dis = {:4f} | loss enc = {:4f}\".format(\n cur_epoch, time() - begin, gen_m, dis_m, enc_m\n )\n )\n # Save the model state\n self.model.save(self.sess)\n if (\n cur_epoch + 1\n ) % self.config.trainer.frequency_eval == 0 and self.config.trainer.enable_early_stop:\n valid_loss = 0\n image_valid = self.sess.run(self.data.valid_image)\n noise = np.random.normal(\n loc=0.0, scale=1.0, size=[self.config.data_loader.test_batch, self.noise_dim]\n )\n feed_dict = {\n self.model.noise_tensor: noise,\n self.model.image_input: image_valid,\n self.model.is_training: False,\n }\n vl = self.sess.run([self.model.rec_error_valid], feed_dict=feed_dict)\n valid_loss += vl[0]\n if self.config.log.enable_summary:\n sm = self.sess.run(self.model.sum_op_valid, feed_dict=feed_dict)\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=[sm], summarizer=\"valid\")\n\n self.logger.info(\"Validation: valid loss {:.4f}\".format(valid_loss))\n if (\n valid_loss < self.best_valid_loss\n or cur_epoch == self.config.trainer.frequency_eval - 1\n ):\n self.best_valid_loss = valid_loss\n self.logger.info(\n \"Best model - valid loss = 
{:.4f} - saving...\".format(self.best_valid_loss)\n )\n # Save the model state\n self.model.save(self.sess)\n self.nb_without_improvements = 0\n else:\n self.nb_without_improvements += self.config.trainer.frequency_eval\n if self.nb_without_improvements > self.config.trainer.patience:\n self.patience_lost = True\n self.logger.warning(\n \"Early stopping at epoch {} with weights from epoch {}\".format(\n cur_epoch, cur_epoch - self.nb_without_improvements\n )\n )\n\n def test_epoch(self):\n self.logger.warn(\"Testing evaluation...\")\n scores_1 = []\n scores_2 = []\n inference_time = []\n true_labels = []\n summaries = []\n # Create the scores\n test_loop = tqdm(range(self.config.data_loader.num_iter_per_test))\n cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)\n for _ in test_loop:\n test_batch_begin = time()\n test_batch, test_labels = self.sess.run([self.data.test_image, self.data.test_label])\n test_loop.refresh() # to show immediately the update\n sleep(0.01)\n noise = np.random.normal(\n loc=0.0, scale=1.0, size=[self.config.data_loader.test_batch, self.noise_dim]\n )\n feed_dict = {\n self.model.image_input: test_batch,\n self.model.noise_tensor: noise,\n self.model.is_training: False,\n }\n scores_1 += self.sess.run(self.model.list_scores_1, feed_dict=feed_dict).tolist()\n scores_2 += self.sess.run(self.model.list_scores_2, feed_dict=feed_dict).tolist()\n summaries += self.sess.run([self.model.sum_op_im_test], feed_dict=feed_dict)\n inference_time.append(time() - test_batch_begin)\n true_labels += test_labels.tolist()\n # Since the higher anomaly score indicates the anomalous one, and we inverted the labels to show that\n # normal images are 0 meaning that contains no anomaly and anomalous images are 1 meaning that it contains\n # an anomalous region, we first scale the scores and then invert them to match the scores\n scores_1 = np.asarray(scores_1)\n scores_2 = np.asarray(scores_2)\n true_labels = np.asarray(true_labels)\n inference_time = np.mean(inference_time)\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries, summarizer=\"test\")\n self.logger.info(\"Testing: Mean inference time is {:4f}\".format(inference_time))\n step = self.sess.run(self.model.global_step_tensor)\n percentiles = np.asarray(self.config.trainer.percentiles)\n save_results(\n self.config.log.result_dir,\n scores_1,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"fm_1\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_2,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"fm_2\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n\n def train_step(self, image, cur_epoch):\n image_eval = self.sess.run(image)\n # Train the discriminator\n ld, sm_d = 0, None\n if self.config.trainer.mode == \"standard\":\n disc_iters = 1\n else:\n disc_iters = self.config.trainer.critic_iters\n for _ in range(disc_iters):\n noise = np.random.normal(loc=0.0, scale=1.0, size=[self.batch_size, self.noise_dim])\n true_labels, generated_labels = self.generate_labels(\n self.config.trainer.soft_labels, self.config.trainer.flip_labels\n )\n real_noise, fake_noise = self.generate_noise(\n self.config.trainer.include_noise, cur_epoch\n )\n feed_dict = {\n self.model.image_input: image_eval,\n self.model.noise_tensor: 
noise,\n self.model.generated_labels: generated_labels,\n self.model.true_labels: true_labels,\n self.model.real_noise: real_noise,\n self.model.fake_noise: fake_noise,\n self.model.is_training: True,\n }\n # Train Discriminator\n _, ld, sm_d = self.sess.run(\n [self.model.train_dis_op, self.model.loss_discriminator, self.model.sum_op_dis],\n feed_dict=feed_dict,\n )\n if self.config.trainer.mode == \"wgan\":\n _ = self.sess.run(self.model.clip_disc_weights)\n # Train Generator and Encoder\n noise = np.random.normal(loc=0.0, scale=1.0, size=[self.batch_size, self.noise_dim])\n true_labels, generated_labels = self.generate_labels(\n self.config.trainer.soft_labels, self.config.trainer.flip_labels\n )\n real_noise, fake_noise = self.generate_noise(self.config.trainer.include_noise, cur_epoch)\n feed_dict = {\n self.model.image_input: image_eval,\n self.model.noise_tensor: noise,\n self.model.generated_labels: generated_labels,\n self.model.true_labels: true_labels,\n self.model.real_noise: real_noise,\n self.model.fake_noise: fake_noise,\n self.model.is_training: True,\n }\n _, _, le, lg, sm_g = self.sess.run(\n [\n self.model.train_gen_op,\n self.model.train_enc_op,\n self.model.loss_encoder,\n self.model.loss_generator,\n self.model.sum_op_gen,\n ],\n feed_dict=feed_dict,\n )\n\n return lg, np.mean(ld), le, sm_g, sm_d\n\n def generate_labels(self, soft_labels, flip_labels):\n\n if not soft_labels:\n true_labels = np.ones((self.config.data_loader.batch_size, 1))\n generated_labels = np.zeros((self.config.data_loader.batch_size, 1))\n else:\n generated_labels = np.zeros(\n (self.config.data_loader.batch_size, 1)\n ) + np.random.uniform(low=0.0, high=0.1, size=[self.config.data_loader.batch_size, 1])\n flipped_idx = np.random.choice(\n np.arange(len(generated_labels)),\n size=int(self.config.trainer.noise_probability * len(generated_labels)),\n )\n generated_labels[flipped_idx] = 1 - generated_labels[flipped_idx]\n true_labels = np.ones((self.config.data_loader.batch_size, 1)) - np.random.uniform(\n low=0.0, high=0.1, size=[self.config.data_loader.batch_size, 1]\n )\n flipped_idx = np.random.choice(\n np.arange(len(true_labels)),\n size=int(self.config.trainer.noise_probability * len(true_labels)),\n )\n true_labels[flipped_idx] = 1 - true_labels[flipped_idx]\n if flip_labels:\n return generated_labels, true_labels\n else:\n return true_labels, generated_labels\n\n def generate_noise(self, include_noise, cur_epoch):\n sigma = max(0.75 * (10.0 - cur_epoch) / (10), 0.05)\n if include_noise:\n # If we want to add this is will add the noises\n real_noise = np.random.normal(\n scale=sigma,\n size=[self.config.data_loader.batch_size] + self.config.trainer.image_dims,\n )\n fake_noise = np.random.normal(\n scale=sigma,\n size=[self.config.data_loader.batch_size] + self.config.trainer.image_dims,\n )\n else:\n # Otherwise we are just going to add zeros which will not break anything\n real_noise = np.zeros(\n ([self.config.data_loader.batch_size] + self.config.trainer.image_dims)\n )\n fake_noise = np.zeros(\n ([self.config.data_loader.batch_size] + self.config.trainer.image_dims)\n )\n return real_noise, fake_noise\n",
"from base.base_train_sequential import BaseTrainSequential\nfrom tqdm import tqdm\nimport numpy as np\nfrom time import sleep\nfrom time import time\nfrom utils.evaluations import save_results\n\n\nclass SENCEBGANTrainer_Denoiser(BaseTrainSequential):\n def __init__(self, sess, model, data, config, logger):\n super(SENCEBGANTrainer_Denoiser, self).__init__(sess, model, data, config, logger)\n self.batch_size = self.config.data_loader.batch_size\n self.noise_dim = self.config.trainer.noise_dim\n self.img_dims = self.config.trainer.image_dims\n # Inititalize the train Dataset Iterator\n self.sess.run(self.data.iterator.initializer)\n # Initialize the test Dataset Iterator\n self.sess.run(self.data.test_iterator.initializer)\n if self.config.data_loader.validation:\n self.sess.run(self.data.valid_iterator.initializer)\n self.best_valid_loss = 0\n self.nb_without_improvements = 0\n\n def train_epoch_gan(self):\n # Attach the epoch loop to a variable\n begin = time()\n # Make the loop of the epoch iterations\n loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))\n gen_losses = []\n disc_losses = []\n summaries = []\n image = self.data.image\n cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)\n for _ in loop:\n loop.set_description(\"Epoch:{}\".format(cur_epoch + 1))\n loop.refresh() # to show immediately the update\n sleep(0.01)\n lg, ld, sum_g, sum_d = self.train_step_gan(image, cur_epoch)\n gen_losses.append(lg)\n disc_losses.append(ld)\n summaries.append(sum_g)\n summaries.append(sum_d)\n self.logger.info(\"Epoch {} terminated\".format(cur_epoch))\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries)\n\n # Check for reconstruction\n if cur_epoch % self.config.log.frequency_test == 0:\n noise = np.random.normal(\n loc=0.0, scale=1.0, size=[self.config.data_loader.test_batch, self.noise_dim]\n )\n image_eval = self.sess.run(image)\n feed_dict = {\n self.model.image_input: image_eval,\n self.model.noise_tensor: noise,\n self.model.is_training_gen: False,\n }\n reconstruction = self.sess.run(self.model.sum_op_im_1, feed_dict=feed_dict)\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=[reconstruction])\n gen_m = np.mean(gen_losses)\n dis_m = np.mean(disc_losses)\n self.logger.info(\n \"Epoch: {} | time = {} s | loss gen= {:4f} | loss dis = {:4f} \".format(\n cur_epoch, time() - begin, gen_m, dis_m\n )\n )\n self.model.save(self.sess)\n\n def train_epoch_enc_gen(self):\n # Attach the epoch loop to a variable\n begin = time()\n # Make the loop of the epoch iterations\n loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))\n enc_losses = []\n disc_xx_losses = []\n summaries = []\n image = self.data.image\n cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)\n for _ in loop:\n loop.set_description(\"Epoch:{}\".format(cur_epoch + 1))\n loop.refresh() # to show immediately the update\n sleep(0.01)\n le, sum_e, ldxx = self.train_step_enc_gen(image, cur_epoch)\n enc_losses.append(le)\n if self.config.trainer.enable_disc_xx:\n disc_xx_losses.append(ldxx)\n summaries.append(sum_e)\n self.logger.info(\"Epoch {} terminated\".format(cur_epoch))\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries, summarizer=\"valid\")\n # Check for reconstruction\n if cur_epoch % self.config.log.frequency_test == 0:\n noise = np.random.normal(\n loc=0.0, scale=1.0, size=[self.config.data_loader.test_batch, self.noise_dim]\n )\n image_eval = self.sess.run(image)\n feed_dict = {\n self.model.image_input: image_eval,\n self.model.noise_tensor: noise,\n 
self.model.is_training_gen: False,\n self.model.is_training_enc_g: False,\n self.model.is_training_enc_r: False,\n self.model.is_training_dis: False,\n }\n reconstruction = self.sess.run(self.model.sum_op_im_2, feed_dict=feed_dict)\n self.summarizer.add_tensorboard(\n step=cur_epoch, summaries=[reconstruction], summarizer=\"valid\"\n )\n enc_m = np.mean(enc_losses)\n if self.config.trainer.enable_disc_xx:\n dis_xx_m = np.mean(disc_xx_losses)\n self.logger.info(\n \"Epoch: {} | time = {} s | loss enc generation= {:4f} | loss dis xx = {:4f}\".format(\n cur_epoch, time() - begin, enc_m, dis_xx_m\n )\n )\n else:\n self.logger.info(\n \"Epoch: {} | time = {} s | loss enc generation= {:4f} \".format(\n cur_epoch, time() - begin, enc_m\n )\n )\n self.model.save(self.sess)\n\n def train_epoch_enc_rec(self):\n # Attach the epoch loop to a variable\n begin = time()\n # Make the loop of the epoch iterations\n loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))\n enc_losses = []\n summaries = []\n image = self.data.image\n cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)\n for _ in loop:\n loop.set_description(\"Epoch:{}\".format(cur_epoch + 1))\n loop.refresh() # to show immediately the update\n sleep(0.01)\n le, sum_e = self.train_step_enc_rec(image, cur_epoch)\n enc_losses.append(le)\n summaries.append(sum_e)\n self.logger.info(\"Epoch {} terminated\".format(cur_epoch))\n self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries, summarizer=\"valid_2\")\n enc_m = np.mean(enc_losses)\n self.logger.info(\n \"Epoch: {} | time = {} s | loss Denoiser= {:4f} \".format(\n cur_epoch, time() - begin, enc_m\n )\n )\n self.model.save(self.sess)\n\n def train_step_gan(self, image, cur_epoch):\n ld_t, lg_t, sm_g, sm_d = [], [], None, None\n image_eval = self.sess.run(image)\n if self.config.trainer.mode == \"standard\":\n disc_iters = 1\n else:\n disc_iters = self.config.trainer.critic_iters\n for _ in range(disc_iters):\n noise = np.random.normal(loc=0.0, scale=1.0, size=[self.batch_size, self.noise_dim])\n feed_dict = {\n self.model.image_input: image_eval,\n self.model.noise_tensor: noise,\n self.model.is_training_gen: True,\n self.model.is_training_dis: True,\n self.model.is_training_enc_g: False,\n self.model.is_training_enc_r: False,\n }\n _, ld, sm_d = self.sess.run(\n [self.model.train_dis_op, self.model.loss_discriminator, self.model.sum_op_dis],\n feed_dict=feed_dict,\n )\n ld_t.append(ld)\n\n if self.config.trainer.mode == \"standard\":\n gen_iters = 1\n else:\n gen_iters = 3\n for _ in range(gen_iters):\n image_eval = self.sess.run(image)\n noise = np.random.normal(loc=0.0, scale=1.0, size=[self.batch_size, self.noise_dim])\n feed_dict = {\n self.model.image_input: image_eval,\n self.model.noise_tensor: noise,\n self.model.is_training_gen: True,\n self.model.is_training_dis: True,\n self.model.is_training_enc_g: False,\n self.model.is_training_enc_r: False,\n }\n _, lg, sm_g = self.sess.run(\n [self.model.train_gen_op, self.model.loss_generator, self.model.sum_op_gen],\n feed_dict=feed_dict,\n )\n lg_t.append(lg)\n\n return np.mean(lg_t), np.mean(ld_t), sm_g, sm_d\n\n def train_step_enc_gen(self, image, cur_epoch):\n image_eval = self.sess.run(image)\n noise = np.random.normal(loc=0.0, scale=1.0, size=[self.batch_size, self.noise_dim])\n ldxx = 0\n feed_dict = {\n self.model.image_input: image_eval,\n self.model.noise_tensor: noise,\n self.model.is_training_gen: False,\n self.model.is_training_dis: False,\n self.model.is_training_enc_g: True,\n 
self.model.is_training_enc_r: False,\n }\n if self.config.trainer.enable_disc_xx:\n _, le, sm_e, = self.sess.run(\n [self.model.train_enc_g_op, self.model.loss_encoder_g, self.model.sum_op_enc_g],\n feed_dict=feed_dict,\n )\n _, ldxx = self.sess.run(\n [self.model.train_dis_op_xx, self.model.dis_loss_xx], feed_dict=feed_dict\n )\n # Additional generator discriminator training\n # _ = self.sess.run([self.model.train_gen_op], feed_dict=feed_dict)\n # _ = self.sess.run([self.model.train_dis_op], feed_dict=feed_dict)\n else:\n _, le, sm_e = self.sess.run(\n [self.model.train_enc_g_op, self.model.loss_encoder_g, self.model.sum_op_enc_g],\n feed_dict=feed_dict,\n )\n return le, sm_e, ldxx\n\n def train_step_enc_rec(self, image, cur_epoch):\n image_eval = self.sess.run(image)\n noise = np.random.normal(loc=0.0, scale=1.0, size=[self.batch_size, self.noise_dim])\n noise_2 = np.random.normal(\n loc=0.0,\n scale=1.0,\n size=[self.config.data_loader.batch_size] + self.config.trainer.image_dims,\n )\n \n feed_dict = {\n self.model.image_input: image_eval,\n self.model.denoiser_noise: noise_2,\n self.model.is_training_gen: False,\n self.model.is_training_dis: False,\n self.model.is_training_enc_g: False,\n self.model.is_training_enc_r: True,\n }\n _, le, sm_e = self.sess.run(\n [self.model.train_den_op, self.model.den_loss, self.model.sum_op_den],\n feed_dict=feed_dict,\n )\n return le, sm_e\n\n def test_epoch(self):\n self.logger.warn(\"Testing evaluation...\")\n scores_im1 = []\n scores_im2 = []\n scores_comb = []\n scores_mask1 = []\n scores_mask2 = []\n scores_pipe = []\n scores_pipe_2 = []\n inference_time = []\n true_labels = []\n # Create the scores\n test_loop = tqdm(range(self.config.data_loader.num_iter_per_test))\n for _ in test_loop:\n test_batch_begin = time()\n test_batch, test_labels = self.sess.run([self.data.test_image, self.data.test_label])\n test_loop.refresh() # to show immediately the update\n sleep(0.01)\n noise = np.random.normal(\n loc=0.0, scale=1.0, size=[self.config.data_loader.test_batch, self.noise_dim]\n )\n feed_dict = {\n self.model.image_input: test_batch,\n self.model.noise_tensor: noise,\n self.model.is_training_gen: False,\n self.model.is_training_dis: False,\n self.model.is_training_enc_g: False,\n self.model.is_training_enc_r: False,\n }\n scores_im1 += self.sess.run(self.model.img_score_l1, feed_dict=feed_dict).tolist()\n scores_im2 += self.sess.run(self.model.img_score_l2, feed_dict=feed_dict).tolist()\n scores_comb += self.sess.run(self.model.score_comb, feed_dict=feed_dict).tolist()\n scores_mask1 += self.sess.run(self.model.mask_score_1, feed_dict=feed_dict).tolist()\n scores_mask2 += self.sess.run(self.model.mask_score_2, feed_dict=feed_dict).tolist()\n scores_pipe += self.sess.run(self.model.pipe_score, feed_dict=feed_dict).tolist()\n scores_pipe_2 += self.sess.run(self.model.pipe_score_2, feed_dict=feed_dict).tolist()\n if self.config.trainer.enable_disc_xx:\n # scores_final_3 += self.sess.run(\n # self.model.final_score_3, feed_dict=feed_dict\n # ).tolist()\n scores_final_4 += self.sess.run(\n self.model.final_score_4, feed_dict=feed_dict\n ).tolist()\n if self.config.trainer.enable_disc_zz:\n # scores_final_5 += self.sess.run(\n # self.model.final_score_5, feed_dict=feed_dict\n # ).tolist()\n scores_final_6 += self.sess.run(\n self.model.final_score_6, feed_dict=feed_dict\n ).tolist()\n inference_time.append(time() - test_batch_begin)\n true_labels += test_labels.tolist()\n scores_im1 = np.asarray(scores_im1)\n scores_im2 = np.asarray(scores_im2)\n 
scores_comb = np.asarray(scores_comb)\n scores_pipe = np.asarray(scores_pipe)\n scores_pipe_2 = np.asarray(scores_pipe_2)\n scores_mask1 = np.asarray(scores_mask1)\n scores_mask2 = np.asarray(scores_mask2)\n if self.config.trainer.enable_disc_xx:\n #scores_final_3 = np.asarray(scores_final_3)\n scores_final_4 = np.asarray(scores_final_4)\n if self.config.trainer.enable_disc_zz:\n #scores_final_5 = np.asarray(scores_final_5)\n scores_final_6 = np.asarray(scores_final_6)\n true_labels = np.asarray(true_labels)\n inference_time = np.mean(inference_time)\n self.logger.info(\"Testing: Mean inference time is {:4f}\".format(inference_time))\n step = self.sess.run(self.model.global_step_tensor)\n percentiles = np.asarray(self.config.trainer.percentiles)\n save_results(\n self.config.log.result_dir,\n scores_im1,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"im1\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_im2,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"im2\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_comb,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"comb\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_mask1,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"mask_1\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_mask2,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"mask_2\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_pipe,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"scores_pipe_1\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n save_results(\n self.config.log.result_dir,\n scores_pipe_2,\n true_labels,\n self.config.model.name,\n self.config.data_loader.dataset_name,\n \"scores_pipe_2\",\n \"paper\",\n self.config.trainer.label,\n self.config.data_loader.random_seed,\n self.logger,\n step,\n percentile=percentiles,\n )\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.layers.dropout",
"tensorflow.reduce_sum",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.tanh",
"tensorflow.train.AdamOptimizer",
"tensorflow.group",
"tensorflow.summary.scalar",
"tensorflow.layers.batch_normalization",
"tensorflow.layers.Conv2DTranspose",
"tensorflow.get_collection",
"tensorflow.summary.image",
"tensorflow.squeeze",
"tensorflow.layers.dense",
"tensorflow.name_scope",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.square",
"tensorflow.train.Saver",
"tensorflow.random_normal_initializer",
"tensorflow.matmul",
"tensorflow.norm",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.layers.Dense",
"tensorflow.nn.tanh",
"tensorflow.zeros_like",
"tensorflow.summary.merge_all",
"tensorflow.layers.Flatten",
"tensorflow.layers.Conv2D",
"tensorflow.summary.merge",
"tensorflow.nn.leaky_relu",
"tensorflow.nn.relu",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.variable_scope",
"tensorflow.squared_difference",
"tensorflow.math.maximum"
],
[
"numpy.asarray",
"numpy.ones",
"numpy.random.normal",
"numpy.mean",
"numpy.random.uniform",
"numpy.zeros"
],
[
"numpy.asarray",
"numpy.random.normal",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
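For orientation, the three trailing lists in the row above line up element-for-element: each dumped source string is paired with the fully-qualified API names it calls and with a mapping of libraries to inferred version constraints (only the first entry pins `tensorflow` to `1.10`; the other two leave every library unconstrained). Below is a minimal sketch of walking those parallel lists. The values are trimmed stand-ins and the variable names are illustrative assumptions, not part of the dump itself.

```python
# Minimal sketch: three parallel lists mirroring the trailing columns of the
# row above (trimmed stand-ins; names here are illustrative, not from the dump).
code_strings = [
    "<model source>",
    "<BIGANTrainer source>",
    "<SENCEBGANTrainer_Denoiser source>",
]
api_lists = [
    ["tensorflow.get_variable", "tensorflow.concat", "tensorflow.summary.image"],  # truncated
    ["numpy.asarray", "numpy.ones", "numpy.random.normal", "numpy.mean"],
    ["numpy.asarray", "numpy.random.normal", "numpy.mean"],
]
version_maps = [
    {"tensorflow": ["1.10"], "numpy": []},
    {"tensorflow": [], "numpy": []},
    {"tensorflow": [], "numpy": []},
]

# Pair each source string with its API usage and version constraints.
for source, apis, versions in zip(code_strings, api_lists, version_maps):
    uses_tf = any(name.startswith("tensorflow.") for name in apis)
    pinned = versions.get("tensorflow") or "unconstrained"
    print(f"{source[:30]:32s} tensorflow used: {uses_tf}, version: {pinned}")
```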